ia64/xen-unstable

changeset 19024:1dfc48a8c361

AMD IOMMU: Allocate I/O pagetable from domheap instead of xenheap

Signed-off-by: Wei Wang <wei.wang2@amd.com>
Author:   Keir Fraser <keir.fraser@citrix.com>
Date:     Fri Jan 09 15:57:57 2009 +0000 (2009-01-09)
Parents:  6d040d138e8f
Children: b999142bca8c
Files:    xen/drivers/passthrough/amd/iommu_init.c
          xen/drivers/passthrough/amd/iommu_intr.c
          xen/drivers/passthrough/amd/iommu_map.c
          xen/drivers/passthrough/amd/pci_amd_iommu.c
          xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
          xen/include/xen/hvm/iommu.h
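The changeset replaces xenheap-backed I/O page tables with domheap pages that are mapped only transiently through map_domain_page() while the IOMMU code initialises or walks them; page table references are kept as struct page_info pointers and machine addresses instead of permanently mapped virtual addresses. The following is a minimal sketch of the two patterns, not an excerpt from the patch; the helper names are made up for illustration, while the interfaces used (alloc_xenheap_page, alloc_domheap_page, map_domain_page, page_to_mfn, page_to_maddr) are the same ones the diff below relies on.

    /*
     * Minimal sketch (hypothetical helper names, indicative header set):
     * the allocation pattern this changeset moves away from, and the one
     * it moves to.
     */
    #include <xen/mm.h>
    #include <xen/string.h>
    #include <xen/domain_page.h>

    /* Old: xenheap page, permanently mapped; a virtual pointer is stored. */
    static void *alloc_table_xenheap(void)
    {
        void *table = alloc_xenheap_page();

        if ( table == NULL )
            return NULL;
        memset(table, 0, PAGE_SIZE);
        return table;              /* stored directly, e.g. hd->root_table */
    }

    /* New: domheap page, mapped only while it is being touched. */
    static struct page_info *alloc_table_domheap(void)
    {
        struct page_info *pg = alloc_domheap_page(NULL, 0);
        void *vaddr;

        if ( pg == NULL )
            return NULL;
        vaddr = map_domain_page(page_to_mfn(pg));   /* transient mapping */
        memset(vaddr, 0, PAGE_SIZE);
        unmap_domain_page(vaddr);
        return pg;         /* callers record page_to_maddr(pg) in PDE/DTE */
    }

The diff follows.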
     1.1 --- a/xen/drivers/passthrough/amd/iommu_init.c	Fri Jan 09 13:00:10 2009 +0000
     1.2 +++ b/xen/drivers/passthrough/amd/iommu_init.c	Fri Jan 09 15:57:57 2009 +0000
     1.3 @@ -535,10 +535,11 @@ void __init enable_iommu(struct amd_iomm
     1.4  static void __init deallocate_iommu_table_struct(
     1.5      struct table_struct *table)
     1.6  {
     1.7 +    int order = 0;
     1.8      if ( table->buffer )
     1.9      {
    1.10 -        free_xenheap_pages(table->buffer,
    1.11 -            get_order_from_bytes(table->alloc_size));
    1.12 +        order = get_order_from_bytes(table->alloc_size);
    1.13 +        __free_amd_iommu_tables(table->buffer, order);
    1.14          table->buffer = NULL;
    1.15      }
    1.16  }
    1.17 @@ -552,16 +553,19 @@ static void __init deallocate_iommu_tabl
    1.18  static int __init allocate_iommu_table_struct(struct table_struct *table,
    1.19                                                const char *name)
    1.20  {
    1.21 -    table->buffer = (void *) alloc_xenheap_pages(
    1.22 -        get_order_from_bytes(table->alloc_size));
    1.23 -
    1.24 -    if ( !table->buffer )
    1.25 +    int order = 0;
    1.26 +    if ( table->buffer == NULL )
    1.27      {
    1.28 -        amd_iov_error("Error allocating %s\n", name);
    1.29 -        return -ENOMEM;
    1.30 +        order = get_order_from_bytes(table->alloc_size);
    1.31 +        table->buffer = __alloc_amd_iommu_tables(order);
    1.32 +
    1.33 +        if ( table->buffer == NULL )
    1.34 +        {
    1.35 +            amd_iov_error("Error allocating %s\n", name);
    1.36 +            return -ENOMEM;
    1.37 +        }
    1.38 +        memset(table->buffer, 0, PAGE_SIZE * (1UL << order));
    1.39      }
    1.40 -
    1.41 -    memset(table->buffer, 0, table->alloc_size);
    1.42      return 0;
    1.43  }
    1.44  
     2.1 --- a/xen/drivers/passthrough/amd/iommu_intr.c	Fri Jan 09 13:00:10 2009 +0000
     2.2 +++ b/xen/drivers/passthrough/amd/iommu_intr.c	Fri Jan 09 15:57:57 2009 +0000
     2.3 @@ -22,6 +22,7 @@
     2.4  #include <asm/amd-iommu.h>
     2.5  #include <asm/hvm/svm/amd-iommu-proto.h>
     2.6  
     2.7 +#define INTREMAP_TABLE_ORDER    1
     2.8  DEFINE_SPINLOCK(int_remap_table_lock);
     2.9  void *int_remap_table = NULL;
    2.10  
    2.11 @@ -112,14 +113,17 @@ int __init amd_iommu_setup_intremap_tabl
    2.12      unsigned long flags;
    2.13  
    2.14      spin_lock_irqsave(&int_remap_table_lock, flags);
    2.15 +
    2.16      if ( int_remap_table == NULL )
    2.17 -        int_remap_table = (void *)alloc_xenheap_pages(1);
    2.18 -    if ( !int_remap_table )
    2.19      {
    2.20 -        spin_unlock_irqrestore(&int_remap_table_lock, flags);
    2.21 -        return -ENOMEM;
    2.22 +        int_remap_table = __alloc_amd_iommu_tables(INTREMAP_TABLE_ORDER);
    2.23 +        if ( int_remap_table == NULL )
    2.24 +        {
    2.25 +            spin_unlock_irqrestore(&int_remap_table_lock, flags);
    2.26 +            return -ENOMEM;
    2.27 +        }
    2.28 +        memset(int_remap_table, 0, PAGE_SIZE * (1UL << INTREMAP_TABLE_ORDER));
    2.29      }
    2.30 -    memset((u8*)int_remap_table, 0, PAGE_SIZE*2);
    2.31      spin_unlock_irqrestore(&int_remap_table_lock, flags);
    2.32  
    2.33      return 0;
    2.34 @@ -211,7 +215,7 @@ int __init deallocate_intremap_table(voi
    2.35      spin_lock_irqsave(&int_remap_table_lock, flags);
    2.36      if ( int_remap_table )
    2.37      {
    2.38 -        free_xenheap_pages(int_remap_table, 1);
    2.39 +        __free_amd_iommu_tables(int_remap_table, INTREMAP_TABLE_ORDER);
    2.40          int_remap_table = NULL;
    2.41      }
    2.42      spin_unlock_irqrestore(&int_remap_table_lock, flags);
     3.1 --- a/xen/drivers/passthrough/amd/iommu_map.c	Fri Jan 09 13:00:10 2009 +0000
     3.2 +++ b/xen/drivers/passthrough/amd/iommu_map.c	Fri Jan 09 15:57:57 2009 +0000
     3.3 @@ -159,21 +159,39 @@ void flush_command_buffer(struct amd_iom
     3.4      }
     3.5  }
     3.6  
     3.7 -static void clear_page_table_entry_present(u32 *pte)
     3.8 +static void clear_iommu_l1e_present(u64 l2e, unsigned long gfn)
     3.9  {
    3.10 -    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, pte[0],
    3.11 -                         IOMMU_PTE_PRESENT_MASK,
    3.12 -                         IOMMU_PTE_PRESENT_SHIFT, &pte[0]);
    3.13 +    u32 *l1e;
    3.14 +    int offset;
    3.15 +    void *l1_table;
    3.16 +
    3.17 +    l1_table = map_domain_page(l2e >> PAGE_SHIFT);
    3.18 +
    3.19 +    offset = gfn & (~PTE_PER_TABLE_MASK);
    3.20 +    l1e = (u32*)(l1_table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE));
    3.21 +
    3.22 +    /* clear l1 entry */
    3.23 +    l1e[0] = l1e[1] = 0;
    3.24 +
    3.25 +    unmap_domain_page(l1_table);
    3.26  }
    3.27  
    3.28 -static void set_page_table_entry_present(u32 *pte, u64 page_addr,
    3.29 -                                         int iw, int ir)
    3.30 +static void set_iommu_l1e_present(u64 l2e, unsigned long gfn,
    3.31 +                                 u64 maddr, int iw, int ir)
    3.32  {
    3.33      u64 addr_lo, addr_hi;
    3.34      u32 entry;
    3.35 +    void *l1_table;
    3.36 +    int offset;
    3.37 +    u32 *l1e;
    3.38  
    3.39 -    addr_lo = page_addr & DMA_32BIT_MASK;
    3.40 -    addr_hi = page_addr >> 32;
    3.41 +    l1_table = map_domain_page(l2e >> PAGE_SHIFT);
    3.42 +
    3.43 +    offset = gfn & (~PTE_PER_TABLE_MASK);
    3.44 +    l1e = (u32*)((u8*)l1_table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE));
    3.45 +
    3.46 +    addr_lo = maddr & DMA_32BIT_MASK;
    3.47 +    addr_hi = maddr >> 32;
    3.48  
    3.49      set_field_in_reg_u32((u32)addr_hi, 0,
    3.50                           IOMMU_PTE_ADDR_HIGH_MASK,
    3.51 @@ -186,7 +204,7 @@ static void set_page_table_entry_present
    3.52                           IOMMU_CONTROL_DISABLED, entry,
    3.53                           IOMMU_PTE_IO_READ_PERMISSION_MASK,
    3.54                           IOMMU_PTE_IO_READ_PERMISSION_SHIFT, &entry);
    3.55 -    pte[1] = entry;
    3.56 +    l1e[1] = entry;
    3.57  
    3.58      set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
    3.59                           IOMMU_PTE_ADDR_LOW_MASK,
    3.60 @@ -197,10 +215,11 @@ static void set_page_table_entry_present
    3.61      set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
    3.62                           IOMMU_PTE_PRESENT_MASK,
    3.63                           IOMMU_PTE_PRESENT_SHIFT, &entry);
    3.64 -    pte[0] = entry;
    3.65 +    l1e[0] = entry;
    3.66 +
    3.67 +    unmap_domain_page(l1_table);
    3.68  }
    3.69  
    3.70 -
    3.71  static void amd_iommu_set_page_directory_entry(u32 *pde, 
    3.72                                                 u64 next_ptr, u8 next_level)
    3.73  {
    3.74 @@ -327,7 +346,7 @@ void amd_iommu_set_dev_table_entry(u32 *
    3.75      dte[0] = entry;
    3.76  }
    3.77  
    3.78 -void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry)
    3.79 +u64 amd_iommu_get_next_table_from_pte(u32 *entry)
    3.80  {
    3.81      u64 addr_lo, addr_hi, ptr;
    3.82  
    3.83 @@ -342,7 +361,7 @@ void *amd_iommu_get_vptr_from_page_table
    3.84          IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT);
    3.85  
    3.86      ptr = (addr_hi << 32) | (addr_lo << PAGE_SHIFT);
    3.87 -    return ptr ? maddr_to_virt((unsigned long)ptr) : NULL;
    3.88 +    return ptr;
    3.89  }
    3.90  
    3.91  static int amd_iommu_is_pte_present(u32 *entry)
    3.92 @@ -381,54 +400,53 @@ int amd_iommu_is_dte_page_translation_va
    3.93                                     IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT));
    3.94  }
    3.95  
    3.96 -static void *get_pte_from_page_tables(void *table, int level,
    3.97 -                                      unsigned long io_pfn)
    3.98 +static u64 iommu_l2e_from_pfn(struct page_info *table, int level,
    3.99 +                              unsigned long io_pfn)
   3.100  {
   3.101      unsigned long offset;
   3.102      void *pde = NULL;
   3.103 +    void *table_vaddr;
   3.104 +    u64 next_table_maddr = 0;
   3.105  
   3.106 -    BUG_ON(table == NULL);
   3.107 +    BUG_ON( table == NULL || level == 0 );
   3.108  
   3.109 -    while ( level > 0 )
   3.110 +    while ( level > 1 )
   3.111      {
   3.112          offset = io_pfn >> ((PTE_PER_TABLE_SHIFT *
   3.113                               (level - IOMMU_PAGING_MODE_LEVEL_1)));
   3.114          offset &= ~PTE_PER_TABLE_MASK;
   3.115 -        pde = table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);
   3.116  
   3.117 -        if ( level == 1 )
   3.118 -            break;
   3.119 -        if ( !pde )
   3.120 -            return NULL;
   3.121 +        table_vaddr = map_domain_page(page_to_mfn(table));
   3.122 +        pde = table_vaddr + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);
   3.123 +        next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
   3.124 +
   3.125          if ( !amd_iommu_is_pte_present(pde) )
   3.126          {
   3.127 -            void *next_table = alloc_xenheap_page();
   3.128 -            if ( next_table == NULL )
   3.129 -                return NULL;
   3.130 -            memset(next_table, 0, PAGE_SIZE);
   3.131 -            if ( *(u64 *)pde == 0 )
   3.132 +            if ( next_table_maddr == 0 )
   3.133              {
   3.134 -                unsigned long next_ptr = (u64)virt_to_maddr(next_table);
   3.135 +                table = alloc_amd_iommu_pgtable();
   3.136 +                if ( table == NULL )
   3.137 +                    return 0;
   3.138 +                next_table_maddr = page_to_maddr(table);
   3.139                  amd_iommu_set_page_directory_entry(
   3.140 -                    (u32 *)pde, next_ptr, level - 1);
   3.141 +                    (u32 *)pde, next_table_maddr, level - 1);
   3.142              }
   3.143 -            else
   3.144 -            {
   3.145 -                free_xenheap_page(next_table);
   3.146 -            }
   3.147 +            else /* should never reach here */
   3.148 +                return 0;
   3.149          }
   3.150 -        table = amd_iommu_get_vptr_from_page_table_entry(pde);
   3.151 +
   3.152 +        unmap_domain_page(table_vaddr);
   3.153 +        table = maddr_to_page(next_table_maddr);
   3.154          level--;
   3.155      }
   3.156  
   3.157 -    return pde;
   3.158 +    return next_table_maddr;
   3.159  }
   3.160  
   3.161  int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
   3.162  {
   3.163 -    void *pte;
   3.164 +    u64 iommu_l2e;
   3.165      unsigned long flags;
   3.166 -    u64 maddr;
   3.167      struct hvm_iommu *hd = domain_hvm_iommu(d);
   3.168      int iw = IOMMU_IO_WRITE_ENABLED;
   3.169      int ir = IOMMU_IO_READ_ENABLED;
   3.170 @@ -440,16 +458,15 @@ int amd_iommu_map_page(struct domain *d,
   3.171      if ( is_hvm_domain(d) && !hd->p2m_synchronized )
   3.172          goto out;
   3.173  
   3.174 -    maddr = (u64)mfn << PAGE_SHIFT;
   3.175 -    pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
   3.176 -    if ( pte == NULL )
   3.177 +    iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
   3.178 +    if ( iommu_l2e == 0 )
   3.179      {
   3.180          amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
   3.181          spin_unlock_irqrestore(&hd->mapping_lock, flags);
   3.182          return -EFAULT;
   3.183      }
   3.184 +    set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
   3.185  
   3.186 -    set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
   3.187  out:
   3.188      spin_unlock_irqrestore(&hd->mapping_lock, flags);
   3.189      return 0;
   3.190 @@ -457,10 +474,8 @@ out:
   3.191  
   3.192  int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
   3.193  {
   3.194 -    void *pte;
   3.195 +    u64 iommu_l2e;
   3.196      unsigned long flags;
   3.197 -    u64 io_addr = gfn;
   3.198 -    int requestor_id;
   3.199      struct amd_iommu *iommu;
   3.200      struct hvm_iommu *hd = domain_hvm_iommu(d);
   3.201  
   3.202 @@ -474,11 +489,9 @@ int amd_iommu_unmap_page(struct domain *
   3.203          return 0;
   3.204      }
   3.205  
   3.206 -    requestor_id = hd->domain_id;
   3.207 -    io_addr = (u64)gfn << PAGE_SHIFT;
   3.208 +    iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
   3.209  
   3.210 -    pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
   3.211 -    if ( pte == NULL )
   3.212 +    if ( iommu_l2e == 0 )
   3.213      {
   3.214          amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
   3.215          spin_unlock_irqrestore(&hd->mapping_lock, flags);
   3.216 @@ -486,14 +499,14 @@ int amd_iommu_unmap_page(struct domain *
   3.217      }
   3.218  
   3.219      /* mark PTE as 'page not present' */
   3.220 -    clear_page_table_entry_present((u32 *)pte);
   3.221 +    clear_iommu_l1e_present(iommu_l2e, gfn);
   3.222      spin_unlock_irqrestore(&hd->mapping_lock, flags);
   3.223  
   3.224      /* send INVALIDATE_IOMMU_PAGES command */
   3.225      for_each_amd_iommu ( iommu )
   3.226      {
   3.227          spin_lock_irqsave(&iommu->lock, flags);
   3.228 -        invalidate_iommu_page(iommu, io_addr, requestor_id);
   3.229 +        invalidate_iommu_page(iommu, (u64)gfn << PAGE_SHIFT, hd->domain_id);
   3.230          flush_command_buffer(iommu);
   3.231          spin_unlock_irqrestore(&iommu->lock, flags);
   3.232      }
   3.233 @@ -506,8 +519,8 @@ int amd_iommu_reserve_domain_unity_map(
   3.234      unsigned long phys_addr,
   3.235      unsigned long size, int iw, int ir)
   3.236  {
   3.237 +    u64 iommu_l2e;
   3.238      unsigned long flags, npages, i;
   3.239 -    void *pte;
   3.240      struct hvm_iommu *hd = domain_hvm_iommu(domain);
   3.241  
   3.242      npages = region_to_pages(phys_addr, size);
   3.243 @@ -515,17 +528,20 @@ int amd_iommu_reserve_domain_unity_map(
   3.244      spin_lock_irqsave(&hd->mapping_lock, flags);
   3.245      for ( i = 0; i < npages; ++i )
   3.246      {
   3.247 -        pte = get_pte_from_page_tables(
   3.248 +        iommu_l2e = iommu_l2e_from_pfn(
   3.249              hd->root_table, hd->paging_mode, phys_addr >> PAGE_SHIFT);
   3.250 -        if ( pte == NULL )
   3.251 +
   3.252 +        if ( iommu_l2e == 0 )
   3.253          {
   3.254              amd_iov_error(
   3.255              "Invalid IO pagetable entry phys_addr = %lx\n", phys_addr);
   3.256              spin_unlock_irqrestore(&hd->mapping_lock, flags);
   3.257              return -EFAULT;
   3.258          }
   3.259 -        set_page_table_entry_present((u32 *)pte,
   3.260 -                                     phys_addr, iw, ir);
   3.261 +
   3.262 +        set_iommu_l1e_present(iommu_l2e,
   3.263 +            (phys_addr >> PAGE_SHIFT), phys_addr, iw, ir);
   3.264 +
   3.265          phys_addr += PAGE_SIZE;
   3.266      }
   3.267      spin_unlock_irqrestore(&hd->mapping_lock, flags);
   3.268 @@ -535,8 +551,7 @@ int amd_iommu_reserve_domain_unity_map(
   3.269  int amd_iommu_sync_p2m(struct domain *d)
   3.270  {
   3.271      unsigned long mfn, gfn, flags;
   3.272 -    void *pte;
   3.273 -    u64 maddr;
   3.274 +    u64 iommu_l2e;
   3.275      struct list_head *entry;
   3.276      struct page_info *page;
   3.277      struct hvm_iommu *hd;
   3.278 @@ -563,15 +578,16 @@ int amd_iommu_sync_p2m(struct domain *d)
   3.279          if ( gfn == INVALID_M2P_ENTRY )
   3.280              continue;
   3.281  
   3.282 -        maddr = (u64)mfn << PAGE_SHIFT;
   3.283 -        pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
   3.284 -        if ( pte == NULL )
   3.285 +        iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
   3.286 +
   3.287 +        if ( iommu_l2e == 0 )
   3.288          {
   3.289              amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
   3.290              spin_unlock_irqrestore(&hd->mapping_lock, flags);
   3.291              return -EFAULT;
   3.292          }
   3.293 -        set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
   3.294 +
   3.295 +        set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
   3.296      }
   3.297  
   3.298      hd->p2m_synchronized = 1;
     4.1 --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Jan 09 13:00:10 2009 +0000
     4.2 +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Jan 09 15:57:57 2009 +0000
     4.3 @@ -29,17 +29,6 @@ extern unsigned short ivrs_bdf_entries;
     4.4  extern struct ivrs_mappings *ivrs_mappings;
     4.5  extern void *int_remap_table;
     4.6  
     4.7 -static void deallocate_domain_page_tables(struct hvm_iommu *hd)
     4.8 -{
     4.9 -    if ( hd->root_table )
    4.10 -        free_xenheap_page(hd->root_table);
    4.11 -}
    4.12 -
    4.13 -static void deallocate_domain_resources(struct hvm_iommu *hd)
    4.14 -{
    4.15 -    deallocate_domain_page_tables(hd);
    4.16 -}
    4.17 -
    4.18  int __init amd_iommu_init(void)
    4.19  {
    4.20      struct amd_iommu *iommu;
    4.21 @@ -79,8 +68,6 @@ static void amd_iommu_setup_domain_devic
    4.22      struct domain *domain, struct amd_iommu *iommu, int bdf)
    4.23  {
    4.24      void *dte;
    4.25 -    u64 root_ptr;
    4.26 -    u64 intremap_ptr;
    4.27      unsigned long flags;
    4.28      int req_id;
    4.29      u8 sys_mgt, dev_ex;
    4.30 @@ -88,22 +75,21 @@ static void amd_iommu_setup_domain_devic
    4.31  
    4.32      BUG_ON( !hd->root_table || !hd->paging_mode || !int_remap_table );
    4.33  
    4.34 -    root_ptr = (u64)virt_to_maddr(hd->root_table);
    4.35      /* get device-table entry */
    4.36      req_id = ivrs_mappings[bdf].dte_requestor_id;
    4.37 -    dte = iommu->dev_table.buffer +
    4.38 -        (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
    4.39 +    dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
    4.40  
    4.41 -    intremap_ptr = (u64)virt_to_maddr(int_remap_table);
    4.42 +    spin_lock_irqsave(&iommu->lock, flags);
    4.43  
    4.44      if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
    4.45      {
    4.46 -        spin_lock_irqsave(&iommu->lock, flags); 
    4.47 -
    4.48          /* bind DTE to domain page-tables */
    4.49          sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
    4.50          dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;
    4.51 -        amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr, intremap_ptr,
    4.52 +
    4.53 +        amd_iommu_set_dev_table_entry((u32 *)dte,
    4.54 +                                      page_to_maddr(hd->root_table),
    4.55 +                                      virt_to_maddr(int_remap_table),
    4.56                                        hd->domain_id, sys_mgt, dev_ex,
    4.57                                        hd->paging_mode);
    4.58  
    4.59 @@ -111,11 +97,15 @@ static void amd_iommu_setup_domain_devic
    4.60          invalidate_interrupt_table(iommu, req_id);
    4.61          flush_command_buffer(iommu);
    4.62          amd_iov_info("Enable DTE:0x%x, "
    4.63 -                "root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
    4.64 -                req_id, root_ptr, hd->domain_id, hd->paging_mode);
    4.65 +                "root_table:%"PRIx64", interrupt_table:%"PRIx64", "
    4.66 +                "domain_id:%d, paging_mode:%d\n",
    4.67 +                req_id, (u64)page_to_maddr(hd->root_table),
    4.68 +                (u64)virt_to_maddr(int_remap_table), hd->domain_id,
    4.69 +                hd->paging_mode);
    4.70 +    }
    4.71  
    4.72 -        spin_unlock_irqrestore(&iommu->lock, flags);
    4.73 -    }
    4.74 +    spin_unlock_irqrestore(&iommu->lock, flags);
    4.75 +
    4.76  }
    4.77  
    4.78  static void amd_iommu_setup_dom0_devices(struct domain *d)
    4.79 @@ -188,10 +178,9 @@ static int allocate_domain_resources(str
    4.80      spin_lock_irqsave(&hd->mapping_lock, flags);
    4.81      if ( !hd->root_table )
    4.82      {
    4.83 -        hd->root_table = (void *)alloc_xenheap_page();
    4.84 +        hd->root_table = alloc_amd_iommu_pgtable();
    4.85          if ( !hd->root_table )
    4.86              goto error_out;
    4.87 -        memset((u8*)hd->root_table, 0, PAGE_SIZE);
    4.88      }
    4.89      spin_unlock_irqrestore(&hd->mapping_lock, flags);
    4.90  
    4.91 @@ -228,7 +217,8 @@ static int amd_iommu_domain_init(struct 
    4.92      /* allocate page directroy */
    4.93      if ( allocate_domain_resources(hd) != 0 )
    4.94      {
    4.95 -        deallocate_domain_resources(hd);
    4.96 +        if ( hd->root_table )
    4.97 +            free_domheap_page(hd->root_table);
    4.98          return -ENOMEM;
    4.99      }
   4.100  
   4.101 @@ -258,12 +248,11 @@ static void amd_iommu_disable_domain_dev
   4.102      int req_id;
   4.103  
   4.104      req_id = ivrs_mappings[bdf].dte_requestor_id;
   4.105 -    dte = iommu->dev_table.buffer +
   4.106 -        (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
   4.107 +    dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
   4.108  
   4.109 +    spin_lock_irqsave(&iommu->lock, flags); 
   4.110      if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
   4.111      {
   4.112 -        spin_lock_irqsave(&iommu->lock, flags); 
   4.113          memset (dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
   4.114          invalidate_dev_table_entry(iommu, req_id);
   4.115          flush_command_buffer(iommu);
   4.116 @@ -271,8 +260,8 @@ static void amd_iommu_disable_domain_dev
   4.117                  " domain_id:%d, paging_mode:%d\n",
   4.118                  req_id,  domain_hvm_iommu(domain)->domain_id,
   4.119                  domain_hvm_iommu(domain)->paging_mode);
   4.120 -        spin_unlock_irqrestore(&iommu->lock, flags);
   4.121      }
   4.122 +    spin_unlock_irqrestore(&iommu->lock, flags);
   4.123  }
   4.124  
   4.125  static int reassign_device( struct domain *source, struct domain *target,
   4.126 @@ -338,55 +327,43 @@ static int amd_iommu_assign_device(struc
   4.127      return reassign_device(dom0, d, bus, devfn);
   4.128  }
   4.129  
   4.130 -static void deallocate_next_page_table(void *table, unsigned long index,
   4.131 -                                       int level)
   4.132 +static void deallocate_next_page_table(struct page_info* pg, int level)
   4.133  {
   4.134 -    unsigned long next_index;
   4.135 -    void *next_table, *pde;
   4.136 -    int next_level;
   4.137 +    void *table_vaddr, *pde;
   4.138 +    u64 next_table_maddr;
   4.139 +    int index;
   4.140  
   4.141 -    pde = table + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
   4.142 -    next_table = amd_iommu_get_vptr_from_page_table_entry((u32 *)pde);
   4.143 +    table_vaddr = map_domain_page(page_to_mfn(pg));
   4.144  
   4.145 -    if ( next_table )
   4.146 +    if ( level > 1 )
   4.147      {
   4.148 -        next_level = level - 1;
   4.149 -        if ( next_level > 1 )
   4.150 +        for ( index = 0; index < PTE_PER_TABLE_SIZE; index++ )
   4.151          {
   4.152 -            next_index = 0;
   4.153 -            do
   4.154 +            pde = table_vaddr + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
   4.155 +            next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
   4.156 +            if ( next_table_maddr != 0 )
   4.157              {
   4.158 -                deallocate_next_page_table(next_table,
   4.159 -                                           next_index, next_level);
   4.160 -                next_index++;
   4.161 -            } while (next_index < PTE_PER_TABLE_SIZE);
   4.162 +                deallocate_next_page_table(
   4.163 +                    maddr_to_page(next_table_maddr), level - 1);
   4.164 +            }
   4.165          }
   4.166 +    }
   4.167  
   4.168 -        free_xenheap_page(next_table);
   4.169 -    }
   4.170 +    unmap_domain_page(table_vaddr);
   4.171 +    free_amd_iommu_pgtable(pg);
   4.172  }
   4.173  
   4.174  static void deallocate_iommu_page_tables(struct domain *d)
   4.175  {
   4.176 -    unsigned long index;
   4.177      struct hvm_iommu *hd  = domain_hvm_iommu(d);
   4.178  
   4.179 -    if ( hd ->root_table )
   4.180 +    if ( hd->root_table )
   4.181      {
   4.182 -        index = 0;
   4.183 +        deallocate_next_page_table(hd->root_table, hd->paging_mode);
   4.184 +        hd->root_table = NULL;
   4.185 +    }
   4.186 +}
   4.187  
   4.188 -        do
   4.189 -        {
   4.190 -            deallocate_next_page_table(hd->root_table,
   4.191 -                                       index, hd->paging_mode);
   4.192 -            index++;
   4.193 -        } while ( index < PTE_PER_TABLE_SIZE );
   4.194 -
   4.195 -        free_xenheap_page(hd ->root_table);
   4.196 -    }
   4.197 -
   4.198 -    hd ->root_table = NULL;
   4.199 -}
   4.200  
   4.201  static void amd_iommu_domain_destroy(struct domain *d)
   4.202  {
     5.1 --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Fri Jan 09 13:00:10 2009 +0000
     5.2 +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Fri Jan 09 15:57:57 2009 +0000
     5.3 @@ -23,6 +23,7 @@
     5.4  
     5.5  #include <xen/sched.h>
     5.6  #include <asm/amd-iommu.h>
     5.7 +#include <xen/domain_page.h>
     5.8  
     5.9  #define for_each_amd_iommu(amd_iommu) \
    5.10      list_for_each_entry(amd_iommu, \
    5.11 @@ -59,7 +60,7 @@ int __init amd_iommu_setup_shared_tables
    5.12  /* mapping functions */
    5.13  int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
    5.14  int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
    5.15 -void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
    5.16 +u64 amd_iommu_get_next_table_from_pte(u32 *entry);
    5.17  int amd_iommu_reserve_domain_unity_map(struct domain *domain,
    5.18          unsigned long phys_addr, unsigned long size, int iw, int ir);
    5.19  int amd_iommu_sync_p2m(struct domain *d);
    5.20 @@ -69,8 +70,7 @@ void invalidate_all_iommu_pages(struct d
    5.21  void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,
    5.22          u16 domain_id, u8 sys_mgt, u8 dev_ex, u8 paging_mode);
    5.23  int amd_iommu_is_dte_page_translation_valid(u32 *entry);
    5.24 -void invalidate_dev_table_entry(struct amd_iommu *iommu,
    5.25 -            u16 devic_id);
    5.26 +void invalidate_dev_table_entry(struct amd_iommu *iommu, u16 devic_id);
    5.27  
    5.28  /* send cmd to iommu */
    5.29  int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]);
    5.30 @@ -117,4 +117,36 @@ static inline unsigned long region_to_pa
    5.31      return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
    5.32  }
    5.33  
    5.34 +static inline struct page_info* alloc_amd_iommu_pgtable(void)
    5.35 +{
    5.36 +    struct page_info *pg;
    5.37 +    void *vaddr;
    5.38 +
    5.39 +    pg = alloc_domheap_page(NULL, 0);
    5.40 +    vaddr = map_domain_page(page_to_mfn(pg));
    5.41 +    if ( !vaddr )
    5.42 +        return 0;
    5.43 +    memset(vaddr, 0, PAGE_SIZE);
    5.44 +    unmap_domain_page(vaddr);
    5.45 +    return pg;
    5.46 +}
    5.47 +
    5.48 +static inline void free_amd_iommu_pgtable(struct page_info *pg)
    5.49 +{
    5.50 +    if ( pg != 0 )
    5.51 +        free_domheap_page(pg);
    5.52 +}
    5.53 +
    5.54 +static inline void* __alloc_amd_iommu_tables(int order)
    5.55 +{
    5.56 +    void *buf;
    5.57 +    buf = alloc_xenheap_pages(order);
    5.58 +    return buf;
    5.59 +}
    5.60 +
    5.61 +static inline void __free_amd_iommu_tables(void *table, int order)
    5.62 +{
    5.63 +    free_xenheap_pages(table, order);
    5.64 +}
    5.65 +
    5.66  #endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */
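For context, a hedged usage sketch of the helpers added above: a caller allocates a lower-level table with alloc_amd_iommu_pgtable() and wires its machine address into a page directory entry, in the same spirit as iommu_l2e_from_pfn() in iommu_map.c. The function name install_next_table() is illustrative and not part of the patch; it assumes placement inside iommu_map.c, where amd_iommu_set_page_directory_entry() is visible.

    /*
     * Illustrative only (hypothetical helper, as if in iommu_map.c):
     * consume the new domheap-based allocator and store a machine
     * address, not a virtual pointer, in the PDE.
     */
    static u64 install_next_table(u32 *pde, int level)
    {
        struct page_info *pg = alloc_amd_iommu_pgtable();
        u64 next_table_maddr;

        if ( pg == NULL )                      /* allocation may fail */
            return 0;

        next_table_maddr = page_to_maddr(pg);  /* PDEs hold machine addresses */
        amd_iommu_set_page_directory_entry(pde, next_table_maddr, level - 1);
        return next_table_maddr;
    }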
     6.1 --- a/xen/include/xen/hvm/iommu.h	Fri Jan 09 13:00:10 2009 +0000
     6.2 +++ b/xen/include/xen/hvm/iommu.h	Fri Jan 09 15:57:57 2009 +0000
     6.3 @@ -40,7 +40,7 @@ struct hvm_iommu {
     6.4      /* amd iommu support */
     6.5      int domain_id;
     6.6      int paging_mode;
     6.7 -    void *root_table;
     6.8 +    struct page_info *root_table;
     6.9      bool_t p2m_synchronized;
    6.10  
    6.11      /* iommu_ops */