ia64/xen-unstable

changeset 17390:85d8d3f5c651

AMD IOMMU: Defer IO pagetable construction until device assignment

During HVM domain creation, the I/O page tables are filled by copying
entries from the p2m table, which is a useless step for domains with no
passthrough devices. This patch defers I/O page table construction until
the moment of device assignment. If PCI devices are never assigned or
hot-plugged, the unnecessary duplication is avoided entirely.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Apr 04 13:54:05 2008 +0100 (2008-04-04)
parents 7d617282f18e
children 633099ff88a8
files xen/drivers/passthrough/amd/iommu_map.c xen/drivers/passthrough/amd/pci_amd_iommu.c xen/include/asm-x86/hvm/svm/amd-iommu-proto.h xen/include/xen/hvm/iommu.h
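
The diff below is the authoritative change. As a rough standalone illustration of the deferral pattern it introduces (the toy_* names are invented for this sketch and do not exist in Xen), the following program shows map requests leaving the IO table untouched until a one-time bulk sync runs at the first device assignment:

    /*
     * Standalone illustration (not Xen code) of the "defer until first
     * device assignment" pattern: map requests skip the IO table until a
     * sync flag is set, and the sync step copies every existing
     * guest-frame -> machine-frame entry in one pass.
     */
    #include <stdio.h>
    #include <stdbool.h>

    #define NR_PAGES 8

    struct toy_domain {
        unsigned long p2m[NR_PAGES];       /* guest p2m: gfn -> mfn */
        unsigned long io_table[NR_PAGES];  /* IOMMU table, built lazily */
        bool p2m_synchronized;
    };

    /* Analogue of amd_iommu_map_page(): IO table untouched until synced. */
    static void toy_iommu_map_page(struct toy_domain *d, unsigned long gfn,
                                   unsigned long mfn)
    {
        d->p2m[gfn] = mfn;
        if ( !d->p2m_synchronized )
            return;                        /* deferred: nothing to duplicate yet */
        d->io_table[gfn] = mfn;
    }

    /* Analogue of amd_iommu_sync_p2m(): one-time bulk copy of the p2m. */
    static void toy_iommu_sync_p2m(struct toy_domain *d)
    {
        if ( d->p2m_synchronized )
            return;
        for ( unsigned long gfn = 0; gfn < NR_PAGES; gfn++ )
            d->io_table[gfn] = d->p2m[gfn];
        d->p2m_synchronized = true;
    }

    /* Analogue of amd_iommu_assign_device(): sync before the device can DMA. */
    static void toy_assign_device(struct toy_domain *d)
    {
        toy_iommu_sync_p2m(d);
    }

    int main(void)
    {
        struct toy_domain dom = { .p2m_synchronized = false };

        toy_iommu_map_page(&dom, 3, 42);   /* skipped by the IO table for now */
        toy_assign_device(&dom);           /* first assignment: bulk sync */
        toy_iommu_map_page(&dom, 5, 77);   /* subsequent maps go straight in */

        printf("io_table[3]=%lu io_table[5]=%lu\n",
               dom.io_table[3], dom.io_table[5]);
        return 0;
    }
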
line diff
     1.1 --- a/xen/drivers/passthrough/amd/iommu_map.c	Fri Apr 04 13:10:34 2008 +0100
     1.2 +++ b/xen/drivers/passthrough/amd/iommu_map.c	Fri Apr 04 13:54:05 2008 +0100
     1.3 @@ -388,17 +388,17 @@ int amd_iommu_map_page(struct domain *d,
     1.4      unsigned long flags;
     1.5      u64 maddr;
     1.6      struct hvm_iommu *hd = domain_hvm_iommu(d);
     1.7 -    int iw, ir;
     1.8 +    int iw = IOMMU_IO_WRITE_ENABLED;
     1.9 +    int ir = IOMMU_IO_READ_ENABLED;
    1.10  
    1.11      BUG_ON( !hd->root_table );
    1.12  
    1.13 -    maddr = (u64)mfn << PAGE_SHIFT;
    1.14 -
    1.15 -    iw = IOMMU_IO_WRITE_ENABLED;
    1.16 -    ir = IOMMU_IO_READ_ENABLED;
    1.17 -
    1.18      spin_lock_irqsave(&hd->mapping_lock, flags);
    1.19  
    1.20 +    if ( is_hvm_domain(d) && !hd->p2m_synchronized )
    1.21 +        goto out;
    1.22 +
    1.23 +    maddr = (u64)mfn << PAGE_SHIFT;
    1.24      pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
    1.25      if ( pte == NULL )
    1.26      {
    1.27 @@ -409,7 +409,7 @@ int amd_iommu_map_page(struct domain *d,
    1.28      }
    1.29  
    1.30      set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
    1.31 -
    1.32 +out:
    1.33      spin_unlock_irqrestore(&hd->mapping_lock, flags);
    1.34      return 0;
    1.35  }
    1.36 @@ -425,11 +425,17 @@ int amd_iommu_unmap_page(struct domain *
    1.37  
    1.38      BUG_ON( !hd->root_table );
    1.39  
    1.40 +    spin_lock_irqsave(&hd->mapping_lock, flags);
    1.41 +
    1.42 +    if ( is_hvm_domain(d) && !hd->p2m_synchronized )
    1.43 +    {
    1.44 +        spin_unlock_irqrestore(&hd->mapping_lock, flags);
    1.45 +        return 0;
    1.46 +    }
    1.47 +
    1.48      requestor_id = hd->domain_id;
    1.49      io_addr = (u64)gfn << PAGE_SHIFT;
    1.50  
    1.51 -    spin_lock_irqsave(&hd->mapping_lock, flags);
    1.52 -
    1.53      pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
    1.54      if ( pte == NULL )
    1.55      {
    1.56 @@ -486,3 +492,53 @@ int amd_iommu_reserve_domain_unity_map(
    1.57      spin_unlock_irqrestore(&hd->mapping_lock, flags);
    1.58      return 0;
    1.59  }
    1.60 +
    1.61 +int amd_iommu_sync_p2m(struct domain *d)
    1.62 +{
    1.63 +    unsigned long mfn, gfn, flags;
    1.64 +    void *pte;
    1.65 +    u64 maddr;
    1.66 +    struct list_head *entry;
    1.67 +    struct page_info *page;
    1.68 +    struct hvm_iommu *hd;
    1.69 +    int iw = IOMMU_IO_WRITE_ENABLED;
    1.70 +    int ir = IOMMU_IO_READ_ENABLED;
    1.71 +
    1.72 +    if ( !is_hvm_domain(d) )
     1.73 +        return 0;
    1.74 +
    1.75 +    hd = domain_hvm_iommu(d);
    1.76 +
    1.77 +    spin_lock_irqsave(&hd->mapping_lock, flags);
    1.78 +
    1.79 +    if ( hd->p2m_synchronized )
    1.80 +        goto out;
    1.81 +
    1.82 +    for ( entry = d->page_list.next; entry != &d->page_list;
    1.83 +            entry = entry->next )
    1.84 +    {
    1.85 +        page = list_entry(entry, struct page_info, list);
    1.86 +        mfn = page_to_mfn(page);
    1.87 +        gfn = get_gpfn_from_mfn(mfn);
    1.88 +
    1.89 +        if ( gfn == INVALID_M2P_ENTRY )
    1.90 +            continue;
    1.91 +
    1.92 +        maddr = (u64)mfn << PAGE_SHIFT;
    1.93 +        pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
    1.94 +        if ( pte == NULL )
    1.95 +        {
    1.96 +            dprintk(XENLOG_ERR,
    1.97 +                    "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn);
    1.98 +            spin_unlock_irqrestore(&hd->mapping_lock, flags);
    1.99 +            return -EFAULT;
   1.100 +        }
   1.101 +        set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
   1.102 +    }
   1.103 +
   1.104 +    hd->p2m_synchronized = 1;
   1.105 +
   1.106 +out:
   1.107 +    spin_unlock_irqrestore(&hd->mapping_lock, flags);
   1.108 +    return 0;
   1.109 +}
     2.1 --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Apr 04 13:10:34 2008 +0100
     2.2 +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Apr 04 13:54:05 2008 +0100
     2.3 @@ -553,8 +553,9 @@ static int reassign_device( struct domai
     2.4  int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
     2.5  {
     2.6      int bdf = (bus << 8) | devfn;
     2.7 -    int req_id;
     2.8 -    req_id = ivrs_mappings[bdf].dte_requestor_id;
     2.9 +    int req_id = ivrs_mappings[bdf].dte_requestor_id;
    2.10 +
    2.11 +    amd_iommu_sync_p2m(d);
    2.12  
    2.13      if ( ivrs_mappings[req_id].unity_map_enable )
    2.14      {
     3.1 --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Fri Apr 04 13:10:34 2008 +0100
     3.2 +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Fri Apr 04 13:54:05 2008 +0100
     3.3 @@ -57,6 +57,7 @@ int amd_iommu_unmap_page(struct domain *
     3.4  void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
     3.5  int amd_iommu_reserve_domain_unity_map(struct domain *domain,
     3.6          unsigned long phys_addr, unsigned long size, int iw, int ir);
     3.7 +int amd_iommu_sync_p2m(struct domain *d);
     3.8  
     3.9  /* device table functions */
    3.10  void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr,
     4.1 --- a/xen/include/xen/hvm/iommu.h	Fri Apr 04 13:10:34 2008 +0100
     4.2 +++ b/xen/include/xen/hvm/iommu.h	Fri Apr 04 13:54:05 2008 +0100
     4.3 @@ -48,9 +48,10 @@ struct hvm_iommu {
     4.4      int domain_id;
     4.5      int paging_mode;
     4.6      void *root_table;
     4.7 +    bool_t p2m_synchronized;
     4.8  
     4.9      /* iommu_ops */
    4.10      struct iommu_ops *platform_ops;
    4.11  };
    4.12  
    4.13 -#endif // __ASM_X86_HVM_IOMMU_H__
    4.14 +#endif /* __ASM_X86_HVM_IOMMU_H__ */