ia64/xen-unstable

changeset 17748:542897539045

vtd: TLB flush fixups.

Clearing a present DMA PTE previously flushed the IOTLB only when the
IOMMU advertised caching mode (or drained the write buffer under RWBF),
leaving stale translations on other hardware. Flush page-selectively on
every clear, and rely on the flush helpers' non-present-entry flag
instead of open-coded cap_caching_mode()/cap_rwbf() checks: a nonzero
return asks the caller to fall back to iommu_flush_write_buffer(). Also
route intel_iommu_unmap_page() through dma_pte_clear_one(), which now
tests dma_pte_present() before clearing; drop the now-unused
iommu_flush() hook together with its only caller in the EPT code; and
move the IOTLB granularity-mismatch message under VTD_DEBUG.
Signed-off-by: Xiaowei Yang <xiaowei.yang@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed May 28 10:05:57 2008 +0100 (2008-05-28)
parents 4c75850a0caa
children 132243507b49
files xen/arch/x86/mm/hap/p2m-ept.c xen/drivers/passthrough/vtd/iommu.c xen/include/xen/iommu.h
line diff
     1.1 --- a/xen/arch/x86/mm/hap/p2m-ept.c	Wed May 28 10:02:00 2008 +0100
     1.2 +++ b/xen/arch/x86/mm/hap/p2m-ept.c	Wed May 28 10:05:57 2008 +0100
     1.3 @@ -267,12 +267,6 @@ out:
     1.4          }
     1.5      }
     1.6  
     1.7 -#ifdef P2M_SHARE_WITH_VTD_PAGE_TABLE
     1.8 -    /* If p2m table is shared with vtd page-table. */
     1.9 -    if ( iommu_enabled && is_hvm_domain(d) && (p2mt == p2m_mmio_direct) )
    1.10 -        iommu_flush(d, gfn, (u64*)ept_entry);
    1.11 -#endif
    1.12 -
    1.13      return rv;
    1.14  }
    1.15  
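
Note: iommu_flush(), the hook this removed block called, is itself deleted near the end of the iommu.c changes below, and its prototype is dropped from iommu.h. IOTLB invalidation now happens inside the VT-d map/unmap paths; a minimal sketch of that per-IOMMU page-selective flush, reusing names from this changeset ('d', 'gfn' and 'hd' as in the surrounding code):

    /* Sketch only -- mirrors the loop the map/unmap paths below use. */
    for_each_drhd_unit ( drhd )
    {
        iommu = drhd->iommu;
        if ( test_bit(iommu->index, &hd->iommu_bitmap) )
            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
                                  (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
    }
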
     2.1 --- a/xen/drivers/passthrough/vtd/iommu.c	Wed May 28 10:02:00 2008 +0100
     2.2 +++ b/xen/drivers/passthrough/vtd/iommu.c	Wed May 28 10:05:57 2008 +0100
     2.3 @@ -485,9 +485,12 @@ static int flush_iotlb_reg(void *_iommu,
     2.4      /* check IOTLB invalidation granularity */
     2.5      if ( DMA_TLB_IAIG(val) == 0 )
     2.6          printk(KERN_ERR VTDPREFIX "IOMMU: flush IOTLB failed\n");
     2.7 +
     2.8 +#ifdef VTD_DEBUG
     2.9      if ( DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type) )
    2.10          printk(KERN_ERR VTDPREFIX "IOMMU: tlb flush request %x, actual %x\n",
    2.11                 (u32)DMA_TLB_IIRG(type), (u32)DMA_TLB_IAIG(val));
    2.12 +#endif
     2.13  /* flush context entry will implicitly flush write buffer */
    2.14      return 0;
    2.15  }
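
Note on the VTD_DEBUG change: DMA_TLB_IIRG() is the requested invalidation granularity and DMA_TLB_IAIG() the granularity the hardware actually used. The VT-d spec permits hardware to invalidate at a coarser granularity than requested, so a mismatch is benign; only IAIG == 0 (flush not performed) is a real failure. Annotated sketch of the resulting check:

    if ( DMA_TLB_IAIG(val) == 0 )          /* hard failure: no flush done */
        printk(KERN_ERR VTDPREFIX "IOMMU: flush IOTLB failed\n");

    #ifdef VTD_DEBUG
    /* benign: hardware chose a coarser granularity than requested */
    if ( DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type) )
        printk(KERN_ERR VTDPREFIX "IOMMU: tlb flush request %x, actual %x\n",
               (u32)DMA_TLB_IIRG(type), (u32)DMA_TLB_IAIG(val));
    #endif
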
    2.16 @@ -581,30 +584,29 @@ static void dma_pte_clear_one(struct dom
    2.17      drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
    2.18  
    2.19      /* get last level pte */
    2.20 -    pg_maddr = dma_addr_level_page_maddr(domain, addr, 1);
    2.21 +    pg_maddr = dma_addr_level_page_maddr(domain, addr, 2);
    2.22      if ( pg_maddr == 0 )
    2.23          return;
    2.24      page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
    2.25      pte = page + address_level_offset(addr, 1);
    2.26 -    if ( pte )
    2.27 +
    2.28 +    if ( !dma_pte_present(*pte) )
    2.29      {
    2.30 -        dma_clear_pte(*pte);
    2.31 -        iommu_flush_cache_entry(drhd->iommu, pte);
    2.32 -
    2.33 -        for_each_drhd_unit ( drhd )
    2.34 -        {
    2.35 -            iommu = drhd->iommu;
    2.36 +        unmap_vtd_domain_page(page);
    2.37 +        return;
    2.38 +    }
    2.39  
    2.40 -            if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
    2.41 -                continue;
     2.42 +    dma_clear_pte(*pte);
    2.43 +    iommu_flush_cache_entry(drhd->iommu, pte);
    2.44  
    2.45 -            if ( cap_caching_mode(iommu->cap) )
    2.46 -                iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
    2.47 -                                      addr, 1, 0);
    2.48 -            else if (cap_rwbf(iommu->cap))
    2.49 -                iommu_flush_write_buffer(iommu);
    2.50 -        }
    2.51 +    for_each_drhd_unit ( drhd )
    2.52 +    {
    2.53 +        iommu = drhd->iommu;
    2.54 +
    2.55 +        if ( test_bit(iommu->index, &hd->iommu_bitmap) )
    2.56 +            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain), addr, 1, 0);
    2.57      }
    2.58 +
    2.59      unmap_vtd_domain_page(page);
    2.60  }
    2.61  
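For readability, dma_pte_clear_one() after this hunk, reconstructed from the + and context lines above (declarations at the top of the function elided):

    drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);

    /* get last level pte */
    pg_maddr = dma_addr_level_page_maddr(domain, addr, 2);
    if ( pg_maddr == 0 )
        return;
    page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
    pte = page + address_level_offset(addr, 1);

    /* nothing mapped here: no PTE to clear, no TLB entry to shoot down */
    if ( !dma_pte_present(*pte) )
    {
        unmap_vtd_domain_page(page);
        return;
    }

    dma_clear_pte(*pte);
    iommu_flush_cache_entry(drhd->iommu, pte);

    /* page-selective flush on every IOMMU serving this domain --
     * previously this only happened when caching mode was advertised */
    for_each_drhd_unit ( drhd )
    {
        iommu = drhd->iommu;

        if ( test_bit(iommu->index, &hd->iommu_bitmap) )
            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
                                  addr, 1, 0);
    }

    unmap_vtd_domain_page(page);
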
    2.62 @@ -1191,12 +1193,13 @@ static int domain_context_mapping_one(
    2.63  
    2.64      unmap_vtd_domain_page(context_entries);
    2.65  
    2.66 +    /* it's a non-present to present mapping */
    2.67      if ( iommu_flush_context_device(iommu, domain_iommu_domid(domain),
    2.68                                      (((u16)bus) << 8) | devfn,
    2.69                                      DMA_CCMD_MASK_NOBIT, 1) )
    2.70          iommu_flush_write_buffer(iommu);
    2.71      else
    2.72 -        iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
    2.73 +        iommu_flush_iotlb_dsi(iommu, 0, 0);
    2.74  
    2.75      set_bit(iommu->index, &hd->iommu_bitmap);
    2.76      spin_unlock_irqrestore(&iommu->lock, flags);
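
The iommu_flush_iotlb_dsi(iommu, 0, 0) change mirrors the Linux intel-iommu driver of the same era: for a non-present-to-present context-entry transition, domain id 0 is flushed, presumably because caching-mode hardware tags cached not-present entries with domain id 0. Annotated sketch of the resulting sequence:

    /* it's a non-present to present mapping */
    if ( iommu_flush_context_device(iommu, domain_iommu_domid(domain),
                                    (((u16)bus) << 8) | devfn,
                                    DMA_CCMD_MASK_NOBIT, 1) )
        iommu_flush_write_buffer(iommu);    /* flush skipped: drain write buffer */
    else
        iommu_flush_iotlb_dsi(iommu, 0, 0); /* domain 0: cached not-present entries */
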
    2.77 @@ -1555,10 +1558,11 @@ int intel_iommu_map_page(
    2.78          if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
    2.79              continue;
    2.80  
    2.81 -        if ( pte_present || cap_caching_mode(iommu->cap) )
    2.82 +        if ( pte_present )
    2.83              iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
    2.84                                    (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
    2.85 -        else if ( cap_rwbf(iommu->cap) )
    2.86 +        else if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
    2.87 +                                   (paddr_t)gfn << PAGE_SHIFT_4K, 1, 1) )
    2.88              iommu_flush_write_buffer(iommu);
    2.89      }
    2.90  
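The pattern above relies on a return-value convention in the flush helpers: the final argument is a non-present-entry flag, and a nonzero return means the IOTLB flush was skipped (hardware that does not advertise caching mode cannot hold a cached not-present translation), so the caller should fall back to iommu_flush_write_buffer(). A minimal sketch of that convention, assuming the helper is built like flush_iotlb_reg() above (example_flush is a hypothetical name, not a function in this file):

    /* Hypothetical illustration of the flush-helper convention. */
    static int example_flush(struct iommu *iommu, int non_present_entry_flush)
    {
        /* A not-present entry can only be cached under caching mode;
         * otherwise there is nothing to invalidate -- return nonzero so
         * the caller falls back to a write-buffer flush. */
        if ( non_present_entry_flush && !cap_caching_mode(iommu->cap) )
            return 1;

        /* ... program the IOTLB invalidation register set ... */
        return 0;
    }
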
    2.91 @@ -1567,11 +1571,8 @@ int intel_iommu_map_page(
    2.92  
    2.93  int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
    2.94  {
    2.95 -    struct hvm_iommu *hd = domain_hvm_iommu(d);
    2.96      struct acpi_drhd_unit *drhd;
    2.97      struct iommu *iommu;
    2.98 -    struct dma_pte *page = NULL, *pte = NULL;
    2.99 -    u64 pg_maddr;
   2.100  
   2.101      drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
   2.102      iommu = drhd->iommu;
   2.103 @@ -1582,26 +1583,7 @@ int intel_iommu_unmap_page(struct domain
   2.104          return 0;
   2.105  #endif
   2.106  
   2.107 -    pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K);
   2.108 -    if ( pg_maddr == 0 )
   2.109 -        return -ENOMEM;
   2.110 -    page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
   2.111 -    pte = page + (gfn & LEVEL_MASK);
   2.112 -    dma_clear_pte(*pte);
   2.113 -    iommu_flush_cache_entry(drhd->iommu, pte);
   2.114 -    unmap_vtd_domain_page(page);
   2.115 -
   2.116 -    for_each_drhd_unit ( drhd )
   2.117 -    {
   2.118 -        iommu = drhd->iommu;
   2.119 - 
   2.120 -        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
   2.121 -            continue;
   2.122 -
   2.123 -       if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
   2.124 -                                  (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0) )
   2.125 -           iommu_flush_write_buffer(iommu);
   2.126 -    }
   2.127 +    dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
   2.128  
   2.129      return 0;
   2.130  }
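
After this hunk intel_iommu_unmap_page() collapses to a thin wrapper (reconstruction from the remaining lines; the guard behind the #ifdef above elided):

    int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
    {
        struct acpi_drhd_unit *drhd;
        struct iommu *iommu;

        drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
        iommu = drhd->iommu;

        /* ... early-out elided (see the #ifdef context above) ... */

        /* clears the PTE and performs the per-IOMMU IOTLB flush */
        dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);

        return 0;
    }
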
   2.131 @@ -1647,10 +1629,8 @@ int iommu_page_mapping(struct domain *do
   2.132          if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
   2.133              continue;
   2.134  
   2.135 -        if ( cap_caching_mode(iommu->cap) )
   2.136 -            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
   2.137 -                                  iova, index, 0);
   2.138 -        else if ( cap_rwbf(iommu->cap) )
   2.139 +        if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
   2.140 +                                   iova, index, 1) )
   2.141              iommu_flush_write_buffer(iommu);
   2.142      }
   2.143  
   2.144 @@ -1664,30 +1644,6 @@ int iommu_page_unmapping(struct domain *
   2.145      return 0;
   2.146  }
   2.147  
   2.148 -void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry)
   2.149 -{
   2.150 -    struct hvm_iommu *hd = domain_hvm_iommu(d);
   2.151 -    struct acpi_drhd_unit *drhd;
   2.152 -    struct iommu *iommu = NULL;
   2.153 -    struct dma_pte *pte = (struct dma_pte *) p2m_entry;
   2.154 -
   2.155 -    for_each_drhd_unit ( drhd )
   2.156 -    {
   2.157 -        iommu = drhd->iommu;
   2.158 -
   2.159 -        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
   2.160 -            continue;
   2.161 -
   2.162 -        if ( cap_caching_mode(iommu->cap) )
   2.163 -            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
   2.164 -                                  (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
   2.165 -        else if ( cap_rwbf(iommu->cap) )
   2.166 -            iommu_flush_write_buffer(iommu);
   2.167 -    }
   2.168 -
   2.169 -    iommu_flush_cache_entry(iommu, pte);
   2.170 -}
   2.171 -
   2.172  static int iommu_prepare_rmrr_dev(
   2.173      struct domain *d,
   2.174      struct acpi_rmrr_unit *rmrr,
     3.1 --- a/xen/include/xen/iommu.h	Wed May 28 10:02:00 2008 +0100
     3.2 +++ b/xen/include/xen/iommu.h	Wed May 28 10:05:57 2008 +0100
     3.3 @@ -66,7 +66,6 @@ void reassign_device_ownership(struct do
     3.4                                 u8 bus, u8 devfn);
     3.5  int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
     3.6  int iommu_unmap_page(struct domain *d, unsigned long gfn);
     3.7 -void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry);
     3.8  void iommu_set_pgd(struct domain *d);
     3.9  void iommu_free_pgd(struct domain *d);
    3.10  void iommu_domain_teardown(struct domain *d);