ia64/xen-unstable

changeset 17746:3613160e4fd1

vtd: IOTLB flush fixups

On map: only flush when the old PTE was valid, or when invalid PTEs
may be cached by the hardware (caching mode).
On unmap: always flush the old entry, but skip the flush on IOMMUs
that do not serve the domain.

Signed-off-by: Espen Skoglund <espen.skoglund@netronome.com>
author   Keir Fraser <keir.fraser@citrix.com>
date     Wed May 28 09:31:49 2008 +0100 (2008-05-28)
parents  38ec0d674842
children 4c75850a0caa
files    xen/drivers/passthrough/vtd/iommu.c
line diff
--- a/xen/drivers/passthrough/vtd/iommu.c	Wed May 28 09:31:04 2008 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c	Wed May 28 09:31:49 2008 +0100
@@ -1526,6 +1526,7 @@ int intel_iommu_map_page(
     struct iommu *iommu;
     struct dma_pte *page = NULL, *pte = NULL;
     u64 pg_maddr;
+    int pte_present;
 
     drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
     iommu = drhd->iommu;
@@ -1541,6 +1542,7 @@ int intel_iommu_map_page(
         return -ENOMEM;
     page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
     pte = page + (gfn & LEVEL_MASK);
+    pte_present = dma_pte_present(*pte);
     dma_set_pte_addr(*pte, (paddr_t)mfn << PAGE_SHIFT_4K);
     dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);
     iommu_flush_cache_entry(iommu, pte);
@@ -1553,7 +1555,7 @@ int intel_iommu_map_page(
         if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
             continue;
 
-        if ( cap_caching_mode(iommu->cap) )
+        if ( pte_present || cap_caching_mode(iommu->cap) )
             iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
                                   (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
         else if ( cap_rwbf(iommu->cap) )
@@ -1565,6 +1567,7 @@ int intel_iommu_map_page(
 
 int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
 {
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     struct dma_pte *page = NULL, *pte = NULL;
@@ -1591,11 +1594,13 @@ int intel_iommu_unmap_page(struct domain
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
-        if ( cap_caching_mode(iommu->cap) )
-            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
-                                  (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
-        else if ( cap_rwbf(iommu->cap) )
-            iommu_flush_write_buffer(iommu);
+
+        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+            continue;
+
+        if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
+                                   (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0) )
+            iommu_flush_write_buffer(iommu);
     }
 
     return 0;
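
As a reading aid, here is a minimal, self-contained C sketch of the flush
policy this patch implements. It is a model of the decision logic, not the
changeset's actual API: struct iommu_model and its fields (caching_mode,
rwbf, serves_domain) are hypothetical stand-ins for Xen's capability bits
(cap_caching_mode(), cap_rwbf()) and the hd->iommu_bitmap test.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-IOMMU state the patch consults. */
struct iommu_model {
    bool caching_mode;   /* hardware may cache not-present PTEs (CM bit) */
    bool rwbf;           /* hardware requires write-buffer flushing (RWBF bit) */
    bool serves_domain;  /* models test_bit(iommu->index, &hd->iommu_bitmap) */
};

/* Map path: an IOTLB flush is needed only if the old PTE was valid, or if
 * the IOMMU may have cached the previous not-present entry (caching mode).
 * Otherwise a write-buffer flush suffices on hardware that requires one. */
static void flush_on_map(const struct iommu_model *iommu, bool pte_present)
{
    if ( !iommu->serves_domain )
        return;                    /* IOMMU unaffected by this domain */
    if ( pte_present || iommu->caching_mode )
        printf("page-selective IOTLB flush\n");
    else if ( iommu->rwbf )
        printf("write-buffer flush\n");
}

/* Unmap path: a valid translation is always being torn down, so the IOTLB
 * flush is unconditional; only IOMMUs the domain has no devices behind are
 * skipped.  (The real code additionally flushes the write buffer when the
 * page-selective flush reports that one is needed.) */
static void flush_on_unmap(const struct iommu_model *iommu)
{
    if ( !iommu->serves_domain )
        return;
    printf("page-selective IOTLB flush\n");
}

int main(void)
{
    struct iommu_model iommu = { .caching_mode = false, .rwbf = true,
                                 .serves_domain = true };

    flush_on_map(&iommu, false);  /* old PTE invalid -> write-buffer flush only */
    flush_on_map(&iommu, true);   /* old PTE valid   -> IOTLB flush */
    flush_on_unmap(&iommu);       /* always          -> IOTLB flush */
    return 0;
}

The point of the map-path change is the pte_present test: before this
patch the flush decision looked only at caching mode, so overwriting an
already-valid PTE could leave a stale translation in the IOTLB on
non-caching-mode hardware.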