ia64/xen-unstable

changeset 17752:f681c4de91fc

vtd: More TLB flush fixes.
- Simplified the non-present flush testing.
- Removed dma_addr_level_page_maddr(); use a modified
  addr_to_dma_page_maddr() instead.
- Upon mapping a new context entry: flush the old entry using domid 0,
  and always flush the IOTLB.

Signed-off-by: Espen Skoglund <espen.skoglund@netronome.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed May 28 16:14:10 2008 +0100 (2008-05-28)
parents b60cf40fae13
children 4505418b6393
files xen/drivers/passthrough/vtd/iommu.c
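For orientation before the diff: addr_to_dma_page_maddr() gains an alloc argument. With alloc set it keeps the old behaviour of allocating any missing page-table levels; with alloc clear it becomes a pure lookup that returns 0 as soon as a level is absent, which is what lets it replace dma_addr_level_page_maddr() in dma_pte_clear_one(). A minimal, self-contained sketch of that semantics (a toy 3-level table; calloc() stands in for Xen's alloc_pgtable_maddr()/map_vtd_domain_page(); the names lookup/table are invented for illustration and this is not the Xen code):

    #include <stdint.h>
    #include <stdlib.h>

    #define LEVELS   3     /* toy: 3 levels, 9 address bits per level */
    #define ENTRIES  512

    struct table { struct table *slot[ENTRIES]; };

    /* Walk to the leaf-level table for addr. With alloc set, missing
     * levels are created on the way down (the old behaviour); with
     * alloc clear this is a pure lookup that fails on the first
     * absent level. */
    static struct table *lookup(struct table **rootp, uint64_t addr, int alloc)
    {
        struct table *t;
        int level;

        if ( *rootp == NULL )
        {
            if ( !alloc )
                return NULL;                /* nothing mapped at all */
            *rootp = calloc(1, sizeof(**rootp));
            if ( *rootp == NULL )
                return NULL;                /* allocation failure */
        }
        t = *rootp;

        /* Descend from the top level to the leaf (level-1) table. */
        for ( level = LEVELS; level > 1; level-- )
        {
            int idx = (int)((addr >> (12 + 9 * (level - 1))) & (ENTRIES - 1));

            if ( t->slot[idx] == NULL )
            {
                if ( !alloc )
                    return NULL;            /* missing level: lookup fails */
                t->slot[idx] = calloc(1, sizeof(*t));
                if ( t->slot[idx] == NULL )
                    return NULL;
            }
            t = t->slot[idx];
        }
        return t;                           /* leaf table holding the PTE */
    }

A map path such as intel_iommu_map_page() calls with alloc set and treats a zero result as -ENOMEM; dma_pte_clear_one() now calls with alloc clear and simply returns when nothing was ever mapped at that address.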
line diff
--- a/xen/drivers/passthrough/vtd/iommu.c	Wed May 28 14:41:23 2008 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c	Wed May 28 16:14:10 2008 +0100
@@ -191,7 +191,7 @@ static int device_context_mapped(struct 
     return ret;
 }
 
-static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr)
+static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
     struct acpi_drhd_unit *drhd;
@@ -201,7 +201,7 @@ static u64 addr_to_dma_page_maddr(struct
     int level = agaw_to_level(hd->agaw);
     int offset;
     unsigned long flags;
-    u64 pte_maddr = 0;
+    u64 pte_maddr = 0, maddr;
     u64 *vaddr = NULL;
 
     drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
@@ -211,6 +211,8 @@ static u64 addr_to_dma_page_maddr(struct
     spin_lock_irqsave(&hd->mapping_lock, flags);
     if ( hd->pgd_maddr == 0 )
     {
+        if ( !alloc )
+            return 0;
         hd->pgd_maddr = alloc_pgtable_maddr();
         if ( hd->pgd_maddr == 0 )
             return 0;
@@ -224,7 +226,9 @@ static u64 addr_to_dma_page_maddr(struct
 
         if ( dma_pte_addr(*pte) == 0 )
         {
-            u64 maddr = alloc_pgtable_maddr();
+            if ( !alloc )
+                break;
+            maddr = alloc_pgtable_maddr();
             dma_set_pte_addr(*pte, maddr);
             vaddr = map_vtd_domain_page(maddr);
             if ( !vaddr )
@@ -263,41 +267,6 @@ static u64 addr_to_dma_page_maddr(struct
     return pte_maddr;
 }
 
-/* return address's page at specific level */
-static u64 dma_addr_level_page_maddr(
-    struct domain *domain, u64 addr, int level)
-{
-    struct hvm_iommu *hd = domain_hvm_iommu(domain);
-    struct dma_pte *parent, *pte = NULL;
-    int total = agaw_to_level(hd->agaw);
-    int offset;
-    u64 pg_maddr = hd->pgd_maddr;
-
-    if ( pg_maddr == 0 )
-        return 0;
-
-    parent = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
-    while ( level <= total )
-    {
-        offset = address_level_offset(addr, total);
-        pte = &parent[offset];
-        if ( dma_pte_addr(*pte) == 0 )
-            break;
-
-        pg_maddr = pte->val & PAGE_MASK_4K;
-        unmap_vtd_domain_page(parent);
-
-        if ( level == total )
-            return pg_maddr;
-
-        parent = map_vtd_domain_page(pte->val);
-        total--;
-    }
-
-    unmap_vtd_domain_page(parent);
-    return 0;
-}
-
 static void iommu_flush_write_buffer(struct iommu *iommu)
 {
     u32 val;
@@ -584,7 +553,7 @@ static void dma_pte_clear_one(struct dom
     drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
 
     /* get last level pte */
-    pg_maddr = dma_addr_level_page_maddr(domain, addr, 2);
+    pg_maddr = addr_to_dma_page_maddr(domain, addr, 0);
     if ( pg_maddr == 0 )
         return;
     page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
@@ -1193,13 +1162,11 @@ static int domain_context_mapping_one(
 
     unmap_vtd_domain_page(context_entries);
 
-    /* it's a non-present to present mapping */
-    if ( iommu_flush_context_device(iommu, domain_iommu_domid(domain),
-                                    (((u16)bus) << 8) | devfn,
-                                    DMA_CCMD_MASK_NOBIT, 1) )
+    /* Context entry was previously non-present (with domid 0). */
+    iommu_flush_context_device(iommu, 0, (((u16)bus) << 8) | devfn,
+                               DMA_CCMD_MASK_NOBIT, 1);
+    if ( iommu_flush_iotlb_dsi(iommu, 0, 1) )
         iommu_flush_write_buffer(iommu);
-    else
-        iommu_flush_iotlb_dsi(iommu, 0, 0);
 
     set_bit(iommu->index, &hd->iommu_bitmap);
     spin_unlock_irqrestore(&iommu->lock, flags);
@@ -1540,7 +1507,7 @@ int intel_iommu_map_page(
         return 0;
 #endif
 
-    pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K);
+    pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K, 1);
    if ( pg_maddr == 0 )
        return -ENOMEM;
    page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
@@ -1558,11 +1525,9 @@ int intel_iommu_map_page(
         if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
             continue;
 
-        if ( pte_present )
-            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
-                                  (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
-        else if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
-                                   (paddr_t)gfn << PAGE_SHIFT_4K, 1, 1) )
+        if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
+                                   (paddr_t)gfn << PAGE_SHIFT_4K, 1,
+                                   !pte_present) )
             iommu_flush_write_buffer(iommu);
     }
 
@@ -1609,7 +1574,7 @@ int iommu_page_mapping(struct domain *do
     index = 0;
     while ( start_pfn < end_pfn )
     {
-        pg_maddr = addr_to_dma_page_maddr(domain, iova + PAGE_SIZE_4K * index);
+        pg_maddr = addr_to_dma_page_maddr(domain, iova + PAGE_SIZE_4K*index, 1);
         if ( pg_maddr == 0 )
             return -ENOMEM;
         page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
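The last bullet of the commit message is visible in the domain_context_mapping_one() hunk above: the stale context entry is flushed under domid 0 (the id it carried while non-present) rather than the new domain's id, and the domain-selective IOTLB flush now happens unconditionally, with the write-buffer flush kept as the fallback the IOTLB flush can request. A small sketch of that control flow (stubbed hooks standing in for iommu_flush_context_device(), iommu_flush_iotlb_dsi() and iommu_flush_write_buffer(); the return-value convention is taken from the diff, everything else is illustrative):

    #include <stdint.h>

    /* Stubs; a non-zero return from the IOTLB flush means the caller
     * must also flush the IOMMU write buffer. */
    static int  flush_context_device(uint16_t did, uint16_t source_id,
                                     int non_present_entry_flush) { return 0; }
    static int  flush_iotlb_dsi(uint16_t did, int non_present_entry_flush) { return 1; }
    static void flush_write_buffer(void) { }

    static void flush_after_context_map(uint8_t bus, uint8_t devfn)
    {
        /* The entry was previously non-present and thus tagged with
         * domid 0, so both flushes use domid 0, not the new domain. */
        flush_context_device(0, (uint16_t)((bus << 8) | devfn), 1);

        /* Always flush the IOTLB; fall back to the write buffer if asked. */
        if ( flush_iotlb_dsi(0, 1) )
            flush_write_buffer();
    }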