ia64/xen-unstable

changeset 17477:da4042899fd2

[VTD] Fix VT-d PAE issues.

Cast unsigned long PFNs to paddr_t before shifting left, so the shifted result is not truncated on PAE builds where unsigned long is 32 bits but physical addresses are wider.

Signed-off-by: Weidong Han <weidong.han@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Apr 16 13:40:46 2008 +0100 (2008-04-16)
parents cd5dc735bdf3
children 837ea1f0aa8a
files xen/drivers/passthrough/vtd/iommu.c xen/drivers/passthrough/vtd/x86/vtd.c
line diff
     1.1 --- a/xen/drivers/passthrough/vtd/iommu.c	Wed Apr 16 13:36:44 2008 +0100
     1.2 +++ b/xen/drivers/passthrough/vtd/iommu.c	Wed Apr 16 13:40:46 2008 +0100
     1.3 @@ -1600,7 +1600,7 @@ int intel_iommu_map_page(
     1.4          return 0;
     1.5  #endif
     1.6  
     1.7 -    pg_maddr = addr_to_dma_page_maddr(d, gfn << PAGE_SHIFT_4K);
     1.8 +    pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K);
     1.9      if ( pg_maddr == 0 )
    1.10          return -ENOMEM;
    1.11      page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
    1.12 @@ -1643,11 +1643,11 @@ int intel_iommu_unmap_page(struct domain
    1.13  }
    1.14  
    1.15  int iommu_page_mapping(struct domain *domain, paddr_t iova,
    1.16 -                       void *hpa, size_t size, int prot)
    1.17 +                       paddr_t hpa, size_t size, int prot)
    1.18  {
    1.19      struct acpi_drhd_unit *drhd;
    1.20      struct iommu *iommu;
    1.21 -    unsigned long start_pfn, end_pfn;
    1.22 +    u64 start_pfn, end_pfn;
    1.23      struct dma_pte *page = NULL, *pte = NULL;
    1.24      int index;
    1.25      u64 pg_maddr;
    1.26 @@ -1657,9 +1657,8 @@ int iommu_page_mapping(struct domain *do
    1.27      if ( (prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0 )
    1.28          return -EINVAL;
    1.29      iova = (iova >> PAGE_SHIFT_4K) << PAGE_SHIFT_4K;
    1.30 -    start_pfn = (unsigned long)(((unsigned long) hpa) >> PAGE_SHIFT_4K);
    1.31 -    end_pfn = (unsigned long)
    1.32 -        ((PAGE_ALIGN_4K(((unsigned long)hpa) + size)) >> PAGE_SHIFT_4K);
    1.33 +    start_pfn = hpa >> PAGE_SHIFT_4K;
    1.34 +    end_pfn = (PAGE_ALIGN_4K(hpa + size)) >> PAGE_SHIFT_4K;
    1.35      index = 0;
    1.36      while ( start_pfn < end_pfn )
    1.37      {
    1.38 @@ -1668,7 +1667,7 @@ int iommu_page_mapping(struct domain *do
    1.39              return -ENOMEM;
    1.40          page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
    1.41          pte = page + (start_pfn & LEVEL_MASK);
    1.42 -        dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
    1.43 +        dma_set_pte_addr(*pte, (paddr_t)start_pfn << PAGE_SHIFT_4K);
    1.44          dma_set_pte_prot(*pte, prot);
    1.45          iommu_flush_cache_entry(iommu, pte);
    1.46          unmap_vtd_domain_page(page);
    1.47 @@ -1727,7 +1726,7 @@ static int iommu_prepare_rmrr_dev(
    1.48      /* page table init */
    1.49      size = rmrr->end_address - rmrr->base_address + 1;
    1.50      ret = iommu_page_mapping(d, rmrr->base_address,
    1.51 -                             (void *)rmrr->base_address, size,
    1.52 +                             rmrr->base_address, size,
    1.53                               DMA_PTE_READ|DMA_PTE_WRITE);
    1.54      if ( ret )
    1.55          return ret;
     2.1 --- a/xen/drivers/passthrough/vtd/x86/vtd.c	Wed Apr 16 13:36:44 2008 +0100
     2.2 +++ b/xen/drivers/passthrough/vtd/x86/vtd.c	Wed Apr 16 13:40:46 2008 +0100
     2.3 @@ -153,12 +153,12 @@ void iommu_set_pgd(struct domain *d)
     2.4                  return;
     2.5              }
     2.6              pgd_mfn = _mfn(dma_pte_addr(*dpte) >> PAGE_SHIFT_4K);
     2.7 -            hd->pgd_maddr = mfn_x(pgd_mfn) << PAGE_SHIFT_4K;
     2.8 +            hd->pgd_maddr = (paddr_t)(mfn_x(pgd_mfn)) << PAGE_SHIFT_4K;
     2.9              unmap_domain_page(dpte);
    2.10              break;
    2.11          case VTD_PAGE_TABLE_LEVEL_4:
    2.12              pgd_mfn = _mfn(p2m_table);
    2.13 -            hd->pgd_maddr = mfn_x(pgd_mfn) << PAGE_SHIFT_4K;
    2.14 +            hd->pgd_maddr = (paddr_t)(mfn_x(pgd_mfn)) << PAGE_SHIFT_4K;
    2.15              break;
    2.16          default:
    2.17              gdprintk(XENLOG_ERR VTDPREFIX,
    2.18 @@ -250,12 +250,12 @@ void iommu_set_pgd(struct domain *d)
    2.19              }
    2.20  
    2.21              pgd_mfn = _mfn(l3e_get_pfn(*l3e));
    2.22 -            hd->pgd_maddr = mfn_x(pgd_mfn) << PAGE_SHIFT_4K;
    2.23 +            hd->pgd_maddr = (paddr_t)(mfn_x(pgd_mfn)) << PAGE_SHIFT_4K;
    2.24              unmap_domain_page(l3e);
    2.25              break;
    2.26          case VTD_PAGE_TABLE_LEVEL_4:
    2.27              pgd_mfn = _mfn(p2m_table);
    2.28 -            hd->pgd_maddr = mfn_x(pgd_mfn) << PAGE_SHIFT_4K;
    2.29 +            hd->pgd_maddr = (paddr_t)(mfn_x(pgd_mfn)) << PAGE_SHIFT_4K;
    2.30              break;
    2.31          default:
    2.32              gdprintk(XENLOG_ERR VTDPREFIX,