ia64/xen-unstable

changeset 19139:2d70ad9c3bc7

amd-iommu: obtain page_alloc_lock before traversing a domain's page list

As far as I can tell, this doesn't violate lock ordering, as other
places call heap allocation functions from inside hd->mapping_lock.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jan 30 11:13:06 2009 +0000 (2009-01-30)
parents 162cdb596b9a
children 102576868e8d
files xen/drivers/passthrough/amd/iommu_map.c
line diff
--- a/xen/drivers/passthrough/amd/iommu_map.c	Fri Jan 30 11:10:43 2009 +0000
+++ b/xen/drivers/passthrough/amd/iommu_map.c	Fri Jan 30 11:13:06 2009 +0000
@@ -567,6 +567,8 @@ int amd_iommu_sync_p2m(struct domain *d)
     if ( hd->p2m_synchronized )
         goto out;
 
+    spin_lock(&d->page_alloc_lock);
+
     page_list_for_each ( page, &d->page_list )
     {
         mfn = page_to_mfn(page);
@@ -579,6 +581,7 @@ int amd_iommu_sync_p2m(struct domain *d)
 
         if ( iommu_l2e == 0 )
         {
+            spin_unlock(&d->page_alloc_lock);
             amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
             spin_unlock_irqrestore(&hd->mapping_lock, flags);
             return -EFAULT;
@@ -587,6 +590,8 @@ int amd_iommu_sync_p2m(struct domain *d)
         set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
     }
 
+    spin_unlock(&d->page_alloc_lock);
+
     hd->p2m_synchronized = 1;
 
 out:
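
For context, below is a minimal sketch of the lock nesting in amd_iommu_sync_p2m() after this change. Only the lines visible in the hunks above are taken from the patch; the surrounding declarations, guards, and helper calls (domain_hvm_iommu, get_gpfn_from_mfn, iommu_l2e_from_pfn, the iw/ir flags) are assumptions about the rest of the function, not the verbatim source.

    /* Sketch, not verbatim: hd->mapping_lock (irqsave) is the outer lock,
     * d->page_alloc_lock is nested inside it while walking d->page_list,
     * and the two are released in reverse order on both the error path
     * and the normal path. */
    int amd_iommu_sync_p2m(struct domain *d)
    {
        struct hvm_iommu *hd = domain_hvm_iommu(d);      /* assumed accessor */
        struct page_info *page;
        unsigned long mfn, gfn, flags;
        u64 iommu_l2e;
        int iw = IOMMU_IO_WRITE_ENABLED, ir = IOMMU_IO_READ_ENABLED; /* assumed */

        spin_lock_irqsave(&hd->mapping_lock, flags);     /* outer lock */

        if ( hd->p2m_synchronized )
            goto out;

        spin_lock(&d->page_alloc_lock);                  /* inner lock added by this patch */

        page_list_for_each ( page, &d->page_list )
        {
            mfn = page_to_mfn(page);
            gfn = get_gpfn_from_mfn(mfn);                /* assumed M2P lookup */
            if ( gfn == INVALID_M2P_ENTRY )              /* assumed skip of unmapped pages */
                continue;

            iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn); /* assumed */

            if ( iommu_l2e == 0 )
            {
                spin_unlock(&d->page_alloc_lock);        /* drop inner lock first */
                amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
                spin_unlock_irqrestore(&hd->mapping_lock, flags);
                return -EFAULT;
            }

            set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
        }

        spin_unlock(&d->page_alloc_lock);                /* inner lock released */

        hd->p2m_synchronized = 1;

    out:
        spin_unlock_irqrestore(&hd->mapping_lock, flags);
        return 0;
    }

This nesting (page_alloc_lock inside mapping_lock) is the ordering the commit message argues is safe, since other code already allocates heap pages while holding hd->mapping_lock.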