ia64/xen-unstable

changeset 17692:70ca37d22895

Handle IOMMU pagetable allocations when set_p2m_entry is called with
non-zero page order.
Signed-off-by: Xin Xiaohui <xiaohui.xin@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed May 21 11:07:23 2008 +0100 (2008-05-21)
parents ff23c9a11085
children b58e95aee14f
files xen/arch/x86/mm/p2m.c
line diff
     1.1 --- a/xen/arch/x86/mm/p2m.c	Wed May 21 11:02:51 2008 +0100
     1.2 +++ b/xen/arch/x86/mm/p2m.c	Wed May 21 11:07:23 2008 +0100
     1.3 @@ -250,7 +250,7 @@ p2m_set_entry(struct domain *d, unsigned
     1.4      // XXX -- this might be able to be faster iff current->domain == d
     1.5      mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
     1.6      void *table =map_domain_page(mfn_x(table_mfn));
     1.7 -    unsigned long gfn_remainder = gfn;
     1.8 +    unsigned long i, gfn_remainder = gfn;
     1.9      l1_pgentry_t *p2m_entry;
    1.10      l1_pgentry_t entry_content;
    1.11      l2_pgentry_t l2e_content;
    1.12 @@ -328,9 +328,11 @@ p2m_set_entry(struct domain *d, unsigned
    1.13      if ( iommu_enabled && is_hvm_domain(d) )
    1.14      {
    1.15          if ( p2mt == p2m_ram_rw )
    1.16 -            iommu_map_page(d, gfn, mfn_x(mfn));
    1.17 +            for ( i = 0; i < (1UL << page_order); i++ )
    1.18 +                iommu_map_page(d, gfn+i, mfn_x(mfn)+i );
    1.19          else
    1.20 -            iommu_unmap_page(d, gfn);
     1.21 +            for ( i = 0; i < (1UL << page_order); i++ )
    1.22 +                iommu_unmap_page(d, gfn+i);
    1.23      }
    1.24  
    1.25      /* Success */