ia64/xen-unstable

changeset 17852:09dd5999401b

x86: remove use of per-domain lock from page table entry handling

This change yields a 5% performance improvement for kernel builds on
dual-socket quad-core systems (the reference hardware used for both the
32- and 64-bit measurements). In addition, the amount of time reported
as spent in the kernel drops by almost 25% (the fraction of time spent
in the kernel is generally reported as significantly higher under Xen
than with a native kernel).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 12 18:14:00 2008 +0100 (2008-06-12)
parents 98ed32885ec0
children 8a0415fac759
files xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/mm.c xen/include/asm-x86/mm.h
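
The core of the change is visible in the mm.c hunks below: a pair of
page_lock()/page_unlock() helpers serializes writers of an individual page
table page, so the coarse per-domain lock can be dropped from the PTE update
paths. On 64-bit builds struct page_info gains a real spinlock_t (see the
mm.h hunks at the end); on 32-bit builds, where page_info has no room for
one, the helpers spin on a _PGC_locked flag bit in count_info. As a rough,
self-contained sketch of that 32-bit bit-lock, using C11 atomics in place of
Xen's test_and_set_bit()/cpu_relax() primitives (the bit position and
structure layout here are illustrative only, not Xen's actual values):

#include <stdatomic.h>

#define _PGC_locked 28                 /* illustrative bit index, not Xen's */

struct page_info {
    atomic_ulong count_info;           /* the lock lives in one flag bit */
};

static void page_lock(struct page_info *page)
{
    /* Try to set the lock bit; if it was already set, spin with plain
     * reads until it clears, then try again (test-and-test-and-set). */
    while ( atomic_fetch_or_explicit(&page->count_info, 1UL << _PGC_locked,
                                     memory_order_acquire)
            & (1UL << _PGC_locked) )
        while ( atomic_load_explicit(&page->count_info, memory_order_relaxed)
                & (1UL << _PGC_locked) )
            ;                          /* cpu_relax() equivalent */
}

static void page_unlock(struct page_info *page)
{
    atomic_fetch_and_explicit(&page->count_info, ~(1UL << _PGC_locked),
                              memory_order_release);
}
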
--- a/xen/arch/x86/domain.c	Thu Jun 12 17:57:03 2008 +0100
+++ b/xen/arch/x86/domain.c	Thu Jun 12 18:14:00 2008 +0100
@@ -174,7 +174,7 @@ static int setup_compat_l4(struct vcpu *
         return -ENOMEM;
 
     /* This page needs to look like a pagetable so that it can be shadowed */
-    pg->u.inuse.type_info = PGT_l4_page_table|PGT_validated;
+    pg->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;
 
     l4tab = copy_page(page_to_virt(pg), idle_pg_table);
     l4tab[0] = l4e_empty();
--- a/xen/arch/x86/domain_build.c	Thu Jun 12 17:57:03 2008 +0100
+++ b/xen/arch/x86/domain_build.c	Thu Jun 12 18:14:00 2008 +0100
@@ -575,6 +575,7 @@ int __init construct_dom0(
         page = alloc_domheap_page(NULL, 0);
         if ( !page )
             panic("Not enough RAM for domain 0 PML4.\n");
+        page->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;
         l4start = l4tab = page_to_virt(page);
     }
     copy_page(l4tab, idle_pg_table);
--- a/xen/arch/x86/mm.c	Thu Jun 12 17:57:03 2008 +0100
+++ b/xen/arch/x86/mm.c	Thu Jun 12 18:14:00 2008 +0100
@@ -201,6 +201,11 @@ void __init init_frametable(void)
     }
 
     memset(frame_table, 0, nr_pages << PAGE_SHIFT);
+
+#if defined(__x86_64__)
+    for ( i = 0; i < max_page; i ++ )
+        spin_lock_init(&frame_table[i].lock);
+#endif
 }
 
 void __init arch_init_memory(void)
@@ -1356,6 +1361,25 @@ static void free_l4_table(struct page_in
 
 #endif
 
+static void page_lock(struct page_info *page)
+{
+#if defined(__i386__)
+    while ( unlikely(test_and_set_bit(_PGC_locked, &page->count_info)) )
+        while ( test_bit(_PGC_locked, &page->count_info) )
+            cpu_relax();
+#else
+    spin_lock(&page->lock);
+#endif
+}
+
+static void page_unlock(struct page_info *page)
+{
+#if defined(__i386__)
+    clear_bit(_PGC_locked, &page->count_info);
+#else
+    spin_unlock(&page->lock);
+#endif
+}
 
 /* How to write an entry to the guest pagetables.
  * Returns 0 for failure (pointer not valid), 1 for success. */
@@ -1417,24 +1441,33 @@ static int mod_l1_entry(l1_pgentry_t *pl
     struct vcpu *curr = current;
     struct domain *d = curr->domain;
     unsigned long mfn;
+    struct page_info *l1pg = mfn_to_page(gl1mfn);
+    int rc = 1;
+
+    page_lock(l1pg);
 
     if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
-        return 0;
+        return page_unlock(l1pg), 0;
 
     if ( unlikely(paging_mode_refcounts(d)) )
-        return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr, preserve_ad);
+    {
+        rc = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr, preserve_ad);
+        page_unlock(l1pg);
+        return rc;
+    }
 
     if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
     {
         /* Translate foreign guest addresses. */
         mfn = gmfn_to_mfn(FOREIGNDOM, l1e_get_pfn(nl1e));
         if ( unlikely(mfn == INVALID_MFN) )
-            return 0;
+            return page_unlock(l1pg), 0;
         ASSERT((mfn & ~(PADDR_MASK >> PAGE_SHIFT)) == 0);
         nl1e = l1e_from_pfn(mfn, l1e_get_flags(nl1e));
 
         if ( unlikely(l1e_get_flags(nl1e) & l1_disallow_mask(d)) )
         {
+            page_unlock(l1pg);
             MEM_LOG("Bad L1 flags %x",
                     l1e_get_flags(nl1e) & l1_disallow_mask(d));
             return 0;
@@ -1444,30 +1477,33 @@ static int mod_l1_entry(l1_pgentry_t *pl
         if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT) )
         {
             adjust_guest_l1e(nl1e, d);
-            return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
-                                preserve_ad);
+            rc = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
+                              preserve_ad);
+            page_unlock(l1pg);
+            return rc;
         }
 
         if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) )
-            return 0;
+            return page_unlock(l1pg), 0;
 
         adjust_guest_l1e(nl1e, d);
         if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
                                     preserve_ad)) )
         {
-            put_page_from_l1e(nl1e, d);
-            return 0;
+            ol1e = nl1e;
+            rc = 0;
         }
     }
-    else
+    else if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
+                                     preserve_ad)) )
     {
-        if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr,
-                                    preserve_ad)) )
-            return 0;
+        page_unlock(l1pg);
+        return 0;
     }
 
+    page_unlock(l1pg);
     put_page_from_l1e(ol1e, d);
-    return 1;
+    return rc;
 }
 
 
@@ -1481,6 +1517,8 @@ static int mod_l2_entry(l2_pgentry_t *pl
     l2_pgentry_t ol2e;
     struct vcpu *curr = current;
     struct domain *d = curr->domain;
+    struct page_info *l2pg = mfn_to_page(pfn);
+    int rc = 1;
 
     if ( unlikely(!is_guest_l2_slot(d, type, pgentry_ptr_to_slot(pl2e))) )
     {
@@ -1488,13 +1526,16 @@ static int mod_l2_entry(l2_pgentry_t *pl
         return 0;
     }
 
+    page_lock(l2pg);
+
     if ( unlikely(__copy_from_user(&ol2e, pl2e, sizeof(ol2e)) != 0) )
-        return 0;
+        return page_unlock(l2pg), 0;
 
     if ( l2e_get_flags(nl2e) & _PAGE_PRESENT )
     {
         if ( unlikely(l2e_get_flags(nl2e) & L2_DISALLOW_MASK) )
         {
+            page_unlock(l2pg);
             MEM_LOG("Bad L2 flags %x",
                     l2e_get_flags(nl2e) & L2_DISALLOW_MASK);
             return 0;
@@ -1504,28 +1545,32 @@ static int mod_l2_entry(l2_pgentry_t *pl
         if ( !l2e_has_changed(ol2e, nl2e, _PAGE_PRESENT) )
         {
             adjust_guest_l2e(nl2e, d);
-            return UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr, preserve_ad);
+            rc = UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr, preserve_ad);
+            page_unlock(l2pg);
+            return rc;
         }
 
         if ( unlikely(!get_page_from_l2e(nl2e, pfn, d)) )
-            return 0;
+            return page_unlock(l2pg), 0;
 
         adjust_guest_l2e(nl2e, d);
         if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr,
                                     preserve_ad)) )
         {
-            put_page_from_l2e(nl2e, pfn);
-            return 0;
+            ol2e = nl2e;
+            rc = 0;
         }
     }
     else if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr,
                                      preserve_ad)) )
    {
+        page_unlock(l2pg);
         return 0;
    }
 
+    page_unlock(l2pg);
     put_page_from_l2e(ol2e, pfn);
-    return 1;
+    return rc;
 }
 
 #if CONFIG_PAGING_LEVELS >= 3
@@ -1539,7 +1584,8 @@ static int mod_l3_entry(l3_pgentry_t *pl
     l3_pgentry_t ol3e;
     struct vcpu *curr = current;
     struct domain *d = curr->domain;
-    int okay;
+    struct page_info *l3pg = mfn_to_page(pfn);
+    int okay, rc = 1;
 
     if ( unlikely(!is_guest_l3_slot(pgentry_ptr_to_slot(pl3e))) )
     {
@@ -1554,13 +1600,16 @@ static int mod_l3_entry(l3_pgentry_t *pl
     if ( is_pv_32bit_domain(d) && (pgentry_ptr_to_slot(pl3e) >= 3) )
         return 0;
 
+    page_lock(l3pg);
+
     if ( unlikely(__copy_from_user(&ol3e, pl3e, sizeof(ol3e)) != 0) )
-        return 0;
+        return page_unlock(l3pg), 0;
 
     if ( l3e_get_flags(nl3e) & _PAGE_PRESENT )
     {
         if ( unlikely(l3e_get_flags(nl3e) & l3_disallow_mask(d)) )
         {
+            page_unlock(l3pg);
             MEM_LOG("Bad L3 flags %x",
                     l3e_get_flags(nl3e) & l3_disallow_mask(d));
             return 0;
@@ -1570,23 +1619,26 @@ static int mod_l3_entry(l3_pgentry_t *pl
         if ( !l3e_has_changed(ol3e, nl3e, _PAGE_PRESENT) )
        {
             adjust_guest_l3e(nl3e, d);
-            return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr, preserve_ad);
+            rc = UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr, preserve_ad);
+            page_unlock(l3pg);
+            return rc;
        }
 
         if ( unlikely(!get_page_from_l3e(nl3e, pfn, d)) )
-            return 0;
+            return page_unlock(l3pg), 0;
 
         adjust_guest_l3e(nl3e, d);
         if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr,
                                     preserve_ad)) )
        {
-            put_page_from_l3e(nl3e, pfn);
-            return 0;
+            ol3e = nl3e;
+            rc = 0;
        }
     }
     else if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr,
                                      preserve_ad)) )
     {
+        page_unlock(l3pg);
         return 0;
     }
 
@@ -1595,8 +1647,9 @@ static int mod_l3_entry(l3_pgentry_t *pl
 
     pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e);
 
+    page_unlock(l3pg);
     put_page_from_l3e(ol3e, pfn);
-    return 1;
+    return rc;
 }
 
 #endif
@@ -1612,6 +1665,8 @@ static int mod_l4_entry(l4_pgentry_t *pl
     struct vcpu *curr = current;
     struct domain *d = curr->domain;
     l4_pgentry_t ol4e;
+    struct page_info *l4pg = mfn_to_page(pfn);
+    int rc = 1;
 
     if ( unlikely(!is_guest_l4_slot(d, pgentry_ptr_to_slot(pl4e))) )
     {
@@ -1619,13 +1674,16 @@ static int mod_l4_entry(l4_pgentry_t *pl
         return 0;
     }
 
+    page_lock(l4pg);
+
     if ( unlikely(__copy_from_user(&ol4e, pl4e, sizeof(ol4e)) != 0) )
-        return 0;
+        return page_unlock(l4pg), 0;
 
     if ( l4e_get_flags(nl4e) & _PAGE_PRESENT )
     {
         if ( unlikely(l4e_get_flags(nl4e) & L4_DISALLOW_MASK) )
         {
+            page_unlock(l4pg);
             MEM_LOG("Bad L4 flags %x",
                     l4e_get_flags(nl4e) & L4_DISALLOW_MASK);
             return 0;
@@ -1635,28 +1693,32 @@ static int mod_l4_entry(l4_pgentry_t *pl
         if ( !l4e_has_changed(ol4e, nl4e, _PAGE_PRESENT) )
         {
             adjust_guest_l4e(nl4e, d);
-            return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr, preserve_ad);
+            rc = UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr, preserve_ad);
+            page_unlock(l4pg);
+            return rc;
         }
 
         if ( unlikely(!get_page_from_l4e(nl4e, pfn, d)) )
-            return 0;
+            return page_unlock(l4pg), 0;
 
         adjust_guest_l4e(nl4e, d);
         if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr,
                                     preserve_ad)) )
         {
-            put_page_from_l4e(nl4e, pfn);
-            return 0;
+            ol4e = nl4e;
+            rc = 0;
         }
     }
     else if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr,
                                      preserve_ad)) )
     {
+        page_unlock(l4pg);
         return 0;
     }
 
+    page_unlock(l4pg);
     put_page_from_l4e(ol4e, pfn);
-    return 1;
+    return rc;
 }
 
 #endif
@@ -2185,8 +2247,6 @@ int do_mmuext_op(
         goto out;
     }
 
-    domain_lock(d);
-
     for ( i = 0; i < count; i++ )
     {
         if ( hypercall_preempt_check() )
@@ -2434,8 +2494,6 @@ int do_mmuext_op(
 
     process_deferred_ops();
 
-    domain_unlock(d);
-
     perfc_add(num_mmuext_ops, i);
 
  out:
@@ -2489,8 +2547,6 @@ int do_mmu_update(
 
     domain_mmap_cache_init(&mapcache);
 
-    domain_lock(d);
-
     for ( i = 0; i < count; i++ )
     {
         if ( hypercall_preempt_check() )
@@ -2663,8 +2719,6 @@ int do_mmu_update(
 
     process_deferred_ops();
 
-    domain_unlock(d);
-
     domain_mmap_cache_destroy(&mapcache);
 
     perfc_add(num_page_updates, i);
@@ -2717,14 +2771,19 @@ static int create_grant_pte_mapping(
         goto failed;
     }
 
+    page_lock(page);
+
     ol1e = *(l1_pgentry_t *)va;
     if ( !UPDATE_ENTRY(l1, (l1_pgentry_t *)va, ol1e, nl1e, mfn, v, 0) )
     {
+        page_unlock(page);
         put_page_type(page);
         rc = GNTST_general_error;
         goto failed;
     } 
 
+    page_unlock(page);
+
     if ( !paging_mode_refcounts(d) )
         put_page_from_l1e(ol1e, d);
 
@@ -2768,16 +2827,14 @@ static int destroy_grant_pte_mapping(
         goto failed;
     }
 
-    if ( __copy_from_user(&ol1e, (l1_pgentry_t *)va, sizeof(ol1e)) )
-    {
-        put_page_type(page);
-        rc = GNTST_general_error;
-        goto failed;
-    }
+    page_lock(page);
+
+    ol1e = *(l1_pgentry_t *)va;
     
     /* Check that the virtual address supplied is actually mapped to frame. */
     if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame) )
     {
+        page_unlock(page);
         MEM_LOG("PTE entry %lx for address %"PRIx64" doesn't match frame %lx",
                 (unsigned long)l1e_get_intpte(ol1e), addr, frame);
         put_page_type(page);
@@ -2792,12 +2849,14 @@ static int destroy_grant_pte_mapping(
                    d->vcpu[0] /* Change if we go to per-vcpu shadows. */,
                    0)) )
     {
+        page_unlock(page);
         MEM_LOG("Cannot delete PTE entry at %p", va);
         put_page_type(page);
         rc = GNTST_general_error;
         goto failed;
     }
 
+    page_unlock(page);
     put_page_type(page);
 
  failed:
@@ -2813,6 +2872,7 @@ static int create_grant_va_mapping(
     l1_pgentry_t *pl1e, ol1e;
     struct domain *d = v->domain;
     unsigned long gl1mfn;
+    struct page_info *l1pg;
     int okay;
     
     ASSERT(domain_is_locked(d));
@@ -2825,8 +2885,11 @@ static int create_grant_va_mapping(
         MEM_LOG("Could not find L1 PTE for address %lx", va);
         return GNTST_general_error;
     }
+    l1pg = mfn_to_page(gl1mfn);
+    page_lock(l1pg);
     ol1e = *pl1e;
     okay = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, v, 0);
+    page_unlock(l1pg);
     guest_unmap_l1e(v, pl1e);
     pl1e = NULL;
 
@@ -2844,6 +2907,7 @@ static int replace_grant_va_mapping(
 {
     l1_pgentry_t *pl1e, ol1e;
     unsigned long gl1mfn;
+    struct page_info *l1pg;
     int rc = 0;
     
     pl1e = guest_map_l1e(v, addr, &gl1mfn);
@@ -2852,11 +2916,15 @@ static int replace_grant_va_mapping(
         MEM_LOG("Could not find L1 PTE for address %lx", addr);
         return GNTST_general_error;
     }
+
+    l1pg = mfn_to_page(gl1mfn);
+    page_lock(l1pg);
     ol1e = *pl1e;
 
     /* Check that the virtual address supplied is actually mapped to frame. */
     if ( unlikely(l1e_get_pfn(ol1e) != frame) )
     {
+        page_unlock(l1pg);
         MEM_LOG("PTE entry %lx for address %lx doesn't match frame %lx",
                 l1e_get_pfn(ol1e), addr, frame);
         rc = GNTST_general_error;
@@ -2866,11 +2934,14 @@ static int replace_grant_va_mapping(
     /* Delete pagetable entry. */
     if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, v, 0)) )
     {
+        page_unlock(l1pg);
         MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e);
         rc = GNTST_general_error;
         goto out;
     }
 
+    page_unlock(l1pg);
+
  out:
     guest_unmap_l1e(v, pl1e);
     return rc;
@@ -2905,6 +2976,7 @@ int replace_grant_host_mapping(
     struct vcpu *curr = current;
     l1_pgentry_t *pl1e, ol1e;
     unsigned long gl1mfn;
+    struct page_info *l1pg;
     int rc;
     
     if ( flags & GNTMAP_contains_pte )
@@ -2926,16 +2998,21 @@ int replace_grant_host_mapping(
                 (unsigned long)new_addr);
         return GNTST_general_error;
     }
+
+    l1pg = mfn_to_page(gl1mfn);
+    page_lock(l1pg);
     ol1e = *pl1e;
 
     if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, l1e_empty(),
                                 gl1mfn, curr, 0)) )
     {
+        page_unlock(l1pg);
         MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e);
         guest_unmap_l1e(curr, pl1e);
         return GNTST_general_error;
     }
 
+    page_unlock(l1pg);
     guest_unmap_l1e(curr, pl1e);
 
     rc = replace_grant_va_mapping(addr, frame, ol1e, curr);
@@ -3013,8 +3090,6 @@ int do_update_va_mapping(unsigned long v
     if ( rc )
         return rc;
 
-    domain_lock(d);
-
     pl1e = guest_map_l1e(v, va, &gl1mfn);
 
     if ( unlikely(!pl1e || !mod_l1_entry(pl1e, val, gl1mfn, 0)) )
@@ -3026,8 +3101,6 @@ int do_update_va_mapping(unsigned long v
 
     process_deferred_ops();
 
-    domain_unlock(d);
-
     switch ( flags & UVMF_FLUSHTYPE_MASK )
     {
     case UVMF_TLB_FLUSH:
@@ -3647,8 +3720,6 @@ int ptwr_do_page_fault(struct vcpu *v, u
     struct ptwr_emulate_ctxt ptwr_ctxt;
     int rc;
 
-    domain_lock(d);
-
     /* Attempt to read the PTE that maps the VA being accessed. */
     guest_get_eff_l1e(v, addr, &pte);
     page = l1e_get_page(pte);
@@ -3668,16 +3739,16 @@ int ptwr_do_page_fault(struct vcpu *v, u
     ptwr_ctxt.cr2 = addr;
     ptwr_ctxt.pte = pte;
 
+    page_lock(page);
     rc = x86_emulate(&ptwr_ctxt.ctxt, &ptwr_emulate_ops);
+    page_unlock(page);
     if ( rc == X86EMUL_UNHANDLEABLE )
         goto bail;
 
-    domain_unlock(d);
     perfc_incr(ptwr_emulations);
     return EXCRET_fault_fixed;
 
  bail:
-    domain_unlock(d);
     return 0;
 }
 
--- a/xen/include/asm-x86/mm.h	Thu Jun 12 17:57:03 2008 +0100
+++ b/xen/include/asm-x86/mm.h	Thu Jun 12 18:14:00 2008 +0100
@@ -46,6 +46,10 @@ struct page_info
 
     } u;
 
+#if defined(__x86_64__)
+    spinlock_t lock;
+#endif
+
     union {
         /*
          * Timestamp from 'TLB clock', used to avoid extra safety flushes.
@@ -61,10 +65,6 @@ struct page_info
          */
         u32 shadow_flags;
     };
-
-#if defined(__x86_64__)
-    spinlock_t lock;
-#endif
 };
 
  /* The following page types are MUTUALLY EXCLUSIVE. */
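
Taken together, the mod_lN_entry() and grant-mapping hunks all follow the
same protocol: snapshot the old entry and cmpxchg in the new one while
holding the per-page lock, then drop the displaced entry's reference only
after unlocking. A condensed, self-contained sketch of that ordering, with
every type and helper a stand-in rather than Xen code (the real failure
path, as the hunks above show, instead aliases ol1e = nl1e so the
just-acquired reference is the one dropped):

#include <stdatomic.h>
#include <stdio.h>

typedef unsigned long pte_t;

struct page_info {
    _Atomic pte_t pte;   /* stand-in for the guest PTE slot being updated */
    atomic_flag lock;    /* stand-in for the per-page lock */
};

static void page_lock(struct page_info *pg)
{
    while ( atomic_flag_test_and_set_explicit(&pg->lock,
                                              memory_order_acquire) )
        ;   /* spin */
}

static void page_unlock(struct page_info *pg)
{
    atomic_flag_clear_explicit(&pg->lock, memory_order_release);
}

/* Stand-in for put_page_from_l1e(): potentially expensive, which is why
 * the real code calls it only after page_unlock(). */
static void put_ref(pte_t e)
{
    printf("dropping reference on %#lx\n", e);
}

static int mod_entry(struct page_info *pg, pte_t nl1e)
{
    int rc = 1;

    page_lock(pg);
    pte_t ol1e = atomic_load(&pg->pte);   /* stable while the lock is held */
    if ( !atomic_compare_exchange_strong(&pg->pte, &ol1e, nl1e) )
        rc = 0;   /* cmpxchg failed, e.g. a hardware A/D-bit update raced */
    page_unlock(pg);
    put_ref(ol1e);   /* drop the displaced reference outside the lock */
    return rc;
}

int main(void)
{
    struct page_info pg = { .pte = 0x1000, .lock = ATOMIC_FLAG_INIT };
    return !mod_entry(&pg, 0x2000);
}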