ia64/xen-unstable

changeset 19163:13a0272c8c02

x86: Clean up PV guest LDT handling.
1. Do not touch deferred_ops in invalidate_shadow_ldt(), as we may
not always be in a context where deferred_ops is valid.
2. Protect the shadow LDT with a lock, now that mmu updates are not
protected by the per-domain lock.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Feb 04 15:08:46 2009 +0000 (2009-02-04)
parents 271697e6d9b2
children de853e901b5c
files xen/arch/x86/domain.c xen/arch/x86/mm.c xen/include/asm-x86/domain.h
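
The diff below already shows the change in context. As a rough sketch of the pattern being introduced (not Xen code: toy_vcpu, toy_map_ldt_slot, toy_invalidate_shadow_ldt and toy_flush_tlb are made-up stand-ins, and a pthread mutex plays the role of the new spinlock), the following standalone C program shows a per-vcpu lock guarding the shadow LDT map count, with invalidation taking an explicit flush argument instead of queueing a deferred flush itself:

/*
 * Toy sketch of the locking pattern in this changeset; NOT Xen code.
 * A per-vcpu lock guards the shadow LDT map count (in Xen, also the
 * perdomain_ptes slots), and invalidation takes an explicit 'flush'
 * argument instead of touching deferred_ops itself.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_vcpu {
    pthread_mutex_t shadow_ldt_lock;    /* stands in for shadow_ldt_lock */
    unsigned long   shadow_ldt_mapcnt;  /* nr of shadow LDT slots mapped */
};

static void toy_flush_tlb(void)
{
    /* Stand-in for flush_tlb_mask(v->vcpu_dirty_cpumask). */
    printf("TLB flushed by invalidation\n");
}

/* Fault-in path: map one more shadow LDT slot, count updated under the lock. */
static void toy_map_ldt_slot(struct toy_vcpu *v)
{
    pthread_mutex_lock(&v->shadow_ldt_lock);
    /* ...write the shadow PTE here... */
    v->shadow_ldt_mapcnt++;
    pthread_mutex_unlock(&v->shadow_ldt_lock);
}

/*
 * Drop all shadow LDT mappings.  The caller decides whether a TLB flush
 * is needed here (flush != 0) or is already covered by something that
 * happens later, such as a CR3 switch or a queued flush (flush == 0).
 */
static void toy_invalidate_shadow_ldt(struct toy_vcpu *v, int flush)
{
    pthread_mutex_lock(&v->shadow_ldt_lock);
    if ( v->shadow_ldt_mapcnt != 0 )
    {
        v->shadow_ldt_mapcnt = 0;
        /* ...drop references on the mapped pages here... */
        if ( flush )
            toy_flush_tlb();
    }
    pthread_mutex_unlock(&v->shadow_ldt_lock);
}

int main(void)
{
    struct toy_vcpu v = {
        .shadow_ldt_lock   = PTHREAD_MUTEX_INITIALIZER,
        .shadow_ldt_mapcnt = 0,
    };

    toy_map_ldt_slot(&v);
    toy_invalidate_shadow_ldt(&v, 1); /* like put_page_from_l1e(): flush now */
    toy_invalidate_shadow_ldt(&v, 0); /* like new_guest_cr3(): flush later   */
    return 0;
}

In the patch itself, the current-vCPU callers (new_guest_cr3() and the MMUEXT_SET_LDT case in do_mmuext_op()) pass flush == 0 because a TLB flush is already covered by the subsequent write_ptbase() or by the DOP_FLUSH_TLB they queue, while put_page_from_l1e() passes flush == 1 so stale mappings on the vCPU's dirty CPUs are flushed immediately.
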
line diff
--- a/xen/arch/x86/domain.c	Wed Feb 04 14:46:47 2009 +0000
+++ b/xen/arch/x86/domain.c	Wed Feb 04 15:08:46 2009 +0000
@@ -352,6 +352,8 @@ int vcpu_initialise(struct vcpu *v)
     v->arch.perdomain_ptes =
         d->arch.mm_perdomain_pt + (v->vcpu_id << GDT_LDT_VCPU_SHIFT);
 
+    spin_lock_init(&v->arch.shadow_ldt_lock);
+
     return (is_pv_32on64_vcpu(v) ? setup_compat_l4(v) : 0);
 }
 
--- a/xen/arch/x86/mm.c	Wed Feb 04 14:46:47 2009 +0000
+++ b/xen/arch/x86/mm.c	Wed Feb 04 15:08:46 2009 +0000
@@ -179,12 +179,6 @@ l2_pgentry_t *compat_idle_pg_table_l2 =
 #define l3_disallow_mask(d) L3_DISALLOW_MASK
 #endif
 
-static void queue_deferred_ops(struct domain *d, unsigned int ops)
-{
-    ASSERT(d == current->domain);
-    this_cpu(percpu_mm_info).deferred_ops |= ops;
-}
-
 void __init init_frametable(void)
 {
     unsigned long nr_pages, page_step, i, mfn;
@@ -464,14 +458,18 @@ void update_cr3(struct vcpu *v)
 }
 
 
-static void invalidate_shadow_ldt(struct vcpu *v)
+static void invalidate_shadow_ldt(struct vcpu *v, int flush)
 {
     int i;
     unsigned long pfn;
     struct page_info *page;
 
+    BUG_ON(unlikely(in_irq()));
+
+    spin_lock(&v->arch.shadow_ldt_lock);
+
     if ( v->arch.shadow_ldt_mapcnt == 0 )
-        return;
+        goto out;
 
     v->arch.shadow_ldt_mapcnt = 0;
 
@@ -486,11 +484,12 @@ static void invalidate_shadow_ldt(struct
         put_page_and_type(page);
     }
 
-    /* Dispose of the (now possibly invalid) mappings from the TLB.  */
-    if ( v == current )
-        queue_deferred_ops(v->domain, DOP_FLUSH_TLB | DOP_RELOAD_LDT);
-    else
-        flush_tlb_mask(v->domain->domain_dirty_cpumask);
+    /* Rid TLBs of stale mappings (guest mappings and shadow mappings). */
+    if ( flush )
+        flush_tlb_mask(v->vcpu_dirty_cpumask);
+
+ out:
+    spin_unlock(&v->arch.shadow_ldt_lock);
 }
 
 
@@ -541,8 +540,10 @@ int map_ldt_shadow_page(unsigned int off
 
     nl1e = l1e_from_pfn(mfn, l1e_get_flags(l1e) | _PAGE_RW);
 
+    spin_lock(&v->arch.shadow_ldt_lock);
     l1e_write(&v->arch.perdomain_ptes[off + 16], nl1e);
     v->arch.shadow_ldt_mapcnt++;
+    spin_unlock(&v->arch.shadow_ldt_lock);
 
     return 1;
 }
@@ -989,7 +990,7 @@ void put_page_from_l1e(l1_pgentry_t l1e,
              (d == e) )
         {
             for_each_vcpu ( d, v )
-                invalidate_shadow_ldt(v);
+                invalidate_shadow_ldt(v, 1);
         }
         put_page(page);
     }
@@ -2375,7 +2376,7 @@ int new_guest_cr3(unsigned long mfn)
             return 0;
         }
 
-        invalidate_shadow_ldt(curr);
+        invalidate_shadow_ldt(curr, 0);
         write_ptbase(curr);
 
         return 1;
@@ -2390,7 +2391,7 @@ int new_guest_cr3(unsigned long mfn)
         return 0;
     }
 
-    invalidate_shadow_ldt(curr);
+    invalidate_shadow_ldt(curr, 0);
 
     old_base_mfn = pagetable_get_pfn(curr->arch.guest_table);
 
@@ -2427,6 +2428,10 @@ static void process_deferred_ops(void)
             flush_tlb_local();
     }
 
+    /*
+     * Do this after flushing TLBs, to ensure we see fresh LDT mappings
+     * via the linear pagetable mapping.
+     */
     if ( deferred_ops & DOP_RELOAD_LDT )
         (void)map_ldt_shadow_page(0);
 
@@ -2799,7 +2804,8 @@ int do_mmuext_op(
             else if ( (curr->arch.guest_context.ldt_ents != ents) || 
                       (curr->arch.guest_context.ldt_base != ptr) )
             {
-                invalidate_shadow_ldt(curr);
+                invalidate_shadow_ldt(curr, 0);
+                this_cpu(percpu_mm_info).deferred_ops |= DOP_FLUSH_TLB;
                 curr->arch.guest_context.ldt_base = ptr;
                 curr->arch.guest_context.ldt_ents = ents;
                 load_LDT(curr);
--- a/xen/include/asm-x86/domain.h	Wed Feb 04 14:46:47 2009 +0000
+++ b/xen/include/asm-x86/domain.h	Wed Feb 04 15:08:46 2009 +0000
@@ -352,6 +352,7 @@ struct arch_vcpu
 
     /* Current LDT details. */
     unsigned long shadow_ldt_mapcnt;
+    spinlock_t shadow_ldt_lock;
 
     struct paging_vcpu paging;
 