ia64/xen-unstable

changeset 19152:c3b5e36248c9

x86: avoid redundant TLB flushes

While in some places the guest-requested flushes were already folded
into the deferred ops, this wasn't done consistently.

Also avoid using an uninitialized variable, even though doing so had
no correctness impact.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Feb 03 18:14:19 2009 +0000 (2009-02-03)
parents 66020c204f14
children 861ebefa7f39
files xen/arch/x86/mm.c
line diff
     1.1 --- a/xen/arch/x86/mm.c	Tue Feb 03 18:13:55 2009 +0000
     1.2 +++ b/xen/arch/x86/mm.c	Tue Feb 03 18:14:19 2009 +0000
     1.3 @@ -2773,7 +2773,7 @@ int do_mmuext_op(
     1.4          }
     1.5  
     1.6          case MMUEXT_TLB_FLUSH_ALL:
     1.7 -            flush_tlb_mask(d->domain_dirty_cpumask);
     1.8 +            this_cpu(percpu_mm_info).deferred_ops |= DOP_FLUSH_ALL_TLBS;
     1.9              break;
    1.10      
    1.11          case MMUEXT_INVLPG_ALL:
    1.12 @@ -3567,34 +3567,40 @@ int do_update_va_mapping(unsigned long v
    1.13      if ( pl1e )
    1.14          guest_unmap_l1e(v, pl1e);
    1.15  
    1.16 -    process_deferred_ops();
    1.17 -
    1.18      switch ( flags & UVMF_FLUSHTYPE_MASK )
    1.19      {
    1.20      case UVMF_TLB_FLUSH:
    1.21          switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) )
    1.22          {
    1.23          case UVMF_LOCAL:
    1.24 -            flush_tlb_local();
    1.25 +            this_cpu(percpu_mm_info).deferred_ops |= DOP_FLUSH_TLB;
    1.26              break;
    1.27          case UVMF_ALL:
    1.28 -            flush_tlb_mask(d->domain_dirty_cpumask);
    1.29 +            this_cpu(percpu_mm_info).deferred_ops |= DOP_FLUSH_ALL_TLBS;
    1.30              break;
    1.31          default:
    1.32 +            if ( this_cpu(percpu_mm_info).deferred_ops & DOP_FLUSH_ALL_TLBS )
    1.33 +                break;
    1.34              if ( unlikely(!is_pv_32on64_domain(d) ?
    1.35                            get_user(vmask, (unsigned long *)bmap_ptr) :
    1.36                            get_user(vmask, (unsigned int *)bmap_ptr)) )
    1.37 -                rc = -EFAULT;
    1.38 +                rc = -EFAULT, vmask = 0;
    1.39              pmask = vcpumask_to_pcpumask(d, vmask);
    1.40 +            if ( cpu_isset(smp_processor_id(), pmask) )
    1.41 +                this_cpu(percpu_mm_info).deferred_ops &= ~DOP_FLUSH_TLB;
    1.42              flush_tlb_mask(pmask);
    1.43              break;
    1.44          }
    1.45          break;
    1.46  
    1.47      case UVMF_INVLPG:
    1.48 +        if ( this_cpu(percpu_mm_info).deferred_ops & DOP_FLUSH_ALL_TLBS )
    1.49 +            break;
    1.50          switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) )
    1.51          {
    1.52          case UVMF_LOCAL:
    1.53 +            if ( this_cpu(percpu_mm_info).deferred_ops & DOP_FLUSH_TLB )
    1.54 +                break;
    1.55              if ( !paging_mode_enabled(d) ||
    1.56                   (paging_invlpg(v, va) != 0) ) 
    1.57                  flush_tlb_one_local(va);
    1.58 @@ -3606,14 +3612,18 @@ int do_update_va_mapping(unsigned long v
    1.59              if ( unlikely(!is_pv_32on64_domain(d) ?
    1.60                            get_user(vmask, (unsigned long *)bmap_ptr) :
    1.61                            get_user(vmask, (unsigned int *)bmap_ptr)) )
    1.62 -                rc = -EFAULT;
    1.63 +                rc = -EFAULT, vmask = 0;
    1.64              pmask = vcpumask_to_pcpumask(d, vmask);
    1.65 +            if ( this_cpu(percpu_mm_info).deferred_ops & DOP_FLUSH_TLB )
    1.66 +                cpu_clear(smp_processor_id(), pmask);
    1.67              flush_tlb_one_mask(pmask, va);
    1.68              break;
    1.69          }
    1.70          break;
    1.71      }
    1.72  
    1.73 +    process_deferred_ops();
    1.74 +
    1.75      return rc;
    1.76  }
    1.77