ia64/xen-unstable

changeset 5301:6b7a4f646fef

bitkeeper revision 1.1662 (42a057ceLMHS_nHxLb-mZCG6csf27A)

All cpu bitmasks in Xen now use the cpumask_t type and its operators.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Jun 03 13:14:54 2005 +0000 (2005-06-03)
parents 56913f1c3e0d
children bbd1d54e014d 6946d70596e8
files xen/arch/ia64/domain.c xen/arch/ia64/smp.c xen/arch/ia64/xenmisc.c xen/arch/x86/domain.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/smp.c xen/common/domain.c xen/common/grant_table.c xen/common/page_alloc.c xen/common/schedule.c xen/include/asm-ia64/flushtlb.h xen/include/asm-x86/flushtlb.h xen/include/asm-x86/mm.h xen/include/public/xen.h xen/include/xen/sched.h xen/include/xen/smp.h
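
Note on the conversion: the patch replaces open-coded unsigned long bit
arithmetic with the Linux-style cpumask_t operators from <xen/cpumask.h>.
A minimal sketch of the operators as they appear throughout the diff below
(the helper itself is illustrative, not part of this changeset):

    #include <xen/cpumask.h>

    /* Illustrative only: how the old open-coded bit arithmetic maps
     * onto the cpumask_t operators used in this changeset. */
    static void cpumask_usage_sketch(cpumask_t online)
    {
        cpumask_t mask = CPU_MASK_NONE;      /* was: unsigned long mask = 0; */
        unsigned int cpu;

        cpu_set(0, mask);                    /* was: set_bit(0, &mask);      */
        cpus_and(mask, mask, online);        /* was: mask &= online;         */
        cpu_clear(0, mask);                  /* was: clear_bit(0, &mask);    */

        if ( !cpus_empty(mask) )             /* was: if ( mask != 0 )        */
            for_each_cpu_mask ( cpu, mask )  /* was: find_first_set_bit loop */
                (void)cpu;                   /* per-CPU work would go here   */
    }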
line diff
     1.1 --- a/xen/arch/ia64/domain.c	Fri Jun 03 11:22:37 2005 +0000
     1.2 +++ b/xen/arch/ia64/domain.c	Fri Jun 03 13:14:54 2005 +0000
     1.3 @@ -1242,24 +1242,3 @@ void domain_pend_keyboard_interrupt(int 
     1.4  {
     1.5  	vcpu_pend_interrupt(dom0->vcpu[0],irq);
     1.6  }
     1.7 -
     1.8 -/////////////////////////////////
     1.9 -// added 01Apr2005, to accomodate change in xen/sched.h, not clear
    1.10 -//  yet if this functionality is needed on ia64
    1.11 -#if 0
    1.12 -static void __synchronise_lazy_execstate(void *unused)
    1.13 -{
    1.14 -    if ( percpu_ctxt[smp_processor_id()].curr_ed != current )
    1.15 -    {
    1.16 -        __context_switch();
    1.17 -        load_LDT(current);
    1.18 -        clear_segments();
    1.19 -    }
    1.20 -}
    1.21 -#endif
    1.22 -
    1.23 -void synchronise_lazy_execstate(unsigned long cpuset)
    1.24 -{
    1.25 -    //smp_subset_call_function(__synchronise_lazy_execstate, NULL, 1, cpuset);
    1.26 -}
    1.27 -/////////////////////////////////
     2.1 --- a/xen/arch/ia64/smp.c	Fri Jun 03 11:22:37 2005 +0000
     2.2 +++ b/xen/arch/ia64/smp.c	Fri Jun 03 13:14:54 2005 +0000
     2.3 @@ -20,14 +20,14 @@
     2.4  
     2.5  
     2.6  //Huh? This seems to be used on ia64 even if !CONFIG_SMP
     2.7 -void flush_tlb_mask(unsigned long mask)
     2.8 +void flush_tlb_mask(cpumask_t mask)
     2.9  {
    2.10  	dummy();
    2.11  }
    2.12  //#if CONFIG_SMP || IA64
    2.13  #if CONFIG_SMP
    2.14  //Huh? This seems to be used on ia64 even if !CONFIG_SMP
    2.15 -void smp_send_event_check_mask(unsigned long cpu_mask)
    2.16 +void smp_send_event_check_mask(cpumask_t mask)
    2.17  {
    2.18  	dummy();
    2.19  	//send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
    2.20 @@ -35,7 +35,7 @@ void smp_send_event_check_mask(unsigned 
    2.21  
    2.22  
    2.23  //Huh? This seems to be used on ia64 even if !CONFIG_SMP
    2.24 -int try_flush_tlb_mask(unsigned long mask)
    2.25 +int try_flush_tlb_mask(cpumask_t mask)
    2.26  {
    2.27  	dummy();
    2.28  	return 1;
     3.1 --- a/xen/arch/ia64/xenmisc.c	Fri Jun 03 11:22:37 2005 +0000
     3.2 +++ b/xen/arch/ia64/xenmisc.c	Fri Jun 03 13:14:54 2005 +0000
     3.3 @@ -58,7 +58,8 @@ platform_is_hp_ski(void)
     3.4  
     3.5  /* calls in xen/common code that are unused on ia64 */
     3.6  
     3.7 -void sync_lazy_execstate_cpuset(unsigned long cpuset) {}
     3.8 +void sync_lazy_execstate_cpu(unsigned int cpu) {}
     3.9 +void sync_lazy_execstate_mask(cpumask_t mask) {}
    3.10  void sync_lazy_execstate_all(void) {}
    3.11  
    3.12  int grant_table_create(struct domain *d) { return 0; }
     4.1 --- a/xen/arch/x86/domain.c	Fri Jun 03 11:22:37 2005 +0000
     4.2 +++ b/xen/arch/x86/domain.c	Fri Jun 03 13:14:54 2005 +0000
     4.3 @@ -94,7 +94,7 @@ void startup_cpu_idle_loop(void)
     4.4  
     4.5      ASSERT(is_idle_task(v->domain));
     4.6      percpu_ctxt[smp_processor_id()].curr_vcpu = v;
     4.7 -    set_bit(smp_processor_id(), &v->domain->cpuset);
     4.8 +    cpu_set(smp_processor_id(), v->domain->cpumask);
     4.9      v->arch.schedule_tail = continue_idle_task;
    4.10  
    4.11      idle_loop();
    4.12 @@ -744,7 +744,7 @@ static void __context_switch(void)
    4.13      }
    4.14  
    4.15      if ( p->domain != n->domain )
    4.16 -        set_bit(cpu, &n->domain->cpuset);
    4.17 +        cpu_set(cpu, n->domain->cpumask);
    4.18  
    4.19      write_ptbase(n);
    4.20  
    4.21 @@ -757,7 +757,7 @@ static void __context_switch(void)
    4.22      }
    4.23  
    4.24      if ( p->domain != n->domain )
    4.25 -        clear_bit(cpu, &p->domain->cpuset);
    4.26 +        cpu_clear(cpu, p->domain->cpumask);
    4.27  
    4.28      percpu_ctxt[cpu].curr_vcpu = n;
    4.29  }
    4.30 @@ -817,19 +817,27 @@ int __sync_lazy_execstate(void)
    4.31      return 1;
    4.32  }
    4.33  
    4.34 -void sync_lazy_execstate_cpuset(unsigned long cpuset)
    4.35 +void sync_lazy_execstate_cpu(unsigned int cpu)
    4.36  {
    4.37 -    if ( cpuset & (1 << smp_processor_id()) )
    4.38 +    if ( cpu == smp_processor_id() )
    4.39 +        (void)__sync_lazy_execstate();
    4.40 +    else
    4.41 +        flush_tlb_mask(cpumask_of_cpu(cpu));
    4.42 +}
    4.43 +
    4.44 +void sync_lazy_execstate_mask(cpumask_t mask)
    4.45 +{
    4.46 +    if ( cpu_isset(smp_processor_id(), mask) )
    4.47          (void)__sync_lazy_execstate();
    4.48      /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
    4.49 -    flush_tlb_mask(cpuset & ~(1 << smp_processor_id()));
    4.50 +    flush_tlb_mask(mask);
    4.51  }
    4.52  
    4.53  void sync_lazy_execstate_all(void)
    4.54  {
    4.55      __sync_lazy_execstate();
    4.56      /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
    4.57 -    flush_tlb_mask(((1<<num_online_cpus())-1) & ~(1 << smp_processor_id()));
    4.58 +    flush_tlb_mask(cpu_online_map);
    4.59  }
    4.60  
    4.61  unsigned long __hypercall_create_continuation(
    4.62 @@ -971,7 +979,7 @@ void domain_relinquish_resources(struct 
    4.63  {
    4.64      struct vcpu *v;
    4.65  
    4.66 -    BUG_ON(d->cpuset != 0);
    4.67 +    BUG_ON(!cpus_empty(d->cpumask));
    4.68  
    4.69      physdev_destroy_state(d);
    4.70  
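
The single sync_lazy_execstate_cpuset() entry point is split above into
per-CPU and mask variants. Call sites migrate as in this sketch (a
hypothetical caller, assuming <xen/sched.h>, mirroring the schedule.c and
common/domain.c hunks later in this diff):

    /* was: sync_lazy_execstate_cpuset(d->cpuset & (1UL << v->processor)); */
    if ( cpu_isset(v->processor, d->cpumask) )
        sync_lazy_execstate_cpu(v->processor);

    /* was: sync_lazy_execstate_cpuset(d->cpuset); */
    sync_lazy_execstate_mask(d->cpumask);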
     5.1 --- a/xen/arch/x86/mm.c	Fri Jun 03 11:22:37 2005 +0000
     5.2 +++ b/xen/arch/x86/mm.c	Fri Jun 03 13:14:54 2005 +0000
     5.3 @@ -1348,13 +1348,13 @@ int get_page_type(struct pfn_info *page,
     5.4                   * may be unnecessary (e.g., page was GDT/LDT) but those
     5.5                   * circumstances should be very rare.
     5.6                   */
     5.7 -                unsigned long cpuset = tlbflush_filter_cpuset(
     5.8 -                    page_get_owner(page)->cpuset, page->tlbflush_timestamp);
     5.9 -
    5.10 -                if ( unlikely(cpuset != 0) )
    5.11 +                cpumask_t mask = page_get_owner(page)->cpumask;
    5.12 +                tlbflush_filter(mask, page->tlbflush_timestamp);
    5.13 +
    5.14 +                if ( unlikely(!cpus_empty(mask)) )
    5.15                  {
    5.16                      perfc_incrc(need_flush_tlb_flush);
    5.17 -                    flush_tlb_mask(cpuset);
    5.18 +                    flush_tlb_mask(mask);
    5.19                  }
    5.20  
    5.21                  /* We lose existing type, back pointer, and validity. */
    5.22 @@ -1555,23 +1555,23 @@ static int set_foreigndom(unsigned int c
    5.23      return okay;
    5.24  }
    5.25  
    5.26 -static inline unsigned long vcpuset_to_pcpuset(
    5.27 -    struct domain *d, unsigned long vset)
    5.28 +static inline cpumask_t vcpumask_to_pcpumask(
    5.29 +    struct domain *d, unsigned long vmask)
    5.30  {
    5.31 -    unsigned int  vcpu;
    5.32 -    unsigned long pset = 0;
    5.33 +    unsigned int vcpu_id;
    5.34 +    cpumask_t    pmask = CPU_MASK_NONE;
    5.35      struct vcpu *v;
    5.36  
    5.37 -    while ( vset != 0 )
    5.38 +    while ( vmask != 0 )
    5.39      {
    5.40 -        vcpu = find_first_set_bit(vset);
    5.41 -        vset &= ~(1UL << vcpu);
    5.42 -        if ( (vcpu < MAX_VIRT_CPUS) &&
    5.43 -             ((v = d->vcpu[vcpu]) != NULL) )
    5.44 -            pset |= 1UL << v->processor;
    5.45 +        vcpu_id = find_first_set_bit(vmask);
    5.46 +        vmask &= ~(1UL << vcpu_id);
    5.47 +        if ( (vcpu_id < MAX_VIRT_CPUS) &&
    5.48 +             ((v = d->vcpu[vcpu_id]) != NULL) )
    5.49 +            cpu_set(v->processor, pmask);
    5.50      }
    5.51  
    5.52 -    return pset;
    5.53 +    return pmask;
    5.54  }
    5.55  
    5.56  int do_mmuext_op(
    5.57 @@ -1731,34 +1731,28 @@ int do_mmuext_op(
    5.58          case MMUEXT_TLB_FLUSH_MULTI:
    5.59          case MMUEXT_INVLPG_MULTI:
    5.60          {
    5.61 -            unsigned long vset, pset;
    5.62 -            if ( unlikely(get_user(vset, (unsigned long *)op.cpuset)) )
    5.63 +            unsigned long vmask;
    5.64 +            cpumask_t     pmask;
    5.65 +            if ( unlikely(get_user(vmask, (unsigned long *)op.vcpumask)) )
    5.66              {
    5.67                  okay = 0;
    5.68                  break;
    5.69              }
    5.70 -            pset = vcpuset_to_pcpuset(d, vset);
    5.71 +            pmask = vcpumask_to_pcpumask(d, vmask);
    5.72 +            cpus_and(pmask, pmask, d->cpumask);
    5.73              if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
    5.74 -            {
    5.75 -                BUG_ON(shadow_mode_enabled(d) && ((pset & d->cpuset) != (1<<cpu)));
    5.76 -                flush_tlb_mask(pset & d->cpuset);
    5.77 -            }
    5.78 +                flush_tlb_mask(pmask);
    5.79              else
    5.80 -            {
    5.81 -                BUG_ON(shadow_mode_enabled(d) && ((pset & d->cpuset) != (1<<cpu)));
    5.82 -                flush_tlb_one_mask(pset & d->cpuset, op.linear_addr);
    5.83 -            }
    5.84 +                flush_tlb_one_mask(pmask, op.linear_addr);
    5.85              break;
    5.86          }
    5.87  
    5.88          case MMUEXT_TLB_FLUSH_ALL:
    5.89 -            BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
    5.90 -            flush_tlb_mask(d->cpuset);
    5.91 +            flush_tlb_mask(d->cpumask);
    5.92              break;
    5.93      
    5.94          case MMUEXT_INVLPG_ALL:
    5.95 -            BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
    5.96 -            flush_tlb_one_mask(d->cpuset, op.linear_addr);
    5.97 +            flush_tlb_one_mask(d->cpumask, op.linear_addr);
    5.98              break;
    5.99  
   5.100          case MMUEXT_FLUSH_CACHE:
   5.101 @@ -2256,7 +2250,8 @@ int do_update_va_mapping(unsigned long v
   5.102      struct vcpu   *v   = current;
   5.103      struct domain *d   = v->domain;
   5.104      unsigned int   cpu = v->processor;
   5.105 -    unsigned long  vset, pset, bmap_ptr;
   5.106 +    unsigned long  vmask, bmap_ptr;
   5.107 +    cpumask_t      pmask;
   5.108      int            rc  = 0;
   5.109  
   5.110      perfc_incrc(calls_to_update_va);
   5.111 @@ -2304,14 +2299,14 @@ int do_update_va_mapping(unsigned long v
   5.112              local_flush_tlb();
   5.113              break;
   5.114          case UVMF_ALL:
   5.115 -            BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
   5.116 -            flush_tlb_mask(d->cpuset);
   5.117 +            flush_tlb_mask(d->cpumask);
   5.118              break;
   5.119          default:
   5.120 -            if ( unlikely(get_user(vset, (unsigned long *)bmap_ptr)) )
   5.121 +            if ( unlikely(get_user(vmask, (unsigned long *)bmap_ptr)) )
   5.122                  rc = -EFAULT;
   5.123 -            pset = vcpuset_to_pcpuset(d, vset);
   5.124 -            flush_tlb_mask(pset & d->cpuset);
   5.125 +            pmask = vcpumask_to_pcpumask(d, vmask);
   5.126 +            cpus_and(pmask, pmask, d->cpumask);
   5.127 +            flush_tlb_mask(pmask);
   5.128              break;
   5.129          }
   5.130          break;
   5.131 @@ -2325,15 +2320,14 @@ int do_update_va_mapping(unsigned long v
   5.132              local_flush_tlb_one(va);
   5.133              break;
   5.134          case UVMF_ALL:
   5.135 -            BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu)));
   5.136 -            flush_tlb_one_mask(d->cpuset, va);
   5.137 +            flush_tlb_one_mask(d->cpumask, va);
   5.138              break;
   5.139          default:
   5.140 -            if ( unlikely(get_user(vset, (unsigned long *)bmap_ptr)) )
   5.141 +            if ( unlikely(get_user(vmask, (unsigned long *)bmap_ptr)) )
   5.142                  rc = -EFAULT;
   5.143 -            pset = vcpuset_to_pcpuset(d, vset);
   5.144 -            BUG_ON(shadow_mode_enabled(d) && (pset != (1<<cpu)));
   5.145 -            flush_tlb_one_mask(pset & d->cpuset, va);
   5.146 +            pmask = vcpumask_to_pcpumask(d, vmask);
   5.147 +            cpus_and(pmask, pmask, d->cpumask);
   5.148 +            flush_tlb_one_mask(pmask, va);
   5.149              break;
   5.150          }
   5.151          break;
   5.152 @@ -2646,7 +2640,7 @@ void ptwr_flush(struct domain *d, const 
   5.153  
   5.154      /* Ensure that there are no stale writable mappings in any TLB. */
   5.155      /* NB. INVLPG is a serialising instruction: flushes pending updates. */
   5.156 -    flush_tlb_one_mask(d->cpuset, l1va);
   5.157 +    flush_tlb_one_mask(d->cpumask, l1va);
   5.158      PTWR_PRINTK("[%c] disconnected_l1va at %p now %lx\n",
   5.159                  PTWR_PRINT_WHICH, ptep, pte);
   5.160  
   5.161 @@ -2911,7 +2905,7 @@ int ptwr_do_page_fault(struct domain *d,
   5.162      if ( which == PTWR_PT_ACTIVE )
   5.163      {
   5.164          l2e_remove_flags(*pl2e, _PAGE_PRESENT);
   5.165 -        flush_tlb_mask(d->cpuset);
   5.166 +        flush_tlb_mask(d->cpumask);
   5.167      }
   5.168      
   5.169      /* Temporarily map the L1 page, and make a copy of it. */
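
Every guest-supplied VCPU bitmap above now follows the same pattern:
translate VCPU ids to physical CPUs, clip to the domain's running set,
then flush. Condensed from the MMUEXT_TLB_FLUSH_MULTI case (error
handling simplified for the sketch; the real code sets okay = 0 and
breaks out of the switch):

    unsigned long vmask;
    cpumask_t     pmask;

    if ( unlikely(get_user(vmask, (unsigned long *)op.vcpumask)) )
        return -EFAULT;                      /* sketch only               */
    pmask = vcpumask_to_pcpumask(d, vmask);  /* VCPU ids -> physical CPUs */
    cpus_and(pmask, pmask, d->cpumask);      /* never IPI CPUs outside d  */
    flush_tlb_mask(pmask);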
     6.1 --- a/xen/arch/x86/shadow.c	Fri Jun 03 11:22:37 2005 +0000
     6.2 +++ b/xen/arch/x86/shadow.c	Fri Jun 03 13:14:54 2005 +0000
     6.3 @@ -428,7 +428,7 @@ void free_shadow_page(unsigned long smfn
     6.4      // No TLB flushes are needed the next time this page gets allocated.
     6.5      //
     6.6      page->tlbflush_timestamp = 0;
     6.7 -    page->u.free.cpu_mask = 0;
     6.8 +    page->u.free.cpumask     = CPU_MASK_NONE;
     6.9  
    6.10      if ( type == PGT_l1_shadow )
    6.11      {
    6.12 @@ -2532,7 +2532,7 @@ void __shadow_sync_all(struct domain *d)
    6.13      // page table page needs to be vcpu private).
    6.14      //
    6.15  #if 0 // this should be enabled for SMP guests...
    6.16 -    flush_tlb_mask(((1<<num_online_cpus()) - 1) & ~(1<<smp_processor_id()));
    6.17 +    flush_tlb_mask(cpu_online_map);
    6.18  #endif
    6.19      need_flush = 1;
    6.20  
     7.1 --- a/xen/arch/x86/smp.c	Fri Jun 03 11:22:37 2005 +0000
     7.2 +++ b/xen/arch/x86/smp.c	Fri Jun 03 13:14:54 2005 +0000
     7.3 @@ -185,7 +185,8 @@ inline void send_IPI_mask_sequence(cpuma
     7.4  #include <mach_ipi.h>
     7.5  
     7.6  static spinlock_t flush_lock = SPIN_LOCK_UNLOCKED;
     7.7 -static unsigned long flush_cpumask, flush_va;
     7.8 +static cpumask_t flush_cpumask;
     7.9 +static unsigned long flush_va;
    7.10  
    7.11  asmlinkage void smp_invalidate_interrupt(void)
    7.12  {
    7.13 @@ -198,30 +199,26 @@ asmlinkage void smp_invalidate_interrupt
    7.14          else
    7.15              local_flush_tlb_one(flush_va);
    7.16      }
    7.17 -    clear_bit(smp_processor_id(), &flush_cpumask);
    7.18 +    cpu_clear(smp_processor_id(), flush_cpumask);
    7.19  }
    7.20  
    7.21 -void __flush_tlb_mask(unsigned long mask, unsigned long va)
    7.22 +void __flush_tlb_mask(cpumask_t mask, unsigned long va)
    7.23  {
    7.24      ASSERT(local_irq_is_enabled());
    7.25      
    7.26 -    if ( mask & (1UL << smp_processor_id()) )
    7.27 +    if ( cpu_isset(smp_processor_id(), mask) )
    7.28      {
    7.29          local_flush_tlb();
    7.30 -        mask &= ~(1UL << smp_processor_id());
    7.31 +        cpu_clear(smp_processor_id(), mask);
    7.32      }
    7.33  
    7.34 -    if ( mask != 0 )
    7.35 +    if ( !cpus_empty(mask) )
    7.36      {
    7.37          spin_lock(&flush_lock);
    7.38          flush_cpumask = mask;
    7.39          flush_va      = va;
    7.40 -        {
    7.41 -            cpumask_t _mask;
    7.42 -            cpus_addr(_mask)[0] = mask;
    7.43 -            send_IPI_mask(_mask, INVALIDATE_TLB_VECTOR);
    7.44 -        }
    7.45 -        while ( flush_cpumask != 0 )
    7.46 +        send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
    7.47 +        while ( !cpus_empty(flush_cpumask) )
    7.48              cpu_relax();
    7.49          spin_unlock(&flush_lock);
    7.50      }
    7.51 @@ -236,11 +233,11 @@ void new_tlbflush_clock_period(void)
    7.52      if ( num_online_cpus() > 1 )
    7.53      {
    7.54          spin_lock(&flush_lock);
    7.55 -        flush_cpumask  = (1UL << num_online_cpus()) - 1;
    7.56 -        flush_cpumask &= ~(1UL << smp_processor_id());
    7.57 -        flush_va       = FLUSHVA_ALL;
    7.58 +        flush_cpumask = cpu_online_map;
    7.59 +        flush_va      = FLUSHVA_ALL;
    7.60          send_IPI_allbutself(INVALIDATE_TLB_VECTOR);
    7.61 -        while ( flush_cpumask != 0 )
    7.62 +        cpu_clear(smp_processor_id(), flush_cpumask);
    7.63 +        while ( !cpus_empty(flush_cpumask) )
    7.64              cpu_relax();
    7.65          spin_unlock(&flush_lock);
    7.66      }
    7.67 @@ -261,12 +258,10 @@ void flush_tlb_all_pge(void)
    7.68      local_flush_tlb_pge();
    7.69  }
    7.70  
    7.71 -void smp_send_event_check_mask(unsigned long cpu_mask)
    7.72 +void smp_send_event_check_mask(cpumask_t mask)
    7.73  {
    7.74 -    cpumask_t mask;
    7.75 -    cpu_mask &= ~(1UL << smp_processor_id());
    7.76 -    cpus_addr(mask)[0] = cpu_mask;
    7.77 -    if ( cpu_mask != 0 )
    7.78 +    cpu_clear(smp_processor_id(), mask);
    7.79 +    if ( !cpus_empty(mask) )
    7.80          send_IPI_mask(mask, EVENT_CHECK_VECTOR);
    7.81  }
    7.82  
    7.83 @@ -277,9 +272,8 @@ void smp_send_event_check_mask(unsigned 
    7.84  struct call_data_struct {
    7.85      void (*func) (void *info);
    7.86      void *info;
    7.87 -    unsigned long started;
    7.88 -    unsigned long finished;
    7.89 -    int wait;
    7.90 +    atomic_t started;
    7.91 +    atomic_t finished;
    7.92  };
    7.93  
    7.94  static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
    7.95 @@ -296,18 +290,17 @@ int smp_call_function(
    7.96      void (*func) (void *info), void *info, int unused, int wait)
    7.97  {
    7.98      struct call_data_struct data;
    7.99 -    unsigned long cpuset;
   7.100 +    unsigned int nr_cpus = num_online_cpus() - 1;
   7.101  
   7.102      ASSERT(local_irq_is_enabled());
   7.103  
   7.104 -    cpuset = ((1UL << num_online_cpus()) - 1) & ~(1UL << smp_processor_id());
   7.105 -    if ( cpuset == 0 )
   7.106 +    if ( nr_cpus == 0 )
   7.107          return 0;
   7.108  
   7.109      data.func = func;
   7.110      data.info = info;
   7.111 -    data.started = data.finished = 0;
   7.112 -    data.wait = wait;
   7.113 +    atomic_set(&data.started, 0);
   7.114 +    atomic_set(&data.finished, 0);
   7.115  
   7.116      spin_lock(&call_lock);
   7.117  
   7.118 @@ -316,7 +309,7 @@ int smp_call_function(
   7.119  
   7.120      send_IPI_allbutself(CALL_FUNCTION_VECTOR);
   7.121  
   7.122 -    while ( (wait ? data.finished : data.started) != cpuset )
   7.123 +    while ( atomic_read(wait ? &data.finished : &data.started) != nr_cpus )
   7.124          cpu_relax();
   7.125  
   7.126      spin_unlock(&call_lock);
   7.127 @@ -358,16 +351,11 @@ asmlinkage void smp_call_function_interr
   7.128      ack_APIC_irq();
   7.129      perfc_incrc(ipis);
   7.130  
   7.131 -    if ( call_data->wait )
   7.132 -    {
   7.133 -        (*func)(info);
   7.134 -        mb();
   7.135 -        set_bit(smp_processor_id(), &call_data->finished);
   7.136 -    }
   7.137 -    else
   7.138 -    {
   7.139 -        mb();
   7.140 -        set_bit(smp_processor_id(), &call_data->started);
   7.141 -        (*func)(info);
   7.142 -    }
   7.143 +    mb();
   7.144 +    atomic_inc(&call_data->started);
   7.145 +
   7.146 +    (*func)(info);
   7.147 +
   7.148 +    mb();
   7.149 +    atomic_inc(&call_data->finished);
   7.150  }
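
With the started/finished bitmasks replaced by atomic counters,
smp_call_function() no longer tracks which CPUs have responded, only how
many; the interrupt handler now unconditionally increments started before,
and finished after, running the callback. A usage sketch (the callback
name is hypothetical):

    /* Hypothetical callback; runs on each remote CPU in IRQ context. */
    static void remote_work(void *info)
    {
        /* per-CPU work here */
    }

    /* Run it on every other online CPU; wait=1 spins until
     * atomic_read(&data.finished) reaches num_online_cpus() - 1. */
    smp_call_function(remote_work, NULL, 0, 1);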
     8.1 --- a/xen/common/domain.c	Fri Jun 03 11:22:37 2005 +0000
     8.2 +++ b/xen/common/domain.c	Fri Jun 03 13:14:54 2005 +0000
     8.3 @@ -166,8 +166,8 @@ static void domain_shutdown_finalise(voi
     8.4          while ( test_bit(_VCPUF_running, &v->vcpu_flags) )
     8.5              cpu_relax();
     8.6  
     8.7 -    sync_lazy_execstate_cpuset(d->cpuset);
     8.8 -    BUG_ON(d->cpuset != 0);
     8.9 +    sync_lazy_execstate_mask(d->cpumask);
    8.10 +    BUG_ON(!cpus_empty(d->cpumask));
    8.11  
    8.12      sync_pagetable_state(d);
    8.13  
     9.1 --- a/xen/common/grant_table.c	Fri Jun 03 11:22:37 2005 +0000
     9.2 +++ b/xen/common/grant_table.c	Fri Jun 03 13:14:54 2005 +0000
     9.3 @@ -445,9 +445,9 @@ gnttab_map_grant_ref(
     9.4              flush++;
     9.5  
     9.6      if ( flush == 1 )
     9.7 -        flush_tlb_one_mask(current->domain->cpuset, va);
     9.8 +        flush_tlb_one_mask(current->domain->cpumask, va);
     9.9      else if ( flush != 0 ) 
    9.10 -        flush_tlb_mask(current->domain->cpuset);
    9.11 +        flush_tlb_mask(current->domain->cpumask);
    9.12  
    9.13      return 0;
    9.14  }
    9.15 @@ -641,9 +641,9 @@ gnttab_unmap_grant_ref(
    9.16              flush++;
    9.17  
    9.18      if ( flush == 1 )
    9.19 -        flush_tlb_one_mask(current->domain->cpuset, va);
    9.20 +        flush_tlb_one_mask(current->domain->cpumask, va);
    9.21      else if ( flush != 0 ) 
    9.22 -        flush_tlb_mask(current->domain->cpuset);
    9.23 +        flush_tlb_mask(current->domain->cpumask);
    9.24  
    9.25      return 0;
    9.26  }
    10.1 --- a/xen/common/page_alloc.c	Fri Jun 03 11:22:37 2005 +0000
    10.2 +++ b/xen/common/page_alloc.c	Fri Jun 03 13:14:54 2005 +0000
    10.3 @@ -485,7 +485,7 @@ void init_domheap_pages(unsigned long ps
    10.4  struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
    10.5  {
    10.6      struct pfn_info *pg;
    10.7 -    unsigned long mask = 0;
    10.8 +    cpumask_t mask;
    10.9      int i;
   10.10  
   10.11      ASSERT(!in_irq());
   10.12 @@ -493,17 +493,27 @@ struct pfn_info *alloc_domheap_pages(str
   10.13      if ( unlikely((pg = alloc_heap_pages(MEMZONE_DOM, order)) == NULL) )
   10.14          return NULL;
   10.15  
   10.16 -    for ( i = 0; i < (1 << order); i++ )
   10.17 +    mask = pg->u.free.cpumask;
   10.18 +    tlbflush_filter(mask, pg->tlbflush_timestamp);
   10.19 +
   10.20 +    pg->count_info        = 0;
   10.21 +    pg->u.inuse._domain   = 0;
   10.22 +    pg->u.inuse.type_info = 0;
   10.23 +
   10.24 +    for ( i = 1; i < (1 << order); i++ )
   10.25      {
   10.26 -        mask |= tlbflush_filter_cpuset(
   10.27 -            pg[i].u.free.cpu_mask & ~mask, pg[i].tlbflush_timestamp);
   10.28 +        /* Add in any extra CPUs that need flushing because of this page. */
   10.29 +        cpumask_t extra_cpus_mask;
   10.30 +        cpus_andnot(extra_cpus_mask, pg[i].u.free.cpumask, mask);
   10.31 +        tlbflush_filter(extra_cpus_mask, pg[i].tlbflush_timestamp);
   10.32 +        cpus_or(mask, mask, extra_cpus_mask);
   10.33  
   10.34          pg[i].count_info        = 0;
   10.35          pg[i].u.inuse._domain   = 0;
   10.36          pg[i].u.inuse.type_info = 0;
   10.37      }
   10.38  
   10.39 -    if ( unlikely(mask != 0) )
   10.40 +    if ( unlikely(!cpus_empty(mask)) )
   10.41      {
   10.42          perfc_incrc(need_flush_tlb_flush);
   10.43          flush_tlb_mask(mask);
   10.44 @@ -576,7 +586,7 @@ void free_domheap_pages(struct pfn_info 
   10.45              ASSERT(((pg[i].u.inuse.type_info & PGT_count_mask) == 0) ||
   10.46                     shadow_tainted_refcnts(d));
   10.47              pg[i].tlbflush_timestamp  = tlbflush_current_time();
   10.48 -            pg[i].u.free.cpu_mask     = d->cpuset;
   10.49 +            pg[i].u.free.cpumask      = d->cpumask;
   10.50              list_del(&pg[i].list);
   10.51          }
   10.52  
    11.1 --- a/xen/common/schedule.c	Fri Jun 03 11:22:37 2005 +0000
    11.2 +++ b/xen/common/schedule.c	Fri Jun 03 13:14:54 2005 +0000
    11.3 @@ -209,7 +209,8 @@ void domain_sleep_sync(struct vcpu *v)
    11.4      while ( test_bit(_VCPUF_running, &v->vcpu_flags) && !domain_runnable(v) )
    11.5          cpu_relax();
    11.6  
    11.7 -    sync_lazy_execstate_cpuset(v->domain->cpuset & (1UL << v->processor));
    11.8 +    if ( cpu_isset(v->processor, v->domain->cpumask) )
    11.9 +        sync_lazy_execstate_cpu(v->processor);
   11.10  }
   11.11  
   11.12  void domain_wake(struct vcpu *v)
    12.1 --- a/xen/include/asm-ia64/flushtlb.h	Fri Jun 03 11:22:37 2005 +0000
    12.2 +++ b/xen/include/asm-ia64/flushtlb.h	Fri Jun 03 13:14:54 2005 +0000
    12.3 @@ -8,7 +8,7 @@ extern u32 tlbflush_clock;
    12.4  extern u32 tlbflush_time[NR_CPUS];
    12.5  
    12.6  #define tlbflush_current_time() tlbflush_clock
    12.7 -#define tlbflush_filter_cpuset(x,y) (0)
    12.8 +#define tlbflush_filter(x,y) ((void)0)
    12.9  #define NEED_FLUSH(x, y) (0)
   12.10  
   12.11  #endif
    13.1 --- a/xen/include/asm-x86/flushtlb.h	Fri Jun 03 11:22:37 2005 +0000
    13.2 +++ b/xen/include/asm-x86/flushtlb.h	Fri Jun 03 13:14:54 2005 +0000
    13.3 @@ -44,24 +44,16 @@ static inline int NEED_FLUSH(u32 cpu_sta
    13.4  }
    13.5  
    13.6  /*
    13.7 - * Filter the given set of CPUs, returning only those that may not have
    13.8 - * flushed their TLBs since @page_timestamp.
    13.9 + * Filter the given set of CPUs, removing those that definitely flushed their
   13.10 + * TLB since @page_timestamp.
   13.11   */
   13.12 -static inline unsigned long tlbflush_filter_cpuset(
   13.13 -    unsigned long cpuset, u32 page_timestamp)
   13.14 -{
   13.15 -    int i;
   13.16 -    unsigned long remain;
   13.17 -
   13.18 -    for ( i = 0, remain = ~0UL; (cpuset & remain) != 0; i++, remain <<= 1 )
   13.19 -    {
   13.20 -        if ( (cpuset & (1UL << i)) &&
   13.21 -             !NEED_FLUSH(tlbflush_time[i], page_timestamp) )
   13.22 -            cpuset &= ~(1UL << i);
   13.23 -    }
   13.24 -
   13.25 -    return cpuset;
   13.26 -}
   13.27 +#define tlbflush_filter(mask, page_timestamp)                   \
   13.28 +do {                                                            \
   13.29 +    unsigned int cpu;                                           \
   13.30 +    for_each_cpu_mask ( cpu, mask )                             \
   13.31 +        if ( !NEED_FLUSH(tlbflush_time[cpu], page_timestamp) )  \
   13.32 +            cpu_clear(cpu, mask);                               \
   13.33 +} while ( 0 )
   13.34  
   13.35  extern void new_tlbflush_clock_period(void);
   13.36  
   13.37 @@ -93,19 +85,19 @@ extern void write_cr3(unsigned long cr3)
   13.38  #define local_flush_tlb_one(__addr) \
   13.39      __asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr)))
   13.40  
   13.41 -#define flush_tlb_all()     flush_tlb_mask((1 << num_online_cpus()) - 1)
   13.42 +#define flush_tlb_all()     flush_tlb_mask(cpu_online_map)
   13.43  
   13.44  #ifndef CONFIG_SMP
   13.45 -#define flush_tlb_all_pge()          local_flush_tlb_pge()
   13.46 -#define flush_tlb_mask(_mask)        local_flush_tlb()
   13.47 -#define flush_tlb_one_mask(_mask,_v) local_flush_tlb_one(_v)
   13.48 +#define flush_tlb_all_pge()        local_flush_tlb_pge()
   13.49 +#define flush_tlb_mask(mask)       local_flush_tlb()
   13.50 +#define flush_tlb_one_mask(mask,v) local_flush_tlb_one(v)
   13.51  #else
   13.52  #include <xen/smp.h>
   13.53  #define FLUSHVA_ALL (~0UL)
   13.54  extern void flush_tlb_all_pge(void);
   13.55 -extern void __flush_tlb_mask(unsigned long mask, unsigned long va);
   13.56 -#define flush_tlb_mask(_mask)        __flush_tlb_mask(_mask,FLUSHVA_ALL)
   13.57 -#define flush_tlb_one_mask(_mask,_v) __flush_tlb_mask(_mask,_v)
   13.58 +extern void __flush_tlb_mask(cpumask_t mask, unsigned long va);
   13.59 +#define flush_tlb_mask(mask)       __flush_tlb_mask(mask,FLUSHVA_ALL)
   13.60 +#define flush_tlb_one_mask(mask,v) __flush_tlb_mask(mask,v)
   13.61  #endif
   13.62  
   13.63  #endif /* __FLUSHTLB_H__ */
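
Note the interface change above: tlbflush_filter() is a statement macro
that edits its cpumask argument in place, where tlbflush_filter_cpuset()
returned a filtered copy, so callers copy a mask first. Condensed from the
get_page_type() hunk earlier in this diff:

    cpumask_t mask = page_get_owner(page)->cpumask;  /* candidate CPUs            */
    tlbflush_filter(mask, page->tlbflush_timestamp); /* drop CPUs already flushed */
    if ( unlikely(!cpus_empty(mask)) )
        flush_tlb_mask(mask);                        /* IPI only the stale ones   */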
    14.1 --- a/xen/include/asm-x86/mm.h	Fri Jun 03 11:22:37 2005 +0000
    14.2 +++ b/xen/include/asm-x86/mm.h	Fri Jun 03 13:14:54 2005 +0000
    14.3 @@ -3,6 +3,7 @@
    14.4  #define __ASM_X86_MM_H__
    14.5  
    14.6  #include <xen/config.h>
    14.7 +#include <xen/cpumask.h>
    14.8  #include <xen/list.h>
    14.9  #include <asm/io.h>
   14.10  #include <asm/uaccess.h>
   14.11 @@ -41,7 +42,7 @@ struct pfn_info
   14.12          /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
   14.13          struct {
   14.14              /* Mask of possibly-tainted TLBs. */
   14.15 -            u32 cpu_mask;
   14.16 +            cpumask_t cpumask;
   14.17              /* Order-size of the free chunk this page is the head of. */
   14.18              u8 order;
   14.19          } PACKED free;
    15.1 --- a/xen/include/public/xen.h	Fri Jun 03 11:22:37 2005 +0000
    15.2 +++ b/xen/include/public/xen.h	Fri Jun 03 13:14:54 2005 +0000
    15.3 @@ -124,11 +124,11 @@
    15.4   * linear_addr: Linear address to be flushed from the local TLB.
    15.5   * 
    15.6   * cmd: MMUEXT_TLB_FLUSH_MULTI
    15.7 - * cpuset: Pointer to bitmap of VCPUs to be flushed.
    15.8 + * vcpumask: Pointer to bitmap of VCPUs to be flushed.
    15.9   * 
   15.10   * cmd: MMUEXT_INVLPG_MULTI
   15.11   * linear_addr: Linear address to be flushed.
   15.12 - * cpuset: Pointer to bitmap of VCPUs to be flushed.
   15.13 + * vcpumask: Pointer to bitmap of VCPUs to be flushed.
   15.14   * 
   15.15   * cmd: MMUEXT_TLB_FLUSH_ALL
   15.16   * No additional arguments. Flushes all VCPUs' TLBs.
   15.17 @@ -177,7 +177,7 @@ struct mmuext_op {
   15.18          /* SET_LDT */
   15.19          unsigned int nr_ents;
   15.20          /* TLB_FLUSH_MULTI, INVLPG_MULTI */
   15.21 -        void *cpuset;
   15.22 +        void *vcpumask;
   15.23      };
   15.24  };
   15.25  #endif
   15.26 @@ -185,11 +185,11 @@ struct mmuext_op {
   15.27  /* These are passed as 'flags' to update_va_mapping. They can be ORed. */
   15.28  /* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap.   */
   15.29  /* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer.         */
   15.30 -#define UVMF_NONE               (0UL)    /* No flushing at all.   */
   15.31 +#define UVMF_NONE               (0UL<<0) /* No flushing at all.   */
   15.32  #define UVMF_TLB_FLUSH          (1UL<<0) /* Flush entire TLB(s).  */
   15.33  #define UVMF_INVLPG             (2UL<<0) /* Flush only one entry. */
   15.34  #define UVMF_FLUSHTYPE_MASK     (3UL<<0)
   15.35 -#define UVMF_MULTI              (0UL<<1) /* Flush subset of TLBs. */
   15.36 +#define UVMF_MULTI              (0UL<<2) /* Flush subset of TLBs. */
   15.37  #define UVMF_LOCAL              (0UL<<2) /* Flush local TLB.      */
   15.38  #define UVMF_ALL                (1UL<<2) /* Flush all TLBs.       */
   15.39  
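
The UVMF constants now encode two fields: a flush type in bits 0-1 (NONE,
TLB_FLUSH, INVLPG) and a flush target at bit 2 (LOCAL, ALL, or MULTI with
a bitmap pointer ORed in). A sketch of composing the flags word for
update_va_mapping, per the comment above (the guest-side bitmap variable
is hypothetical and assumed sufficiently aligned that its low bits do not
collide with the flag bits):

    unsigned long vcpu_bitmap = (1UL << 0) | (1UL << 1); /* flush VCPUs 0 and 1 */
    unsigned long flags = UVMF_INVLPG | UVMF_MULTI |
                          (unsigned long)&vcpu_bitmap;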
    16.1 --- a/xen/include/xen/sched.h	Fri Jun 03 11:22:37 2005 +0000
    16.2 +++ b/xen/include/xen/sched.h	Fri Jun 03 13:14:54 2005 +0000
    16.3 @@ -131,8 +131,8 @@ struct domain
    16.4  
    16.5      struct vcpu *vcpu[MAX_VIRT_CPUS];
    16.6  
    16.7 -    /* Bitmask of CPUs on which this domain is running. */
    16.8 -    unsigned long cpuset;
    16.9 +    /* Bitmask of CPUs which are holding onto this domain's state. */
   16.10 +    cpumask_t        cpumask;
   16.11  
   16.12      struct arch_domain arch;
   16.13  };
   16.14 @@ -249,7 +249,8 @@ void domain_sleep_sync(struct vcpu *d);
   16.15   * Force loading of currently-executing domain state on the specified set
   16.16   * of CPUs. This is used to counteract lazy state switching where required.
   16.17   */
   16.18 -extern void sync_lazy_execstate_cpuset(unsigned long cpuset);
   16.19 +extern void sync_lazy_execstate_cpu(unsigned int cpu);
   16.20 +extern void sync_lazy_execstate_mask(cpumask_t mask);
   16.21  extern void sync_lazy_execstate_all(void);
   16.22  extern int __sync_lazy_execstate(void);
   16.23  
    17.1 --- a/xen/include/xen/smp.h	Fri Jun 03 11:22:37 2005 +0000
    17.2 +++ b/xen/include/xen/smp.h	Fri Jun 03 13:14:54 2005 +0000
    17.3 @@ -22,8 +22,9 @@
    17.4   */
    17.5  extern void smp_send_stop(void);
    17.6  
    17.7 -extern void smp_send_event_check_mask(unsigned long cpu_mask);
    17.8 -#define smp_send_event_check_cpu(_cpu) smp_send_event_check_mask(1<<(_cpu))
    17.9 +extern void smp_send_event_check_mask(cpumask_t mask);
   17.10 +#define smp_send_event_check_cpu(cpu) \
   17.11 +    smp_send_event_check_mask(cpumask_of_cpu(cpu))
   17.12  
   17.13  /*
   17.14   * Prepare machine for booting other CPUs.
   17.15 @@ -86,8 +87,8 @@ void smp_prepare_boot_cpu(void);
   17.16   *	These macros fold the SMP functionality into a single CPU system
   17.17   */
   17.18  
   17.19 -#define smp_send_event_check_mask(_m)           ((void)0)
   17.20 -#define smp_send_event_check_cpu(_p)            ((void)0) 
   17.21 +#define smp_send_event_check_mask(m)            ((void)0)
   17.22 +#define smp_send_event_check_cpu(p)             ((void)0) 
   17.23  #ifndef __smp_processor_id
   17.24  #define smp_processor_id()			0
   17.25  #endif