ia64/xen-unstable
changeset 19651:822ea2bf0c54
Pass cpumasks by reference always.
Rather than passing cpumasks by value in all cases (which is
problematic for large NR_CPUS configurations), pass them 'by
reference' (i.e. through a pointer to a const cpumask).
On x86 this changes send_IPI_mask() so that it only ever sends IPIs to
remote CPUs (meaning any caller that also needs the current CPU handled
must do so itself).
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
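To make the new calling convention concrete, here is a minimal illustrative sketch (not code from this changeset; flush_tlb_mask(), on_selected_cpus() and cpumask_of() are the interfaces the patch converts, while d, cpu, some_handler and some_arg are hypothetical):

    /* Old convention: the whole cpumask_t is copied by value, which grows
     * with NR_CPUS. */
    cpumask_t mask = d->domain_dirty_cpumask;
    flush_tlb_mask(mask);

    /* New convention: only a pointer to a const cpumask is passed. */
    flush_tlb_mask(&d->domain_dirty_cpumask);

    /* Single-CPU masks no longer need a local cpumask_t at all; the
     * constant mask returned by cpumask_of() is passed instead. */
    on_selected_cpus(cpumask_of(cpu), some_handler, some_arg, 1, 1);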
line diff
1.1 --- a/xen/arch/ia64/linux-xen/smp.c Wed May 27 10:38:51 2009 +0100 1.2 +++ b/xen/arch/ia64/linux-xen/smp.c Wed May 27 11:15:08 2009 +0100 1.3 @@ -57,19 +57,18 @@ 1.4 //#if CONFIG_SMP || IA64 1.5 #if CONFIG_SMP 1.6 //Huh? This seems to be used on ia64 even if !CONFIG_SMP 1.7 -void smp_send_event_check_mask(cpumask_t mask) 1.8 +void smp_send_event_check_mask(const cpumask_t *mask) 1.9 { 1.10 int cpu; 1.11 1.12 /* Not for me. */ 1.13 - cpu_clear(smp_processor_id(), mask); 1.14 - if (cpus_empty(mask)) 1.15 + if (cpus_subset(*mask, *cpumask_of(smp_processor_id()))) 1.16 return; 1.17 1.18 //printf("smp_send_event_check_mask called\n"); 1.19 1.20 for (cpu = 0; cpu < NR_CPUS; ++cpu) 1.21 - if (cpu_isset(cpu, mask)) 1.22 + if (cpu_isset(cpu, *mask) && cpu != smp_processor_id()) 1.23 platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0); 1.24 } 1.25 #endif 1.26 @@ -438,11 +437,11 @@ EXPORT_SYMBOL(smp_call_function); 1.27 1.28 #ifdef XEN 1.29 int 1.30 -on_selected_cpus(cpumask_t selected, void (*func) (void *info), void *info, 1.31 - int retry, int wait) 1.32 +on_selected_cpus(const cpumask_t *selected, void (*func) (void *info), 1.33 + void *info, int retry, int wait) 1.34 { 1.35 struct call_data_struct data; 1.36 - unsigned int cpu, nr_cpus = cpus_weight(selected); 1.37 + unsigned int cpu, nr_cpus = cpus_weight(*selected); 1.38 1.39 ASSERT(local_irq_is_enabled()); 1.40 1.41 @@ -460,7 +459,7 @@ on_selected_cpus(cpumask_t selected, voi 1.42 call_data = &data; 1.43 wmb(); 1.44 1.45 - for_each_cpu_mask(cpu, selected) 1.46 + for_each_cpu_mask(cpu, *selected) 1.47 send_IPI_single(cpu, IPI_CALL_FUNC); 1.48 1.49 while (atomic_read(wait ? &data.finished : &data.started) != nr_cpus)
2.1 --- a/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c Wed May 27 10:38:51 2009 +0100 2.2 +++ b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c Wed May 27 11:15:08 2009 +0100 2.3 @@ -239,7 +239,7 @@ sn2_global_tlb_purge(unsigned long start 2.4 flush_data.start = start; 2.5 flush_data.end = end; 2.6 flush_data.nbits = nbits; 2.7 - on_selected_cpus(selected_cpus, sn_flush_ptcga_cpu, 2.8 + on_selected_cpus(&selected_cpus, sn_flush_ptcga_cpu, 2.9 &flush_data, 1, 1); 2.10 } 2.11 spin_unlock(&sn2_ptcg_lock2);
3.1 --- a/xen/arch/ia64/xen/mm.c Wed May 27 10:38:51 2009 +0100 3.2 +++ b/xen/arch/ia64/xen/mm.c Wed May 27 11:15:08 2009 +0100 3.3 @@ -3189,7 +3189,7 @@ int get_page_type(struct page_info *page 3.4 if ( unlikely(!cpus_empty(mask)) ) 3.5 { 3.6 perfc_incr(need_flush_tlb_flush); 3.7 - flush_tlb_mask(mask); 3.8 + flush_tlb_mask(&mask); 3.9 } 3.10 3.11 /* We lose existing type, back pointer, and validity. */
4.1 --- a/xen/arch/ia64/xen/vhpt.c Wed May 27 10:38:51 2009 +0100 4.2 +++ b/xen/arch/ia64/xen/vhpt.c Wed May 27 11:15:08 2009 +0100 4.3 @@ -548,22 +548,21 @@ void flush_tlb_for_log_dirty(struct doma 4.4 cpus_clear (d->domain_dirty_cpumask); 4.5 } 4.6 4.7 -void flush_tlb_mask(cpumask_t mask) 4.8 +void flush_tlb_mask(const cpumask_t *mask) 4.9 { 4.10 int cpu; 4.11 4.12 cpu = smp_processor_id(); 4.13 - if (cpu_isset (cpu, mask)) { 4.14 - cpu_clear(cpu, mask); 4.15 + if (cpu_isset(cpu, *mask)) 4.16 flush_tlb_vhpt_all (NULL); 4.17 - } 4.18 4.19 - if (cpus_empty(mask)) 4.20 + if (cpus_subset(*mask, *cpumask_of(cpu))) 4.21 return; 4.22 4.23 - for_each_cpu_mask (cpu, mask) 4.24 - smp_call_function_single 4.25 - (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1); 4.26 + for_each_cpu_mask (cpu, *mask) 4.27 + if (cpu != smp_processor_id()) 4.28 + smp_call_function_single 4.29 + (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1); 4.30 } 4.31 4.32 #ifdef PERF_COUNTERS
5.1 --- a/xen/arch/x86/acpi/cpufreq/cpufreq.c Wed May 27 10:38:51 2009 +0100 5.2 +++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c Wed May 27 11:15:08 2009 +0100 5.3 @@ -186,7 +186,7 @@ static void drv_read(struct drv_cmd *cmd 5.4 if (likely(cpu_isset(smp_processor_id(), cmd->mask))) 5.5 do_drv_read((void *)cmd); 5.6 else 5.7 - on_selected_cpus( cmd->mask, do_drv_read, (void *)cmd, 0, 1); 5.8 + on_selected_cpus(&cmd->mask, do_drv_read, (void *)cmd, 0, 1); 5.9 } 5.10 5.11 static void drv_write(struct drv_cmd *cmd) 5.12 @@ -195,7 +195,7 @@ static void drv_write(struct drv_cmd *cm 5.13 cpu_isset(smp_processor_id(), cmd->mask)) 5.14 do_drv_write((void *)cmd); 5.15 else 5.16 - on_selected_cpus( cmd->mask, do_drv_write, (void *)cmd, 0, 0); 5.17 + on_selected_cpus(&cmd->mask, do_drv_write, (void *)cmd, 0, 0); 5.18 } 5.19 5.20 static u32 get_cur_val(cpumask_t mask) 5.21 @@ -274,7 +274,6 @@ static unsigned int get_measured_perf(un 5.22 struct cpufreq_policy *policy; 5.23 struct perf_pair readin, cur, *saved; 5.24 unsigned int perf_percent; 5.25 - cpumask_t cpumask; 5.26 unsigned int retval; 5.27 5.28 if (!cpu_online(cpu)) 5.29 @@ -303,8 +302,7 @@ static unsigned int get_measured_perf(un 5.30 if (cpu == smp_processor_id()) { 5.31 read_measured_perf_ctrs((void *)&readin); 5.32 } else { 5.33 - cpumask = cpumask_of_cpu(cpu); 5.34 - on_selected_cpus(cpumask, read_measured_perf_ctrs, 5.35 + on_selected_cpus(cpumask_of(cpu), read_measured_perf_ctrs, 5.36 (void *)&readin, 0, 1); 5.37 } 5.38
6.1 --- a/xen/arch/x86/acpi/cpufreq/powernow.c Wed May 27 10:38:51 2009 +0100 6.2 +++ b/xen/arch/x86/acpi/cpufreq/powernow.c Wed May 27 11:15:08 2009 +0100 6.3 @@ -121,7 +121,7 @@ static int powernow_cpufreq_target(struc 6.4 6.5 cmd.val = next_perf_state; 6.6 6.7 - on_selected_cpus( cmd.mask, transition_pstate, (void *) &cmd, 0, 0); 6.8 + on_selected_cpus(&cmd.mask, transition_pstate, (void *) &cmd, 0, 0); 6.9 6.10 perf->state = next_perf_state; 6.11 policy->cur = freqs.new;
7.1 --- a/xen/arch/x86/cpu/mcheck/mce.c Wed May 27 10:38:51 2009 +0100 7.2 +++ b/xen/arch/x86/cpu/mcheck/mce.c Wed May 27 11:15:08 2009 +0100 7.3 @@ -1205,8 +1205,8 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u 7.4 7.5 add_taint(TAINT_ERROR_INJECT); 7.6 7.7 - on_selected_cpus(cpumask_of_cpu(target), 7.8 - x86_mc_msrinject, mc_msrinject, 1, 1); 7.9 + on_selected_cpus(cpumask_of(target), x86_mc_msrinject, 7.10 + mc_msrinject, 1, 1); 7.11 7.12 break; 7.13 7.14 @@ -1225,8 +1225,8 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u 7.15 7.16 add_taint(TAINT_ERROR_INJECT); 7.17 7.18 - on_selected_cpus(cpumask_of_cpu(target), x86_mc_mceinject, 7.19 - mc_mceinject, 1, 1); 7.20 + on_selected_cpus(cpumask_of(target), x86_mc_mceinject, 7.21 + mc_mceinject, 1, 1); 7.22 break; 7.23 7.24 default:
8.1 --- a/xen/arch/x86/cpu/mtrr/main.c Wed May 27 10:38:51 2009 +0100 8.2 +++ b/xen/arch/x86/cpu/mtrr/main.c Wed May 27 11:15:08 2009 +0100 8.3 @@ -688,7 +688,7 @@ void mtrr_save_state(void) 8.4 if (cpu == 0) 8.5 mtrr_save_fixed_ranges(NULL); 8.6 else 8.7 - on_selected_cpus(cpumask_of_cpu(0), mtrr_save_fixed_ranges, NULL, 1, 1); 8.8 + on_selected_cpus(cpumask_of(0), mtrr_save_fixed_ranges, NULL, 1, 1); 8.9 put_cpu(); 8.10 } 8.11
9.1 --- a/xen/arch/x86/crash.c Wed May 27 10:38:51 2009 +0100 9.2 +++ b/xen/arch/x86/crash.c Wed May 27 11:15:08 2009 +0100 9.3 @@ -13,7 +13,6 @@ 9.4 #include <asm/percpu.h> 9.5 #include <xen/types.h> 9.6 #include <xen/irq.h> 9.7 -#include <asm/ipi.h> 9.8 #include <asm/nmi.h> 9.9 #include <xen/string.h> 9.10 #include <xen/elf.h> 9.11 @@ -51,19 +50,6 @@ static int crash_nmi_callback(struct cpu 9.12 return 1; 9.13 } 9.14 9.15 -/* 9.16 - * By using the NMI code instead of a vector we just sneak thru the 9.17 - * word generator coming out with just what we want. AND it does 9.18 - * not matter if clustered_apic_mode is set or not. 9.19 - */ 9.20 -static void smp_send_nmi_allbutself(void) 9.21 -{ 9.22 - cpumask_t allbutself = cpu_online_map; 9.23 - cpu_clear(smp_processor_id(), allbutself); 9.24 - if ( !cpus_empty(allbutself) ) 9.25 - send_IPI_mask(allbutself, APIC_DM_NMI); 9.26 -} 9.27 - 9.28 static void nmi_shootdown_cpus(void) 9.29 { 9.30 unsigned long msecs;
10.1 --- a/xen/arch/x86/domain.c Wed May 27 10:38:51 2009 +0100 10.2 +++ b/xen/arch/x86/domain.c Wed May 27 11:15:08 2009 +0100 10.3 @@ -1316,7 +1316,7 @@ void context_switch(struct vcpu *prev, s 10.4 if ( unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)) ) 10.5 { 10.6 /* Other cpus call __sync_lazy_execstate from flush ipi handler. */ 10.7 - flush_tlb_mask(dirty_mask); 10.8 + flush_tlb_mask(&dirty_mask); 10.9 } 10.10 10.11 if ( is_hvm_vcpu(prev) && !list_empty(&prev->arch.hvm_vcpu.tm_list) ) 10.12 @@ -1410,7 +1410,7 @@ void sync_vcpu_execstate(struct vcpu *v) 10.13 (void)__sync_lazy_execstate(); 10.14 10.15 /* Other cpus call __sync_lazy_execstate from flush ipi handler. */ 10.16 - flush_tlb_mask(v->vcpu_dirty_cpumask); 10.17 + flush_tlb_mask(&v->vcpu_dirty_cpumask); 10.18 } 10.19 10.20 struct migrate_info {
11.1 --- a/xen/arch/x86/genapic/x2apic.c Wed May 27 10:38:51 2009 +0100 11.2 +++ b/xen/arch/x86/genapic/x2apic.c Wed May 27 11:15:08 2009 +0100 11.3 @@ -56,7 +56,7 @@ unsigned int cpu_mask_to_apicid_x2apic(c 11.4 return cpu_physical_id(first_cpu(cpumask)); 11.5 } 11.6 11.7 -void send_IPI_mask_x2apic(cpumask_t cpumask, int vector) 11.8 +void send_IPI_mask_x2apic(const cpumask_t *cpumask, int vector) 11.9 { 11.10 unsigned int cpu, cfg; 11.11 unsigned long flags; 11.12 @@ -76,8 +76,9 @@ void send_IPI_mask_x2apic(cpumask_t cpum 11.13 local_irq_save(flags); 11.14 11.15 cfg = APIC_DM_FIXED | 0 /* no shorthand */ | APIC_DEST_PHYSICAL | vector; 11.16 - for_each_cpu_mask ( cpu, cpumask ) 11.17 - apic_wrmsr(APIC_ICR, cfg, cpu_physical_id(cpu)); 11.18 + for_each_cpu_mask ( cpu, *cpumask ) 11.19 + if ( cpu != smp_processor_id() ) 11.20 + apic_wrmsr(APIC_ICR, cfg, cpu_physical_id(cpu)); 11.21 11.22 local_irq_restore(flags); 11.23 }
12.1 --- a/xen/arch/x86/hpet.c Wed May 27 10:38:51 2009 +0100 12.2 +++ b/xen/arch/x86/hpet.c Wed May 27 11:15:08 2009 +0100 12.3 @@ -617,7 +617,7 @@ void hpet_disable_legacy_broadcast(void) 12.4 12.5 spin_unlock_irq(&legacy_hpet_event.lock); 12.6 12.7 - smp_send_event_check_mask(cpu_online_map); 12.8 + smp_send_event_check_mask(&cpu_online_map); 12.9 } 12.10 12.11 void hpet_broadcast_enter(void)
13.1 --- a/xen/arch/x86/hvm/hvm.c Wed May 27 10:38:51 2009 +0100 13.2 +++ b/xen/arch/x86/hvm/hvm.c Wed May 27 11:15:08 2009 +0100 13.3 @@ -2426,7 +2426,7 @@ static int hvmop_flush_tlb_all(void) 13.4 paging_update_cr3(v); 13.5 13.6 /* Flush all dirty TLBs. */ 13.7 - flush_tlb_mask(d->domain_dirty_cpumask); 13.8 + flush_tlb_mask(&d->domain_dirty_cpumask); 13.9 13.10 /* Done. */ 13.11 for_each_vcpu ( d, v )
14.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c Wed May 27 10:38:51 2009 +0100 14.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c Wed May 27 11:15:08 2009 +0100 14.3 @@ -264,7 +264,7 @@ static void vmx_clear_vmcs(struct vcpu * 14.4 int cpu = v->arch.hvm_vmx.active_cpu; 14.5 14.6 if ( cpu != -1 ) 14.7 - on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1); 14.8 + on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1, 1); 14.9 } 14.10 14.11 static void vmx_load_vmcs(struct vcpu *v) 14.12 @@ -900,7 +900,7 @@ void vmx_do_resume(struct vcpu *v) 14.13 { 14.14 int cpu = v->arch.hvm_vmx.active_cpu; 14.15 if ( cpu != -1 ) 14.16 - on_selected_cpus(cpumask_of_cpu(cpu), wbinvd_ipi, NULL, 1, 1); 14.17 + on_selected_cpus(cpumask_of(cpu), wbinvd_ipi, NULL, 1, 1); 14.18 } 14.19 14.20 vmx_clear_vmcs(v);
15.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Wed May 27 10:38:51 2009 +0100 15.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Wed May 27 11:15:08 2009 +0100 15.3 @@ -2164,7 +2164,7 @@ static void ept_handle_violation(unsigne 15.4 { 15.5 paging_mark_dirty(d, mfn_x(mfn)); 15.6 p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw); 15.7 - flush_tlb_mask(d->domain_dirty_cpumask); 15.8 + flush_tlb_mask(&d->domain_dirty_cpumask); 15.9 } 15.10 return; 15.11 }
16.1 --- a/xen/arch/x86/irq.c Wed May 27 10:38:51 2009 +0100 16.2 +++ b/xen/arch/x86/irq.c Wed May 27 11:15:08 2009 +0100 16.3 @@ -522,7 +522,7 @@ static void __pirq_guest_eoi(struct doma 16.4 } 16.5 16.6 if ( !cpus_empty(cpu_eoi_map) ) 16.7 - on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0); 16.8 + on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 0); 16.9 } 16.10 16.11 int pirq_guest_eoi(struct domain *d, int irq) 16.12 @@ -761,7 +761,7 @@ static irq_guest_action_t *__pirq_guest_ 16.13 { 16.14 cpu_eoi_map = action->cpu_eoi_map; 16.15 spin_unlock_irq(&desc->lock); 16.16 - on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0); 16.17 + on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 0); 16.18 spin_lock_irq(&desc->lock); 16.19 } 16.20 break; 16.21 @@ -799,7 +799,7 @@ static irq_guest_action_t *__pirq_guest_ 16.22 { 16.23 BUG_ON(action->ack_type != ACKTYPE_EOI); 16.24 spin_unlock_irq(&desc->lock); 16.25 - on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 1); 16.26 + on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 1); 16.27 spin_lock_irq(&desc->lock); 16.28 } 16.29
17.1 --- a/xen/arch/x86/machine_kexec.c Wed May 27 10:38:51 2009 +0100 17.2 +++ b/xen/arch/x86/machine_kexec.c Wed May 27 11:15:08 2009 +0100 17.3 @@ -91,7 +91,6 @@ static void __machine_reboot_kexec(void 17.4 void machine_reboot_kexec(xen_kexec_image_t *image) 17.5 { 17.6 int reboot_cpu_id; 17.7 - cpumask_t reboot_cpu; 17.8 17.9 reboot_cpu_id = 0; 17.10 17.11 @@ -100,9 +99,8 @@ void machine_reboot_kexec(xen_kexec_imag 17.12 17.13 if ( reboot_cpu_id != smp_processor_id() ) 17.14 { 17.15 - cpus_clear(reboot_cpu); 17.16 - cpu_set(reboot_cpu_id, reboot_cpu); 17.17 - on_selected_cpus(reboot_cpu, __machine_reboot_kexec, image, 1, 0); 17.18 + on_selected_cpus(cpumask_of(reboot_cpu_id), __machine_reboot_kexec, 17.19 + image, 1, 0); 17.20 for (;;) 17.21 ; /* nothing */ 17.22 }
18.1 --- a/xen/arch/x86/mm.c Wed May 27 10:38:51 2009 +0100 18.2 +++ b/xen/arch/x86/mm.c Wed May 27 11:15:08 2009 +0100 18.3 @@ -517,7 +517,7 @@ static void invalidate_shadow_ldt(struct 18.4 18.5 /* Rid TLBs of stale mappings (guest mappings and shadow mappings). */ 18.6 if ( flush ) 18.7 - flush_tlb_mask(v->vcpu_dirty_cpumask); 18.8 + flush_tlb_mask(&v->vcpu_dirty_cpumask); 18.9 18.10 out: 18.11 spin_unlock(&v->arch.shadow_ldt_lock); 18.12 @@ -1250,7 +1250,7 @@ static void pae_flush_pgd( 18.13 paging_update_cr3(v); 18.14 cpus_or(m, m, v->vcpu_dirty_cpumask); 18.15 } 18.16 - flush_tlb_mask(m); 18.17 + flush_tlb_mask(&m); 18.18 } 18.19 18.20 /* If below 4GB then the pgdir is not shadowed in low memory. */ 18.21 @@ -1275,7 +1275,7 @@ static void pae_flush_pgd( 18.22 spin_unlock(&cache->lock); 18.23 } 18.24 18.25 - flush_tlb_mask(d->domain_dirty_cpumask); 18.26 + flush_tlb_mask(&d->domain_dirty_cpumask); 18.27 } 18.28 #else 18.29 # define pae_flush_pgd(mfn, idx, nl3e) ((void)0) 18.30 @@ -2290,7 +2290,7 @@ static int __get_page_type(struct page_i 18.31 ((nx & PGT_type_mask) == PGT_writable_page)) ) 18.32 { 18.33 perfc_incr(need_flush_tlb_flush); 18.34 - flush_tlb_mask(mask); 18.35 + flush_tlb_mask(&mask); 18.36 } 18.37 18.38 /* We lose existing type and validity. */ 18.39 @@ -2489,7 +2489,7 @@ static void process_deferred_ops(void) 18.40 if ( deferred_ops & (DOP_FLUSH_ALL_TLBS|DOP_FLUSH_TLB) ) 18.41 { 18.42 if ( deferred_ops & DOP_FLUSH_ALL_TLBS ) 18.43 - flush_tlb_mask(d->domain_dirty_cpumask); 18.44 + flush_tlb_mask(&d->domain_dirty_cpumask); 18.45 else 18.46 flush_tlb_local(); 18.47 } 18.48 @@ -2824,9 +2824,9 @@ int do_mmuext_op( 18.49 } 18.50 pmask = vcpumask_to_pcpumask(d, vmask); 18.51 if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI ) 18.52 - flush_tlb_mask(pmask); 18.53 + flush_tlb_mask(&pmask); 18.54 else 18.55 - flush_tlb_one_mask(pmask, op.arg1.linear_addr); 18.56 + flush_tlb_one_mask(&pmask, op.arg1.linear_addr); 18.57 break; 18.58 } 18.59 18.60 @@ -2835,7 +2835,7 @@ int do_mmuext_op( 18.61 break; 18.62 18.63 case MMUEXT_INVLPG_ALL: 18.64 - flush_tlb_one_mask(d->domain_dirty_cpumask, op.arg1.linear_addr); 18.65 + flush_tlb_one_mask(&d->domain_dirty_cpumask, op.arg1.linear_addr); 18.66 break; 18.67 18.68 case MMUEXT_FLUSH_CACHE: 18.69 @@ -3688,7 +3688,7 @@ int do_update_va_mapping(unsigned long v 18.70 pmask = vcpumask_to_pcpumask(d, vmask); 18.71 if ( cpu_isset(smp_processor_id(), pmask) ) 18.72 this_cpu(percpu_mm_info).deferred_ops &= ~DOP_FLUSH_TLB; 18.73 - flush_tlb_mask(pmask); 18.74 + flush_tlb_mask(&pmask); 18.75 break; 18.76 } 18.77 break; 18.78 @@ -3706,7 +3706,7 @@ int do_update_va_mapping(unsigned long v 18.79 flush_tlb_one_local(va); 18.80 break; 18.81 case UVMF_ALL: 18.82 - flush_tlb_one_mask(d->domain_dirty_cpumask, va); 18.83 + flush_tlb_one_mask(&d->domain_dirty_cpumask, va); 18.84 break; 18.85 default: 18.86 if ( unlikely(!is_pv_32on64_domain(d) ? 18.87 @@ -3716,7 +3716,7 @@ int do_update_va_mapping(unsigned long v 18.88 pmask = vcpumask_to_pcpumask(d, vmask); 18.89 if ( this_cpu(percpu_mm_info).deferred_ops & DOP_FLUSH_TLB ) 18.90 cpu_clear(smp_processor_id(), pmask); 18.91 - flush_tlb_one_mask(pmask, va); 18.92 + flush_tlb_one_mask(&pmask, va); 18.93 break; 18.94 } 18.95 break;
19.1 --- a/xen/arch/x86/mm/hap/hap.c Wed May 27 10:38:51 2009 +0100 19.2 +++ b/xen/arch/x86/mm/hap/hap.c Wed May 27 11:15:08 2009 +0100 19.3 @@ -64,7 +64,7 @@ int hap_enable_log_dirty(struct domain * 19.4 19.5 /* set l1e entries of P2M table to be read-only. */ 19.6 p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty); 19.7 - flush_tlb_mask(d->domain_dirty_cpumask); 19.8 + flush_tlb_mask(&d->domain_dirty_cpumask); 19.9 return 0; 19.10 } 19.11 19.12 @@ -83,7 +83,7 @@ void hap_clean_dirty_bitmap(struct domai 19.13 { 19.14 /* set l1e entries of P2M table to be read-only. */ 19.15 p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty); 19.16 - flush_tlb_mask(d->domain_dirty_cpumask); 19.17 + flush_tlb_mask(&d->domain_dirty_cpumask); 19.18 } 19.19 19.20 /************************************************/ 19.21 @@ -643,7 +643,7 @@ hap_write_p2m_entry(struct vcpu *v, unsi 19.22 safe_write_pte(p, new); 19.23 if ( (old_flags & _PAGE_PRESENT) 19.24 && (level == 1 || (level == 2 && (old_flags & _PAGE_PSE))) ) 19.25 - flush_tlb_mask(v->domain->domain_dirty_cpumask); 19.26 + flush_tlb_mask(&v->domain->domain_dirty_cpumask); 19.27 19.28 #if CONFIG_PAGING_LEVELS == 3 19.29 /* install P2M in monitor table for PAE Xen */
20.1 --- a/xen/arch/x86/mm/shadow/common.c Wed May 27 10:38:51 2009 +0100 20.2 +++ b/xen/arch/x86/mm/shadow/common.c Wed May 27 11:15:08 2009 +0100 20.3 @@ -695,7 +695,7 @@ static int oos_remove_write_access(struc 20.4 } 20.5 20.6 if ( ftlb ) 20.7 - flush_tlb_mask(v->domain->domain_dirty_cpumask); 20.8 + flush_tlb_mask(&v->domain->domain_dirty_cpumask); 20.9 20.10 return 0; 20.11 } 20.12 @@ -1145,7 +1145,7 @@ sh_validate_guest_pt_write(struct vcpu * 20.13 rc = sh_validate_guest_entry(v, gmfn, entry, size); 20.14 if ( rc & SHADOW_SET_FLUSH ) 20.15 /* Need to flush TLBs to pick up shadow PT changes */ 20.16 - flush_tlb_mask(d->domain_dirty_cpumask); 20.17 + flush_tlb_mask(&d->domain_dirty_cpumask); 20.18 if ( rc & SHADOW_SET_ERROR ) 20.19 { 20.20 /* This page is probably not a pagetable any more: tear it out of the 20.21 @@ -1393,7 +1393,7 @@ static void _shadow_prealloc( 20.22 /* See if that freed up enough space */ 20.23 if ( space_is_available(d, order, count) ) 20.24 { 20.25 - flush_tlb_mask(d->domain_dirty_cpumask); 20.26 + flush_tlb_mask(&d->domain_dirty_cpumask); 20.27 return; 20.28 } 20.29 } 20.30 @@ -1447,7 +1447,7 @@ static void shadow_blow_tables(struct do 20.31 pagetable_get_mfn(v->arch.shadow_table[i])); 20.32 20.33 /* Make sure everyone sees the unshadowings */ 20.34 - flush_tlb_mask(d->domain_dirty_cpumask); 20.35 + flush_tlb_mask(&d->domain_dirty_cpumask); 20.36 } 20.37 20.38 void shadow_blow_tables_per_domain(struct domain *d) 20.39 @@ -1554,7 +1554,7 @@ mfn_t shadow_alloc(struct domain *d, 20.40 if ( unlikely(!cpus_empty(mask)) ) 20.41 { 20.42 perfc_incr(shadow_alloc_tlbflush); 20.43 - flush_tlb_mask(mask); 20.44 + flush_tlb_mask(&mask); 20.45 } 20.46 /* Now safe to clear the page for reuse */ 20.47 p = sh_map_domain_page(page_to_mfn(sp+i)); 20.48 @@ -2803,7 +2803,7 @@ void sh_remove_shadows(struct vcpu *v, m 20.49 20.50 /* Need to flush TLBs now, so that linear maps are safe next time we 20.51 * take a fault. */ 20.52 - flush_tlb_mask(v->domain->domain_dirty_cpumask); 20.53 + flush_tlb_mask(&v->domain->domain_dirty_cpumask); 20.54 20.55 if ( do_locking ) shadow_unlock(v->domain); 20.56 } 20.57 @@ -3435,7 +3435,7 @@ shadow_write_p2m_entry(struct vcpu *v, u 20.58 { 20.59 sh_remove_all_shadows_and_parents(v, mfn); 20.60 if ( sh_remove_all_mappings(v, mfn) ) 20.61 - flush_tlb_mask(d->domain_dirty_cpumask); 20.62 + flush_tlb_mask(&d->domain_dirty_cpumask); 20.63 } 20.64 } 20.65 20.66 @@ -3474,7 +3474,7 @@ shadow_write_p2m_entry(struct vcpu *v, u 20.67 } 20.68 omfn = _mfn(mfn_x(omfn) + 1); 20.69 } 20.70 - flush_tlb_mask(flushmask); 20.71 + flush_tlb_mask(&flushmask); 20.72 20.73 if ( npte ) 20.74 unmap_domain_page(npte); 20.75 @@ -3752,7 +3752,7 @@ int shadow_track_dirty_vram(struct domai 20.76 } 20.77 } 20.78 if ( flush_tlb ) 20.79 - flush_tlb_mask(d->domain_dirty_cpumask); 20.80 + flush_tlb_mask(&d->domain_dirty_cpumask); 20.81 goto out; 20.82 20.83 out_sl1ma:
21.1 --- a/xen/arch/x86/mm/shadow/multi.c Wed May 27 10:38:51 2009 +0100 21.2 +++ b/xen/arch/x86/mm/shadow/multi.c Wed May 27 11:15:08 2009 +0100 21.3 @@ -3146,7 +3146,7 @@ static int sh_page_fault(struct vcpu *v, 21.4 */ 21.5 perfc_incr(shadow_rm_write_flush_tlb); 21.6 atomic_inc(&d->arch.paging.shadow.gtable_dirty_version); 21.7 - flush_tlb_mask(d->domain_dirty_cpumask); 21.8 + flush_tlb_mask(&d->domain_dirty_cpumask); 21.9 } 21.10 21.11 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 21.12 @@ -4135,7 +4135,7 @@ sh_update_cr3(struct vcpu *v, int do_loc 21.13 * (old) shadow linear maps in the writeable mapping heuristics. */ 21.14 #if GUEST_PAGING_LEVELS == 2 21.15 if ( sh_remove_write_access(v, gmfn, 2, 0) != 0 ) 21.16 - flush_tlb_mask(v->domain->domain_dirty_cpumask); 21.17 + flush_tlb_mask(&v->domain->domain_dirty_cpumask); 21.18 sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l2_shadow); 21.19 #elif GUEST_PAGING_LEVELS == 3 21.20 /* PAE guests have four shadow_table entries, based on the 21.21 @@ -4158,7 +4158,7 @@ sh_update_cr3(struct vcpu *v, int do_loc 21.22 } 21.23 } 21.24 if ( flush ) 21.25 - flush_tlb_mask(v->domain->domain_dirty_cpumask); 21.26 + flush_tlb_mask(&v->domain->domain_dirty_cpumask); 21.27 /* Now install the new shadows. */ 21.28 for ( i = 0; i < 4; i++ ) 21.29 { 21.30 @@ -4179,7 +4179,7 @@ sh_update_cr3(struct vcpu *v, int do_loc 21.31 } 21.32 #elif GUEST_PAGING_LEVELS == 4 21.33 if ( sh_remove_write_access(v, gmfn, 4, 0) != 0 ) 21.34 - flush_tlb_mask(v->domain->domain_dirty_cpumask); 21.35 + flush_tlb_mask(&v->domain->domain_dirty_cpumask); 21.36 sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow); 21.37 #else 21.38 #error This should never happen
22.1 --- a/xen/arch/x86/shutdown.c Wed May 27 10:38:51 2009 +0100 22.2 +++ b/xen/arch/x86/shutdown.c Wed May 27 11:15:08 2009 +0100 22.3 @@ -310,7 +310,7 @@ void machine_restart(unsigned int delay_ 22.4 if ( get_apic_id() != boot_cpu_physical_apicid ) 22.5 { 22.6 /* Send IPI to the boot CPU (logical cpu 0). */ 22.7 - on_selected_cpus(cpumask_of_cpu(0), __machine_restart, 22.8 + on_selected_cpus(cpumask_of(0), __machine_restart, 22.9 &delay_millisecs, 1, 0); 22.10 for ( ; ; ) 22.11 halt();
23.1 --- a/xen/arch/x86/smp.c Wed May 27 10:38:51 2009 +0100 23.2 +++ b/xen/arch/x86/smp.c Wed May 27 11:15:08 2009 +0100 23.3 @@ -19,11 +19,16 @@ 23.4 #include <asm/mc146818rtc.h> 23.5 #include <asm/flushtlb.h> 23.6 #include <asm/hardirq.h> 23.7 -#include <asm/ipi.h> 23.8 #include <asm/hvm/support.h> 23.9 #include <mach_apic.h> 23.10 23.11 /* 23.12 + * send_IPI_mask(cpumask, vector): sends @vector IPI to CPUs in @cpumask, 23.13 + * excluding the local CPU. @cpumask may be empty. 23.14 + */ 23.15 +#define send_IPI_mask (genapic->send_IPI_mask) 23.16 + 23.17 +/* 23.18 * Some notes on x86 processor bugs affecting SMP operation: 23.19 * 23.20 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs. 23.21 @@ -84,14 +89,15 @@ void apic_wait_icr_idle(void) 23.22 cpu_relax(); 23.23 } 23.24 23.25 -void send_IPI_mask_flat(cpumask_t cpumask, int vector) 23.26 +void send_IPI_mask_flat(const cpumask_t *cpumask, int vector) 23.27 { 23.28 - unsigned long mask = cpus_addr(cpumask)[0]; 23.29 + unsigned long mask = cpus_addr(*cpumask)[0]; 23.30 unsigned long cfg; 23.31 unsigned long flags; 23.32 23.33 - /* An IPI with no target generates a send accept error from P5/P6 APICs. */ 23.34 - WARN_ON(mask == 0); 23.35 + mask &= ~(1UL << smp_processor_id()); 23.36 + if ( mask == 0 ) 23.37 + return; 23.38 23.39 local_irq_save(flags); 23.40 23.41 @@ -119,15 +125,18 @@ void send_IPI_mask_flat(cpumask_t cpumas 23.42 local_irq_restore(flags); 23.43 } 23.44 23.45 -void send_IPI_mask_phys(cpumask_t mask, int vector) 23.46 +void send_IPI_mask_phys(const cpumask_t *mask, int vector) 23.47 { 23.48 unsigned long cfg, flags; 23.49 unsigned int query_cpu; 23.50 23.51 local_irq_save(flags); 23.52 23.53 - for_each_cpu_mask ( query_cpu, mask ) 23.54 + for_each_cpu_mask ( query_cpu, *mask ) 23.55 { 23.56 + if ( query_cpu == smp_processor_id() ) 23.57 + continue; 23.58 + 23.59 /* 23.60 * Wait for idle. 23.61 */ 23.62 @@ -170,20 +179,17 @@ fastcall void smp_invalidate_interrupt(v 23.63 irq_exit(); 23.64 } 23.65 23.66 -void flush_area_mask(cpumask_t mask, const void *va, unsigned int flags) 23.67 +void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags) 23.68 { 23.69 ASSERT(local_irq_is_enabled()); 23.70 23.71 - if ( cpu_isset(smp_processor_id(), mask) ) 23.72 - { 23.73 + if ( cpu_isset(smp_processor_id(), *mask) ) 23.74 flush_area_local(va, flags); 23.75 - cpu_clear(smp_processor_id(), mask); 23.76 - } 23.77 23.78 - if ( !cpus_empty(mask) ) 23.79 + if ( !cpus_subset(*mask, *cpumask_of(smp_processor_id())) ) 23.80 { 23.81 spin_lock(&flush_lock); 23.82 - flush_cpumask = mask; 23.83 + cpus_andnot(flush_cpumask, *mask, *cpumask_of(smp_processor_id())); 23.84 flush_va = va; 23.85 flush_flags = flags; 23.86 send_IPI_mask(mask, INVALIDATE_TLB_VECTOR); 23.87 @@ -201,18 +207,16 @@ void new_tlbflush_clock_period(void) 23.88 /* Flush everyone else. We definitely flushed just before entry. */ 23.89 allbutself = cpu_online_map; 23.90 cpu_clear(smp_processor_id(), allbutself); 23.91 - flush_mask(allbutself, FLUSH_TLB); 23.92 + flush_mask(&allbutself, FLUSH_TLB); 23.93 23.94 /* No need for atomicity: we are the only possible updater. */ 23.95 ASSERT(tlbflush_clock == 0); 23.96 tlbflush_clock++; 23.97 } 23.98 23.99 -void smp_send_event_check_mask(cpumask_t mask) 23.100 +void smp_send_event_check_mask(const cpumask_t *mask) 23.101 { 23.102 - cpu_clear(smp_processor_id(), mask); 23.103 - if ( !cpus_empty(mask) ) 23.104 - send_IPI_mask(mask, EVENT_CHECK_VECTOR); 23.105 + send_IPI_mask(mask, EVENT_CHECK_VECTOR); 23.106 } 23.107 23.108 /* 23.109 @@ -225,11 +229,12 @@ struct call_data_struct { 23.110 int wait; 23.111 atomic_t started; 23.112 atomic_t finished; 23.113 - cpumask_t selected; 23.114 + const cpumask_t *selected; 23.115 }; 23.116 23.117 static DEFINE_SPINLOCK(call_lock); 23.118 static struct call_data_struct *call_data; 23.119 +static void __smp_call_function_interrupt(void); 23.120 23.121 int smp_call_function( 23.122 void (*func) (void *info), 23.123 @@ -239,34 +244,21 @@ int smp_call_function( 23.124 { 23.125 cpumask_t allbutself = cpu_online_map; 23.126 cpu_clear(smp_processor_id(), allbutself); 23.127 - return on_selected_cpus(allbutself, func, info, retry, wait); 23.128 + return on_selected_cpus(&allbutself, func, info, retry, wait); 23.129 } 23.130 23.131 int on_selected_cpus( 23.132 - cpumask_t selected, 23.133 + const cpumask_t *selected, 23.134 void (*func) (void *info), 23.135 void *info, 23.136 int retry, 23.137 int wait) 23.138 { 23.139 struct call_data_struct data; 23.140 - unsigned int nr_cpus = cpus_weight(selected); 23.141 + unsigned int nr_cpus = cpus_weight(*selected); 23.142 23.143 ASSERT(local_irq_is_enabled()); 23.144 23.145 - /* Legacy UP system with no APIC to deliver IPIs? */ 23.146 - if ( unlikely(!cpu_has_apic) ) 23.147 - { 23.148 - ASSERT(num_online_cpus() == 1); 23.149 - if ( cpu_isset(0, selected) ) 23.150 - { 23.151 - local_irq_disable(); 23.152 - func(info); 23.153 - local_irq_enable(); 23.154 - } 23.155 - return 0; 23.156 - } 23.157 - 23.158 if ( nr_cpus == 0 ) 23.159 return 0; 23.160 23.161 @@ -283,6 +275,13 @@ int on_selected_cpus( 23.162 23.163 send_IPI_mask(selected, CALL_FUNCTION_VECTOR); 23.164 23.165 + if ( cpu_isset(smp_processor_id(), *call_data->selected) ) 23.166 + { 23.167 + local_irq_disable(); 23.168 + __smp_call_function_interrupt(); 23.169 + local_irq_enable(); 23.170 + } 23.171 + 23.172 while ( atomic_read(wait ? &data.finished : &data.started) != nr_cpus ) 23.173 cpu_relax(); 23.174 23.175 @@ -335,21 +334,23 @@ void smp_send_stop(void) 23.176 local_irq_enable(); 23.177 } 23.178 23.179 +void smp_send_nmi_allbutself(void) 23.180 +{ 23.181 + send_IPI_mask(&cpu_online_map, APIC_DM_NMI); 23.182 +} 23.183 + 23.184 fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs) 23.185 { 23.186 ack_APIC_irq(); 23.187 perfc_incr(ipis); 23.188 } 23.189 23.190 -fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs) 23.191 +static void __smp_call_function_interrupt(void) 23.192 { 23.193 void (*func)(void *info) = call_data->func; 23.194 void *info = call_data->info; 23.195 23.196 - ack_APIC_irq(); 23.197 - perfc_incr(ipis); 23.198 - 23.199 - if ( !cpu_isset(smp_processor_id(), call_data->selected) ) 23.200 + if ( !cpu_isset(smp_processor_id(), *call_data->selected) ) 23.201 return; 23.202 23.203 irq_enter(); 23.204 @@ -369,3 +370,10 @@ fastcall void smp_call_function_interrup 23.205 23.206 irq_exit(); 23.207 } 23.208 + 23.209 +fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs) 23.210 +{ 23.211 + ack_APIC_irq(); 23.212 + perfc_incr(ipis); 23.213 + __smp_call_function_interrupt(); 23.214 +}
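Since send_IPI_mask() now never targets the local CPU (see the comment added to xen/arch/x86/smp.c above), any caller that wants every CPU in the mask serviced has to cover the local CPU itself, which is exactly what on_selected_cpus() does after sending the IPI. A hedged sketch of the pattern (notify_cpus(), MY_VECTOR and handle_event_locally() are hypothetical names, not part of the patch):

    static void notify_cpus(const cpumask_t *mask)
    {
        /* IPIs are delivered to the remote CPUs in *mask only; an empty
         * remote set is tolerated. */
        send_IPI_mask(mask, MY_VECTOR);

        /* The local CPU, if present in the mask, is handled by hand. */
        if ( cpu_isset(smp_processor_id(), *mask) )
            handle_event_locally();
    }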
24.1 --- a/xen/arch/x86/time.c Wed May 27 10:38:51 2009 +0100 24.2 +++ b/xen/arch/x86/time.c Wed May 27 11:15:08 2009 +0100 24.3 @@ -1189,7 +1189,7 @@ static void time_calibration(void *unuse 24.4 }; 24.5 24.6 /* @wait=1 because we must wait for all cpus before freeing @r. */ 24.7 - on_selected_cpus(r.cpu_calibration_map, 24.8 + on_selected_cpus(&r.cpu_calibration_map, 24.9 opt_consistent_tscs 24.10 ? time_calibration_tsc_rendezvous 24.11 : time_calibration_std_rendezvous,
25.1 --- a/xen/common/Makefile Wed May 27 10:38:51 2009 +0100 25.2 +++ b/xen/common/Makefile Wed May 27 11:15:08 2009 +0100 25.3 @@ -1,4 +1,5 @@ 25.4 obj-y += bitmap.o 25.5 +obj-y += cpu.o 25.6 obj-y += domctl.o 25.7 obj-y += domain.o 25.8 obj-y += event_channel.o
26.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 26.2 +++ b/xen/common/cpu.c Wed May 27 11:15:08 2009 +0100 26.3 @@ -0,0 +1,26 @@ 26.4 +#include <xen/config.h> 26.5 +#include <xen/cpumask.h> 26.6 + 26.7 +/* 26.8 + * cpu_bit_bitmap[] is a special, "compressed" data structure that 26.9 + * represents all NR_CPUS bits binary values of 1<<nr. 26.10 + * 26.11 + * It is used by cpumask_of() to get a constant address to a CPU 26.12 + * mask value that has a single bit set only. 26.13 + */ 26.14 + 26.15 +/* cpu_bit_bitmap[0] is empty - so we can back into it */ 26.16 +#define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x) 26.17 +#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1) 26.18 +#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2) 26.19 +#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4) 26.20 + 26.21 +const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = { 26.22 + 26.23 + MASK_DECLARE_8(0), MASK_DECLARE_8(8), 26.24 + MASK_DECLARE_8(16), MASK_DECLARE_8(24), 26.25 +#if BITS_PER_LONG > 32 26.26 + MASK_DECLARE_8(32), MASK_DECLARE_8(40), 26.27 + MASK_DECLARE_8(48), MASK_DECLARE_8(56), 26.28 +#endif 26.29 +};
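The shape of cpu_bit_bitmap[] follows directly from the initializer above: row 0 is left all zero (the padding the "so we can back into it" comment refers to), and for r >= 1 only word 0 of row r is non-zero, holding 1UL << (r-1). Spelled out for a 64-bit build with NR_CPUS == 128 (an assumed configuration, chosen only to keep the table small):

    /*   cpu_bit_bitmap[0]  = { 0,         0 }   <- empty padding row
     *   cpu_bit_bitmap[1]  = { 1UL << 0,  0 }
     *   cpu_bit_bitmap[2]  = { 1UL << 1,  0 }
     *   ...
     *   cpu_bit_bitmap[64] = { 1UL << 63, 0 }
     */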
27.1 --- a/xen/common/grant_table.c Wed May 27 10:38:51 2009 +0100 27.2 +++ b/xen/common/grant_table.c Wed May 27 11:15:08 2009 +0100 27.3 @@ -715,7 +715,7 @@ gnttab_unmap_grant_ref( 27.4 goto fault; 27.5 } 27.6 27.7 - flush_tlb_mask(current->domain->domain_dirty_cpumask); 27.8 + flush_tlb_mask(&current->domain->domain_dirty_cpumask); 27.9 27.10 for ( i = 0; i < partial_done; i++ ) 27.11 __gnttab_unmap_common_complete(&(common[i])); 27.12 @@ -727,7 +727,7 @@ gnttab_unmap_grant_ref( 27.13 return 0; 27.14 27.15 fault: 27.16 - flush_tlb_mask(current->domain->domain_dirty_cpumask); 27.17 + flush_tlb_mask(&current->domain->domain_dirty_cpumask); 27.18 27.19 for ( i = 0; i < partial_done; i++ ) 27.20 __gnttab_unmap_common_complete(&(common[i])); 27.21 @@ -774,7 +774,7 @@ gnttab_unmap_and_replace( 27.22 goto fault; 27.23 } 27.24 27.25 - flush_tlb_mask(current->domain->domain_dirty_cpumask); 27.26 + flush_tlb_mask(&current->domain->domain_dirty_cpumask); 27.27 27.28 for ( i = 0; i < partial_done; i++ ) 27.29 __gnttab_unmap_common_complete(&(common[i])); 27.30 @@ -786,7 +786,7 @@ gnttab_unmap_and_replace( 27.31 return 0; 27.32 27.33 fault: 27.34 - flush_tlb_mask(current->domain->domain_dirty_cpumask); 27.35 + flush_tlb_mask(&current->domain->domain_dirty_cpumask); 27.36 27.37 for ( i = 0; i < partial_done; i++ ) 27.38 __gnttab_unmap_common_complete(&(common[i])); 27.39 @@ -1123,7 +1123,7 @@ gnttab_transfer( 27.40 #ifndef __ia64__ /* IA64 implicitly replaces the old page in steal_page(). */ 27.41 guest_physmap_remove_page(d, gop.mfn, mfn, 0); 27.42 #endif 27.43 - flush_tlb_mask(d->domain_dirty_cpumask); 27.44 + flush_tlb_mask(&d->domain_dirty_cpumask); 27.45 27.46 /* Find the target domain. */ 27.47 if ( unlikely((e = rcu_lock_domain_by_id(gop.domid)) == NULL) )
28.1 --- a/xen/common/keyhandler.c Wed May 27 10:38:51 2009 +0100 28.2 +++ b/xen/common/keyhandler.c Wed May 27 11:15:08 2009 +0100 28.3 @@ -119,7 +119,7 @@ static void dump_registers(unsigned char 28.4 if ( cpu == smp_processor_id() ) 28.5 continue; 28.6 printk("\n*** Dumping CPU%d host state: ***\n", cpu); 28.7 - on_selected_cpus(cpumask_of_cpu(cpu), __dump_execstate, NULL, 1, 1); 28.8 + on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1, 1); 28.9 } 28.10 28.11 printk("\n");
29.1 --- a/xen/common/page_alloc.c Wed May 27 10:38:51 2009 +0100 29.2 +++ b/xen/common/page_alloc.c Wed May 27 11:15:08 2009 +0100 29.3 @@ -431,7 +431,7 @@ static struct page_info *alloc_heap_page 29.4 if ( unlikely(!cpus_empty(mask)) ) 29.5 { 29.6 perfc_incr(need_flush_tlb_flush); 29.7 - flush_tlb_mask(mask); 29.8 + flush_tlb_mask(&mask); 29.9 } 29.10 29.11 return pg;
30.1 --- a/xen/include/asm-ia64/tlbflush.h Wed May 27 10:38:51 2009 +0100 30.2 +++ b/xen/include/asm-ia64/tlbflush.h Wed May 27 11:15:08 2009 +0100 30.3 @@ -39,7 +39,7 @@ void domain_flush_tlb_vhpt(struct domain 30.4 void flush_tlb_for_log_dirty(struct domain *d); 30.5 30.6 /* Flush v-tlb on cpus set in mask for current domain. */ 30.7 -void flush_tlb_mask(cpumask_t mask); 30.8 +void flush_tlb_mask(const cpumask_t *mask); 30.9 30.10 /* Flush local machine TLB. */ 30.11 void local_flush_tlb_all (void);
31.1 --- a/xen/include/asm-x86/flushtlb.h Wed May 27 10:38:51 2009 +0100 31.2 +++ b/xen/include/asm-x86/flushtlb.h Wed May 27 11:15:08 2009 +0100 31.3 @@ -90,12 +90,12 @@ void flush_area_local(const void *va, un 31.4 #define flush_local(flags) flush_area_local(NULL, flags) 31.5 31.6 /* Flush specified CPUs' TLBs/caches */ 31.7 -void flush_area_mask(cpumask_t, const void *va, unsigned int flags); 31.8 +void flush_area_mask(const cpumask_t *, const void *va, unsigned int flags); 31.9 #define flush_mask(mask, flags) flush_area_mask(mask, NULL, flags) 31.10 31.11 /* Flush all CPUs' TLBs/caches */ 31.12 -#define flush_area_all(va, flags) flush_area_mask(cpu_online_map, va, flags) 31.13 -#define flush_all(flags) flush_mask(cpu_online_map, flags) 31.14 +#define flush_area_all(va, flags) flush_area_mask(&cpu_online_map, va, flags) 31.15 +#define flush_all(flags) flush_mask(&cpu_online_map, flags) 31.16 31.17 /* Flush local TLBs */ 31.18 #define flush_tlb_local() \ 31.19 @@ -111,8 +111,8 @@ void flush_area_mask(cpumask_t, const vo 31.20 31.21 /* Flush all CPUs' TLBs */ 31.22 #define flush_tlb_all() \ 31.23 - flush_tlb_mask(cpu_online_map) 31.24 + flush_tlb_mask(&cpu_online_map) 31.25 #define flush_tlb_one_all(v) \ 31.26 - flush_tlb_one_mask(cpu_online_map, v) 31.27 + flush_tlb_one_mask(&cpu_online_map, v) 31.28 31.29 #endif /* __FLUSHTLB_H__ */
32.1 --- a/xen/include/asm-x86/genapic.h Wed May 27 10:38:51 2009 +0100 32.2 +++ b/xen/include/asm-x86/genapic.h Wed May 27 11:15:08 2009 +0100 32.3 @@ -35,7 +35,7 @@ struct genapic { 32.4 void (*clustered_apic_check)(void); 32.5 cpumask_t (*target_cpus)(void); 32.6 unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); 32.7 - void (*send_IPI_mask)(cpumask_t mask, int vector); 32.8 + void (*send_IPI_mask)(const cpumask_t *mask, int vector); 32.9 }; 32.10 32.11 #define APICFUNC(x) .x = x 32.12 @@ -52,7 +52,7 @@ void init_apic_ldr_flat(void); 32.13 void clustered_apic_check_flat(void); 32.14 cpumask_t target_cpus_flat(void); 32.15 unsigned int cpu_mask_to_apicid_flat(cpumask_t cpumask); 32.16 -void send_IPI_mask_flat(cpumask_t mask, int vector); 32.17 +void send_IPI_mask_flat(const cpumask_t *mask, int vector); 32.18 #define GENAPIC_FLAT \ 32.19 .int_delivery_mode = dest_LowestPrio, \ 32.20 .int_dest_mode = 1 /* logical delivery */, \ 32.21 @@ -66,7 +66,7 @@ void init_apic_ldr_x2apic(void); 32.22 void clustered_apic_check_x2apic(void); 32.23 cpumask_t target_cpus_x2apic(void); 32.24 unsigned int cpu_mask_to_apicid_x2apic(cpumask_t cpumask); 32.25 -void send_IPI_mask_x2apic(cpumask_t mask, int vector); 32.26 +void send_IPI_mask_x2apic(const cpumask_t *mask, int vector); 32.27 #define GENAPIC_X2APIC \ 32.28 .int_delivery_mode = dest_Fixed, \ 32.29 .int_dest_mode = 0 /* physical delivery */, \ 32.30 @@ -80,7 +80,7 @@ void init_apic_ldr_phys(void); 32.31 void clustered_apic_check_phys(void); 32.32 cpumask_t target_cpus_phys(void); 32.33 unsigned int cpu_mask_to_apicid_phys(cpumask_t cpumask); 32.34 -void send_IPI_mask_phys(cpumask_t mask, int vector); 32.35 +void send_IPI_mask_phys(const cpumask_t *mask, int vector); 32.36 #define GENAPIC_PHYS \ 32.37 .int_delivery_mode = dest_Fixed, \ 32.38 .int_dest_mode = 0 /* physical delivery */, \
33.1 --- a/xen/include/asm-x86/ipi.h Wed May 27 10:38:51 2009 +0100 33.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 33.3 @@ -1,8 +0,0 @@ 33.4 -#ifndef __ASM_IPI_H 33.5 -#define __ASM_IPI_H 33.6 - 33.7 -#include <asm/genapic.h> 33.8 - 33.9 -#define send_IPI_mask (genapic->send_IPI_mask) 33.10 - 33.11 -#endif /* __ASM_IPI_H */
34.1 --- a/xen/include/asm-x86/smp.h Wed May 27 10:38:51 2009 +0100 34.2 +++ b/xen/include/asm-x86/smp.h Wed May 27 11:15:08 2009 +0100 34.3 @@ -35,6 +35,8 @@ extern int pic_mode; 34.4 extern cpumask_t cpu_sibling_map[]; 34.5 extern cpumask_t cpu_core_map[]; 34.6 34.7 +void smp_send_nmi_allbutself(void); 34.8 + 34.9 extern void (*mtrr_hook) (void); 34.10 34.11 #ifdef CONFIG_X86_64
35.1 --- a/xen/include/xen/cpumask.h Wed May 27 10:38:51 2009 +0100 35.2 +++ b/xen/include/xen/cpumask.h Wed May 27 11:15:08 2009 +0100 35.3 @@ -80,7 +80,6 @@ 35.4 #include <xen/kernel.h> 35.5 35.6 typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; 35.7 -extern cpumask_t _unused_cpumask_arg_; 35.8 35.9 #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) 35.10 static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) 35.11 @@ -244,17 +243,23 @@ static inline int __cycle_cpu(int n, con 35.12 return nxt; 35.13 } 35.14 35.15 -#define cpumask_of_cpu(cpu) \ 35.16 -({ \ 35.17 - typeof(_unused_cpumask_arg_) m; \ 35.18 - if (sizeof(m) == sizeof(unsigned long)) { \ 35.19 - m.bits[0] = 1UL<<(cpu); \ 35.20 - } else { \ 35.21 - cpus_clear(m); \ 35.22 - cpu_set((cpu), m); \ 35.23 - } \ 35.24 - m; \ 35.25 -}) 35.26 +/* 35.27 + * Special-case data structure for "single bit set only" constant CPU masks. 35.28 + * 35.29 + * We pre-generate all the 64 (or 32) possible bit positions, with enough 35.30 + * padding to the left and the right, and return the constant pointer 35.31 + * appropriately offset. 35.32 + */ 35.33 +extern const unsigned long 35.34 + cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; 35.35 + 35.36 +static inline const cpumask_t *cpumask_of(unsigned int cpu) 35.37 +{ 35.38 + const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; 35.39 + return (const cpumask_t *)(p - cpu / BITS_PER_LONG); 35.40 +} 35.41 + 35.42 +#define cpumask_of_cpu(cpu) (*cpumask_of(cpu)) 35.43 35.44 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) 35.45
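A worked example of the pointer arithmetic in cpumask_of() above, again assuming BITS_PER_LONG == 64 and NR_CPUS == 128 (so a cpumask_t is two longs):

    /* cpumask_of(70):
     *   row    = 1 + 70 % 64 = 7   ->  cpu_bit_bitmap[7][0] == 1UL << 6
     *   offset = 70 / 64     = 1   ->  back up one long from that row
     * The returned pointer is &cpu_bit_bitmap[6][1]; read as a cpumask_t:
     *   word 0 = cpu_bit_bitmap[6][1] = 0
     *   word 1 = cpu_bit_bitmap[7][0] = 1UL << 6   ->  bit 70 of the mask
     * i.e. the constant mask with only CPU 70 set, obtained without ever
     * placing a cpumask_t on the stack. */
    const cpumask_t *m = cpumask_of(70);
    ASSERT(cpu_isset(70, *m) && cpus_weight(*m) == 1);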
36.1 --- a/xen/include/xen/smp.h Wed May 27 10:38:51 2009 +0100 36.2 +++ b/xen/include/xen/smp.h Wed May 27 11:15:08 2009 +0100 36.3 @@ -9,9 +9,9 @@ 36.4 */ 36.5 extern void smp_send_stop(void); 36.6 36.7 -extern void smp_send_event_check_mask(cpumask_t mask); 36.8 +extern void smp_send_event_check_mask(const cpumask_t *mask); 36.9 #define smp_send_event_check_cpu(cpu) \ 36.10 - smp_send_event_check_mask(cpumask_of_cpu(cpu)) 36.11 + smp_send_event_check_mask(cpumask_of(cpu)) 36.12 36.13 /* 36.14 * Prepare machine for booting other CPUs. 36.15 @@ -41,7 +41,7 @@ extern int smp_call_function( 36.16 * Call a function on a selection of processors 36.17 */ 36.18 extern int on_selected_cpus( 36.19 - cpumask_t selected, 36.20 + const cpumask_t *selected, 36.21 void (*func) (void *info), 36.22 void *info, 36.23 int retry, 36.24 @@ -62,7 +62,7 @@ static inline int on_each_cpu( 36.25 int retry, 36.26 int wait) 36.27 { 36.28 - return on_selected_cpus(cpu_online_map, func, info, retry, wait); 36.29 + return on_selected_cpus(&cpu_online_map, func, info, retry, wait); 36.30 } 36.31 36.32 #define smp_processor_id() raw_smp_processor_id()
37.1 --- a/xen/include/xen/softirq.h Wed May 27 10:38:51 2009 +0100 37.2 +++ b/xen/include/xen/softirq.h Wed May 27 11:15:08 2009 +0100 37.3 @@ -39,7 +39,7 @@ static inline void cpumask_raise_softirq 37.4 cpu_clear(cpu, mask); 37.5 } 37.6 37.7 - smp_send_event_check_mask(mask); 37.8 + smp_send_event_check_mask(&mask); 37.9 } 37.10 37.11 static inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)