ia64/xen-unstable
changeset 14657:b7ae31726aa6
xen: Get rid of some VCPUF_* flags and move them into their own byte fields.
Signed-off-by: Keir Fraser <keir@xensource.com>
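The patch below mechanically replaces atomic bitops on the shared vcpu_flags word with plain one-byte fields for the flags that have a single serialised writer (fpu_initialised, fpu_dirtied, is_polling, is_initialised, is_running), keeping the remaining truly concurrent flags in the bitmask. A standalone sketch of the before/after access pattern (illustrative only; the struct names vcpu_before/vcpu_after are invented here, and the real definitions are in xen/include/xen/sched.h below):

    /* Illustrative model only -- not part of the changeset. */
    #include <stdio.h>

    typedef unsigned char bool_t;

    /* Before: every flag lives in one word, so even single-writer flags
     * must be accessed with (potentially LOCK-prefixed) atomic bitops. */
    struct vcpu_before {
        unsigned long vcpu_flags;  /* _VCPUF_initialised, _VCPUF_running, ... */
    };

    /* After: single-writer flags become plain byte fields; only flags with
     * multiple concurrent updaters stay in the atomic bitmask. */
    struct vcpu_after {
        bool_t is_initialised;     /* a plain load/store is enough */
        bool_t is_running;
        unsigned long vcpu_flags;  /* _VCPUF_blocked, _VCPUF_down, ... */
    };

    int main(void)
    {
        struct vcpu_after v = { 0 };
        v.is_initialised = 1;      /* was: set_bit(_VCPUF_initialised, ...) */
        if (v.is_initialised)      /* was: test_bit(_VCPUF_initialised, ...) */
            printf("vcpu initialised\n");
        return 0;
    }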
--- a/xen/arch/ia64/vmx/vlsapic.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/ia64/vmx/vlsapic.c    Thu Mar 29 16:52:40 2007 +0100
@@ -692,7 +692,7 @@ static void vlsapic_write_ipi(VCPU *vcpu
     if (targ == NULL)
         panic_domain(NULL, "Unknown IPI cpu\n");
 
-    if (!test_bit(_VCPUF_initialised, &targ->vcpu_flags) ||
+    if (!targ->is_initialised ||
         test_bit(_VCPUF_down, &targ->vcpu_flags)) {
 
         struct pt_regs *targ_regs = vcpu_regs(targ);
@@ -717,7 +717,7 @@ static void vlsapic_write_ipi(VCPU *vcpu
             printk("arch_boot_vcpu: huh, already awake!");
         }
     } else {
-        int running = test_bit(_VCPUF_running, &targ->vcpu_flags);
+        int running = targ->is_running;
         vlsapic_deliver_ipi(targ, ((ipi_d_t)value).dm,
                             ((ipi_d_t)value).vector);
         vcpu_unblock(targ);

--- a/xen/arch/ia64/vmx/vmmu.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/ia64/vmx/vmmu.c    Thu Mar 29 16:52:40 2007 +0100
@@ -598,7 +598,7 @@ IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u6
     vcpu_get_rr(vcpu, va, &args.rid);
     args.ps = ps;
     for_each_vcpu (d, v) {
-        if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
+        if (!v->is_initialised)
             continue;
 
         args.vcpu = v;

--- a/xen/arch/ia64/xen/domain.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/ia64/xen/domain.c    Thu Mar 29 16:52:40 2007 +0100
@@ -657,7 +657,7 @@ int arch_set_info_guest(struct vcpu *v,
         v->arch.iva = er->iva;
     }
 
-    if (test_bit(_VCPUF_initialised, &v->vcpu_flags))
+    if (v->is_initialised)
         return 0;
 
     if (d->arch.is_vti) {
@@ -676,10 +676,12 @@ int arch_set_info_guest(struct vcpu *v,
     /* This overrides some registers. */
     vcpu_init_regs(v);
 
-    /* Don't redo final setup. Auto-online VCPU0. */
-    if (!test_and_set_bit(_VCPUF_initialised, &v->vcpu_flags) &&
-        (v->vcpu_id == 0))
-        clear_bit(_VCPUF_down, &v->vcpu_flags);
+    if (!v->is_initialised) {
+        v->is_initialised = 1;
+        /* Auto-online VCPU0 when it is initialised. */
+        if (v->vcpu_id == 0)
+            clear_bit(_VCPUF_down, &v->vcpu_flags);
+    }
 
     return 0;
 }
@@ -1067,7 +1069,7 @@ int construct_dom0(struct domain *d,
     /* Sanity! */
     BUG_ON(d != dom0);
     BUG_ON(d->vcpu[0] == NULL);
-    BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
+    BUG_ON(v->is_initialised);
 
     printk("*** LOADING DOMAIN 0 ***\n");
 
@@ -1188,7 +1190,7 @@ int construct_dom0(struct domain *d,
 
     printk("Dom0: 0x%lx\n", (u64)dom0);
 
-    set_bit(_VCPUF_initialised, &v->vcpu_flags);
+    v->is_initialised = 1;
     clear_bit(_VCPUF_down, &v->vcpu_flags);
 
     /* Build firmware.

--- a/xen/arch/ia64/xen/hypercall.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/ia64/xen/hypercall.c    Thu Mar 29 16:52:40 2007 +0100
@@ -81,11 +81,11 @@ fw_hypercall_ipi (struct pt_regs *regs)
         return;
 
     if (vector == XEN_SAL_BOOT_RENDEZ_VEC
-        && (!test_bit(_VCPUF_initialised, &targ->vcpu_flags)
+        && (!targ->is_initialised
             || test_bit(_VCPUF_down, &targ->vcpu_flags))) {
 
         /* First start: initialize vpcu.  */
-        if (!test_bit(_VCPUF_initialised, &targ->vcpu_flags)) {
+        if (!targ->is_initialised) {
             struct vcpu_guest_context c;
 
             memset (&c, 0, sizeof (c));
@@ -112,9 +112,7 @@ fw_hypercall_ipi (struct pt_regs *regs)
             printk ("arch_boot_vcpu: huu, already awaken!\n");
     }
     else {
-        int running = test_bit(_VCPUF_running,
-                               &targ->vcpu_flags);
-
+        int running = targ->is_running;
         vcpu_pend_interrupt(targ, vector);
         vcpu_unblock(targ);
         if (running)

--- a/xen/arch/ia64/xen/vhpt.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/ia64/xen/vhpt.c    Thu Mar 29 16:52:40 2007 +0100
@@ -184,7 +184,7 @@ domain_purge_swtc_entries(struct domain
 {
     struct vcpu* v;
     for_each_vcpu(d, v) {
-        if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
+        if (!v->is_initialised)
             continue;
 
         /* Purge TC entries.
@@ -202,7 +202,7 @@ domain_purge_swtc_entries_vcpu_dirty_mas
 
     for_each_vcpu_mask(vcpu, vcpu_dirty_mask) {
         struct vcpu* v = d->vcpu[vcpu];
-        if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
+        if (!v->is_initialised)
             continue;
 
         /* Purge TC entries.
@@ -263,7 +263,7 @@ void domain_flush_vtlb_all(struct domain
     struct vcpu *v;
 
     for_each_vcpu(d, v) {
-        if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
+        if (!v->is_initialised)
             continue;
 
         if (v->processor == cpu)
@@ -341,7 +341,7 @@ void domain_flush_vtlb_range (struct dom
     smp_mb();
 
     for_each_vcpu (d, v) {
-        if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
+        if (!v->is_initialised)
             continue;
 
         if (HAS_PERVCPU_VHPT(d)) {
@@ -407,7 +407,7 @@ void
     if (HAS_PERVCPU_VHPT(d)) {
         for_each_vcpu_mask(vcpu, entry->vcpu_dirty_mask) {
             v = d->vcpu[vcpu];
-            if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
+            if (!v->is_initialised)
                 continue;
 
             /* Invalidate VHPT entries. */

--- a/xen/arch/powerpc/domain.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/powerpc/domain.c    Thu Mar 29 16:52:40 2007 +0100
@@ -168,10 +168,13 @@ int arch_set_info_guest(struct vcpu *v,
     d->shared_info->wc_nsec = dom0->shared_info->wc_nsec;
     d->shared_info->arch.boot_timebase = dom0->shared_info->arch.boot_timebase;
 
-    /* Auto-online VCPU0 when it is initialised. */
-    if ( !test_and_set_bit(_VCPUF_initialised, &v->vcpu_flags) &&
-         (v->vcpu_id == 0) )
-        clear_bit(_VCPUF_down, &v->vcpu_flags);
+    if ( !v->is_initialised )
+    {
+        v->is_initialised = 1;
+        /* Auto-online VCPU0 when it is initialised. */
+        if ( v->vcpu_id == 0 )
+            clear_bit(_VCPUF_down, &v->vcpu_flags);
+    }
 
     cpu_init_vcpu(v);
 

--- a/xen/arch/powerpc/domain_build.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/powerpc/domain_build.c    Thu Mar 29 16:52:40 2007 +0100
@@ -273,7 +273,7 @@ int construct_dom0(struct domain *d,
 
     ofd_dom0_fixup(d, *ofh_tree + rma, cmdline, shared_info_addr);
 
-    set_bit(_VCPUF_initialised, &v->vcpu_flags);
+    v->is_initialised = 1;
     clear_bit(_VCPUF_down, &v->vcpu_flags);
 
     rc = 0;

--- a/xen/arch/x86/domain.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/x86/domain.c    Thu Mar 29 16:52:40 2007 +0100
@@ -563,9 +563,7 @@ int arch_set_info_guest(
 #endif
     }
 
-    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
-    if ( flags & VGCF_I387_VALID )
-        set_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
+    v->fpu_initialised = !!(flags & VGCF_I387_VALID);
 
     v->arch.flags &= ~TF_kernel_mode;
     if ( (flags & VGCF_in_kernel) || is_hvm_vcpu(v)/*???*/ )
@@ -600,7 +598,7 @@ int arch_set_info_guest(
         hvm_load_cpu_guest_regs(v, &v->arch.guest_context.user_regs);
     }
 
-    if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+    if ( v->is_initialised )
         goto out;
 
     memset(v->arch.guest_context.debugreg, 0,
@@ -699,7 +697,7 @@ int arch_set_info_guest(
     update_domain_wallclock_time(d);
 
     /* Don't redo final setup */
-    set_bit(_VCPUF_initialised, &v->vcpu_flags);
+    v->is_initialised = 1;
 
     if ( paging_mode_enabled(d) )
         paging_update_paging_modes(v);

--- a/xen/arch/x86/domain_build.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/x86/domain_build.c    Thu Mar 29 16:52:40 2007 +0100
@@ -254,7 +254,7 @@ int construct_dom0(struct domain *d,
     /* Sanity! */
     BUG_ON(d->domain_id != 0);
     BUG_ON(d->vcpu[0] == NULL);
-    BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
+    BUG_ON(v->is_initialised);
 
     printk("*** LOADING DOMAIN 0 ***\n");
 
@@ -901,7 +901,7 @@ int construct_dom0(struct domain *d,
 
     update_domain_wallclock_time(d);
 
-    set_bit(_VCPUF_initialised, &v->vcpu_flags);
+    v->is_initialised = 1;
     clear_bit(_VCPUF_down, &v->vcpu_flags);
 
     /*

--- a/xen/arch/x86/domctl.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/x86/domctl.c    Thu Mar 29 16:52:40 2007 +0100
@@ -448,7 +448,7 @@ void arch_get_info_guest(struct vcpu *v,
 #endif
 
     c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel));
-    if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
+    if ( v->fpu_initialised )
         c(flags |= VGCF_i387_valid);
     if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
         c(flags |= VGCF_online);

--- a/xen/arch/x86/hvm/hvm.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Thu Mar 29 16:52:40 2007 +0100
@@ -85,7 +85,7 @@ void hvm_disable(void)
 void hvm_stts(struct vcpu *v)
 {
     /* FPU state already dirty? Then no need to setup_fpu() lazily. */
-    if ( !test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
+    if ( !v->fpu_dirtied )
         hvm_funcs.stts(v);
 }
 
@@ -332,10 +332,10 @@ void hvm_vcpu_reset(struct vcpu *v)
     hvm_funcs.vcpu_initialise(v);
 
     set_bit(_VCPUF_down, &v->vcpu_flags);
-    clear_bit(_VCPUF_initialised, &v->vcpu_flags);
-    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
-    clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
     clear_bit(_VCPUF_blocked, &v->vcpu_flags);
+    v->fpu_initialised = 0;
+    v->fpu_dirtied = 0;
+    v->is_initialised = 0;
 
     vcpu_unpause(v);
 }
@@ -722,7 +722,7 @@ int hvm_bringup_ap(int vcpuid, int tramp
 
     LOCK_BIGLOCK(d);
     rc = -EEXIST;
-    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+    if ( !v->is_initialised )
         rc = boot_vcpu(d, vcpuid, ctxt);
     UNLOCK_BIGLOCK(d);

--- a/xen/arch/x86/hvm/vlapic.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/x86/hvm/vlapic.c    Thu Mar 29 16:52:40 2007 +0100
@@ -303,7 +303,7 @@ static int vlapic_accept_irq(struct vcpu
         if ( trig_mode && !(level & APIC_INT_ASSERT) )
             break;
         /* FIXME How to check the situation after vcpu reset? */
-        if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+        if ( v->is_initialised )
             hvm_vcpu_reset(v);
         v->arch.hvm_vcpu.init_sipi_sipi_state =
             HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI;
@@ -318,7 +318,7 @@ static int vlapic_accept_irq(struct vcpu
         v->arch.hvm_vcpu.init_sipi_sipi_state =
             HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM;
 
-        if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+        if ( v->is_initialised )
         {
             gdprintk(XENLOG_ERR, "SIPI for initialized vcpu %x\n", v->vcpu_id);
             goto exit_and_crash;

--- a/xen/arch/x86/i387.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/x86/i387.c    Thu Mar 29 16:52:40 2007 +0100
@@ -21,7 +21,7 @@ void init_fpu(void)
     __asm__ __volatile__ ( "fninit" );
     if ( cpu_has_xmm )
         load_mxcsr(0x1f80);
-    set_bit(_VCPUF_fpu_initialised, &current->vcpu_flags);
+    current->fpu_initialised = 1;
 }
 
 void save_init_fpu(struct vcpu *v)
@@ -76,7 +76,7 @@ void save_init_fpu(struct vcpu *v)
             : "=m" (*fpu_ctxt) );
     }
 
-    clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
+    v->fpu_dirtied = 0;
     write_cr0(cr0|X86_CR0_TS);
 }

--- a/xen/arch/x86/mm.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/x86/mm.c    Thu Mar 29 16:52:40 2007 +0100
@@ -1089,7 +1089,7 @@ static int alloc_l3_table(struct page_in
      */
     if ( (pfn >= 0x100000) &&
          unlikely(!VM_ASSIST(d, VMASST_TYPE_pae_extended_cr3)) &&
-         d->vcpu[0] && test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
+         d->vcpu[0] && d->vcpu[0]->is_initialised )
     {
         MEM_LOG("PAE pgd must be below 4GB (0x%lx >= 0x100000)", pfn);
         return 0;

--- a/xen/arch/x86/mm/hap/hap.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/x86/mm/hap/hap.c    Thu Mar 29 16:52:40 2007 +0100
@@ -569,7 +569,8 @@ void hap_update_cr3(struct vcpu *v, int
 
     HERE_I_AM;
     /* Don't do anything on an uninitialised vcpu */
-    if ( !is_hvm_domain(d) && !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) {
+    if ( !is_hvm_domain(d) && !v->is_initialised )
+    {
         ASSERT(v->arch.cr3 == 0);
         return;
     }

--- a/xen/arch/x86/mm/shadow/multi.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu Mar 29 16:52:40 2007 +0100
@@ -3427,7 +3427,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
 #endif
 
     /* Don't do anything on an uninitialised vcpu */
-    if ( !is_hvm_domain(d) && !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+    if ( !is_hvm_domain(d) && !v->is_initialised )
     {
         ASSERT(v->arch.cr3 == 0);
         return;

--- a/xen/arch/x86/traps.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/arch/x86/traps.c    Thu Mar 29 16:52:40 2007 +0100
@@ -1030,7 +1030,7 @@ long do_fpu_taskswitch(int set)
     else
     {
         v->arch.guest_context.ctrlreg[0] &= ~X86_CR0_TS;
-        if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
+        if ( v->fpu_dirtied )
             clts();
     }

--- a/xen/common/compat/domain.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/common/compat/domain.c    Thu Mar 29 16:52:40 2007 +0100
@@ -44,7 +44,7 @@ int compat_vcpu_op(int cmd, int vcpuid,
 
     LOCK_BIGLOCK(d);
     rc = -EEXIST;
-    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+    if ( !v->is_initialised )
         rc = boot_vcpu(d, vcpuid, cmp_ctxt);
     UNLOCK_BIGLOCK(d);

--- a/xen/common/domain.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/common/domain.c    Thu Mar 29 16:52:40 2007 +0100
@@ -484,7 +484,7 @@ int boot_vcpu(struct domain *d, int vcpu
 {
     struct vcpu *v = d->vcpu[vcpuid];
 
-    BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
+    BUG_ON(v->is_initialised);
 
     return arch_set_info_guest(v, ctxt);
 }
@@ -503,13 +503,13 @@ int vcpu_reset(struct vcpu *v)
 
     set_bit(_VCPUF_down, &v->vcpu_flags);
 
-    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
-    clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
+    v->fpu_initialised = 0;
+    v->fpu_dirtied = 0;
+    v->is_polling = 0;
+    v->is_initialised = 0;
     clear_bit(_VCPUF_blocked, &v->vcpu_flags);
-    clear_bit(_VCPUF_initialised, &v->vcpu_flags);
     clear_bit(_VCPUF_nmi_pending, &v->vcpu_flags);
     clear_bit(_VCPUF_nmi_masked, &v->vcpu_flags);
-    clear_bit(_VCPUF_polling, &v->vcpu_flags);
 
 out:
     UNLOCK_BIGLOCK(v->domain);
@@ -546,7 +546,7 @@ long do_vcpu_op(int cmd, int vcpuid, XEN
 
     LOCK_BIGLOCK(d);
     rc = -EEXIST;
-    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+    if ( !v->is_initialised )
        rc = boot_vcpu(d, vcpuid, ctxt);
     UNLOCK_BIGLOCK(d);
 
@@ -554,7 +554,7 @@ long do_vcpu_op(int cmd, int vcpuid, XEN
         break;
 
     case VCPUOP_up:
-        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+        if ( !v->is_initialised )
             return -EINVAL;
 
         if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )

--- a/xen/common/domctl.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/common/domctl.c    Thu Mar 29 16:52:40 2007 +0100
@@ -105,7 +105,7 @@ void getdomaininfo(struct domain *d, str
     {
         if ( !(v->vcpu_flags & VCPUF_blocked) )
             flags &= ~XEN_DOMINF_blocked;
-        if ( v->vcpu_flags & VCPUF_running )
+        if ( v->is_running )
             flags |= XEN_DOMINF_running;
         info->nr_online_vcpus++;
     }
@@ -517,7 +517,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
             goto getvcpucontext_out;
 
         ret = -ENODATA;
-        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+        if ( !v->is_initialised )
             goto getvcpucontext_out;
 
 #ifdef CONFIG_COMPAT
@@ -576,7 +576,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
 
         op->u.getvcpuinfo.online   = !test_bit(_VCPUF_down, &v->vcpu_flags);
         op->u.getvcpuinfo.blocked  = test_bit(_VCPUF_blocked, &v->vcpu_flags);
-        op->u.getvcpuinfo.running  = test_bit(_VCPUF_running, &v->vcpu_flags);
+        op->u.getvcpuinfo.running  = v->is_running;
         op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
         op->u.getvcpuinfo.cpu      = v->processor;
         ret = 0;

--- a/xen/common/event_channel.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/common/event_channel.c    Thu Mar 29 16:52:40 2007 +0100
@@ -529,11 +529,17 @@ void evtchn_set_pending(struct vcpu *v,
     }
 
     /* Check if some VCPU might be polling for this event. */
-    if ( unlikely(d->is_polling) && likely(xchg(&d->is_polling, 0)) )
+    if ( unlikely(d->is_polling) )
     {
+        d->is_polling = 0;
+        smp_mb(); /* check vcpu poll-flags /after/ clearing domain poll-flag */
         for_each_vcpu ( d, v )
-            if ( test_and_clear_bit(_VCPUF_polling, &v->vcpu_flags) )
-                vcpu_unblock(v);
+        {
+            if ( !v->is_polling )
+                continue;
+            v->is_polling = 0;
+            vcpu_unblock(v);
+        }
     }
 }

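This rewrite pairs with the do_poll() changes in xen/common/schedule.c below: the poller publishes v->is_polling and d->is_polling before scanning its ports, while the waker clears d->is_polling before scanning the per-VCPU flags, each side with a barrier in between, so a wakeup cannot slip past a newly arrived poller. A compact standalone model of that handshake, using C11 atomics where the patch uses smp_wmb()/smp_mb() (my illustration; the function and variable names are invented and simplified):

    /* Illustrative model only -- simplified from evtchn_set_pending()/do_poll(). */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool vcpu_is_polling, dom_is_polling, event_pending;

    /* Poller (~ do_poll): publish the poll flags, then check for events.
     * The fence stands in for the patch's smp_wmb(). */
    static bool poller_must_block(void)
    {
        atomic_store(&vcpu_is_polling, true);
        atomic_store(&dom_is_polling, true);
        atomic_thread_fence(memory_order_seq_cst);  /* flags before event check */
        return !atomic_load(&event_pending);
    }

    /* Waker (~ evtchn_set_pending): post the event, clear the domain-wide
     * flag, then scan per-VCPU flags /after/ the clear (~ smp_mb()). */
    static void waker(void)
    {
        atomic_store(&event_pending, true);
        if (atomic_load(&dom_is_polling)) {
            atomic_store(&dom_is_polling, false);
            atomic_thread_fence(memory_order_seq_cst);
            if (atomic_load(&vcpu_is_polling)) {
                atomic_store(&vcpu_is_polling, false);
                puts("vcpu_unblock(v)");            /* wake the poller */
            }
        }
    }

    int main(void)
    {
        if (poller_must_block())  /* no event yet: poll flags remain set */
            waker();              /* event arrives: poller gets woken */
        return 0;
    }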
--- a/xen/common/keyhandler.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/common/keyhandler.c    Thu Mar 29 16:52:40 2007 +0100
@@ -188,7 +188,7 @@ static void dump_domains(unsigned char k
             printk("    VCPU%d: CPU%d [has=%c] flags=%lx "
                    "upcall_pend = %02x, upcall_mask = %02x ",
                    v->vcpu_id, v->processor,
-                   test_bit(_VCPUF_running, &v->vcpu_flags) ? 'T':'F',
+                   v->is_running ? 'T':'F',
                    v->vcpu_flags,
                    vcpu_info(v, evtchn_upcall_pending),
                    vcpu_info(v, evtchn_upcall_mask));

--- a/xen/common/sched_credit.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/common/sched_credit.c    Thu Mar 29 16:52:40 2007 +0100
@@ -411,8 +411,7 @@ static inline int
      * Don't pick up work that's in the peer's scheduling tail. Also only pick
      * up work that's allowed to run on our CPU.
      */
-    return !test_bit(_VCPUF_running, &vc->vcpu_flags) &&
-           cpu_isset(dest_cpu, vc->cpu_affinity);
+    return !vc->is_running && cpu_isset(dest_cpu, vc->cpu_affinity);
 }
 
 static int

--- a/xen/common/sched_sedf.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/common/sched_sedf.c    Thu Mar 29 16:52:40 2007 +0100
@@ -1189,7 +1189,7 @@ void sedf_wake(struct vcpu *d)
 static void sedf_dump_domain(struct vcpu *d)
 {
     printk("%i.%i has=%c ", d->domain->domain_id, d->vcpu_id,
-           test_bit(_VCPUF_running, &d->vcpu_flags) ? 'T':'F');
+           d->is_running ? 'T':'F');
     printk("p=%"PRIu64" sl=%"PRIu64" ddl=%"PRIu64" w=%hu"
            " sc=%i xtr(%s)=%"PRIu64" ew=%hu",
            EDOM_INFO(d)->period, EDOM_INFO(d)->slice, EDOM_INFO(d)->deadl_abs,

--- a/xen/common/schedule.c    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/common/schedule.c    Thu Mar 29 16:52:40 2007 +0100
@@ -123,7 +123,7 @@ int sched_init_vcpu(struct vcpu *v, unsi
     {
         per_cpu(schedule_data, v->processor).curr = v;
         per_cpu(schedule_data, v->processor).idle = v;
-        set_bit(_VCPUF_running, &v->vcpu_flags);
+        v->is_running = 1;
     }
 
     TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);
@@ -172,7 +172,7 @@ void vcpu_sleep_sync(struct vcpu *v)
 {
     vcpu_sleep_nosync(v);
 
-    while ( !vcpu_runnable(v) && test_bit(_VCPUF_running, &v->vcpu_flags) )
+    while ( !vcpu_runnable(v) && v->is_running )
         cpu_relax();
 
     sync_vcpu_execstate(v);
@@ -208,7 +208,12 @@ static void vcpu_migrate(struct vcpu *v)
 
     vcpu_schedule_lock_irqsave(v, flags);
 
-    if ( test_bit(_VCPUF_running, &v->vcpu_flags) ||
+    /*
+     * NB. Check of v->running happens /after/ setting migration flag
+     * because they both happen in (different) spinlock regions, and those
+     * regions are strictly serialised.
+     */
+    if ( v->is_running ||
          !test_and_clear_bit(_VCPUF_migrating, &v->vcpu_flags) )
     {
         vcpu_schedule_unlock_irqrestore(v, flags);
@@ -234,7 +239,7 @@ static void vcpu_migrate(struct vcpu *v)
 void vcpu_force_reschedule(struct vcpu *v)
 {
     vcpu_schedule_lock_irq(v);
-    if ( test_bit(_VCPUF_running, &v->vcpu_flags) )
+    if ( v->is_running )
         set_bit(_VCPUF_migrating, &v->vcpu_flags);
     vcpu_schedule_unlock_irq(v);
 
@@ -310,14 +315,13 @@ static long do_poll(struct sched_poll *s
     if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) )
         return -EFAULT;
 
-    /* These operations must occur in order. */
     set_bit(_VCPUF_blocked, &v->vcpu_flags);
-    set_bit(_VCPUF_polling, &v->vcpu_flags);
-    smp_wmb();
+    v->is_polling = 1;
     d->is_polling = 1;
+
+    /* Check for events /after/ setting flags: avoids wakeup waiting race. */
     smp_wmb();
 
-    /* Check for events /after/ setting flags: avoids wakeup waiting race. */
     for ( i = 0; i < sched_poll->nr_ports; i++ )
     {
         rc = -EFAULT;
@@ -342,7 +346,7 @@ static long do_poll(struct sched_poll *s
         return 0;
 
  out:
-    clear_bit(_VCPUF_polling, &v->vcpu_flags);
+    v->is_polling = 0;
     clear_bit(_VCPUF_blocked, &v->vcpu_flags);
     return rc;
 }
@@ -651,8 +655,8 @@ static void schedule(void)
     ASSERT(next->runstate.state != RUNSTATE_running);
     vcpu_runstate_change(next, RUNSTATE_running, now);
 
-    ASSERT(!test_bit(_VCPUF_running, &next->vcpu_flags));
-    set_bit(_VCPUF_running, &next->vcpu_flags);
+    ASSERT(!next->is_running);
+    next->is_running = 1;
 
     spin_unlock_irq(&sd->schedule_lock);
 
@@ -673,7 +677,13 @@ static void schedule(void)
 
 void context_saved(struct vcpu *prev)
 {
-    clear_bit(_VCPUF_running, &prev->vcpu_flags);
+    /* Clear running flag /after/ writing context to memory. */
+    smp_wmb();
+
+    prev->is_running = 0;
+
+    /* Check for migration request /after/ clearing running flag. */
+    smp_mb();
 
     if ( unlikely(test_bit(_VCPUF_migrating, &prev->vcpu_flags)) )
         vcpu_migrate(prev);
@@ -704,8 +714,12 @@ static void vcpu_singleshot_timer_fn(voi
 static void poll_timer_fn(void *data)
 {
     struct vcpu *v = data;
-    if ( test_and_clear_bit(_VCPUF_polling, &v->vcpu_flags) )
-        vcpu_unblock(v);
+
+    if ( !v->is_polling )
+        return;
+
+    v->is_polling = 0;
+    vcpu_unblock(v);
 }
 
 /* Initialise the data structures. */

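The context_saved() change above encodes a lock-free ordering between the descheduling CPU and a CPU trying to migrate the VCPU: is_running is cleared only after the saved context is globally visible, and the migration flag is re-checked only after the clear. A minimal standalone model of that ordering, again using C11 atomics in place of smp_wmb()/smp_mb() (my illustration; descheduler/migrator and the flag names are invented stand-ins):

    /* Illustrative model only -- simplified from context_saved()/vcpu_migrate(). */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool is_running = true, migrating, context_in_memory;

    /* Descheduling CPU (~ context_saved): make the saved context visible,
     * clear is_running, then re-check for a pending migration request. */
    static void descheduler(void)
    {
        atomic_store(&context_in_memory, true);
        atomic_thread_fence(memory_order_seq_cst); /* ~ smp_wmb(): save, then clear */
        atomic_store(&is_running, false);
        atomic_thread_fence(memory_order_seq_cst); /* ~ smp_mb(): clear, then check */
        if (atomic_exchange(&migrating, false))
            puts("descheduler completes the migration");
    }

    /* Migrating CPU (~ vcpu_force_reschedule + vcpu_migrate): set the flag
     * first; bail out while is_running is still set, since the descheduler's
     * re-check above guarantees the request is eventually honoured. */
    static void migrator(void)
    {
        atomic_store(&migrating, true);
        if (!atomic_load(&is_running) && atomic_exchange(&migrating, false))
            puts("migrator moves the vcpu immediately");
    }

    int main(void)
    {
        migrator();      /* too early: vcpu still running, request stays pending */
        descheduler();   /* picks up the pending request after clearing is_running */
        return 0;
    }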
--- a/xen/include/asm-ia64/event.h    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/include/asm-ia64/event.h    Thu Mar 29 16:52:40 2007 +0100
@@ -20,10 +20,10 @@ static inline void vcpu_kick(struct vcpu
      * locks) but the key insight is that each change will cause
      * evtchn_upcall_pending to be polled.
      *
-     * NB2. We save VCPUF_running across the unblock to avoid a needless
+     * NB2. We save the running flag across the unblock to avoid a needless
      * IPI for domains that we IPI'd to unblock.
      */
-    int running = test_bit(_VCPUF_running, &v->vcpu_flags);
+    int running = v->is_running;
     vcpu_unblock(v);
     if ( running )
         smp_send_event_check_cpu(v->processor);

--- a/xen/include/asm-powerpc/event.h    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/include/asm-powerpc/event.h    Thu Mar 29 16:52:40 2007 +0100
@@ -27,7 +27,7 @@
 static inline void evtchn_notify(struct vcpu *v)
 {
 #ifdef XXX_NO_SMP_YET
-    int running = test_bit(_VCPUF_running, &v->vcpu_flags);
+    int running = v->is_running;
     vcpu_unblock(v);
     if (running)
         smp_send_event_check_cpu(v->processor);
@@ -73,10 +73,10 @@ static inline void vcpu_kick(struct vcpu
      * locks) but the key insight is that each change will cause
      * evtchn_upcall_pending to be polled.
      *
-     * NB2. We save VCPUF_running across the unblock to avoid a needless
+     * NB2. We save the running flag across the unblock to avoid a needless
      * IPI for domains that we IPI'd to unblock.
      */
-    int running = test_bit(_VCPUF_running, &v->vcpu_flags);
+    int running = v->is_running;
     vcpu_unblock(v);
     if (running)
         smp_send_event_check_cpu(v->processor);

--- a/xen/include/asm-x86/event.h    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/include/asm-x86/event.h    Thu Mar 29 16:52:40 2007 +0100
@@ -20,10 +20,10 @@ static inline void vcpu_kick(struct vcpu
      * locks) but the key insight is that each change will cause
      * evtchn_upcall_pending to be polled.
      *
-     * NB2. We save VCPUF_running across the unblock to avoid a needless
+     * NB2. We save the running flag across the unblock to avoid a needless
      * IPI for domains that we IPI'd to unblock.
      */
-    int running = test_bit(_VCPUF_running, &v->vcpu_flags);
+    int running = v->is_running;
    vcpu_unblock(v);
     if ( running )
         smp_send_event_check_cpu(v->processor);

--- a/xen/include/asm-x86/i387.h    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/include/asm-x86/i387.h    Thu Mar 29 16:52:40 2007 +0100
@@ -18,9 +18,9 @@ extern void init_fpu(void);
 extern void save_init_fpu(struct vcpu *v);
 extern void restore_fpu(struct vcpu *v);
 
-#define unlazy_fpu(v) do {                                      \
-    if ( test_bit(_VCPUF_fpu_dirtied, &(v)->vcpu_flags) )       \
-        save_init_fpu(v);                                       \
+#define unlazy_fpu(v) do {                      \
+    if ( (v)->fpu_dirtied )                     \
+        save_init_fpu(v);                       \
 } while ( 0 )
 
 #define load_mxcsr(val) do {                                    \
@@ -33,9 +33,10 @@ static inline void setup_fpu(struct vcpu
     /* Avoid recursion. */
     clts();
 
-    if ( !test_and_set_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
+    if ( !v->fpu_dirtied )
     {
-        if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
+        v->fpu_dirtied = 1;
+        if ( v->fpu_initialised )
             restore_fpu(v);
         else
             init_fpu();

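The two byte fields in this header implement classic lazy FPU switching: fpu_dirtied means the VCPU currently owns live FPU state on this CPU, while fpu_initialised means saved state exists to restore on next use. A runnable toy model of that state machine (my illustration; printf stands in for the real fninit/fxsave/fxrstor work done by init_fpu, save_init_fpu and restore_fpu):

    /* Illustrative model only -- mirrors setup_fpu()/unlazy_fpu() above. */
    #include <stdio.h>

    typedef unsigned char bool_t;

    struct vcpu { bool_t fpu_initialised, fpu_dirtied; };

    static void setup_fpu(struct vcpu *v)   /* first FPU use after a switch */
    {
        if (!v->fpu_dirtied) {
            v->fpu_dirtied = 1;
            if (v->fpu_initialised)
                printf("restore_fpu: reload previously saved state\n");
            else {
                printf("init_fpu: fninit a fresh state\n");
                v->fpu_initialised = 1;  /* as init_fpu() does in i387.c */
            }
        }
    }

    static void unlazy_fpu(struct vcpu *v)  /* context switch away */
    {
        if (v->fpu_dirtied) {
            printf("save_init_fpu: save state, clear dirty\n");
            v->fpu_dirtied = 0;
        }
    }

    int main(void)
    {
        struct vcpu v = { 0 };
        setup_fpu(&v);   /* init path: no saved state yet */
        unlazy_fpu(&v);  /* save on deschedule */
        setup_fpu(&v);   /* restore path on next use */
        return 0;
    }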
--- a/xen/include/xen/sched.h    Thu Mar 29 15:33:32 2007 +0000
+++ b/xen/include/xen/sched.h    Thu Mar 29 16:52:40 2007 +0100
@@ -100,6 +100,17 @@ struct vcpu
     } runstate_guest; /* guest address */
 #endif
 
+    /* Has the FPU been initialised? */
+    bool_t           fpu_initialised;
+    /* Has the FPU been used since it was last saved? */
+    bool_t           fpu_dirtied;
+    /* Is this VCPU polling any event channels (SCHEDOP_poll)? */
+    bool_t           is_polling;
+    /* Initialization completed for this VCPU? */
+    bool_t           is_initialised;
+    /* Currently running on a CPU? */
+    bool_t           is_running;
+
     unsigned long    vcpu_flags;
 
     spinlock_t       pause_lock;
@@ -423,41 +434,26 @@ extern struct domain *domain_list;
 /*
  * Per-VCPU flags (vcpu_flags).
  */
- /* Has the FPU been initialised? */
-#define _VCPUF_fpu_initialised 0
-#define VCPUF_fpu_initialised  (1UL<<_VCPUF_fpu_initialised)
- /* Has the FPU been used since it was last saved? */
-#define _VCPUF_fpu_dirtied     1
-#define VCPUF_fpu_dirtied      (1UL<<_VCPUF_fpu_dirtied)
  /* Domain is blocked waiting for an event. */
-#define _VCPUF_blocked         2
+#define _VCPUF_blocked         0
 #define VCPUF_blocked          (1UL<<_VCPUF_blocked)
- /* Currently running on a CPU? */
-#define _VCPUF_running         3
-#define VCPUF_running          (1UL<<_VCPUF_running)
- /* Initialization completed. */
-#define _VCPUF_initialised     4
-#define VCPUF_initialised      (1UL<<_VCPUF_initialised)
  /* VCPU is offline. */
-#define _VCPUF_down            5
+#define _VCPUF_down            1
 #define VCPUF_down             (1UL<<_VCPUF_down)
  /* NMI callback pending for this VCPU? */
-#define _VCPUF_nmi_pending     8
+#define _VCPUF_nmi_pending     2
 #define VCPUF_nmi_pending      (1UL<<_VCPUF_nmi_pending)
  /* Avoid NMI reentry by allowing NMIs to be masked for short periods. */
-#define _VCPUF_nmi_masked      9
+#define _VCPUF_nmi_masked      3
 #define VCPUF_nmi_masked       (1UL<<_VCPUF_nmi_masked)
- /* VCPU is polling a set of event channels (SCHEDOP_poll). */
-#define _VCPUF_polling         10
-#define VCPUF_polling          (1UL<<_VCPUF_polling)
  /* VCPU is paused by the hypervisor? */
-#define _VCPUF_paused          11
+#define _VCPUF_paused          4
 #define VCPUF_paused           (1UL<<_VCPUF_paused)
  /* VCPU is blocked awaiting an event to be consumed by Xen. */
-#define _VCPUF_blocked_in_xen  12
+#define _VCPUF_blocked_in_xen  5
 #define VCPUF_blocked_in_xen   (1UL<<_VCPUF_blocked_in_xen)
  /* VCPU affinity has changed: migrating to a new CPU. */
-#define _VCPUF_migrating       13
+#define _VCPUF_migrating       6
 #define VCPUF_migrating        (1UL<<_VCPUF_migrating)
 
 static inline int vcpu_runnable(struct vcpu *v)