ia64/xen-unstable
changeset 8886:6f874e089244
Fix HVM MSR save/restore.
Signed-off-by: Keir Fraser <keir@xensource.com>
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Signed-off-by: Xin Li <xin.b.li@intel.com>
author     kaf24@firebug.cl.cam.ac.uk
date       Fri Feb 17 16:29:24 2006 +0100 (2006-02-17)
parents    4ca6f052cdf6
children   aeeeedc6c9b7
files      xen/arch/x86/domain.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/support.h
line diff
--- a/xen/arch/x86/domain.c    Fri Feb 17 11:49:11 2006 +0000
+++ b/xen/arch/x86/domain.c    Fri Feb 17 16:29:24 2006 +0100
@@ -435,8 +435,6 @@ int arch_set_info_guest(
 
         if ( !hvm_initialize_guest_resources(v) )
             return -EINVAL;
-
-        hvm_switch_on = 1;
     }
 
     update_pagetables(v);
@@ -685,6 +683,8 @@ static void __context_switch(void)
                CTXT_SWITCH_STACK_BYTES);
         unlazy_fpu(p);
         save_segments(p);
+        if ( HVM_DOMAIN(p) )
+            hvm_load_msrs();
     }
 
     if ( !is_idle_vcpu(n) )
@@ -710,6 +710,10 @@ static void __context_switch(void)
             set_int80_direct_trap(n);
             switch_kernel_stack(n, cpu);
         }
+        else
+        {
+            hvm_restore_msrs(next);
+        }
     }
 
     if ( p->domain != n->domain )
@@ -765,16 +769,10 @@ void context_switch(struct vcpu *prev, s
     /* Re-enable interrupts before restoring state which may fault. */
     local_irq_enable();
 
-    if ( HVM_DOMAIN(next) )
-    {
-        hvm_restore_msrs(next);
-    }
-    else
+    if ( !HVM_DOMAIN(next) )
     {
         load_LDT(next);
         load_segments(next);
-        if ( HVM_DOMAIN(next) )
-            hvm_load_msrs(next);
     }
 }
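The diff above moves the HVM MSR handling out of context_switch() and into the lazily invoked __context_switch(): host MSRs are reloaded while the outgoing HVM vcpu's state is saved, and the incoming HVM vcpu's guest MSRs are restored in the same path. The following standalone sketch (not Xen code; the struct, helper names and context_switch_sketch() are stand-ins) only illustrates that ordering:

/*
 * Standalone model of the post-patch MSR ordering.  All types and helpers
 * are illustrative stand-ins for the real Xen ones.
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu { const char *name; bool is_hvm; };

static void hvm_load_msrs(void)              { puts("  reload host MSRs"); }
static void hvm_restore_msrs(struct vcpu *v) { printf("  restore %s guest MSRs\n", v->name); }
static void load_LDT(struct vcpu *v)         { printf("  load_LDT(%s)\n", v->name); }
static void load_segments(struct vcpu *v)    { printf("  load_segments(%s)\n", v->name); }

/* Mirrors the shape of the patched switch path: outgoing HVM vcpu puts the
 * host MSR values back, incoming HVM vcpu gets its guest MSR image, and the
 * PV path keeps doing LDT/segment loads as before. */
static void context_switch_sketch(struct vcpu *p, struct vcpu *n)
{
    printf("switching %s -> %s\n", p->name, n->name);

    if ( p->is_hvm )
        hvm_load_msrs();        /* outgoing HVM guest: restore host MSRs */

    if ( n->is_hvm )
        hvm_restore_msrs(n);    /* incoming HVM guest: reload its MSRs */
    else
    {
        load_LDT(n);            /* PV path, unchanged by this patch */
        load_segments(n);
    }
}

int main(void)
{
    struct vcpu pv  = { "pv-vcpu",  false };
    struct vcpu hvm = { "hvm-vcpu", true  };
    context_switch_sketch(&hvm, &pv);
    context_switch_sketch(&pv, &hvm);
    return 0;
}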
--- a/xen/arch/x86/hvm/hvm.c    Fri Feb 17 11:49:11 2006 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Fri Feb 17 16:29:24 2006 +0100
@@ -46,7 +46,6 @@
 #include <public/hvm/hvm_info_table.h>
 
 int hvm_enabled = 0;
-int hvm_switch_on = 0;
 
 unsigned int opt_hvm_debug_level = 0;
 integer_param("hvm_debug", opt_hvm_debug_level);
--- a/xen/arch/x86/hvm/svm/svm.c    Fri Feb 17 11:49:11 2006 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c    Fri Feb 17 16:29:24 2006 +0100
@@ -251,14 +251,11 @@ void svm_save_segments(struct vcpu *v)
  * are not modified once set for generic domains, we don't save them,
  * but simply reset them to the values set at percpu_traps_init().
  */
-void svm_load_msrs(struct vcpu *n)
+void svm_load_msrs(void)
 {
     struct svm_msr_state *host_state = &percpu_msr[smp_processor_id()];
     int i;
 
-    if ( !hvm_switch_on )
-        return;
-
     while ( host_state->flags )
     {
         i = find_first_set_bit(host_state->flags);
--- a/xen/arch/x86/hvm/vmx/vmx.c    Fri Feb 17 11:49:11 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c    Fri Feb 17 16:29:24 2006 +0100
@@ -124,14 +124,11 @@ void vmx_save_segments(struct vcpu *v)
  * are not modified once set for generic domains, we don't save them,
  * but simply reset them to the values set at percpu_traps_init().
  */
-void vmx_load_msrs(struct vcpu *n)
+void vmx_load_msrs(void)
 {
     struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];
     int i;
 
-    if ( !hvm_switch_on )
-        return;
-
     while ( host_state->flags )
     {
         i = find_first_set_bit(host_state->flags);
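Both svm_load_msrs() and vmx_load_msrs() walk a per-CPU flags bitmap that records which host MSRs currently hold guest values, writing the saved host value back for each set bit. The sketch below is an assumed, self-contained model of that bookkeeping (slot indices, wrmsr_sketch() and the clear-bit arithmetic are illustrative, not the real Xen helpers):

/* Model of the per-CPU "dirty MSR" loop in the diff above. */
#include <stdint.h>
#include <stdio.h>

#define NUM_MSRS 4

struct msr_state {
    uint32_t flags;                 /* bit i set => MSR slot i holds a guest value */
    uint64_t saved_host[NUM_MSRS];  /* host values to put back */
};

/* Stand-in for Xen's find_first_set_bit(); valid only for x != 0. */
static int find_first_set_bit(uint32_t x)
{
    return __builtin_ctz(x);
}

/* Stand-in for wrmsr(); just reports what would be written. */
static void wrmsr_sketch(int slot, uint64_t val)
{
    printf("wrmsr slot %d <- %#llx\n", slot, (unsigned long long)val);
}

/* Same shape as the while ( host_state->flags ) loop in svm/vmx_load_msrs(). */
static void load_host_msrs(struct msr_state *host_state)
{
    while ( host_state->flags )
    {
        int i = find_first_set_bit(host_state->flags);
        wrmsr_sketch(i, host_state->saved_host[i]);
        host_state->flags &= ~(1u << i);   /* assumed; real code clears the bit similarly */
    }
}

int main(void)
{
    struct msr_state s = { .flags = 0x5, .saved_host = { 0x100, 0, 0x300, 0 } };
    load_host_msrs(&s);
    return 0;
}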
--- a/xen/include/asm-x86/hvm/hvm.h    Fri Feb 17 11:49:11 2006 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h    Fri Feb 17 16:29:24 2006 +0100
@@ -50,7 +50,7 @@ struct hvm_function_table {
     void (*load_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
 #ifdef __x86_64__
     void (*save_segments)(struct vcpu *v);
-    void (*load_msrs)(struct vcpu *v);
+    void (*load_msrs)(void);
     void (*restore_msrs)(struct vcpu *v);
 #endif
     void (*store_cpu_guest_ctrl_regs)(struct vcpu *v, unsigned long crs[8]);
@@ -116,10 +116,10 @@ hvm_save_segments(struct vcpu *v)
 }
 
 static inline void
-hvm_load_msrs(struct vcpu *v)
+hvm_load_msrs(void)
 {
     if (hvm_funcs.load_msrs)
-        hvm_funcs.load_msrs(v);
+        hvm_funcs.load_msrs();
 }
 
 static inline void
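The header change narrows the load_msrs hook to take no vcpu argument, since it only reloads the current CPU's host MSRs. The sketch below models how the inline wrapper dispatches through hvm_funcs after that change; the registration in main() is an assumption standing in for the VMX/SVM setup code, and vmx_load_msrs_sketch() is illustrative:

/* Simplified dispatch model based on the hvm.h fragments in this diff. */
#include <stdio.h>

struct vcpu;   /* opaque here */

struct hvm_function_table {
    void (*load_msrs)(void);               /* post-patch: no vcpu argument */
    void (*restore_msrs)(struct vcpu *v);
};

static struct hvm_function_table hvm_funcs;

/* Stand-in for the platform implementation (vmx_load_msrs()/svm_load_msrs()). */
static void vmx_load_msrs_sketch(void)
{
    puts("vmx: reload host MSRs on this CPU");
}

/* Matches the inline wrapper in hvm.h: call the hook only if it is set. */
static void hvm_load_msrs(void)
{
    if ( hvm_funcs.load_msrs )
        hvm_funcs.load_msrs();
}

int main(void)
{
    /* Assumed registration point; in Xen this happens during VMX/SVM setup. */
    hvm_funcs.load_msrs = vmx_load_msrs_sketch;
    hvm_load_msrs();
    return 0;
}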
--- a/xen/include/asm-x86/hvm/support.h    Fri Feb 17 11:49:11 2006 +0000
+++ b/xen/include/asm-x86/hvm/support.h    Fri Feb 17 16:29:24 2006 +0100
@@ -133,7 +133,6 @@ extern unsigned int opt_hvm_debug_level;
 } while (0)
 
 extern int hvm_enabled;
-extern int hvm_switch_on;
 
 enum { HVM_COPY_IN = 0, HVM_COPY_OUT };
 extern int hvm_copy(void *buf, unsigned long vaddr, int size, int dir);