ia64/xen-unstable
changeset 9436:c947b278a349
Merge hvm_store_cpu_guest_regs() and hvm_store_cpu_guest_ctrl_regs()
into a single function.
On VMX, make the function work even when the passed VCPU is not the
currently-executing VCPU.
This allows gdbserver-xen to get correct cpu register context for VMX guests.
Signed-off-by: Keir Fraser <keir@xensource.com>
Signed-off-by: Nitin A Kamble <nitin.a.kamble@intel.com>
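With the merge, one call fetches both the general-purpose and the control registers, and either output pointer may be NULL to skip that half of the snapshot. Below is a minimal sketch of a debugger-style caller; dump_hvm_vcpu_state() is a hypothetical helper, while vcpu_pause()/vcpu_unpause() are Xen's standard VCPU-pausing primitives (the VMX implementation in the diff asserts a non-zero pause count whenever v != current):

/* Hypothetical caller: snapshot the state of a possibly-remote HVM VCPU. */
static void dump_hvm_vcpu_state(struct vcpu *v)
{
    struct cpu_user_regs regs;
    unsigned long crs[8] = { 0 }; /* only crs[0], crs[3] and crs[4] are filled */

    vcpu_pause(v);                /* required whenever v may not be current */
    hvm_store_cpu_guest_regs(v, &regs, crs);
    vcpu_unpause(v);

    printk("cr0=%lx cr3=%lx cr4=%lx\n", crs[0], crs[3], crs[4]);
}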
author    kaf24@firebug.cl.cam.ac.uk
date      Thu Mar 23 15:53:52 2006 +0100 (2006-03-23)
parents   bd108ccc38a0
children  6aa5179f2416
files     xen/arch/x86/dom0_ops.c
          xen/arch/x86/hvm/platform.c
          xen/arch/x86/hvm/svm/svm.c
          xen/arch/x86/hvm/vmx/vmx.c
          xen/arch/x86/x86_32/traps.c
          xen/arch/x86/x86_64/traps.c
          xen/include/asm-x86/hvm/hvm.h
line diff
--- a/xen/arch/x86/dom0_ops.c	Thu Mar 23 15:30:00 2006 +0100
+++ b/xen/arch/x86/dom0_ops.c	Thu Mar 23 15:53:52 2006 +0100
@@ -460,8 +460,7 @@ void arch_getdomaininfo_ctxt(
 
     if ( hvm_guest(v) )
     {
-        hvm_store_cpu_guest_regs(v, &c->user_regs);
-        hvm_store_cpu_guest_ctrl_regs(v, c->ctrlreg);
+        hvm_store_cpu_guest_regs(v, &c->user_regs, c->ctrlreg);
     }
     else
     {
--- a/xen/arch/x86/hvm/platform.c	Thu Mar 23 15:30:00 2006 +0100
+++ b/xen/arch/x86/hvm/platform.c	Thu Mar 23 15:53:52 2006 +0100
@@ -773,7 +773,7 @@ void handle_mmio(unsigned long va, unsig
     mmio_opp = &v->arch.hvm_vcpu.mmio_op;
 
     regs = mmio_opp->inst_decoder_regs;
-    hvm_store_cpu_guest_regs(v, regs);
+    hvm_store_cpu_guest_regs(v, regs, NULL);
 
     if ((inst_len = hvm_instruction_length(v)) <= 0) {
         printf("handle_mmio: failed to get instruction length\n");
--- a/xen/arch/x86/hvm/svm/svm.c	Thu Mar 23 15:30:00 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Thu Mar 23 15:53:52 2006 +0100
@@ -201,31 +201,41 @@ int svm_initialize_guest_resources(struc
 }
 
 static void svm_store_cpu_guest_regs(
-    struct vcpu *v, struct cpu_user_regs *regs)
+    struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
+    if ( regs != NULL )
+    {
 #if defined (__x86_64__)
-    regs->rip    = vmcb->rip;
-    regs->rsp    = vmcb->rsp;
-    regs->rflags = vmcb->rflags;
-    regs->cs     = vmcb->cs.sel;
-    regs->ds     = vmcb->ds.sel;
-    regs->es     = vmcb->es.sel;
-    regs->ss     = vmcb->ss.sel;
-    regs->gs     = vmcb->gs.sel;
-    regs->fs     = vmcb->fs.sel;
+        regs->rip    = vmcb->rip;
+        regs->rsp    = vmcb->rsp;
+        regs->rflags = vmcb->rflags;
+        regs->cs     = vmcb->cs.sel;
+        regs->ds     = vmcb->ds.sel;
+        regs->es     = vmcb->es.sel;
+        regs->ss     = vmcb->ss.sel;
+        regs->gs     = vmcb->gs.sel;
+        regs->fs     = vmcb->fs.sel;
 #elif defined (__i386__)
-    regs->eip    = vmcb->rip;
-    regs->esp    = vmcb->rsp;
-    regs->eflags = vmcb->rflags;
-    regs->cs     = vmcb->cs.sel;
-    regs->ds     = vmcb->ds.sel;
-    regs->es     = vmcb->es.sel;
-    regs->ss     = vmcb->ss.sel;
-    regs->gs     = vmcb->gs.sel;
-    regs->fs     = vmcb->fs.sel;
+        regs->eip    = vmcb->rip;
+        regs->esp    = vmcb->rsp;
+        regs->eflags = vmcb->rflags;
+        regs->cs     = vmcb->cs.sel;
+        regs->ds     = vmcb->ds.sel;
+        regs->es     = vmcb->es.sel;
+        regs->ss     = vmcb->ss.sel;
+        regs->gs     = vmcb->gs.sel;
+        regs->fs     = vmcb->fs.sel;
 #endif
+    }
+
+    if ( crs != NULL )
+    {
+        crs[0] = vmcb->cr0;
+        crs[3] = vmcb->cr3;
+        crs[4] = vmcb->cr4;
+    }
 }
 
 static void svm_load_cpu_guest_regs(
@@ -372,15 +382,6 @@ static inline int long_mode_do_msr_write
     return 1;
 }
 
-void svm_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    crs[0] = vmcb->cr0;
-    crs[3] = vmcb->cr3;
-    crs[4] = vmcb->cr4;
-}
-
 void svm_modify_guest_state(struct vcpu *v)
 {
     svm_modify_vmcb(v, &v->arch.guest_context.user_regs);
@@ -448,7 +449,6 @@ int start_svm(void)
     hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
     hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
 
-    hvm_funcs.store_cpu_guest_ctrl_regs = svm_store_cpu_guest_ctrl_regs;
     hvm_funcs.modify_guest_state = svm_modify_guest_state;
 
     hvm_funcs.realmode = svm_realmode;
--- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Mar 23 15:30:00 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Mar 23 15:53:52 2006 +0100
@@ -398,31 +398,81 @@ void vmx_migrate_timers(struct vcpu *v)
     migrate_timer(&(VLAPIC(v)->vlapic_timer), v->processor);
 }
 
-void vmx_store_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
+struct vmx_store_cpu_guest_regs_callback_info {
+    struct vcpu *v;
+    struct cpu_user_regs *regs;
+    unsigned long *crs;
+};
+
+static void vmx_store_cpu_guest_regs(
+    struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs);
+
+static void vmx_store_cpu_guest_regs_callback(void *data)
+{
+    struct vmx_store_cpu_guest_regs_callback_info *info = data;
+    vmx_store_cpu_guest_regs(info->v, info->regs, info->crs);
+}
+
+static void vmx_store_cpu_guest_regs(
+    struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
 {
+    if ( v != current )
+    {
+        /* Non-current VCPUs must be paused to get a register snapshot. */
+        ASSERT(atomic_read(&v->pausecnt) != 0);
+
+        if ( v->arch.hvm_vmx.launch_cpu != smp_processor_id() )
+        {
+            /* Get register details from remote CPU. */
+            struct vmx_store_cpu_guest_regs_callback_info info = {
+                .v = v, .regs = regs, .crs = crs };
+            cpumask_t cpumask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);
+            on_selected_cpus(cpumask, vmx_store_cpu_guest_regs_callback,
+                             &info, 1, 1);
+            return;
+        }
+
+        /* Register details are on this CPU. Load the correct VMCS. */
+        __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
+    }
+
+    ASSERT(v->arch.hvm_vmx.launch_cpu == smp_processor_id());
+
+    if ( regs != NULL )
+    {
 #if defined (__x86_64__)
-    __vmread(GUEST_RFLAGS, &regs->rflags);
-    __vmread(GUEST_SS_SELECTOR, &regs->ss);
-    __vmread(GUEST_CS_SELECTOR, &regs->cs);
-    __vmread(GUEST_DS_SELECTOR, &regs->ds);
-    __vmread(GUEST_ES_SELECTOR, &regs->es);
-    __vmread(GUEST_GS_SELECTOR, &regs->gs);
-    __vmread(GUEST_FS_SELECTOR, &regs->fs);
-    __vmread(GUEST_RIP, &regs->rip);
-    __vmread(GUEST_RSP, &regs->rsp);
+        __vmread(GUEST_RFLAGS, &regs->rflags);
+        __vmread(GUEST_SS_SELECTOR, &regs->ss);
+        __vmread(GUEST_CS_SELECTOR, &regs->cs);
+        __vmread(GUEST_DS_SELECTOR, &regs->ds);
+        __vmread(GUEST_ES_SELECTOR, &regs->es);
+        __vmread(GUEST_GS_SELECTOR, &regs->gs);
+        __vmread(GUEST_FS_SELECTOR, &regs->fs);
+        __vmread(GUEST_RIP, &regs->rip);
+        __vmread(GUEST_RSP, &regs->rsp);
 #elif defined (__i386__)
-    __vmread(GUEST_RFLAGS, &regs->eflags);
-    __vmread(GUEST_SS_SELECTOR, &regs->ss);
-    __vmread(GUEST_CS_SELECTOR, &regs->cs);
-    __vmread(GUEST_DS_SELECTOR, &regs->ds);
-    __vmread(GUEST_ES_SELECTOR, &regs->es);
-    __vmread(GUEST_GS_SELECTOR, &regs->gs);
-    __vmread(GUEST_FS_SELECTOR, &regs->fs);
-    __vmread(GUEST_RIP, &regs->eip);
-    __vmread(GUEST_RSP, &regs->esp);
-#else
-#error Unsupported architecture
+        __vmread(GUEST_RFLAGS, &regs->eflags);
+        __vmread(GUEST_SS_SELECTOR, &regs->ss);
+        __vmread(GUEST_CS_SELECTOR, &regs->cs);
+        __vmread(GUEST_DS_SELECTOR, &regs->ds);
+        __vmread(GUEST_ES_SELECTOR, &regs->es);
+        __vmread(GUEST_GS_SELECTOR, &regs->gs);
+        __vmread(GUEST_FS_SELECTOR, &regs->fs);
+        __vmread(GUEST_RIP, &regs->eip);
+        __vmread(GUEST_RSP, &regs->esp);
 #endif
+    }
+
+    if ( crs != NULL )
+    {
+        __vmread(CR0_READ_SHADOW, &crs[0]);
+        __vmread(GUEST_CR3, &crs[3]);
+        __vmread(CR4_READ_SHADOW, &crs[4]);
+    }
+
+    /* Reload current VCPU's VMCS if it was temporarily unloaded. */
+    if ( (v != current) && hvm_guest(current) )
+        __vmptrld(virt_to_maddr(current->arch.hvm_vmx.vmcs));
 }
 
 void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
@@ -456,13 +506,6 @@ void vmx_load_cpu_guest_regs(struct vcpu
 #endif
 }
 
-void vmx_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
-{
-    __vmread(CR0_READ_SHADOW, &crs[0]);
-    __vmread(GUEST_CR3, &crs[3]);
-    __vmread(CR4_READ_SHADOW, &crs[4]);
-}
-
 void vmx_modify_guest_state(struct vcpu *v)
 {
     modify_vmcs(&v->arch.hvm_vmx, &v->arch.guest_context.user_regs);
@@ -616,7 +659,6 @@ int start_vmx(void)
     hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
     hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs;
 
-    hvm_funcs.store_cpu_guest_ctrl_regs = vmx_store_cpu_guest_ctrl_regs;
     hvm_funcs.modify_guest_state = vmx_modify_guest_state;
 
     hvm_funcs.realmode = vmx_realmode;
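The key technique in the vmx.c hunk above is the remote-access path: when the target VCPU's VMCS is resident on another physical CPU, the arguments are bundled into a stack-allocated struct and the accessor is re-run on that CPU, synchronously, via on_selected_cpus(). The same pattern generalizes to reading any per-CPU state. A minimal sketch follows, assuming the on_selected_cpus(cpumask, func, info, retry, wait) signature used in the hunk; read_remote_tsc() and struct read_tsc_info are hypothetical:

/* Hypothetical example of the same bundle-and-IPI pattern: read the
 * time-stamp counter of another physical CPU and wait for the result. */
struct read_tsc_info {
    uint64_t tsc;                      /* filled in on the target CPU */
};

static void read_tsc_callback(void *data)
{
    struct read_tsc_info *info = data;
    rdtscll(info->tsc);                /* executes on the selected CPU */
}

static uint64_t read_remote_tsc(unsigned int cpu)
{
    struct read_tsc_info info;

    if ( cpu == smp_processor_id() )
        read_tsc_callback(&info);      /* no IPI needed for the local CPU */
    else
        /* wait == 1, so passing a pointer to stack memory is safe: the
         * caller does not return until the remote callback has finished. */
        on_selected_cpus(cpumask_of_cpu(cpu), read_tsc_callback, &info, 1, 1);

    return info.tsc;
}

The synchronous wait is also what lets vmx_store_cpu_guest_regs() pass its caller's regs and crs pointers straight through to the remote CPU.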
--- a/xen/arch/x86/x86_32/traps.c	Thu Mar 23 15:30:00 2006 +0100
+++ b/xen/arch/x86/x86_32/traps.c	Thu Mar 23 15:53:52 2006 +0100
@@ -27,8 +27,7 @@ void show_registers(struct cpu_user_regs
     if ( hvm_guest(current) && guest_mode(regs) )
     {
         context = "hvm";
-        hvm_store_cpu_guest_regs(current, &fault_regs);
-        hvm_store_cpu_guest_ctrl_regs(current, fault_crs);
+        hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
     }
     else
     {
--- a/xen/arch/x86/x86_64/traps.c	Thu Mar 23 15:30:00 2006 +0100
+++ b/xen/arch/x86/x86_64/traps.c	Thu Mar 23 15:53:52 2006 +0100
@@ -27,8 +27,7 @@ void show_registers(struct cpu_user_regs
     if ( hvm_guest(current) && guest_mode(regs) )
     {
         context = "hvm";
-        hvm_store_cpu_guest_regs(current, &fault_regs);
-        hvm_store_cpu_guest_ctrl_regs(current, fault_crs);
+        hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
     }
     else
     {
--- a/xen/include/asm-x86/hvm/hvm.h	Thu Mar 23 15:30:00 2006 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h	Thu Mar 23 15:53:52 2006 +0100
@@ -41,12 +41,12 @@ struct hvm_function_table {
     /*
      * Store and load guest state:
      * 1) load/store guest register state,
-     * 2) store guest control register state (used for panic dumps),
-     * 3) modify guest state (e.g., set debug flags).
+     * 2) modify guest state (e.g., set debug flags).
      */
-    void (*store_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
-    void (*load_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
-    void (*store_cpu_guest_ctrl_regs)(struct vcpu *v, unsigned long crs[8]);
+    void (*store_cpu_guest_regs)(
+        struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
+    void (*load_cpu_guest_regs)(
+        struct vcpu *v, struct cpu_user_regs *r);
     void (*modify_guest_state)(struct vcpu *v);
 
     /*
@@ -93,9 +93,10 @@ hvm_relinquish_guest_resources(struct do
 }
 
 static inline void
-hvm_store_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
+hvm_store_cpu_guest_regs(
+    struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs)
 {
-    hvm_funcs.store_cpu_guest_regs(v, r);
+    hvm_funcs.store_cpu_guest_regs(v, r, crs);
 }
 
 static inline void
@@ -105,12 +106,6 @@ hvm_load_cpu_guest_regs(struct vcpu *v,
 }
 
 static inline void
-hvm_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
-{
-    hvm_funcs.store_cpu_guest_ctrl_regs(v, crs);
-}
-
-static inline void
 hvm_modify_guest_state(struct vcpu *v)
 {
     hvm_funcs.modify_guest_state(v);
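Because both the SVM and VMX backends check each output pointer before using it, callers of the inline wrapper fetch exactly what they need. All three call shapes below are valid under the new signature (v, regs and crs stand for suitably declared locals, as in the hunks above):

hvm_store_cpu_guest_regs(v, &regs, crs);   /* GPRs + control regs (traps.c)  */
hvm_store_cpu_guest_regs(v, regs, NULL);   /* GPRs only (handle_mmio())      */
hvm_store_cpu_guest_regs(v, NULL, crs);    /* control registers only         */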