direct-io.hg
changeset 12578:62b0b520ea53
[HVM] Fix MSR access code.
- rdmsr/wrmsr always use ECX (not RCX) as register index.
- SVM still had the function names explicitly in the HVM_DBG_LOG() output
- the guest should (at the very minimum) see a GP fault for accesses to
  MSRs that fault even in Xen itself
Signed-off-by: Jan Beulich <jbeulich@novell.com>
author | kfraser@localhost.localdomain |
---|---|
date | Tue Nov 28 11:43:39 2006 +0000 (2006-11-28) |
parents | 7a5246955bef |
children | 519a74928bd4 |
files | xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c |
line diff
1.1 --- a/xen/arch/x86/hvm/svm/svm.c Tue Nov 28 11:35:10 2006 +0000 1.2 +++ b/xen/arch/x86/hvm/svm/svm.c Tue Nov 28 11:43:39 2006 +0000 1.3 @@ -277,7 +277,7 @@ static inline int long_mode_do_msr_read( 1.4 struct vcpu *vc = current; 1.5 struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb; 1.6 1.7 - switch (regs->ecx) 1.8 + switch ((u32)regs->ecx) 1.9 { 1.10 case MSR_EFER: 1.11 msr_content = vmcb->efer; 1.12 @@ -315,7 +315,7 @@ static inline int long_mode_do_msr_read( 1.13 return 0; 1.14 } 1.15 1.16 - HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %"PRIx64"\n", 1.17 + HVM_DBG_LOG(DBG_LEVEL_2, "msr_content: %"PRIx64"\n", 1.18 msr_content); 1.19 1.20 regs->eax = (u32)(msr_content >> 0); 1.21 @@ -329,11 +329,10 @@ static inline int long_mode_do_msr_write 1.22 struct vcpu *v = current; 1.23 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.24 1.25 - HVM_DBG_LOG(DBG_LEVEL_1, "mode_do_msr_write msr %lx " 1.26 - "msr_content %"PRIx64"\n", 1.27 - (unsigned long)regs->ecx, msr_content); 1.28 - 1.29 - switch ( regs->ecx ) 1.30 + HVM_DBG_LOG(DBG_LEVEL_1, "msr %x msr_content %"PRIx64"\n", 1.31 + (u32)regs->ecx, msr_content); 1.32 + 1.33 + switch ( (u32)regs->ecx ) 1.34 { 1.35 case MSR_EFER: 1.36 #ifdef __x86_64__ 1.37 @@ -1855,22 +1854,18 @@ static inline void svm_do_msr_access( 1.38 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.39 int inst_len; 1.40 u64 msr_content=0; 1.41 - u32 eax, edx; 1.42 + u32 ecx = regs->ecx, eax, edx; 1.43 1.44 ASSERT(vmcb); 1.45 1.46 - HVM_DBG_LOG(DBG_LEVEL_1, "svm_do_msr_access: ecx=%lx, eax=%lx, edx=%lx, " 1.47 - "exitinfo = %lx", (unsigned long)regs->ecx, 1.48 - (unsigned long)regs->eax, (unsigned long)regs->edx, 1.49 + HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x, exitinfo = %lx", 1.50 + ecx, (u32)regs->eax, (u32)regs->edx, 1.51 (unsigned long)vmcb->exitinfo1); 1.52 1.53 /* is it a read? 
*/ 1.54 if (vmcb->exitinfo1 == 0) 1.55 { 1.56 - inst_len = __get_instruction_length(vmcb, INSTR_RDMSR, NULL); 1.57 - 1.58 - regs->edx = 0; 1.59 - switch (regs->ecx) { 1.60 + switch (ecx) { 1.61 case MSR_IA32_TIME_STAMP_COUNTER: 1.62 msr_content = hvm_get_guest_time(v); 1.63 break; 1.64 @@ -1890,25 +1885,30 @@ static inline void svm_do_msr_access( 1.65 if (long_mode_do_msr_read(regs)) 1.66 goto done; 1.67 1.68 - if ( rdmsr_hypervisor_regs(regs->ecx, &eax, &edx) ) 1.69 + if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) || 1.70 + rdmsr_safe(ecx, eax, edx) == 0 ) 1.71 { 1.72 regs->eax = eax; 1.73 regs->edx = edx; 1.74 goto done; 1.75 } 1.76 - 1.77 - rdmsr_safe(regs->ecx, regs->eax, regs->edx); 1.78 - break; 1.79 + svm_inject_exception(v, TRAP_gp_fault, 1, 0); 1.80 + return; 1.81 } 1.82 regs->eax = msr_content & 0xFFFFFFFF; 1.83 regs->edx = msr_content >> 32; 1.84 + 1.85 + done: 1.86 + HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx", 1.87 + ecx, (unsigned long)regs->eax, (unsigned long)regs->edx); 1.88 + 1.89 + inst_len = __get_instruction_length(vmcb, INSTR_RDMSR, NULL); 1.90 } 1.91 else 1.92 { 1.93 - inst_len = __get_instruction_length(vmcb, INSTR_WRMSR, NULL); 1.94 msr_content = (u32)regs->eax | ((u64)regs->edx << 32); 1.95 1.96 - switch (regs->ecx) 1.97 + switch (ecx) 1.98 { 1.99 case MSR_IA32_TIME_STAMP_COUNTER: 1.100 hvm_set_guest_time(v, msr_content); 1.101 @@ -1927,18 +1927,13 @@ static inline void svm_do_msr_access( 1.102 break; 1.103 default: 1.104 if ( !long_mode_do_msr_write(regs) ) 1.105 - wrmsr_hypervisor_regs(regs->ecx, regs->eax, regs->edx); 1.106 + wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx); 1.107 break; 1.108 } 1.109 + 1.110 + inst_len = __get_instruction_length(vmcb, INSTR_WRMSR, NULL); 1.111 } 1.112 1.113 - done: 1.114 - 1.115 - HVM_DBG_LOG(DBG_LEVEL_1, "svm_do_msr_access returns: " 1.116 - "ecx=%lx, eax=%lx, edx=%lx", 1.117 - (unsigned long)regs->ecx, (unsigned long)regs->eax, 1.118 - (unsigned long)regs->edx); 1.119 - 1.120 
__update_guest_eip(vmcb, inst_len); 1.121 } 1.122
2.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Tue Nov 28 11:35:10 2006 +0000 2.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Tue Nov 28 11:43:39 2006 +0000 2.3 @@ -116,7 +116,7 @@ static inline int long_mode_do_msr_read( 2.4 struct vcpu *v = current; 2.5 struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state; 2.6 2.7 - switch ( regs->ecx ) { 2.8 + switch ( (u32)regs->ecx ) { 2.9 case MSR_EFER: 2.10 HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content 0x%"PRIx64, msr_content); 2.11 msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_EFER]; 2.12 @@ -169,10 +169,10 @@ static inline int long_mode_do_msr_write 2.13 struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state; 2.14 struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state); 2.15 2.16 - HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%lx msr_content 0x%"PRIx64"\n", 2.17 - (unsigned long)regs->ecx, msr_content); 2.18 + HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%x msr_content 0x%"PRIx64"\n", 2.19 + (u32)regs->ecx, msr_content); 2.20 2.21 - switch ( regs->ecx ) { 2.22 + switch ( (u32)regs->ecx ) { 2.23 case MSR_EFER: 2.24 /* offending reserved bit will cause #GP */ 2.25 if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) ) 2.26 @@ -1790,16 +1790,16 @@ static int vmx_cr_access(unsigned long e 2.27 return 1; 2.28 } 2.29 2.30 -static inline void vmx_do_msr_read(struct cpu_user_regs *regs) 2.31 +static inline int vmx_do_msr_read(struct cpu_user_regs *regs) 2.32 { 2.33 u64 msr_content = 0; 2.34 - u32 eax, edx; 2.35 + u32 ecx = regs->ecx, eax, edx; 2.36 struct vcpu *v = current; 2.37 2.38 - HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%lx, eax=%lx, edx=%lx", 2.39 - (unsigned long)regs->ecx, (unsigned long)regs->eax, 2.40 - (unsigned long)regs->edx); 2.41 - switch (regs->ecx) { 2.42 + HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x", 2.43 + ecx, (u32)regs->eax, (u32)regs->edx); 2.44 + 2.45 + switch (ecx) { 2.46 case MSR_IA32_TIME_STAMP_COUNTER: 2.47 msr_content = hvm_get_guest_time(v); 2.48 break; 2.49 @@ -1817,39 +1817,41 @@ static inline void 
vmx_do_msr_read(struc 2.50 break; 2.51 default: 2.52 if ( long_mode_do_msr_read(regs) ) 2.53 - return; 2.54 + goto done; 2.55 2.56 - if ( rdmsr_hypervisor_regs(regs->ecx, &eax, &edx) ) 2.57 + if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) || 2.58 + rdmsr_safe(ecx, eax, edx) == 0 ) 2.59 { 2.60 regs->eax = eax; 2.61 regs->edx = edx; 2.62 - return; 2.63 + goto done; 2.64 } 2.65 - 2.66 - rdmsr_safe(regs->ecx, regs->eax, regs->edx); 2.67 - return; 2.68 + vmx_inject_hw_exception(v, TRAP_gp_fault, 0); 2.69 + return 0; 2.70 } 2.71 2.72 regs->eax = msr_content & 0xFFFFFFFF; 2.73 regs->edx = msr_content >> 32; 2.74 2.75 - HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%lx, eax=%lx, edx=%lx", 2.76 - (unsigned long)regs->ecx, (unsigned long)regs->eax, 2.77 +done: 2.78 + HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx", 2.79 + ecx, (unsigned long)regs->eax, 2.80 (unsigned long)regs->edx); 2.81 + return 1; 2.82 } 2.83 2.84 -static inline void vmx_do_msr_write(struct cpu_user_regs *regs) 2.85 +static inline int vmx_do_msr_write(struct cpu_user_regs *regs) 2.86 { 2.87 + u32 ecx = regs->ecx; 2.88 u64 msr_content; 2.89 struct vcpu *v = current; 2.90 2.91 - HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%lx, eax=%lx, edx=%lx", 2.92 - (unsigned long)regs->ecx, (unsigned long)regs->eax, 2.93 - (unsigned long)regs->edx); 2.94 + HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x", 2.95 + ecx, (u32)regs->eax, (u32)regs->edx); 2.96 2.97 msr_content = (u32)regs->eax | ((u64)regs->edx << 32); 2.98 2.99 - switch (regs->ecx) { 2.100 + switch (ecx) { 2.101 case MSR_IA32_TIME_STAMP_COUNTER: 2.102 { 2.103 struct periodic_time *pt = 2.104 @@ -1874,13 +1876,11 @@ static inline void vmx_do_msr_write(stru 2.105 break; 2.106 default: 2.107 if ( !long_mode_do_msr_write(regs) ) 2.108 - wrmsr_hypervisor_regs(regs->ecx, regs->eax, regs->edx); 2.109 + wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx); 2.110 break; 2.111 } 2.112 2.113 - HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%lx, eax=%lx, edx=%lx", 2.114 - (unsigned 
long)regs->ecx, (unsigned long)regs->eax, 2.115 - (unsigned long)regs->edx); 2.116 + return 1; 2.117 } 2.118 2.119 static void vmx_do_hlt(void) 2.120 @@ -2244,16 +2244,16 @@ asmlinkage void vmx_vmexit_handler(struc 2.121 break; 2.122 case EXIT_REASON_MSR_READ: 2.123 inst_len = __get_instruction_length(); /* Safe: RDMSR */ 2.124 - __update_guest_eip(inst_len); 2.125 - vmx_do_msr_read(regs); 2.126 + if ( vmx_do_msr_read(regs) ) 2.127 + __update_guest_eip(inst_len); 2.128 TRACE_VMEXIT(1, regs->ecx); 2.129 TRACE_VMEXIT(2, regs->eax); 2.130 TRACE_VMEXIT(3, regs->edx); 2.131 break; 2.132 case EXIT_REASON_MSR_WRITE: 2.133 inst_len = __get_instruction_length(); /* Safe: WRMSR */ 2.134 - __update_guest_eip(inst_len); 2.135 - vmx_do_msr_write(regs); 2.136 + if ( vmx_do_msr_write(regs) ) 2.137 + __update_guest_eip(inst_len); 2.138 TRACE_VMEXIT(1, regs->ecx); 2.139 TRACE_VMEXIT(2, regs->eax); 2.140 TRACE_VMEXIT(3, regs->edx);