ia64/xen-unstable
changeset 14276:8bae3387b86c
[SVM] Use svm_long_mode_enabled() when checking if guest is in long mode
This patch fixes the previously inconsistent checks for whether the guest is in
long mode (EFER.LMA == 1) by routing them all through svm_long_mode_enabled().
Signed-off-by: Travis Betak <travis.betak@amd.com>
author | Travis Betak <travis.betak@amd.com> |
---|---|
date | Mon Mar 05 16:21:50 2007 -0600 (2007-03-05) |
parents | 647fcc8f0c83 |
children | c8843678cb9b |
files | xen/arch/x86/hvm/svm/emulate.c xen/arch/x86/hvm/svm/svm.c xen/include/asm-x86/hvm/svm/emulate.h xen/include/asm-x86/hvm/svm/svm.h |
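The consolidation is easiest to see in isolation. Below is a minimal sketch of the pattern this changeset adopts, taken directly from the svm.h hunk further down: rather than each call site open-coding a test such as `vmcb->cs.attr.fields.l && (vmcb->efer & EFER_LMA)`, one helper reads the guest's shadow EFER, where LMA is tracked.

    /* Consistent long-mode check: read the guest's shadow EFER. */
    static inline int svm_long_mode_enabled(struct vcpu *v)
    {
        u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
        return guest_efer & EFER_LMA;
    }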
line diff
--- a/xen/arch/x86/hvm/svm/emulate.c	Mon Mar 05 16:21:11 2007 -0600
+++ b/xen/arch/x86/hvm/svm/emulate.c	Mon Mar 05 16:21:50 2007 -0600
@@ -24,9 +24,11 @@
 #include <asm/msr.h>
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/support.h>
+#include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/vmcb.h>
 #include <asm/hvm/svm/emulate.h>
 
+
 extern int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip,
                                 int inst_len);
 
@@ -133,13 +135,15 @@ static inline unsigned long DECODE_GPR_V
 #define sib operand [1]
 
 
-unsigned long get_effective_addr_modrm64(struct vmcb_struct *vmcb,
-        struct cpu_user_regs *regs, const u8 prefix, int inst_len,
-        const u8 *operand, u8 *size)
+unsigned long get_effective_addr_modrm64(struct cpu_user_regs *regs,
+                                         const u8 prefix, int inst_len,
+                                         const u8 *operand, u8 *size)
 {
     unsigned long effective_addr = (unsigned long) -1;
     u8 length, modrm_mod, modrm_rm;
     u32 disp = 0;
+    struct vcpu *v = current;
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     HVM_DBG_LOG(DBG_LEVEL_1, "get_effective_addr_modrm64(): prefix = %x, "
                 "length = %d, operand[0,1] = %x %x.\n", prefix, *size, operand [0],
@@ -198,7 +202,7 @@ unsigned long get_effective_addr_modrm64
 
 #if __x86_64__
         /* 64-bit mode */
-        if (vmcb->cs.attr.fields.l && (vmcb->efer & EFER_LMA))
+        if (vmcb->cs.attr.fields.l && svm_long_mode_enabled(v))
             return vmcb->rip + inst_len + *size + disp;
 #endif
         return disp;
@@ -310,7 +314,7 @@ unsigned int decode_src_reg(u8 prefix, u
 }
 
 
-unsigned long svm_rip2pointer(struct vmcb_struct *vmcb)
+unsigned long svm_rip2pointer(struct vcpu *v)
 {
     /*
      * The following is subtle. Intuitively this code would be something like:
@@ -322,8 +326,9 @@ unsigned long svm_rip2pointer(struct vmc
      * %cs is update, but fortunately, base contain the valid base address
      * no matter what kind of addressing is used.
      */
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     unsigned long p = vmcb->cs.base + vmcb->rip;
-    if (!(vmcb->cs.attr.fields.l && vmcb->efer & EFER_LMA))
+    if (!(vmcb->cs.attr.fields.l && svm_long_mode_enabled(v)))
         return (u32)p; /* mask to 32 bits */
     /* NB. Should mask to 16 bits if in real mode or 16-bit protected mode. */
     return p;
@@ -410,10 +415,11 @@ static const u8 *opc_bytes[INSTR_MAX_COU
  * The caller can either pass a NULL pointer to the guest_eip_buf, or a pointer
  * to enough bytes to satisfy the instruction including prefix bytes.
  */
-int __get_instruction_length_from_list(struct vmcb_struct *vmcb,
+int __get_instruction_length_from_list(struct vcpu *v,
         enum instruction_index *list, unsigned int list_count,
         u8 *guest_eip_buf, enum instruction_index *match)
 {
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     unsigned int inst_len = 0;
     unsigned int i;
     unsigned int j;
@@ -429,7 +435,7 @@ int __get_instruction_length_from_list(s
     }
     else
     {
-        inst_copy_from_guest(buffer, svm_rip2pointer(vmcb), MAX_INST_LEN);
+        inst_copy_from_guest(buffer, svm_rip2pointer(v), MAX_INST_LEN);
         buf = buffer;
     }
--- a/xen/arch/x86/hvm/svm/svm.c	Mon Mar 05 16:21:11 2007 -0600
+++ b/xen/arch/x86/hvm/svm/svm.c	Mon Mar 05 16:21:50 2007 -0600
@@ -148,35 +148,6 @@ static void svm_store_cpu_guest_regs(
     }
 }
 
-static int svm_lme_is_set(struct vcpu *v)
-{
-    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
-    return guest_efer & EFER_LME;
-}
-
-static int svm_cr4_pae_is_set(struct vcpu *v)
-{
-    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
-    return guest_cr4 & X86_CR4_PAE;
-}
-
-static int svm_paging_enabled(struct vcpu *v)
-{
-    unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-    return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
-}
-
-static int svm_pae_enabled(struct vcpu *v)
-{
-    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
-    return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
-}
-
-static int svm_long_mode_enabled(struct vcpu *v)
-{
-    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
-    return guest_efer & EFER_LMA;
-}
 
 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
 {
@@ -657,7 +628,7 @@ static int svm_guest_x86_mode(struct vcp
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-    if ( (vmcb->efer & EFER_LMA) && vmcb->cs.attr.fields.l )
+    if ( svm_long_mode_enabled(v) && vmcb->cs.attr.fields.l )
         return 8;
 
     if ( svm_realmode(v) )
@@ -707,7 +678,7 @@ static unsigned long svm_get_segment_bas
     int long_mode = 0;
 
 #ifdef __x86_64__
-    long_mode = vmcb->cs.attr.fields.l && (vmcb->efer & EFER_LMA);
+    long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
 #endif
     switch ( seg )
     {
@@ -1140,7 +1111,7 @@ static void svm_do_general_protection_fa
         printk("Huh? We got a GP Fault with an invalid IDTR!\n");
         svm_dump_vmcb(__func__, vmcb);
         svm_dump_regs(__func__, regs);
-        svm_dump_inst(svm_rip2pointer(vmcb));
+        svm_dump_inst(svm_rip2pointer(v));
         domain_crash(v->domain);
         return;
     }
@@ -1235,7 +1206,7 @@ static void svm_vmexit_do_cpuid(struct v
     HVMTRACE_3D(CPUID, v, input,
                 ((uint64_t)eax << 32) | ebx, ((uint64_t)ecx << 32) | edx);
 
-    inst_len = __get_instruction_length(vmcb, INSTR_CPUID, NULL);
+    inst_len = __get_instruction_length(v, INSTR_CPUID, NULL);
     ASSERT(inst_len > 0);
     __update_guest_eip(vmcb, inst_len);
 }
@@ -1338,15 +1309,16 @@ static void svm_dr_access(struct vcpu *v
 }
 
 
-static void svm_get_prefix_info(
-    struct vmcb_struct *vmcb,
-    unsigned int dir, svm_segment_register_t **seg, unsigned int *asize)
+static void svm_get_prefix_info(struct vcpu *v, unsigned int dir,
+                                svm_segment_register_t **seg,
+                                unsigned int *asize)
 {
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     unsigned char inst[MAX_INST_LEN];
     int i;
 
     memset(inst, 0, MAX_INST_LEN);
-    if (inst_copy_from_guest(inst, svm_rip2pointer(vmcb), sizeof(inst))
+    if (inst_copy_from_guest(inst, svm_rip2pointer(v), sizeof(inst))
         != MAX_INST_LEN)
     {
         gdprintk(XENLOG_ERR, "get guest instruction failed\n");
@@ -1426,7 +1398,7 @@ static inline int svm_get_io_address(
 
 #ifdef __x86_64__
     /* If we're in long mode, we shouldn't check the segment presence & limit */
-    long_mode = vmcb->cs.attr.fields.l && vmcb->efer & EFER_LMA;
+    long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
 #endif
 
     /* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit.
@@ -1445,7 +1417,7 @@ static inline int svm_get_io_address(
         isize --;
 
     if (isize > 1)
-        svm_get_prefix_info(vmcb, info.fields.type, &seg, &asize);
+        svm_get_prefix_info(v, info.fields.type, &seg, &asize);
 
     if (info.fields.type == IOREQ_WRITE)
     {
@@ -1876,12 +1848,6 @@ static void mov_from_cr(int cr, int gp,
 }
 
 
-static inline int svm_pgbit_test(struct vcpu *v)
-{
-    return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
-}
-
-
 /*
  * Write to control registers
  */
@@ -2046,7 +2012,7 @@ static int svm_cr_access(struct vcpu *v,
 
     ASSERT(vmcb);
 
-    inst_copy_from_guest(buffer, svm_rip2pointer(vmcb), sizeof(buffer));
+    inst_copy_from_guest(buffer, svm_rip2pointer(v), sizeof(buffer));
 
     /* get index to first actual instruction byte - as we will need to know
        where the prefix lives later on */
@@ -2055,12 +2021,12 @@ static int svm_cr_access(struct vcpu *v,
     if ( type == TYPE_MOV_TO_CR )
     {
         inst_len = __get_instruction_length_from_list(
-            vmcb, list_a, ARR_SIZE(list_a), &buffer[index], &match);
+            v, list_a, ARR_SIZE(list_a), &buffer[index], &match);
     }
     else /* type == TYPE_MOV_FROM_CR */
     {
         inst_len = __get_instruction_length_from_list(
-            vmcb, list_b, ARR_SIZE(list_b), &buffer[index], &match);
+            v, list_b, ARR_SIZE(list_b), &buffer[index], &match);
     }
 
     ASSERT(inst_len > 0);
@@ -2095,7 +2061,7 @@ static int svm_cr_access(struct vcpu *v,
 
     case INSTR_LMSW:
         if (svm_dbg_on)
-            svm_dump_inst(svm_rip2pointer(vmcb));
+            svm_dump_inst(svm_rip2pointer(v));
 
         gpreg = decode_src_reg(prefix, buffer[index+2]);
         value = get_reg(gpreg, regs, vmcb) & 0xF;
@@ -2114,7 +2080,7 @@ static int svm_cr_access(struct vcpu *v,
 
     case INSTR_SMSW:
         if (svm_dbg_on)
-            svm_dump_inst(svm_rip2pointer(vmcb));
+            svm_dump_inst(svm_rip2pointer(v));
         value = v->arch.hvm_svm.cpu_shadow_cr0;
         gpreg = decode_src_reg(prefix, buffer[index+2]);
         set_reg(gpreg, value, regs, vmcb);
@@ -2190,7 +2156,7 @@ static inline void svm_do_msr_access(
         HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
                     ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
 
-        inst_len = __get_instruction_length(vmcb, INSTR_RDMSR, NULL);
+        inst_len = __get_instruction_length(v, INSTR_RDMSR, NULL);
     }
     else
     {
@@ -2222,7 +2188,7 @@ static inline void svm_do_msr_access(
             break;
         }
 
-        inst_len = __get_instruction_length(vmcb, INSTR_WRMSR, NULL);
+        inst_len = __get_instruction_length(v, INSTR_WRMSR, NULL);
     }
 
     __update_guest_eip(vmcb, inst_len);
@@ -2245,8 +2211,9 @@ static inline void svm_vmexit_do_hlt(str
 }
 
 
-static void svm_vmexit_do_invd(struct vmcb_struct *vmcb)
+static void svm_vmexit_do_invd(struct vcpu *v)
 {
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     int inst_len;
 
     /* Invalidate the cache - we can't really do that safely - maybe we should
@@ -2259,7 +2226,7 @@ static void svm_vmexit_do_invd(struct vm
      */
     printk("INVD instruction intercepted - ignored\n");
 
-    inst_len = __get_instruction_length(vmcb, INSTR_INVD, NULL);
+    inst_len = __get_instruction_length(v, INSTR_INVD, NULL);
     __update_guest_eip(vmcb, inst_len);
 }
 
@@ -2311,7 +2278,7 @@ void svm_handle_invlpg(const short invlp
     * Unknown how many bytes the invlpg instruction will take. Use the
     * maximum instruction length here
     */
-    if (inst_copy_from_guest(opcode, svm_rip2pointer(vmcb), length) < length)
+    if (inst_copy_from_guest(opcode, svm_rip2pointer(v), length) < length)
    {
        gdprintk(XENLOG_ERR, "Error reading memory %d bytes\n", length);
        domain_crash(v->domain);
@@ -2320,7 +2287,7 @@ void svm_handle_invlpg(const short invlp
 
    if (invlpga)
    {
-        inst_len = __get_instruction_length(vmcb, INSTR_INVLPGA, opcode);
+        inst_len = __get_instruction_length(v, INSTR_INVLPGA, opcode);
        ASSERT(inst_len > 0);
        __update_guest_eip(vmcb, inst_len);
 
@@ -2334,7 +2301,7 @@ void svm_handle_invlpg(const short invlp
    {
        /* What about multiple prefix codes? */
        prefix = (is_prefix(opcode[0])?opcode[0]:0);
-        inst_len = __get_instruction_length(vmcb, INSTR_INVLPG, opcode);
+        inst_len = __get_instruction_length(v, INSTR_INVLPG, opcode);
        ASSERT(inst_len > 0);
 
        inst_len--;
@@ -2345,7 +2312,7 @@ void svm_handle_invlpg(const short invlp
         * displacement to get effective address and length in bytes. Assume
         * the system in either 32- or 64-bit mode.
         */
-        g_vaddr = get_effective_addr_modrm64(vmcb, regs, prefix, inst_len,
+        g_vaddr = get_effective_addr_modrm64(regs, prefix, inst_len,
                                              &opcode[inst_len], &length);
 
        inst_len += length;
@@ -2466,7 +2433,7 @@ static int svm_do_vmmcall(struct vcpu *v
    ASSERT(vmcb);
    ASSERT(regs);
 
-    inst_len = __get_instruction_length(vmcb, INSTR_VMCALL, NULL);
+    inst_len = __get_instruction_length(v, INSTR_VMCALL, NULL);
    ASSERT(inst_len > 0);
 
    HVMTRACE_1D(VMMCALL, v, regs->eax);
@@ -2876,7 +2843,7 @@ asmlinkage void svm_vmexit_handler(struc
 
        svm_dump_vmcb(__func__, vmcb);
        svm_dump_regs(__func__, regs);
-        svm_dump_inst(svm_rip2pointer(vmcb));
+        svm_dump_inst(svm_rip2pointer(v));
    }
 
 #if defined(__i386__)
@@ -2978,7 +2945,7 @@ asmlinkage void svm_vmexit_handler(struc
        /* Debug info to hopefully help debug WHY the guest double-faulted. */
        svm_dump_vmcb(__func__, vmcb);
        svm_dump_regs(__func__, regs);
-        svm_dump_inst(svm_rip2pointer(vmcb));
+        svm_dump_inst(svm_rip2pointer(v));
        svm_inject_exception(v, TRAP_double_fault, 1, 0);
        break;
 
@@ -2988,7 +2955,7 @@ asmlinkage void svm_vmexit_handler(struc
        break;
 
    case VMEXIT_INVD:
-        svm_vmexit_do_invd(vmcb);
+        svm_vmexit_do_invd(v);
        break;
 
    case VMEXIT_GDTR_WRITE:
--- a/xen/include/asm-x86/hvm/svm/emulate.h	Mon Mar 05 16:21:11 2007 -0600
+++ b/xen/include/asm-x86/hvm/svm/emulate.h	Mon Mar 05 16:21:50 2007 -0600
@@ -76,7 +76,7 @@ enum instruction_index {
 };
 
 
-extern unsigned long get_effective_addr_modrm64(struct vmcb_struct *vmcb,
+extern unsigned long get_effective_addr_modrm64(
         struct cpu_user_regs *regs, const u8 prefix, int inst_len,
         const u8 *operand, u8 *size);
 extern unsigned long get_effective_addr_sib(struct vmcb_struct *vmcb,
@@ -85,17 +85,17 @@ extern unsigned long get_effective_addr_
 extern OPERATING_MODE get_operating_mode (struct vmcb_struct *vmcb);
 extern unsigned int decode_dest_reg(u8 prefix, u8 modrm);
 extern unsigned int decode_src_reg(u8 prefix, u8 modrm);
-extern unsigned long svm_rip2pointer(struct vmcb_struct *vmcb);
-extern int __get_instruction_length_from_list(struct vmcb_struct *vmcb,
+extern unsigned long svm_rip2pointer(struct vcpu *v);
+extern int __get_instruction_length_from_list(struct vcpu *v,
         enum instruction_index *list, unsigned int list_count,
         u8 *guest_eip_buf, enum instruction_index *match);
 
 
-static inline int __get_instruction_length(struct vmcb_struct *vmcb,
+static inline int __get_instruction_length(struct vcpu *v,
         enum instruction_index instr, u8 *guest_eip_buf)
 {
     return __get_instruction_length_from_list(
-        vmcb, &instr, 1, guest_eip_buf, NULL);
+        v, &instr, 1, guest_eip_buf, NULL);
 }
 
--- a/xen/include/asm-x86/hvm/svm/svm.h	Mon Mar 05 16:21:11 2007 -0600
+++ b/xen/include/asm-x86/hvm/svm/svm.h	Mon Mar 05 16:21:50 2007 -0600
@@ -34,6 +34,41 @@ extern void arch_svm_do_resume(struct vc
 
 extern u64 root_vmcb_pa[NR_CPUS];
 
+static inline int svm_long_mode_enabled(struct vcpu *v)
+{
+    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
+    return guest_efer & EFER_LMA;
+}
+
+static inline int svm_lme_is_set(struct vcpu *v)
+{
+    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
+    return guest_efer & EFER_LME;
+}
+
+static inline int svm_cr4_pae_is_set(struct vcpu *v)
+{
+    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+    return guest_cr4 & X86_CR4_PAE;
+}
+
+static inline int svm_paging_enabled(struct vcpu *v)
+{
+    unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
+    return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
+}
+
+static inline int svm_pae_enabled(struct vcpu *v)
+{
+    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+    return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
+}
+
+static inline int svm_pgbit_test(struct vcpu *v)
+{
+    return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
+}
+
 #define SVM_REG_EAX (0)
 #define SVM_REG_ECX (1)
 #define SVM_REG_EDX (2)
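For illustration only — the handler below is hypothetical and not part of this changeset — this is the calling convention the diff establishes: intercept handlers pass the vcpu down, and the emulation helpers fetch the VMCB internally rather than taking it as a parameter.

    /* Hypothetical intercept handler sketch; only the helpers shown in the
     * hunks above (svm_long_mode_enabled, __get_instruction_length,
     * __update_guest_eip, svm_rip2pointer) are taken from the changeset. */
    static void example_cpuid_intercept(struct vcpu *v)
    {
        struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
        int inst_len;

        /* EFER.LMA is tested via the helper, never vmcb->efer directly. */
        if ( svm_long_mode_enabled(v) && vmcb->cs.attr.fields.l )
            gdprintk(XENLOG_INFO, "guest is executing 64-bit code\n");

        /* Helpers now take the vcpu, not the VMCB. */
        inst_len = __get_instruction_length(v, INSTR_CPUID, NULL);
        ASSERT(inst_len > 0);
        __update_guest_eip(vmcb, inst_len);
    }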