return X86EMUL_EXCEPTION;
}
-static int vmx_cr_access(unsigned long exit_qualification)
+static int vmx_cr_access(cr_access_qual_t qual)
{
struct vcpu *curr = current;
- switch ( VMX_CONTROL_REG_ACCESS_TYPE(exit_qualification) )
+ switch ( qual.access_type )
+ {
+ case VMX_CR_ACCESS_TYPE_MOV_TO_CR:
+ return hvm_mov_to_cr(qual.cr, qual.gpr);
+
+ case VMX_CR_ACCESS_TYPE_MOV_FROM_CR:
+ return hvm_mov_from_cr(qual.cr, qual.gpr);
+
+ case VMX_CR_ACCESS_TYPE_CLTS:
{
- case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR: {
- unsigned long gp = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
- unsigned long cr = VMX_CONTROL_REG_ACCESS_NUM(exit_qualification);
- return hvm_mov_to_cr(cr, gp);
- }
- case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR: {
- unsigned long gp = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
- unsigned long cr = VMX_CONTROL_REG_ACCESS_NUM(exit_qualification);
- return hvm_mov_from_cr(cr, gp);
- }
- case VMX_CONTROL_REG_ACCESS_TYPE_CLTS: {
unsigned long old = curr->arch.hvm_vcpu.guest_cr[0];
unsigned long value = old & ~X86_CR0_TS;
HVMTRACE_0D(CLTS);
break;
}
- case VMX_CONTROL_REG_ACCESS_TYPE_LMSW: {
+
+ case VMX_CR_ACCESS_TYPE_LMSW:
+ {
unsigned long value = curr->arch.hvm_vcpu.guest_cr[0];
int rc;
/* LMSW can (1) set PE; (2) set or clear MP, EM, and TS. */
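+        /*
+         * Worked example (illustrative): if guest_cr[0] currently has
+         * PE|MP|TS set and the LMSW source operand is just EM, the
+         * expression below yields PE|EM: MP and TS are cleared, EM is
+         * set, and PE is preserved (LMSW can never clear PE).
+         */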
value = (value & ~(X86_CR0_MP|X86_CR0_EM|X86_CR0_TS)) |
- (VMX_CONTROL_REG_ACCESS_DATA(exit_qualification) &
+ (qual.lmsw_data &
(X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS));
HVMTRACE_LONG_1D(LMSW, value);
return rc;
}
+
default:
- BUG();
+ ASSERT_UNREACHABLE();
+ return X86EMUL_UNHANDLEABLE;
}
return X86EMUL_OKAY;
break;
case EXIT_REASON_CR_ACCESS:
{
- unsigned long exit_qualification;
- int cr, write;
+ cr_access_qual_t qual;
u32 mask = 0;
- __vmread(EXIT_QUALIFICATION, &exit_qualification);
- cr = VMX_CONTROL_REG_ACCESS_NUM(exit_qualification);
- write = VMX_CONTROL_REG_ACCESS_TYPE(exit_qualification);
+ __vmread(EXIT_QUALIFICATION, &qual.raw);
/* also according to guest exec_control */
ctrl = __n2_exec_control(v);
- if ( cr == 3 )
+ /* CLTS/LMSW strictly act on CR0 */
+ if ( qual.access_type >= VMX_CR_ACCESS_TYPE_CLTS )
+ ASSERT(qual.cr == 0);
+
+ if ( qual.cr == 3 )
{
- mask = write? CPU_BASED_CR3_STORE_EXITING:
- CPU_BASED_CR3_LOAD_EXITING;
+ mask = qual.access_type ? CPU_BASED_CR3_STORE_EXITING
+ : CPU_BASED_CR3_LOAD_EXITING;
if ( ctrl & mask )
nvcpu->nv_vmexit_pending = 1;
}
- else if ( cr == 8 )
+ else if ( qual.cr == 8 )
{
- mask = write? CPU_BASED_CR8_STORE_EXITING:
- CPU_BASED_CR8_LOAD_EXITING;
+ mask = qual.access_type ? CPU_BASED_CR8_STORE_EXITING
+ : CPU_BASED_CR8_LOAD_EXITING;
if ( ctrl & mask )
nvcpu->nv_vmexit_pending = 1;
}
* Otherwise, L0 will handle it and sync the value to L1 virtual VMCS.
*/
unsigned long old_val, val, changed_bits;
- switch ( VMX_CONTROL_REG_ACCESS_TYPE(exit_qualification) )
+
+ switch ( qual.access_type )
{
- case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
+ case VMX_CR_ACCESS_TYPE_MOV_TO_CR:
{
- unsigned long gp = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
- val = *decode_gpr(guest_cpu_user_regs(), gp);
+ val = *decode_gpr(guest_cpu_user_regs(), qual.gpr);
- if ( cr == 0 )
+ if ( qual.cr == 0 )
{
u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK);
(guest_cr0 & cr0_gh_mask) | (val & ~cr0_gh_mask));
}
}
- else if ( cr == 4 )
+ else if ( qual.cr == 4 )
{
u64 cr4_gh_mask = get_vvmcs(v, CR4_GUEST_HOST_MASK);
nvcpu->nv_vmexit_pending = 1;
break;
}
- case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
+
+ case VMX_CR_ACCESS_TYPE_CLTS:
{
u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK);
}
break;
}
- case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
+
+ case VMX_CR_ACCESS_TYPE_LMSW:
{
u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK);
__vmread(CR0_READ_SHADOW, &old_val);
old_val &= X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS;
- val = VMX_CONTROL_REG_ACCESS_DATA(exit_qualification) &
+ val = qual.lmsw_data &
(X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS);
changed_bits = old_val ^ val;
if ( changed_bits & cr0_gh_mask )
}
break;
}
+
default:
+ ASSERT_UNREACHABLE();
break;
}
}
/*
* Exit Qualifications for MOV for Control Register Access
*/
- /* 3:0 - control register number (CRn) */
-#define VMX_CONTROL_REG_ACCESS_NUM(eq) ((eq) & 0xf)
- /* 5:4 - access type (CR write, CR read, CLTS, LMSW) */
-#define VMX_CONTROL_REG_ACCESS_TYPE(eq) (((eq) >> 4) & 0x3)
-# define VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR 0
-# define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR 1
-# define VMX_CONTROL_REG_ACCESS_TYPE_CLTS 2
-# define VMX_CONTROL_REG_ACCESS_TYPE_LMSW 3
- /* 11:8 - general purpose register operand */
-#define VMX_CONTROL_REG_ACCESS_GPR(eq) (((eq) >> 8) & 0xf)
- /* 31:16 - LMSW source data */
-#define VMX_CONTROL_REG_ACCESS_DATA(eq) ((uint32_t)(eq) >> 16)
+enum {
+ VMX_CR_ACCESS_TYPE_MOV_TO_CR,
+ VMX_CR_ACCESS_TYPE_MOV_FROM_CR,
+ VMX_CR_ACCESS_TYPE_CLTS,
+ VMX_CR_ACCESS_TYPE_LMSW,
+};
+typedef union cr_access_qual {
+ unsigned long raw;
+ struct {
+ uint16_t cr:4,
+ access_type:2, /* VMX_CR_ACCESS_TYPE_* */
+ lmsw_op_type:1, /* 0 => reg, 1 => mem */
+ :1,
+ gpr:4,
+ :4;
+ uint16_t lmsw_data;
+ uint32_t :32;
+ };
+} __transparent__ cr_access_qual_t;
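+
+/*
+ * Worked example (illustrative): a raw exit qualification of 0x804 decodes
+ * through the bitfields above as cr == 4,
+ * access_type == VMX_CR_ACCESS_TYPE_MOV_TO_CR, gpr == 8 and lmsw_data == 0,
+ * i.e. a MOV to %cr4 from register 8.  The field positions match the bits
+ * the removed VMX_CONTROL_REG_ACCESS_*() macros used to shift and mask out,
+ * and the transparent-union attribute is presumably what lets callers keep
+ * handing the raw unsigned long exit qualification straight to
+ * vmx_cr_access().
+ */
+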
/*
* Access Rights