unsigned long val,
struct x86_emulate_ctxt *ctxt)
{
+ int rc;
+
HVMTRACE_LONG_2D(CR_WRITE, reg, TRC_PAR_LONG(val));
switch ( reg )
{
case 0:
- return hvm_set_cr0(val, 1);
+ rc = hvm_set_cr0(val, 1);
+ break;
+
case 2:
current->arch.hvm_vcpu.guest_cr[2] = val;
- return X86EMUL_OKAY;
+ rc = X86EMUL_OKAY;
+ break;
+
case 3:
- return hvm_set_cr3(val, 1);
+ rc = hvm_set_cr3(val, 1);
+ break;
+
case 4:
- return hvm_set_cr4(val, 1);
+ rc = hvm_set_cr4(val, 1);
+ break;
+
default:
+ rc = X86EMUL_UNHANDLEABLE;
break;
}
- return X86EMUL_UNHANDLEABLE;
+ if ( rc == X86EMUL_EXCEPTION )
+ x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+
+ return rc;
}
static int hvmemul_read_msr(
if ( w->do_write.cr0 )
{
- hvm_set_cr0(w->cr0, 0);
+ if ( hvm_set_cr0(w->cr0, 0) == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
w->do_write.cr0 = 0;
}
if ( w->do_write.cr4 )
{
- hvm_set_cr4(w->cr4, 0);
+ if ( hvm_set_cr4(w->cr4, 0) == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
w->do_write.cr4 = 0;
}
if ( w->do_write.cr3 )
{
- hvm_set_cr3(w->cr3, 0);
+ if ( hvm_set_cr3(w->cr3, 0) == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
w->do_write.cr3 = 0;
}
}
{
struct vcpu *curr = current;
unsigned long val, *reg;
+ int rc;
if ( (reg = decode_register(gpr, guest_cpu_user_regs(), 0)) == NULL )
{
switch ( cr )
{
case 0:
- return hvm_set_cr0(val, 1);
+ rc = hvm_set_cr0(val, 1);
+ break;
case 3:
- return hvm_set_cr3(val, 1);
+ rc = hvm_set_cr3(val, 1);
+ break;
case 4:
- return hvm_set_cr4(val, 1);
+ rc = hvm_set_cr4(val, 1);
+ break;
case 8:
vlapic_set_reg(vcpu_vlapic(curr), APIC_TASKPRI, ((val & 0x0f) << 4));
+ rc = X86EMUL_OKAY;
break;
default:
goto exit_and_crash;
}
- return X86EMUL_OKAY;
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
+ return rc;
exit_and_crash:
domain_crash(curr->domain);
HVM_DBG_LOG(DBG_LEVEL_1,
"Guest attempts to set upper 32 bits in CR0: %lx",
value);
- goto gpf;
+ return X86EMUL_EXCEPTION;
}
value &= ~HVM_CR0_GUEST_RESERVED_BITS;
if ( !nestedhvm_vmswitch_in_progress(v) &&
(value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PG )
- goto gpf;
+ return X86EMUL_EXCEPTION;
/* A pvh is not expected to change to real mode. */
if ( is_pvh_domain(d) &&
     (value & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PG | X86_CR0_PE) )
{
printk(XENLOG_G_WARNING
"PVH attempting to turn off PE/PG. CR0:%lx\n", value);
- goto gpf;
+ return X86EMUL_EXCEPTION;
}
if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
!nestedhvm_vmswitch_in_progress(v) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable");
- goto gpf;
+ return X86EMUL_EXCEPTION;
}
HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode");
v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
{
HVM_DBG_LOG(DBG_LEVEL_1, "Guest attempts to clear CR0.PG "
"while CR4.PCIDE=1");
- goto gpf;
+ return X86EMUL_EXCEPTION;
}
/* When CR0.PG is cleared, LMA is cleared immediately. */
}
return X86EMUL_OKAY;
-
- gpf:
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
- return X86EMUL_EXCEPTION;
}
int hvm_set_cr3(unsigned long value, bool_t may_defer)
HVM_DBG_LOG(DBG_LEVEL_1,
"Guest attempts to set reserved bit in CR4: %lx",
value);
- goto gpf;
+ return X86EMUL_EXCEPTION;
}
if ( !(value & X86_CR4_PAE) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
"EFER.LMA is set");
- goto gpf;
+ return X86EMUL_EXCEPTION;
}
if ( is_pvh_vcpu(v) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "32-bit PVH guest cleared CR4.PAE");
- goto gpf;
+ return X86EMUL_EXCEPTION;
}
}
{
HVM_DBG_LOG(DBG_LEVEL_1, "Guest attempts to change CR4.PCIDE from "
"0 to 1 while either EFER.LMA=0 or CR3[11:0]!=000H");
- goto gpf;
+ return X86EMUL_EXCEPTION;
}
if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
}
return X86EMUL_OKAY;
-
- gpf:
- hvm_inject_hw_exception(TRAP_gp_fault, 0);
- return X86EMUL_EXCEPTION;
}
bool_t hvm_virtual_to_linear_addr(
if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt, 0) )
goto out;
- if ( hvm_set_cr3(tss.cr3, 1) )
+ rc = hvm_set_cr3(tss.cr3, 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ if ( rc != X86EMUL_OKAY )
goto out;
regs->rip = tss.eip;
/* CR4 */
v->arch.hvm_vcpu.guest_cr[4] = n1vmcb->_cr4;
rc = hvm_set_cr4(n1vmcb->_cr4, 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
v->arch.hvm_vcpu.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE;
n1vmcb->rflags &= ~X86_EFLAGS_VM;
rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
/* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
}
rc = hvm_set_cr3(n1vmcb->_cr3, 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
/* CR4 */
v->arch.hvm_vcpu.guest_cr[4] = ns_vmcb->_cr4;
rc = hvm_set_cr4(ns_vmcb->_cr4, 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
cr0 = nestedsvm_fpu_vmentry(svm->ns_cr0, ns_vmcb, n1vmcb, n2vmcb);
v->arch.hvm_vcpu.guest_cr[0] = ns_vmcb->_cr0;
rc = hvm_set_cr0(cr0, 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
/* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
} else if (paging_mode_hap(v->domain)) {
*/
/* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
} else {
}
case VMX_CONTROL_REG_ACCESS_TYPE_LMSW: {
unsigned long value = curr->arch.hvm_vcpu.guest_cr[0];
+ int rc;
/* LMSW can (1) set PE; (2) set or clear MP, EM, and TS. */
value = (value & ~(X86_CR0_MP|X86_CR0_EM|X86_CR0_TS)) |
(VMX_CONTROL_REG_ACCESS_DATA(exit_qualification) &
(X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS));
HVMTRACE_LONG_1D(LMSW, value);
- return hvm_set_cr0(value, 1);
+
+ if ( (rc = hvm_set_cr0(value, 1)) == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
+ return rc;
}
default:
BUG();
nvcpu->guest_cr[0] = get_vvmcs(v, CR0_READ_SHADOW);
nvcpu->guest_cr[4] = get_vvmcs(v, CR4_READ_SHADOW);
- hvm_set_cr0(get_vvmcs(v, GUEST_CR0), 1);
- hvm_set_cr4(get_vvmcs(v, GUEST_CR4), 1);
- hvm_set_cr3(get_vvmcs(v, GUEST_CR3), 1);
+
+ rc = hvm_set_cr0(get_vvmcs(v, GUEST_CR0), 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
+ rc = hvm_set_cr4(get_vvmcs(v, GUEST_CR4), 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
+ rc = hvm_set_cr3(get_vvmcs(v, GUEST_CR3), 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
control = get_vvmcs(v, VM_ENTRY_CONTROLS);
if ( control & VM_ENTRY_LOAD_GUEST_PAT )
__vmwrite(vmcs_h2g_field[i].guest_field, r);
}
- hvm_set_cr0(get_vvmcs(v, HOST_CR0), 1);
- hvm_set_cr4(get_vvmcs(v, HOST_CR4), 1);
- hvm_set_cr3(get_vvmcs(v, HOST_CR3), 1);
+ rc = hvm_set_cr0(get_vvmcs(v, HOST_CR0), 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
+ rc = hvm_set_cr4(get_vvmcs(v, HOST_CR4), 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
+ rc = hvm_set_cr3(get_vvmcs(v, HOST_CR3), 1);
+ if ( rc == X86EMUL_EXCEPTION )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
control = get_vvmcs(v, VM_EXIT_CONTROLS);
if ( control & VM_EXIT_LOAD_HOST_PAT )
void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);
-/* These functions all return X86EMUL return codes. */
+/*
+ * These functions all return X86EMUL return codes. For hvm_set_*(), the
+ * caller is responsible for injecting #GP[0] if X86EMUL_EXCEPTION is
+ * returned.
+ */
int hvm_set_efer(uint64_t value);
int hvm_set_cr0(unsigned long value, bool_t may_defer);
int hvm_set_cr3(unsigned long value, bool_t may_defer);
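
For illustration only, and not part of the patch itself: under the convention documented in the comment above, a caller of these setters is now expected to handle the return value roughly as sketched below. The function name example_set_cr4 is hypothetical; hvm_set_cr4() and hvm_inject_hw_exception() are the real interfaces touched by this patch.

    static int example_set_cr4(unsigned long val)
    {
        int rc = hvm_set_cr4(val, 1);

        /* hvm_set_cr4() no longer injects #GP[0] itself; the caller must. */
        if ( rc == X86EMUL_EXCEPTION )
            hvm_inject_hw_exception(TRAP_gp_fault, 0);

        return rc;
    }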