switch ( reg )
{
case 0:
- rc = hvm_set_cr0(val, 1);
+ rc = hvm_set_cr0(val, true);
break;
case 2:
break;
case 3:
- rc = hvm_set_cr3(val, 1);
+ rc = hvm_set_cr3(val, true);
break;
case 4:
- rc = hvm_set_cr4(val, 1);
+ rc = hvm_set_cr4(val, true);
break;
default:

uint64_t val,
struct x86_emulate_ctxt *ctxt)
{
- int rc = hvm_msr_write_intercept(reg, val, 1);
+ int rc = hvm_msr_write_intercept(reg, val, true);
if ( rc == X86EMUL_EXCEPTION )
x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);

switch ( cr )
{
case 0:
- rc = hvm_set_cr0(val, 1);
+ rc = hvm_set_cr0(val, true);
break;
case 3:
- rc = hvm_set_cr3(val, 1);
+ rc = hvm_set_cr3(val, true);
break;
case 4:
- rc = hvm_set_cr4(val, 1);
+ rc = hvm_set_cr4(val, true);
break;
case 8:

hvm_update_guest_cr(v, cr);
}

-int hvm_set_cr0(unsigned long value, bool_t may_defer)
+int hvm_set_cr0(unsigned long value, bool may_defer)
{
struct vcpu *v = current;
struct domain *d = v->domain;

return X86EMUL_OKAY;
}

-int hvm_set_cr3(unsigned long value, bool_t may_defer)
+int hvm_set_cr3(unsigned long value, bool may_defer)
{
struct vcpu *v = current;
struct page_info *page;

return X86EMUL_UNHANDLEABLE;
}

-int hvm_set_cr4(unsigned long value, bool_t may_defer)
+int hvm_set_cr4(unsigned long value, bool may_defer)
{
struct vcpu *v = current;
unsigned long old_cr;

if ( task_switch_load_seg(x86_seg_ldtr, tss.ldt, new_cpl, 0) )
goto out;
- rc = hvm_set_cr3(tss.cr3, 1);
+ rc = hvm_set_cr3(tss.cr3, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if ( rc != X86EMUL_OKAY )

/* CR4 */
v->arch.hvm.guest_cr[4] = n1vmcb->_cr4;
- rc = hvm_set_cr4(n1vmcb->_cr4, 1);
+ rc = hvm_set_cr4(n1vmcb->_cr4, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)

svm->ns_cr0, v->arch.hvm.guest_cr[0]);
v->arch.hvm.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE;
n1vmcb->rflags &= ~X86_EFLAGS_VM;
- rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, 1);
+ rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)

v->arch.guest_table = pagetable_null();
/* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
}
- rc = hvm_set_cr3(n1vmcb->_cr3, 1);
+ rc = hvm_set_cr3(n1vmcb->_cr3, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)

/* CR4 */
v->arch.hvm.guest_cr[4] = ns_vmcb->_cr4;
- rc = hvm_set_cr4(ns_vmcb->_cr4, 1);
+ rc = hvm_set_cr4(ns_vmcb->_cr4, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)

svm->ns_cr0 = v->arch.hvm.guest_cr[0];
cr0 = nestedsvm_fpu_vmentry(svm->ns_cr0, ns_vmcb, n1vmcb, n2vmcb);
v->arch.hvm.guest_cr[0] = ns_vmcb->_cr0;
- rc = hvm_set_cr0(cr0, 1);
+ rc = hvm_set_cr0(cr0, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)

nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
/* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
- rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
+ rc = hvm_set_cr3(ns_vmcb->_cr3, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)

* we assume it intercepts page faults.
*/
/* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
- rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
+ rc = hvm_set_cr3(ns_vmcb->_cr3, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)

msr_split(regs, msr_content);
}
else
- rc = hvm_msr_write_intercept(regs->ecx, msr_fold(regs), 1);
+ rc = hvm_msr_write_intercept(regs->ecx, msr_fold(regs), true);
if ( rc == X86EMUL_OKAY )
__update_guest_eip(regs, inst_len);

if ( unlikely(w->do_write.cr0) )
{
- if ( hvm_set_cr0(w->cr0, 0) == X86EMUL_EXCEPTION )
+ if ( hvm_set_cr0(w->cr0, false) == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
w->do_write.cr0 = 0;
}

if ( unlikely(w->do_write.cr4) )
{
- if ( hvm_set_cr4(w->cr4, 0) == X86EMUL_EXCEPTION )
+ if ( hvm_set_cr4(w->cr4, false) == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
w->do_write.cr4 = 0;
}

if ( unlikely(w->do_write.cr3) )
{
- if ( hvm_set_cr3(w->cr3, 0) == X86EMUL_EXCEPTION )
+ if ( hvm_set_cr3(w->cr3, false) == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
w->do_write.cr3 = 0;
}

if ( unlikely(w->do_write.msr) )
{
- if ( hvm_msr_write_intercept(w->msr, w->value, 0) ==
+ if ( hvm_msr_write_intercept(w->msr, w->value, false) ==
X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);

(X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS));
HVMTRACE_LONG_1D(LMSW, value);
- if ( (rc = hvm_set_cr0(value, 1)) == X86EMUL_EXCEPTION )
+ if ( (rc = hvm_set_cr0(value, true)) == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
return rc;
}

case EXIT_REASON_MSR_WRITE:
- switch ( hvm_msr_write_intercept(regs->ecx, msr_fold(regs), 1) )
+ switch ( hvm_msr_write_intercept(regs->ecx, msr_fold(regs), true) )
{
case X86EMUL_OKAY:
update_guest_eip(); /* Safe: WRMSR */

nvcpu->guest_cr[0] = get_vvmcs(v, CR0_READ_SHADOW);
nvcpu->guest_cr[4] = get_vvmcs(v, CR4_READ_SHADOW);
- rc = hvm_set_cr0(get_vvmcs(v, GUEST_CR0), 1);
+ rc = hvm_set_cr0(get_vvmcs(v, GUEST_CR0), true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
- rc = hvm_set_cr4(get_vvmcs(v, GUEST_CR4), 1);
+ rc = hvm_set_cr4(get_vvmcs(v, GUEST_CR4), true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
- rc = hvm_set_cr3(get_vvmcs(v, GUEST_CR3), 1);
+ rc = hvm_set_cr3(get_vvmcs(v, GUEST_CR3), true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL )
{
rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
- get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
+ get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), false);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
}

__vmwrite(vmcs_h2g_field[i].guest_field, r);
}
- rc = hvm_set_cr0(get_vvmcs(v, HOST_CR0), 1);
+ rc = hvm_set_cr0(get_vvmcs(v, HOST_CR0), true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
- rc = hvm_set_cr4(get_vvmcs(v, HOST_CR4), 1);
+ rc = hvm_set_cr4(get_vvmcs(v, HOST_CR4), true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
- rc = hvm_set_cr3(get_vvmcs(v, HOST_CR3), 1);
+ rc = hvm_set_cr3(get_vvmcs(v, HOST_CR3), true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
{
rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
- get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
+ get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
}

* returned.
*/
int hvm_set_efer(uint64_t value);
-int hvm_set_cr0(unsigned long value, bool_t may_defer);
-int hvm_set_cr3(unsigned long value, bool_t may_defer);
-int hvm_set_cr4(unsigned long value, bool_t may_defer);
+int hvm_set_cr0(unsigned long value, bool may_defer);
+int hvm_set_cr3(unsigned long value, bool may_defer);
+int hvm_set_cr4(unsigned long value, bool may_defer);
int hvm_descriptor_access_intercept(uint64_t exit_info,
uint64_t vmx_exit_qualification,
unsigned int descriptor, bool is_write);