.r13 = v->arch.user_regs.r13,
.r14 = v->arch.user_regs.r14,
.r15 = v->arch.user_regs.r15,
+ .cr0 = v->arch.hvm.guest_cr[0],
+ .cr2 = v->arch.hvm.guest_cr[2],
+ .cr3 = v->arch.hvm.guest_cr[3],
+ .cr4 = v->arch.hvm.guest_cr[4],
.dr0 = v->arch.debugreg[0],
.dr1 = v->arch.debugreg[1],
.dr2 = v->arch.debugreg[2],
.dr3 = v->arch.debugreg[3],
.dr6 = v->arch.debugreg[6],
.dr7 = v->arch.debugreg[7],
+ .msr_efer = v->arch.hvm.guest_efer,
};
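/*
 * Note (illustrative, not part of the patch): the designated initializer
 * above zero-fills every hvm_hw_cpu field that is not named explicitly, as
 * guaranteed by the C standard.  Together with the "= {}" initialization
 * added in the vm_event path further down, this is presumably why the
 * vendor save hooks below can drop their explicit "c->pending_event = 0;"
 * and "c->error_code = 0;" assignments.  A minimal sketch with hypothetical
 * fields:
 *
 *   struct example { int a, b, c; };
 *   struct example e = { .a = 1 };   // e.b == 0 and e.c == 0
 */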
/* Architecture-specific vmcs/vmcb bits */
if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
return -EINVAL;
+ v->arch.hvm.guest_cr[2] = ctxt.cr2;
+ hvm_update_guest_cr(v, 2);
+
if ( hvm_funcs.tsc_scaling.setup )
hvm_funcs.tsc_scaling.setup(v);
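/*
 * For context (a rough sketch, not part of the patch): hvm_update_guest_cr(v, 2)
 * in the common code above is assumed to be the usual thin hvm.h wrapper that
 * forwards to the vendor update_guest_cr hook with no flags, which is how the
 * call reaches svm_update_guest_cr()/vmx_update_guest_cr() shown below.  The
 * three-argument signature is inferred from the calls visible in this patch:
 *
 *   static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
 *   {
 *       hvm_funcs.update_guest_cr(v, cr, 0);
 *   }
 */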
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
- c->cr0 = v->arch.hvm.guest_cr[0];
- c->cr2 = v->arch.hvm.guest_cr[2];
- c->cr3 = v->arch.hvm.guest_cr[3];
- c->cr4 = v->arch.hvm.guest_cr[4];
-
c->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs;
c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp;
c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip;
- c->pending_event = 0;
- c->error_code = 0;
if ( vmcb->eventinj.fields.v &&
hvm_event_needs_reinjection(vmcb->eventinj.fields.type,
vmcb->eventinj.fields.vector) )
}
v->arch.hvm.guest_cr[0] = c->cr0 | X86_CR0_ET;
- v->arch.hvm.guest_cr[2] = c->cr2;
v->arch.hvm.guest_cr[3] = c->cr3;
v->arch.hvm.guest_cr[4] = c->cr4;
svm_update_guest_cr(v, 0, 0);
- svm_update_guest_cr(v, 2, 0);
svm_update_guest_cr(v, 4, 0);
/* Load sysenter MSRs into both VMCB save area and VCPU fields. */
data->msr_star = vmcb->star;
data->msr_cstar = vmcb->cstar;
data->msr_syscall_mask = vmcb->sfmask;
- data->msr_efer = v->arch.hvm.guest_efer;
- data->msr_flags = 0;
}
vmx_vmcs_enter(v);
- c->cr0 = v->arch.hvm.guest_cr[0];
- c->cr2 = v->arch.hvm.guest_cr[2];
- c->cr3 = v->arch.hvm.guest_cr[3];
- c->cr4 = v->arch.hvm.guest_cr[4];
-
- c->msr_efer = v->arch.hvm.guest_efer;
-
__vmread(GUEST_SYSENTER_CS, &c->sysenter_cs);
__vmread(GUEST_SYSENTER_ESP, &c->sysenter_esp);
__vmread(GUEST_SYSENTER_EIP, &c->sysenter_eip);
- c->pending_event = 0;
- c->error_code = 0;
__vmread(VM_ENTRY_INTR_INFO, &ev);
if ( (ev & INTR_INFO_VALID_MASK) &&
hvm_event_needs_reinjection(MASK_EXTR(ev, INTR_INFO_INTR_TYPE_MASK),
vmx_vmcs_enter(v);
- v->arch.hvm.guest_cr[2] = c->cr2;
v->arch.hvm.guest_cr[4] = c->cr4;
vmx_update_guest_cr(v, 0, 0);
- vmx_update_guest_cr(v, 2, 0);
vmx_update_guest_cr(v, 4, 0);
v->arch.hvm.guest_efer = c->msr_efer;
static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
{
data->shadow_gs = v->arch.hvm.vmx.shadow_gs;
- data->msr_flags = 0;
data->msr_lstar = v->arch.hvm.vmx.lstar;
data->msr_star = v->arch.hvm.vmx.star;
data->msr_cstar = v->arch.hvm.vmx.cstar;
#ifdef CONFIG_HVM
const struct cpu_user_regs *regs = guest_cpu_user_regs();
struct segment_register seg;
- struct hvm_hw_cpu ctxt;
+ struct hvm_hw_cpu ctxt = {};
struct vcpu *curr = current;
ASSERT(is_hvm_vcpu(curr));
req->data.regs.x86.rip = regs->rip;
req->data.regs.x86.dr7 = curr->arch.debugreg[7];
- req->data.regs.x86.cr0 = ctxt.cr0;
- req->data.regs.x86.cr2 = ctxt.cr2;
- req->data.regs.x86.cr3 = ctxt.cr3;
- req->data.regs.x86.cr4 = ctxt.cr4;
+ req->data.regs.x86.cr0 = curr->arch.hvm.guest_cr[0];
+ req->data.regs.x86.cr2 = curr->arch.hvm.guest_cr[2];
+ req->data.regs.x86.cr3 = curr->arch.hvm.guest_cr[3];
+ req->data.regs.x86.cr4 = curr->arch.hvm.guest_cr[4];
req->data.regs.x86.sysenter_cs = ctxt.sysenter_cs;
req->data.regs.x86.sysenter_esp = ctxt.sysenter_esp;
req->data.regs.x86.sysenter_eip = ctxt.sysenter_eip;
- req->data.regs.x86.msr_efer = ctxt.msr_efer;
+ req->data.regs.x86.msr_efer = curr->arch.hvm.guest_efer;
req->data.regs.x86.msr_star = ctxt.msr_star;
req->data.regs.x86.msr_lstar = ctxt.msr_lstar;