hvm_update_guest_cr(v, 2);
hvm_update_guest_cr(v, 4);
- v->arch.hvm_svm.guest_sysenter_cs = c->sysenter_cs;
- v->arch.hvm_svm.guest_sysenter_esp = c->sysenter_esp;
- v->arch.hvm_svm.guest_sysenter_eip = c->sysenter_eip;
-
+ /* Load sysenter MSRs into both VMCB save area and VCPU fields. */
+ vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = c->sysenter_cs;
+ vmcb->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp = c->sysenter_esp;
+ vmcb->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip = c->sysenter_eip;
+
if ( paging_mode_hap(v->domain) )
{
vmcb->np_enable = 1;
vmcb->efer = (v->arch.hvm_vcpu.guest_efer | EFER_SVME) & ~EFER_LME;
if ( lma )
vmcb->efer |= EFER_LME;
-
- /*
- * In legacy mode (EFER.LMA=0) we natively support SYSENTER/SYSEXIT with
- * no need for MSR intercepts. When EFER.LMA=1 we must trap and emulate.
- */
- svm_intercept_msr(v, MSR_IA32_SYSENTER_CS, lma);
- svm_intercept_msr(v, MSR_IA32_SYSENTER_ESP, lma);
- svm_intercept_msr(v, MSR_IA32_SYSENTER_EIP, lma);
}
static void svm_sync_vmcb(struct vcpu *v)
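With the LMA-conditional intercept toggling removed above, the SYSENTER values are carried in the VMCB save area, and that part of the save area is not transferred by VMRUN/#VMEXIT: it only moves between hardware and the in-memory VMCB via VMSAVE/VMLOAD. The in-memory copy can therefore be stale while the vcpu's state is live in hardware, which is why the WRMSR hunk below brackets its update with svm_sync_vmcb() and svm_vmload(). A minimal sketch of what svm_sync_vmcb(), whose signature appears just above, is assumed to do; the vmcb_in_sync flag and the svm_vmsave() wrapper are assumptions here, only the function's signature is visible in these hunks:

static void svm_sync_vmcb(struct vcpu *v)
{
    struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;

    /* Nothing to do if the in-memory VMCB already matches hardware. */
    if ( arch_svm->vmcb_in_sync )
        return;

    arch_svm->vmcb_in_sync = 1;

    /* VMSAVE writes the hardware-held save-area state, including the
     * SYSENTER MSRs, back into the in-memory VMCB. */
    svm_vmsave(arch_svm->vmcb);
}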
u32 ecx = regs->ecx;
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ int sync = 0;
+
+ switch ( ecx )
+ {
+ case MSR_IA32_SYSENTER_CS:
+ case MSR_IA32_SYSENTER_ESP:
+ case MSR_IA32_SYSENTER_EIP:
+ sync = 1;
+ break;
+ default:
+ break;
+ }
+
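+    /*
+     * The SYSENTER MSRs live in the VMCB save area, which only
+     * VMSAVE/VMLOAD move to and from hardware.  Sync the in-memory
+     * VMCB before updating those fields below, and VMLOAD the result
+     * back afterwards.
+     */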
+ if ( sync )
+ svm_sync_vmcb(v);
msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
goto gpf;
case MSR_IA32_SYSENTER_CS:
- v->arch.hvm_svm.guest_sysenter_cs = msr_content;
+ vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = msr_content;
break;
case MSR_IA32_SYSENTER_ESP:
- v->arch.hvm_svm.guest_sysenter_esp = msr_content;
+ vmcb->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp = msr_content;
break;
case MSR_IA32_SYSENTER_EIP:
- v->arch.hvm_svm.guest_sysenter_eip = msr_content;
+ vmcb->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip = msr_content;
break;
case MSR_IA32_DEBUGCTLMSR:
break;
}
+ if ( sync )
+ svm_vmload(vmcb);
+
return X86EMUL_OKAY;
gpf:
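Taken together, the hunks fix one ordering for a trapped guest write to any of the three MSRs: refresh the in-memory VMCB, update both the VMCB field and the cached vcpu field, then push the VMCB back into hardware. A consolidated sketch of that flow; write_sysenter_msr() is a hypothetical wrapper, not part of the patch, while svm_sync_vmcb() and svm_vmload() are the helpers the hunks already call:

static void write_sysenter_msr(struct vcpu *v, uint32_t msr, uint64_t val)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

    /* Refresh the in-memory VMCB so we do not update a stale copy. */
    svm_sync_vmcb(v);

    switch ( msr )
    {
    case MSR_IA32_SYSENTER_CS:
        vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = val;
        break;
    case MSR_IA32_SYSENTER_ESP:
        vmcb->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        vmcb->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip = val;
        break;
    }

    /* Push the updated save area back into the hardware-held state. */
    svm_vmload(vmcb);
}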