cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
}
-static void svm_sync_vmcb(struct vcpu *v)
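+/*
+ * Transition the VMCB to new_state.  A stale VMCB is refreshed with VMSAVE
+ * before it is read or modified; a dirty VMCB is flushed into hardware with
+ * VMLOAD before the hardware registers become authoritative again.
+ */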
+static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
{
struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
- if ( arch_svm->vmcb_in_sync )
- return;
-
- arch_svm->vmcb_in_sync = 1;
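+ /* Hardware is to be authoritative: load any pending VMCB modifications into it. */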
+ if ( new_state == vmcb_needs_vmsave )
+ {
+ if ( arch_svm->vmcb_sync_state == vmcb_needs_vmload )
+ {
+ svm_vmload(arch_svm->vmcb);
+ arch_svm->vmcb_sync_state = vmcb_in_sync;
+ }
+ }
+ else
+ {
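+ /* The VMCB is about to be read or modified: refresh it via VMSAVE if stale. */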
+ if ( arch_svm->vmcb_sync_state == vmcb_needs_vmsave )
+ svm_vmsave(arch_svm->vmcb);
- svm_vmsave(arch_svm->vmcb);
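+ /* Don't lose a pending VMLOAD by downgrading the state to vmcb_in_sync. */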
+ if ( arch_svm->vmcb_sync_state != vmcb_needs_vmload )
+ arch_svm->vmcb_sync_state = new_state;
+ }
}
static unsigned int svm_get_cpl(struct vcpu *v)
switch ( seg )
{
case x86_seg_fs ... x86_seg_gs:
- svm_sync_vmcb(v);
+ svm_sync_vmcb(v, vmcb_in_sync);
/* Fallthrough. */
case x86_seg_es ... x86_seg_ds:
break;
case x86_seg_tr:
- svm_sync_vmcb(v);
+ svm_sync_vmcb(v, vmcb_in_sync);
*reg = vmcb->tr;
break;
case x86_seg_ldtr:
- svm_sync_vmcb(v);
+ svm_sync_vmcb(v, vmcb_in_sync);
*reg = vmcb->ldtr;
break;
struct segment_register *reg)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- bool sync = false;
ASSERT((v == current) || !vcpu_runnable(v));
case x86_seg_gs:
case x86_seg_tr:
case x86_seg_ldtr:
- sync = (v == current);
+ if ( v == current )
+ svm_sync_vmcb(v, vmcb_needs_vmload);
break;
default:
return;
}
- if ( sync )
- svm_sync_vmcb(v);
-
switch ( seg )
{
case x86_seg_ss:
ASSERT_UNREACHABLE();
break;
}
-
- if ( sync )
- svm_vmload(vmcb);
}
static unsigned long svm_get_shadow_gs_base(struct vcpu *v)
svm_lwp_save(v);
svm_tsc_ratio_save(v);
- svm_sync_vmcb(v);
+ svm_sync_vmcb(v, vmcb_needs_vmload);
svm_vmload_pa(per_cpu(host_vmcb, cpu));
/* Resume use of ISTs now that the host TR is reinstated. */
svm_restore_dr(v);
svm_vmsave_pa(per_cpu(host_vmcb, cpu));
- svm_vmload(vmcb);
vmcb->cleanbits.bytes = 0;
svm_lwp_load(v);
svm_tsc_ratio_load(v);
hvm_do_resume(v);
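+ /* Load any modified VMCB state back into hardware before entering the guest. */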
+ svm_sync_vmcb(v, vmcb_needs_vmsave);
+
reset_stack_and_jump(svm_asm_do_resume);
}
case MSR_FS_BASE:
case MSR_GS_BASE:
case MSR_SHADOW_GS_BASE:
- svm_sync_vmcb(v);
+ svm_sync_vmcb(v, vmcb_in_sync);
break;
}
int ret, result = X86EMUL_OKAY;
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- bool sync = false;
switch ( msr )
{
case MSR_FS_BASE:
case MSR_GS_BASE:
case MSR_SHADOW_GS_BASE:
- sync = true;
+ svm_sync_vmcb(v, vmcb_needs_vmload);
break;
}
- if ( sync )
- svm_sync_vmcb(v);
-
switch ( msr )
{
case MSR_IA32_SYSENTER_ESP:
break;
}
- if ( sync )
- svm_vmload(vmcb);
-
return result;
gpf:
put_page(page);
/* State in L1 VMCB is stale now */
- v->arch.hvm_svm.vmcb_in_sync = 0;
+ v->arch.hvm_svm.vmcb_sync_state = vmcb_needs_vmsave;
__update_guest_eip(regs, inst_len);
}
bool_t vcpu_guestmode = 0;
struct vlapic *vlapic = vcpu_vlapic(v);
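+ /* The VMExit leaves guest register state in hardware, not in the VMCB. */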
+ v->arch.hvm_svm.vmcb_sync_state = vmcb_needs_vmsave;
hvm_invalidate_regs_fields(regs);
if ( paging_mode_hap(v->domain) )
}
out:
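+ /* VMRUN won't switch this state itself; perform any pending VMLOAD now. */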
+ svm_sync_vmcb(v, vmcb_needs_vmsave);
+
if ( vcpu_guestmode || vlapic_hw_disabled(vlapic) )
return;
intr.fields.tpr =
(vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4;
vmcb_set_vintr(vmcb, intr);
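+ /* No further VMLOAD occurs before the VMRUN, so the VMCB must not be dirty. */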
+ ASSERT(v->arch.hvm_svm.vmcb_sync_state != vmcb_needs_vmload);
}
void svm_trace_vmentry(void)
struct svm_domain {
};
+/*
+ * VMRUN doesn't switch fs/gs/tr/ldtr and SHADOWGS/SYSCALL/SYSENTER state.
+ * Therefore, guest state is in the hardware registers when servicing a
+ * VMExit.
+ *
+ * Immediately after a VMExit, the VMCB is stale, and needs to be brought
+ * into sync by VMSAVE. If state in the VMCB is modified, a VMLOAD is
+ * needed before the following VMRUN.
+ */
+enum vmcb_sync_state {
+ vmcb_in_sync,
+ vmcb_needs_vmsave, /* VMCB out of sync; VMSAVE needed to refresh it. */
+ vmcb_needs_vmload /* VMCB dirty; VMLOAD needed before the next VMRUN. */
+};
+
struct arch_svm_struct {
struct vmcb_struct *vmcb;
u64 vmcb_pa;
unsigned long *msrpm;
int launch_core;
- bool_t vmcb_in_sync; /* VMCB sync'ed with VMSAVE? */
+
+ uint8_t vmcb_sync_state; /* enum vmcb_sync_state */
/* VMCB has a cached instruction from #PF/#NPF Decode Assist? */
uint8_t cached_insn_len; /* Zero if no cached instruction. */