     __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
 }
 
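+/*
+ * Non-register guest state kept in the VMCS: the activity state (active,
+ * HLT, shutdown, wait-for-SIPI), the interruptibility info (STI / MOV SS /
+ * SMI / NMI blocking), pending debug exceptions and, only when virtual
+ * interrupt delivery is available, the guest interrupt status (RVI/SVI).
+ */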
+static void cf_check vmx_get_nonreg_state(struct vcpu *v,
+                                          struct hvm_vcpu_nonreg_state *nrs)
+{
+    vmx_vmcs_enter(v);
+
+    __vmread(GUEST_ACTIVITY_STATE, &nrs->vmx.activity_state);
+    __vmread(GUEST_INTERRUPTIBILITY_INFO, &nrs->vmx.interruptibility_info);
+    __vmread(GUEST_PENDING_DBG_EXCEPTIONS, &nrs->vmx.pending_dbg);
+
+    if ( cpu_has_vmx_virtual_intr_delivery )
+        __vmread(GUEST_INTR_STATUS, &nrs->vmx.interrupt_status);
+
+    vmx_vmcs_exit(v);
+}
+
+static void cf_check vmx_set_nonreg_state(struct vcpu *v,
+                                          struct hvm_vcpu_nonreg_state *nrs)
+{
+    vmx_vmcs_enter(v);
+
+    __vmwrite(GUEST_ACTIVITY_STATE, nrs->vmx.activity_state);
+    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, nrs->vmx.interruptibility_info);
+    __vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, nrs->vmx.pending_dbg);
+
+    if ( cpu_has_vmx_virtual_intr_delivery )
+        __vmwrite(GUEST_INTR_STATUS, nrs->vmx.interrupt_status);
+
+    vmx_vmcs_exit(v);
+}
+
 static void vmx_load_pdptrs(struct vcpu *v)
 {
     uint32_t cr3 = v->arch.hvm.guest_cr[3];
@@ ... @@
     .load_cpu_ctxt        = vmx_load_vmcs_ctxt,
     .get_interrupt_shadow = vmx_get_interrupt_shadow,
     .set_interrupt_shadow = vmx_set_interrupt_shadow,
+    .get_nonreg_state     = vmx_get_nonreg_state,
+    .set_nonreg_state     = vmx_set_nonreg_state,
     .guest_x86_mode       = vmx_guest_x86_mode,
     .get_cpl              = _vmx_get_cpl,
     .get_segment_register = vmx_get_segment_register,
@@ ... @@
 /* update_guest_cr() flags. */
 #define HVM_UPDATE_GUEST_CR3_NOFLUSH 0x00000001
 
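+/*
+ * Vendor-specific non-register vCPU state; only a VT-x variant exists so
+ * far, the anonymous union leaves room for other implementations.
+ */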
+struct hvm_vcpu_nonreg_state {
+    union {
+        struct {
+            uint64_t activity_state;
+            uint64_t interruptibility_info;
+            uint64_t pending_dbg;
+            uint64_t interrupt_status;
+        } vmx;
+    };
+};
+
 /*
  * The hardware virtual machine (HVM) interface abstracts away from the
  * x86/x86_64 CPU virtualization assist specifics. Currently this interface
@@ ... @@
     /* Examine specifics of the guest state. */
     unsigned int (*get_interrupt_shadow)(struct vcpu *v);
     void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);
+    void (*get_nonreg_state)(struct vcpu *v,
+                             struct hvm_vcpu_nonreg_state *nrs);
+    void (*set_nonreg_state)(struct vcpu *v,
+                             struct hvm_vcpu_nonreg_state *nrs);
     int (*guest_x86_mode)(struct vcpu *v);
     unsigned int (*get_cpl)(struct vcpu *v);
     void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
@@ ... @@
         d_->arch.hvm.pi_ops.vcpu_block(v_); \
 })
 
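+/*
+ * The nonreg state hooks are optional (only VT-x implements them), hence
+ * the NULL checks before the alternative_vcall()s.
+ */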
+static inline void hvm_get_nonreg_state(struct vcpu *v,
+                                        struct hvm_vcpu_nonreg_state *nrs)
+{
+    if ( hvm_funcs.get_nonreg_state )
+        alternative_vcall(hvm_funcs.get_nonreg_state, v, nrs);
+}
+
+static inline void hvm_set_nonreg_state(struct vcpu *v,
+                                        struct hvm_vcpu_nonreg_state *nrs)
+{
+    if ( hvm_funcs.set_nonreg_state )
+        alternative_vcall(hvm_funcs.set_nonreg_state, v, nrs);
+}
+
 #else /* CONFIG_HVM */
 
 #define hvm_enabled false
@@ ... @@
     return 0;
 }
 
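+/*
+ * Copy the parent vCPU's non-register state (activity, interruptibility,
+ * pending debug exceptions, interrupt status) over to the fork's vCPU.
+ */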
+static void copy_vcpu_nonreg_state(struct vcpu *d_vcpu, struct vcpu *cd_vcpu)
+{
+    struct hvm_vcpu_nonreg_state nrs = {};
+
+    hvm_get_nonreg_state(d_vcpu, &nrs);
+    hvm_set_nonreg_state(cd_vcpu, &nrs);
+}
+
 static int copy_vcpu_settings(struct domain *cd, const struct domain *d)
 {
     unsigned int i;
@@ ... @@
     for ( i = 0; i < cd->max_vcpus; i++ )
     {
-        const struct vcpu *d_vcpu = d->vcpu[i];
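+        /* No longer const: hvm_get_nonreg_state() needs a mutable vcpu. */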
+        struct vcpu *d_vcpu = d->vcpu[i];
         struct vcpu *cd_vcpu = cd->vcpu[i];
         mfn_t vcpu_info_mfn;
@@ ... @@
         hvm_vmtrace_reset(cd_vcpu);
 
+        copy_vcpu_nonreg_state(d_vcpu, cd_vcpu);
+
         /*
          * TODO: to support VMs with PV interfaces copy additional
          * settings here, such as PV timers.