case 0x8000001c:
if ( (v->arch.xcr0 & X86_XCR0_LWP) && cpu_has_svm )
/* Turn on available bit and other features specified in lwp_cfg. */
- res->a = (res->d & v->arch.hvm_svm.guest_lwp_cfg) | 1;
+ res->a = (res->d & v->arch.hvm.svm.guest_lwp_cfg) | 1;
break;
}
}
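The rename running through this patch, arch.hvm_svm to arch.hvm.svm and arch.hvm_vmx to arch.hvm.vmx, means the vendor-specific vCPU state is now reached through the common arch.hvm structure. A minimal sketch of the layout these accesses imply; the struct names appear in the hunks below, but the anonymous union itself is an assumption rather than something shown here:

    struct hvm_vcpu {
        /* ... common HVM vCPU state ... */
        union {
            struct vmx_vcpu vmx;   /* reached as v->arch.hvm.vmx.* */
            struct svm_vcpu svm;   /* reached as v->arch.hvm.svm.* */
        };
    };

With the union anonymous, v->arch.hvm.svm.vmcb and v->arch.hvm.vmx.vmcs_pa resolve without any extra member name.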
void svm_asid_handle_vmrun(void)
{
struct vcpu *curr = current;
- struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
struct hvm_vcpu_asid *p_asid =
nestedhvm_vcpu_in_guestmode(curr)
? &vcpu_nestedhvm(curr).nv_n2asid : &curr->arch.hvm.n1asid;
static unsigned long svm_nextrip_insn_length(struct vcpu *v)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
if ( !cpu_has_svm_nrips )
return 0;
int __get_instruction_length_from_list(struct vcpu *v,
const enum instruction_index *list, unsigned int list_count)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
struct hvm_emulate_ctxt ctxt;
struct x86_emulate_state *state;
unsigned long inst_len, j;
static void svm_inject_nmi(struct vcpu *v)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
eventinj_t event;
static void svm_inject_extint(struct vcpu *v, int vector)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
eventinj_t event;
event.bytes = 0;
static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
vintr_t intr;
void svm_intr_assist(void)
{
struct vcpu *v = current;
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
struct hvm_intack intack;
enum hvm_intblk intblk;
* of l1 vmcb page.
*/
if (nv->nv_n1vmcx)
- v->arch.hvm_svm.vmcb = nv->nv_n1vmcx;
+ v->arch.hvm.svm.vmcb = nv->nv_n1vmcx;
if (svm->ns_cached_msrpm) {
free_xenheap_pages(svm->ns_cached_msrpm,
*/
/* switch vmcb to l1 guest's vmcb */
- v->arch.hvm_svm.vmcb = n1vmcb;
- v->arch.hvm_svm.vmcb_pa = nv->nv_n1vmcx_pa;
+ v->arch.hvm.svm.vmcb = n1vmcb;
+ v->arch.hvm.svm.vmcb_pa = nv->nv_n1vmcx_pa;
/* EFER */
v->arch.hvm.guest_efer = n1vmcb->_efer;
static int nsvm_vmrun_permissionmap(struct vcpu *v, bool_t viopm)
{
- struct svm_vcpu *arch_svm = &v->arch.hvm_svm;
+ struct svm_vcpu *arch_svm = &v->arch.hvm.svm;
struct nestedsvm *svm = &vcpu_nestedsvm(v);
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
struct vmcb_struct *ns_vmcb = nv->nv_vvmcx;
nv->nv_ioport80 = ioport_80;
nv->nv_ioportED = ioport_ed;
- /* v->arch.hvm_svm.msrpm has type unsigned long, thus
- * BYTES_PER_LONG.
- */
+ /* v->arch.hvm.svm.msrpm has type unsigned long, thus BYTES_PER_LONG. */
for (i = 0; i < MSRPM_SIZE / BYTES_PER_LONG; i++)
svm->ns_merged_msrpm[i] = arch_svm->msrpm[i] | ns_msrpm_ptr[i];
}
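On the merge loop above: in SVM's MSR permission map a set bit requests an intercept, so OR-ing Xen's own bitmap with the one supplied by the L1 hypervisor keeps the more restrictive policy; an MSR access by the L2 guest exits whenever either side asked for it. A minimal illustrative sketch, with hypothetical names:

    /* A set bit means "intercept this MSR access". */
    static void merge_msr_permission_maps(unsigned long *merged,
                                          const unsigned long *host,
                                          const unsigned long *nested,
                                          unsigned int nr_longs)
    {
        unsigned int i;

        for ( i = 0; i < nr_longs; i++ )
            merged[i] = host[i] | nested[i]; /* intercept if either asks */
    }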
/* switch vmcb to shadow vmcb */
- v->arch.hvm_svm.vmcb = nv->nv_n2vmcx;
- v->arch.hvm_svm.vmcb_pa = nv->nv_n2vmcx_pa;
+ v->arch.hvm.svm.vmcb = nv->nv_n2vmcx;
+ v->arch.hvm.svm.vmcb_pa = nv->nv_n2vmcx_pa;
ret = nsvm_vmcb_prepare4vmrun(v, regs);
if (ret) {
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
struct nestedsvm *svm = &vcpu_nestedsvm(v);
struct vmcb_struct *ns_vmcb;
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
if ( vmcb->_vintr.fields.vgif_enable )
ASSERT(vmcb->_vintr.fields.vgif == 0);
uint64_t exitcode, uint64_t exitinfo1, uint64_t exitinfo2)
{
struct nestedsvm *svm = &vcpu_nestedsvm(v);
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
if ( vmcb->_vintr.fields.vgif_enable )
vmcb->_vintr.fields.vgif = 0;
nv = &vcpu_nestedhvm(v);
svm = &vcpu_nestedsvm(v);
- ASSERT(v->arch.hvm_svm.vmcb != NULL);
+ ASSERT(v->arch.hvm.svm.vmcb != NULL);
ASSERT(nv->nv_n1vmcx != NULL);
ASSERT(nv->nv_n2vmcx != NULL);
ASSERT(nv->nv_n1vmcx_pa != INVALID_PADDR);
nestedsvm_gif_isset(struct vcpu *v)
{
struct nestedsvm *svm = &vcpu_nestedsvm(v);
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
/* get the vmcb gif value if using vgif */
if ( vmcb->_vintr.fields.vgif_enable )
void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
unsigned int inst_len;
uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
vintr_t intr;
*/
void svm_nested_features_on_efer_update(struct vcpu *v)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
struct nestedsvm *svm = &vcpu_nestedsvm(v);
u32 general2_intercepts;
vintr_t vintr;
/* Only crash the guest if the problem originates in kernel mode. */
static void svm_crash_or_fault(struct vcpu *v)
{
- if ( vmcb_get_cpl(v->arch.hvm_svm.vmcb) )
+ if ( vmcb_get_cpl(v->arch.hvm.svm.vmcb) )
hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
else
domain_crash(v->domain);
regs->rip += inst_len;
regs->eflags &= ~X86_EFLAGS_RF;
- curr->arch.hvm_svm.vmcb->interrupt_shadow = 0;
+ curr->arch.hvm.svm.vmcb->interrupt_shadow = 0;
if ( regs->eflags & X86_EFLAGS_TF )
hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
unsigned long *msr_bit;
const struct domain *d = v->domain;
- msr_bit = svm_msrbit(v->arch.hvm_svm.msrpm, msr);
+ msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
BUG_ON(msr_bit == NULL);
msr &= 0x1fff;
for_each_vcpu ( d, v )
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
uint32_t intercepts = vmcb_get_general2_intercepts(vmcb);
if ( enable )
static void svm_save_dr(struct vcpu *v)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
unsigned int flag_dr_dirty = v->arch.hvm.flag_dr_dirty;
if ( !flag_dr_dirty )
svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_RW);
svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_RW);
- rdmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[0]);
- rdmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[1]);
- rdmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[2]);
- rdmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[3]);
+ rdmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[0]);
+ rdmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[1]);
+ rdmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[2]);
+ rdmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[3]);
}
v->arch.debugreg[0] = read_debugreg(0);
svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_NONE);
svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_NONE);
- wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[0]);
- wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[1]);
- wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[2]);
- wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[3]);
+ wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[0]);
+ wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[1]);
+ wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[2]);
+ wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[3]);
}
write_debugreg(0, v->arch.debugreg[0]);
*/
static void svm_restore_dr(struct vcpu *v)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
if ( unlikely(v->arch.debugreg[7] & DR7_ACTIVE_MASK) )
__restore_debug_registers(vmcb, v);
}
static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
c->cr0 = v->arch.hvm.guest_cr[0];
c->cr2 = v->arch.hvm.guest_cr[2];
c->cr3 = v->arch.hvm.guest_cr[3];
c->cr4 = v->arch.hvm.guest_cr[4];
- c->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs;
- c->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp;
- c->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip;
+ c->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs;
+ c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp;
+ c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip;
c->pending_event = 0;
c->error_code = 0;
static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
{
struct page_info *page = NULL;
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
if ( c->pending_valid )
svm_update_guest_cr(v, 4, 0);
/* Load sysenter MSRs into both VMCB save area and VCPU fields. */
- vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = c->sysenter_cs;
- vmcb->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp = c->sysenter_esp;
- vmcb->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip = c->sysenter_eip;
+ vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = c->sysenter_cs;
+ vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = c->sysenter_esp;
+ vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = c->sysenter_eip;
if ( paging_mode_hap(v->domain) )
{
static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
data->shadow_gs = vmcb->kerngsbase;
data->msr_lstar = vmcb->lstar;
static void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
vmcb->kerngsbase = data->shadow_gs;
vmcb->lstar = data->msr_lstar;
{
if ( boot_cpu_has(X86_FEATURE_DBEXT) )
{
- ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[0];
+ ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[0];
if ( ctxt->msr[ctxt->count].val )
ctxt->msr[ctxt->count++].index = MSR_AMD64_DR0_ADDRESS_MASK;
- ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[1];
+ ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[1];
if ( ctxt->msr[ctxt->count].val )
ctxt->msr[ctxt->count++].index = MSR_AMD64_DR1_ADDRESS_MASK;
- ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[2];
+ ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[2];
if ( ctxt->msr[ctxt->count].val )
ctxt->msr[ctxt->count++].index = MSR_AMD64_DR2_ADDRESS_MASK;
- ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[3];
+ ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[3];
if ( ctxt->msr[ctxt->count].val )
ctxt->msr[ctxt->count++].index = MSR_AMD64_DR3_ADDRESS_MASK;
}
else if ( ctxt->msr[i].val >> 32 )
err = -EDOM;
else
- v->arch.hvm_svm.dr_mask[0] = ctxt->msr[i].val;
+ v->arch.hvm.svm.dr_mask[0] = ctxt->msr[i].val;
break;
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
else if ( ctxt->msr[i].val >> 32 )
err = -EDOM;
else
- v->arch.hvm_svm.dr_mask[idx - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
+ v->arch.hvm.svm.dr_mask[idx - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
ctxt->msr[i].val;
break;
static unsigned int svm_get_interrupt_shadow(struct vcpu *v)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
unsigned int intr_shadow = 0;
if ( vmcb->interrupt_shadow )
static void svm_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
vmcb->interrupt_shadow =
static int svm_guest_x86_mode(struct vcpu *v)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
if ( unlikely(!(v->arch.hvm.guest_cr[0] & X86_CR0_PE)) )
return 0;
void svm_update_guest_cr(struct vcpu *v, unsigned int cr, unsigned int flags)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
uint64_t value;
switch ( cr )
static void svm_update_guest_efer(struct vcpu *v)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- bool_t lma = !!(v->arch.hvm.guest_efer & EFER_LMA);
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
+ bool lma = v->arch.hvm.guest_efer & EFER_LMA;
uint64_t new_efer;
new_efer = (v->arch.hvm.guest_efer | EFER_SVME) & ~EFER_LME;
static void svm_cpuid_policy_changed(struct vcpu *v)
{
- struct svm_vcpu *svm = &v->arch.hvm_svm;
+ struct svm_vcpu *svm = &v->arch.hvm.svm;
struct vmcb_struct *vmcb = svm->vmcb;
const struct cpuid_policy *cp = v->domain->arch.cpuid;
u32 bitmap = vmcb_get_exception_intercepts(vmcb);
static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
{
- struct svm_vcpu *svm = &v->arch.hvm_svm;
+ struct svm_vcpu *svm = &v->arch.hvm.svm;
if ( new_state == vmcb_needs_vmsave )
{
static unsigned int svm_get_cpl(struct vcpu *v)
{
- return vmcb_get_cpl(v->arch.hvm_svm.vmcb);
+ return vmcb_get_cpl(v->arch.hvm.svm.vmcb);
}
static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
struct segment_register *reg)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
ASSERT((v == current) || !vcpu_runnable(v));
static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
struct segment_register *reg)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
ASSERT((v == current) || !vcpu_runnable(v));
static unsigned long svm_get_shadow_gs_base(struct vcpu *v)
{
- return v->arch.hvm_svm.vmcb->kerngsbase;
+ return v->arch.hvm.svm.vmcb->kerngsbase;
}
static int svm_set_guest_pat(struct vcpu *v, u64 gpat)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
if ( !paging_mode_hap(v->domain) )
return 0;
static int svm_get_guest_pat(struct vcpu *v, u64 *gpat)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
if ( !paging_mode_hap(v->domain) )
return 0;
static void svm_set_tsc_offset(struct vcpu *v, u64 offset, u64 at_tsc)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
struct vmcb_struct *n1vmcb, *n2vmcb;
uint64_t n2_tsc_offset = 0;
struct domain *d = v->domain;
static void svm_set_rdtsc_exiting(struct vcpu *v, bool_t enable)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
u32 general2_intercepts = vmcb_get_general2_intercepts(vmcb);
static void svm_set_descriptor_access_exiting(struct vcpu *v, bool enable)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
u32 mask = GENERAL1_INTERCEPT_IDTR_READ | GENERAL1_INTERCEPT_GDTR_READ
| GENERAL1_INTERCEPT_LDTR_READ | GENERAL1_INTERCEPT_TR_READ
static unsigned int svm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- unsigned int len = v->arch.hvm_svm.cached_insn_len;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
+ unsigned int len = v->arch.hvm.svm.cached_insn_len;
if ( len != 0 )
{
/* Latch and clear the cached instruction. */
memcpy(buf, vmcb->guest_ins, MAX_INST_LEN);
- v->arch.hvm_svm.cached_insn_len = 0;
+ v->arch.hvm.svm.cached_insn_len = 0;
}
return len;
ack_APIC_irq();
vlapic_set_irq(
vcpu_vlapic(curr),
- (curr->arch.hvm_svm.guest_lwp_cfg >> 40) & 0xff,
+ (curr->arch.hvm.svm.guest_lwp_cfg >> 40) & 0xff,
0);
}
static inline void svm_lwp_save(struct vcpu *v)
{
/* Don't mess up with other guests. Disable LWP for next VCPU. */
- if ( v->arch.hvm_svm.guest_lwp_cfg )
+ if ( v->arch.hvm.svm.guest_lwp_cfg )
{
wrmsrl(MSR_AMD64_LWP_CFG, 0x0);
wrmsrl(MSR_AMD64_LWP_CBADDR, 0x0);
static inline void svm_lwp_load(struct vcpu *v)
{
/* Only LWP_CFG is reloaded. LWP_CBADDR will be reloaded via xrstor. */
- if ( v->arch.hvm_svm.guest_lwp_cfg )
- wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.cpu_lwp_cfg);
+ if ( v->arch.hvm.svm.guest_lwp_cfg )
+ wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm.svm.cpu_lwp_cfg);
}
/* Update LWP_CFG MSR (0xc0000105). Return -1 if error; otherwise returns 0. */
if ( msr_low & ~v->domain->arch.cpuid->extd.raw[0x1c].d )
return -1;
- v->arch.hvm_svm.guest_lwp_cfg = msr_content;
+ v->arch.hvm.svm.guest_lwp_cfg = msr_content;
/* setup interrupt handler if needed */
if ( (msr_content & 0x80000000) && ((msr_content >> 40) & 0xff) )
{
alloc_direct_apic_vector(&lwp_intr_vector, svm_lwp_interrupt);
- v->arch.hvm_svm.cpu_lwp_cfg = (msr_content & 0xffff00ffffffffffULL)
+ v->arch.hvm.svm.cpu_lwp_cfg = (msr_content & 0xffff00ffffffffffULL)
| ((uint64_t)lwp_intr_vector << 40);
}
else
{
/* otherwise disable it */
- v->arch.hvm_svm.cpu_lwp_cfg = msr_content & 0xffff00ff7fffffffULL;
+ v->arch.hvm.svm.cpu_lwp_cfg = msr_content & 0xffff00ff7fffffffULL;
}
- wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.cpu_lwp_cfg);
+ wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm.svm.cpu_lwp_cfg);
/* track nonlazy state if LWP_CFG is non-zero. */
v->arch.nonlazy_xstate_used = !!(msr_content);
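A note on the two hex masks above: bits 47:40 of LWP_CFG carry the interrupt vector, which Xen replaces with its own lwp_intr_vector so the LWP threshold interrupt arrives at the hypervisor, and the disable path additionally clears bit 31, the bit tested earlier to decide whether LWP interrupts are being enabled. The same logic with named constants; the names are illustrative, not taken from a Xen header:

    #define LWP_CFG_VECTOR_MASK  (0xffULL << 40)  /* bits 47:40: interrupt vector */
    #define LWP_CFG_INT_ENABLE   (1ULL << 31)     /* tested as msr_content & 0x80000000 */

    /* Enable: keep the guest's settings but substitute Xen's vector. */
    cpu_lwp_cfg = (msr_content & ~LWP_CFG_VECTOR_MASK) |
                  ((uint64_t)lwp_intr_vector << 40);

    /* Disable: clear the vector field and the interrupt-enable bit. */
    cpu_lwp_cfg = msr_content & ~(LWP_CFG_VECTOR_MASK | LWP_CFG_INT_ENABLE);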
static void svm_ctxt_switch_to(struct vcpu *v)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
int cpu = smp_processor_id();
/*
static void noreturn svm_do_resume(struct vcpu *v)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
bool debug_state = (v->domain->debugger_attached ||
v->domain->arch.monitor.software_breakpoint_enabled ||
v->domain->arch.monitor.debug_exception_enabled);
: (intercepts & ~(1U << TRAP_int3)));
}
- if ( v->arch.hvm_svm.launch_core != smp_processor_id() )
+ if ( v->arch.hvm.svm.launch_core != smp_processor_id() )
{
- v->arch.hvm_svm.launch_core = smp_processor_id();
+ v->arch.hvm.svm.launch_core = smp_processor_id();
hvm_migrate_timers(v);
hvm_migrate_pirqs(v);
/* Migrating to another ASID domain. Request a new ASID. */
void svm_vmenter_helper(const struct cpu_user_regs *regs)
{
struct vcpu *curr = current;
- struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
svm_asid_handle_vmrun();
{
int rc;
- v->arch.hvm_svm.launch_core = -1;
+ v->arch.hvm.svm.launch_core = -1;
if ( (rc = svm_create_vmcb(v)) != 0 )
{
static void svm_emul_swint_injection(struct x86_event *event)
{
struct vcpu *curr = current;
- const struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+ const struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
const struct cpu_user_regs *regs = guest_cpu_user_regs();
unsigned int trap = event->vector, type = event->type;
unsigned int fault = TRAP_gp_fault, ec = 0;
static void svm_inject_event(const struct x86_event *event)
{
struct vcpu *curr = current;
- struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
eventinj_t eventinj = vmcb->eventinj;
struct x86_event _event = *event;
struct cpu_user_regs *regs = guest_cpu_user_regs();
static int svm_event_pending(struct vcpu *v)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
return vmcb->eventinj.fields.v;
}
static void svm_fpu_dirty_intercept(void)
{
struct vcpu *v = current;
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
struct vmcb_struct *n1vmcb = vcpu_nestedhvm(v).nv_n1vmcx;
svm_fpu_enter(v);
int ret;
struct vcpu *v = current;
const struct domain *d = v->domain;
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
switch ( msr )
{
switch ( msr )
{
case MSR_IA32_SYSENTER_CS:
- *msr_content = v->arch.hvm_svm.guest_sysenter_cs;
+ *msr_content = v->arch.hvm.svm.guest_sysenter_cs;
break;
case MSR_IA32_SYSENTER_ESP:
- *msr_content = v->arch.hvm_svm.guest_sysenter_esp;
+ *msr_content = v->arch.hvm.svm.guest_sysenter_esp;
break;
case MSR_IA32_SYSENTER_EIP:
- *msr_content = v->arch.hvm_svm.guest_sysenter_eip;
+ *msr_content = v->arch.hvm.svm.guest_sysenter_eip;
break;
case MSR_STAR:
break;
case MSR_AMD64_LWP_CFG:
- *msr_content = v->arch.hvm_svm.guest_lwp_cfg;
+ *msr_content = v->arch.hvm.svm.guest_lwp_cfg;
break;
case MSR_K7_PERFCTR0:
case MSR_AMD64_DR0_ADDRESS_MASK:
if ( !v->domain->arch.cpuid->extd.dbext )
goto gpf;
- *msr_content = v->arch.hvm_svm.dr_mask[0];
+ *msr_content = v->arch.hvm.svm.dr_mask[0];
break;
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
if ( !v->domain->arch.cpuid->extd.dbext )
goto gpf;
*msr_content =
- v->arch.hvm_svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
+ v->arch.hvm.svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
break;
case MSR_AMD_OSVW_ID_LENGTH:
int ret, result = X86EMUL_OKAY;
struct vcpu *v = current;
struct domain *d = v->domain;
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
switch ( msr )
{
switch ( msr )
{
case MSR_IA32_SYSENTER_ESP:
- vmcb->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp = msr_content;
+ vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = msr_content;
break;
case MSR_IA32_SYSENTER_EIP:
- vmcb->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip = msr_content;
+ vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = msr_content;
break;
case MSR_LSTAR:
break;
case MSR_IA32_SYSENTER_CS:
- vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = msr_content;
+ vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = msr_content;
break;
case MSR_STAR:
case MSR_AMD64_DR0_ADDRESS_MASK:
if ( !v->domain->arch.cpuid->extd.dbext || (msr_content >> 32) )
goto gpf;
- v->arch.hvm_svm.dr_mask[0] = msr_content;
+ v->arch.hvm.svm.dr_mask[0] = msr_content;
break;
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
if ( !v->domain->arch.cpuid->extd.dbext || (msr_content >> 32) )
goto gpf;
- v->arch.hvm_svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
+ v->arch.hvm.svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
msr_content;
break;
static void svm_do_msr_access(struct cpu_user_regs *regs)
{
struct vcpu *curr = current;
- bool rdmsr = curr->arch.hvm_svm.vmcb->exitinfo1 == 0;
+ bool rdmsr = curr->arch.hvm.svm.vmcb->exitinfo1 == 0;
int rc, inst_len = __get_instruction_length(
curr, rdmsr ? INSTR_RDMSR : INSTR_WRMSR);
put_page(page);
/* State in L1 VMCB is stale now */
- v->arch.hvm_svm.vmcb_sync_state = vmcb_needs_vmsave;
+ v->arch.hvm.svm.vmcb_sync_state = vmcb_needs_vmsave;
__update_guest_eip(regs, inst_len);
}
static bool svm_get_pending_event(struct vcpu *v, struct x86_event *info)
{
- const struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ const struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
if ( vmcb->eventinj.fields.v )
return false;
{
uint64_t exit_reason;
struct vcpu *v = current;
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
eventinj_t eventinj;
int inst_len, rc;
vintr_t intr;
regs->rdx, regs->rsi, regs->rdi);
if ( cpu_has_svm_decode )
- v->arch.hvm_svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
+ v->arch.hvm.svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
rc = paging_fault(va, regs);
- v->arch.hvm_svm.cached_insn_len = 0;
+ v->arch.hvm.svm.cached_insn_len = 0;
if ( rc )
{
case VMEXIT_NPF:
perfc_incra(svmexits, VMEXIT_NPF_PERFC);
if ( cpu_has_svm_decode )
- v->arch.hvm_svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
+ v->arch.hvm.svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
rc = vmcb->exitinfo1 & PFEC_page_present
? p2m_pt_handle_deferred_changes(vmcb->exitinfo2) : 0;
if ( rc >= 0 )
v, rc, vmcb->exitinfo2, vmcb->exitinfo1);
domain_crash(v->domain);
}
- v->arch.hvm_svm.cached_insn_len = 0;
+ v->arch.hvm.svm.cached_insn_len = 0;
break;
case VMEXIT_IRET: {
/* This function can directly access fields which are covered by clean bits. */
static int construct_vmcb(struct vcpu *v)
{
- struct svm_vcpu *svm = &v->arch.hvm_svm;
+ struct svm_vcpu *svm = &v->arch.hvm.svm;
struct vmcb_struct *vmcb = svm->vmcb;
/* Build-time check of the size of VMCB AMD structure. */
int svm_create_vmcb(struct vcpu *v)
{
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
- struct svm_vcpu *svm = &v->arch.hvm_svm;
+ struct svm_vcpu *svm = &v->arch.hvm.svm;
int rc;
if ( (nv->nv_n1vmcx == NULL) &&
void svm_destroy_vmcb(struct vcpu *v)
{
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
- struct svm_vcpu *svm = &v->arch.hvm_svm;
+ struct svm_vcpu *svm = &v->arch.hvm.svm;
if ( nv->nv_n1vmcx != NULL )
free_vmcb(nv->nv_n1vmcx);
for_each_vcpu ( d, v )
{
printk("\tVCPU %d\n", v->vcpu_id);
- svm_vmcb_dump("key_handler", v->arch.hvm_svm.vmcb);
+ svm_vmcb_dump("key_handler", v->arch.hvm.svm.vmcb);
}
}
ctl = CPU_BASED_VIRTUAL_NMI_PENDING;
}
- if ( !(v->arch.hvm_vmx.exec_control & ctl) )
+ if ( !(v->arch.hvm.vmx.exec_control & ctl) )
{
- v->arch.hvm_vmx.exec_control |= ctl;
+ v->arch.hvm.vmx.exec_control |= ctl;
vmx_update_cpu_exec_control(v);
}
}
* Unfortunately, interrupt blocking in L2 won't work with simple
* intr_window_open (which depends on L2's IF). To solve this,
* the following algorithm can be used:
- * v->arch.hvm_vmx.exec_control.VIRTUAL_INTR_PENDING now denotes
+ * v->arch.hvm.vmx.exec_control.VIRTUAL_INTR_PENDING now denotes
* only L0 control, physical control may be different from it.
* - if in L1, it behaves normally, intr window is written
* to physical control as it is
/* Block event injection when single step with MTF. */
if ( unlikely(v->arch.hvm.single_step) )
{
- v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
+ v->arch.hvm.vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
vmx_update_cpu_exec_control(v);
return;
}
printk("\n");
}
- pi_desc = &v->arch.hvm_vmx.pi_desc;
+ pi_desc = &v->arch.hvm.vmx.pi_desc;
if ( pi_desc )
{
word = (const void *)&pi_desc->pir;
intack.vector;
__vmwrite(GUEST_INTR_STATUS, status);
- n = ARRAY_SIZE(v->arch.hvm_vmx.eoi_exit_bitmap);
- while ( (i = find_first_bit(&v->arch.hvm_vmx.eoi_exitmap_changed,
+ n = ARRAY_SIZE(v->arch.hvm.vmx.eoi_exit_bitmap);
+ while ( (i = find_first_bit(&v->arch.hvm.vmx.eoi_exitmap_changed,
n)) < n )
{
- clear_bit(i, &v->arch.hvm_vmx.eoi_exitmap_changed);
- __vmwrite(EOI_EXIT_BITMAP(i), v->arch.hvm_vmx.eoi_exit_bitmap[i]);
+ clear_bit(i, &v->arch.hvm.vmx.eoi_exitmap_changed);
+ __vmwrite(EOI_EXIT_BITMAP(i), v->arch.hvm.vmx.eoi_exit_bitmap[i]);
}
pt_intr_post(v, intack);
intr_info = 0;
}
- curr->arch.hvm_vmx.vmx_emulate = 1;
- while ( curr->arch.hvm_vmx.vmx_emulate &&
+ curr->arch.hvm.vmx.vmx_emulate = 1;
+ while ( curr->arch.hvm.vmx.vmx_emulate &&
!softirq_pending(smp_processor_id()) )
{
/*
* in real mode, because we don't emulate protected-mode IDT vectoring.
*/
if ( unlikely(!(++emulations & 15)) &&
- curr->arch.hvm_vmx.vmx_realmode &&
+ curr->arch.hvm.vmx.vmx_realmode &&
hvm_local_events_need_delivery(curr) )
break;
break;
/* Stop emulating unless our segment state is not safe */
- if ( curr->arch.hvm_vmx.vmx_realmode )
- curr->arch.hvm_vmx.vmx_emulate =
- (curr->arch.hvm_vmx.vm86_segment_mask != 0);
+ if ( curr->arch.hvm.vmx.vmx_realmode )
+ curr->arch.hvm.vmx.vmx_emulate =
+ (curr->arch.hvm.vmx.vm86_segment_mask != 0);
else
- curr->arch.hvm_vmx.vmx_emulate =
+ curr->arch.hvm.vmx.vmx_emulate =
((hvmemul_ctxt.seg_reg[x86_seg_cs].sel & 3)
|| (hvmemul_ctxt.seg_reg[x86_seg_ss].sel & 3));
}
/* Need to emulate next time if we've started an IO operation */
if ( vio->io_req.state != STATE_IOREQ_NONE )
- curr->arch.hvm_vmx.vmx_emulate = 1;
+ curr->arch.hvm.vmx.vmx_emulate = 1;
- if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
+ if ( !curr->arch.hvm.vmx.vmx_emulate && !curr->arch.hvm.vmx.vmx_realmode )
{
/*
* Cannot enter protected mode with bogus selector RPLs and DPLs.
static void __vmx_clear_vmcs(void *info)
{
struct vcpu *v = info;
- struct vmx_vcpu *vmx = &v->arch.hvm_vmx;
+ struct vmx_vcpu *vmx = &v->arch.hvm.vmx;
/* Otherwise we can nest (vmx_cpu_down() vs. vmx_clear_vmcs()). */
ASSERT(!local_irq_is_enabled());
static void vmx_clear_vmcs(struct vcpu *v)
{
- int cpu = v->arch.hvm_vmx.active_cpu;
+ int cpu = v->arch.hvm.vmx.active_cpu;
if ( cpu != -1 )
on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1);
local_irq_save(flags);
- if ( v->arch.hvm_vmx.active_cpu == -1 )
+ if ( v->arch.hvm.vmx.active_cpu == -1 )
{
- list_add(&v->arch.hvm_vmx.active_list, &this_cpu(active_vmcs_list));
- v->arch.hvm_vmx.active_cpu = smp_processor_id();
+ list_add(&v->arch.hvm.vmx.active_list, &this_cpu(active_vmcs_list));
+ v->arch.hvm.vmx.active_cpu = smp_processor_id();
}
- ASSERT(v->arch.hvm_vmx.active_cpu == smp_processor_id());
+ ASSERT(v->arch.hvm.vmx.active_cpu == smp_processor_id());
- __vmptrld(v->arch.hvm_vmx.vmcs_pa);
- this_cpu(current_vmcs) = v->arch.hvm_vmx.vmcs_pa;
+ __vmptrld(v->arch.hvm.vmx.vmcs_pa);
+ this_cpu(current_vmcs) = v->arch.hvm.vmx.vmcs_pa;
local_irq_restore(flags);
}
{
/*
* As we may be running with interrupts disabled, we can't acquire
- * v->arch.hvm_vmx.vmcs_lock here. However, with interrupts disabled
+ * v->arch.hvm.vmx.vmcs_lock here. However, with interrupts disabled
* the VMCS can't be taken away from us anymore if we still own it.
*/
ASSERT(v->is_running || !local_irq_is_enabled());
- if ( v->arch.hvm_vmx.vmcs_pa == this_cpu(current_vmcs) )
+ if ( v->arch.hvm.vmx.vmcs_pa == this_cpu(current_vmcs) )
return;
vmx_load_vmcs(v);
while ( !list_empty(active_vmcs_list) )
__vmx_clear_vmcs(list_entry(active_vmcs_list->next,
- struct vcpu, arch.hvm_vmx.active_list));
+ struct vcpu, arch.hvm.vmx.active_list));
BUG_ON(!(read_cr4() & X86_CR4_VMXE));
this_cpu(vmxon) = 0;
* vmx_vmcs_enter/exit and scheduling tail critical regions.
*/
if ( likely(v == current) )
- return v->arch.hvm_vmx.vmcs_pa == this_cpu(current_vmcs);
+ return v->arch.hvm.vmx.vmcs_pa == this_cpu(current_vmcs);
fv = &this_cpu(foreign_vmcs);
BUG_ON(fv->count != 0);
vcpu_pause(v);
- spin_lock(&v->arch.hvm_vmx.vmcs_lock);
+ spin_lock(&v->arch.hvm.vmx.vmcs_lock);
vmx_clear_vmcs(v);
vmx_load_vmcs(v);
if ( is_hvm_vcpu(current) )
vmx_load_vmcs(current);
- spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
+ spin_unlock(&v->arch.hvm.vmx.vmcs_lock);
vcpu_unpause(v);
fv->v = NULL;
void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
enum vmx_msr_intercept_type type)
{
- struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
+ struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
struct domain *d = v->domain;
/* VMX MSR bitmap supported? */
void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
enum vmx_msr_intercept_type type)
{
- struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
+ struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
/* VMX MSR bitmap supported? */
if ( msr_bitmap == NULL )
*/
void vmx_vmcs_switch(paddr_t from, paddr_t to)
{
- struct vmx_vcpu *vmx = &current->arch.hvm_vmx;
+ struct vmx_vcpu *vmx = &current->arch.hvm.vmx;
spin_lock(&vmx->vmcs_lock);
__vmpclear(from);
void virtual_vmcs_enter(const struct vcpu *v)
{
- __vmptrld(v->arch.hvm_vmx.vmcs_shadow_maddr);
+ __vmptrld(v->arch.hvm.vmx.vmcs_shadow_maddr);
}
void virtual_vmcs_exit(const struct vcpu *v)
{
paddr_t cur = this_cpu(current_vmcs);
- __vmpclear(v->arch.hvm_vmx.vmcs_shadow_maddr);
+ __vmpclear(v->arch.hvm.vmx.vmcs_shadow_maddr);
if ( cur )
__vmptrld(cur);
}
*/
static void pi_desc_init(struct vcpu *v)
{
- v->arch.hvm_vmx.pi_desc.nv = posted_intr_vector;
+ v->arch.hvm.vmx.pi_desc.nv = posted_intr_vector;
/*
* Mark NDST as invalid, then we can use this invalid value as a
* marker of whether to update NDST in vmx_pi_hooks_assign().
*/
- v->arch.hvm_vmx.pi_desc.ndst = APIC_INVALID_DEST;
+ v->arch.hvm.vmx.pi_desc.ndst = APIC_INVALID_DEST;
}
static int construct_vmcs(struct vcpu *v)
/* VMCS controls. */
__vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
- v->arch.hvm_vmx.exec_control = vmx_cpu_based_exec_control;
+ v->arch.hvm.vmx.exec_control = vmx_cpu_based_exec_control;
if ( d->arch.vtsc && !cpu_has_vmx_tsc_scaling )
- v->arch.hvm_vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
+ v->arch.hvm.vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
- v->arch.hvm_vmx.secondary_exec_control = vmx_secondary_exec_control;
+ v->arch.hvm.vmx.secondary_exec_control = vmx_secondary_exec_control;
/*
* Disable descriptor table exiting: It's controlled by the VM event
* monitor requesting it.
*/
- v->arch.hvm_vmx.secondary_exec_control &=
+ v->arch.hvm.vmx.secondary_exec_control &=
~SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING;
/* Disable VPID for now: we decide when to enable it on VMENTER. */
- v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
+ v->arch.hvm.vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
if ( paging_mode_hap(d) )
{
- v->arch.hvm_vmx.exec_control &= ~(CPU_BASED_INVLPG_EXITING |
+ v->arch.hvm.vmx.exec_control &= ~(CPU_BASED_INVLPG_EXITING |
CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING);
}
else
{
- v->arch.hvm_vmx.secondary_exec_control &=
+ v->arch.hvm.vmx.secondary_exec_control &=
~(SECONDARY_EXEC_ENABLE_EPT |
SECONDARY_EXEC_UNRESTRICTED_GUEST |
SECONDARY_EXEC_ENABLE_INVPCID);
}
/* Disable Virtualize x2APIC mode by default. */
- v->arch.hvm_vmx.secondary_exec_control &=
+ v->arch.hvm.vmx.secondary_exec_control &=
~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
/* Do not enable Monitor Trap Flag unless single-step debugging is started. */
- v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
+ v->arch.hvm.vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
/* Disable VMFUNC and #VE for now: they may be enabled later by altp2m. */
- v->arch.hvm_vmx.secondary_exec_control &=
+ v->arch.hvm.vmx.secondary_exec_control &=
~(SECONDARY_EXEC_ENABLE_VM_FUNCTIONS |
SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS);
if ( !has_vlapic(d) )
{
/* Disable virtual apics, TPR */
- v->arch.hvm_vmx.secondary_exec_control &=
+ v->arch.hvm.vmx.secondary_exec_control &=
~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES
| SECONDARY_EXEC_APIC_REGISTER_VIRT
| SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
- v->arch.hvm_vmx.exec_control &= ~CPU_BASED_TPR_SHADOW;
+ v->arch.hvm.vmx.exec_control &= ~CPU_BASED_TPR_SHADOW;
/* In turn, disable posted interrupts. */
__vmwrite(PIN_BASED_VM_EXEC_CONTROL,
if ( cpu_has_vmx_secondary_exec_control )
__vmwrite(SECONDARY_VM_EXEC_CONTROL,
- v->arch.hvm_vmx.secondary_exec_control);
+ v->arch.hvm.vmx.secondary_exec_control);
/* MSR access bitmap. */
if ( cpu_has_vmx_msr_bitmap )
}
memset(msr_bitmap, ~0, PAGE_SIZE);
- v->arch.hvm_vmx.msr_bitmap = msr_bitmap;
+ v->arch.hvm.vmx.msr_bitmap = msr_bitmap;
__vmwrite(MSR_BITMAP, virt_to_maddr(msr_bitmap));
vmx_clear_msr_intercept(v, MSR_FS_BASE, VMX_MSR_RW);
unsigned int i;
/* EOI-exit bitmap */
- bitmap_zero(v->arch.hvm_vmx.eoi_exit_bitmap, NR_VECTORS);
- for ( i = 0; i < ARRAY_SIZE(v->arch.hvm_vmx.eoi_exit_bitmap); ++i )
+ bitmap_zero(v->arch.hvm.vmx.eoi_exit_bitmap, NR_VECTORS);
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.vmx.eoi_exit_bitmap); ++i )
__vmwrite(EOI_EXIT_BITMAP(i), 0);
/* Initialise Guest Interrupt Status (RVI and SVI) to 0 */
if ( iommu_intpost )
pi_desc_init(v);
- __vmwrite(PI_DESC_ADDR, virt_to_maddr(&v->arch.hvm_vmx.pi_desc));
+ __vmwrite(PI_DESC_ADDR, virt_to_maddr(&v->arch.hvm.vmx.pi_desc));
__vmwrite(POSTED_INTR_NOTIFICATION_VECTOR, posted_intr_vector);
}
/* Disable PML anyway here as it will only be enabled in log dirty mode */
- v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
+ v->arch.hvm.vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
/* Host data selectors. */
__vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
__vmwrite(HOST_TR_SELECTOR, TSS_ENTRY << 3);
/* Host control registers. */
- v->arch.hvm_vmx.host_cr0 = read_cr0() & ~X86_CR0_TS;
+ v->arch.hvm.vmx.host_cr0 = read_cr0() & ~X86_CR0_TS;
if ( !v->arch.fully_eager_fpu )
- v->arch.hvm_vmx.host_cr0 |= X86_CR0_TS;
- __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
+ v->arch.hvm.vmx.host_cr0 |= X86_CR0_TS;
+ __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
__vmwrite(HOST_CR4, mmu_cr4_features);
if ( cpu_has_vmx_efer )
__vmwrite(HOST_EFER, read_efer());
__vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
__vmwrite(CR4_GUEST_HOST_MASK, ~0UL);
- v->arch.hvm_vmx.cr4_host_mask = ~0UL;
+ v->arch.hvm.vmx.cr4_host_mask = ~0UL;
__vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
__vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
__vmwrite(GUEST_DR7, 0);
__vmwrite(VMCS_LINK_POINTER, ~0UL);
- v->arch.hvm_vmx.exception_bitmap = HVM_TRAP_MASK
+ v->arch.hvm.vmx.exception_bitmap = HVM_TRAP_MASK
| (paging_mode_hap(d) ? 0 : (1U << TRAP_page_fault))
| (v->arch.fully_eager_fpu ? 0 : (1U << TRAP_no_device));
vmx_update_exception_bitmap(v);
struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr,
enum vmx_msr_list_type type)
{
- const struct vmx_vcpu *vmx = &v->arch.hvm_vmx;
+ const struct vmx_vcpu *vmx = &v->arch.hvm.vmx;
struct vmx_msr_entry *start = NULL, *ent, *end;
unsigned int substart = 0, subend = vmx->msr_save_count;
unsigned int total = vmx->msr_load_count;
int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
enum vmx_msr_list_type type)
{
- struct vmx_vcpu *vmx = &v->arch.hvm_vmx;
+ struct vmx_vcpu *vmx = &v->arch.hvm.vmx;
struct vmx_msr_entry **ptr, *start = NULL, *ent, *end;
unsigned int substart, subend, total;
int rc;
int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type)
{
- struct vmx_vcpu *vmx = &v->arch.hvm_vmx;
+ struct vmx_vcpu *vmx = &v->arch.hvm.vmx;
struct vmx_msr_entry *start = NULL, *ent, *end;
unsigned int substart = 0, subend = vmx->msr_save_count;
unsigned int total = vmx->msr_load_count;
void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector)
{
- if ( !test_and_set_bit(vector, v->arch.hvm_vmx.eoi_exit_bitmap) )
+ if ( !test_and_set_bit(vector, v->arch.hvm.vmx.eoi_exit_bitmap) )
set_bit(vector / BITS_PER_LONG,
- &v->arch.hvm_vmx.eoi_exitmap_changed);
+ &v->arch.hvm.vmx.eoi_exitmap_changed);
}
void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector)
{
- if ( test_and_clear_bit(vector, v->arch.hvm_vmx.eoi_exit_bitmap) )
+ if ( test_and_clear_bit(vector, v->arch.hvm.vmx.eoi_exit_bitmap) )
set_bit(vector / BITS_PER_LONG,
- &v->arch.hvm_vmx.eoi_exitmap_changed);
+ &v->arch.hvm.vmx.eoi_exitmap_changed);
}
bool_t vmx_vcpu_pml_enabled(const struct vcpu *v)
{
- return !!(v->arch.hvm_vmx.secondary_exec_control &
+ return !!(v->arch.hvm.vmx.secondary_exec_control &
SECONDARY_EXEC_ENABLE_PML);
}
if ( vmx_vcpu_pml_enabled(v) )
return 0;
- v->arch.hvm_vmx.pml_pg = v->domain->arch.paging.alloc_page(v->domain);
- if ( !v->arch.hvm_vmx.pml_pg )
+ v->arch.hvm.vmx.pml_pg = v->domain->arch.paging.alloc_page(v->domain);
+ if ( !v->arch.hvm.vmx.pml_pg )
return -ENOMEM;
vmx_vmcs_enter(v);
- __vmwrite(PML_ADDRESS, page_to_maddr(v->arch.hvm_vmx.pml_pg));
+ __vmwrite(PML_ADDRESS, page_to_maddr(v->arch.hvm.vmx.pml_pg));
__vmwrite(GUEST_PML_INDEX, NR_PML_ENTRIES - 1);
- v->arch.hvm_vmx.secondary_exec_control |= SECONDARY_EXEC_ENABLE_PML;
+ v->arch.hvm.vmx.secondary_exec_control |= SECONDARY_EXEC_ENABLE_PML;
__vmwrite(SECONDARY_VM_EXEC_CONTROL,
- v->arch.hvm_vmx.secondary_exec_control);
+ v->arch.hvm.vmx.secondary_exec_control);
vmx_vmcs_exit(v);
vmx_vmcs_enter(v);
- v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
+ v->arch.hvm.vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
__vmwrite(SECONDARY_VM_EXEC_CONTROL,
- v->arch.hvm_vmx.secondary_exec_control);
+ v->arch.hvm.vmx.secondary_exec_control);
vmx_vmcs_exit(v);
- v->domain->arch.paging.free_page(v->domain, v->arch.hvm_vmx.pml_pg);
- v->arch.hvm_vmx.pml_pg = NULL;
+ v->domain->arch.paging.free_page(v->domain, v->arch.hvm.vmx.pml_pg);
+ v->arch.hvm.vmx.pml_pg = NULL;
}
void vmx_vcpu_flush_pml_buffer(struct vcpu *v)
if ( pml_idx == (NR_PML_ENTRIES - 1) )
goto out;
- pml_buf = __map_domain_page(v->arch.hvm_vmx.pml_pg);
+ pml_buf = __map_domain_page(v->arch.hvm.vmx.pml_pg);
/*
* PML index can be either 2^16-1 (buffer is full), or 0 ~ NR_PML_ENTRIES-1
int vmx_create_vmcs(struct vcpu *v)
{
- struct vmx_vcpu *vmx = &v->arch.hvm_vmx;
+ struct vmx_vcpu *vmx = &v->arch.hvm.vmx;
int rc;
if ( (vmx->vmcs_pa = vmx_alloc_vmcs()) == 0 )
void vmx_destroy_vmcs(struct vcpu *v)
{
- struct vmx_vcpu *vmx = &v->arch.hvm_vmx;
+ struct vmx_vcpu *vmx = &v->arch.hvm.vmx;
vmx_clear_vmcs(v);
vmx_free_vmcs(vmx->vmcs_pa);
- free_xenheap_page(v->arch.hvm_vmx.host_msr_area);
- free_xenheap_page(v->arch.hvm_vmx.msr_area);
- free_xenheap_page(v->arch.hvm_vmx.msr_bitmap);
+ free_xenheap_page(v->arch.hvm.vmx.host_msr_area);
+ free_xenheap_page(v->arch.hvm.vmx.msr_area);
+ free_xenheap_page(v->arch.hvm.vmx.msr_bitmap);
}
void vmx_vmentry_failure(void)
__vmread(VM_INSTRUCTION_ERROR, &error);
gprintk(XENLOG_ERR, "VM%s error: %#lx\n",
- curr->arch.hvm_vmx.launched ? "RESUME" : "LAUNCH", error);
+ curr->arch.hvm.vmx.launched ? "RESUME" : "LAUNCH", error);
if ( error == VMX_INSN_INVALID_CONTROL_STATE ||
error == VMX_INSN_INVALID_HOST_STATE )
bool_t debug_state;
unsigned long host_cr4;
- if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
+ if ( v->arch.hvm.vmx.active_cpu == smp_processor_id() )
vmx_vmcs_reload(v);
else
{
if ( has_arch_pdevs(v->domain) && !iommu_snoop
&& !cpu_has_wbinvd_exiting )
{
- int cpu = v->arch.hvm_vmx.active_cpu;
+ int cpu = v->arch.hvm.vmx.active_cpu;
if ( cpu != -1 )
flush_mask(cpumask_of(cpu), FLUSH_CACHE);
}
* VCPU migration. The environment of current VMCS is updated in place,
* but the action of another VMCS is deferred till it is switched in.
*/
- v->arch.hvm_vmx.hostenv_migrated = 1;
+ v->arch.hvm.vmx.hostenv_migrated = 1;
hvm_asid_flush_vcpu(v);
}
printk("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
cr4, vmr(CR4_READ_SHADOW), vmr(CR4_GUEST_HOST_MASK));
printk("CR3 = 0x%016lx\n", vmr(GUEST_CR3));
- if ( (v->arch.hvm_vmx.secondary_exec_control &
+ if ( (v->arch.hvm.vmx.secondary_exec_control &
SECONDARY_EXEC_ENABLE_EPT) &&
(cr4 & X86_CR4_PAE) && !(vmentry_ctl & VM_ENTRY_IA32E_MODE) )
{
vmr(GUEST_PERF_GLOBAL_CTRL), vmr(GUEST_BNDCFGS));
printk("Interruptibility = %08x ActivityState = %08x\n",
vmr32(GUEST_INTERRUPTIBILITY_INFO), vmr32(GUEST_ACTIVITY_STATE));
- if ( v->arch.hvm_vmx.secondary_exec_control &
+ if ( v->arch.hvm.vmx.secondary_exec_control &
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY )
printk("InterruptStatus = %04x\n", vmr16(GUEST_INTR_STATUS));
vmr32(IDT_VECTORING_INFO), vmr32(IDT_VECTORING_ERROR_CODE));
printk("TSC Offset = 0x%016lx TSC Multiplier = 0x%016lx\n",
vmr(TSC_OFFSET), vmr(TSC_MULTIPLIER));
- if ( (v->arch.hvm_vmx.exec_control & CPU_BASED_TPR_SHADOW) ||
+ if ( (v->arch.hvm.vmx.exec_control & CPU_BASED_TPR_SHADOW) ||
(vmx_pin_based_exec_control & PIN_BASED_POSTED_INTERRUPT) )
printk("TPR Threshold = 0x%02x PostedIntrVec = 0x%02x\n",
vmr32(TPR_THRESHOLD), vmr16(POSTED_INTR_NOTIFICATION_VECTOR));
- if ( (v->arch.hvm_vmx.secondary_exec_control &
+ if ( (v->arch.hvm.vmx.secondary_exec_control &
SECONDARY_EXEC_ENABLE_EPT) )
printk("EPT pointer = 0x%016lx EPTP index = 0x%04x\n",
vmr(EPT_POINTER), vmr16(EPTP_INDEX));
i + 1, vmr(CR3_TARGET_VALUE(i + 1)));
if ( i < n )
printk("CR3 target%u=%016lx\n", i, vmr(CR3_TARGET_VALUE(i)));
- if ( v->arch.hvm_vmx.secondary_exec_control &
+ if ( v->arch.hvm.vmx.secondary_exec_control &
SECONDARY_EXEC_PAUSE_LOOP_EXITING )
printk("PLE Gap=%08x Window=%08x\n",
vmr32(PLE_GAP), vmr32(PLE_WINDOW));
- if ( v->arch.hvm_vmx.secondary_exec_control &
+ if ( v->arch.hvm.vmx.secondary_exec_control &
(SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_ENABLE_VM_FUNCTIONS) )
printk("Virtual processor ID = 0x%04x VMfunc controls = %016lx\n",
vmr16(VIRTUAL_PROCESSOR_ID), vmr(VM_FUNCTION_CONTROL));
spinlock_t *old_lock;
spinlock_t *pi_blocking_list_lock =
&per_cpu(vmx_pi_blocking, v->processor).lock;
- struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
+ struct pi_desc *pi_desc = &v->arch.hvm.vmx.pi_desc;
spin_lock_irqsave(pi_blocking_list_lock, flags);
- old_lock = cmpxchg(&v->arch.hvm_vmx.pi_blocking.lock, NULL,
+ old_lock = cmpxchg(&v->arch.hvm.vmx.pi_blocking.lock, NULL,
pi_blocking_list_lock);
/*
- * 'v->arch.hvm_vmx.pi_blocking.lock' should be NULL before
+ * 'v->arch.hvm.vmx.pi_blocking.lock' should be NULL before
* being assigned to a new value, since the vCPU is currently
* running and it cannot be on any blocking list.
*/
ASSERT(old_lock == NULL);
- list_add_tail(&v->arch.hvm_vmx.pi_blocking.list,
+ list_add_tail(&v->arch.hvm.vmx.pi_blocking.list,
&per_cpu(vmx_pi_blocking, v->processor).list);
spin_unlock_irqrestore(pi_blocking_list_lock, flags);
static void vmx_pi_switch_from(struct vcpu *v)
{
- struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
+ struct pi_desc *pi_desc = &v->arch.hvm.vmx.pi_desc;
if ( test_bit(_VPF_blocked, &v->pause_flags) )
return;
static void vmx_pi_switch_to(struct vcpu *v)
{
- struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
+ struct pi_desc *pi_desc = &v->arch.hvm.vmx.pi_desc;
unsigned int dest = cpu_physical_id(v->processor);
write_atomic(&pi_desc->ndst,
{
unsigned long flags;
spinlock_t *pi_blocking_list_lock;
- struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
+ struct pi_desc *pi_desc = &v->arch.hvm.vmx.pi_desc;
/*
* Set 'NV' field back to posted_intr_vector, so the
*/
write_atomic(&pi_desc->nv, posted_intr_vector);
- pi_blocking_list_lock = v->arch.hvm_vmx.pi_blocking.lock;
+ pi_blocking_list_lock = v->arch.hvm.vmx.pi_blocking.lock;
/* Prevent the compiler from eliminating the local variable.*/
smp_rmb();
spin_lock_irqsave(pi_blocking_list_lock, flags);
/*
- * v->arch.hvm_vmx.pi_blocking.lock == NULL here means the vCPU
+ * v->arch.hvm.vmx.pi_blocking.lock == NULL here means the vCPU
* was removed from the blocking list while we are acquiring the lock.
*/
- if ( v->arch.hvm_vmx.pi_blocking.lock != NULL )
+ if ( v->arch.hvm.vmx.pi_blocking.lock != NULL )
{
- ASSERT(v->arch.hvm_vmx.pi_blocking.lock == pi_blocking_list_lock);
- list_del(&v->arch.hvm_vmx.pi_blocking.list);
- v->arch.hvm_vmx.pi_blocking.lock = NULL;
+ ASSERT(v->arch.hvm.vmx.pi_blocking.lock == pi_blocking_list_lock);
+ list_del(&v->arch.hvm.vmx.pi_blocking.list);
+ v->arch.hvm.vmx.pi_blocking.lock = NULL;
}
spin_unlock_irqrestore(pi_blocking_list_lock, flags);
{
list_del(&vmx->pi_blocking.list);
vmx->pi_blocking.lock = NULL;
- vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm_vmx));
+ vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm.vmx));
}
else
{
for_each_vcpu ( d, v )
{
unsigned int dest = cpu_physical_id(v->processor);
- struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
+ struct pi_desc *pi_desc = &v->arch.hvm.vmx.pi_desc;
/*
* We don't need to update NDST if vmx_pi_switch_to()
{
int rc;
- spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
+ spin_lock_init(&v->arch.hvm.vmx.vmcs_lock);
- INIT_LIST_HEAD(&v->arch.hvm_vmx.pi_blocking.list);
+ INIT_LIST_HEAD(&v->arch.hvm.vmx.pi_blocking.list);
if ( (rc = vmx_create_vmcs(v)) != 0 )
{
* We cannot cache SHADOW_GS_BASE while the VCPU runs, as it can
* be updated at any time via SWAPGS, which we cannot trap.
*/
- v->arch.hvm_vmx.shadow_gs = rdgsshadow();
+ v->arch.hvm.vmx.shadow_gs = rdgsshadow();
}
static void vmx_restore_guest_msrs(struct vcpu *v)
{
- wrgsshadow(v->arch.hvm_vmx.shadow_gs);
- wrmsrl(MSR_STAR, v->arch.hvm_vmx.star);
- wrmsrl(MSR_LSTAR, v->arch.hvm_vmx.lstar);
- wrmsrl(MSR_SYSCALL_MASK, v->arch.hvm_vmx.sfmask);
+ wrgsshadow(v->arch.hvm.vmx.shadow_gs);
+ wrmsrl(MSR_STAR, v->arch.hvm.vmx.star);
+ wrmsrl(MSR_LSTAR, v->arch.hvm.vmx.lstar);
+ wrmsrl(MSR_SYSCALL_MASK, v->arch.hvm.vmx.sfmask);
if ( cpu_has_rdtscp )
wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
void vmx_update_cpu_exec_control(struct vcpu *v)
{
if ( nestedhvm_vcpu_in_guestmode(v) )
- nvmx_update_exec_control(v, v->arch.hvm_vmx.exec_control);
+ nvmx_update_exec_control(v, v->arch.hvm.vmx.exec_control);
else
- __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm.vmx.exec_control);
}
void vmx_update_secondary_exec_control(struct vcpu *v)
{
if ( nestedhvm_vcpu_in_guestmode(v) )
nvmx_update_secondary_exec_control(v,
- v->arch.hvm_vmx.secondary_exec_control);
+ v->arch.hvm.vmx.secondary_exec_control);
else
__vmwrite(SECONDARY_VM_EXEC_CONTROL,
- v->arch.hvm_vmx.secondary_exec_control);
+ v->arch.hvm.vmx.secondary_exec_control);
}
void vmx_update_exception_bitmap(struct vcpu *v)
{
- u32 bitmap = unlikely(v->arch.hvm_vmx.vmx_realmode)
- ? 0xffffffffu : v->arch.hvm_vmx.exception_bitmap;
+ u32 bitmap = unlikely(v->arch.hvm.vmx.vmx_realmode)
+ ? 0xffffffffu : v->arch.hvm.vmx.exception_bitmap;
if ( nestedhvm_vcpu_in_guestmode(v) )
nvmx_update_exception_bitmap(v, bitmap);
if ( opt_hvm_fep ||
(v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
- v->arch.hvm_vmx.exception_bitmap |= (1U << TRAP_invalid_op);
+ v->arch.hvm.vmx.exception_bitmap |= (1U << TRAP_invalid_op);
else
- v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_invalid_op);
+ v->arch.hvm.vmx.exception_bitmap &= ~(1U << TRAP_invalid_op);
vmx_vmcs_enter(v);
vmx_update_exception_bitmap(v);
/* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
v->arch.hvm.flag_dr_dirty = 0;
- v->arch.hvm_vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
+ v->arch.hvm.vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
vmx_update_cpu_exec_control(v);
v->arch.debugreg[0] = read_debugreg(0);
static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
{
- data->shadow_gs = v->arch.hvm_vmx.shadow_gs;
+ data->shadow_gs = v->arch.hvm.vmx.shadow_gs;
data->msr_flags = 0;
- data->msr_lstar = v->arch.hvm_vmx.lstar;
- data->msr_star = v->arch.hvm_vmx.star;
- data->msr_cstar = v->arch.hvm_vmx.cstar;
- data->msr_syscall_mask = v->arch.hvm_vmx.sfmask;
+ data->msr_lstar = v->arch.hvm.vmx.lstar;
+ data->msr_star = v->arch.hvm.vmx.star;
+ data->msr_cstar = v->arch.hvm.vmx.cstar;
+ data->msr_syscall_mask = v->arch.hvm.vmx.sfmask;
}
static void vmx_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
{
- v->arch.hvm_vmx.shadow_gs = data->shadow_gs;
- v->arch.hvm_vmx.star = data->msr_star;
- v->arch.hvm_vmx.lstar = data->msr_lstar;
- v->arch.hvm_vmx.cstar = data->msr_cstar;
- v->arch.hvm_vmx.sfmask = data->msr_syscall_mask;
+ v->arch.hvm.vmx.shadow_gs = data->shadow_gs;
+ v->arch.hvm.vmx.star = data->msr_star;
+ v->arch.hvm.vmx.lstar = data->msr_lstar;
+ v->arch.hvm.vmx.cstar = data->msr_cstar;
+ v->arch.hvm.vmx.sfmask = data->msr_syscall_mask;
}
static void vmx_fpu_enter(struct vcpu *v)
{
vcpu_restore_fpu_lazy(v);
- v->arch.hvm_vmx.exception_bitmap &= ~(1u << TRAP_no_device);
+ v->arch.hvm.vmx.exception_bitmap &= ~(1u << TRAP_no_device);
vmx_update_exception_bitmap(v);
- v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS;
- __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
+ v->arch.hvm.vmx.host_cr0 &= ~X86_CR0_TS;
+ __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
}
static void vmx_fpu_leave(struct vcpu *v)
ASSERT(!v->fpu_dirtied);
ASSERT(read_cr0() & X86_CR0_TS);
- if ( !(v->arch.hvm_vmx.host_cr0 & X86_CR0_TS) )
+ if ( !(v->arch.hvm.vmx.host_cr0 & X86_CR0_TS) )
{
- v->arch.hvm_vmx.host_cr0 |= X86_CR0_TS;
- __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
+ v->arch.hvm.vmx.host_cr0 |= X86_CR0_TS;
+ __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
}
/*
{
v->arch.hvm.hw_cr[0] |= X86_CR0_TS;
__vmwrite(GUEST_CR0, v->arch.hvm.hw_cr[0]);
- v->arch.hvm_vmx.exception_bitmap |= (1u << TRAP_no_device);
+ v->arch.hvm.vmx.exception_bitmap |= (1u << TRAP_no_device);
vmx_update_exception_bitmap(v);
}
}
(!(attr & (1u << 16)) << 7) | (attr & 0x7f) | ((attr >> 4) & 0xf00);
/* Adjust for virtual 8086 mode */
- if ( v->arch.hvm_vmx.vmx_realmode && seg <= x86_seg_tr
- && !(v->arch.hvm_vmx.vm86_segment_mask & (1u << seg)) )
+ if ( v->arch.hvm.vmx.vmx_realmode && seg <= x86_seg_tr
+ && !(v->arch.hvm.vmx.vm86_segment_mask & (1u << seg)) )
{
- struct segment_register *sreg = &v->arch.hvm_vmx.vm86_saved_seg[seg];
+ struct segment_register *sreg = &v->arch.hvm.vmx.vm86_saved_seg[seg];
if ( seg == x86_seg_tr )
*reg = *sreg;
else if ( reg->base != sreg->base || seg == x86_seg_ss )
base = reg->base;
/* Adjust CS/SS/DS/ES/FS/GS/TR for virtual 8086 mode */
- if ( v->arch.hvm_vmx.vmx_realmode && seg <= x86_seg_tr )
+ if ( v->arch.hvm.vmx.vmx_realmode && seg <= x86_seg_tr )
{
/* Remember the proper contents */
- v->arch.hvm_vmx.vm86_saved_seg[seg] = *reg;
+ v->arch.hvm.vmx.vm86_saved_seg[seg] = *reg;
if ( seg == x86_seg_tr )
{
cmpxchg(&d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED],
val, val & ~VM86_TSS_UPDATED);
}
- v->arch.hvm_vmx.vm86_segment_mask &= ~(1u << seg);
+ v->arch.hvm.vmx.vm86_segment_mask &= ~(1u << seg);
}
else
- v->arch.hvm_vmx.vm86_segment_mask |= (1u << seg);
+ v->arch.hvm.vmx.vm86_segment_mask |= (1u << seg);
}
else
{
sel = base >> 4;
attr = vm86_ds_attr;
limit = 0xffff;
- v->arch.hvm_vmx.vm86_segment_mask &= ~(1u << seg);
+ v->arch.hvm.vmx.vm86_segment_mask &= ~(1u << seg);
}
else
- v->arch.hvm_vmx.vm86_segment_mask |= (1u << seg);
+ v->arch.hvm.vmx.vm86_segment_mask |= (1u << seg);
}
}
static unsigned long vmx_get_shadow_gs_base(struct vcpu *v)
{
- return v->arch.hvm_vmx.shadow_gs;
+ return v->arch.hvm.vmx.shadow_gs;
}
static int vmx_set_guest_pat(struct vcpu *v, u64 gpat)
static void vmx_set_rdtsc_exiting(struct vcpu *v, bool_t enable)
{
vmx_vmcs_enter(v);
- v->arch.hvm_vmx.exec_control &= ~CPU_BASED_RDTSC_EXITING;
+ v->arch.hvm.vmx.exec_control &= ~CPU_BASED_RDTSC_EXITING;
if ( enable )
- v->arch.hvm_vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
+ v->arch.hvm.vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
vmx_update_cpu_exec_control(v);
vmx_vmcs_exit(v);
}
static void vmx_set_descriptor_access_exiting(struct vcpu *v, bool enable)
{
if ( enable )
- v->arch.hvm_vmx.secondary_exec_control |=
+ v->arch.hvm.vmx.secondary_exec_control |=
SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING;
else
- v->arch.hvm_vmx.secondary_exec_control &=
+ v->arch.hvm.vmx.secondary_exec_control &=
~SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING;
vmx_vmcs_enter(v);
void vmx_update_debug_state(struct vcpu *v)
{
if ( v->arch.hvm.debug_state_latch )
- v->arch.hvm_vmx.exception_bitmap |= 1U << TRAP_int3;
+ v->arch.hvm.vmx.exception_bitmap |= 1U << TRAP_int3;
else
- v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_int3);
+ v->arch.hvm.vmx.exception_bitmap &= ~(1U << TRAP_int3);
vmx_vmcs_enter(v);
vmx_update_exception_bitmap(v);
if ( paging_mode_hap(v->domain) )
{
/* Manage GUEST_CR3 when CR0.PE=0. */
- uint32_t old_ctls = v->arch.hvm_vmx.exec_control;
+ uint32_t old_ctls = v->arch.hvm.vmx.exec_control;
uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING);
- v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
+ v->arch.hvm.vmx.exec_control &= ~cr3_ctls;
if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
- v->arch.hvm_vmx.exec_control |= cr3_ctls;
+ v->arch.hvm.vmx.exec_control |= cr3_ctls;
/* Trap CR3 updates if CR3 memory events are enabled. */
if ( v->domain->arch.monitor.write_ctrlreg_enabled &
monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3) )
- v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
+ v->arch.hvm.vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
- if ( old_ctls != v->arch.hvm_vmx.exec_control )
+ if ( old_ctls != v->arch.hvm.vmx.exec_control )
vmx_update_cpu_exec_control(v);
}
realmode = !(v->arch.hvm.guest_cr[0] & X86_CR0_PE);
if ( !vmx_unrestricted_guest(v) &&
- (realmode != v->arch.hvm_vmx.vmx_realmode) )
+ (realmode != v->arch.hvm.vmx.vmx_realmode) )
{
enum x86_segment s;
struct segment_register reg[x86_seg_tr + 1];
* the saved values we'll use when returning to prot mode. */
for ( s = 0; s < ARRAY_SIZE(reg); s++ )
hvm_get_segment_register(v, s, ®[s]);
- v->arch.hvm_vmx.vmx_realmode = realmode;
+ v->arch.hvm.vmx.vmx_realmode = realmode;
if ( realmode )
{
else
{
for ( s = 0; s < ARRAY_SIZE(reg); s++ )
- if ( !(v->arch.hvm_vmx.vm86_segment_mask & (1<<s)) )
+ if ( !(v->arch.hvm.vmx.vm86_segment_mask & (1<<s)) )
hvm_set_segment_register(
- v, s, &v->arch.hvm_vmx.vm86_saved_seg[s]);
+ v, s, &v->arch.hvm.vmx.vm86_saved_seg[s]);
}
vmx_update_exception_bitmap(v);
nvmx_set_cr_read_shadow(v, 4);
v->arch.hvm.hw_cr[4] |= v->arch.hvm.guest_cr[4];
- if ( v->arch.hvm_vmx.vmx_realmode )
+ if ( v->arch.hvm.vmx.vmx_realmode )
v->arch.hvm.hw_cr[4] |= X86_CR4_VME;
if ( !hvm_paging_enabled(v) )
* Update CR4 host mask to only trap when the guest tries to set
* bits that are controlled by the hypervisor.
*/
- v->arch.hvm_vmx.cr4_host_mask =
+ v->arch.hvm.vmx.cr4_host_mask =
(HVM_CR4_HOST_MASK | X86_CR4_PKE |
~hvm_cr4_guest_valid_bits(v->domain, false));
- v->arch.hvm_vmx.cr4_host_mask |= v->arch.hvm_vmx.vmx_realmode ?
+ v->arch.hvm.vmx.cr4_host_mask |= v->arch.hvm.vmx.vmx_realmode ?
X86_CR4_VME : 0;
- v->arch.hvm_vmx.cr4_host_mask |= !hvm_paging_enabled(v) ?
+ v->arch.hvm.vmx.cr4_host_mask |= !hvm_paging_enabled(v) ?
(X86_CR4_PSE | X86_CR4_SMEP |
X86_CR4_SMAP)
: 0;
if ( v->domain->arch.monitor.write_ctrlreg_enabled &
monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4) )
- v->arch.hvm_vmx.cr4_host_mask |=
+ v->arch.hvm.vmx.cr4_host_mask |=
~v->domain->arch.monitor.write_ctrlreg_mask[VM_EVENT_X86_CR4];
if ( nestedhvm_vcpu_in_guestmode(v) )
/* Add the nested host mask to get the more restrictive one. */
- v->arch.hvm_vmx.cr4_host_mask |= get_vvmcs(v,
+ v->arch.hvm.vmx.cr4_host_mask |= get_vvmcs(v,
CR4_GUEST_HOST_MASK);
- __vmwrite(CR4_GUEST_HOST_MASK, v->arch.hvm_vmx.cr4_host_mask);
+ __vmwrite(CR4_GUEST_HOST_MASK, v->arch.hvm.vmx.cr4_host_mask);
}
break;
/* Can't inject exceptions in virtual 8086 mode because they would
* use the protected-mode IDT. Emulate at the next vmenter instead. */
- if ( curr->arch.hvm_vmx.vmx_realmode )
- curr->arch.hvm_vmx.vmx_emulate = 1;
+ if ( curr->arch.hvm.vmx.vmx_realmode )
+ curr->arch.hvm.vmx.vmx_emulate = 1;
}
void vmx_inject_extint(int trap, uint8_t source)
for ( i = 0x10; i < NR_VECTORS; ++i )
if ( vlapic_test_vector(i, &vlapic->regs->data[APIC_IRR]) ||
vlapic_test_vector(i, &vlapic->regs->data[APIC_ISR]) )
- set_bit(i, v->arch.hvm_vmx.eoi_exit_bitmap);
+ set_bit(i, v->arch.hvm.vmx.eoi_exit_bitmap);
- for ( i = 0; i < ARRAY_SIZE(v->arch.hvm_vmx.eoi_exit_bitmap); ++i )
- __vmwrite(EOI_EXIT_BITMAP(i), v->arch.hvm_vmx.eoi_exit_bitmap[i]);
+ for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.vmx.eoi_exit_bitmap); ++i )
+ __vmwrite(EOI_EXIT_BITMAP(i), v->arch.hvm.vmx.eoi_exit_bitmap[i]);
vmx_vmcs_exit(v);
}
static void vmx_deliver_posted_intr(struct vcpu *v, u8 vector)
{
- if ( pi_test_and_set_pir(vector, &v->arch.hvm_vmx.pi_desc) )
+ if ( pi_test_and_set_pir(vector, &v->arch.hvm.vmx.pi_desc) )
return;
- if ( unlikely(v->arch.hvm_vmx.eoi_exitmap_changed) )
+ if ( unlikely(v->arch.hvm.vmx.eoi_exitmap_changed) )
{
/*
* If the EOI exit bitmap needs to be changed or the notification
* vector can't be allocated, the interrupt will not be injected
* until VMEntry, as it used to be.
*/
- pi_set_on(&v->arch.hvm_vmx.pi_desc);
+ pi_set_on(&v->arch.hvm.vmx.pi_desc);
}
else
{
struct pi_desc old, new, prev;
- prev.control = v->arch.hvm_vmx.pi_desc.control;
+ prev.control = v->arch.hvm.vmx.pi_desc.control;
do {
/*
return;
}
- old.control = v->arch.hvm_vmx.pi_desc.control &
+ old.control = v->arch.hvm.vmx.pi_desc.control &
~((1 << POSTED_INTR_ON) | (1 << POSTED_INTR_SN));
- new.control = v->arch.hvm_vmx.pi_desc.control |
+ new.control = v->arch.hvm.vmx.pi_desc.control |
(1 << POSTED_INTR_ON);
- prev.control = cmpxchg(&v->arch.hvm_vmx.pi_desc.control,
+ prev.control = cmpxchg(&v->arch.hvm.vmx.pi_desc.control,
old.control, new.control);
} while ( prev.control != old.control );
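The do/while just above is a standard lock-free compare-and-swap retry: the ON bit is set only if the control word the update was based on had both ON and SN clear, and the update is retried whenever another CPU modified the word in the meantime. A minimal standalone sketch of that pattern follows; the bit positions, the type and the helper name are illustrative assumptions, not Xen's definitions.

/*
 * pi_try_set_on(): set the (assumed) ON bit with a CAS retry loop,
 * bailing out if ON or SN is already set in the observed value.
 */
#include <stdint.h>

#define PI_ON  (1u << 0)   /* assumed bit position, for illustration only */
#define PI_SN  (1u << 1)   /* assumed bit position, for illustration only */

static int pi_try_set_on(volatile uint32_t *control)
{
    uint32_t prev = *control, old, next;

    do {
        if ( prev & (PI_ON | PI_SN) )
            return 0;                 /* notification already pending or suppressed */
        old = prev;
        next = prev | PI_ON;
        /* GCC atomic builtin: returns the value observed before the swap. */
        prev = __sync_val_compare_and_swap(control, old, next);
    } while ( prev != old );

    return 1;                         /* we set ON; the caller sends the notification */
}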
unsigned int group, i;
DECLARE_BITMAP(pending_intr, NR_VECTORS);
- if ( !pi_test_and_clear_on(&v->arch.hvm_vmx.pi_desc) )
+ if ( !pi_test_and_clear_on(&v->arch.hvm.vmx.pi_desc) )
return;
for ( group = 0; group < ARRAY_SIZE(pending_intr); group++ )
- pending_intr[group] = pi_get_pir(&v->arch.hvm_vmx.pi_desc, group);
+ pending_intr[group] = pi_get_pir(&v->arch.hvm.vmx.pi_desc, group);
for_each_set_bit(i, pending_intr, NR_VECTORS)
vlapic_set_vector(i, &vlapic->regs->data[APIC_IRR]);
static bool vmx_test_pir(const struct vcpu *v, uint8_t vec)
{
- return pi_test_pir(vec, &v->arch.hvm_vmx.pi_desc);
+ return pi_test_pir(vec, &v->arch.hvm.vmx.pi_desc);
}
static void vmx_handle_eoi(u8 vector)
__vmwrite(EPT_POINTER, ept->eptp);
- if ( v->arch.hvm_vmx.secondary_exec_control &
+ if ( v->arch.hvm.vmx.secondary_exec_control &
SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS )
__vmwrite(EPTP_INDEX, vcpu_altp2m(v).p2midx);
if ( !d->is_dying && altp2m_active(d) )
{
- v->arch.hvm_vmx.secondary_exec_control |= mask;
+ v->arch.hvm.vmx.secondary_exec_control |= mask;
__vmwrite(VM_FUNCTION_CONTROL, VMX_VMFUNC_EPTP_SWITCHING);
__vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m_eptp));
__vmwrite(EPTP_INDEX, vcpu_altp2m(v).p2midx);
}
else
- v->arch.hvm_vmx.secondary_exec_control &=
+ v->arch.hvm.vmx.secondary_exec_control &=
~SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS;
}
}
else
- v->arch.hvm_vmx.secondary_exec_control &= ~mask;
+ v->arch.hvm.vmx.secondary_exec_control &= ~mask;
vmx_update_secondary_exec_control(v);
vmx_vmcs_exit(v);
list_del(&vmx->pi_blocking.list);
ASSERT(vmx->pi_blocking.lock == lock);
vmx->pi_blocking.lock = NULL;
- vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm_vmx));
+ vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm.vmx));
}
}
__restore_debug_registers(v);
/* Allow guest direct access to DR registers */
- v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
+ v->arch.hvm.vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
vmx_update_cpu_exec_control(v);
}
break;
case MSR_STAR:
- *msr_content = curr->arch.hvm_vmx.star;
+ *msr_content = curr->arch.hvm.vmx.star;
break;
case MSR_LSTAR:
- *msr_content = curr->arch.hvm_vmx.lstar;
+ *msr_content = curr->arch.hvm.vmx.lstar;
break;
case MSR_CSTAR:
- *msr_content = curr->arch.hvm_vmx.cstar;
+ *msr_content = curr->arch.hvm.vmx.cstar;
break;
case MSR_SYSCALL_MASK:
- *msr_content = curr->arch.hvm_vmx.sfmask;
+ *msr_content = curr->arch.hvm.vmx.sfmask;
break;
case MSR_IA32_DEBUGCTLMSR:
return;
vmx_vmcs_enter(v);
- v->arch.hvm_vmx.secondary_exec_control &=
+ v->arch.hvm.vmx.secondary_exec_control &=
~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
if ( !vlapic_hw_disabled(vlapic) &&
{
if ( virtualize_x2apic_mode && vlapic_x2apic_mode(vlapic) )
{
- v->arch.hvm_vmx.secondary_exec_control |=
+ v->arch.hvm.vmx.secondary_exec_control |=
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
if ( cpu_has_vmx_apic_reg_virt )
{
}
}
else
- v->arch.hvm_vmx.secondary_exec_control |=
+ v->arch.hvm.vmx.secondary_exec_control |=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}
- if ( !(v->arch.hvm_vmx.secondary_exec_control &
+ if ( !(v->arch.hvm.vmx.secondary_exec_control &
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE) )
for ( msr = MSR_IA32_APICBASE_MSR;
msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++ )
break;
case MSR_STAR:
- v->arch.hvm_vmx.star = msr_content;
+ v->arch.hvm.vmx.star = msr_content;
wrmsrl(MSR_STAR, msr_content);
break;
case MSR_LSTAR:
if ( !is_canonical_address(msr_content) )
goto gp_fault;
- v->arch.hvm_vmx.lstar = msr_content;
+ v->arch.hvm.vmx.lstar = msr_content;
wrmsrl(MSR_LSTAR, msr_content);
break;
case MSR_CSTAR:
if ( !is_canonical_address(msr_content) )
goto gp_fault;
- v->arch.hvm_vmx.cstar = msr_content;
+ v->arch.hvm.vmx.cstar = msr_content;
break;
case MSR_SYSCALL_MASK:
- v->arch.hvm_vmx.sfmask = msr_content;
+ v->arch.hvm.vmx.sfmask = msr_content;
wrmsrl(MSR_SYSCALL_MASK, msr_content);
break;
* the guest won't execute correctly either. Simply crash the domain
* to make the failure obvious.
*/
- if ( !(v->arch.hvm_vmx.lbr_flags & LBR_MSRS_INSERTED) &&
+ if ( !(v->arch.hvm.vmx.lbr_flags & LBR_MSRS_INSERTED) &&
(msr_content & IA32_DEBUGCTLMSR_LBR) )
{
const struct lbr_info *lbr = last_branch_msr_get();
}
}
- v->arch.hvm_vmx.lbr_flags |= LBR_MSRS_INSERTED;
+ v->arch.hvm.vmx.lbr_flags |= LBR_MSRS_INSERTED;
if ( lbr_tsx_fixup_needed )
- v->arch.hvm_vmx.lbr_flags |= LBR_FIXUP_TSX;
+ v->arch.hvm.vmx.lbr_flags |= LBR_FIXUP_TSX;
if ( bdw_erratum_bdf14_fixup_needed )
- v->arch.hvm_vmx.lbr_flags |= LBR_FIXUP_BDF14;
+ v->arch.hvm.vmx.lbr_flags |= LBR_FIXUP_BDF14;
}
__vmwrite(GUEST_IA32_DEBUGCTL, msr_content);
printk(" Entry out of range\n");
else
{
- msr = &curr->arch.hvm_vmx.msr_area[idx];
+ msr = &curr->arch.hvm.vmx.msr_area[idx];
printk(" msr %08x val %016"PRIx64" (mbz %#x)\n",
msr->index, msr->data, msr->mbz);
/* Adjust RFLAGS to enter virtual 8086 mode with IOPL == 3. Since
* we have CR4.VME == 1 and our own TSS with an empty interrupt
* redirection bitmap, all software INTs will be handled by vm86 */
- v->arch.hvm_vmx.vm86_saved_eflags = regs->eflags;
+ v->arch.hvm.vmx.vm86_saved_eflags = regs->eflags;
regs->eflags |= (X86_EFLAGS_VM | X86_EFLAGS_IOPL);
}
* values to match.
*/
__vmread(GUEST_CR4, &v->arch.hvm.hw_cr[4]);
- v->arch.hvm.guest_cr[4] &= v->arch.hvm_vmx.cr4_host_mask;
+ v->arch.hvm.guest_cr[4] &= v->arch.hvm.vmx.cr4_host_mask;
v->arch.hvm.guest_cr[4] |= (v->arch.hvm.hw_cr[4] &
- ~v->arch.hvm_vmx.cr4_host_mask);
+ ~v->arch.hvm.vmx.cr4_host_mask);
__vmread(GUEST_CR3, &v->arch.hvm.hw_cr[3]);
if ( vmx_unrestricted_guest(v) || hvm_paging_enabled(v) )
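The guest CR4 reconstruction a few lines above combines the two views of CR4: bits covered by cr4_host_mask are owned by Xen, so their guest-visible value is whatever was tracked in guest_cr[4], while bits outside the mask are owned by the guest and are read back from the hardware value. A tiny worked sketch with made-up bit values (not real CR4 layouts):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t cr4_host_mask = 0x2020;      /* host-owned bits (illustrative)   */
    uint64_t guest_cr4     = 0x0020;      /* tracked guest view of those bits */
    uint64_t hw_cr4        = 0x62028;     /* live value currently on hardware */

    guest_cr4 &= cr4_host_mask;           /* keep tracked host-owned bits     */
    guest_cr4 |= hw_cr4 & ~cr4_host_mask; /* take guest-owned bits from HW    */

    printf("guest-visible CR4 = %#" PRIx64 "\n", guest_cr4);   /* prints 0x60028 */
    return 0;
}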
* figure out whether it has done so and update the altp2m data.
*/
if ( altp2m_active(v->domain) &&
- (v->arch.hvm_vmx.secondary_exec_control &
+ (v->arch.hvm.vmx.secondary_exec_control &
SECONDARY_EXEC_ENABLE_VM_FUNCTIONS) )
{
unsigned long idx;
- if ( v->arch.hvm_vmx.secondary_exec_control &
+ if ( v->arch.hvm.vmx.secondary_exec_control &
SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS )
__vmread(EPTP_INDEX, &idx);
else
if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
return vmx_failed_vmentry(exit_reason, regs);
- if ( v->arch.hvm_vmx.vmx_realmode )
+ if ( v->arch.hvm.vmx.vmx_realmode )
{
/* Put RFLAGS back the way the guest wants it */
regs->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IOPL);
- regs->eflags |= (v->arch.hvm_vmx.vm86_saved_eflags & X86_EFLAGS_IOPL);
+ regs->eflags |= (v->arch.hvm.vmx.vm86_saved_eflags & X86_EFLAGS_IOPL);
/* Unless this exit was for an interrupt, we've hit something
* vm86 can't handle. Try again, using the emulator. */
{
default:
perfc_incr(realmode_exits);
- v->arch.hvm_vmx.vmx_emulate = 1;
+ v->arch.hvm.vmx.vmx_emulate = 1;
HVMTRACE_0D(REALMODE_EMULATE);
return;
}
break;
case EXIT_REASON_PENDING_VIRT_INTR:
/* Disable the interrupt window. */
- v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+ v->arch.hvm.vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
vmx_update_cpu_exec_control(v);
break;
case EXIT_REASON_PENDING_VIRT_NMI:
/* Disable the NMI window. */
- v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
+ v->arch.hvm.vmx.exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
vmx_update_cpu_exec_control(v);
break;
case EXIT_REASON_TASK_SWITCH: {
}
case EXIT_REASON_MONITOR_TRAP_FLAG:
- v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
+ v->arch.hvm.vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
vmx_update_cpu_exec_control(v);
if ( v->arch.hvm.single_step )
{
static void lbr_tsx_fixup(void)
{
struct vcpu *curr = current;
- unsigned int msr_count = curr->arch.hvm_vmx.msr_save_count;
- struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+ unsigned int msr_count = curr->arch.hvm.vmx.msr_save_count;
+ struct vmx_msr_entry *msr_area = curr->arch.hvm.vmx.msr_area;
struct vmx_msr_entry *msr;
if ( (msr = vmx_find_msr(curr, lbr_from_start, VMX_MSR_GUEST)) != NULL )
{
struct vcpu *curr = current;
- if ( curr->arch.hvm_vmx.lbr_flags & LBR_FIXUP_TSX )
+ if ( curr->arch.hvm.vmx.lbr_flags & LBR_FIXUP_TSX )
lbr_tsx_fixup();
- if ( curr->arch.hvm_vmx.lbr_flags & LBR_FIXUP_BDF14 )
+ if ( curr->arch.hvm.vmx.lbr_flags & LBR_FIXUP_BDF14 )
bdw_erratum_bdf14_fixup();
}
if ( !old_asid && new_asid )
{
/* VPID was disabled: now enabled. */
- curr->arch.hvm_vmx.secondary_exec_control |=
+ curr->arch.hvm.vmx.secondary_exec_control |=
SECONDARY_EXEC_ENABLE_VPID;
vmx_update_secondary_exec_control(curr);
}
else if ( old_asid && !new_asid )
{
/* VPID was enabled: now disabled. */
- curr->arch.hvm_vmx.secondary_exec_control &=
+ curr->arch.hvm.vmx.secondary_exec_control &=
~SECONDARY_EXEC_ENABLE_VPID;
vmx_update_secondary_exec_control(curr);
}
}
out:
- if ( unlikely(curr->arch.hvm_vmx.lbr_flags & LBR_FIXUP_MASK) )
+ if ( unlikely(curr->arch.hvm.vmx.lbr_flags & LBR_FIXUP_MASK) )
lbr_fixup();
HVMTRACE_ND(VMENTRY, 0, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
gdprintk(XENLOG_ERR, "nest: allocation for vmread bitmap failed\n");
return -ENOMEM;
}
- v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap;
+ v->arch.hvm.vmx.vmread_bitmap = vmread_bitmap;
clear_domain_page(page_to_mfn(vmread_bitmap));
gdprintk(XENLOG_ERR, "nest: allocation for vmwrite bitmap failed\n");
return -ENOMEM;
}
- v->arch.hvm_vmx.vmwrite_bitmap = vmwrite_bitmap;
+ v->arch.hvm.vmx.vmwrite_bitmap = vmwrite_bitmap;
vw = __map_domain_page(vmwrite_bitmap);
clear_page(vw);
* leak of L1 VMCS page.
*/
if ( nvcpu->nv_n1vmcx_pa )
- v->arch.hvm_vmx.vmcs_pa = nvcpu->nv_n1vmcx_pa;
+ v->arch.hvm.vmx.vmcs_pa = nvcpu->nv_n1vmcx_pa;
if ( nvcpu->nv_n2vmcx_pa )
{
xfree(item);
}
- if ( v->arch.hvm_vmx.vmread_bitmap )
+ if ( v->arch.hvm.vmx.vmread_bitmap )
{
- free_domheap_page(v->arch.hvm_vmx.vmread_bitmap);
- v->arch.hvm_vmx.vmread_bitmap = NULL;
+ free_domheap_page(v->arch.hvm.vmx.vmread_bitmap);
+ v->arch.hvm.vmx.vmread_bitmap = NULL;
}
- if ( v->arch.hvm_vmx.vmwrite_bitmap )
+ if ( v->arch.hvm.vmx.vmwrite_bitmap )
{
- free_domheap_page(v->arch.hvm_vmx.vmwrite_bitmap);
- v->arch.hvm_vmx.vmwrite_bitmap = NULL;
+ free_domheap_page(v->arch.hvm.vmx.vmwrite_bitmap);
+ v->arch.hvm.vmx.vmwrite_bitmap = NULL;
}
}
hvm_unmap_guest_frame(nvcpu->nv_vvmcx, 1);
nvcpu->nv_vvmcx = NULL;
nvcpu->nv_vvmcxaddr = INVALID_PADDR;
- v->arch.hvm_vmx.vmcs_shadow_maddr = 0;
+ v->arch.hvm.vmx.vmcs_shadow_maddr = 0;
for (i=0; i<2; i++) {
if ( nvmx->iobitmap[i] ) {
hvm_unmap_guest_frame(nvmx->iobitmap[i], 1);
(get_vvmcs(v, CR4_READ_SHADOW) & cr_gh_mask);
__vmwrite(CR4_READ_SHADOW, cr_read_shadow);
/* Add the nested host mask to the one set by vmx_update_guest_cr. */
- v->arch.hvm_vmx.cr4_host_mask |= cr_gh_mask;
- __vmwrite(CR4_GUEST_HOST_MASK, v->arch.hvm_vmx.cr4_host_mask);
+ v->arch.hvm.vmx.cr4_host_mask |= cr_gh_mask;
+ __vmwrite(CR4_GUEST_HOST_MASK, v->arch.hvm.vmx.cr4_host_mask);
/* TODO: CR3 target control */
}
static void nvmx_set_vmcs_pointer(struct vcpu *v, struct vmcs_struct *vvmcs)
{
- paddr_t vvmcs_maddr = v->arch.hvm_vmx.vmcs_shadow_maddr;
+ paddr_t vvmcs_maddr = v->arch.hvm.vmx.vmcs_shadow_maddr;
__vmpclear(vvmcs_maddr);
vvmcs->vmcs_revision_id |= VMCS_RID_TYPE_MASK;
__vmwrite(VMCS_LINK_POINTER, vvmcs_maddr);
- __vmwrite(VMREAD_BITMAP, page_to_maddr(v->arch.hvm_vmx.vmread_bitmap));
- __vmwrite(VMWRITE_BITMAP, page_to_maddr(v->arch.hvm_vmx.vmwrite_bitmap));
+ __vmwrite(VMREAD_BITMAP, page_to_maddr(v->arch.hvm.vmx.vmread_bitmap));
+ __vmwrite(VMWRITE_BITMAP, page_to_maddr(v->arch.hvm.vmx.vmwrite_bitmap));
}
static void nvmx_clear_vmcs_pointer(struct vcpu *v, struct vmcs_struct *vvmcs)
{
- paddr_t vvmcs_maddr = v->arch.hvm_vmx.vmcs_shadow_maddr;
+ paddr_t vvmcs_maddr = v->arch.hvm.vmx.vmcs_shadow_maddr;
__vmpclear(vvmcs_maddr);
vvmcs->vmcs_revision_id &= ~VMCS_RID_TYPE_MASK;
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
unsigned long lm_l1, lm_l2;
- vmx_vmcs_switch(v->arch.hvm_vmx.vmcs_pa, nvcpu->nv_n2vmcx_pa);
+ vmx_vmcs_switch(v->arch.hvm.vmx.vmcs_pa, nvcpu->nv_n2vmcx_pa);
nestedhvm_vcpu_enter_guestmode(v);
nvcpu->nv_vmentry_pending = 0;
regs->rflags = get_vvmcs(v, GUEST_RFLAGS);
/* updating host cr0 to sync TS bit */
- __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
+ __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
/* Set up virtual EPT for the L2 guest. */
if ( nestedhvm_paging_mode_hap(v) )
if ( !(__n2_exec_control(v) & CPU_BASED_CR3_LOAD_EXITING) )
shadow_to_vvmcs(v, GUEST_CR3);
- if ( v->arch.hvm_vmx.cr4_host_mask != ~0UL )
+ if ( v->arch.hvm.vmx.cr4_host_mask != ~0UL )
/* Only need to update nested GUEST_CR4 if not all bits are trapped. */
set_vvmcs(v, GUEST_CR4, v->arch.hvm.guest_cr[4]);
}
/* This will clear current pCPU bit in p2m->dirty_cpumask */
np2m_schedule(NP2M_SCHEDLE_OUT);
- vmx_vmcs_switch(v->arch.hvm_vmx.vmcs_pa, nvcpu->nv_n1vmcx_pa);
+ vmx_vmcs_switch(v->arch.hvm.vmx.vmcs_pa, nvcpu->nv_n1vmcx_pa);
nestedhvm_vcpu_exit_guestmode(v);
nvcpu->nv_vmexit_pending = 0;
regs->rflags = X86_EFLAGS_MBS;
/* updating host cr0 to sync TS bit */
- __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
+ __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
if ( cpu_has_vmx_virtual_intr_delivery )
nvmx_update_apicv(v);
* `fork' the host vmcs to shadow_vmcs
* vmcs_lock is not needed since we are on current
*/
- nvcpu->nv_n1vmcx_pa = v->arch.hvm_vmx.vmcs_pa;
- __vmpclear(v->arch.hvm_vmx.vmcs_pa);
+ nvcpu->nv_n1vmcx_pa = v->arch.hvm.vmx.vmcs_pa;
+ __vmpclear(v->arch.hvm.vmx.vmcs_pa);
copy_domain_page(_mfn(PFN_DOWN(nvcpu->nv_n2vmcx_pa)),
- _mfn(PFN_DOWN(v->arch.hvm_vmx.vmcs_pa)));
- __vmptrld(v->arch.hvm_vmx.vmcs_pa);
- v->arch.hvm_vmx.launched = 0;
+ _mfn(PFN_DOWN(v->arch.hvm.vmx.vmcs_pa)));
+ __vmptrld(v->arch.hvm.vmx.vmcs_pa);
+ v->arch.hvm.vmx.launched = 0;
vmsucceed(regs);
return X86EMUL_OKAY;
}
launched = vvmcs_launched(&nvmx->launched_list,
- PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr));
+ PFN_DOWN(v->arch.hvm.vmx.vmcs_shadow_maddr));
if ( !launched )
{
vmfail_valid(regs, VMX_INSN_VMRESUME_NONLAUNCHED_VMCS);
}
launched = vvmcs_launched(&nvmx->launched_list,
- PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr));
+ PFN_DOWN(v->arch.hvm.vmx.vmcs_shadow_maddr));
if ( launched )
{
vmfail_valid(regs, VMX_INSN_VMLAUNCH_NONCLEAR_VMCS);
if ( rc == X86EMUL_OKAY )
{
if ( set_vvmcs_launched(&nvmx->launched_list,
- PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr)) < 0 )
+ PFN_DOWN(v->arch.hvm.vmx.vmcs_shadow_maddr)) < 0 )
return X86EMUL_UNHANDLEABLE;
}
}
}
nvcpu->nv_vvmcx = vvmcx;
nvcpu->nv_vvmcxaddr = gpa;
- v->arch.hvm_vmx.vmcs_shadow_maddr =
+ v->arch.hvm.vmx.vmcs_shadow_maddr =
mfn_to_maddr(domain_page_map_to_mfn(vvmcx));
}
else
if ( cpu_has_vmx_vmcs_shadowing )
nvmx_clear_vmcs_pointer(v, nvcpu->nv_vvmcx);
clear_vvmcs_launched(&nvmx->launched_list,
- PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr));
+ PFN_DOWN(v->arch.hvm.vmx.vmcs_shadow_maddr));
nvmx_purge_vvmcs(v);
}
else
case MSR_IA32_VMX_BASIC:
{
const struct vmcs_struct *vmcs =
- map_domain_page(_mfn(PFN_DOWN(v->arch.hvm_vmx.vmcs_pa)));
+ map_domain_page(_mfn(PFN_DOWN(v->arch.hvm.vmx.vmcs_pa)));
data = (host_data & (~0ul << 32)) |
(vmcs->vmcs_revision_id & 0x7fffffff);
struct vcpu *v;
for_each_vcpu ( p2m->domain, v )
- v->arch.hvm_vmx.ept_spurious_misconfig = 1;
+ v->arch.hvm.vmx.ept_spurious_misconfig = 1;
}
return rc;
p2m_lock(p2m);
- spurious = curr->arch.hvm_vmx.ept_spurious_misconfig;
+ spurious = curr->arch.hvm.vmx.ept_spurious_misconfig;
rc = resolve_misconfig(p2m, PFN_DOWN(gpa));
- curr->arch.hvm_vmx.ept_spurious_misconfig = 0;
+ curr->arch.hvm.vmx.ept_spurious_misconfig = 0;
p2m_unlock(p2m);
DEFINE(_VGCF_syscall_disables_events, _VGCF_syscall_disables_events);
BLANK();
- OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
- OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
+ OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm.svm.vmcb_pa);
+ OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm.svm.vmcb);
BLANK();
- OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
- OFFSET(VCPU_vmx_realmode, struct vcpu, arch.hvm_vmx.vmx_realmode);
- OFFSET(VCPU_vmx_emulate, struct vcpu, arch.hvm_vmx.vmx_emulate);
- OFFSET(VCPU_vm86_seg_mask, struct vcpu, arch.hvm_vmx.vm86_segment_mask);
+ OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm.vmx.launched);
+ OFFSET(VCPU_vmx_realmode, struct vcpu, arch.hvm.vmx.vmx_realmode);
+ OFFSET(VCPU_vmx_emulate, struct vcpu, arch.hvm.vmx.vmx_emulate);
+ OFFSET(VCPU_vm86_seg_mask, struct vcpu, arch.hvm.vmx.vm86_segment_mask);
OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm.guest_cr[2]);
BLANK();
/* Use interrupt posting if it is supported. */
if ( iommu_intpost )
- pi_update_irte(vcpu ? &vcpu->arch.hvm_vmx.pi_desc : NULL,
+ pi_update_irte(vcpu ? &vcpu->arch.hvm.vmx.pi_desc : NULL,
info, pirq_dpci->gmsi.gvec);
if ( pt_irq_bind->u.msi.gflags & XEN_DOMCTL_VMSI_X86_UNMASKED )
void update_guest_memory_policy(struct vcpu *v,
struct guest_memory_policy *policy);
-/* Shorthands to improve code legibility. */
-#define hvm_vmx hvm.u.vmx
-#define hvm_svm hvm.u.svm
-
bool update_runstate_area(struct vcpu *);
bool update_secondary_system_time(struct vcpu *,
struct vcpu_time_info *);
{
#if 0
/* Optimization? */
- svm_invlpga(g_vaddr, v->arch.hvm_svm.vmcb->guest_asid);
+ svm_invlpga(g_vaddr, v->arch.hvm.svm.vmcb->guest_asid);
#endif
/* Safe fallback. Take a new ASID. */
union {
struct vmx_vcpu vmx;
struct svm_vcpu svm;
- } u;
+ };
struct tasklet assert_evtchn_irq_tasklet;
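The hunk above is what the rename rests on: once the vmx/svm union loses its name, its members behave as direct members of the containing structure, so spelling out the intermediate u member (and keeping the hvm_vmx/hvm_svm shorthands removed a few hunks earlier) is no longer needed. A minimal sketch with simplified, assumed field types, not the real Xen structures:

/* Anonymous-union member access: demo types only. */
struct vmx_vcpu_demo { unsigned long host_cr0; };
struct svm_vcpu_demo { unsigned long vmcb_pa; };

struct hvm_vcpu_demo {
    unsigned long guest_cr[5];
    union {                       /* unnamed: no "u" member to spell out */
        struct vmx_vcpu_demo vmx;
        struct svm_vcpu_demo svm;
    };
};

static unsigned long read_host_cr0(const struct hvm_vcpu_demo *hvm)
{
    return hvm->vmx.host_cr0;     /* previously hvm->u.vmx.host_cr0 */
}

int main(void)
{
    struct hvm_vcpu_demo hvm = { { 0 } };

    hvm.vmx.host_cr0 = 0x80050033UL;
    return read_host_cr0(&hvm) != 0x80050033UL;   /* exits 0 on success */
}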
#define cpu_has_vmx_unrestricted_guest \
(vmx_secondary_exec_control & SECONDARY_EXEC_UNRESTRICTED_GUEST)
#define vmx_unrestricted_guest(v) \
- ((v)->arch.hvm_vmx.secondary_exec_control & \
+ ((v)->arch.hvm.vmx.secondary_exec_control & \
SECONDARY_EXEC_UNRESTRICTED_GUEST)
#define cpu_has_vmx_ple \
(vmx_secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)