The trailing _vcpu suffix is redundant, but adds to code volume. Drop it.
Reflow lines as appropriate. No functional change.
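For illustration, each call site changes mechanically from the old spelling to the new one; a representative example taken from the hunks below:

    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;   /* before */
    v->arch.hvm.guest_cr[0] = X86_CR0_ET;        /* after  */

The shorter prefix is also what allows several previously wrapped expressions to be reflowed onto a single line.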
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
hvm_get_segment_register(sampled, x86_seg_ss, &seg);
r->ss = seg.sel;
r->cpl = seg.dpl;
- if ( !(sampled->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
+ if ( !(sampled->arch.hvm.guest_cr[0] & X86_CR0_PE) )
*flags |= PMU_SAMPLE_REAL;
}
}
if ( is_hvm_domain(d) )
{
/* OSXSAVE clear in policy. Fast-forward CR4 back in. */
- if ( v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE )
+ if ( v->arch.hvm.guest_cr[4] & X86_CR4_OSXSAVE )
res->c |= cpufeat_mask(X86_FEATURE_OSXSAVE);
}
else /* PV domain */
/* OSPKE clear in policy. Fast-forward CR4 back in. */
if ( (is_pv_domain(d)
? v->arch.pv.ctrlreg[4]
- : v->arch.hvm_vcpu.guest_cr[4]) & X86_CR4_PKE )
+ : v->arch.hvm.guest_cr[4]) & X86_CR4_PKE )
res->c |= cpufeat_mask(X86_FEATURE_OSPKE);
break;
}
BUG();
if ( cpu_has_xsaves && is_hvm_vcpu(n) )
- set_msr_xss(n->arch.hvm_vcpu.msr_xss);
+ set_msr_xss(n->arch.hvm.msr_xss);
}
vcpu_restore_fpu_nonlazy(n, false);
nd->arch.ctxt_switch->to(n);
np2m_schedule(NP2M_SCHEDLE_OUT);
}
- if ( is_hvm_domain(prevd) && !list_empty(&prev->arch.hvm_vcpu.tm_list) )
+ if ( is_hvm_domain(prevd) && !list_empty(&prev->arch.hvm.tm_list) )
pt_save_timer(prev);
local_irq_disable();
{
struct segment_register sreg;
- c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
- c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
- c.nat->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3];
- c.nat->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4];
+ c.nat->ctrlreg[0] = v->arch.hvm.guest_cr[0];
+ c.nat->ctrlreg[2] = v->arch.hvm.guest_cr[2];
+ c.nat->ctrlreg[3] = v->arch.hvm.guest_cr[3];
+ c.nat->ctrlreg[4] = v->arch.hvm.guest_cr[4];
hvm_get_segment_register(v, x86_seg_cs, &sreg);
c.nat->user_regs.cs = sreg.sel;
hvm_get_segment_register(v, x86_seg_ss, &sreg);
void hvm_asid_flush_vcpu(struct vcpu *v)
{
- hvm_asid_flush_vcpu_asid(&v->arch.hvm_vcpu.n1asid);
+ hvm_asid_flush_vcpu_asid(&v->arch.hvm.n1asid);
hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
}
if ( data->vcpuid >= d->max_vcpus || !(v = d->vcpu[data->vcpuid]) )
return -EINVAL;
- if ( cmpxchg(&v->arch.hvm_vcpu.inject_event.vector,
+ if ( cmpxchg(&v->arch.hvm.inject_event.vector,
HVM_EVENT_VECTOR_UNSET, HVM_EVENT_VECTOR_UPDATING) !=
HVM_EVENT_VECTOR_UNSET )
return -EBUSY;
- v->arch.hvm_vcpu.inject_event.type = data->type;
- v->arch.hvm_vcpu.inject_event.insn_len = data->insn_len;
- v->arch.hvm_vcpu.inject_event.error_code = data->error_code;
- v->arch.hvm_vcpu.inject_event.cr2 = data->cr2;
+ v->arch.hvm.inject_event.type = data->type;
+ v->arch.hvm.inject_event.insn_len = data->insn_len;
+ v->arch.hvm.inject_event.error_code = data->error_code;
+ v->arch.hvm.inject_event.cr2 = data->cr2;
smp_wmb();
- v->arch.hvm_vcpu.inject_event.vector = data->vector;
+ v->arch.hvm.inject_event.vector = data->vector;
return 0;
}
uregs->rip = regs->eip;
uregs->rflags = regs->eflags;
- v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
- v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
- v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
- v->arch.hvm_vcpu.guest_efer = regs->efer;
+ v->arch.hvm.guest_cr[0] = regs->cr0;
+ v->arch.hvm.guest_cr[3] = regs->cr3;
+ v->arch.hvm.guest_cr[4] = regs->cr4;
+ v->arch.hvm.guest_efer = regs->efer;
}
break;
uregs->rip = regs->rip;
uregs->rflags = regs->rflags;
- v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
- v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
- v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
- v->arch.hvm_vcpu.guest_efer = regs->efer;
+ v->arch.hvm.guest_cr[0] = regs->cr0;
+ v->arch.hvm.guest_cr[3] = regs->cr3;
+ v->arch.hvm.guest_cr[4] = regs->cr4;
+ v->arch.hvm.guest_efer = regs->efer;
#define SEG(l, a) (struct segment_register){ 0, { a }, l, 0 }
cs = SEG(~0u, 0xa9b); /* 64bit code segment. */
}
- if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
- v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
+ if ( v->arch.hvm.guest_efer & EFER_LME )
+ v->arch.hvm.guest_efer |= EFER_LMA;
- if ( v->arch.hvm_vcpu.guest_cr[4] & ~hvm_cr4_guest_valid_bits(d, false) )
+ if ( v->arch.hvm.guest_cr[4] & ~hvm_cr4_guest_valid_bits(d, false) )
{
gprintk(XENLOG_ERR, "Bad CR4 value: %#016lx\n",
- v->arch.hvm_vcpu.guest_cr[4]);
+ v->arch.hvm.guest_cr[4]);
return -EINVAL;
}
- errstr = hvm_efer_valid(v, v->arch.hvm_vcpu.guest_efer, -1);
+ errstr = hvm_efer_valid(v, v->arch.hvm.guest_efer, -1);
if ( errstr )
{
gprintk(XENLOG_ERR, "Bad EFER value (%#016lx): %s\n",
- v->arch.hvm_vcpu.guest_efer, errstr);
+ v->arch.hvm.guest_efer, errstr);
return -EINVAL;
}
{
/* Shadow-mode CR3 change. Check PDBR and update refcounts. */
struct page_info *page = get_page_from_gfn(v->domain,
- v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT,
+ v->arch.hvm.guest_cr[3] >> PAGE_SHIFT,
NULL, P2M_ALLOC);
if ( !page )
{
gprintk(XENLOG_ERR, "Invalid CR3: %#lx\n",
- v->arch.hvm_vcpu.guest_cr[3]);
+ v->arch.hvm.guest_cr[3]);
return -EINVAL;
}
hvm_set_segment_register(v, x86_seg_tr, &tr);
/* Sync AP's TSC with BSP's. */
- v->arch.hvm_vcpu.cache_tsc_offset =
- d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
- hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+ v->arch.hvm.cache_tsc_offset =
+ d->vcpu[0]->arch.hvm.cache_tsc_offset;
+ hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset,
d->arch.hvm.sync_tsc);
paging_update_paging_modes(v);
{
struct vcpu *curr = current;
struct domain *currd = curr->domain;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
ioreq_t p = {
.type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
.addr = addr,
ASSERT(rc != X86EMUL_UNIMPLEMENTED);
if ( rc == X86EMUL_OKAY )
- v->arch.hvm_vcpu.hvm_io.mmio_retry = (count < *reps);
+ v->arch.hvm.hvm_io.mmio_retry = (count < *reps);
*reps = count;
*reps = min_t(unsigned long, *reps, 4096);
/* With no paging it's easy: linear == physical. */
- if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG) )
+ if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PG) )
{
*paddr = addr;
return X86EMUL_OKAY;
unsigned long gla, unsigned int size, uint8_t dir, void *buffer,
uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt, bool_t known_gpfn)
{
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
unsigned long offset = gla & ~PAGE_MASK;
struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(vio, gla, dir);
unsigned int chunk, buffer_offset = 0;
pagefault_info_t pfinfo;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
int rc;
if ( is_x86_system_segment(seg) )
struct vcpu *curr = current;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
int rc;
void *mapping;
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
int rc;
void *mapping;
struct vcpu *curr = current;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
int rc;
void *mapping = NULL;
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
unsigned long saddr, daddr, bytes;
paddr_t sgpa, dgpa;
uint32_t pfec = PFEC_page_present;
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
unsigned long addr, bytes;
paddr_t gpa;
p2m_type_t p2mt;
case 2:
case 3:
case 4:
- *val = current->arch.hvm_vcpu.guest_cr[reg];
+ *val = current->arch.hvm.guest_cr[reg];
HVMTRACE_LONG_2D(CR_READ, reg, TRC_PAR_LONG(*val));
return X86EMUL_OKAY;
default:
break;
case 2:
- current->arch.hvm_vcpu.guest_cr[2] = val;
+ current->arch.hvm.guest_cr[2] = val;
rc = X86EMUL_OKAY;
break;
const struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
struct vcpu *curr = current;
uint32_t new_intr_shadow;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
int rc;
hvm_emulate_init_per_insn(hvmemul_ctxt, vio->mmio_insn,
break;
case EMUL_KIND_SET_CONTEXT_INSN: {
struct vcpu *curr = current;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
BUILD_BUG_ON(sizeof(vio->mmio_insn) !=
sizeof(curr->arch.vm_event->emul.insn.data));
return 0;
write_lock(&hp->lock);
- guest_time = (v->arch.hvm_vcpu.guest_time ?: hvm_get_guest_time(v)) /
+ guest_time = (v->arch.hvm.guest_time ?: hvm_get_guest_time(v)) /
STIME_PER_HPET_TICK;
/* Write the proper value into the main counter */
void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat)
{
if ( !hvm_funcs.get_guest_pat(v, guest_pat) )
- *guest_pat = v->arch.hvm_vcpu.pat_cr;
+ *guest_pat = v->arch.hvm.pat_cr;
}
int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat)
}
if ( !hvm_funcs.set_guest_pat(v, guest_pat) )
- v->arch.hvm_vcpu.pat_cr = guest_pat;
+ v->arch.hvm.pat_cr = guest_pat;
return 1;
}
}
delta_tsc = guest_tsc - tsc;
- v->arch.hvm_vcpu.cache_tsc_offset = delta_tsc;
+ v->arch.hvm.cache_tsc_offset = delta_tsc;
- hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, at_tsc);
+ hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset, at_tsc);
}
#define hvm_set_guest_tsc(v, t) hvm_set_guest_tsc_fixed(v, t, 0)
static void hvm_set_guest_tsc_msr(struct vcpu *v, u64 guest_tsc)
{
- uint64_t tsc_offset = v->arch.hvm_vcpu.cache_tsc_offset;
+ uint64_t tsc_offset = v->arch.hvm.cache_tsc_offset;
hvm_set_guest_tsc(v, guest_tsc);
- v->arch.hvm_vcpu.msr_tsc_adjust += v->arch.hvm_vcpu.cache_tsc_offset
- - tsc_offset;
+ v->arch.hvm.msr_tsc_adjust += v->arch.hvm.cache_tsc_offset - tsc_offset;
}
static void hvm_set_guest_tsc_adjust(struct vcpu *v, u64 tsc_adjust)
{
- v->arch.hvm_vcpu.cache_tsc_offset += tsc_adjust
- - v->arch.hvm_vcpu.msr_tsc_adjust;
- hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
- v->arch.hvm_vcpu.msr_tsc_adjust = tsc_adjust;
+ v->arch.hvm.cache_tsc_offset += tsc_adjust - v->arch.hvm.msr_tsc_adjust;
+ hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset, 0);
+ v->arch.hvm.msr_tsc_adjust = tsc_adjust;
}
u64 hvm_get_guest_tsc_fixed(struct vcpu *v, uint64_t at_tsc)
tsc = hvm_scale_tsc(v->domain, tsc);
}
- return tsc + v->arch.hvm_vcpu.cache_tsc_offset;
+ return tsc + v->arch.hvm.cache_tsc_offset;
}
void hvm_migrate_timers(struct vcpu *v)
static bool hvm_get_pending_event(struct vcpu *v, struct x86_event *info)
{
- info->cr2 = v->arch.hvm_vcpu.guest_cr[2];
+ info->cr2 = v->arch.hvm.guest_cr[2];
return hvm_funcs.get_pending_event(v, info);
}
hvm_vm_event_do_resume(v);
/* Inject pending hw/sw event */
- if ( v->arch.hvm_vcpu.inject_event.vector >= 0 )
+ if ( v->arch.hvm.inject_event.vector >= 0 )
{
smp_rmb();
if ( !hvm_event_pending(v) )
- hvm_inject_event(&v->arch.hvm_vcpu.inject_event);
+ hvm_inject_event(&v->arch.hvm.inject_event);
- v->arch.hvm_vcpu.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
+ v->arch.hvm.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
}
if ( unlikely(v->arch.vm_event) && v->arch.monitor.next_interrupt_enabled )
for_each_vcpu ( d, v )
{
- ctxt.tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
+ ctxt.tsc_adjust = v->arch.hvm.msr_tsc_adjust;
err = hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
if ( err )
break;
if ( hvm_load_entry(TSC_ADJUST, h, &ctxt) != 0 )
return -EINVAL;
- v->arch.hvm_vcpu.msr_tsc_adjust = ctxt.tsc_adjust;
+ v->arch.hvm.msr_tsc_adjust = ctxt.tsc_adjust;
return 0;
}
if ( hvm_funcs.tsc_scaling.setup )
hvm_funcs.tsc_scaling.setup(v);
- v->arch.hvm_vcpu.msr_tsc_aux = ctxt.msr_tsc_aux;
+ v->arch.hvm.msr_tsc_aux = ctxt.msr_tsc_aux;
hvm_set_guest_tsc_fixed(v, ctxt.tsc, d->arch.hvm.sync_tsc);
hvm_asid_flush_vcpu(v);
- spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
- INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
+ spin_lock_init(&v->arch.hvm.tm_lock);
+ INIT_LIST_HEAD(&v->arch.hvm.tm_list);
rc = hvm_vcpu_cacheattr_init(v); /* teardown: vcpu_cacheattr_destroy */
if ( rc != 0 )
goto fail3;
softirq_tasklet_init(
- &v->arch.hvm_vcpu.assert_evtchn_irq_tasklet,
+ &v->arch.hvm.assert_evtchn_irq_tasklet,
(void(*)(unsigned long))hvm_assert_evtchn_irq,
(unsigned long)v);
- v->arch.hvm_vcpu.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
+ v->arch.hvm.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
if ( rc != 0 )
free_compat_arg_xlat(v);
- tasklet_kill(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
+ tasklet_kill(&v->arch.hvm.assert_evtchn_irq_tasklet);
hvm_funcs.vcpu_destroy(v);
vlapic_destroy(v);
{
printk(XENLOG_G_WARNING
"%pv: Invalid EFER update: %#"PRIx64" -> %#"PRIx64" - %s\n",
- v, v->arch.hvm_vcpu.guest_efer, value, errstr);
+ v, v->arch.hvm.guest_efer, value, errstr);
return X86EMUL_EXCEPTION;
}
- if ( ((value ^ v->arch.hvm_vcpu.guest_efer) & EFER_LME) &&
+ if ( ((value ^ v->arch.hvm.guest_efer) & EFER_LME) &&
hvm_paging_enabled(v) )
{
gdprintk(XENLOG_WARNING,
return X86EMUL_EXCEPTION;
}
- if ( (value & EFER_LME) && !(v->arch.hvm_vcpu.guest_efer & EFER_LME) )
+ if ( (value & EFER_LME) && !(v->arch.hvm.guest_efer & EFER_LME) )
{
struct segment_register cs;
if ( nestedhvm_enabled(v->domain) && cpu_has_svm &&
((value & EFER_SVME) == 0 ) &&
- ((value ^ v->arch.hvm_vcpu.guest_efer) & EFER_SVME) )
+ ((value ^ v->arch.hvm.guest_efer) & EFER_SVME) )
{
/* Cleared EFER.SVME: Flush all nestedp2m tables */
p2m_flush_nestedp2m(v->domain);
nestedhvm_vcpu_reset(v);
}
- value |= v->arch.hvm_vcpu.guest_efer & EFER_LMA;
- v->arch.hvm_vcpu.guest_efer = value;
+ value |= v->arch.hvm.guest_efer & EFER_LMA;
+ v->arch.hvm.guest_efer = value;
hvm_update_guest_efer(v);
return X86EMUL_OKAY;
{
if ( (vs == v) || !vs->is_initialised )
continue;
- if ( (vs->arch.hvm_vcpu.cache_mode == NO_FILL_CACHE_MODE) ||
+ if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
mtrr_pat_not_equal(vs, v) )
return 0;
}
case 2:
case 3:
case 4:
- val = curr->arch.hvm_vcpu.guest_cr[cr];
+ val = curr->arch.hvm.guest_cr[cr];
break;
case 8:
val = (vlapic_get_reg(vcpu_vlapic(curr), APIC_TASKPRI) & 0xf0) >> 4;
{
/* Entering no fill cache mode. */
spin_lock(&v->domain->arch.hvm.uc_lock);
- v->arch.hvm_vcpu.cache_mode = NO_FILL_CACHE_MODE;
+ v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
if ( !v->domain->arch.hvm.is_in_uc_mode )
{
spin_unlock(&v->domain->arch.hvm.uc_lock);
}
else if ( !(value & X86_CR0_CD) &&
- (v->arch.hvm_vcpu.cache_mode == NO_FILL_CACHE_MODE) )
+ (v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
{
/* Exit from no fill cache mode. */
spin_lock(&v->domain->arch.hvm.uc_lock);
- v->arch.hvm_vcpu.cache_mode = NORMAL_CACHE_MODE;
+ v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
if ( domain_exit_uc_mode(v) )
hvm_set_uc_mode(v, 0);
static void hvm_update_cr(struct vcpu *v, unsigned int cr, unsigned long value)
{
- v->arch.hvm_vcpu.guest_cr[cr] = value;
+ v->arch.hvm.guest_cr[cr] = value;
nestedhvm_set_cr(v, cr, value);
hvm_update_guest_cr(v, cr);
}
{
struct vcpu *v = current;
struct domain *d = v->domain;
- unsigned long gfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
+ unsigned long gfn, old_value = v->arch.hvm.guest_cr[0];
struct page_info *page;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
{
- if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
+ if ( v->arch.hvm.guest_efer & EFER_LME )
{
- if ( !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) &&
+ if ( !(v->arch.hvm.guest_cr[4] & X86_CR4_PAE) &&
!nestedhvm_vmswitch_in_progress(v) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable");
return X86EMUL_EXCEPTION;
}
HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode");
- v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
+ v->arch.hvm.guest_efer |= EFER_LMA;
hvm_update_guest_efer(v);
}
if ( !paging_mode_hap(d) )
{
/* The guest CR3 must be pointing to the guest physical. */
- gfn = v->arch.hvm_vcpu.guest_cr[3]>>PAGE_SHIFT;
+ gfn = v->arch.hvm.guest_cr[3] >> PAGE_SHIFT;
page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
if ( !page )
{
gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx\n",
- v->arch.hvm_vcpu.guest_cr[3]);
+ v->arch.hvm.guest_cr[3]);
domain_crash(d);
return X86EMUL_UNHANDLEABLE;
}
v->arch.guest_table = pagetable_from_page(page);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
- v->arch.hvm_vcpu.guest_cr[3], mfn_x(page_to_mfn(page)));
+ v->arch.hvm.guest_cr[3], mfn_x(page_to_mfn(page)));
}
}
else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
/* When CR0.PG is cleared, LMA is cleared immediately. */
if ( hvm_long_mode_active(v) )
{
- v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
+ v->arch.hvm.guest_efer &= ~EFER_LMA;
hvm_update_guest_efer(v);
}
{
struct vcpu *v = current;
struct page_info *page;
- unsigned long old = v->arch.hvm_vcpu.guest_cr[3];
+ unsigned long old = v->arch.hvm.guest_cr[3];
bool noflush = false;
if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
}
if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) &&
- (value != v->arch.hvm_vcpu.guest_cr[3]) )
+ (value != v->arch.hvm.guest_cr[3]) )
{
/* Shadow-mode CR3 change. Check PDBR and update refcounts. */
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
}
- v->arch.hvm_vcpu.guest_cr[3] = value;
+ v->arch.hvm.guest_cr[3] = value;
paging_update_cr3(v, noflush);
return X86EMUL_OKAY;
}
}
- old_cr = v->arch.hvm_vcpu.guest_cr[4];
+ old_cr = v->arch.hvm.guest_cr[4];
if ( (value & X86_CR4_PCIDE) && !(old_cr & X86_CR4_PCIDE) &&
(!hvm_long_mode_active(v) ||
- (v->arch.hvm_vcpu.guest_cr[3] & 0xfff)) )
+ (v->arch.hvm.guest_cr[3] & 0xfff)) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Guest attempts to change CR4.PCIDE from "
"0 to 1 while either EFER.LMA=0 or CR3[11:0]!=000H");
*/
ASSERT(seg < x86_seg_none);
- if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
+ if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PE) ||
(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
{
/*
tr.type = 0xb; /* busy 32-bit tss */
hvm_set_segment_register(v, x86_seg_tr, &tr);
- v->arch.hvm_vcpu.guest_cr[0] |= X86_CR0_TS;
+ v->arch.hvm.guest_cr[0] |= X86_CR0_TS;
hvm_update_guest_cr(v, 0);
if ( (taskswitch_reason == TSW_iret ||
uint64_t *var_range_base, *fixed_range_base;
int ret;
- var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
- fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;
+ var_range_base = (uint64_t *)v->arch.hvm.mtrr.var_ranges;
+ fixed_range_base = (uint64_t *)v->arch.hvm.mtrr.fixed_ranges;
if ( (ret = guest_rdmsr(v, msr, msr_content)) != X86EMUL_UNHANDLEABLE )
return ret;
unsigned int index;
case MSR_EFER:
- *msr_content = v->arch.hvm_vcpu.guest_efer;
+ *msr_content = v->arch.hvm.guest_efer;
break;
case MSR_IA32_TSC:
break;
case MSR_IA32_TSC_ADJUST:
- *msr_content = v->arch.hvm_vcpu.msr_tsc_adjust;
+ *msr_content = v->arch.hvm.msr_tsc_adjust;
break;
case MSR_TSC_AUX:
case MSR_MTRRcap:
if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
- *msr_content = v->arch.hvm_vcpu.mtrr.mtrr_cap;
+ *msr_content = v->arch.hvm.mtrr.mtrr_cap;
break;
case MSR_MTRRdefType:
if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
- *msr_content = v->arch.hvm_vcpu.mtrr.def_type |
- MASK_INSR(v->arch.hvm_vcpu.mtrr.enabled, MTRRdefType_E) |
- MASK_INSR(v->arch.hvm_vcpu.mtrr.fixed_enabled,
+ *msr_content = v->arch.hvm.mtrr.def_type |
+ MASK_INSR(v->arch.hvm.mtrr.enabled, MTRRdefType_E) |
+ MASK_INSR(v->arch.hvm.mtrr.fixed_enabled,
MTRRdefType_FE);
break;
case MSR_MTRRfix64K_00000:
goto gp_fault;
index = msr - MSR_IA32_MTRR_PHYSBASE(0);
if ( (index / 2) >=
- MASK_EXTR(v->arch.hvm_vcpu.mtrr.mtrr_cap, MTRRcap_VCNT) )
+ MASK_EXTR(v->arch.hvm.mtrr.mtrr_cap, MTRRcap_VCNT) )
goto gp_fault;
*msr_content = var_range_base[index];
break;
case MSR_IA32_XSS:
if ( !d->arch.cpuid->xstate.xsaves )
goto gp_fault;
- *msr_content = v->arch.hvm_vcpu.msr_xss;
+ *msr_content = v->arch.hvm.msr_xss;
break;
case MSR_IA32_BNDCFGS:
break;
case MSR_TSC_AUX:
- v->arch.hvm_vcpu.msr_tsc_aux = (uint32_t)msr_content;
+ v->arch.hvm.msr_tsc_aux = (uint32_t)msr_content;
if ( cpu_has_rdtscp
&& (v->domain->arch.tsc_mode != TSC_MODE_PVRDTSCP) )
wrmsr_tsc_aux(msr_content);
case MSR_MTRRdefType:
if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
- if ( !mtrr_def_type_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
+ if ( !mtrr_def_type_msr_set(v->domain, &v->arch.hvm.mtrr,
msr_content) )
goto gp_fault;
break;
case MSR_MTRRfix64K_00000:
if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
- if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr, 0,
+ if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm.mtrr, 0,
msr_content) )
goto gp_fault;
break;
if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
index = msr - MSR_MTRRfix16K_80000 + 1;
- if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
+ if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm.mtrr,
index, msr_content) )
goto gp_fault;
break;
if ( !d->arch.cpuid->basic.mtrr )
goto gp_fault;
index = msr - MSR_MTRRfix4K_C0000 + 3;
- if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
+ if ( !mtrr_fix_range_msr_set(v->domain, &v->arch.hvm.mtrr,
index, msr_content) )
goto gp_fault;
break;
goto gp_fault;
index = msr - MSR_IA32_MTRR_PHYSBASE(0);
if ( ((index / 2) >=
- MASK_EXTR(v->arch.hvm_vcpu.mtrr.mtrr_cap, MTRRcap_VCNT)) ||
- !mtrr_var_range_msr_set(v->domain, &v->arch.hvm_vcpu.mtrr,
+ MASK_EXTR(v->arch.hvm.mtrr.mtrr_cap, MTRRcap_VCNT)) ||
+ !mtrr_var_range_msr_set(v->domain, &v->arch.hvm.mtrr,
msr, msr_content) )
goto gp_fault;
break;
/* No XSS features currently supported for guests. */
if ( !d->arch.cpuid->xstate.xsaves || msr_content != 0 )
goto gp_fault;
- v->arch.hvm_vcpu.msr_xss = msr_content;
+ v->arch.hvm.msr_xss = msr_content;
break;
case MSR_IA32_BNDCFGS:
if ( !paging_mode_hap(d) )
{
- if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
+ if ( v->arch.hvm.guest_cr[0] & X86_CR0_PG )
put_page(pagetable_get_page(v->arch.guest_table));
v->arch.guest_table = pagetable_null();
}
v->arch.user_regs.rip = ip;
memset(&v->arch.debugreg, 0, sizeof(v->arch.debugreg));
- v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
+ v->arch.hvm.guest_cr[0] = X86_CR0_ET;
hvm_update_guest_cr(v, 0);
- v->arch.hvm_vcpu.guest_cr[2] = 0;
+ v->arch.hvm.guest_cr[2] = 0;
hvm_update_guest_cr(v, 2);
- v->arch.hvm_vcpu.guest_cr[3] = 0;
+ v->arch.hvm.guest_cr[3] = 0;
hvm_update_guest_cr(v, 3);
- v->arch.hvm_vcpu.guest_cr[4] = 0;
+ v->arch.hvm.guest_cr[4] = 0;
hvm_update_guest_cr(v, 4);
- v->arch.hvm_vcpu.guest_efer = 0;
+ v->arch.hvm.guest_efer = 0;
hvm_update_guest_efer(v);
reg.sel = cs;
hvm_funcs.tsc_scaling.setup(v);
/* Sync AP's TSC with BSP's. */
- v->arch.hvm_vcpu.cache_tsc_offset =
- v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
- hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+ v->arch.hvm.cache_tsc_offset =
+ v->domain->vcpu[0]->arch.hvm.cache_tsc_offset;
+ hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset,
d->arch.hvm.sync_tsc);
- v->arch.hvm_vcpu.msr_tsc_adjust = 0;
+ v->arch.hvm.msr_tsc_adjust = 0;
paging_update_paging_modes(v);
printk(XENLOG_G_INFO "%pv: upcall vector %02x\n", v, op.vector);
- v->arch.hvm_vcpu.evtchn_upcall_vector = op.vector;
+ v->arch.hvm.evtchn_upcall_vector = op.vector;
hvm_assert_evtchn_irq(v);
return 0;
}
break;
rc = 0;
vcpu_pause(v);
- v->arch.hvm_vcpu.single_step =
+ v->arch.hvm.single_step =
(op == XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON);
vcpu_unpause(v); /* guest will latch new state */
break;
if ( !hvm_is_singlestep_supported() )
return;
- v->arch.hvm_vcpu.single_step = !v->arch.hvm_vcpu.single_step;
+ v->arch.hvm.single_step = !v->arch.hvm.single_step;
}
void hvm_domain_soft_reset(struct domain *d)
{
struct hvm_emulate_ctxt ctxt;
struct vcpu *curr = current;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
int rc;
hvm_emulate_init_once(&ctxt, validate, guest_cpu_user_regs());
bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
struct npfec access)
{
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
vio->mmio_access = access.gla_valid &&
access.kind == npfec_kind_with_gla
bool handle_pio(uint16_t port, unsigned int size, int dir)
{
struct vcpu *curr = current;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
unsigned long data;
int rc;
{
struct vcpu *curr = current;
const struct hvm_domain *hvm = &curr->domain->arch.hvm;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
struct g2m_ioport *g2m_ioport;
unsigned int start, end;
static int g2m_portio_read(const struct hvm_io_handler *handler,
uint64_t addr, uint32_t size, uint64_t *data)
{
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
const struct g2m_ioport *g2m_ioport = vio->g2m_ioport;
unsigned int mport = (addr - g2m_ioport->gport) + g2m_ioport->mport;
static int g2m_portio_write(const struct hvm_io_handler *handler,
uint64_t addr, uint32_t size, uint64_t data)
{
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
const struct g2m_ioport *g2m_ioport = vio->g2m_ioport;
unsigned int mport = (addr - g2m_ioport->gport) + g2m_ioport->mport;
static void hvm_io_assist(struct hvm_ioreq_vcpu *sv, uint64_t data)
{
struct vcpu *v = sv->vcpu;
- ioreq_t *ioreq = &v->arch.hvm_vcpu.hvm_io.io_req;
+ ioreq_t *ioreq = &v->arch.hvm.hvm_io.io_req;
if ( hvm_ioreq_needs_completion(ioreq) )
{
bool handle_hvm_io_completion(struct vcpu *v)
{
struct domain *d = v->domain;
- struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
struct hvm_ioreq_server *s;
enum hvm_io_completion io_completion;
unsigned int id;
{
if ( unlikely(in_irq() || !local_irq_is_enabled()) )
{
- tasklet_schedule(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
+ tasklet_schedule(&v->arch.hvm.assert_evtchn_irq_tasklet);
return;
}
- if ( v->arch.hvm_vcpu.evtchn_upcall_vector != 0 )
+ if ( v->arch.hvm.evtchn_upcall_vector != 0 )
{
- uint8_t vector = v->arch.hvm_vcpu.evtchn_upcall_vector;
+ uint8_t vector = v->arch.hvm.evtchn_upcall_vector;
vlapic_set_irq(vcpu_vlapic(v), vector, 0);
}
int hvm_vcpu_cacheattr_init(struct vcpu *v)
{
- struct mtrr_state *m = &v->arch.hvm_vcpu.mtrr;
+ struct mtrr_state *m = &v->arch.hvm.mtrr;
unsigned int num_var_ranges =
is_hardware_domain(v->domain) ? MASK_EXTR(mtrr_state.mtrr_cap,
MTRRcap_VCNT)
m->mtrr_cap = (1u << 10) | (1u << 8) | num_var_ranges;
- v->arch.hvm_vcpu.pat_cr =
+ v->arch.hvm.pat_cr =
((uint64_t)PAT_TYPE_WRBACK) | /* PAT0: WB */
((uint64_t)PAT_TYPE_WRTHROUGH << 8) | /* PAT1: WT */
((uint64_t)PAT_TYPE_UC_MINUS << 16) | /* PAT2: UC- */
void hvm_vcpu_cacheattr_destroy(struct vcpu *v)
{
- xfree(v->arch.hvm_vcpu.mtrr.var_ranges);
+ xfree(v->arch.hvm.mtrr.var_ranges);
}
/*
uint8_t guest_eff_mm_type;
uint8_t shadow_mtrr_type;
uint8_t pat_entry_value;
- uint64_t pat = v->arch.hvm_vcpu.pat_cr;
- struct mtrr_state *g = &v->arch.hvm_vcpu.mtrr;
+ uint64_t pat = v->arch.hvm.pat_cr;
+ struct mtrr_state *g = &v->arch.hvm.mtrr;
/* 1. Get the effective memory type of guest physical address,
* with the pair of guest MTRR and PAT
bool mtrr_pat_not_equal(const struct vcpu *vd, const struct vcpu *vs)
{
- const struct mtrr_state *md = &vd->arch.hvm_vcpu.mtrr;
- const struct mtrr_state *ms = &vs->arch.hvm_vcpu.mtrr;
+ const struct mtrr_state *md = &vd->arch.hvm.mtrr;
+ const struct mtrr_state *ms = &vs->arch.hvm.mtrr;
if ( md->enabled != ms->enabled )
return true;
}
/* Test PAT. */
- return vd->arch.hvm_vcpu.pat_cr != vs->arch.hvm_vcpu.pat_cr;
+ return vd->arch.hvm.pat_cr != vs->arch.hvm.pat_cr;
}
struct hvm_mem_pinned_cacheattr_range {
/* save mtrr&pat */
for_each_vcpu(d, v)
{
- const struct mtrr_state *mtrr_state = &v->arch.hvm_vcpu.mtrr;
+ const struct mtrr_state *mtrr_state = &v->arch.hvm.mtrr;
struct hvm_hw_mtrr hw_mtrr = {
.msr_mtrr_def_type = mtrr_state->def_type |
MASK_INSR(mtrr_state->fixed_enabled,
return -EINVAL;
}
- mtrr_state = &v->arch.hvm_vcpu.mtrr;
+ mtrr_state = &v->arch.hvm.mtrr;
hvm_set_guest_pat(v, hw_mtrr.msr_pat_cr);
return -1;
gmtrr_mtype = is_hvm_domain(d) && v ?
- get_mtrr_type(&v->arch.hvm_vcpu.mtrr,
+ get_mtrr_type(&v->arch.hvm.mtrr,
gfn << PAGE_SHIFT, order) :
MTRR_TYPE_WRBACK;
hmtrr_mtype = get_mtrr_type(&mtrr_state, mfn_x(mfn) << PAGE_SHIFT, order);
* Update the counter to the guest's current time. Make sure it only
* goes forwards.
*/
- x = (((s->vcpu->arch.hvm_vcpu.guest_time ?: hvm_get_guest_time(s->vcpu)) -
+ x = (((s->vcpu->arch.hvm.guest_time ?: hvm_get_guest_time(s->vcpu)) -
s->last_gtime) * s->scale) >> 32;
if ( x < 1UL<<31 )
acpi->tmr_val += x;
struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
struct hvm_vcpu_asid *p_asid =
nestedhvm_vcpu_in_guestmode(curr)
- ? &vcpu_nestedhvm(curr).nv_n2asid : &curr->arch.hvm_vcpu.n1asid;
+ ? &vcpu_nestedhvm(curr).nv_n2asid : &curr->arch.hvm.n1asid;
bool_t need_flush = hvm_asid_handle_vmenter(p_asid);
/* ASID 0 indicates that ASIDs are disabled. */
/* Save shadowed values. This ensures that the l1 guest
* cannot override them to break out. */
- n1vmcb->_efer = v->arch.hvm_vcpu.guest_efer;
- n1vmcb->_cr0 = v->arch.hvm_vcpu.guest_cr[0];
- n1vmcb->_cr2 = v->arch.hvm_vcpu.guest_cr[2];
- n1vmcb->_cr4 = v->arch.hvm_vcpu.guest_cr[4];
+ n1vmcb->_efer = v->arch.hvm.guest_efer;
+ n1vmcb->_cr0 = v->arch.hvm.guest_cr[0];
+ n1vmcb->_cr2 = v->arch.hvm.guest_cr[2];
+ n1vmcb->_cr4 = v->arch.hvm.guest_cr[4];
/* Remember the host interrupt flag */
svm->ns_hostflags.fields.rflagsif =
v->arch.hvm_svm.vmcb_pa = nv->nv_n1vmcx_pa;
/* EFER */
- v->arch.hvm_vcpu.guest_efer = n1vmcb->_efer;
+ v->arch.hvm.guest_efer = n1vmcb->_efer;
rc = hvm_set_efer(n1vmcb->_efer);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
gdprintk(XENLOG_ERR, "hvm_set_efer failed, rc: %u\n", rc);
/* CR4 */
- v->arch.hvm_vcpu.guest_cr[4] = n1vmcb->_cr4;
+ v->arch.hvm.guest_cr[4] = n1vmcb->_cr4;
rc = hvm_set_cr4(n1vmcb->_cr4, 1);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
/* CR0 */
nestedsvm_fpu_vmexit(n1vmcb, n2vmcb,
- svm->ns_cr0, v->arch.hvm_vcpu.guest_cr[0]);
- v->arch.hvm_vcpu.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE;
+ svm->ns_cr0, v->arch.hvm.guest_cr[0]);
+ v->arch.hvm.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE;
n1vmcb->rflags &= ~X86_EFLAGS_VM;
rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, 1);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
- svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
+ svm->ns_cr0 = v->arch.hvm.guest_cr[0];
/* CR2 */
- v->arch.hvm_vcpu.guest_cr[2] = n1vmcb->_cr2;
+ v->arch.hvm.guest_cr[2] = n1vmcb->_cr2;
hvm_update_guest_cr(v, 2);
/* CR3 */
/* Nested paging mode */
if (nestedhvm_paging_mode_hap(v)) {
/* host nested paging + guest nested paging. */
- /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
+ /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
} else if (paging_mode_hap(v->domain)) {
/* host nested paging + guest shadow paging. */
- /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
+ /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
} else {
/* host shadow paging + guest shadow paging. */
if (!pagetable_is_null(v->arch.guest_table))
put_page(pagetable_get_page(v->arch.guest_table));
v->arch.guest_table = pagetable_null();
- /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
+ /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
}
rc = hvm_set_cr3(n1vmcb->_cr3, 1);
if ( rc == X86EMUL_EXCEPTION )
}
/* EFER */
- v->arch.hvm_vcpu.guest_efer = ns_vmcb->_efer;
+ v->arch.hvm.guest_efer = ns_vmcb->_efer;
rc = hvm_set_efer(ns_vmcb->_efer);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
gdprintk(XENLOG_ERR, "hvm_set_efer failed, rc: %u\n", rc);
/* CR4 */
- v->arch.hvm_vcpu.guest_cr[4] = ns_vmcb->_cr4;
+ v->arch.hvm.guest_cr[4] = ns_vmcb->_cr4;
rc = hvm_set_cr4(ns_vmcb->_cr4, 1);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
/* CR0 */
- svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
+ svm->ns_cr0 = v->arch.hvm.guest_cr[0];
cr0 = nestedsvm_fpu_vmentry(svm->ns_cr0, ns_vmcb, n1vmcb, n2vmcb);
- v->arch.hvm_vcpu.guest_cr[0] = ns_vmcb->_cr0;
+ v->arch.hvm.guest_cr[0] = ns_vmcb->_cr0;
rc = hvm_set_cr0(cr0, 1);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
/* CR2 */
- v->arch.hvm_vcpu.guest_cr[2] = ns_vmcb->_cr2;
+ v->arch.hvm.guest_cr[2] = ns_vmcb->_cr2;
hvm_update_guest_cr(v, 2);
/* Nested paging mode */
nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
- /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
+ /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
/* When l1 guest does shadow paging
* we assume it intercepts page faults.
*/
- /* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
+ /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
* Delay the injection because this would result in delivering
* an interrupt *within* the execution of an instruction.
*/
- if ( v->arch.hvm_vcpu.hvm_io.io_req.state != STATE_IOREQ_NONE )
+ if ( v->arch.hvm.hvm_io.io_req.state != STATE_IOREQ_NONE )
return hvm_intblk_shadow;
if ( !nv->nv_vmexit_pending && n2vmcb->exitintinfo.bytes != 0 ) {
* Need state for transfering the nested gif status so only write on
* the hvm_vcpu EFER.SVME changing.
*/
- if ( v->arch.hvm_vcpu.guest_efer & EFER_SVME )
+ if ( v->arch.hvm.guest_efer & EFER_SVME )
{
if ( !vmcb->virt_ext.fields.vloadsave_enable &&
paging_mode_hap(v->domain) &&
static void svm_save_dr(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- unsigned int flag_dr_dirty = v->arch.hvm_vcpu.flag_dr_dirty;
+ unsigned int flag_dr_dirty = v->arch.hvm.flag_dr_dirty;
if ( !flag_dr_dirty )
return;
/* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
- v->arch.hvm_vcpu.flag_dr_dirty = 0;
+ v->arch.hvm.flag_dr_dirty = 0;
vmcb_set_dr_intercepts(vmcb, ~0u);
if ( v->domain->arch.cpuid->extd.dbext )
static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v)
{
- if ( v->arch.hvm_vcpu.flag_dr_dirty )
+ if ( v->arch.hvm.flag_dr_dirty )
return;
- v->arch.hvm_vcpu.flag_dr_dirty = 1;
+ v->arch.hvm.flag_dr_dirty = 1;
vmcb_set_dr_intercepts(vmcb, 0);
ASSERT(v == current);
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
- c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
- c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
- c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
+ c->cr0 = v->arch.hvm.guest_cr[0];
+ c->cr2 = v->arch.hvm.guest_cr[2];
+ c->cr3 = v->arch.hvm.guest_cr[3];
+ c->cr4 = v->arch.hvm.guest_cr[4];
c->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs;
c->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp;
}
}
- if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
+ if ( v->arch.hvm.guest_cr[0] & X86_CR0_PG )
put_page(pagetable_get_page(v->arch.guest_table));
v->arch.guest_table =
page ? pagetable_from_page(page) : pagetable_null();
}
- v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
- v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
- v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
- v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
+ v->arch.hvm.guest_cr[0] = c->cr0 | X86_CR0_ET;
+ v->arch.hvm.guest_cr[2] = c->cr2;
+ v->arch.hvm.guest_cr[3] = c->cr3;
+ v->arch.hvm.guest_cr[4] = c->cr4;
svm_update_guest_cr(v, 0, 0);
svm_update_guest_cr(v, 2, 0);
svm_update_guest_cr(v, 4, 0);
data->msr_star = vmcb->star;
data->msr_cstar = vmcb->cstar;
data->msr_syscall_mask = vmcb->sfmask;
- data->msr_efer = v->arch.hvm_vcpu.guest_efer;
+ data->msr_efer = v->arch.hvm.guest_efer;
data->msr_flags = 0;
}
vmcb->star = data->msr_star;
vmcb->cstar = data->msr_cstar;
vmcb->sfmask = data->msr_syscall_mask;
- v->arch.hvm_vcpu.guest_efer = data->msr_efer;
+ v->arch.hvm.guest_efer = data->msr_efer;
svm_update_guest_efer(v);
}
* then this is not necessary: no FPU activity can occur until the guest
* clears CR0.TS, and we will initialise the FPU when that happens.
*/
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) )
{
vmcb_set_exception_intercepts(
n1vmcb,
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
+ if ( unlikely(!(v->arch.hvm.guest_cr[0] & X86_CR0_PE)) )
return 0;
if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
case 0: {
unsigned long hw_cr0_mask = 0;
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) )
{
if ( v != current )
{
vmcb_set_cr_intercepts(vmcb, intercepts | CR_INTERCEPT_CR3_WRITE);
}
- value = v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
+ value = v->arch.hvm.guest_cr[0] | hw_cr0_mask;
if ( !paging_mode_hap(v->domain) )
value |= X86_CR0_PG | X86_CR0_WP;
vmcb_set_cr0(vmcb, value);
break;
}
case 2:
- vmcb_set_cr2(vmcb, v->arch.hvm_vcpu.guest_cr[2]);
+ vmcb_set_cr2(vmcb, v->arch.hvm.guest_cr[2]);
break;
case 3:
- vmcb_set_cr3(vmcb, v->arch.hvm_vcpu.hw_cr[3]);
+ vmcb_set_cr3(vmcb, v->arch.hvm.hw_cr[3]);
if ( !nestedhvm_enabled(v->domain) )
{
if ( !(flags & HVM_UPDATE_GUEST_CR3_NOFLUSH) )
else if ( !(flags & HVM_UPDATE_GUEST_CR3_NOFLUSH) )
hvm_asid_flush_vcpu_asid(
nestedhvm_vcpu_in_guestmode(v)
- ? &vcpu_nestedhvm(v).nv_n2asid : &v->arch.hvm_vcpu.n1asid);
+ ? &vcpu_nestedhvm(v).nv_n2asid : &v->arch.hvm.n1asid);
break;
case 4:
value = HVM_CR4_HOST_MASK;
if ( paging_mode_hap(v->domain) )
value &= ~X86_CR4_PAE;
- value |= v->arch.hvm_vcpu.guest_cr[4];
+ value |= v->arch.hvm.guest_cr[4];
if ( !hvm_paging_enabled(v) )
{
static void svm_update_guest_efer(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- bool_t lma = !!(v->arch.hvm_vcpu.guest_efer & EFER_LMA);
+ bool_t lma = !!(v->arch.hvm.guest_efer & EFER_LMA);
uint64_t new_efer;
- new_efer = (v->arch.hvm_vcpu.guest_efer | EFER_SVME) & ~EFER_LME;
+ new_efer = (v->arch.hvm.guest_efer | EFER_SVME) & ~EFER_LME;
if ( lma )
new_efer |= EFER_LME;
vmcb_set_efer(vmcb, new_efer);
ASSERT(nestedhvm_enabled(v->domain) ||
- !(v->arch.hvm_vcpu.guest_efer & EFER_SVME));
+ !(v->arch.hvm.guest_efer & EFER_SVME));
if ( nestedhvm_enabled(v->domain) )
svm_nested_features_on_efer_update(v);
vcpu_guestmode = 1;
if ( !vcpu_guestmode &&
- unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
+ unlikely(v->arch.hvm.debug_state_latch != debug_state) )
{
uint32_t intercepts = vmcb_get_exception_intercepts(vmcb);
- v->arch.hvm_vcpu.debug_state_latch = debug_state;
+ v->arch.hvm.debug_state_latch = debug_state;
vmcb_set_exception_intercepts(
vmcb, debug_state ? (intercepts | (1U << TRAP_int3))
: (intercepts & ~(1U << TRAP_int3)));
case TRAP_page_fault:
ASSERT(_event.type == X86_EVENTTYPE_HW_EXCEPTION);
- curr->arch.hvm_vcpu.guest_cr[2] = _event.cr2;
+ curr->arch.hvm.guest_cr[2] = _event.cr2;
vmcb_set_cr2(vmcb, _event.cr2);
break;
}
if ( vmcb != n1vmcb )
{
/* Check if l1 guest must make FPU ready for the l2 guest */
- if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS )
+ if ( v->arch.hvm.guest_cr[0] & X86_CR0_TS )
hvm_inject_hw_exception(TRAP_no_device, X86_EVENT_NO_EC);
else
vmcb_set_cr0(n1vmcb, vmcb_get_cr0(n1vmcb) & ~X86_CR0_TS);
return;
}
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) )
vmcb_set_cr0(vmcb, vmcb_get_cr0(vmcb) & ~X86_CR0_TS);
}
{
svm_invlpga(vaddr,
(asid == 0)
- ? v->arch.hvm_vcpu.n1asid.asid
+ ? v->arch.hvm.n1asid.asid
: vcpu_nestedhvm(v).nv_n2asid.asid);
}
hvm_invalidate_regs_fields(regs);
if ( paging_mode_hap(v->domain) )
- v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3] =
- vmcb_get_cr3(vmcb);
+ v->arch.hvm.guest_cr[3] = v->arch.hvm.hw_cr[3] = vmcb_get_cr3(vmcb);
if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
vcpu_guestmode = 1;
}
/* Guest EFER. */
- v->arch.hvm_vcpu.guest_efer = 0;
+ v->arch.hvm.guest_efer = 0;
hvm_update_guest_efer(v);
/* Guest segment limits. */
vmcb->tr.base = 0;
vmcb->tr.limit = 0xff;
- v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
+ v->arch.hvm.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
hvm_update_guest_cr(v, 0);
- v->arch.hvm_vcpu.guest_cr[4] = 0;
+ v->arch.hvm.guest_cr[4] = 0;
hvm_update_guest_cr(v, 4);
paging_update_paging_modes(v);
{
const union viridian_vp_assist *va;
- va = &v->arch.hvm_vcpu.viridian.vp_assist.msr;
+ va = &v->arch.hvm.viridian.vp_assist.msr;
printk(XENLOG_G_INFO "%pv: VIRIDIAN VP_ASSIST_PAGE: enabled: %x pfn: %lx\n",
v, va->fields.enabled, (unsigned long)va->fields.pfn);
static void initialize_vp_assist(struct vcpu *v)
{
struct domain *d = v->domain;
- unsigned long gmfn = v->arch.hvm_vcpu.viridian.vp_assist.msr.fields.pfn;
+ unsigned long gmfn = v->arch.hvm.viridian.vp_assist.msr.fields.pfn;
struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
void *va;
- ASSERT(!v->arch.hvm_vcpu.viridian.vp_assist.va);
+ ASSERT(!v->arch.hvm.viridian.vp_assist.va);
/*
* See section 7.8.7 of the specification for details of this
clear_page(va);
- v->arch.hvm_vcpu.viridian.vp_assist.va = va;
+ v->arch.hvm.viridian.vp_assist.va = va;
return;
fail:
static void teardown_vp_assist(struct vcpu *v)
{
- void *va = v->arch.hvm_vcpu.viridian.vp_assist.va;
+ void *va = v->arch.hvm.viridian.vp_assist.va;
struct page_info *page;
if ( !va )
return;
- v->arch.hvm_vcpu.viridian.vp_assist.va = NULL;
+ v->arch.hvm.viridian.vp_assist.va = NULL;
page = mfn_to_page(domain_page_map_to_mfn(va));
void viridian_apic_assist_set(struct vcpu *v)
{
- uint32_t *va = v->arch.hvm_vcpu.viridian.vp_assist.va;
+ uint32_t *va = v->arch.hvm.viridian.vp_assist.va;
if ( !va )
return;
* wrong and the VM will most likely hang so force a crash now
* to make the problem clear.
*/
- if ( v->arch.hvm_vcpu.viridian.vp_assist.pending )
+ if ( v->arch.hvm.viridian.vp_assist.pending )
domain_crash(v->domain);
- v->arch.hvm_vcpu.viridian.vp_assist.pending = true;
+ v->arch.hvm.viridian.vp_assist.pending = true;
*va |= 1u;
}
bool viridian_apic_assist_completed(struct vcpu *v)
{
- uint32_t *va = v->arch.hvm_vcpu.viridian.vp_assist.va;
+ uint32_t *va = v->arch.hvm.viridian.vp_assist.va;
if ( !va )
return false;
- if ( v->arch.hvm_vcpu.viridian.vp_assist.pending &&
+ if ( v->arch.hvm.viridian.vp_assist.pending &&
!(*va & 1u) )
{
/* An EOI has been avoided */
- v->arch.hvm_vcpu.viridian.vp_assist.pending = false;
+ v->arch.hvm.viridian.vp_assist.pending = false;
return true;
}
void viridian_apic_assist_clear(struct vcpu *v)
{
- uint32_t *va = v->arch.hvm_vcpu.viridian.vp_assist.va;
+ uint32_t *va = v->arch.hvm.viridian.vp_assist.va;
if ( !va )
return;
*va &= ~1u;
- v->arch.hvm_vcpu.viridian.vp_assist.pending = false;
+ v->arch.hvm.viridian.vp_assist.pending = false;
}
static void update_reference_tsc(struct domain *d, bool_t initialize)
case HV_X64_MSR_VP_ASSIST_PAGE:
perfc_incr(mshv_wrmsr_apic_msr);
teardown_vp_assist(v); /* release any previous mapping */
- v->arch.hvm_vcpu.viridian.vp_assist.msr.raw = val;
+ v->arch.hvm.viridian.vp_assist.msr.raw = val;
dump_vp_assist(v);
- if ( v->arch.hvm_vcpu.viridian.vp_assist.msr.fields.enabled )
+ if ( v->arch.hvm.viridian.vp_assist.msr.fields.enabled )
initialize_vp_assist(v);
break;
case HV_X64_MSR_CRASH_P3:
case HV_X64_MSR_CRASH_P4:
BUILD_BUG_ON(HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 >=
- ARRAY_SIZE(v->arch.hvm_vcpu.viridian.crash_param));
+ ARRAY_SIZE(v->arch.hvm.viridian.crash_param));
idx -= HV_X64_MSR_CRASH_P0;
- v->arch.hvm_vcpu.viridian.crash_param[idx] = val;
+ v->arch.hvm.viridian.crash_param[idx] = val;
break;
case HV_X64_MSR_CRASH_CTL:
break;
gprintk(XENLOG_WARNING, "VIRIDIAN CRASH: %lx %lx %lx %lx %lx\n",
- v->arch.hvm_vcpu.viridian.crash_param[0],
- v->arch.hvm_vcpu.viridian.crash_param[1],
- v->arch.hvm_vcpu.viridian.crash_param[2],
- v->arch.hvm_vcpu.viridian.crash_param[3],
- v->arch.hvm_vcpu.viridian.crash_param[4]);
+ v->arch.hvm.viridian.crash_param[0],
+ v->arch.hvm.viridian.crash_param[1],
+ v->arch.hvm.viridian.crash_param[2],
+ v->arch.hvm.viridian.crash_param[3],
+ v->arch.hvm.viridian.crash_param[4]);
break;
}
case HV_X64_MSR_VP_ASSIST_PAGE:
perfc_incr(mshv_rdmsr_apic_msr);
- *val = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw;
+ *val = v->arch.hvm.viridian.vp_assist.msr.raw;
break;
case HV_X64_MSR_REFERENCE_TSC:
case HV_X64_MSR_CRASH_P3:
case HV_X64_MSR_CRASH_P4:
BUILD_BUG_ON(HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 >=
- ARRAY_SIZE(v->arch.hvm_vcpu.viridian.crash_param));
+ ARRAY_SIZE(v->arch.hvm.viridian.crash_param));
idx -= HV_X64_MSR_CRASH_P0;
- *val = v->arch.hvm_vcpu.viridian.crash_param[idx];
+ *val = v->arch.hvm.viridian.crash_param[idx];
break;
case HV_X64_MSR_CRASH_CTL:
for_each_vcpu( d, v ) {
struct hvm_viridian_vcpu_context ctxt = {
- .vp_assist_msr = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw,
- .vp_assist_pending = v->arch.hvm_vcpu.viridian.vp_assist.pending,
+ .vp_assist_msr = v->arch.hvm.viridian.vp_assist.msr.raw,
+ .vp_assist_pending = v->arch.hvm.viridian.vp_assist.pending,
};
if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 )
if ( memcmp(&ctxt._pad, zero_page, sizeof(ctxt._pad)) )
return -EINVAL;
- v->arch.hvm_vcpu.viridian.vp_assist.msr.raw = ctxt.vp_assist_msr;
- if ( v->arch.hvm_vcpu.viridian.vp_assist.msr.fields.enabled &&
- !v->arch.hvm_vcpu.viridian.vp_assist.va )
+ v->arch.hvm.viridian.vp_assist.msr.raw = ctxt.vp_assist_msr;
+ if ( v->arch.hvm.viridian.vp_assist.msr.fields.enabled &&
+ !v->arch.hvm.viridian.vp_assist.va )
initialize_vp_assist(v);
- v->arch.hvm_vcpu.viridian.vp_assist.pending = !!ctxt.vp_assist_pending;
+ v->arch.hvm.viridian.vp_assist.pending = !!ctxt.vp_assist_pending;
return 0;
}
if ( !(val & PCI_MSIX_VECTOR_BITMASK) &&
test_and_clear_bit(nr_entry, &entry->table_flags) )
{
- v->arch.hvm_vcpu.hvm_io.msix_unmask_address = address;
+ v->arch.hvm.hvm_io.msix_unmask_address = address;
goto out;
}
PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET) &&
!(data & PCI_MSIX_VECTOR_BITMASK) )
{
- curr->arch.hvm_vcpu.hvm_io.msix_snoop_address = addr;
- curr->arch.hvm_vcpu.hvm_io.msix_snoop_gpa = 0;
+ curr->arch.hvm.hvm_io.msix_snoop_address = addr;
+ curr->arch.hvm.hvm_io.msix_snoop_gpa = 0;
}
}
else if ( (size == 4 || size == 8) &&
BUILD_BUG_ON((PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET + 4) &
(PCI_MSIX_ENTRY_SIZE - 1));
- curr->arch.hvm_vcpu.hvm_io.msix_snoop_address =
+ curr->arch.hvm.hvm_io.msix_snoop_address =
addr + size * r->count - 4;
- curr->arch.hvm_vcpu.hvm_io.msix_snoop_gpa =
+ curr->arch.hvm.hvm_io.msix_snoop_gpa =
r->data + size * r->count - 4;
}
}
for_each_vcpu ( d, v )
{
if ( (v->pause_flags & VPF_blocked_in_xen) &&
- !v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa &&
- v->arch.hvm_vcpu.hvm_io.msix_snoop_address ==
+ !v->arch.hvm.hvm_io.msix_snoop_gpa &&
+ v->arch.hvm.hvm_io.msix_snoop_address ==
(gtable + msi_desc->msi_attrib.entry_nr *
PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET) )
- v->arch.hvm_vcpu.hvm_io.msix_unmask_address =
- v->arch.hvm_vcpu.hvm_io.msix_snoop_address;
+ v->arch.hvm.hvm_io.msix_unmask_address =
+ v->arch.hvm.hvm_io.msix_snoop_address;
}
}
void msix_write_completion(struct vcpu *v)
{
- unsigned long ctrl_address = v->arch.hvm_vcpu.hvm_io.msix_unmask_address;
- unsigned long snoop_addr = v->arch.hvm_vcpu.hvm_io.msix_snoop_address;
+ unsigned long ctrl_address = v->arch.hvm.hvm_io.msix_unmask_address;
+ unsigned long snoop_addr = v->arch.hvm.hvm_io.msix_snoop_address;
- v->arch.hvm_vcpu.hvm_io.msix_snoop_address = 0;
+ v->arch.hvm.hvm_io.msix_snoop_address = 0;
if ( !ctrl_address && snoop_addr &&
- v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa )
+ v->arch.hvm.hvm_io.msix_snoop_gpa )
{
const struct msi_desc *desc;
uint32_t data;
if ( desc &&
hvm_copy_from_guest_phys(&data,
- v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa,
+ v->arch.hvm.hvm_io.msix_snoop_gpa,
sizeof(data)) == HVMTRANS_okay &&
!(data & PCI_MSIX_VECTOR_BITMASK) )
ctrl_address = snoop_addr;
if ( !ctrl_address )
return;
- v->arch.hvm_vcpu.hvm_io.msix_unmask_address = 0;
+ v->arch.hvm.hvm_io.msix_unmask_address = 0;
if ( msixtbl_write(v, ctrl_address, 4, 0) != X86EMUL_OKAY )
gdprintk(XENLOG_WARNING, "MSI-X write completion failure\n");
}
int pt_vector;
/* Block event injection when single step with MTF. */
- if ( unlikely(v->arch.hvm_vcpu.single_step) )
+ if ( unlikely(v->arch.hvm.single_step) )
{
v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
vmx_update_cpu_exec_control(v);
void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
{
struct vcpu *curr = current;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
int rc;
perfc_incr(realmode_emulations);
if ( rc == X86EMUL_UNRECOGNIZED )
{
gdprintk(XENLOG_ERR, "Unrecognized insn.\n");
- if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+ if ( curr->arch.hvm.guest_cr[0] & X86_CR0_PE )
goto fail;
realmode_deliver_exception(TRAP_invalid_op, 0, hvmemul_ctxt);
{
domain_pause_for_debugger();
}
- else if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
+ else if ( curr->arch.hvm.guest_cr[0] & X86_CR0_PE )
{
gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
hvmemul_ctxt->ctxt.event.vector);
struct vcpu *curr = current;
struct hvm_emulate_ctxt hvmemul_ctxt;
struct segment_register *sreg;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
unsigned long intr_info;
unsigned int emulations = 0;
hvm_emulate_init_once(&hvmemul_ctxt, NULL, regs);
/* Only deliver interrupts into emulated real mode. */
- if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
+ if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PE) &&
(intr_info & INTR_INFO_VALID_MASK) )
{
realmode_deliver_exception((uint8_t)intr_info, 0, &hvmemul_ctxt);
| (v->arch.fully_eager_fpu ? 0 : (1U << TRAP_no_device));
vmx_update_exception_bitmap(v);
- v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
+ v->arch.hvm.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
hvm_update_guest_cr(v, 0);
- v->arch.hvm_vcpu.guest_cr[4] = 0;
+ v->arch.hvm.guest_cr[4] = 0;
hvm_update_guest_cr(v, 4);
if ( cpu_has_vmx_tpr_shadow )
|| v->domain->arch.monitor.software_breakpoint_enabled
|| v->domain->arch.monitor.singlestep_enabled;
- if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
+ if ( unlikely(v->arch.hvm.debug_state_latch != debug_state) )
{
- v->arch.hvm_vcpu.debug_state_latch = debug_state;
+ v->arch.hvm.debug_state_latch = debug_state;
vmx_update_debug_state(v);
}
{
unsigned long cs_ar_bytes;
- if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
+ if ( unlikely(!(v->arch.hvm.guest_cr[0] & X86_CR0_PE)) )
return 0;
if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
static void vmx_save_dr(struct vcpu *v)
{
- if ( !v->arch.hvm_vcpu.flag_dr_dirty )
+ if ( !v->arch.hvm.flag_dr_dirty )
return;
/* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
- v->arch.hvm_vcpu.flag_dr_dirty = 0;
+ v->arch.hvm.flag_dr_dirty = 0;
v->arch.hvm_vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
vmx_update_cpu_exec_control(v);
static void __restore_debug_registers(struct vcpu *v)
{
- if ( v->arch.hvm_vcpu.flag_dr_dirty )
+ if ( v->arch.hvm.flag_dr_dirty )
return;
- v->arch.hvm_vcpu.flag_dr_dirty = 1;
+ v->arch.hvm.flag_dr_dirty = 1;
write_debugreg(0, v->arch.debugreg[0]);
write_debugreg(1, v->arch.debugreg[1]);
vmx_vmcs_enter(v);
- c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
- c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
- c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
- c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
+ c->cr0 = v->arch.hvm.guest_cr[0];
+ c->cr2 = v->arch.hvm.guest_cr[2];
+ c->cr3 = v->arch.hvm.guest_cr[3];
+ c->cr4 = v->arch.hvm.guest_cr[4];
- c->msr_efer = v->arch.hvm_vcpu.guest_efer;
+ c->msr_efer = v->arch.hvm.guest_efer;
__vmread(GUEST_SYSENTER_CS, &c->sysenter_cs);
__vmread(GUEST_SYSENTER_ESP, &c->sysenter_esp);
page ? pagetable_from_page(page) : pagetable_null();
}
- v->arch.hvm_vcpu.guest_cr[0] = cr0 | X86_CR0_ET;
- v->arch.hvm_vcpu.guest_cr[3] = cr3;
+ v->arch.hvm.guest_cr[0] = cr0 | X86_CR0_ET;
+ v->arch.hvm.guest_cr[3] = cr3;
return 0;
}
vmx_vmcs_enter(v);
- v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
- v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
+ v->arch.hvm.guest_cr[2] = c->cr2;
+ v->arch.hvm.guest_cr[4] = c->cr4;
vmx_update_guest_cr(v, 0, 0);
vmx_update_guest_cr(v, 2, 0);
vmx_update_guest_cr(v, 4, 0);
- v->arch.hvm_vcpu.guest_efer = c->msr_efer;
+ v->arch.hvm.guest_efer = c->msr_efer;
vmx_update_guest_efer(v);
__vmwrite(GUEST_SYSENTER_CS, c->sysenter_cs);
if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
{
- ctxt->msr[ctxt->count].val = v->arch.hvm_vcpu.msr_xss;
+ ctxt->msr[ctxt->count].val = v->arch.hvm.msr_xss;
if ( ctxt->msr[ctxt->count].val )
ctxt->msr[ctxt->count++].index = MSR_IA32_XSS;
}
break;
case MSR_IA32_XSS:
if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
- v->arch.hvm_vcpu.msr_xss = ctxt->msr[i].val;
+ v->arch.hvm.msr_xss = ctxt->msr[i].val;
else
err = -ENXIO;
break;
* then this is not necessary: no FPU activity can occur until the guest
* clears CR0.TS, and we will initialise the FPU when that happens.
*/
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) )
{
- v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
- __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+ v->arch.hvm.hw_cr[0] |= X86_CR0_TS;
+ __vmwrite(GUEST_CR0, v->arch.hvm.hw_cr[0]);
v->arch.hvm_vmx.exception_bitmap |= (1u << TRAP_no_device);
vmx_update_exception_bitmap(v);
}
static int vmx_set_guest_pat(struct vcpu *v, u64 gpat)
{
if ( !paging_mode_hap(v->domain) ||
- unlikely(v->arch.hvm_vcpu.cache_mode == NO_FILL_CACHE_MODE) )
+ unlikely(v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
return 0;
vmx_vmcs_enter(v);
static int vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
{
if ( !paging_mode_hap(v->domain) ||
- unlikely(v->arch.hvm_vcpu.cache_mode == NO_FILL_CACHE_MODE) )
+ unlikely(v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
return 0;
vmx_vmcs_enter(v);
}
else
{
- u64 *pat = &v->arch.hvm_vcpu.pat_cr;
+ u64 *pat = &v->arch.hvm.pat_cr;
if ( value & X86_CR0_CD )
{
wbinvd(); /* flush possibly polluted cache */
hvm_asid_flush_vcpu(v); /* invalidate memory type cached in TLB */
- v->arch.hvm_vcpu.cache_mode = NO_FILL_CACHE_MODE;
+ v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
}
else
{
- v->arch.hvm_vcpu.cache_mode = NORMAL_CACHE_MODE;
+ v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
vmx_set_guest_pat(v, *pat);
if ( !iommu_enabled || iommu_snoop )
vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
static void vmx_load_pdptrs(struct vcpu *v)
{
- unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3];
+ unsigned long cr3 = v->arch.hvm.guest_cr[3];
uint64_t *guest_pdptes;
struct page_info *page;
p2m_type_t p2mt;
char *p;
/* EPT needs to load PDPTRS into VMCS for PAE. */
- if ( !hvm_pae_enabled(v) || (v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
+ if ( !hvm_pae_enabled(v) || (v->arch.hvm.guest_efer & EFER_LMA) )
return;
if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
void vmx_update_debug_state(struct vcpu *v)
{
- if ( v->arch.hvm_vcpu.debug_state_latch )
+ if ( v->arch.hvm.debug_state_latch )
v->arch.hvm_vmx.exception_bitmap |= 1U << TRAP_int3;
else
v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_int3);
}
if ( !nestedhvm_vcpu_in_guestmode(v) )
- __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
+ __vmwrite(CR0_READ_SHADOW, v->arch.hvm.guest_cr[0]);
else
nvmx_set_cr_read_shadow(v, 0);
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) )
{
if ( v != current )
{
if ( !v->arch.fully_eager_fpu )
hw_cr0_mask |= X86_CR0_TS;
}
- else if ( v->arch.hvm_vcpu.hw_cr[0] & X86_CR0_TS )
+ else if ( v->arch.hvm.hw_cr[0] & X86_CR0_TS )
vmx_fpu_enter(v);
}
- realmode = !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE);
+ realmode = !(v->arch.hvm.guest_cr[0] & X86_CR0_PE);
if ( !vmx_unrestricted_guest(v) &&
(realmode != v->arch.hvm_vmx.vmx_realmode) )
vmx_update_exception_bitmap(v);
}
- v->arch.hvm_vcpu.hw_cr[0] =
- v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
- __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+ v->arch.hvm.hw_cr[0] =
+ v->arch.hvm.guest_cr[0] | hw_cr0_mask;
+ __vmwrite(GUEST_CR0, v->arch.hvm.hw_cr[0]);
}
/* Fallthrough: Changing CR0 can change some bits in real CR4. */
case 4:
- v->arch.hvm_vcpu.hw_cr[4] = HVM_CR4_HOST_MASK;
+ v->arch.hvm.hw_cr[4] = HVM_CR4_HOST_MASK;
if ( paging_mode_hap(v->domain) )
- v->arch.hvm_vcpu.hw_cr[4] &= ~X86_CR4_PAE;
+ v->arch.hvm.hw_cr[4] &= ~X86_CR4_PAE;
if ( !nestedhvm_vcpu_in_guestmode(v) )
- __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
+ __vmwrite(CR4_READ_SHADOW, v->arch.hvm.guest_cr[4]);
else
nvmx_set_cr_read_shadow(v, 4);
- v->arch.hvm_vcpu.hw_cr[4] |= v->arch.hvm_vcpu.guest_cr[4];
+ v->arch.hvm.hw_cr[4] |= v->arch.hvm.guest_cr[4];
if ( v->arch.hvm_vmx.vmx_realmode )
- v->arch.hvm_vcpu.hw_cr[4] |= X86_CR4_VME;
+ v->arch.hvm.hw_cr[4] |= X86_CR4_VME;
if ( !hvm_paging_enabled(v) )
{
* HVM_PARAM_IDENT_PT which is a 32bit pagetable using 4M
 * superpages. Override the guest's paging settings to match.
*/
- v->arch.hvm_vcpu.hw_cr[4] |= X86_CR4_PSE;
- v->arch.hvm_vcpu.hw_cr[4] &= ~X86_CR4_PAE;
+ v->arch.hvm.hw_cr[4] |= X86_CR4_PSE;
+ v->arch.hvm.hw_cr[4] &= ~X86_CR4_PAE;
}
/*
* effect if paging was actually disabled, so hide them behind the
* back of the guest.
*/
- v->arch.hvm_vcpu.hw_cr[4] &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
+ v->arch.hvm.hw_cr[4] &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
}
- __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
+ __vmwrite(GUEST_CR4, v->arch.hvm.hw_cr[4]);
/*
* Shadow path has not been optimized because it requires
if ( paging_mode_hap(v->domain) )
{
if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
- v->arch.hvm_vcpu.hw_cr[3] =
+ v->arch.hvm.hw_cr[3] =
v->domain->arch.hvm.params[HVM_PARAM_IDENT_PT];
vmx_load_pdptrs(v);
}
- __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
+ __vmwrite(GUEST_CR3, v->arch.hvm.hw_cr[3]);
if ( !(flags & HVM_UPDATE_GUEST_CR3_NOFLUSH) )
hvm_asid_flush_vcpu(v);
static void vmx_update_guest_efer(struct vcpu *v)
{
- unsigned long entry_ctls, guest_efer = v->arch.hvm_vcpu.guest_efer,
+ unsigned long entry_ctls, guest_efer = v->arch.hvm.guest_efer,
xen_efer = read_efer();
if ( paging_mode_shadow(v->domain) )
 * If the guest's virtualised view of MSR_EFER matches the value loaded
* into hardware, clear the read intercept to avoid unnecessary VMExits.
*/
- if ( guest_efer == v->arch.hvm_vcpu.guest_efer )
+ if ( guest_efer == v->arch.hvm.guest_efer )
vmx_clear_msr_intercept(v, MSR_EFER, VMX_MSR_R);
else
vmx_set_msr_intercept(v, MSR_EFER, VMX_MSR_R);
case TRAP_page_fault:
ASSERT(_event.type == X86_EVENTTYPE_HW_EXCEPTION);
- curr->arch.hvm_vcpu.guest_cr[2] = _event.cr2;
+ curr->arch.hvm.guest_cr[2] = _event.cr2;
break;
}
if ( (_event.vector == TRAP_page_fault) &&
(_event.type == X86_EVENTTYPE_HW_EXCEPTION) )
HVMTRACE_LONG_2D(PF_INJECT, _event.error_code,
- TRC_PAR_LONG(curr->arch.hvm_vcpu.guest_cr[2]));
+ TRC_PAR_LONG(curr->arch.hvm.guest_cr[2]));
else
HVMTRACE_2D(INJ_EXC, _event.vector, _event.error_code);
}
vmx_fpu_enter(curr);
/* Disable TS in guest CR0 unless the guest wants the exception too. */
- if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_TS) )
{
- curr->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
- __vmwrite(GUEST_CR0, curr->arch.hvm_vcpu.hw_cr[0]);
+ curr->arch.hvm.hw_cr[0] &= ~X86_CR0_TS;
+ __vmwrite(GUEST_CR0, curr->arch.hvm.hw_cr[0]);
}
}
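Aside, not part of the patch: the two CR0.TS hunks above are the halves of VMX's lazy FPU switching. When a vCPU's FPU state is not loaded, the hardware view of CR0 carries TS and #NM (TRAP_no_device) is intercepted; the first FPU instruction then traps, vmx_fpu_enter() reloads the state, and hardware TS is cleared again unless the guest itself has CR0.TS set. A minimal sketch using only fields and helpers visible in the hunks (both function names are hypothetical):

/* Sketch only, not part of the patch: lazy FPU handling via CR0.TS. */
static void lazy_fpu_arm_trap(struct vcpu *v)        /* hypothetical; "leave" half */
{
    v->arch.hvm.hw_cr[0] |= X86_CR0_TS;              /* next FPU insn will fault */
    __vmwrite(GUEST_CR0, v->arch.hvm.hw_cr[0]);
    v->arch.hvm_vmx.exception_bitmap |= 1u << TRAP_no_device;
    vmx_update_exception_bitmap(v);
}

static void lazy_fpu_disarm_trap(struct vcpu *v)     /* hypothetical; #NM half */
{
    vmx_fpu_enter(v);                                /* reload the FPU state */
    if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) )   /* guest doesn't want #NM */
    {
        v->arch.hvm.hw_cr[0] &= ~X86_CR0_TS;
        __vmwrite(GUEST_CR0, v->arch.hvm.hw_cr[0]);
    }
}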
HVMTRACE_0D(DR_WRITE);
- if ( !v->arch.hvm_vcpu.flag_dr_dirty )
+ if ( !v->arch.hvm.flag_dr_dirty )
__restore_debug_registers(v);
/* Allow guest direct access to DR registers */
case VMX_CR_ACCESS_TYPE_CLTS:
{
- unsigned long old = curr->arch.hvm_vcpu.guest_cr[0];
+ unsigned long old = curr->arch.hvm.guest_cr[0];
unsigned long value = old & ~X86_CR0_TS;
/*
* return value is ignored for now.
*/
hvm_monitor_crX(CR0, value, old);
- curr->arch.hvm_vcpu.guest_cr[0] = value;
+ curr->arch.hvm.guest_cr[0] = value;
vmx_update_guest_cr(curr, 0, 0);
HVMTRACE_0D(CLTS);
break;
case VMX_CR_ACCESS_TYPE_LMSW:
{
- unsigned long value = curr->arch.hvm_vcpu.guest_cr[0];
+ unsigned long value = curr->arch.hvm.guest_cr[0];
int rc;
/* LMSW can (1) set PE; (2) set or clear MP, EM, and TS. */
* Xen allows the guest to modify some CR4 bits directly, update cached
* values to match.
*/
- __vmread(GUEST_CR4, &v->arch.hvm_vcpu.hw_cr[4]);
- v->arch.hvm_vcpu.guest_cr[4] &= v->arch.hvm_vmx.cr4_host_mask;
- v->arch.hvm_vcpu.guest_cr[4] |= v->arch.hvm_vcpu.hw_cr[4] &
- ~v->arch.hvm_vmx.cr4_host_mask;
+ __vmread(GUEST_CR4, &v->arch.hvm.hw_cr[4]);
+ v->arch.hvm.guest_cr[4] &= v->arch.hvm_vmx.cr4_host_mask;
+ v->arch.hvm.guest_cr[4] |= (v->arch.hvm.hw_cr[4] &
+ ~v->arch.hvm_vmx.cr4_host_mask);
- __vmread(GUEST_CR3, &v->arch.hvm_vcpu.hw_cr[3]);
+ __vmread(GUEST_CR3, &v->arch.hvm.hw_cr[3]);
if ( vmx_unrestricted_guest(v) || hvm_paging_enabled(v) )
- v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3];
+ v->arch.hvm.guest_cr[3] = v->arch.hvm.hw_cr[3];
}
__vmread(VM_EXIT_REASON, &exit_reason);
case EXIT_REASON_MONITOR_TRAP_FLAG:
v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
vmx_update_cpu_exec_control(v);
- if ( v->arch.hvm_vcpu.single_step )
+ if ( v->arch.hvm.single_step )
{
hvm_monitor_debug(regs->rip,
HVM_MONITOR_SINGLESTEP_BREAKPOINT,
if ( nestedhvm_vcpu_in_guestmode(curr) )
p_asid = &vcpu_nestedhvm(curr).nv_n2asid;
else
- p_asid = &curr->arch.hvm_vcpu.n1asid;
+ p_asid = &curr->arch.hvm.n1asid;
old_asid = p_asid->asid;
need_flush = hvm_asid_handle_vmenter(p_asid);
if ( vmxop_check )
{
- if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
- !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_VMXE) )
+ if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_PE) ||
+ !(v->arch.hvm.guest_cr[4] & X86_CR4_VMXE) )
goto invalid_op;
}
else if ( !nvmx_vcpu_in_vmx(v) )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
}
- hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+ hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset, 0);
vvmcs_to_shadow_bulk(v, ARRAY_SIZE(vmentry_fields), vmentry_fields);
* hvm_set_efer won't work if CR0.PG = 1, so we change the value
* directly to make hvm_long_mode_active(v) work in L2.
* An additional update_paging_modes is also needed if
- * there is 32/64 switch. v->arch.hvm_vcpu.guest_efer doesn't
+ * there is 32/64 switch. v->arch.hvm.guest_efer doesn't
* need to be saved, since its value on vmexit is determined by
* L1 exit_controls
*/
lm_l2 = !!(get_vvmcs(v, VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
if ( lm_l2 )
- v->arch.hvm_vcpu.guest_efer |= EFER_LMA | EFER_LME;
+ v->arch.hvm.guest_efer |= EFER_LMA | EFER_LME;
else
- v->arch.hvm_vcpu.guest_efer &= ~(EFER_LMA | EFER_LME);
+ v->arch.hvm.guest_efer &= ~(EFER_LMA | EFER_LME);
load_shadow_control(v);
load_shadow_guest_state(v);
paging_update_paging_modes(v);
if ( nvmx_ept_enabled(v) && hvm_pae_enabled(v) &&
- !(v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
+ !(v->arch.hvm.guest_efer & EFER_LMA) )
vvmcs_to_shadow_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields);
regs->rip = get_vvmcs(v, GUEST_RIP);
if ( v->arch.hvm_vmx.cr4_host_mask != ~0UL )
/* Only need to update nested GUEST_CR4 if not all bits are trapped. */
- set_vvmcs(v, GUEST_CR4, v->arch.hvm_vcpu.guest_cr[4]);
+ set_vvmcs(v, GUEST_CR4, v->arch.hvm.guest_cr[4]);
}
static void sync_vvmcs_ro(struct vcpu *v)
hvm_inject_hw_exception(TRAP_gp_fault, 0);
}
- hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
+ hvm_set_tsc_offset(v, v->arch.hvm.cache_tsc_offset, 0);
set_vvmcs(v, VM_ENTRY_INTR_INFO, 0);
}
sync_exception_state(v);
if ( nvmx_ept_enabled(v) && hvm_pae_enabled(v) &&
- !(v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
+ !(v->arch.hvm.guest_efer & EFER_LMA) )
shadow_to_vvmcs_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields);
/* This will clear current pCPU bit in p2m->dirty_cpumask */
lm_l1 = !!(get_vvmcs(v, VM_EXIT_CONTROLS) & VM_EXIT_IA32E_MODE);
if ( lm_l1 )
- v->arch.hvm_vcpu.guest_efer |= EFER_LMA | EFER_LME;
+ v->arch.hvm.guest_efer |= EFER_LMA | EFER_LME;
else
- v->arch.hvm_vcpu.guest_efer &= ~(EFER_LMA | EFER_LME);
+ v->arch.hvm.guest_efer &= ~(EFER_LMA | EFER_LME);
vmx_update_cpu_exec_control(v);
vmx_update_secondary_exec_control(v);
case EXIT_REASON_DR_ACCESS:
ctrl = __n2_exec_control(v);
if ( (ctrl & CPU_BASED_MOV_DR_EXITING) &&
- v->arch.hvm_vcpu.flag_dr_dirty )
+ v->arch.hvm.flag_dr_dirty )
nvcpu->nv_vmexit_pending = 1;
break;
case EXIT_REASON_INVLPG:
* hardware. It consists of the L2-owned bits from the new
* value combined with the L1-owned bits from L1's guest cr.
*/
- v->arch.hvm_vcpu.guest_cr[cr] &= ~virtual_cr_mask;
- v->arch.hvm_vcpu.guest_cr[cr] |= virtual_cr_mask &
+ v->arch.hvm.guest_cr[cr] &= ~virtual_cr_mask;
+ v->arch.hvm.guest_cr[cr] |= virtual_cr_mask &
get_vvmcs(v, cr_field);
}
 /* nvcpu.guest_cr is what L2 actually writes to the cr. */
- __vmwrite(read_shadow_field, v->arch.hvm_vcpu.nvcpu.guest_cr[cr]);
+ __vmwrite(read_shadow_field, v->arch.hvm.nvcpu.guest_cr[cr]);
}
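Aside, not part of the patch: the hunk above composes the CR value hardware uses while L2 runs. Bits covered by virtual_cr_mask are owned by L1 and come from L1's vVMCS guest field; the remaining bits are owned by L2 and come from the value L2 just wrote. A minimal sketch (the helper name is hypothetical):

/* Sketch only, not part of the patch: L1-owned bits come from the vVMCS
 * guest field, L2-owned bits from the value L2 wrote. */
static unsigned long nested_cr_compose(unsigned long l2_value,
                                       unsigned long l1_guest_cr,
                                       unsigned long virtual_cr_mask)
{
    return (l2_value & ~virtual_cr_mask) | (l1_guest_cr & virtual_cr_mask);
}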
/*
}
spin_unlock(&pl->pl_time_lock);
- return now + v->arch.hvm_vcpu.stime_offset;
+ return now + v->arch.hvm.stime_offset;
}
void hvm_set_guest_time(struct vcpu *v, u64 guest_time)
if ( offset )
{
- v->arch.hvm_vcpu.stime_offset += offset;
+ v->arch.hvm.stime_offset += offset;
/*
- * If hvm_vcpu.stime_offset is updated make sure to
+ * If hvm.stime_offset is updated make sure to
* also update vcpu time, since this value is used to
* calculate the TSC.
*/
for ( ; ; )
{
v = pt->vcpu;
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
if ( likely(pt->vcpu == v) )
break;
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
}
}
static void pt_unlock(struct periodic_time *pt)
{
- spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&pt->vcpu->arch.hvm.tm_lock);
}
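Aside, not part of the patch: the retry loop in pt_lock() above exists because a periodic timer can be re-targeted to another vCPU concurrently, so the owner has to be re-checked once the per-vCPU timer lock is held. A minimal sketch of the same pattern with the renamed field (the function name is hypothetical):

/* Sketch only, not part of the patch: lock the vCPU that currently owns a
 * periodic timer, re-validating the owner after the lock is taken. */
static void pt_lock_sketch(struct periodic_time *pt)
{
    struct vcpu *v;

    for ( ;; )
    {
        v = pt->vcpu;                       /* snapshot the current owner */
        spin_lock(&v->arch.hvm.tm_lock);
        if ( likely(pt->vcpu == v) )        /* still owned by v? */
            break;                          /* yes: lock held, done */
        spin_unlock(&v->arch.hvm.tm_lock);  /* owner changed: retry */
    }
}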
static void pt_process_missed_ticks(struct periodic_time *pt)
if ( !mode_is(v->domain, delay_for_missed_ticks) )
return;
- v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
+ v->arch.hvm.guest_time = hvm_get_guest_time(v);
}
static void pt_thaw_time(struct vcpu *v)
if ( !mode_is(v->domain, delay_for_missed_ticks) )
return;
- if ( v->arch.hvm_vcpu.guest_time == 0 )
+ if ( v->arch.hvm.guest_time == 0 )
return;
- hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
- v->arch.hvm_vcpu.guest_time = 0;
+ hvm_set_guest_time(v, v->arch.hvm.guest_time);
+ v->arch.hvm.guest_time = 0;
}
void pt_save_timer(struct vcpu *v)
{
- struct list_head *head = &v->arch.hvm_vcpu.tm_list;
+ struct list_head *head = &v->arch.hvm.tm_list;
struct periodic_time *pt;
if ( v->pause_flags & VPF_blocked )
return;
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
list_for_each_entry ( pt, head, list )
if ( !pt->do_not_freeze )
pt_freeze_time(v);
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
}
void pt_restore_timer(struct vcpu *v)
{
- struct list_head *head = &v->arch.hvm_vcpu.tm_list;
+ struct list_head *head = &v->arch.hvm.tm_list;
struct periodic_time *pt;
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
list_for_each_entry ( pt, head, list )
{
pt_thaw_time(v);
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
}
static void pt_timer_fn(void *data)
int pt_update_irq(struct vcpu *v)
{
- struct list_head *head = &v->arch.hvm_vcpu.tm_list;
+ struct list_head *head = &v->arch.hvm.tm_list;
struct periodic_time *pt, *temp, *earliest_pt;
uint64_t max_lag;
int irq, pt_vector = -1;
bool level;
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
earliest_pt = NULL;
max_lag = -1ULL;
if ( earliest_pt == NULL )
{
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
return -1;
}
irq = earliest_pt->irq;
level = earliest_pt->level;
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
switch ( earliest_pt->source )
{
time_cb *cb = NULL;
void *cb_priv;
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
/* Make sure the timer is still on the list. */
- list_for_each_entry ( pt, &v->arch.hvm_vcpu.tm_list, list )
+ list_for_each_entry ( pt, &v->arch.hvm.tm_list, list )
if ( pt == earliest_pt )
{
pt_irq_fired(v, pt);
cb_priv = pt->priv;
break;
}
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
if ( cb != NULL )
cb(v, cb_priv);
static struct periodic_time *is_pt_irq(
struct vcpu *v, struct hvm_intack intack)
{
- struct list_head *head = &v->arch.hvm_vcpu.tm_list;
+ struct list_head *head = &v->arch.hvm.tm_list;
struct periodic_time *pt;
list_for_each_entry ( pt, head, list )
if ( intack.source == hvm_intsrc_vector )
return;
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
pt = is_pt_irq(v, intack);
if ( pt == NULL )
{
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
return;
}
cb = pt->cb;
cb_priv = pt->priv;
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
if ( cb != NULL )
cb(v, cb_priv);
void pt_migrate(struct vcpu *v)
{
- struct list_head *head = &v->arch.hvm_vcpu.tm_list;
+ struct list_head *head = &v->arch.hvm.tm_list;
struct periodic_time *pt;
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
list_for_each_entry ( pt, head, list )
migrate_timer(&pt->timer, v->processor);
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
}
void create_periodic_time(
destroy_periodic_time(pt);
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
pt->pending_intr_nr = 0;
pt->do_not_freeze = 0;
pt->priv = data;
pt->on_list = 1;
- list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);
+ list_add(&pt->list, &v->arch.hvm.tm_list);
init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
set_timer(&pt->timer, pt->scheduled);
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
}
void destroy_periodic_time(struct periodic_time *pt)
pt->on_list = 0;
pt_unlock(pt);
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ spin_lock(&v->arch.hvm.tm_lock);
pt->vcpu = v;
if ( on_list )
{
pt->on_list = 1;
- list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);
+ list_add(&pt->list, &v->arch.hvm.tm_list);
migrate_timer(&pt->timer, v->processor);
}
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ spin_unlock(&v->arch.hvm.tm_lock);
}
void pt_adjust_global_vcpu_target(struct vcpu *v)
if ( pt->pending_intr_nr && !pt->on_list )
{
pt->on_list = 1;
- list_add(&pt->list, &pt->vcpu->arch.hvm_vcpu.tm_list);
+ list_add(&pt->list, &pt->vcpu->arch.hvm.tm_list);
vcpu_kick(pt->vcpu);
}
pt_unlock(pt);
unsigned long hap_gva_to_gfn(GUEST_PAGING_LEVELS)(
struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec)
{
- unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3];
+ unsigned long cr3 = v->arch.hvm.guest_cr[3];
return hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(v, p2m, cr3, gva, pfec, NULL);
}
static void hap_update_cr3(struct vcpu *v, int do_locking, bool noflush)
{
- v->arch.hvm_vcpu.hw_cr[3] = v->arch.hvm_vcpu.guest_cr[3];
+ v->arch.hvm.hw_cr[3] = v->arch.hvm.guest_cr[3];
hvm_update_guest_cr3(v, noflush);
}
static void hap_update_paging_modes(struct vcpu *v)
{
struct domain *d = v->domain;
- unsigned long cr3_gfn = v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT;
+ unsigned long cr3_gfn = v->arch.hvm.guest_cr[3] >> PAGE_SHIFT;
p2m_type_t t;
/* We hold onto the cr3 as it may be modified later, and
ASSERT(shadow_mode_external(d));
/* Find where in the page the l3 table is */
- guest_idx = guest_index((void *)v->arch.hvm_vcpu.guest_cr[3]);
+ guest_idx = guest_index((void *)v->arch.hvm.guest_cr[3]);
// Ignore the low 2 bits of guest_idx -- they are really just
// cache control.
///
- /// v->arch.hvm_vcpu.hw_cr[3]
+ /// v->arch.hvm.hw_cr[3]
///
if ( shadow_mode_external(d) )
{
ASSERT(is_hvm_domain(d));
#if SHADOW_PAGING_LEVELS == 3
/* 2-on-3 or 3-on-3: Use the PAE shadow l3 table we just fabricated */
- v->arch.hvm_vcpu.hw_cr[3] =
- virt_to_maddr(&v->arch.paging.shadow.l3table);
+ v->arch.hvm.hw_cr[3] = virt_to_maddr(&v->arch.paging.shadow.l3table);
#else
/* 4-on-4: Just use the shadow top-level directly */
- v->arch.hvm_vcpu.hw_cr[3] =
- pagetable_get_paddr(v->arch.shadow_table[0]);
+ v->arch.hvm.hw_cr[3] = pagetable_get_paddr(v->arch.shadow_table[0]);
#endif
hvm_update_guest_cr3(v, noflush);
}
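Aside, not part of the patch: the hw_cr[3] hunks above show the convention for each paging mode. Under HAP, hardware CR3 simply mirrors the guest's CR3 (the p2m does the translation), except for the non-paged HVM_PARAM_IDENT_PT special case shown earlier; under shadow paging it points at Xen's shadow top level instead. A minimal sketch of the 4-level shadow case (the helper name is hypothetical):

/* Sketch only, not part of the patch: how hw_cr[3] is derived in the two
 * paging modes, ignoring the non-paged IDENT_PT special case. */
static unsigned long hw_cr3_sketch(const struct vcpu *v, bool hap)
{
    if ( hap )
        return v->arch.hvm.guest_cr[3];    /* HAP: guest CR3 used directly */

    /* Shadow: hardware CR3 points at the shadow top-level table. */
    return pagetable_get_paddr(v->arch.shadow_table[0]);
}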
unsigned long l3gfn;
mfn_t l3mfn;
- gcr3 = (v->arch.hvm_vcpu.guest_cr[3]);
+ gcr3 = v->arch.hvm.guest_cr[3];
/* fast path: the pagetable belongs to the current context */
if ( gcr3 == gpa )
fast_path = 1;
{
struct pl_time *pl = v->domain->arch.hvm.pl_time;
- stime += pl->stime_offset + v->arch.hvm_vcpu.stime_offset;
+ stime += pl->stime_offset + v->arch.hvm.stime_offset;
if ( stime >= 0 )
tsc_stamp = gtime_to_gtsc(d, stime);
else
_u.flags |= XEN_PVCLOCK_TSC_STABLE_BIT;
if ( is_hvm_domain(d) )
- _u.tsc_timestamp += v->arch.hvm_vcpu.cache_tsc_offset;
+ _u.tsc_timestamp += v->arch.hvm.cache_tsc_offset;
/* Don't bother unless timestamp record has changed or we are forced. */
_u.version = u->version; /* make versions match for memcmp test */
*/
d->arch.hvm.sync_tsc = rdtsc();
hvm_set_tsc_offset(d->vcpu[0],
- d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset,
+ d->vcpu[0]->arch.hvm.cache_tsc_offset,
d->arch.hvm.sync_tsc);
}
}
OFFSET(VCPU_vmx_realmode, struct vcpu, arch.hvm_vmx.vmx_realmode);
OFFSET(VCPU_vmx_emulate, struct vcpu, arch.hvm_vmx.vmx_emulate);
OFFSET(VCPU_vm86_seg_mask, struct vcpu, arch.hvm_vmx.vm86_segment_mask);
- OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
+ OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm.guest_cr[2]);
BLANK();
- OFFSET(VCPU_nhvm_guestmode, struct vcpu, arch.hvm_vcpu.nvcpu.nv_guestmode);
- OFFSET(VCPU_nhvm_p2m, struct vcpu, arch.hvm_vcpu.nvcpu.nv_p2m);
- OFFSET(VCPU_nsvm_hap_enabled, struct vcpu, arch.hvm_vcpu.nvcpu.u.nsvm.ns_hap_enabled);
+ OFFSET(VCPU_nhvm_guestmode, struct vcpu, arch.hvm.nvcpu.nv_guestmode);
+ OFFSET(VCPU_nhvm_p2m, struct vcpu, arch.hvm.nvcpu.nv_p2m);
+ OFFSET(VCPU_nsvm_hap_enabled, struct vcpu, arch.hvm.nvcpu.u.nsvm.ns_hap_enabled);
BLANK();
OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
{
struct segment_register sreg;
context = CTXT_hvm_guest;
- fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0];
- fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2];
- fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3];
- fault_crs[4] = v->arch.hvm_vcpu.guest_cr[4];
+ fault_crs[0] = v->arch.hvm.guest_cr[0];
+ fault_crs[2] = v->arch.hvm.guest_cr[2];
+ fault_crs[3] = v->arch.hvm.guest_cr[3];
+ fault_crs[4] = v->arch.hvm.guest_cr[4];
hvm_get_segment_register(v, x86_seg_cs, &sreg);
fault_regs.cs = sreg.sel;
hvm_get_segment_register(v, x86_seg_ds, &sreg);
/* Virtual Machine Extensions */
union {
struct pv_vcpu pv;
- struct hvm_vcpu hvm_vcpu;
+ struct hvm_vcpu hvm;
};
pagetable_t guest_table_user; /* (MFN) x86/64 user-space pagetable */
struct guest_memory_policy *policy);
/* Shorthands to improve code legibility. */
-#define hvm_vmx hvm_vcpu.u.vmx
-#define hvm_svm hvm_vcpu.u.svm
+#define hvm_vmx hvm.u.vmx
+#define hvm_svm hvm.u.svm
bool update_runstate_area(struct vcpu *);
bool update_secondary_system_time(struct vcpu *,
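Aside, not part of the patch: the updated shorthands mean existing call sites keep compiling unchanged; only their expansion moves to the renamed union member. An illustrative (hypothetical) example:

/* Illustration only, not part of the patch: both statements are identical
 * after preprocessing, since hvm_vmx now expands to hvm.u.vmx. */
static void example_use(struct vcpu *v)               /* hypothetical */
{
    v->arch.hvm_vmx.exception_bitmap |= 1U << TRAP_int3;
    v->arch.hvm.u.vmx.exception_bitmap |= 1U << TRAP_int3;
}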
return (is_pv_vcpu(v) ||
GUEST_PAGING_LEVELS != 2 ||
!hvm_paging_enabled(v) ||
- (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PSE));
+ (v->arch.hvm.guest_cr[4] & X86_CR4_PSE));
}
static inline bool guest_can_use_l3_superpages(const struct domain *d)
int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
#define hvm_paging_enabled(v) \
- (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG))
+ (!!((v)->arch.hvm.guest_cr[0] & X86_CR0_PG))
#define hvm_wp_enabled(v) \
- (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_WP))
+ (!!((v)->arch.hvm.guest_cr[0] & X86_CR0_WP))
#define hvm_pcid_enabled(v) \
- (!!((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PCIDE))
+ (!!((v)->arch.hvm.guest_cr[4] & X86_CR4_PCIDE))
#define hvm_pae_enabled(v) \
- (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
+ (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PAE))
#define hvm_smep_enabled(v) \
- (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMEP))
+ (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMEP))
#define hvm_smap_enabled(v) \
- (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMAP))
+ (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMAP))
#define hvm_nx_enabled(v) \
- ((v)->arch.hvm_vcpu.guest_efer & EFER_NX)
+ ((v)->arch.hvm.guest_efer & EFER_NX)
#define hvm_pku_enabled(v) \
- (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PKE))
+ (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PKE))
/* Can we use superpages in the HAP p2m table? */
#define hap_has_1gb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB))
#define hap_has_2mb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_2MB))
-#define hvm_long_mode_active(v) (!!((v)->arch.hvm_vcpu.guest_efer & EFER_LMA))
+#define hvm_long_mode_active(v) (!!((v)->arch.hvm.guest_efer & EFER_LMA))
enum hvm_intblk
hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack);
#define hvm_msr_tsc_aux(v) ({ \
struct domain *__d = (v)->domain; \
(__d->arch.tsc_mode == TSC_MODE_PVRDTSCP) \
- ? (u32)__d->arch.incarnation : (u32)(v)->arch.hvm_vcpu.msr_tsc_aux; \
+ ? (u32)__d->arch.incarnation : (u32)(v)->arch.hvm.msr_tsc_aux; \
})
int hvm_x2apic_msr_read(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
{
if ( !nestedhvm_vmswitch_in_progress(v) &&
nestedhvm_vcpu_in_guestmode(v) )
- v->arch.hvm_vcpu.nvcpu.guest_cr[cr] = value;
+ v->arch.hvm.nvcpu.guest_cr[cr] = value;
}
#endif /* _HVM_NESTEDHVM_H */
/* True when l1 guest enabled SVM in EFER */
#define nsvm_efer_svm_enabled(v) \
- (!!((v)->arch.hvm_vcpu.guest_efer & EFER_SVME))
+ (!!((v)->arch.hvm.guest_efer & EFER_SVME))
int nestedsvm_vmcb_map(struct vcpu *v, uint64_t vmcbaddr);
void nestedsvm_vmexit_defer(struct vcpu *v,
unsigned long guest_cr[5];
};
-#define vcpu_nestedhvm(v) ((v)->arch.hvm_vcpu.nvcpu)
+#define vcpu_nestedhvm(v) ((v)->arch.hvm.nvcpu)
struct altp2mvcpu {
uint16_t p2midx; /* alternate p2m index */
gfn_t veinfo_gfn; /* #VE information page gfn */
};
-#define vcpu_altp2m(v) ((v)->arch.hvm_vcpu.avcpu)
+#define vcpu_altp2m(v) ((v)->arch.hvm.avcpu)
struct hvm_vcpu {
/* Guest control-register and EFER values, just as the guest sees them. */
#include <public/hvm/ioreq.h>
#include <asm/hvm/vpt.h>
-#define vcpu_vlapic(x) (&(x)->arch.hvm_vcpu.vlapic)
-#define vlapic_vcpu(x) (container_of((x), struct vcpu, arch.hvm_vcpu.vlapic))
+#define vcpu_vlapic(x) (&(x)->arch.hvm.vlapic)
+#define vlapic_vcpu(x) (container_of((x), struct vcpu, arch.hvm.vlapic))
#define const_vlapic_vcpu(x) (container_of((x), const struct vcpu, \
- arch.hvm_vcpu.vlapic))
+ arch.hvm.vlapic))
#define vlapic_domain(x) (vlapic_vcpu(x)->domain)
#define _VLAPIC_ID(vlapic, id) (vlapic_x2apic_mode(vlapic) \
type = INVVPID_ALL_CONTEXT;
execute_invvpid:
- __invvpid(type, v->arch.hvm_vcpu.n1asid.asid, (u64)gva);
+ __invvpid(type, v->arch.hvm.n1asid.asid, (u64)gva);
}
static inline void vpid_sync_all(void)