The trailing _vcpu suffix is redundant, yet it adds to code volume. Drop it.
Reflow lines as appropriate, and switch to using the new XFREE/etc wrappers
where applicable.
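For context, XFREE() bundles the open-coded "xfree() then NULL the pointer"
pattern it replaces into a single statement. A minimal sketch of such a
wrapper (illustrative only, not claimed to be the exact definition in
xen/include/xen/xmalloc.h):

    /*
     * Free an allocation and clear the owning pointer, so any stale use of
     * the pointer hits NULL instead of freed memory.  Sketch only.
     */
    #define XFREE(p) do { \
        xfree(p);         \
        (p) = NULL;       \
    } while ( false )

This mirrors the xfree()/NULL pair dropped in the trap_ctxt hunk below.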
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
* kernel.
*/
if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) &&
- !(next->arch.pv_vcpu.ctrlreg[4] & X86_CR4_OSXSAVE))
+ !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE))
val &= ~((uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE) << 32);
if (unlikely(these_masks->_1cd != val)) {
* kernel.
*/
if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) &&
- !(next->arch.pv_vcpu.ctrlreg[4] & X86_CR4_OSXSAVE))
+ !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE))
val &= ~(uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE);
if (unlikely(these_masks->_1cd != val)) {
*
* Architecturally, the correct code here is simply:
*
- * if ( v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_OSXSAVE )
+ * if ( v->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE )
* c |= cpufeat_mask(X86_FEATURE_OSXSAVE);
*
* However because of bugs in Xen (before c/s bd19080b, Nov 2010,
* #UD or #GP is currently being serviced.
*/
/* OSXSAVE clear in policy. Fast-forward CR4 back in. */
- if ( (v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_OSXSAVE) ||
+ if ( (v->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE) ||
(regs->entry_vector == TRAP_invalid_op &&
guest_kernel_mode(v, regs) &&
(read_cr4() & X86_CR4_OSXSAVE)) )
case 0:
/* OSPKE clear in policy. Fast-forward CR4 back in. */
if ( (is_pv_domain(d)
- ? v->arch.pv_vcpu.ctrlreg[4]
+ ? v->arch.pv.ctrlreg[4]
: v->arch.hvm_vcpu.guest_cr[4]) & X86_CR4_PKE )
res->c |= cpufeat_mask(X86_FEATURE_OSPKE);
break;
{
memcpy(&v->arch.user_regs, &c.nat->user_regs, sizeof(c.nat->user_regs));
if ( is_pv_domain(d) )
- memcpy(v->arch.pv_vcpu.trap_ctxt, c.nat->trap_ctxt,
+ memcpy(v->arch.pv.trap_ctxt, c.nat->trap_ctxt,
sizeof(c.nat->trap_ctxt));
}
else
if ( is_pv_domain(d) )
{
for ( i = 0; i < ARRAY_SIZE(c.cmp->trap_ctxt); ++i )
- XLAT_trap_info(v->arch.pv_vcpu.trap_ctxt + i,
+ XLAT_trap_info(v->arch.pv.trap_ctxt + i,
c.cmp->trap_ctxt + i);
}
}
}
/* IOPL privileges are virtualised. */
- v->arch.pv_vcpu.iopl = v->arch.user_regs.eflags & X86_EFLAGS_IOPL;
+ v->arch.pv.iopl = v->arch.user_regs.eflags & X86_EFLAGS_IOPL;
v->arch.user_regs.eflags &= ~X86_EFLAGS_IOPL;
/* Ensure real hardware interrupts are enabled. */
if ( !compat && !(flags & VGCF_in_kernel) && !c.nat->ctrlreg[1] )
return -EINVAL;
- v->arch.pv_vcpu.ldt_base = c(ldt_base);
- v->arch.pv_vcpu.ldt_ents = c(ldt_ents);
+ v->arch.pv.ldt_base = c(ldt_base);
+ v->arch.pv.ldt_ents = c(ldt_ents);
}
else
{
fail = compat_pfn_to_cr3(pfn) != c.cmp->ctrlreg[3];
}
- for ( i = 0; i < ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames); ++i )
- fail |= v->arch.pv_vcpu.gdt_frames[i] != c(gdt_frames[i]);
- fail |= v->arch.pv_vcpu.gdt_ents != c(gdt_ents);
+ for ( i = 0; i < ARRAY_SIZE(v->arch.pv.gdt_frames); ++i )
+ fail |= v->arch.pv.gdt_frames[i] != c(gdt_frames[i]);
+ fail |= v->arch.pv.gdt_ents != c(gdt_ents);
- fail |= v->arch.pv_vcpu.ldt_base != c(ldt_base);
- fail |= v->arch.pv_vcpu.ldt_ents != c(ldt_ents);
+ fail |= v->arch.pv.ldt_base != c(ldt_base);
+ fail |= v->arch.pv.ldt_ents != c(ldt_ents);
if ( fail )
return -EOPNOTSUPP;
}
- v->arch.pv_vcpu.kernel_ss = c(kernel_ss);
- v->arch.pv_vcpu.kernel_sp = c(kernel_sp);
- for ( i = 0; i < ARRAY_SIZE(v->arch.pv_vcpu.ctrlreg); ++i )
- v->arch.pv_vcpu.ctrlreg[i] = c(ctrlreg[i]);
+ v->arch.pv.kernel_ss = c(kernel_ss);
+ v->arch.pv.kernel_sp = c(kernel_sp);
+ for ( i = 0; i < ARRAY_SIZE(v->arch.pv.ctrlreg); ++i )
+ v->arch.pv.ctrlreg[i] = c(ctrlreg[i]);
- v->arch.pv_vcpu.event_callback_eip = c(event_callback_eip);
- v->arch.pv_vcpu.failsafe_callback_eip = c(failsafe_callback_eip);
+ v->arch.pv.event_callback_eip = c(event_callback_eip);
+ v->arch.pv.failsafe_callback_eip = c(failsafe_callback_eip);
if ( !compat )
{
- v->arch.pv_vcpu.syscall_callback_eip = c.nat->syscall_callback_eip;
+ v->arch.pv.syscall_callback_eip = c.nat->syscall_callback_eip;
/* non-nul selector kills fs_base */
- v->arch.pv_vcpu.fs_base =
+ v->arch.pv.fs_base =
!(v->arch.user_regs.fs & ~3) ? c.nat->fs_base : 0;
- v->arch.pv_vcpu.gs_base_kernel = c.nat->gs_base_kernel;
+ v->arch.pv.gs_base_kernel = c.nat->gs_base_kernel;
/* non-nul selector kills gs_base_user */
- v->arch.pv_vcpu.gs_base_user =
+ v->arch.pv.gs_base_user =
!(v->arch.user_regs.gs & ~3) ? c.nat->gs_base_user : 0;
}
else
{
- v->arch.pv_vcpu.event_callback_cs = c(event_callback_cs);
- v->arch.pv_vcpu.failsafe_callback_cs = c(failsafe_callback_cs);
+ v->arch.pv.event_callback_cs = c(event_callback_cs);
+ v->arch.pv.failsafe_callback_cs = c(failsafe_callback_cs);
}
/* Only CR0.TS is modifiable by guest or admin. */
- v->arch.pv_vcpu.ctrlreg[0] &= X86_CR0_TS;
- v->arch.pv_vcpu.ctrlreg[0] |= read_cr0() & ~X86_CR0_TS;
+ v->arch.pv.ctrlreg[0] &= X86_CR0_TS;
+ v->arch.pv.ctrlreg[0] |= read_cr0() & ~X86_CR0_TS;
- cr4 = v->arch.pv_vcpu.ctrlreg[4];
- v->arch.pv_vcpu.ctrlreg[4] = cr4 ? pv_guest_cr4_fixup(v, cr4) :
+ cr4 = v->arch.pv.ctrlreg[4];
+ v->arch.pv.ctrlreg[4] = cr4 ? pv_guest_cr4_fixup(v, cr4) :
real_cr4_to_pv_guest_cr4(mmu_cr4_features);
memset(v->arch.debugreg, 0, sizeof(v->arch.debugreg));
rc = (int)pv_set_gdt(v, c.nat->gdt_frames, c.nat->gdt_ents);
else
{
- unsigned long gdt_frames[ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames)];
+ unsigned long gdt_frames[ARRAY_SIZE(v->arch.pv.gdt_frames)];
unsigned int nr_frames = DIV_ROUND_UP(c.cmp->gdt_ents, 512);
- if ( nr_frames > ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames) )
+ if ( nr_frames > ARRAY_SIZE(v->arch.pv.gdt_frames) )
return -EINVAL;
for ( i = 0; i < nr_frames; ++i )
if ( !is_pv_32bit_vcpu(n) )
{
/* This can only be non-zero if selector is NULL. */
- if ( n->arch.pv_vcpu.fs_base | (dirty_segment_mask & DIRTY_FS_BASE) )
- wrfsbase(n->arch.pv_vcpu.fs_base);
+ if ( n->arch.pv.fs_base | (dirty_segment_mask & DIRTY_FS_BASE) )
+ wrfsbase(n->arch.pv.fs_base);
/*
* Most kernels have non-zero GS base, so don't bother testing.
* (For old AMD hardware this is also a serialising instruction,
* avoiding erratum #88.)
*/
- wrgsshadow(n->arch.pv_vcpu.gs_base_kernel);
+ wrgsshadow(n->arch.pv.gs_base_kernel);
/* This can only be non-zero if selector is NULL. */
- if ( n->arch.pv_vcpu.gs_base_user |
+ if ( n->arch.pv.gs_base_user |
(dirty_segment_mask & DIRTY_GS_BASE) )
- wrgsbase(n->arch.pv_vcpu.gs_base_user);
+ wrgsbase(n->arch.pv.gs_base_user);
/* If in kernel mode then switch the GS bases around. */
if ( (n->arch.flags & TF_kernel_mode) )
if ( unlikely(!all_segs_okay) )
{
- struct pv_vcpu *pv = &n->arch.pv_vcpu;
+ struct pv_vcpu *pv = &n->arch.pv;
struct cpu_user_regs *regs = guest_cpu_user_regs();
unsigned long *rsp =
(unsigned long *)(((n->arch.flags & TF_kernel_mode)
rflags = regs->rflags & ~(X86_EFLAGS_IF|X86_EFLAGS_IOPL);
rflags |= !vcpu_info(n, evtchn_upcall_mask) << 9;
if ( VM_ASSIST(n->domain, architectural_iopl) )
- rflags |= n->arch.pv_vcpu.iopl;
+ rflags |= n->arch.pv.iopl;
if ( is_pv_32bit_vcpu(n) )
{
if ( cpu_has_fsgsbase && !is_pv_32bit_vcpu(v) )
{
- v->arch.pv_vcpu.fs_base = __rdfsbase();
+ v->arch.pv.fs_base = __rdfsbase();
if ( v->arch.flags & TF_kernel_mode )
- v->arch.pv_vcpu.gs_base_kernel = __rdgsbase();
+ v->arch.pv.gs_base_kernel = __rdgsbase();
else
- v->arch.pv_vcpu.gs_base_user = __rdgsbase();
+ v->arch.pv.gs_base_user = __rdgsbase();
}
if ( regs->ds )
dirty_segment_mask |= DIRTY_FS;
/* non-nul selector kills fs_base */
if ( regs->fs & ~3 )
- v->arch.pv_vcpu.fs_base = 0;
+ v->arch.pv.fs_base = 0;
}
- if ( v->arch.pv_vcpu.fs_base )
+ if ( v->arch.pv.fs_base )
dirty_segment_mask |= DIRTY_FS_BASE;
if ( regs->gs || is_pv_32bit_vcpu(v) )
dirty_segment_mask |= DIRTY_GS;
/* non-nul selector kills gs_base_user */
if ( regs->gs & ~3 )
- v->arch.pv_vcpu.gs_base_user = 0;
+ v->arch.pv.gs_base_user = 0;
}
- if ( v->arch.flags & TF_kernel_mode ? v->arch.pv_vcpu.gs_base_kernel
- : v->arch.pv_vcpu.gs_base_user )
+ if ( v->arch.flags & TF_kernel_mode ? v->arch.pv.gs_base_kernel
+ : v->arch.pv.gs_base_user )
dirty_segment_mask |= DIRTY_GS_BASE;
this_cpu(dirty_segment_mask) = dirty_segment_mask;
{
if ( !update_runstate_area(v) && is_pv_vcpu(v) &&
!(v->arch.flags & TF_kernel_mode) )
- v->arch.pv_vcpu.need_update_runstate_area = 1;
+ v->arch.pv.need_update_runstate_area = 1;
}
static inline bool need_full_gdt(const struct domain *d)
return mfn_to_virt(mfn_x(mfn));
dcache = &v->domain->arch.pv.mapcache;
- vcache = &v->arch.pv_vcpu.mapcache;
+ vcache = &v->arch.pv.mapcache;
if ( !dcache->inuse )
return mfn_to_virt(mfn_x(mfn));
idx = PFN_DOWN(va - MAPCACHE_VIRT_START);
mfn = l1e_get_pfn(MAPCACHE_L1ENT(idx));
- hashent = &v->arch.pv_vcpu.mapcache.hash[MAPHASH_HASHFN(mfn)];
+ hashent = &v->arch.pv.mapcache.hash[MAPHASH_HASHFN(mfn)];
local_irq_save(flags);
BUILD_BUG_ON(MAPHASHENT_NOTINUSE < MAPCACHE_ENTRIES);
for ( i = 0; i < MAPHASH_ENTRIES; i++ )
{
- struct vcpu_maphash_entry *hashent = &v->arch.pv_vcpu.mapcache.hash[i];
+ struct vcpu_maphash_entry *hashent = &v->arch.pv.mapcache.hash[i];
hashent->mfn = ~0UL; /* never valid to map */
hashent->idx = MAPHASHENT_NOTINUSE;
if ( is_pv_domain(d) )
{
evc->sysenter_callback_cs =
- v->arch.pv_vcpu.sysenter_callback_cs;
+ v->arch.pv.sysenter_callback_cs;
evc->sysenter_callback_eip =
- v->arch.pv_vcpu.sysenter_callback_eip;
+ v->arch.pv.sysenter_callback_eip;
evc->sysenter_disables_events =
- v->arch.pv_vcpu.sysenter_disables_events;
+ v->arch.pv.sysenter_disables_events;
evc->syscall32_callback_cs =
- v->arch.pv_vcpu.syscall32_callback_cs;
+ v->arch.pv.syscall32_callback_cs;
evc->syscall32_callback_eip =
- v->arch.pv_vcpu.syscall32_callback_eip;
+ v->arch.pv.syscall32_callback_eip;
evc->syscall32_disables_events =
- v->arch.pv_vcpu.syscall32_disables_events;
+ v->arch.pv.syscall32_disables_events;
}
else
{
break;
domain_pause(d);
fixup_guest_code_selector(d, evc->sysenter_callback_cs);
- v->arch.pv_vcpu.sysenter_callback_cs =
+ v->arch.pv.sysenter_callback_cs =
evc->sysenter_callback_cs;
- v->arch.pv_vcpu.sysenter_callback_eip =
+ v->arch.pv.sysenter_callback_eip =
evc->sysenter_callback_eip;
- v->arch.pv_vcpu.sysenter_disables_events =
+ v->arch.pv.sysenter_disables_events =
evc->sysenter_disables_events;
fixup_guest_code_selector(d, evc->syscall32_callback_cs);
- v->arch.pv_vcpu.syscall32_callback_cs =
+ v->arch.pv.syscall32_callback_cs =
evc->syscall32_callback_cs;
- v->arch.pv_vcpu.syscall32_callback_eip =
+ v->arch.pv.syscall32_callback_eip =
evc->syscall32_callback_eip;
- v->arch.pv_vcpu.syscall32_disables_events =
+ v->arch.pv.syscall32_disables_events =
evc->syscall32_disables_events;
}
else if ( (evc->sysenter_callback_cs & ~3) ||
if ( boot_cpu_has(X86_FEATURE_DBEXT) )
{
- if ( v->arch.pv_vcpu.dr_mask[0] )
+ if ( v->arch.pv.dr_mask[0] )
{
if ( i < vmsrs->msr_count && !ret )
{
msr.index = MSR_AMD64_DR0_ADDRESS_MASK;
- msr.value = v->arch.pv_vcpu.dr_mask[0];
+ msr.value = v->arch.pv.dr_mask[0];
if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
ret = -EFAULT;
}
for ( j = 0; j < 3; ++j )
{
- if ( !v->arch.pv_vcpu.dr_mask[1 + j] )
+ if ( !v->arch.pv.dr_mask[1 + j] )
continue;
if ( i < vmsrs->msr_count && !ret )
{
msr.index = MSR_AMD64_DR1_ADDRESS_MASK + j;
- msr.value = v->arch.pv_vcpu.dr_mask[1 + j];
+ msr.value = v->arch.pv.dr_mask[1 + j];
if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
ret = -EFAULT;
}
if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
(msr.value >> 32) )
break;
- v->arch.pv_vcpu.dr_mask[0] = msr.value;
+ v->arch.pv.dr_mask[0] = msr.value;
continue;
case MSR_AMD64_DR1_ADDRESS_MASK ...
(msr.value >> 32) )
break;
msr.index -= MSR_AMD64_DR1_ADDRESS_MASK - 1;
- v->arch.pv_vcpu.dr_mask[msr.index] = msr.value;
+ v->arch.pv.dr_mask[msr.index] = msr.value;
continue;
}
break;
{
memcpy(&c.nat->user_regs, &v->arch.user_regs, sizeof(c.nat->user_regs));
if ( is_pv_domain(d) )
- memcpy(c.nat->trap_ctxt, v->arch.pv_vcpu.trap_ctxt,
+ memcpy(c.nat->trap_ctxt, v->arch.pv.trap_ctxt,
sizeof(c.nat->trap_ctxt));
}
else
{
for ( i = 0; i < ARRAY_SIZE(c.cmp->trap_ctxt); ++i )
XLAT_trap_info(c.cmp->trap_ctxt + i,
- v->arch.pv_vcpu.trap_ctxt + i);
+ v->arch.pv.trap_ctxt + i);
}
}
}
else
{
- c(ldt_base = v->arch.pv_vcpu.ldt_base);
- c(ldt_ents = v->arch.pv_vcpu.ldt_ents);
- for ( i = 0; i < ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames); ++i )
- c(gdt_frames[i] = v->arch.pv_vcpu.gdt_frames[i]);
+ c(ldt_base = v->arch.pv.ldt_base);
+ c(ldt_ents = v->arch.pv.ldt_ents);
+ for ( i = 0; i < ARRAY_SIZE(v->arch.pv.gdt_frames); ++i )
+ c(gdt_frames[i] = v->arch.pv.gdt_frames[i]);
BUILD_BUG_ON(ARRAY_SIZE(c.nat->gdt_frames) !=
ARRAY_SIZE(c.cmp->gdt_frames));
for ( ; i < ARRAY_SIZE(c.nat->gdt_frames); ++i )
c(gdt_frames[i] = 0);
- c(gdt_ents = v->arch.pv_vcpu.gdt_ents);
- c(kernel_ss = v->arch.pv_vcpu.kernel_ss);
- c(kernel_sp = v->arch.pv_vcpu.kernel_sp);
- for ( i = 0; i < ARRAY_SIZE(v->arch.pv_vcpu.ctrlreg); ++i )
- c(ctrlreg[i] = v->arch.pv_vcpu.ctrlreg[i]);
- c(event_callback_eip = v->arch.pv_vcpu.event_callback_eip);
- c(failsafe_callback_eip = v->arch.pv_vcpu.failsafe_callback_eip);
+ c(gdt_ents = v->arch.pv.gdt_ents);
+ c(kernel_ss = v->arch.pv.kernel_ss);
+ c(kernel_sp = v->arch.pv.kernel_sp);
+ for ( i = 0; i < ARRAY_SIZE(v->arch.pv.ctrlreg); ++i )
+ c(ctrlreg[i] = v->arch.pv.ctrlreg[i]);
+ c(event_callback_eip = v->arch.pv.event_callback_eip);
+ c(failsafe_callback_eip = v->arch.pv.failsafe_callback_eip);
if ( !compat )
{
- c.nat->syscall_callback_eip = v->arch.pv_vcpu.syscall_callback_eip;
- c.nat->fs_base = v->arch.pv_vcpu.fs_base;
- c.nat->gs_base_kernel = v->arch.pv_vcpu.gs_base_kernel;
- c.nat->gs_base_user = v->arch.pv_vcpu.gs_base_user;
+ c.nat->syscall_callback_eip = v->arch.pv.syscall_callback_eip;
+ c.nat->fs_base = v->arch.pv.fs_base;
+ c.nat->gs_base_kernel = v->arch.pv.gs_base_kernel;
+ c.nat->gs_base_user = v->arch.pv.gs_base_user;
}
else
{
- c(event_callback_cs = v->arch.pv_vcpu.event_callback_cs);
- c(failsafe_callback_cs = v->arch.pv_vcpu.failsafe_callback_cs);
+ c(event_callback_cs = v->arch.pv.event_callback_cs);
+ c(failsafe_callback_cs = v->arch.pv.failsafe_callback_cs);
}
/* IOPL privileges are virtualised: merge back into returned eflags. */
BUG_ON((c(user_regs.eflags) & X86_EFLAGS_IOPL) != 0);
- c(user_regs.eflags |= v->arch.pv_vcpu.iopl);
+ c(user_regs.eflags |= v->arch.pv.iopl);
if ( !compat )
{
v->fpu_dirtied = 1;
/* Xen doesn't need TS set, but the guest might. */
- need_stts = is_pv_vcpu(v) && (v->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS);
+ need_stts = is_pv_vcpu(v) && (v->arch.pv.ctrlreg[0] & X86_CR0_TS);
}
else
{
const struct domain *d = v->domain;
unsigned long cr4;
- cr4 = v->arch.pv_vcpu.ctrlreg[4] & ~X86_CR4_DE;
+ cr4 = v->arch.pv.ctrlreg[4] & ~X86_CR4_DE;
cr4 |= mmu_cr4_features & (X86_CR4_PSE | X86_CR4_SMEP | X86_CR4_SMAP |
X86_CR4_OSXSAVE | X86_CR4_FSGSBASE);
"Bad args to SET_LDT: ptr=%lx, ents=%x\n", ptr, ents);
rc = -EINVAL;
}
- else if ( (curr->arch.pv_vcpu.ldt_ents != ents) ||
- (curr->arch.pv_vcpu.ldt_base != ptr) )
+ else if ( (curr->arch.pv.ldt_ents != ents) ||
+ (curr->arch.pv.ldt_base != ptr) )
{
if ( pv_destroy_ldt(curr) )
flush_tlb_local();
- curr->arch.pv_vcpu.ldt_base = ptr;
- curr->arch.pv_vcpu.ldt_ents = ents;
+ curr->arch.pv.ldt_base = ptr;
+ curr->arch.pv.ldt_ents = ents;
load_LDT(curr);
}
break;
if ( set_iopl.iopl > 3 )
break;
ret = 0;
- curr->arch.pv_vcpu.iopl = MASK_INSR(set_iopl.iopl, X86_EFLAGS_IOPL);
+ curr->arch.pv.iopl = MASK_INSR(set_iopl.iopl, X86_EFLAGS_IOPL);
break;
}
break;
ret = 0;
#ifndef COMPAT
- curr->arch.pv_vcpu.iobmp = set_iobitmap.bitmap;
+ curr->arch.pv.iobmp = set_iobitmap.bitmap;
#else
- guest_from_compat_handle(curr->arch.pv_vcpu.iobmp,
- set_iobitmap.bitmap);
+ guest_from_compat_handle(curr->arch.pv.iobmp, set_iobitmap.bitmap);
#endif
- curr->arch.pv_vcpu.iobmp_limit = set_iobitmap.nr_ports;
+ curr->arch.pv.iobmp_limit = set_iobitmap.nr_ports;
break;
}
{
struct vcpu *curr = current;
struct domain *d = curr->domain;
- struct trap_info *t = &curr->arch.pv_vcpu.trap_ctxt[TRAP_nmi];
+ struct trap_info *t = &curr->arch.pv.trap_ctxt[TRAP_nmi];
if ( !is_canonical_address(address) )
return -EINVAL;
static void unregister_guest_nmi_callback(void)
{
struct vcpu *curr = current;
- struct trap_info *t = &curr->arch.pv_vcpu.trap_ctxt[TRAP_nmi];
+ struct trap_info *t = &curr->arch.pv.trap_ctxt[TRAP_nmi];
memset(t, 0, sizeof(*t));
}
switch ( reg->type )
{
case CALLBACKTYPE_event:
- curr->arch.pv_vcpu.event_callback_eip = reg->address;
+ curr->arch.pv.event_callback_eip = reg->address;
break;
case CALLBACKTYPE_failsafe:
- curr->arch.pv_vcpu.failsafe_callback_eip = reg->address;
+ curr->arch.pv.failsafe_callback_eip = reg->address;
if ( reg->flags & CALLBACKF_mask_events )
curr->arch.vgc_flags |= VGCF_failsafe_disables_events;
else
break;
case CALLBACKTYPE_syscall:
- curr->arch.pv_vcpu.syscall_callback_eip = reg->address;
+ curr->arch.pv.syscall_callback_eip = reg->address;
if ( reg->flags & CALLBACKF_mask_events )
curr->arch.vgc_flags |= VGCF_syscall_disables_events;
else
break;
case CALLBACKTYPE_syscall32:
- curr->arch.pv_vcpu.syscall32_callback_eip = reg->address;
- curr->arch.pv_vcpu.syscall32_disables_events =
+ curr->arch.pv.syscall32_callback_eip = reg->address;
+ curr->arch.pv.syscall32_disables_events =
!!(reg->flags & CALLBACKF_mask_events);
break;
case CALLBACKTYPE_sysenter:
- curr->arch.pv_vcpu.sysenter_callback_eip = reg->address;
- curr->arch.pv_vcpu.sysenter_disables_events =
+ curr->arch.pv.sysenter_callback_eip = reg->address;
+ curr->arch.pv.sysenter_disables_events =
!!(reg->flags & CALLBACKF_mask_events);
break;
switch ( reg->type )
{
case CALLBACKTYPE_event:
- curr->arch.pv_vcpu.event_callback_cs = reg->address.cs;
- curr->arch.pv_vcpu.event_callback_eip = reg->address.eip;
+ curr->arch.pv.event_callback_cs = reg->address.cs;
+ curr->arch.pv.event_callback_eip = reg->address.eip;
break;
case CALLBACKTYPE_failsafe:
- curr->arch.pv_vcpu.failsafe_callback_cs = reg->address.cs;
- curr->arch.pv_vcpu.failsafe_callback_eip = reg->address.eip;
+ curr->arch.pv.failsafe_callback_cs = reg->address.cs;
+ curr->arch.pv.failsafe_callback_eip = reg->address.eip;
if ( reg->flags & CALLBACKF_mask_events )
curr->arch.vgc_flags |= VGCF_failsafe_disables_events;
else
break;
case CALLBACKTYPE_syscall32:
- curr->arch.pv_vcpu.syscall32_callback_cs = reg->address.cs;
- curr->arch.pv_vcpu.syscall32_callback_eip = reg->address.eip;
- curr->arch.pv_vcpu.syscall32_disables_events =
+ curr->arch.pv.syscall32_callback_cs = reg->address.cs;
+ curr->arch.pv.syscall32_callback_eip = reg->address.eip;
+ curr->arch.pv.syscall32_disables_events =
(reg->flags & CALLBACKF_mask_events) != 0;
break;
case CALLBACKTYPE_sysenter:
- curr->arch.pv_vcpu.sysenter_callback_cs = reg->address.cs;
- curr->arch.pv_vcpu.sysenter_callback_eip = reg->address.eip;
- curr->arch.pv_vcpu.sysenter_disables_events =
+ curr->arch.pv.sysenter_callback_cs = reg->address.cs;
+ curr->arch.pv.sysenter_callback_eip = reg->address.eip;
+ curr->arch.pv.sysenter_disables_events =
(reg->flags & CALLBACKF_mask_events) != 0;
break;
{
struct trap_info cur;
struct vcpu *curr = current;
- struct trap_info *dst = curr->arch.pv_vcpu.trap_ctxt;
+ struct trap_info *dst = curr->arch.pv.trap_ctxt;
long rc = 0;
/* If no table is presented then clear the entire virtual IDT. */
{
struct vcpu *curr = current;
struct compat_trap_info cur;
- struct trap_info *dst = curr->arch.pv_vcpu.trap_ctxt;
+ struct trap_info *dst = curr->arch.pv.trap_ctxt;
long rc = 0;
/* If no table is presented then clear the entire virtual IDT. */
ASSERT(!in_irq());
- spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
+ spin_lock(&v->arch.pv.shadow_ldt_lock);
- if ( v->arch.pv_vcpu.shadow_ldt_mapcnt == 0 )
+ if ( v->arch.pv.shadow_ldt_mapcnt == 0 )
goto out;
pl1e = pv_ldt_ptes(v);
put_page_and_type(page);
}
- ASSERT(v->arch.pv_vcpu.shadow_ldt_mapcnt == mappings_dropped);
- v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;
+ ASSERT(v->arch.pv.shadow_ldt_mapcnt == mappings_dropped);
+ v->arch.pv.shadow_ldt_mapcnt = 0;
out:
- spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
+ spin_unlock(&v->arch.pv.shadow_ldt_lock);
return mappings_dropped;
}
l1_pgentry_t zero_l1e = l1e_from_mfn(zero_mfn, __PAGE_HYPERVISOR_RO);
unsigned int i;
- v->arch.pv_vcpu.gdt_ents = 0;
+ v->arch.pv.gdt_ents = 0;
for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
{
mfn_t mfn = l1e_get_mfn(pl1e[i]);
put_page_and_type(mfn_to_page(mfn));
l1e_write(&pl1e[i], zero_l1e);
- v->arch.pv_vcpu.gdt_frames[i] = 0;
+ v->arch.pv.gdt_frames[i] = 0;
}
}
pv_destroy_gdt(v);
/* Install the new GDT. */
- v->arch.pv_vcpu.gdt_ents = entries;
+ v->arch.pv.gdt_ents = entries;
pl1e = pv_gdt_ptes(v);
for ( i = 0; i < nr_frames; i++ )
{
- v->arch.pv_vcpu.gdt_frames[i] = frames[i];
+ v->arch.pv.gdt_frames[i] = frames[i];
l1e_write(&pl1e[i], l1e_from_pfn(frames[i], __PAGE_HYPERVISOR_RW));
}
if ( is_pv_32bit_domain(d) )
{
- v->arch.pv_vcpu.failsafe_callback_cs = FLAT_COMPAT_KERNEL_CS;
- v->arch.pv_vcpu.event_callback_cs = FLAT_COMPAT_KERNEL_CS;
+ v->arch.pv.failsafe_callback_cs = FLAT_COMPAT_KERNEL_CS;
+ v->arch.pv.event_callback_cs = FLAT_COMPAT_KERNEL_CS;
}
/* WARNING: The new domain must have its 'processor' field filled in! */
}
pv_destroy_gdt_ldt_l1tab(v);
- xfree(v->arch.pv_vcpu.trap_ctxt);
- v->arch.pv_vcpu.trap_ctxt = NULL;
+ XFREE(v->arch.pv.trap_ctxt);
}
int pv_vcpu_initialise(struct vcpu *v)
ASSERT(!is_idle_domain(d));
- spin_lock_init(&v->arch.pv_vcpu.shadow_ldt_lock);
+ spin_lock_init(&v->arch.pv.shadow_ldt_lock);
rc = pv_create_gdt_ldt_l1tab(v);
if ( rc )
return rc;
- BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt) >
+ BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv.trap_ctxt) >
PAGE_SIZE);
- v->arch.pv_vcpu.trap_ctxt = xzalloc_array(struct trap_info,
- NR_VECTORS);
- if ( !v->arch.pv_vcpu.trap_ctxt )
+ v->arch.pv.trap_ctxt = xzalloc_array(struct trap_info, NR_VECTORS);
+ if ( !v->arch.pv.trap_ctxt )
{
rc = -ENOMEM;
goto done;
/* PV guests by default have a 100Hz ticker. */
v->periodic_period = MILLISECS(10);
- v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
+ v->arch.pv.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
if ( is_pv_32bit_domain(d) )
{
if ( !(v->arch.flags & TF_kernel_mode) )
return;
- if ( v->arch.pv_vcpu.need_update_runstate_area &&
- update_runstate_area(v) )
- v->arch.pv_vcpu.need_update_runstate_area = 0;
+ if ( v->arch.pv.need_update_runstate_area && update_runstate_area(v) )
+ v->arch.pv.need_update_runstate_area = 0;
- if ( v->arch.pv_vcpu.pending_system_time.version &&
- update_secondary_system_time(v,
- &v->arch.pv_vcpu.pending_system_time) )
- v->arch.pv_vcpu.pending_system_time.version = 0;
+ if ( v->arch.pv.pending_system_time.version &&
+ update_secondary_system_time(v, &v->arch.pv.pending_system_time) )
+ v->arch.pv.pending_system_time.version = 0;
}
void toggle_guest_mode(struct vcpu *v)
if ( cpu_has_fsgsbase )
{
if ( v->arch.flags & TF_kernel_mode )
- v->arch.pv_vcpu.gs_base_kernel = __rdgsbase();
+ v->arch.pv.gs_base_kernel = __rdgsbase();
else
- v->arch.pv_vcpu.gs_base_user = __rdgsbase();
+ v->arch.pv.gs_base_user = __rdgsbase();
}
asm volatile ( "swapgs" );
pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
return;
}
- esp = v->arch.pv_vcpu.kernel_sp;
- ss = v->arch.pv_vcpu.kernel_ss;
+ esp = v->arch.pv.kernel_sp;
+ ss = v->arch.pv.kernel_ss;
if ( (ss & 3) != (sel & 3) ||
!pv_emul_read_descriptor(ss, v, &base, &limit, &ar, 0) ||
((ar >> 13) & 3) != (sel & 3) ||
unsigned int cpl = guest_kernel_mode(v, regs) ?
(VM_ASSIST(v->domain, architectural_iopl) ? 0 : 1) : 3;
- ASSERT((v->arch.pv_vcpu.iopl & ~X86_EFLAGS_IOPL) == 0);
+ ASSERT((v->arch.pv.iopl & ~X86_EFLAGS_IOPL) == 0);
- return IOPL(cpl) <= v->arch.pv_vcpu.iopl;
+ return IOPL(cpl) <= v->arch.pv.iopl;
}
/* Has the guest requested sufficient permission for this I/O access? */
if ( iopl_ok(v, regs) )
return true;
- if ( (port + bytes) <= v->arch.pv_vcpu.iobmp_limit )
+ if ( (port + bytes) <= v->arch.pv.iobmp_limit )
{
union { uint8_t bytes[2]; uint16_t mask; } x;
if ( user_mode )
toggle_guest_pt(v);
- switch ( __copy_from_guest_offset(x.bytes, v->arch.pv_vcpu.iobmp,
+ switch ( __copy_from_guest_offset(x.bytes, v->arch.pv.iobmp,
port>>3, 2) )
{
default: x.bytes[0] = ~0;
unsigned int width, i, match = 0;
unsigned long start;
- if ( !(v->arch.debugreg[5]) ||
- !(v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_DE) )
+ if ( !(v->arch.debugreg[5]) || !(v->arch.pv.ctrlreg[4] & X86_CR4_DE) )
return 0;
for ( i = 0; i < 4; i++ )
switch ( reg )
{
case 0: /* Read CR0 */
- *val = (read_cr0() & ~X86_CR0_TS) | curr->arch.pv_vcpu.ctrlreg[0];
+ *val = (read_cr0() & ~X86_CR0_TS) | curr->arch.pv.ctrlreg[0];
return X86EMUL_OKAY;
case 2: /* Read CR2 */
case 4: /* Read CR4 */
- *val = curr->arch.pv_vcpu.ctrlreg[reg];
+ *val = curr->arch.pv.ctrlreg[reg];
return X86EMUL_OKAY;
case 3: /* Read CR3 */
return X86EMUL_OKAY;
case 2: /* Write CR2 */
- curr->arch.pv_vcpu.ctrlreg[2] = val;
+ curr->arch.pv.ctrlreg[2] = val;
arch_set_cr2(curr, val);
return X86EMUL_OKAY;
}
case 4: /* Write CR4 */
- curr->arch.pv_vcpu.ctrlreg[4] = pv_guest_cr4_fixup(curr, val);
+ curr->arch.pv.ctrlreg[4] = pv_guest_cr4_fixup(curr, val);
write_cr4(pv_guest_cr4_to_real_cr4(curr));
ctxt_switch_levelling(curr);
return X86EMUL_OKAY;
case MSR_FS_BASE:
if ( is_pv_32bit_domain(currd) )
break;
- *val = cpu_has_fsgsbase ? __rdfsbase() : curr->arch.pv_vcpu.fs_base;
+ *val = cpu_has_fsgsbase ? __rdfsbase() : curr->arch.pv.fs_base;
return X86EMUL_OKAY;
case MSR_GS_BASE:
if ( is_pv_32bit_domain(currd) )
break;
*val = cpu_has_fsgsbase ? __rdgsbase()
- : curr->arch.pv_vcpu.gs_base_kernel;
+ : curr->arch.pv.gs_base_kernel;
return X86EMUL_OKAY;
case MSR_SHADOW_GS_BASE:
if ( is_pv_32bit_domain(currd) )
break;
- *val = curr->arch.pv_vcpu.gs_base_user;
+ *val = curr->arch.pv.gs_base_user;
return X86EMUL_OKAY;
/*
case MSR_AMD64_DR0_ADDRESS_MASK:
if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
break;
- *val = curr->arch.pv_vcpu.dr_mask[0];
+ *val = curr->arch.pv.dr_mask[0];
return X86EMUL_OKAY;
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
break;
- *val = curr->arch.pv_vcpu.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1];
+ *val = curr->arch.pv.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1];
return X86EMUL_OKAY;
case MSR_IA32_PERF_CAPABILITIES:
if ( is_pv_32bit_domain(currd) || !is_canonical_address(val) )
break;
wrfsbase(val);
- curr->arch.pv_vcpu.fs_base = val;
+ curr->arch.pv.fs_base = val;
return X86EMUL_OKAY;
case MSR_GS_BASE:
if ( is_pv_32bit_domain(currd) || !is_canonical_address(val) )
break;
wrgsbase(val);
- curr->arch.pv_vcpu.gs_base_kernel = val;
+ curr->arch.pv.gs_base_kernel = val;
return X86EMUL_OKAY;
case MSR_SHADOW_GS_BASE:
if ( is_pv_32bit_domain(currd) || !is_canonical_address(val) )
break;
wrgsshadow(val);
- curr->arch.pv_vcpu.gs_base_user = val;
+ curr->arch.pv.gs_base_user = val;
return X86EMUL_OKAY;
case MSR_K7_FID_VID_STATUS:
case MSR_AMD64_DR0_ADDRESS_MASK:
if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
break;
- curr->arch.pv_vcpu.dr_mask[0] = val;
+ curr->arch.pv.dr_mask[0] = val;
if ( curr->arch.debugreg[7] & DR7_ACTIVE_MASK )
wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, val);
return X86EMUL_OKAY;
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
break;
- curr->arch.pv_vcpu.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1] = val;
+ curr->arch.pv.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1] = val;
if ( curr->arch.debugreg[7] & DR7_ACTIVE_MASK )
wrmsrl(reg, val);
return X86EMUL_OKAY;
else
regs->eflags |= X86_EFLAGS_IF;
ASSERT(!(regs->eflags & X86_EFLAGS_IOPL));
- regs->eflags |= curr->arch.pv_vcpu.iopl;
+ regs->eflags |= curr->arch.pv.iopl;
eflags = regs->eflags;
ctxt.ctxt.addr_size = ar & _SEGMENT_L ? 64 : ar & _SEGMENT_DB ? 32 : 16;
if ( ctxt.bpmatch )
{
curr->arch.debugreg[6] |= ctxt.bpmatch | DR_STATUS_RESERVED_ONE;
- if ( !(curr->arch.pv_vcpu.trap_bounce.flags & TBF_EXCEPTION) )
+ if ( !(curr->arch.pv.trap_bounce.flags & TBF_EXCEPTION) )
pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
}
/* fall through */
}
if ( VM_ASSIST(v->domain, architectural_iopl) )
- v->arch.pv_vcpu.iopl = iret_saved.rflags & X86_EFLAGS_IOPL;
+ v->arch.pv.iopl = iret_saved.rflags & X86_EFLAGS_IOPL;
regs->rip = iret_saved.rip;
regs->cs = iret_saved.cs | 3; /* force guest privilege */
}
if ( VM_ASSIST(v->domain, architectural_iopl) )
- v->arch.pv_vcpu.iopl = eflags & X86_EFLAGS_IOPL;
+ v->arch.pv.iopl = eflags & X86_EFLAGS_IOPL;
regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
* mode frames).
*/
const struct trap_info *ti;
- u32 x, ksp = v->arch.pv_vcpu.kernel_sp - 40;
+ u32 x, ksp = v->arch.pv.kernel_sp - 40;
unsigned int i;
int rc = 0;
return 0;
}
regs->esp = ksp;
- regs->ss = v->arch.pv_vcpu.kernel_ss;
+ regs->ss = v->arch.pv.kernel_ss;
- ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
+ ti = &v->arch.pv.trap_ctxt[TRAP_gp_fault];
if ( TI_GET_IF(ti) )
eflags &= ~X86_EFLAGS_IF;
regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
if ( set )
{
- v->arch.pv_vcpu.ctrlreg[0] |= X86_CR0_TS;
+ v->arch.pv.ctrlreg[0] |= X86_CR0_TS;
stts();
}
else
{
- v->arch.pv_vcpu.ctrlreg[0] &= ~X86_CR0_TS;
+ v->arch.pv.ctrlreg[0] &= ~X86_CR0_TS;
if ( v->fpu_dirtied )
clts();
}
struct domain *currd = curr->domain;
struct page_info *page;
l1_pgentry_t gl1e, *pl1e;
- unsigned long linear = curr->arch.pv_vcpu.ldt_base + offset;
+ unsigned long linear = curr->arch.pv.ldt_base + offset;
BUG_ON(unlikely(in_irq()));
* current vcpu, and vcpu_reset() will block until this vcpu has been
* descheduled before continuing.
*/
- ASSERT((offset >> 3) <= curr->arch.pv_vcpu.ldt_ents);
+ ASSERT((offset >> 3) <= curr->arch.pv.ldt_ents);
if ( is_pv_32bit_domain(currd) )
linear = (uint32_t)linear;
pl1e = &pv_ldt_ptes(curr)[offset >> PAGE_SHIFT];
l1e_add_flags(gl1e, _PAGE_RW);
- spin_lock(&curr->arch.pv_vcpu.shadow_ldt_lock);
+ spin_lock(&curr->arch.pv.shadow_ldt_lock);
l1e_write(pl1e, gl1e);
- curr->arch.pv_vcpu.shadow_ldt_mapcnt++;
- spin_unlock(&curr->arch.pv_vcpu.shadow_ldt_lock);
+ curr->arch.pv.shadow_ldt_mapcnt++;
+ spin_unlock(&curr->arch.pv.shadow_ldt_lock);
return true;
}
else
ASSERT(error_code == X86_EVENT_NO_EC);
- tb = &curr->arch.pv_vcpu.trap_bounce;
- ti = &curr->arch.pv_vcpu.trap_ctxt[vector];
+ tb = &curr->arch.pv.trap_bounce;
+ ti = &curr->arch.pv.trap_ctxt[vector];
tb->flags = TBF_EXCEPTION;
tb->cs = ti->cs;
if ( event->type == X86_EVENTTYPE_HW_EXCEPTION &&
vector == TRAP_page_fault )
{
- curr->arch.pv_vcpu.ctrlreg[2] = event->cr2;
+ curr->arch.pv.ctrlreg[2] = event->cr2;
arch_set_cr2(curr, event->cr2);
/* Re-set error_code.user flag appropriately for the guest. */
bool set_guest_machinecheck_trapbounce(void)
{
struct vcpu *curr = current;
- struct trap_bounce *tb = &curr->arch.pv_vcpu.trap_bounce;
+ struct trap_bounce *tb = &curr->arch.pv.trap_bounce;
pv_inject_hw_exception(TRAP_machine_check, X86_EVENT_NO_EC);
tb->flags &= ~TBF_EXCEPTION; /* not needed for MCE delivery path */
bool set_guest_nmi_trapbounce(void)
{
struct vcpu *curr = current;
- struct trap_bounce *tb = &curr->arch.pv_vcpu.trap_bounce;
+ struct trap_bounce *tb = &curr->arch.pv.trap_bounce;
pv_inject_hw_exception(TRAP_nmi, X86_EVENT_NO_EC);
tb->flags &= ~TBF_EXCEPTION; /* not needed for NMI delivery path */
if ( !update_secondary_system_time(v, &_u) && is_pv_domain(d) &&
!is_pv_32bit_domain(d) && !(v->arch.flags & TF_kernel_mode) )
- v->arch.pv_vcpu.pending_system_time = _u;
+ v->arch.pv.pending_system_time = _u;
}
bool update_secondary_system_time(struct vcpu *v,
return 0;
/* Access would have become non-canonical? Pass #GP[sel] back. */
- if ( unlikely(!is_canonical_address(
- curr->arch.pv_vcpu.ldt_base + offset)) )
+ if ( unlikely(!is_canonical_address(curr->arch.pv.ldt_base + offset)) )
{
uint16_t ec = (offset & ~(X86_XEC_EXT | X86_XEC_IDT)) | X86_XEC_TI;
else
/* else pass the #PF back, with adjusted %cr2. */
pv_inject_page_fault(regs->error_code,
- curr->arch.pv_vcpu.ldt_base + offset);
+ curr->arch.pv.ldt_base + offset);
}
return EXCRET_fault_fixed;
/* This fault must be due to <INT n> instruction. */
const struct trap_info *ti;
unsigned char vector = regs->error_code >> 3;
- ti = &v->arch.pv_vcpu.trap_ctxt[vector];
+ ti = &v->arch.pv.trap_ctxt[vector];
if ( permit_softint(TI_GET_DPL(ti), v, regs) )
{
regs->rip += 2;
vcpu_restore_fpu_lazy(curr);
- if ( curr->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS )
+ if ( curr->arch.pv.ctrlreg[0] & X86_CR0_TS )
{
pv_inject_hw_exception(TRAP_no_device, X86_EVENT_NO_EC);
- curr->arch.pv_vcpu.ctrlreg[0] &= ~X86_CR0_TS;
+ curr->arch.pv.ctrlreg[0] &= ~X86_CR0_TS;
}
else
TRACE_0D(TRC_PV_MATH_STATE_RESTORE);
if ( boot_cpu_has(X86_FEATURE_DBEXT) )
{
- wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[0]);
- wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[1]);
- wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[2]);
- wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[3]);
+ wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, curr->arch.pv.dr_mask[0]);
+ wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, curr->arch.pv.dr_mask[1]);
+ wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, curr->arch.pv.dr_mask[2]);
+ wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, curr->arch.pv.dr_mask[3]);
}
}
break;
case 4:
- if ( v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_DE )
+ if ( v->arch.pv.ctrlreg[4] & X86_CR4_DE )
return -ENODEV;
/* Fallthrough */
break;
case 5:
- if ( v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_DE )
+ if ( v->arch.pv.ctrlreg[4] & X86_CR4_DE )
return -ENODEV;
/* Fallthrough */
{
if ( ((value >> i) & 3) == DR_IO )
{
- if ( !(v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_DE) )
+ if ( !(v->arch.pv.ctrlreg[4] & X86_CR4_DE) )
return -EPERM;
io_enable |= value & (3 << ((i - 16) >> 1));
}
OFFSET(VCPU_processor, struct vcpu, processor);
OFFSET(VCPU_domain, struct vcpu, domain);
OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
- OFFSET(VCPU_trap_bounce, struct vcpu, arch.pv_vcpu.trap_bounce);
+ OFFSET(VCPU_trap_bounce, struct vcpu, arch.pv.trap_bounce);
OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
- OFFSET(VCPU_event_addr, struct vcpu, arch.pv_vcpu.event_callback_eip);
- OFFSET(VCPU_event_sel, struct vcpu, arch.pv_vcpu.event_callback_cs);
- OFFSET(VCPU_syscall_addr, struct vcpu,
- arch.pv_vcpu.syscall_callback_eip);
- OFFSET(VCPU_syscall32_addr, struct vcpu,
- arch.pv_vcpu.syscall32_callback_eip);
- OFFSET(VCPU_syscall32_sel, struct vcpu,
- arch.pv_vcpu.syscall32_callback_cs);
- OFFSET(VCPU_syscall32_disables_events, struct vcpu,
- arch.pv_vcpu.syscall32_disables_events);
- OFFSET(VCPU_sysenter_addr, struct vcpu,
- arch.pv_vcpu.sysenter_callback_eip);
- OFFSET(VCPU_sysenter_sel, struct vcpu,
- arch.pv_vcpu.sysenter_callback_cs);
- OFFSET(VCPU_sysenter_disables_events, struct vcpu,
- arch.pv_vcpu.sysenter_disables_events);
- OFFSET(VCPU_trap_ctxt, struct vcpu, arch.pv_vcpu.trap_ctxt);
- OFFSET(VCPU_kernel_sp, struct vcpu, arch.pv_vcpu.kernel_sp);
- OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv_vcpu.kernel_ss);
- OFFSET(VCPU_iopl, struct vcpu, arch.pv_vcpu.iopl);
+ OFFSET(VCPU_event_addr, struct vcpu, arch.pv.event_callback_eip);
+ OFFSET(VCPU_event_sel, struct vcpu, arch.pv.event_callback_cs);
+ OFFSET(VCPU_syscall_addr, struct vcpu, arch.pv.syscall_callback_eip);
+ OFFSET(VCPU_syscall32_addr, struct vcpu, arch.pv.syscall32_callback_eip);
+ OFFSET(VCPU_syscall32_sel, struct vcpu, arch.pv.syscall32_callback_cs);
+ OFFSET(VCPU_syscall32_disables_events,
+ struct vcpu, arch.pv.syscall32_disables_events);
+ OFFSET(VCPU_sysenter_addr, struct vcpu, arch.pv.sysenter_callback_eip);
+ OFFSET(VCPU_sysenter_sel, struct vcpu, arch.pv.sysenter_callback_cs);
+ OFFSET(VCPU_sysenter_disables_events,
+ struct vcpu, arch.pv.sysenter_disables_events);
+ OFFSET(VCPU_trap_ctxt, struct vcpu, arch.pv.trap_ctxt);
+ OFFSET(VCPU_kernel_sp, struct vcpu, arch.pv.kernel_sp);
+ OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv.kernel_ss);
+ OFFSET(VCPU_iopl, struct vcpu, arch.pv.iopl);
OFFSET(VCPU_guest_context_flags, struct vcpu, arch.vgc_flags);
OFFSET(VCPU_cr3, struct vcpu, arch.cr3);
OFFSET(VCPU_arch_msrs, struct vcpu, arch.msrs);
mov VCPU_domain(%rbx), %rax
/*
- * if ( null_trap_info(v, &v->arch.pv_vcpu.trap_ctxt[0x80]) )
+ * if ( null_trap_info(v, &v->arch.pv.trap_ctxt[0x80]) )
* goto int80_slow_path;
*/
mov 0x80 * TRAPINFO_sizeof + TRAPINFO_eip(%rsi), %rdi
long do_stack_switch(unsigned long ss, unsigned long esp)
{
fixup_guest_stack_selector(current->domain, ss);
- current->arch.pv_vcpu.kernel_ss = ss;
- current->arch.pv_vcpu.kernel_sp = esp;
+ current->arch.pv.kernel_ss = ss;
+ current->arch.pv.kernel_sp = esp;
return 0;
}
if ( is_canonical_address(base) )
{
wrfsbase(base);
- v->arch.pv_vcpu.fs_base = base;
+ v->arch.pv.fs_base = base;
}
else
ret = -EINVAL;
if ( is_canonical_address(base) )
{
wrgsshadow(base);
- v->arch.pv_vcpu.gs_base_user = base;
+ v->arch.pv.gs_base_user = base;
}
else
ret = -EINVAL;
if ( is_canonical_address(base) )
{
wrgsbase(base);
- v->arch.pv_vcpu.gs_base_kernel = base;
+ v->arch.pv.gs_base_kernel = base;
}
else
ret = -EINVAL;
if ( !is_pv_vcpu(v) )
return;
- crs[0] = v->arch.pv_vcpu.ctrlreg[0];
+ crs[0] = v->arch.pv.ctrlreg[0];
crs[2] = arch_get_cr2(v);
crs[3] = pagetable_get_paddr(kernel ?
v->arch.guest_table :
v->arch.guest_table_user);
- crs[4] = v->arch.pv_vcpu.ctrlreg[4];
- crs[5] = v->arch.pv_vcpu.fs_base;
- crs[6 + !kernel] = v->arch.pv_vcpu.gs_base_kernel;
- crs[7 - !kernel] = v->arch.pv_vcpu.gs_base_user;
+ crs[4] = v->arch.pv.ctrlreg[4];
+ crs[5] = v->arch.pv.fs_base;
+ crs[6 + !kernel] = v->arch.pv.gs_base_kernel;
+ crs[7 - !kernel] = v->arch.pv.gs_base_user;
_show_registers(regs, crs, CTXT_pv_guest, v);
}
break;
case 4 ... 5:
- if ( !(curr->arch.pv_vcpu.ctrlreg[4] & X86_CR4_DE) )
+ if ( !(curr->arch.pv.ctrlreg[4] & X86_CR4_DE) )
{
*val = curr->arch.debugreg[reg + 2];
break;
/* Virtual Machine Extensions */
union {
- struct pv_vcpu pv_vcpu;
+ struct pv_vcpu pv;
struct hvm_vcpu hvm_vcpu;
};
struct desc_struct *desc;
unsigned long ents;
- if ( (ents = v->arch.pv_vcpu.ldt_ents) == 0 )
+ if ( (ents = v->arch.pv.ldt_ents) == 0 )
lldt(0);
else
{
static inline bool pv_trap_callback_registered(const struct vcpu *v,
uint8_t vector)
{
- return v->arch.pv_vcpu.trap_ctxt[vector].address;
+ return v->arch.pv.trap_ctxt[vector].address;
}
#else /* !CONFIG_PV */