v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
- rc = is_pv_32on64_vcpu(v) ? setup_compat_l4(v) : 0;
+ rc = is_pv_32on64_domain(d) ? setup_compat_l4(v) : 0;
done:
if ( rc )
{
#define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
flags = c(flags);
- if ( is_pv_vcpu(v) )
+ if ( is_pv_domain(d) )
{
if ( !compat )
{
if ( ((c(ldt_base) & (PAGE_SIZE - 1)) != 0) ||
     (c(ldt_ents) > 8192) )
return -EINVAL;
}
- else if ( is_pvh_vcpu(v) )
+ else if ( is_pvh_domain(d) )
{
/* PVH 32bitfixme */
ASSERT(!compat);
v->fpu_initialised = !!(flags & VGCF_I387_VALID);
v->arch.flags &= ~TF_kernel_mode;
- if ( (flags & VGCF_in_kernel) || has_hvm_container_vcpu(v)/*???*/ )
+ if ( (flags & VGCF_in_kernel) || has_hvm_container_domain(d)/*???*/ )
v->arch.flags |= TF_kernel_mode;
v->arch.vgc_flags = flags;
if ( !compat )
{
memcpy(&v->arch.user_regs, &c.nat->user_regs, sizeof(c.nat->user_regs));
- if ( is_pv_vcpu(v) )
+ if ( is_pv_domain(d) )
memcpy(v->arch.pv_vcpu.trap_ctxt, c.nat->trap_ctxt,
sizeof(c.nat->trap_ctxt));
}
XLAT_trap_info(v->arch.pv_vcpu.trap_ctxt + i,
               c.cmp->trap_ctxt + i);
}
- if ( has_hvm_container_vcpu(v) )
+ if ( has_hvm_container_domain(d) )
{
for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
v->arch.debugreg[i] = c(debugreg[i]);
hvm_set_info_guest(v);
- if ( is_hvm_vcpu(v) || v->is_initialised )
+ if ( is_hvm_domain(d) || v->is_initialised )
goto out;
/* NB: No need to use PV cr3 un-pickling macros */
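For context (not part of the patch): in this series the vcpu-flavoured predicates are thin wrappers over the domain-flavoured ones, so switching to the latter with a cached domain pointer merely drops the repeated v->domain dereference; guest-visible behaviour is unchanged. A sketch of the definitions as in xen/include/xen/sched.h of this era (quoted from memory, so verify against the tree):

#define is_pv_domain(d)             ((d)->guest_type == guest_type_pv)
#define is_pv_vcpu(v)               (is_pv_domain((v)->domain))
#define is_pvh_domain(d)            ((d)->guest_type == guest_type_pvh)
#define is_pvh_vcpu(v)              (is_pvh_domain((v)->domain))
#define is_hvm_domain(d)            ((d)->guest_type == guest_type_hvm)
#define is_hvm_vcpu(v)              (is_hvm_domain((v)->domain))
#define has_hvm_container_domain(d) ((d)->guest_type != guest_type_pv)
#define has_hvm_container_vcpu(v)   (has_hvm_container_domain((v)->domain))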
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
unsigned int i;
- bool_t compat = is_pv_32on64_domain(v->domain);
+ const struct domain *d = v->domain;
+ bool_t compat = is_pv_32on64_domain(d);
#define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld))
- if ( !is_pv_vcpu(v) )
+ if ( !is_pv_domain(d) )
memset(c.nat, 0, sizeof(*c.nat));
memcpy(&c.nat->fpu_ctxt, v->arch.fpu_ctxt, sizeof(c.nat->fpu_ctxt));
c(flags = v->arch.vgc_flags & ~(VGCF_i387_valid|VGCF_in_kernel));
if ( !compat )
{
memcpy(&c.nat->user_regs, &v->arch.user_regs, sizeof(c.nat->user_regs));
- if ( is_pv_vcpu(v) )
+ if ( is_pv_domain(d) )
memcpy(c.nat->trap_ctxt, v->arch.pv_vcpu.trap_ctxt,
sizeof(c.nat->trap_ctxt));
}
for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
c(debugreg[i] = v->arch.debugreg[i]);
- if ( has_hvm_container_vcpu(v) )
+ if ( has_hvm_container_domain(d) )
{
struct segment_register sreg;
BUG_ON((c(user_regs.eflags) & X86_EFLAGS_IOPL) != 0);
c(user_regs.eflags |= v->arch.pv_vcpu.iopl << 12);
- if ( !is_pv_32on64_domain(v->domain) )
+ if ( !compat )
{
c.nat->ctrlreg[3] = xen_pfn_to_cr3(
pagetable_get_pfn(v->arch.guest_table));
c(flags |= VGCF_in_kernel);
}
- c(vm_assist = v->domain->vm_assist);
+ c(vm_assist = d->vm_assist);
#undef c
}
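Worth noting for review: the c() accessor works for writes as well as reads because the whole expression, assignment included, is substituted into whichever union arm the ternary selects. A minimal standalone illustration of the idiom (struct names hypothetical, not Xen code):

#include <stdio.h>

struct nat_ctxt { unsigned long flags; };
struct cmp_ctxt { unsigned int  flags; };

int main(void)
{
    struct nat_ctxt n = { 0 };
    union { struct nat_ctxt *nat; struct cmp_ctxt *cmp; } c = { .nat = &n };
    int compat = 0;

#define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld))
    /* Expands to: (!compat ? (c.nat->flags = 42) : (c.cmp->flags = 42)); */
    c(flags = 42);
    printf("flags = %lu\n", (unsigned long)c(flags));
#undef c
    return 0;
}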
int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
- struct domain *d = current->domain;
+ struct domain *currd = current->domain;
/* Optionally shift out of the way of Viridian architectural leaves. */
- uint32_t base = is_viridian_domain(d) ? 0x40000100 : 0x40000000;
+ uint32_t base = is_viridian_domain(currd) ? 0x40000100 : 0x40000000;
uint32_t limit, dummy;
idx -= base;
if ( idx > XEN_CPUID_MAX_NUM_LEAVES )
return 0; /* Avoid unnecessary pass through domain_cpuid() */
- domain_cpuid(d, base, 0, &limit, &dummy, &dummy, &dummy);
+ domain_cpuid(currd, base, 0, &limit, &dummy, &dummy, &dummy);
if ( limit == 0 )
/* Default number of leaves */
limit = XEN_CPUID_MAX_NUM_LEAVES;
case 2:
*eax = 1; /* Number of hypercall-transfer pages */
*ebx = 0x40000000; /* MSR base address */
- if ( is_viridian_domain(d) )
+ if ( is_viridian_domain(currd) )
*ebx = 0x40000200;
*ecx = 0; /* Features 1 */
*edx = 0; /* Features 2 */
- if ( is_pv_vcpu(current) )
+ if ( is_pv_domain(currd) )
*ecx |= XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD;
break;
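Not part of the patch, but useful background for the base/limit handling above: guests locate these leaves by scanning hypervisor CPUID bases in 0x100 steps and matching Xen's "XenVMMXenVMM" signature, which is why the Viridian option can shift Xen's leaves to 0x40000100 without breaking detection. A user-space sketch of that probe (assumes it runs in an x86 Xen guest; written for GCC/Clang inline asm):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void cpuid(uint32_t leaf,
                  uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
    asm volatile ( "cpuid"
                   : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                   : "0" (leaf), "2" (0) );
}

int main(void)
{
    for ( uint32_t base = 0x40000000; base < 0x40010000; base += 0x100 )
    {
        uint32_t eax, sig[3];
        char s[13] = "";

        /* Signature is the EBX/ECX/EDX bytes of the base leaf. */
        cpuid(base, &eax, &sig[0], &sig[1], &sig[2]);
        memcpy(s, sig, sizeof(sig));
        if ( !strcmp(s, "XenVMMXenVMM") && (eax - base) >= 2 )
        {
            printf("Xen leaves at %#x, max leaf %#x\n", base, eax);
            return 0;
        }
    }
    return 1;
}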
{
uint32_t a, b, c, d;
struct vcpu *curr = current;
+ struct domain *currd = curr->domain;
a = regs->eax;
b = regs->ebx;
c = regs->ecx;
d = regs->edx;
- if ( !is_control_domain(curr->domain) && !is_hardware_domain(curr->domain) )
+ if ( !is_control_domain(currd) && !is_hardware_domain(currd) )
{
unsigned int cpuid_leaf = a, sub_leaf = c;
if ( !cpuid_hypervisor_leaves(a, c, &a, &b, &c, &d) )
- domain_cpuid(curr->domain, a, c, &a, &b, &c, &d);
+ domain_cpuid(currd, a, c, &a, &b, &c, &d);
switch ( cpuid_leaf )
{
{
if ( !(curr->arch.xcr0 & (1ULL << sub_leaf)) )
continue;
- domain_cpuid(curr->domain, cpuid_leaf, sub_leaf,
+ domain_cpuid(currd, cpuid_leaf, sub_leaf,
&_eax, &_ebx, &_ecx, &_edx);
if ( (_eax + _ebx) > b )
b = _eax + _ebx;
if ( !cpu_has_apic )
__clear_bit(X86_FEATURE_APIC, &d);
- if ( !is_pvh_vcpu(curr) )
+ if ( !is_pvh_domain(currd) )
{
__clear_bit(X86_FEATURE_PSE, &d);
__clear_bit(X86_FEATURE_PGE, &d);
__clear_bit(X86_FEATURE_DS, &d);
__clear_bit(X86_FEATURE_ACC, &d);
__clear_bit(X86_FEATURE_PBE, &d);
- if ( is_pvh_vcpu(curr) )
+ if ( is_pvh_domain(currd) )
__clear_bit(X86_FEATURE_MTRR, &d);
__clear_bit(X86_FEATURE_DTES64 % 32, &c);
__clear_bit(X86_FEATURE_VMXE % 32, &c);
__clear_bit(X86_FEATURE_SMXE % 32, &c);
__clear_bit(X86_FEATURE_TM2 % 32, &c);
- if ( is_pv_32bit_vcpu(curr) )
+ if ( is_pv_32bit_domain(currd) )
__clear_bit(X86_FEATURE_CX16 % 32, &c);
__clear_bit(X86_FEATURE_XTPR % 32, &c);
__clear_bit(X86_FEATURE_PDCM % 32, &c);
case 0x80000001:
/* Modify Feature Information. */
- if ( is_pv_32bit_vcpu(curr) )
+ if ( is_pv_32bit_domain(currd) )
{
__clear_bit(X86_FEATURE_LM % 32, &d);
__clear_bit(X86_FEATURE_LAHF_LM % 32, &c);
}
- if ( is_pv_32on64_vcpu(curr) &&
+ if ( is_pv_32on64_domain(currd) &&
boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
__clear_bit(X86_FEATURE_SYSCALL % 32, &d);
__clear_bit(X86_FEATURE_PAGE1GB % 32, &d);
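A note on the "% 32" pattern throughout this hunk: X86_FEATURE_* constants encode (word * 32 + bit) indices into the multi-word cpufeature array, while c and d here hold single 32-bit CPUID registers, so "% 32" recovers the in-register bit position. (The SYSCALL clearing itself reflects a real hardware difference: outside 64-bit mode SYSCALL works on AMD but raises #UD on Intel, hence masking it for 32-on-64 guests on non-AMD parts.) A tiny standalone illustration, with the constant value copied from the conventional encoding (CPUID.1:ECX is word 4):

#include <stdio.h>

#define X86_FEATURE_CX16 (4 * 32 + 13)   /* CMPXCHG16B: CPUID.1:ECX[13] */

int main(void)
{
    unsigned int c = 0xffffffffu;            /* pretend-raw CPUID.1:ECX */

    /* Equivalent of the __clear_bit(X86_FEATURE_CX16 % 32, &c) above. */
    c &= ~(1u << (X86_FEATURE_CX16 % 32));
    printf("ECX after masking CX16: %#010x\n", c);
    return 0;
}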