case 0x80000001:
/* SYSCALL is hidden outside of long mode on Intel. */
if ( p->x86_vendor == X86_VENDOR_INTEL &&
- is_hvm_domain(d) && !hvm_long_mode_enabled(v) )
+ is_hvm_domain(d) && !hvm_long_mode_active(v) )
res->d &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
common_leaf1_adjustments:
unsigned int pfec = PFEC_page_present;
unsigned long addr;
- if ( hvm_long_mode_enabled(curr) &&
+ if ( hvm_long_mode_active(curr) &&
hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.l )
hvmemul_ctxt->ctxt.addr_size = hvmemul_ctxt->ctxt.sp_size = 64;
else
}
/* When CR0.PG is cleared, LMA is cleared immediately. */
- if ( hvm_long_mode_enabled(v) )
+ if ( hvm_long_mode_active(v) )
{
v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
hvm_update_guest_efer(v);
if ( !(value & X86_CR4_PAE) )
{
- if ( hvm_long_mode_enabled(v) )
+ if ( hvm_long_mode_active(v) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
"EFER.LMA is set");
old_cr = v->arch.hvm_vcpu.guest_cr[4];
if ( (value & X86_CR4_PCIDE) && !(old_cr & X86_CR4_PCIDE) &&
- (!hvm_long_mode_enabled(v) ||
+ (!hvm_long_mode_active(v) ||
(v->arch.hvm_vcpu.guest_cr[3] & 0xfff)) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Guest attempts to change CR4.PCIDE from "
if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
sizeof(sig), hvm_access_insn_fetch,
- (hvm_long_mode_enabled(cur) &&
+ (hvm_long_mode_active(cur) &&
cs->attr.fields.l) ? 64 :
cs->attr.fields.db ? 32 : 16, &addr) &&
(hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
regs->eflags &= ~X86_EFLAGS_RF;
/* Zero the upper 32 bits of %rip if not in 64bit mode. */
- if ( !(hvm_long_mode_enabled(cur) && cs->attr.fields.l) )
+ if ( !(hvm_long_mode_active(cur) && cs->attr.fields.l) )
regs->rip = regs->eip;
add_taint(TAINT_HVM_FEP);
return 0;
if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
- if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
+ if ( hvm_long_mode_active(v) && likely(vmcb->cs.attr.fields.l) )
return 8;
return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
}
exit_reason = vmcb->exitcode;
- if ( hvm_long_mode_enabled(v) )
+ if ( hvm_long_mode_active(v) )
HVMTRACE_ND(VMEXIT64, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
1/*cycles*/, 3, exit_reason,
regs->eip, regs->rip >> 32, 0, 0, 0);
{
if ( trace_will_trace_event(TRC_SHADOW) )
break;
- if ( hvm_long_mode_enabled(v) )
+ if ( hvm_long_mode_active(v) )
HVMTRACE_LONG_2D(PF_XEN, regs->error_code, TRC_PAR_LONG(va));
else
HVMTRACE_2D(PF_XEN, regs->error_code, va);
if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
__vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
- if ( hvm_long_mode_enabled(v) &&
+ if ( hvm_long_mode_active(v) &&
likely(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
return 8;
return (likely(cs_ar_bytes & X86_SEG_AR_DEF_OP_SIZE) ? 4 : 2);
__vmread(VM_EXIT_REASON, &exit_reason);
- if ( hvm_long_mode_enabled(v) )
+ if ( hvm_long_mode_active(v) )
HVMTRACE_ND(VMEXIT64, 0, 1/*cycles*/, 3, exit_reason,
regs->eip, regs->rip >> 32, 0, 0, 0);
else
{
if ( trace_will_trace_event(TRC_SHADOW) )
break;
- if ( hvm_long_mode_enabled(v) )
+ if ( hvm_long_mode_active(v) )
HVMTRACE_LONG_2D(PF_XEN, regs->error_code,
TRC_PAR_LONG(exit_qualification) );
else
else if ( !nvmx_vcpu_in_vmx(v) )
goto invalid_op;
- if ( vmx_guest_x86_mode(v) < (hvm_long_mode_enabled(v) ? 8 : 2) )
+ if ( vmx_guest_x86_mode(v) < (hvm_long_mode_active(v) ? 8 : 2) )
goto invalid_op;
else if ( nestedhvm_vcpu_in_guestmode(v) )
goto vmexit;
/*
* EFER handling:
* hvm_set_efer won't work if CR0.PG = 1, so we change the value
- * directly to make hvm_long_mode_enabled(v) work in L2.
+ * directly to make hvm_long_mode_active(v) work in L2.
* An additional update_paging_modes is also needed if
* there is 32/64 switch. v->arch.hvm_vcpu.guest_efer doesn't
* need to be saved, since its value on vmexit is determined by
* L1 exit_controls
*/
- lm_l1 = !!hvm_long_mode_enabled(v);
+ lm_l1 = hvm_long_mode_active(v);
lm_l2 = !!(get_vvmcs(v, VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
if ( lm_l2 )
nvcpu->nv_vmexit_pending = 0;
nvcpu->nv_vmswitch_in_progress = 1;
- lm_l2 = !!hvm_long_mode_enabled(v);
+ lm_l2 = hvm_long_mode_active(v);
lm_l1 = !!(get_vvmcs(v, VM_EXIT_CONTROLS) & VM_EXIT_IA32E_MODE);
if ( lm_l1 )
const struct paging_mode *
hap_paging_get_mode(struct vcpu *v)
{
- return !hvm_paging_enabled(v) ? &hap_paging_real_mode :
- hvm_long_mode_enabled(v) ? &hap_paging_long_mode :
- hvm_pae_enabled(v) ? &hap_paging_pae_mode :
- &hap_paging_protected_mode;
+ /*
+  * Pick the paging mode from the vcpu's control state, checked in
+  * priority order: paging disabled -> real mode, EFER.LMA -> long
+  * mode, CR4.PAE -> PAE, otherwise plain protected mode.
+  */
+ return (!hvm_paging_enabled(v) ? &hap_paging_real_mode :
+ hvm_long_mode_active(v) ? &hap_paging_long_mode :
+ hvm_pae_enabled(v) ? &hap_paging_pae_mode :
+ &hap_paging_protected_mode);
}
static void hap_update_paging_modes(struct vcpu *v)
creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
/* Work out the emulation mode. */
- if ( hvm_long_mode_enabled(v) && creg->attr.fields.l )
+ if ( hvm_long_mode_active(v) && creg->attr.fields.l )
{
sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = 64;
}
v->arch.guest_table = d->arch.paging.shadow.unpaged_pagetable;
v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
}
- else if ( hvm_long_mode_enabled(v) )
+ else if ( hvm_long_mode_active(v) )
{
// long mode guest...
v->arch.paging.mode =
static inline int is_32bit_vcpu(struct vcpu *vcpu)
{
+ /* For HVM vcpus, "32-bit" means long mode (EFER.LMA) is not active. */
if (is_hvm_vcpu(vcpu))
- return !hvm_long_mode_enabled(vcpu);
+ return !hvm_long_mode_active(vcpu);
else
return is_pv_32bit_vcpu(vcpu);
}
#define hap_has_1gb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB))
#define hap_has_2mb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_2MB))
-#define hvm_long_mode_enabled(v) \
- ((v)->arch.hvm_vcpu.guest_efer & EFER_LMA)
+/*
+ * True when the guest has long mode active (EFER.LMA set).  The !! forces
+ * a strict 0/1 result, so callers no longer need their own !! (hence the
+ * "active" rename, making stale call sites fail to build).
+ */
+#define hvm_long_mode_active(v) (!!((v)->arch.hvm_vcpu.guest_efer & EFER_LMA))
enum hvm_intblk
hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack);