jbe 1f
mov $0x80000001,%eax
cpuid
-1: mov %edx,sym_fs(cpuid_ext_features)
- mov %edx,sym_fs(boot_cpu_data)+CPUINFO_FEATURE_OFFSET(X86_FEATURE_LM)
+1: mov %edx, sym_fs(boot_cpu_data) + CPUINFO_FEATURE_OFFSET(X86_FEATURE_LM)
+
+ /* Check for NX. Adjust EFER setting if available. */
+ bt $cpufeat_bit(X86_FEATURE_NX), %edx
+ jnc 1f
+ orb $EFER_NX >> 8, 1 + sym_fs(trampoline_efer)
+1:
/* Check for availability of long mode. */
bt $cpufeat_bit(X86_FEATURE_LM),%edx
GLOBAL(trampoline_misc_enable_off)
.quad 0
-GLOBAL(cpuid_ext_features)
- .long 0
+/* EFER OR-mask for boot paths. This gets adjusted with NX when available. */
+GLOBAL(trampoline_efer)
+ .long EFER_LME | EFER_SCE
GLOBAL(trampoline_xen_phys_start)
.long 0
1:
/* Set up EFER (Extended Feature Enable Register). */
- mov bootsym_rel(cpuid_ext_features,4,%edi)
movl $MSR_EFER,%ecx
rdmsr
- or $EFER_LME|EFER_SCE,%eax /* Long Mode + SYSCALL/SYSRET */
- bt $cpufeat_bit(X86_FEATURE_NX),%edi /* No Execute? */
- jnc 1f
- btsl $_EFER_NX,%eax /* No Execute */
-1: wrmsr
+ or bootsym_rel(trampoline_efer, 4, %eax)
+ wrmsr
mov $(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_NE |\
X86_CR0_ET | X86_CR0_MP | X86_CR0_PE), %eax
mov %eax, %cr0
1:
- /* Will cpuid feature change after resume? */
/* Set up EFER (Extended Feature Enable Register). */
- mov bootsym_rel(cpuid_ext_features,4,%edi)
- test $0x20100800,%edi /* SYSCALL/SYSRET, No Execute, Long Mode? */
- jz .Lskip_eferw
movl $MSR_EFER,%ecx
rdmsr
- btsl $_EFER_LME,%eax /* Long Mode */
- btsl $_EFER_SCE,%eax /* SYSCALL/SYSRET */
- btl $20,%edi /* No Execute? */
- jnc 1f
- btsl $_EFER_NX,%eax /* No Execute */
-1: wrmsr
-.Lskip_eferw:
+ or bootsym_rel(trampoline_efer, 4, %eax)
+ wrmsr
wbinvd
cpuid(0x80000001, &tmp, &tmp,
&c->x86_capability[cpufeat_word(X86_FEATURE_LAHF_LM)],
&c->x86_capability[cpufeat_word(X86_FEATURE_SYSCALL)]);
- if (c == &boot_cpu_data)
- bootsym(cpuid_ext_features) =
- c->x86_capability[cpufeat_word(X86_FEATURE_NX)];
if (c->extended_cpuid_level >= 0x80000004)
get_model_name(c); /* Default name */
if (disable) {
wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable & ~disable);
bootsym(trampoline_misc_enable_off) |= disable;
+ bootsym(trampoline_efer) |= EFER_NX;
}
if (disable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID)
asm volatile("pushq $0\n\tpopfq");
rdmsrl(MSR_EFER, efer);
efer |= EFER_SCE;
- if ( cpuid_ext_features & cpufeat_mask(X86_FEATURE_NX) )
+ if ( cpu_has_nx )
efer |= EFER_NX;
wrmsrl(MSR_EFER, efer);
write_cr0(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP |
if ( (eax >> 16) == 0x8000 && eax > 0x80000000 )
{
- cpuid_ext_features = cpuid_edx(0x80000001);
boot_cpu_data.x86_capability[cpufeat_word(X86_FEATURE_SYSCALL)]
- = cpuid_ext_features;
+ = cpuid_edx(0x80000001);
+
+ if ( cpu_has_nx )
+ trampoline_efer |= EFER_NX;
}
}
extern void (*ctxt_switch_masking)(const struct vcpu *next);
extern bool_t opt_cpu_info;
-extern u32 cpuid_ext_features;
+extern u32 trampoline_efer;
extern u64 trampoline_misc_enable_off;
/* Maximum width of physical addresses supported by the hardware. */