/* Check for NX. Adjust EFER setting if available. */
bt $cpufeat_bit(X86_FEATURE_NX), %edx
jnc 1f
- orb $EFER_NX >> 8, 1 + sym_esi(trampoline_efer)
+ orb $EFER_NXE >> 8, 1 + sym_esi(trampoline_efer)
1:
/* Check for availability of long mode. */
if (disable) {
wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable & ~disable);
bootsym(trampoline_misc_enable_off) |= disable;
- bootsym(trampoline_efer) |= EFER_NX;
+ bootsym(trampoline_efer) |= EFER_NXE;
}
if (disable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID)
printk(KERN_INFO "revised cpuid level: %d\n",
cpuid_eax(0));
if (disable & MSR_IA32_MISC_ENABLE_XD_DISABLE) {
- write_efer(read_efer() | EFER_NX);
+ write_efer(read_efer() | EFER_NXE);
printk(KERN_INFO
"re-enabled NX (Execute Disable) protection\n");
}
caps[cpufeat_word(X86_FEATURE_SYSCALL)] = cpuid_edx(0x80000001);
if ( cpu_has_nx )
- trampoline_efer |= EFER_NX;
+ trampoline_efer |= EFER_NXE;
}
}
if ( (value & EFER_LMA) && (!(value & EFER_LME) || !cr0_pg) )
return "LMA/LME/CR0.PG inconsistency";
- if ( (value & EFER_NX) && !p->extd.nx )
- return "NX without feature";
+ if ( (value & EFER_NXE) && !p->extd.nx )
+ return "NXE without feature";
if ( (value & EFER_SVME) && (!p->extd.svm || !nestedhvm_enabled(d)) )
return "SVME without nested virt";
if ( paging_mode_shadow(v->domain) )
{
/* EFER.NX is a Xen-owned bit and is not under guest control. */
- guest_efer &= ~EFER_NX;
- guest_efer |= xen_efer & EFER_NX;
+ guest_efer &= ~EFER_NXE;
+ guest_efer |= xen_efer & EFER_NXE;
/*
 * CR0.PG is a Xen-owned bit, and remains set even when the guest has
 * logically disabled paging.  When using shadow pagetables, EFER.NX is
 * a Xen-owned bit and is not under guest control.
 */
- guest_efer &= ~EFER_NX;
- guest_efer |= xen_efer & EFER_NX;
+ guest_efer &= ~EFER_NXE;
+ guest_efer |= xen_efer & EFER_NXE;
}
if ( !vmx_unrestricted_guest(v) )
#define hvm_smap_enabled(v) \
(hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMAP))
#define hvm_nx_enabled(v) \
- ((v)->arch.hvm.guest_efer & EFER_NX)
+ ((v)->arch.hvm.guest_efer & EFER_NXE)
#define hvm_pku_enabled(v) \
(hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PKE))
#define PASID_PASID_MASK 0x000fffff
#define PASID_VALID (_AC(1, ULL) << 31)
+#define MSR_EFER 0xc0000080 /* Extended Feature Enable Register */
+#define EFER_SCE (_AC(1, ULL) << 0) /* SYSCALL Enable */
+#define EFER_LME (_AC(1, ULL) << 8) /* Long Mode Enable */
+#define EFER_LMA (_AC(1, ULL) << 10) /* Long Mode Active */
+#define EFER_NXE (_AC(1, ULL) << 11) /* No Execute Enable */
+#define EFER_SVME (_AC(1, ULL) << 12) /* Secure Virtual Machine Enable */
+#define EFER_FFXSE (_AC(1, ULL) << 14) /* Fast FXSAVE/FXRSTOR */
+
+#define EFER_KNOWN_MASK \
+ (EFER_SCE | EFER_LME | EFER_LMA | EFER_NXE | EFER_SVME | EFER_FFXSE)
+
#define MSR_K8_SYSCFG 0xc0010010
#define SYSCFG_MTRR_FIX_DRAM_EN (_AC(1, ULL) << 18)
#define SYSCFG_MTRR_FIX_DRAM_MOD_EN (_AC(1, ULL) << 19)
*/
/* x86-64 specific MSRs */
-#define MSR_EFER 0xc0000080 /* extended feature register */
#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */
#define MSR_SHADOW_GS_BASE 0xc0000102 /* SwapGS GS shadow */
#define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */
-/* EFER bits: */
-#define _EFER_SCE 0 /* SYSCALL/SYSRET */
-#define _EFER_LME 8 /* Long mode enable */
-#define _EFER_LMA 10 /* Long mode active (read-only) */
-#define _EFER_NX 11 /* No execute enable */
-#define _EFER_SVME 12 /* AMD: SVM enable */
-#define _EFER_FFXSE 14 /* AMD: Fast FXSAVE/FXRSTOR enable */
-
-#define EFER_SCE (1<<_EFER_SCE)
-#define EFER_LME (1<<_EFER_LME)
-#define EFER_LMA (1<<_EFER_LMA)
-#define EFER_NX (1<<_EFER_NX)
-#define EFER_SVME (1<<_EFER_SVME)
-#define EFER_FFXSE (1<<_EFER_FFXSE)
-
-#define EFER_KNOWN_MASK (EFER_SCE | EFER_LME | EFER_LMA | EFER_NX | \
- EFER_SVME | EFER_FFXSE)
-
/* Intel MSRs. Some also available on other CPUs */
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_A_PERFCTR0 0x000004c1