xenbits.xensource.com Git - xen.git/commitdiff
x86/HVM: correct the SMEP logic for HVM_CR0_GUEST_RESERVED_BITS
author Feng Wu <feng.wu@intel.com>
Mon, 12 May 2014 15:41:42 +0000 (17:41 +0200)
committer Jan Beulich <jbeulich@suse.com>
Mon, 12 May 2014 15:41:42 +0000 (17:41 +0200)
When checking the SMEP feature for HVM guests, we should check the
VCPU instead of the host CPU.

Signed-off-by: Feng Wu <feng.wu@intel.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
master commit: 31ee951a3bee6e7cc21f94f900fe989e3701a79a
master date: 2014-04-28 12:47:24 +0200

xen/include/asm-x86/hvm/hvm.h

index f9ea093ddc58d6d606104973d3545f9bd5a87bd0..902f224859e602cb3609b011e4b2b5d8efbef0e5 100644 (file)
@@ -339,6 +339,19 @@ static inline int hvm_do_pmu_interrupt(struct cpu_user_regs *regs)
     return hvm_funcs.do_pmu_interrupt(regs);
 }
 
+static inline bool_t hvm_vcpu_has_smep(void)
+{
+    unsigned int eax, ebx, ecx = 0, dummy;
+
+    hvm_cpuid(0, &eax, &dummy, &dummy, &dummy);
+
+    if ( eax < 7 )
+        return 0;
+
+    hvm_cpuid(7, &dummy, &ebx, &ecx, &dummy);
+    return !!(ebx & cpufeat_mask(X86_FEATURE_SMEP));
+}
+
 /* These reserved bits in lower 32 remain 0 after any load of CR0 */
 #define HVM_CR0_GUEST_RESERVED_BITS             \
     (~((unsigned long)                          \
@@ -358,7 +371,7 @@ static inline int hvm_do_pmu_interrupt(struct cpu_user_regs *regs)
         X86_CR4_DE  | X86_CR4_PSE | X86_CR4_PAE |       \
         X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
         X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT |           \
-        (cpu_has_smep ? X86_CR4_SMEP : 0) |             \
+        (hvm_vcpu_has_smep() ? X86_CR4_SMEP : 0) |      \
         (cpu_has_fsgsbase ? X86_CR4_FSGSBASE : 0) |     \
         ((nestedhvm_enabled((_v)->domain) && cpu_has_vmx)\
                       ? X86_CR4_VMXE : 0)  |             \