x86/spec_ctrl: Rename bits of infrastructure to avoid NATIVE and VMEXIT
author    Andrew Cooper <andrew.cooper3@citrix.com>
          Tue, 29 May 2018 08:59:39 +0000 (10:59 +0200)
committer Jan Beulich <jbeulich@suse.com>
          Tue, 29 May 2018 08:59:39 +0000 (10:59 +0200)
In hindsight, using NATIVE and VMEXIT as naming terminology was not clever.
A future change wants to split SPEC_CTRL_EXIT_TO_GUEST into PV- and HVM-specific
implementations, and using VMEXIT as a term there is completely wrong.

Take the opportunity to fix some stale documentation in spec_ctrl_asm.h.  The
IST helpers were missing from the large comment block, and since
SPEC_CTRL_ENTRY_FROM_INTR_IST was introduced, we've gained a new piece of
functionality which currently depends on the fine-grained control that exists
in lieu of livepatching.  Note this in the comment.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
master commit: d9822b8a38114e96e4516dc998f4055249364d5d
master date: 2018-05-16 12:19:10 +0100
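
A rough model of what the rename buys (illustration only: Xen's real
ALTERNATIVE mechanism patches NOPs in the instruction stream at boot rather
than branching at run time, and every *_demo name below is hypothetical).
The renamed entry points reduce to feature-gated fragments keyed on separate
PV and HVM bits:

    /*
     * Model only: in Xen, ALTERNATIVE patches these choices into the
     * instruction stream at boot; plain if()s stand in for that here.
     * All *_demo names are hypothetical.
     */
    #include <stdbool.h>
    #include <stdio.h>

    enum {
        SC_RSB_PV,   /* was RSB_NATIVE: overwrite RSB on PV entry */
        SC_RSB_HVM,  /* was RSB_VMEXIT: overwrite RSB on VMEXIT */
        SC_MSR,      /* MSR_SPEC_CTRL is in use by Xen */
        NR_DEMO_FEATURES,
    };

    static bool demo_features[NR_DEMO_FEATURES];

    static void do_overwrite_rsb_demo(void)   { puts("  overwrite RSB"); }
    static void do_spec_ctrl_entry_demo(void) { puts("  load Xen's SPEC_CTRL value"); }

    /* Stands in for SPEC_CTRL_ENTRY_FROM_HVM: keyed on the HVM RSB bit. */
    static void spec_ctrl_entry_from_hvm(void)
    {
        if ( demo_features[SC_RSB_HVM] )
            do_overwrite_rsb_demo();
        if ( demo_features[SC_MSR] )
            do_spec_ctrl_entry_demo();
    }

    /*
     * Stands in for SPEC_CTRL_ENTRY_FROM_PV: same shape, but keyed on the
     * PV RSB bit.  With two independent bits, guest-type-neutral names
     * like RSB_NATIVE/RSB_VMEXIT no longer describe what gets selected.
     */
    static void spec_ctrl_entry_from_pv(void)
    {
        if ( demo_features[SC_RSB_PV] )
            do_overwrite_rsb_demo();
        if ( demo_features[SC_MSR] )
            do_spec_ctrl_entry_demo();
    }

    int main(void)
    {
        demo_features[SC_RSB_HVM] = true;   /* e.g. rsb_vmexit left enabled */
        demo_features[SC_MSR] = true;

        puts("HVM vmexit path:");
        spec_ctrl_entry_from_hvm();
        puts("PV entry path:");
        spec_ctrl_entry_from_pv();
        return 0;
    }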

xen/arch/x86/cpu/common.c
xen/arch/x86/hvm/svm/entry.S
xen/arch/x86/hvm/vmx/entry.S
xen/arch/x86/spec_ctrl.c
xen/arch/x86/x86_64/compat/entry.S
xen/arch/x86/x86_64/entry.S
xen/include/asm-x86/cpufeature.h
xen/include/asm-x86/spec_ctrl_asm.h

diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 3da097927757248148c5ce76baacfb216edd7266..1ba1622e72baa3c4eebe85531573f82e1bcda51e 100644
@@ -362,12 +362,12 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
                if (test_bit(X86_FEATURE_SC_MSR,
                             boot_cpu_data.x86_capability))
                        __set_bit(X86_FEATURE_SC_MSR, c->x86_capability);
-               if (test_bit(X86_FEATURE_RSB_NATIVE,
+               if (test_bit(X86_FEATURE_SC_RSB_PV,
                             boot_cpu_data.x86_capability))
-                       __set_bit(X86_FEATURE_RSB_NATIVE, c->x86_capability);
-               if (test_bit(X86_FEATURE_RSB_VMEXIT,
+                       __set_bit(X86_FEATURE_SC_RSB_PV, c->x86_capability);
+               if (test_bit(X86_FEATURE_SC_RSB_HVM,
                             boot_cpu_data.x86_capability))
-                       __set_bit(X86_FEATURE_RSB_VMEXIT, c->x86_capability);
+                       __set_bit(X86_FEATURE_SC_RSB_HVM, c->x86_capability);
 
                /* AND the already accumulated flags with these */
                for ( i = 0 ; i < NCAPINTS ; i++ )
diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S
index 706bdd301a96ddb59bf1164971a9ae57e312d241..6426452baad4e7d0ceee4970e009b7e6302155de 100644
@@ -81,7 +81,7 @@ UNLIKELY_END(svm_trace)
         mov VCPU_arch_spec_ctrl(%rbx), %eax
 
         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+        SPEC_CTRL_EXIT_TO_HVM   /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
 
         pop  %r15
         pop  %r14
@@ -106,7 +106,7 @@ UNLIKELY_END(svm_trace)
 
         GET_CURRENT(%rbx)
 
-        SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+        SPEC_CTRL_ENTRY_FROM_HVM    /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
         mov  VCPU_svm_vmcb(%rbx),%rcx
diff --git a/xen/arch/x86/hvm/vmx/entry.S b/xen/arch/x86/hvm/vmx/entry.S
index d43ae26a6bed05beed3e674cc3f39b0e1e32d2d0..32e0f87ae49e38a44e83f0e1af6e6ad05f76bc07 100644
@@ -37,7 +37,7 @@ ENTRY(vmx_asm_vmexit_handler)
         movb $1,VCPU_vmx_launched(%rbx)
         mov  %rax,VCPU_hvm_guest_cr2(%rbx)
 
-        SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+        SPEC_CTRL_ENTRY_FROM_HVM    /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
         mov  %rsp,%rdi
@@ -72,7 +72,7 @@ UNLIKELY_END(realmode)
         mov VCPU_arch_spec_ctrl(%rbx), %eax
 
         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+        SPEC_CTRL_EXIT_TO_HVM   /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
 
         mov  VCPU_hvm_guest_cr2(%rbx),%rax
 
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 4fcbba21435367a74e6bc5cbf769da68fa45bb25..91e18487f0fc9fa08bfff518e8fe60570799d8eb 100644
@@ -35,8 +35,8 @@ static enum ind_thunk {
     THUNK_JMP,
 } opt_thunk __initdata = THUNK_DEFAULT;
 static int8_t __initdata opt_ibrs = -1;
-static bool_t __initdata opt_rsb_native = 1;
-static bool_t __initdata opt_rsb_vmexit = 1;
+static bool_t __initdata opt_rsb_pv = 1;
+static bool_t __initdata opt_rsb_hvm = 1;
 bool_t __read_mostly opt_ibpb = 1;
 uint8_t __read_mostly default_xen_spec_ctrl;
 uint8_t __read_mostly default_spec_ctrl_flags;
@@ -69,9 +69,9 @@ static int __init parse_bti(const char *s)
         else if ( (val = parse_boolean("ibpb", s, ss)) >= 0 )
             opt_ibpb = val;
         else if ( (val = parse_boolean("rsb_native", s, ss)) >= 0 )
-            opt_rsb_native = val;
+            opt_rsb_pv = val;
         else if ( (val = parse_boolean("rsb_vmexit", s, ss)) >= 0 )
-            opt_rsb_vmexit = val;
+            opt_rsb_hvm = val;
         else
             rc = -EINVAL;
 
@@ -118,8 +118,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
            default_xen_spec_ctrl & SPEC_CTRL_IBRS    ? " IBRS+" :
                                                        " IBRS-"      : "",
            opt_ibpb                                  ? " IBPB"       : "",
-           boot_cpu_has(X86_FEATURE_RSB_NATIVE)      ? " RSB_NATIVE" : "",
-           boot_cpu_has(X86_FEATURE_RSB_VMEXIT)      ? " RSB_VMEXIT" : "");
+           boot_cpu_has(X86_FEATURE_SC_RSB_PV)       ? " RSB_NATIVE" : "",
+           boot_cpu_has(X86_FEATURE_SC_RSB_HVM)      ? " RSB_VMEXIT" : "");
 }
 
 /* Calculate whether Retpoline is known-safe on this CPU. */
@@ -304,9 +304,9 @@ void __init init_speculation_mitigations(void)
      * If a processor speculates to 32bit PV guest kernel mappings, it is
      * speculating in 64bit supervisor mode, and can leak data.
      */
-    if ( opt_rsb_native )
+    if ( opt_rsb_pv )
     {
-        __set_bit(X86_FEATURE_RSB_NATIVE, boot_cpu_data.x86_capability);
+        __set_bit(X86_FEATURE_SC_RSB_PV, boot_cpu_data.x86_capability);
         default_spec_ctrl_flags |= SCF_ist_rsb;
     }
 
@@ -314,8 +314,8 @@ void __init init_speculation_mitigations(void)
      * HVM guests can always poison the RSB to point at Xen supervisor
      * mappings.
      */
-    if ( opt_rsb_vmexit )
-        __set_bit(X86_FEATURE_RSB_VMEXIT, boot_cpu_data.x86_capability);
+    if ( opt_rsb_hvm )
+        __set_bit(X86_FEATURE_SC_RSB_HVM, boot_cpu_data.x86_capability);
 
     /* Check we have hardware IBPB support before using it... */
     if ( !boot_cpu_has(X86_FEATURE_IBRSB) && !boot_cpu_has(X86_FEATURE_IBPB) )
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index c211e9a86987ccff767ea136bf5239fcd4b3726d..6a48fc50b746c3f7cf8b3d40938046031d63f063 100644
@@ -235,7 +235,7 @@ ENTRY(compat_restore_all_guest)
         mov VCPU_arch_spec_ctrl(%rbx), %eax
 
         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+        SPEC_CTRL_EXIT_TO_PV    /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
 
         RESTORE_ALL adj=8 compat=1
 .Lft0:  iretq
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 7c8211ae5abba80f22a61908aa339ad431fcdd20..8cecfd407ed40da3b7d6f5fb4eb620c5142a1879 100644
@@ -71,7 +71,7 @@ restore_all_guest:
         mov   %r15d, %eax
 
         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+        SPEC_CTRL_EXIT_TO_PV    /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
 
         RESTORE_ALL
         testw $TRAP_syscall,4(%rsp)
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index ed4f18cf90956d2172379919af4e946681513e68..9c8bca9faab323201bfae036741eec614f97a7b3 100644
@@ -66,8 +66,8 @@
 #define X86_FEATURE_IND_THUNK_JMP   (3*32+ 2) /* Use IND_THUNK_JMP */
 #define X86_FEATURE_XEN_IBPB        (3*32+ 3) /* IBRSB || IBPB */
 #define X86_FEATURE_SC_MSR          (3*32+ 4) /* MSR_SPEC_CTRL used by Xen */
-#define X86_FEATURE_RSB_NATIVE      (3*32+ 6) /* RSB overwrite needed for native */
-#define X86_FEATURE_RSB_VMEXIT      (3*32+ 7) /* RSB overwrite needed for vmexit */
+#define X86_FEATURE_SC_RSB_PV       (3*32+ 6) /* RSB overwrite needed for PV */
+#define X86_FEATURE_SC_RSB_HVM      (3*32+ 7) /* RSB overwrite needed for HVM */
 #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
 #define X86_FEATURE_NONSTOP_TSC        (3*32+ 9) /* TSC does not stop in C states */
 #define X86_FEATURE_ARAT       (3*32+ 10) /* Always running APIC timer */
diff --git a/xen/include/asm-x86/spec_ctrl_asm.h b/xen/include/asm-x86/spec_ctrl_asm.h
index ab47508b8085727e6e191e0311a38f3d7efc6e88..be5cba318e14e6cd706afdc11b013952043aa32c 100644
  *
  * The following ASM fragments implement this algorithm.  See their local
  * comments for further details.
- *  - SPEC_CTRL_ENTRY_FROM_VMEXIT
+ *  - SPEC_CTRL_ENTRY_FROM_HVM
  *  - SPEC_CTRL_ENTRY_FROM_PV
  *  - SPEC_CTRL_ENTRY_FROM_INTR
+ *  - SPEC_CTRL_ENTRY_FROM_INTR_IST
+ *  - SPEC_CTRL_EXIT_TO_XEN_IST
  *  - SPEC_CTRL_EXIT_TO_XEN
- *  - SPEC_CTRL_EXIT_TO_GUEST
+ *  - SPEC_CTRL_EXIT_TO_PV
+ *  - SPEC_CTRL_EXIT_TO_HVM
  */
 
 .macro DO_OVERWRITE_RSB tmp=rax
     mov %\tmp, %rsp                 /* Restore old %rsp */
 .endm
 
-.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT
+.macro DO_SPEC_CTRL_ENTRY_FROM_HVM
 /*
  * Requires %rbx=current, %rsp=regs/cpuinfo
  * Clobbers %rax, %rcx, %rdx
 .endm
 
 /* Use after a VMEXIT from an HVM guest. */
-#define SPEC_CTRL_ENTRY_FROM_VMEXIT                                     \
+#define SPEC_CTRL_ENTRY_FROM_HVM                                        \
     ALTERNATIVE __stringify(ASM_NOP40),                                 \
-        DO_OVERWRITE_RSB, X86_FEATURE_RSB_VMEXIT;                       \
+        DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM;                       \
     ALTERNATIVE __stringify(ASM_NOP33),                                 \
-        DO_SPEC_CTRL_ENTRY_FROM_VMEXIT, X86_FEATURE_SC_MSR
+        DO_SPEC_CTRL_ENTRY_FROM_HVM, X86_FEATURE_SC_MSR
 
 /* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
 #define SPEC_CTRL_ENTRY_FROM_PV                                         \
     ALTERNATIVE __stringify(ASM_NOP40),                                 \
-        DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE;                       \
+        DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV;                        \
     ALTERNATIVE __stringify(ASM_NOP25),                                 \
         __stringify(DO_SPEC_CTRL_ENTRY maybexen=0), X86_FEATURE_SC_MSR
 
 /* Use in interrupt/exception context.  May interrupt Xen or PV context. */
 #define SPEC_CTRL_ENTRY_FROM_INTR                                       \
     ALTERNATIVE __stringify(ASM_NOP40),                                 \
-        DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE;                       \
+        DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV;                        \
     ALTERNATIVE __stringify(ASM_NOP39),                                 \
         __stringify(DO_SPEC_CTRL_ENTRY maybexen=1), X86_FEATURE_SC_MSR
 
     ALTERNATIVE __stringify(ASM_NOP23),                                 \
         DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_SC_MSR
 
-/* Use when exiting to guest context. */
-#define SPEC_CTRL_EXIT_TO_GUEST                                         \
+/* Use when exiting to PV guest context. */
+#define SPEC_CTRL_EXIT_TO_PV                                            \
     ALTERNATIVE __stringify(ASM_NOP24),                                 \
         DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
 
-/* TODO: Drop these when the alternatives infrastructure is NMI/#MC safe. */
+/* Use when exiting to HVM guest context. */
+#define SPEC_CTRL_EXIT_TO_HVM                                           \
+    ALTERNATIVE __stringify(ASM_NOP24),                                 \
+        DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
+
+/*
+ * Use in IST interrupt/exception context.  May interrupt Xen or PV context.
+ * Fine grain control of SCF_ist_wrmsr is needed for safety in the S3 resume
+ * path to avoid using MSR_SPEC_CTRL before the microcode introducing it has
+ * been reloaded.
+ */
 .macro SPEC_CTRL_ENTRY_FROM_INTR_IST
 /*
  * Requires %rsp=regs, %r14=stack_end
@@ -293,6 +306,7 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
     UNLIKELY_END(\@_serialise)
 .endm
 
+/* Use when exiting to Xen in IST context. */
 .macro SPEC_CTRL_EXIT_TO_XEN_IST
 /*
  * Requires %rbx=stack_end