xenbits.xensource.com Git - people/aperard/linux.git/commitdiff
KVM/VMX: Use BT+JNC, i.e. EFLAGS.CF to select VMRESUME vs. VMLAUNCH
author Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Mon, 4 Mar 2024 09:24:18 +0000 (01:24 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 6 Mar 2024 14:45:20 +0000 (14:45 +0000)
From: Sean Christopherson <seanjc@google.com>

commit 706a189dcf74d3b3f955e9384785e726ed6c7c80 upstream.

Use EFLAGS.CF instead of EFLAGS.ZF to track whether to use VMRESUME versus
VMLAUNCH.  Freeing up EFLAGS.ZF will allow doing VERW, which clobbers ZF,
for MDS mitigations as late as possible without needing to duplicate VERW
for both paths.

  [ pawan: resolved merge conflict in __vmx_vcpu_run in backport. ]

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
Link: https://lore.kernel.org/all/20240213-delay-verw-v8-5-a6216d83edb7%40linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
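
For illustration only: a minimal user-space sketch (not code from this patch; the program, its variable names, and the use of INC as a stand-in for VERW are assumptions) of the flag behaviour the change relies on. BT copies the tested bit into EFLAGS.CF, and an instruction that writes ZF but not CF leaves the latched decision intact, whereas the old TEST/JZ pattern would lose it to a late ZF clobber.

/* sketch.c - build with: gcc -O2 sketch.c  (x86-64 assumed) */
#include <stdio.h>

int main(void)
{
	unsigned long run_flags = 1;	/* bit 0 set: "use VMRESUME"        */
	unsigned long scratch = 0;
	int carry = 0;

	asm volatile(
		"bt   $0, %[flags]\n\t"	/* CF <- bit 0 of run_flags         */
		"inc  %[tmp]\n\t"	/* writes ZF (as VERW would), not CF */
		"setc %b[cf]\n\t"	/* capture CF after the ZF clobber  */
		: [cf] "+r" (carry), [tmp] "+r" (scratch)
		: [flags] "r" (run_flags)
		: "cc");

	printf("CF after ZF-clobbering insn: %d (1 => VMRESUME path)\n", carry);
	return 0;
}

Because CF survives VERW, a follow-up change can issue a single VERW right before the branch instead of duplicating it on both the VMLAUNCH and VMRESUME paths, which is the motivation stated in the changelog above.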
arch/x86/kvm/vmx/run_flags.h
arch/x86/kvm/vmx/vmenter.S

index edc3f16cc1896f29e4eef46da685d22b4c31c668..6a9bfdfbb6e59b2e613385cd2ad46cc651a0eb28 100644
--- a/arch/x86/kvm/vmx/run_flags.h
+++ b/arch/x86/kvm/vmx/run_flags.h
@@ -2,7 +2,10 @@
 #ifndef __KVM_X86_VMX_RUN_FLAGS_H
 #define __KVM_X86_VMX_RUN_FLAGS_H
 
-#define VMX_RUN_VMRESUME       (1 << 0)
-#define VMX_RUN_SAVE_SPEC_CTRL (1 << 1)
+#define VMX_RUN_VMRESUME_SHIFT         0
+#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT   1
+
+#define VMX_RUN_VMRESUME               BIT(VMX_RUN_VMRESUME_SHIFT)
+#define VMX_RUN_SAVE_SPEC_CTRL         BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
 
 #endif /* __KVM_X86_VMX_RUN_FLAGS_H */
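
A note on why the header gains the *_SHIFT defines: TESTB takes a bit mask, while BT encodes a bit position, so the shift is now the primary definition and the mask is derived from it with BIT() to keep the two views of the same flag in sync. A tiny stand-alone check of that equivalence might look like the sketch below (MY_BIT() is a hypothetical stand-in for the kernel's BIT() macro, which is not available in user space):

/* bit_check.c - illustrative only */
#include <assert.h>

#define VMX_RUN_VMRESUME_SHIFT	0
#define MY_BIT(nr)		(1UL << (nr))
#define VMX_RUN_VMRESUME	MY_BIT(VMX_RUN_VMRESUME_SHIFT)

/* The mask tested by C code and the bit index used by BT must agree. */
static_assert(VMX_RUN_VMRESUME == (1UL << VMX_RUN_VMRESUME_SHIFT),
	      "mask/shift mismatch");

int main(void)
{
	return 0;
}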
index 0b5db4de4d09e568cc01b6005748f97104d02ec8..42c0b2c3aee10c5a34efa9ef311f712c3e061ff8 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -106,7 +106,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
        mov (%_ASM_SP), %_ASM_AX
 
        /* Check if vmlaunch or vmresume is needed */
-       testb $VMX_RUN_VMRESUME, %bl
+       bt   $VMX_RUN_VMRESUME_SHIFT, %bx
 
        /* Load guest registers.  Don't clobber flags. */
        mov VCPU_RCX(%_ASM_AX), %_ASM_CX
@@ -128,8 +128,8 @@ SYM_FUNC_START(__vmx_vcpu_run)
        /* Load guest RAX.  This kills the @regs pointer! */
        mov VCPU_RAX(%_ASM_AX), %_ASM_AX
 
-       /* Check EFLAGS.ZF from 'testb' above */
-       jz .Lvmlaunch
+       /* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
+       jnc .Lvmlaunch
 
        /*
         * After a successful VMRESUME/VMLAUNCH, control flow "magically"