x86/traps: guard VMX-specific functions with using_vmx() check
author     Xenia Ragiadakou <burzalodowa@gmail.com>
           Thu, 1 Aug 2024 11:56:34 +0000 (13:56 +0200)
committer  Jan Beulich <jbeulich@suse.com>
           Thu, 1 Aug 2024 11:56:34 +0000 (13:56 +0200)
Replace the cpu_has_vmx check with using_vmx(), so that not only is VMX support
in the CPU checked at run time, but the availability of the functions
vmx_vmcs_enter() and vmx_vmcs_exit() is also ensured at build time.

Also, since using_vmx() checks CONFIG_VMX, which in turn depends on CONFIG_HVM,
the #ifdef CONFIG_HVM lines around the using_vmx() call sites can be dropped.
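
For illustration, a minimal sketch of the kind of helper this relies on is
shown below; the exact in-tree definition and its location are assumptions
here. The point is that IS_ENABLED(CONFIG_VMX) folds to a compile-time
constant false when VMX is not built, so the compiler can discard the guarded
vmx_vmcs_enter()/vmx_vmcs_exit() calls, which is what makes the surrounding
#ifdef CONFIG_HVM guards unnecessary.

    /*
     * Illustrative sketch only, not necessarily the exact in-tree
     * definition.  cpu_has_vmx is Xen's run-time capability flag;
     * IS_ENABLED() evaluates the Kconfig option at build time.
     */
    static inline bool using_vmx(void)
    {
        /*
         * With CONFIG_VMX=n this is constant false, so calls guarded
         * by it (e.g. vmx_vmcs_enter()) are eliminated as dead code
         * and no #ifdef is needed at the call sites.
         */
        return IS_ENABLED(CONFIG_VMX) && cpu_has_vmx;
    }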

Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/traps.c

index a981b6c2e8b8839ec829c5bffffe0f8ae5f7b3c9..552a07e6aa56de9bdc4dd14c97ca1092d391c417 100644 (file)
@@ -676,7 +676,6 @@ void vcpu_show_execution_state(struct vcpu *v)
 
     vcpu_pause(v); /* acceptably dangerous */
 
-#ifdef CONFIG_HVM
     /*
      * For VMX special care is needed: Reading some of the register state will
      * require VMCS accesses. Engaging foreign VMCSes involves acquiring of a
@@ -684,12 +683,11 @@ void vcpu_show_execution_state(struct vcpu *v)
      * region. Despite this being a layering violation, engage the VMCS right
      * here. This then also avoids doing so several times in close succession.
      */
-    if ( cpu_has_vmx && is_hvm_vcpu(v) )
+    if ( using_vmx() && is_hvm_vcpu(v) )
     {
         ASSERT(!in_irq());
         vmx_vmcs_enter(v);
     }
-#endif
 
     /* Prevent interleaving of output. */
     flags = console_lock_recursive_irqsave();
@@ -714,10 +712,8 @@ void vcpu_show_execution_state(struct vcpu *v)
         console_unlock_recursive_irqrestore(flags);
     }
 
-#ifdef CONFIG_HVM
-    if ( cpu_has_vmx && is_hvm_vcpu(v) )
+    if ( using_vmx() && is_hvm_vcpu(v) )
         vmx_vmcs_exit(v);
-#endif
 
     vcpu_unpause(v);
 }