xenbits.xensource.com Git - xen.git/commitdiff
xen/arm: Ensure the SSBD workaround is re-enabled right after exiting a guest
authorJulien Grall <julien.grall@arm.com>
Mon, 23 Sep 2019 16:45:22 +0000 (17:45 +0100)
committerStefano Stabellini <sstabellini@kernel.org>
Wed, 27 Nov 2019 22:30:49 +0000 (14:30 -0800)
At the moment, SSBD workaround is re-enabled for Xen after interrupts
are unmasked. This means we may end up executing some part of the
hypervisor if an interrupt is received before the workaround is
re-enabled.

Each trap may require different interrupts to be unmasked.
As the rest of enter_hypervisor_from_guest() does not require interrupts
to be masked, the function is now split into two parts:
    1) enter_hypervisor_from_guest_preirq() called with interrupts
       masked.
    2) enter_hypervisor_from_guest() called with interrupts unmasked.

Note that while it might be possible to avoid splitting the function into
two parts, it requires a bit more work than I can currently invest to
avoid using indirect branch.

Furthermore, the function name is rather generic as there might be more
work to do before interrupts are unmasked in the future.

Fixes: a7898e4c59 ("xen/arm: Add ARCH_WORKAROUND_2 support for guests")
Reported-by: Andrii Anisov <andrii_anisov@epam.com>
Signed-off-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Release-acked-by: Juergen Gross <jgross@suse.com>
(cherry picked from commit efee8ba9bf84d54e752f2a44c510cdfb3cc0c282)

xen/arch/arm/arm32/entry.S
xen/arch/arm/arm64/entry.S
xen/arch/arm/traps.c

index 59a296a4add0f4719c156f335f514689a3e4dd46..d5c2982e4a0f107f0671c941474d681f30456f17 100644 (file)
@@ -118,7 +118,7 @@ abort_guest_exit_end:
         bne return_from_trap
 
 skip_check:
-        mov pc, lr
+        b   enter_hypervisor_from_guest_preirq
 ENDPROC(prepare_context_from_guest)
 
         /*
index 8c9963c50b0d1048355050f640743eef39ba307f..c476108b7b1b23aa202f9f998703b4b1a03087c1 100644 (file)
@@ -196,6 +196,7 @@ lr      .req    x30             /* link register */
         ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
                     "nop; nop",
                     SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
+        bl      enter_hypervisor_from_guest_preirq
         msr     daifclr, \iflags
         bl      enter_hypervisor_from_guest
         mov     x0, sp
index 53cbaca4000b1be2a09989cd8a3cd021d3ee0e07..ae7bab6c0e529ae3786216dfb4d6698ebea7ee34 100644 (file)
@@ -1978,15 +1978,25 @@ static inline bool needs_ssbd_flip(struct vcpu *v)
 
 /*
  * Actions that needs to be done after entering the hypervisor from the
- * guest and before we handle any request.
+ * guest and before the interrupts are unmasked.
  */
-void enter_hypervisor_from_guest(void)
+void enter_hypervisor_from_guest_preirq(void)
 {
     struct vcpu *v = current;
 
     /* If the guest has disabled the workaround, bring it back on. */
     if ( needs_ssbd_flip(v) )
         arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 1, NULL);
+}
+
+/*
+ * Actions that needs to be done after entering the hypervisor from the
+ * guest and before we handle any request. Depending on the exception trap,
+ * this may be called with interrupts unmasked.
+ */
+void enter_hypervisor_from_guest(void)
+{
+    struct vcpu *v = current;
 
     /*
      * If we pended a virtual abort, preserve it until it gets cleared.