xen/arm: Ensure the SSBD workaround is re-enabled right after exiting a guest
author      Julien Grall <julien.grall@arm.com>
            Mon, 23 Sep 2019 16:45:22 +0000 (17:45 +0100)
committer   Julien Grall <julien.grall@arm.com>
            Fri, 1 Nov 2019 14:31:07 +0000 (14:31 +0000)
At the moment, the SSBD workaround is re-enabled for Xen after
interrupts are unmasked. This means we may end up executing some part
of the hypervisor if an interrupt is received before the workaround is
re-enabled.

Each trap may require different interrupts to be unmasked.
As the rest of enter_hypervisor_from_guest() does not require interrupts
to be masked, the function is now split into two parts:
    1) enter_hypervisor_from_guest_preirq() called with interrupts
       masked.
    2) enter_hypervisor_from_guest() called with interrupts unmasked.

Note that while it might be possible to avoid splitting the function
into two parts, it would require a bit more work than I can currently
invest to avoid using an indirect branch.

Furthermore, the function name is rather generic as there might be more
work to do before interrupts are unmasked in the future.
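
To illustrate the intended ordering, here is a minimal C sketch, not the
actual entry code: example_trap_from_guest() is a hypothetical wrapper,
and local_irq_enable() stands in for the "msr daifclr" step done in the
asm entry path.

    /* Sketch of the ordering on a trap taken from a guest. */
    void example_trap_from_guest(void)       /* hypothetical wrapper */
    {
        /* Interrupts are still masked here, as set up by the trap. */
        enter_hypervisor_from_guest_preirq(); /* SSBD workaround back on */

        /* Stand-in for "msr daifclr, \iflags" in the asm entry code. */
        local_irq_enable();

        /* Any interrupt taken from now on sees the workaround enabled. */
        enter_hypervisor_from_guest();        /* remaining entry work */
    }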

Fixes: a7898e4c59 ("xen/arm: Add ARCH_WORKAROUND_2 support for guests")
Reported-by: Andrii Anisov <andrii_anisov@epam.com>
Signed-off-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Release-acked-by: Juergen Gross <jgross@suse.com>
xen/arch/arm/arm32/entry.S
xen/arch/arm/arm64/entry.S
xen/arch/arm/traps.c

index 59a296a4add0f4719c156f335f514689a3e4dd46..d5c2982e4a0f107f0671c941474d681f30456f17 100644 (file)
@@ -118,7 +118,7 @@ abort_guest_exit_end:
         bne return_from_trap
 
 skip_check:
-        mov pc, lr
+        b   enter_hypervisor_from_guest_preirq
 ENDPROC(prepare_context_from_guest)
 
         /*
index 97dc60210ddd4ecdcc2cfc2bcc378f9aa6e10949..d4fb5fdc1c3d00f89c993d421c85c2d41d509cad 100644 (file)
         ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
                     "nop; nop",
                     SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
+        bl      enter_hypervisor_from_guest_preirq
         msr     daifclr, \iflags
         bl      enter_hypervisor_from_guest
         mov     x0, sp
index adbedc2d15f44bb8a098bd8eac685470609327b4..cb4e3b627b5f71a4f6dc7baa84624353792479a3 100644 (file)
@@ -1986,15 +1986,25 @@ static inline bool needs_ssbd_flip(struct vcpu *v)
 
 /*
  * Actions that needs to be done after entering the hypervisor from the
- * guest and before we handle any request.
+ * guest and before the interrupts are unmasked.
  */
-void enter_hypervisor_from_guest(void)
+void enter_hypervisor_from_guest_preirq(void)
 {
     struct vcpu *v = current;
 
     /* If the guest has disabled the workaround, bring it back on. */
     if ( needs_ssbd_flip(v) )
         arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 1, NULL);
+}
+
+/*
+ * Actions that needs to be done after entering the hypervisor from the
+ * guest and before we handle any request. Depending on the exception trap,
+ * this may be called with interrupts unmasked.
+ */
+void enter_hypervisor_from_guest(void)
+{
+    struct vcpu *v = current;
 
     /*
      * If we pended a virtual abort, preserve it until it gets cleared.