x86/pv: Move async_exception_cleanup() into pv/iret.c
author    Andrew Cooper <andrew.cooper3@citrix.com>
          Tue, 23 Jul 2019 19:46:35 +0000 (20:46 +0100)
committer Andrew Cooper <andrew.cooper3@citrix.com>
          Wed, 24 Jul 2019 13:40:10 +0000 (14:40 +0100)
All callers are in pv/iret.c.  Move the function and make it static.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
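
As context for the code being moved: the function detects whether exactly one
asynchronous exception is pending via the classic mask & (mask - 1) test
(zero iff at most one bit is set) and, in that case, recovers the trap index
with a bit scan.  A minimal standalone sketch of that idiom, not part of the
patch, using __builtin_ctz in place of Xen's __scanbit and purely
illustrative names:

#include <stdio.h>

/* Illustrative helper: true iff exactly one bit is set in mask. */
static int is_single_bit(unsigned int mask)
{
    return mask && !(mask & (mask - 1));
}

int main(void)
{
    unsigned int mask = 1u << 3;        /* e.g. only "trap 3" pending */

    if ( is_single_bit(mask) )
        /* __builtin_ctz stands in for Xen's __scanbit here. */
        printf("single pending trap: %d\n", __builtin_ctz(mask));
    else
        printf("zero or multiple traps pending\n");

    return 0;
}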
xen/arch/x86/pv/iret.c
xen/arch/x86/traps.c
xen/include/asm-x86/traps.h

diff --git a/xen/arch/x86/pv/iret.c b/xen/arch/x86/pv/iret.c
index c359a1dbfdf410c30a2e9a99a6ba169db8e5b09b..16b449ff645594cf1f895013e177e3cf3d140f5c 100644
--- a/xen/arch/x86/pv/iret.c
+++ b/xen/arch/x86/pv/iret.c
 #include <xen/sched.h>
 
 #include <asm/current.h>
-#include <asm/traps.h>
+
+static void async_exception_cleanup(struct vcpu *curr)
+{
+    unsigned int trap;
+
+    if ( !curr->async_exception_mask )
+        return;
+
+    if ( !(curr->async_exception_mask & (curr->async_exception_mask - 1)) )
+        trap = __scanbit(curr->async_exception_mask, VCPU_TRAP_NONE);
+    else
+        for ( trap = VCPU_TRAP_NONE + 1; trap <= VCPU_TRAP_LAST; ++trap )
+            if ( (curr->async_exception_mask ^
+                  curr->async_exception_state(trap).old_mask) == (1u << trap) )
+                break;
+    if ( unlikely(trap > VCPU_TRAP_LAST) )
+    {
+        ASSERT_UNREACHABLE();
+        return;
+    }
+
+    /* Restore previous asynchronous exception mask. */
+    curr->async_exception_mask = curr->async_exception_state(trap).old_mask;
+}
 
 unsigned long do_iret(void)
 {
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 08d7edc568ef80e887563a6959b4f367316388ef..38d12013db950338d52085375545809d49c908f8 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1593,30 +1593,6 @@ static void pci_serr_softirq(void)
     outb(inb(0x61) & 0x0b, 0x61); /* re-enable the PCI SERR error line. */
 }
 
-void async_exception_cleanup(struct vcpu *curr)
-{
-    int trap;
-
-    if ( !curr->async_exception_mask )
-        return;
-
-    if ( !(curr->async_exception_mask & (curr->async_exception_mask - 1)) )
-        trap = __scanbit(curr->async_exception_mask, VCPU_TRAP_NONE);
-    else
-        for ( trap = VCPU_TRAP_NONE + 1; trap <= VCPU_TRAP_LAST; ++trap )
-            if ( (curr->async_exception_mask ^
-                  curr->async_exception_state(trap).old_mask) == (1 << trap) )
-                break;
-    if ( unlikely(trap > VCPU_TRAP_LAST) )
-    {
-        ASSERT_UNREACHABLE();
-        return;
-    }
-
-    /* Restore previous asynchronous exception mask. */
-    curr->async_exception_mask = curr->async_exception_state(trap).old_mask;
-}
-
 static void nmi_hwdom_report(unsigned int reason_idx)
 {
     struct domain *d = hardware_domain;
diff --git a/xen/include/asm-x86/traps.h b/xen/include/asm-x86/traps.h
index b88f2a4f2fe875319fadc0f0c6c34ab2008cbe02..ec23d3a70b36dd3d02afd3bdf2f07665ccf35682 100644
--- a/xen/include/asm-x86/traps.h
+++ b/xen/include/asm-x86/traps.h
@@ -19,8 +19,6 @@
 #ifndef ASM_TRAP_H
 #define ASM_TRAP_H
 
-void async_exception_cleanup(struct vcpu *);
-
 const char *trapstr(unsigned int trapnr);
 
 #endif /* ASM_TRAP_H */