xenbits.xensource.com Git - people/tklengyel/xen.git/commitdiff
Revert "x86/HVM: also dump stacks from show_execution_state()"
author Roger Pau Monne <roger.pau@citrix.com>
Fri, 4 Nov 2022 14:43:37 +0000 (15:43 +0100)
committer Andrew Cooper <andrew.cooper3@citrix.com>
Wed, 9 Nov 2022 20:07:45 +0000 (20:07 +0000)
This reverts commit adb715db698bc8ec3b88c24eb88b21e9da5b6c07.

The dumping of stacks for HVM guests is problematic, since it requires
taking the p2m lock in order to walk the guest page tables and the p2m.

The suggested solution to the issue is to introduce and use a lockless p2m
walker that relies on being executed with interrupts disabled in order to
prevent any p2m pages from being freed while the walk is in progress.
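
A minimal user-space sketch of that scheme (not Xen code; every name in it is
made up for the illustration): a walker pins itself before touching the
tables, standing in for running with interrupts disabled, and the writer
waits for all walkers to drain before releasing a replaced table page.

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy "p2m": an atomically replaceable pointer to a 512-entry table. */
    static _Atomic(uint64_t *) table;
    /* In-flight lockless walkers (stands in for CPUs with IRQs disabled). */
    static atomic_int walkers;

    /* Lockless read: pin ourselves, then walk whatever table is current. */
    static uint64_t walk_lockless(unsigned int idx)
    {
        uint64_t val;

        atomic_fetch_add(&walkers, 1);     /* cf. local_irq_save() */
        val = atomic_load(&table)[idx];
        atomic_fetch_sub(&walkers, 1);     /* cf. local_irq_restore() */

        return val;
    }

    /* Writer: publish the new table, wait for walkers to drain, then free. */
    static void replace_table(uint64_t *new_table)
    {
        uint64_t *old = atomic_exchange(&table, new_table);

        while ( atomic_load(&walkers) )    /* "grace period" */
            ;                              /* toy: spin; Xen would synchronise differently */

        free(old);
    }

    int main(void)
    {
        uint64_t *t = calloc(512, sizeof(*t));
        uint64_t *t2 = calloc(512, sizeof(*t2));

        t[0] = 0x1234;
        atomic_store(&table, t);
        printf("entry 0 = %#" PRIx64 "\n", walk_lockless(0));

        t2[0] = 0x5678;
        replace_table(t2);                 /* frees t once no walker is left */
        printf("entry 0 = %#" PRIx64 "\n", walk_lockless(0));

        free(t2);
        return 0;
    }

The spin-wait is purely an artefact of the simulation; the point is only that
a walker which cannot be interrupted also cannot have the pages it is
traversing freed underneath it.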

Note that modifications of p2m entries are already done atomically in order
to prevent the hardware walker from seeing partially updated values.
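
As a rough stand-alone illustration of that property (plain C11, not the Xen
code): publishing each 64-bit entry with a single atomic store means a
concurrent reader, such as the hardware walker, observes either the complete
old value or the complete new one, never a torn mixture.

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy 64-bit "p2m entry": a frame number plus permission bits. */
    static _Atomic uint64_t entry;

    /* Writer: compose the new value, then publish it with one atomic store. */
    static void update_entry(uint64_t mfn, uint64_t perms)
    {
        atomic_store(&entry, (mfn << 12) | perms);
    }

    /* Reader (standing in for the hardware walker): a single atomic load. */
    static uint64_t read_entry(void)
    {
        return atomic_load(&entry);
    }

    int main(void)
    {
        update_entry(0x1234, 0x7);
        printf("entry = %#" PRIx64 "\n", read_entry());
        return 0;
    }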

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Release-acked-by: Henry Wang <Henry.Wang@arm.com>
xen/arch/x86/traps.c

index 7207390118e8110868c8376a3ccc6fbbc387b9f7..56e59896bfecd2dd50878b1c11df4eb55db82e8f 100644
@@ -278,6 +278,10 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs)
     unsigned long mask = STACK_SIZE;
     void *stack_page = NULL;
 
+    /* Avoid HVM as we don't know what the stack looks like. */
+    if ( is_hvm_vcpu(v) )
+        return;
+
     if ( is_pv_32bit_vcpu(v) )
     {
         compat_show_guest_stack(v, regs, debug_stack_lines);
@@ -591,6 +595,9 @@ static void show_stack(const struct cpu_user_regs *regs)
     unsigned long *stack = ESP_BEFORE_EXCEPTION(regs), *stack_bottom, addr;
     int i;
 
+    if ( guest_mode(regs) )
+        return show_guest_stack(current, regs);
+
     printk("Xen stack trace from "__OP"sp=%p:\n  ", stack);
 
     stack_bottom = _p(get_stack_dump_bottom(regs->rsp));
@@ -655,30 +662,8 @@ void show_execution_state(const struct cpu_user_regs *regs)
     unsigned long flags = console_lock_recursive_irqsave();
 
     show_registers(regs);
-
-    if ( guest_mode(regs) )
-    {
-        struct vcpu *curr = current;
-
-        if ( is_hvm_vcpu(curr) )
-        {
-            /*
-             * Stop interleaving prevention: The necessary P2M lookups
-             * involve locking, which has to occur with IRQs enabled.
-             */
-            console_unlock_recursive_irqrestore(flags);
-
-            show_hvm_stack(curr, regs);
-            return;
-        }
-
-        show_guest_stack(curr, regs);
-    }
-    else
-    {
-        show_code(regs);
-        show_stack(regs);
-    }
+    show_code(regs);
+    show_stack(regs);
 
     console_unlock_recursive_irqrestore(flags);
 }