ia64/xen-unstable

changeset 19768:67a0ffade665

x86: improve output resulting from sending '0' over serial

While the original logic already implied that the kernel part of the
guest's address space is identical on all vCPU-s (i.e. for all guest
processes), it didn't fully leverage the potential here: As long as
the top page table currently active is owned by the subject domain
(currently only Dom0), the stack dump can be done without extra
effort.

For x86-64, additionally add page table traversal so that the stack
can be dumped in all cases (unless it's invalid or user space).

I left the 32-bit variant of do_page_walk() unimplemented for the
moment as I couldn't convince myself using map_domain_page() there is
a good idea, and didn't want to introduce new fixmap entries either.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jun 16 13:57:18 2009 +0100 (2009-06-16)
parents cb6f8a34b59a
children 2d68d518038b
files xen/arch/x86/traps.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_64/compat/traps.c xen/arch/x86/x86_64/mm.c xen/include/asm-x86/mm.h xen/include/asm-x86/processor.h xen/include/asm-x86/x86_64/uaccess.h
line diff
     1.1 --- a/xen/arch/x86/traps.c	Tue Jun 16 13:52:13 2009 +0100
     1.2 +++ b/xen/arch/x86/traps.c	Tue Jun 16 13:57:18 2009 +0100
     1.3 @@ -129,18 +129,18 @@ boolean_param("ler", opt_ler);
     1.4  #define ESP_BEFORE_EXCEPTION(regs) ((unsigned long *)regs->rsp)
     1.5  #endif
     1.6  
     1.7 -static void show_guest_stack(struct cpu_user_regs *regs)
     1.8 +static void show_guest_stack(struct vcpu *v, struct cpu_user_regs *regs)
     1.9  {
    1.10      int i;
    1.11 -    struct vcpu *curr = current;
    1.12      unsigned long *stack, addr;
    1.13 -
    1.14 -    if ( is_hvm_vcpu(curr) )
    1.15 +    unsigned long mask = STACK_SIZE;
    1.16 +
    1.17 +    if ( is_hvm_vcpu(v) )
    1.18          return;
    1.19  
    1.20 -    if ( is_pv_32on64_vcpu(curr) )
    1.21 +    if ( is_pv_32on64_vcpu(v) )
    1.22      {
    1.23 -        compat_show_guest_stack(regs, debug_stack_lines);
    1.24 +        compat_show_guest_stack(v, regs, debug_stack_lines);
    1.25          return;
    1.26      }
    1.27  
    1.28 @@ -156,11 +156,42 @@ static void show_guest_stack(struct cpu_
    1.29          printk("Guest stack trace from "__OP"sp=%p:\n  ", stack);
    1.30      }
    1.31  
    1.32 +    if ( !access_ok(stack, sizeof(*stack)) )
    1.33 +    {
    1.34 +        printk("Guest-inaccessible memory.\n");
    1.35 +        return;
    1.36 +    }
    1.37 +
    1.38 +    if ( v != current )
    1.39 +    {
    1.40 +        struct vcpu *vcpu;
    1.41 +
    1.42 +        ASSERT(guest_kernel_mode(v, regs));
    1.43 +#ifndef __x86_64__
    1.44 +        addr = read_cr3();
    1.45 +        for_each_vcpu( v->domain, vcpu )
    1.46 +            if ( vcpu->arch.cr3 == addr )
    1.47 +                break;
    1.48 +#else
    1.49 +        vcpu = maddr_get_owner(read_cr3()) == v->domain ? v : NULL;
    1.50 +#endif
    1.51 +        if ( !vcpu )
    1.52 +        {
    1.53 +            stack = do_page_walk(v, (unsigned long)stack);
    1.54 +            if ( (unsigned long)stack < PAGE_SIZE )
    1.55 +            {
    1.56 +                printk("Inaccessible guest memory.\n");
    1.57 +                return;
    1.58 +            }
    1.59 +            mask = PAGE_SIZE;
    1.60 +        }
    1.61 +    }
    1.62 +
    1.63      for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ )
    1.64      {
    1.65 -        if ( ((long)stack & (STACK_SIZE-BYTES_PER_LONG)) == 0 )
    1.66 +        if ( (((long)stack - 1) ^ ((long)(stack + 1) - 1)) & mask )
    1.67              break;
    1.68 -        if ( get_user(addr, stack) )
    1.69 +        if ( __get_user(addr, stack) )
    1.70          {
    1.71              if ( i != 0 )
    1.72                  printk("\n    ");
    1.73 @@ -264,7 +295,7 @@ void show_stack(struct cpu_user_regs *re
    1.74      int i;
    1.75  
    1.76      if ( guest_mode(regs) )
    1.77 -        return show_guest_stack(regs);
    1.78 +        return show_guest_stack(current, regs);
    1.79  
    1.80      printk("Xen stack trace from "__OP"sp=%p:\n  ", stack);
    1.81  
    1.82 @@ -346,10 +377,8 @@ void vcpu_show_execution_state(struct vc
    1.83      vcpu_pause(v); /* acceptably dangerous */
    1.84  
    1.85      vcpu_show_registers(v);
    1.86 -    /* Todo: map arbitrary vcpu's top guest stack page here. */
    1.87 -    if ( (v->domain == current->domain) &&
    1.88 -         guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
    1.89 -        show_guest_stack(&v->arch.guest_context.user_regs);
    1.90 +    if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
    1.91 +        show_guest_stack(v, &v->arch.guest_context.user_regs);
    1.92  
    1.93      vcpu_unpause(v);
    1.94  }
     2.1 --- a/xen/arch/x86/x86_32/mm.c	Tue Jun 16 13:52:13 2009 +0100
     2.2 +++ b/xen/arch/x86/x86_32/mm.c	Tue Jun 16 13:57:18 2009 +0100
     2.3 @@ -63,6 +63,11 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l
     2.4      return &idle_pg_table_l2[l2_linear_offset(v)];
     2.5  }
     2.6  
     2.7 +void *do_page_walk(struct vcpu *v, unsigned long addr)
     2.8 +{
     2.9 +    return NULL;
    2.10 +}
    2.11 +
    2.12  void __init paging_init(void)
    2.13  {
    2.14      unsigned long v;
     3.1 --- a/xen/arch/x86/x86_64/compat/traps.c	Tue Jun 16 13:52:13 2009 +0100
     3.2 +++ b/xen/arch/x86/x86_64/compat/traps.c	Tue Jun 16 13:57:18 2009 +0100
     3.3 @@ -5,18 +5,46 @@
     3.4  #include <compat/callback.h>
     3.5  #include <compat/arch-x86_32.h>
     3.6  
     3.7 -void compat_show_guest_stack(struct cpu_user_regs *regs, int debug_stack_lines)
     3.8 +void compat_show_guest_stack(struct vcpu *v, struct cpu_user_regs *regs,
     3.9 +                             int debug_stack_lines)
    3.10  {
    3.11 -    unsigned int i, *stack, addr;
    3.12 +    unsigned int i, *stack, addr, mask = STACK_SIZE;
    3.13  
    3.14      stack = (unsigned int *)(unsigned long)regs->_esp;
    3.15      printk("Guest stack trace from esp=%08lx:\n ", (unsigned long)stack);
    3.16  
    3.17 +    if ( !__compat_access_ok(v->domain, stack, sizeof(*stack)) )
    3.18 +    {
    3.19 +        printk("Guest-inaccessible memory.\n");
    3.20 +        return;
    3.21 +    }
    3.22 +
    3.23 +    if ( v != current )
    3.24 +    {
    3.25 +        struct vcpu *vcpu;
    3.26 +
    3.27 +        ASSERT(guest_kernel_mode(v, regs));
    3.28 +        addr = read_cr3() >> PAGE_SHIFT;
    3.29 +        for_each_vcpu( v->domain, vcpu )
    3.30 +            if ( pagetable_get_pfn(vcpu->arch.guest_table) == addr )
    3.31 +                break;
    3.32 +        if ( !vcpu )
    3.33 +        {
    3.34 +            stack = do_page_walk(v, (unsigned long)stack);
    3.35 +            if ( (unsigned long)stack < PAGE_SIZE )
    3.36 +            {
    3.37 +                printk("Inaccessible guest memory.\n");
    3.38 +                return;
    3.39 +            }
    3.40 +            mask = PAGE_SIZE;
    3.41 +        }
    3.42 +    }
    3.43 +
    3.44      for ( i = 0; i < debug_stack_lines * 8; i++ )
    3.45      {
    3.46 -        if ( (((long)stack + 3) & (STACK_SIZE - 4)) == 0 )
    3.47 +        if ( (((long)stack - 1) ^ ((long)(stack + 1) - 1)) & mask )
    3.48              break;
    3.49 -        if ( get_user(addr, stack) )
    3.50 +        if ( __get_user(addr, stack) )
    3.51          {
    3.52              if ( i != 0 )
    3.53                  printk("\n    ");
     4.1 --- a/xen/arch/x86/x86_64/mm.c	Tue Jun 16 13:52:13 2009 +0100
     4.2 +++ b/xen/arch/x86/x86_64/mm.c	Tue Jun 16 13:57:18 2009 +0100
     4.3 @@ -103,6 +103,47 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l
     4.4      return l3e_to_l2e(*pl3e) + l2_table_offset(v);
     4.5  }
     4.6  
     4.7 +void *do_page_walk(struct vcpu *v, unsigned long addr)
     4.8 +{
     4.9 +    unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
    4.10 +    l4_pgentry_t l4e, *l4t;
    4.11 +    l3_pgentry_t l3e, *l3t;
    4.12 +    l2_pgentry_t l2e, *l2t;
    4.13 +    l1_pgentry_t l1e, *l1t;
    4.14 +
    4.15 +    if ( is_hvm_vcpu(v) )
    4.16 +        return NULL;
    4.17 +
    4.18 +    l4t = mfn_to_virt(mfn);
    4.19 +    l4e = l4t[l4_table_offset(addr)];
    4.20 +    mfn = l4e_get_pfn(l4e);
    4.21 +    if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
    4.22 +        return NULL;
    4.23 +
    4.24 +    l3t = mfn_to_virt(mfn);
    4.25 +    l3e = l3t[l3_table_offset(addr)];
    4.26 +    mfn = l3e_get_pfn(l3e);
    4.27 +    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
    4.28 +        return NULL;
    4.29 +    if ( (l3e_get_flags(l3e) & _PAGE_PSE) )
    4.30 +        return mfn_to_virt(mfn) + (addr & ((1UL << L3_PAGETABLE_SHIFT) - 1));
    4.30 +
    4.31 +    l2t = mfn_to_virt(mfn);
    4.32 +    l2e = l2t[l2_table_offset(addr)];
    4.33 +    mfn = l2e_get_pfn(l2e);
    4.34 +    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
    4.35 +        return NULL;
    4.36 +    if ( (l2e_get_flags(l2e) & _PAGE_PSE) )
    4.37 +        return mfn_to_virt(mfn) + (addr & ((1UL << L2_PAGETABLE_SHIFT) - 1));
    4.38 +
    4.39 +    l1t = mfn_to_virt(mfn);
    4.40 +    l1e = l1t[l1_table_offset(addr)];
    4.41 +    mfn = l1e_get_pfn(l1e);
    4.42 +    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
    4.43 +        return NULL;
    4.44 +
    4.45 +    return mfn_to_virt(mfn) + (addr & ~PAGE_MASK);
    4.46 +}
    4.47 +
    4.48  void __init paging_init(void)
    4.49  {
    4.50      unsigned long i, mpt_size, va;
     5.1 --- a/xen/include/asm-x86/mm.h	Tue Jun 16 13:52:13 2009 +0100
     5.2 +++ b/xen/include/asm-x86/mm.h	Tue Jun 16 13:57:18 2009 +0100
     5.3 @@ -475,6 +475,7 @@ int new_guest_cr3(unsigned long pfn);
     5.4  void make_cr3(struct vcpu *v, unsigned long mfn);
     5.5  void update_cr3(struct vcpu *v);
     5.6  void propagate_page_fault(unsigned long addr, u16 error_code);
     5.7 +void *do_page_walk(struct vcpu *v, unsigned long addr);
     5.8  
     5.9  int __sync_lazy_execstate(void);
    5.10  
     6.1 --- a/xen/include/asm-x86/processor.h	Tue Jun 16 13:52:13 2009 +0100
     6.2 +++ b/xen/include/asm-x86/processor.h	Tue Jun 16 13:57:18 2009 +0100
     6.3 @@ -536,9 +536,9 @@ void show_page_walk(unsigned long addr);
     6.4  asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs);
     6.5  
     6.6  #ifdef CONFIG_COMPAT
     6.7 -void compat_show_guest_stack(struct cpu_user_regs *, int lines);
     6.8 +void compat_show_guest_stack(struct vcpu *, struct cpu_user_regs *, int lines);
     6.9  #else
    6.10 -#define compat_show_guest_stack(regs, lines) ((void)0)
    6.11 +#define compat_show_guest_stack(vcpu, regs, lines) ((void)0)
    6.12  #endif
    6.13  
    6.14  extern void mtrr_ap_init(void);
     7.1 --- a/xen/include/asm-x86/x86_64/uaccess.h	Tue Jun 16 13:52:13 2009 +0100
     7.2 +++ b/xen/include/asm-x86/x86_64/uaccess.h	Tue Jun 16 13:57:18 2009 +0100
     7.3 @@ -27,11 +27,14 @@ DECLARE_PER_CPU(char, compat_arg_xlat[CO
     7.4  #define array_access_ok(addr, count, size) \
     7.5      (access_ok(addr, (count)*(size)))
     7.6  
     7.7 -#define __compat_addr_ok(addr) \
     7.8 -    ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(current->domain))
     7.9 +#define __compat_addr_ok(d, addr) \
    7.10 +    ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(d))
    7.11 +
    7.12 +#define __compat_access_ok(d, addr, size) \
    7.13 +    __compat_addr_ok(d, (unsigned long)(addr) + ((size) ? (size) - 1 : 0))
    7.14  
    7.15  #define compat_access_ok(addr, size) \
    7.16 -    __compat_addr_ok((unsigned long)(addr) + ((size) ? (size) - 1 : 0))
    7.17 +    __compat_access_ok(current->domain, addr, size)
    7.18  
    7.19  #define compat_array_access_ok(addr,count,size) \
    7.20      (likely((count) < (~0U / (size))) && \