ia64/xen-unstable

changeset 9354:96d606c074f8

The current ptrace code traverses the page table structures to get the
guest physical address even when guest paging is disabled.
gdbserver-xen tries to access the guest PDEs and PTEs to map memory of
the HVM guest being debugged, and it gets a seg-fault because the guest
has not set up its paging yet. The attached patch adds a guest paging
state check so that map_domain_va() can get the correct guest physical
address from a guest virtual address.
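
For reference, the patched map_domain_va() gates the page-table walk on a
paging_enabled() helper. A minimal sketch of such a check, assuming it simply
tests the PE and PG bits of the guest's CR0 as saved in the vcpu context (the
actual helper used by xc_ptrace.c may be defined differently):

    /* Sketch only: assumes ctxt->ctrlreg[0] holds the guest's CR0.
     * The real paging_enabled() in the tree may differ. */
    #define X86_CR0_PE 0x00000001   /* protected mode enable */
    #define X86_CR0_PG 0x80000000   /* paging enable */

    static int paging_enabled(vcpu_guest_context_t *ctxt)
    {
        unsigned long cr0 = ctxt->ctrlreg[0];
        return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
    }

When this returns 0 (e.g. an HVM guest still running in real mode early in
boot), the patched code treats the requested virtual address as a guest
physical address instead of walking PDEs and PTEs.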

Signed-off-by: Nitin A Kamble <nitin.a.kamble@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Mar 21 11:29:17 2006 +0100 (2006-03-21)
parents eb2a2529f96c
children d0db2359769c
files tools/libxc/xc_ptrace.c
line diff
--- a/tools/libxc/xc_ptrace.c	Tue Mar 21 11:28:03 2006 +0100
+++ b/tools/libxc/xc_ptrace.c	Tue Mar 21 11:29:17 2006 +0100
@@ -251,35 +251,39 @@ map_domain_va(
     if (fetch_regs(xc_handle, cpu, NULL))
         return NULL;
 
-    if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] )
-    {
-        cr3_phys[cpu] = ctxt[cpu].ctrlreg[3];
-        if ( cr3_virt[cpu] )
-            munmap(cr3_virt[cpu], PAGE_SIZE);
-        cr3_virt[cpu] = xc_map_foreign_range(
-            xc_handle, current_domid, PAGE_SIZE, PROT_READ,
-            cr3_phys[cpu] >> PAGE_SHIFT);
-        if ( cr3_virt[cpu] == NULL )
+    if (paging_enabled(&ctxt[cpu])) {
+       if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] )
+        {
+            cr3_phys[cpu] = ctxt[cpu].ctrlreg[3];
+            if ( cr3_virt[cpu] )
+                munmap(cr3_virt[cpu], PAGE_SIZE);
+            cr3_virt[cpu] = xc_map_foreign_range(
+                xc_handle, current_domid, PAGE_SIZE, PROT_READ,
+                cr3_phys[cpu] >> PAGE_SHIFT);
+            if ( cr3_virt[cpu] == NULL )
+                return NULL;
+        }
+        if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
             return NULL;
+        if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
+            pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
+        if ( pde != pde_phys[cpu] )
+        {
+            pde_phys[cpu] = pde;
+            if ( pde_virt[cpu] )
+                munmap(pde_virt[cpu], PAGE_SIZE);
+            pde_virt[cpu] = xc_map_foreign_range(
+                xc_handle, current_domid, PAGE_SIZE, PROT_READ,
+                pde_phys[cpu] >> PAGE_SHIFT);
+            if ( pde_virt[cpu] == NULL )
+                return NULL;
+        }
+        if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
+            return NULL;
+    } else {
+        page = va;
     }
-    if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
-        return NULL;
-    if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
-        pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
-    if ( pde != pde_phys[cpu] )
-    {
-        pde_phys[cpu] = pde;
-        if ( pde_virt[cpu] )
-            munmap(pde_virt[cpu], PAGE_SIZE);
-        pde_virt[cpu] = xc_map_foreign_range(
-            xc_handle, current_domid, PAGE_SIZE, PROT_READ,
-            pde_phys[cpu] >> PAGE_SHIFT);
-        if ( pde_virt[cpu] == NULL )
-            return NULL;
-    }
-    if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
-        return NULL;
-    if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
+    if (ctxt[cpu].flags & VGCF_HVM_GUEST)
         page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
     if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
     {