ia64/xen-unstable

changeset 15845:de247793f7b5

[IA64] Foreign p2m: xc_core: ia64 xc_core_arch_gpfn_may_present()

Prevent a warning message when running xm dump-core.
By using foreign p2m exposure, we can avoid mapping pages which aren't allocated.
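
As a hedged illustration (not the actual xc_core.c code, which is unchanged by this patch), the common dump-core loop can consult the new predicate before touching each guest pfn; map_and_dump_page() and nr_pages below are hypothetical placeholders for this sketch:

    /* Illustrative sketch only: the real page loop lives in
     * tools/libxc/xc_core.c; map_and_dump_page() and nr_pages are
     * placeholders, not libxc APIs. */
    static int dump_guest_pages(int xc_handle, uint32_t domid,
                                struct xc_core_arch_context *arch_ctxt,
                                unsigned long nr_pages)
    {
        unsigned long pfn;

        for ( pfn = 0; pfn < nr_pages; pfn++ )
        {
            /* Skip pfns the foreign p2m table marks as not present;
             * without this check, mapping an unallocated page during
             * xm dump-core produces the warning this patch avoids. */
            if ( !xc_core_arch_gpfn_may_present(arch_ctxt, pfn) )
                continue;

            if ( map_and_dump_page(xc_handle, domid, pfn) < 0 )
                return -1;
        }
        return 0;
    }

When the p2m table could not be mapped (p2m == NULL), the predicate returns 1, so the caller falls back to attempting the mapping as before.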

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Sep 06 15:32:54 2007 -0600 (2007-09-06)
parents 005652f8e4e4
children e7c143aafbc1
files tools/libxc/xc_core_ia64.c tools/libxc/xc_core_ia64.h
line diff
     1.1 --- a/tools/libxc/xc_core_ia64.c	Thu Sep 06 14:41:14 2007 -0600
     1.2 +++ b/tools/libxc/xc_core_ia64.c	Thu Sep 06 15:32:54 2007 -0600
     1.3 @@ -24,6 +24,16 @@
     1.4  #include "xc_dom.h"
     1.5  #include <inttypes.h>
     1.6  
     1.7 +int
     1.8 +xc_core_arch_gpfn_may_present(struct xc_core_arch_context *arch_ctxt,
     1.9 +                              unsigned long pfn)
    1.10 +{
    1.11 +    if (arch_ctxt->p2m_table.p2m == NULL)
    1.12 +        return 1; /* default to trying to map the page */
    1.13 +
    1.14 +    return xc_ia64_p2m_present(&arch_ctxt->p2m_table, pfn);
    1.15 +}
    1.16 +
    1.17  static int
    1.18  xc_memory_map_cmp(const void *lhs__, const void *rhs__)
    1.19  {
    1.20 @@ -158,13 +168,18 @@ memory_map_get_old(int xc_handle, xc_dom
    1.21  }
    1.22  
    1.23  int
    1.24 -xc_core_arch_memory_map_get(int xc_handle, struct xc_core_arch_context *unused,
    1.25 +xc_core_arch_memory_map_get(int xc_handle,
    1.26 +                            struct xc_core_arch_context *arch_ctxt,
    1.27                              xc_dominfo_t *info, shared_info_t *live_shinfo,
    1.28                              xc_core_memory_map_t **mapp,
    1.29                              unsigned int *nr_entries)
    1.30  {
    1.31      int ret = -1;
    1.32 -    xen_ia64_memmap_info_t *memmap_info;
    1.33 +    unsigned int memmap_info_num_pages;
    1.34 +    unsigned long memmap_info_pfn;
    1.35 +
    1.36 +    xen_ia64_memmap_info_t *memmap_info_live;
    1.37 +    xen_ia64_memmap_info_t *memmap_info = NULL;
    1.38      unsigned long map_size;
    1.39      xc_core_memory_map_t *map;
    1.40      char *start;
    1.41 @@ -172,27 +187,46 @@ xc_core_arch_memory_map_get(int xc_handl
    1.42      char *p;
    1.43      efi_memory_desc_t *md;
    1.44  
    1.45 -    if  ( live_shinfo == NULL ||
    1.46 -          live_shinfo->arch.memmap_info_num_pages == 0 ||
    1.47 -          live_shinfo->arch.memmap_info_pfn == 0 )
    1.48 +    if ( live_shinfo == NULL )
    1.49 +    {
    1.50 +        ERROR("can't access shared info");
    1.51          goto old;
    1.52 +    }
    1.53  
    1.54 -    map_size = PAGE_SIZE * live_shinfo->arch.memmap_info_num_pages;
    1.55 -    memmap_info = xc_map_foreign_range(xc_handle, info->domid,
    1.56 -                                       map_size, PROT_READ,
    1.57 -                                       live_shinfo->arch.memmap_info_pfn);
    1.58 -    if ( memmap_info == NULL )
    1.59 +    /* copy before use in case someone updating them */
    1.60 +    memmap_info_num_pages = live_shinfo->arch.memmap_info_num_pages;
    1.61 +    memmap_info_pfn = live_shinfo->arch.memmap_info_pfn;
    1.62 +    if ( memmap_info_num_pages == 0 || memmap_info_pfn == 0 )
    1.63 +    {
    1.64 +        ERROR("memmap_info_num_pages 0x%x memmap_info_pfn 0x%lx",
    1.65 +              memmap_info_num_pages, memmap_info_pfn);
    1.66 +        goto old;
    1.67 +    }
    1.68 +
    1.69 +    map_size = PAGE_SIZE * memmap_info_num_pages;
    1.70 +    memmap_info_live = xc_map_foreign_range(xc_handle, info->domid,
    1.71 +                                       map_size, PROT_READ, memmap_info_pfn);
    1.72 +    if ( memmap_info_live == NULL )
    1.73      {
    1.74          PERROR("Could not map memmap info.");
    1.75          return -1;
    1.76      }
    1.77 +    memmap_info = malloc(map_size);
    1.78 +    if ( memmap_info == NULL )
    1.79 +    {
    1.80 +        munmap(memmap_info_live, map_size);
    1.81 +        return -1;
    1.82 +    }
    1.83 +    memcpy(memmap_info, memmap_info_live, map_size);    /* copy before use */
    1.84 +    munmap(memmap_info_live, map_size);
    1.85 +    
    1.86      if ( memmap_info->efi_memdesc_size != sizeof(*md) ||
    1.87           (memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size) == 0 ||
    1.88           memmap_info->efi_memmap_size > map_size - sizeof(memmap_info) ||
    1.89           memmap_info->efi_memdesc_version != EFI_MEMORY_DESCRIPTOR_VERSION )
    1.90      {
    1.91          PERROR("unknown memmap header. defaulting to compat mode.");
    1.92 -        munmap(memmap_info, PAGE_SIZE);
    1.93 +        free(memmap_info);
    1.94          goto old;
    1.95      }
    1.96  
    1.97 @@ -201,7 +235,8 @@ xc_core_arch_memory_map_get(int xc_handl
    1.98      if ( map == NULL )
    1.99      {
   1.100          PERROR("Could not allocate memory for memmap.");
   1.101 -        goto out;
   1.102 +        free(memmap_info);
   1.103 +        return -1;
   1.104      }
   1.105      *mapp = map;
   1.106  
   1.107 @@ -221,12 +256,16 @@ xc_core_arch_memory_map_get(int xc_handl
   1.108          (*nr_entries)++;
   1.109      }
   1.110      ret = 0;
   1.111 -out:
   1.112 -    munmap(memmap_info, map_size);
   1.113 +
   1.114 +    xc_ia64_p2m_map(&arch_ctxt->p2m_table, xc_handle, info->domid,
   1.115 +                    memmap_info, 0);
   1.116 +    if ( memmap_info != NULL )
   1.117 +        free(memmap_info);
   1.118      qsort(map, *nr_entries, sizeof(map[0]), &xc_memory_map_cmp);
   1.119      return ret;
   1.120      
   1.121  old:
   1.122 +    DPRINTF("Falling back old method.\n");
   1.123      return memory_map_get_old(xc_handle, info, live_shinfo, mapp, nr_entries);
   1.124  }
   1.125  
   1.126 @@ -253,6 +292,8 @@ xc_core_arch_context_init(struct xc_core
   1.127      arch_ctxt->nr_vcpus = 0;
   1.128      for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   1.129          arch_ctxt->mapped_regs[i] = NULL;
   1.130 +
   1.131 +    xc_ia64_p2m_init(&arch_ctxt->p2m_table);
   1.132  }
   1.133  
   1.134  void
   1.135 @@ -262,6 +303,7 @@ xc_core_arch_context_free(struct xc_core
   1.136      for ( i = 0; i < arch_ctxt->nr_vcpus; i++ )
   1.137          if ( arch_ctxt->mapped_regs[i] != NULL )
   1.138              munmap(arch_ctxt->mapped_regs[i], arch_ctxt->mapped_regs_size);
   1.139 +    xc_ia64_p2m_unmap(&arch_ctxt->p2m_table);
   1.140  }
   1.141  
   1.142  int
     2.1 --- a/tools/libxc/xc_core_ia64.h	Thu Sep 06 14:41:14 2007 -0600
     2.2 +++ b/tools/libxc/xc_core_ia64.h	Thu Sep 06 15:32:54 2007 -0600
     2.3 @@ -21,6 +21,8 @@
     2.4  #ifndef XC_CORE_IA64_H
     2.5  #define XC_CORE_IA64_H
     2.6  
     2.7 +#include "ia64/xc_ia64.h"
     2.8 +
     2.9  #define ELF_ARCH_DATA           ELFDATA2LSB
    2.10  #define ELF_ARCH_MACHINE        EM_IA_64
    2.11  
    2.12 @@ -28,6 +30,8 @@ struct xc_core_arch_context {
    2.13      size_t mapped_regs_size;
    2.14      int nr_vcpus;
    2.15      mapped_regs_t* mapped_regs[MAX_VIRT_CPUS];
    2.16 +
    2.17 +    struct xen_ia64_p2m_table p2m_table;
    2.18  };
    2.19  
    2.20  void
    2.21 @@ -46,7 +50,10 @@ xc_core_arch_context_get_shdr(struct xc_
    2.22  int
    2.23  xc_core_arch_context_dump(struct xc_core_arch_context* arch_ctxt,
    2.24                            void* args, dumpcore_rtn_t dump_rtn);
    2.25 -#define xc_core_arch_gpfn_may_present(arch_ctxt, i)             (1)
    2.26 +
    2.27 +int
    2.28 +xc_core_arch_gpfn_may_present(struct xc_core_arch_context *arch_ctxt,
    2.29 +                              unsigned long pfn);
    2.30  
    2.31  #endif /* XC_CORE_IA64_H */
    2.32