ia64/xen-unstable

changeset 16349:4f1363491a77

ioemu: Do not use the PAGE_SHIFT/PAGE_SIZE/PAGE_MASK macros. Use the
libxc-provided XC_PAGE_SHIFT/XC_PAGE_SIZE/XC_PAGE_MASK versions instead.
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Wed Nov 07 16:51:08 2007 +0000 (2007-11-07)
parents 4fd6610949f1
children d4c5a1cdcf2e
files tools/ioemu/hw/pass-through.c tools/ioemu/hw/xen_machine_fv.c
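
For readers unfamiliar with the libxc constants, the sketch below shows the page
arithmetic the patch switches to. It is illustrative only and not part of the
changeset; it assumes the XC_PAGE_SHIFT and XC_PAGE_SIZE definitions exported by
libxc's xenctrl.h (4 KiB pages):

    #include <stdint.h>
    #include <xenctrl.h>   /* XC_PAGE_SHIFT, XC_PAGE_SIZE, XC_PAGE_MASK */

    /* Guest-physical address to the page frame number that libxc calls expect. */
    static unsigned long addr_to_pfn(uint64_t addr)
    {
        return addr >> XC_PAGE_SHIFT;
    }

    /* Whole pages needed to cover a region of 'size' bytes, rounding up. */
    static unsigned long size_to_pages(uint64_t size)
    {
        return (size + XC_PAGE_SIZE - 1) >> XC_PAGE_SHIFT;
    }

The same shift-by-XC_PAGE_SHIFT conversion recurs throughout the hunks below,
e.g. where pt_iomem_map() passes frame numbers and page counts to
xc_domain_memory_mapping().
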
line diff
     1.1 --- a/tools/ioemu/hw/pass-through.c	Wed Nov 07 15:20:06 2007 +0000
     1.2 +++ b/tools/ioemu/hw/pass-through.c	Wed Nov 07 16:51:08 2007 +0000
     1.3 @@ -20,8 +20,8 @@
     1.4   * Guy Zana <guy@neocleus.com>
     1.5   *
     1.6   * This file implements direct PCI assignment to a HVM guest
     1.7 - *
     1.8   */
     1.9 +
    1.10  #include "vl.h"
    1.11  #include "pass-through.h"
    1.12  #include "pci/header.h"
    1.13 @@ -128,9 +128,9 @@ void pt_iomem_map(PCIDevice *d, int i, u
    1.14      {
    1.15          /* Remove old mapping */
    1.16          ret = xc_domain_memory_mapping(xc_handle, domid,
    1.17 -                old_ebase >> PAGE_SHIFT,
    1.18 -                assigned_device->bases[i].access.maddr >> PAGE_SHIFT,
    1.19 -                (e_size+PAGE_MASK) >> PAGE_SHIFT,
    1.20 +                old_ebase >> XC_PAGE_SHIFT,
    1.21 +                assigned_device->bases[i].access.maddr >> XC_PAGE_SHIFT,
    1.22 +                (e_size+XC_PAGE_MASK) >> XC_PAGE_SHIFT,
    1.23                  DPCI_REMOVE_MAPPING);
    1.24          if ( ret != 0 )
    1.25          {
    1.26 @@ -141,9 +141,9 @@ void pt_iomem_map(PCIDevice *d, int i, u
    1.27  
    1.28      /* Create new mapping */
    1.29      ret = xc_domain_memory_mapping(xc_handle, domid,
    1.30 -            assigned_device->bases[i].e_physbase >> PAGE_SHIFT,
    1.31 -            assigned_device->bases[i].access.maddr >> PAGE_SHIFT,
    1.32 -            (e_size+PAGE_MASK) >> PAGE_SHIFT,
    1.33 +            assigned_device->bases[i].e_physbase >> XC_PAGE_SHIFT,
    1.34 +            assigned_device->bases[i].access.maddr >> XC_PAGE_SHIFT,
    1.35 +            (e_size+XC_PAGE_MASK) >> XC_PAGE_SHIFT,
    1.36              DPCI_ADD_MAPPING);
    1.37      if ( ret != 0 )
    1.38          PT_LOG("Error: create new mapping failed!\n");
     2.1 --- a/tools/ioemu/hw/xen_machine_fv.c	Wed Nov 07 15:20:06 2007 +0000
     2.2 +++ b/tools/ioemu/hw/xen_machine_fv.c	Wed Nov 07 16:51:08 2007 +0000
     2.3 @@ -27,13 +27,6 @@
     2.4  #include <xen/hvm/params.h>
     2.5  #include <sys/mman.h>
     2.6  
     2.7 -#ifndef PAGE_SIZE
     2.8 -#define PAGE_SIZE XC_PAGE_SIZE
     2.9 -#endif
    2.10 -#ifndef PAGE_SHIFT
    2.11 -#define PAGE_SHIFT XC_PAGE_SHIFT
    2.12 -#endif
    2.13 -
    2.14  #if defined(MAPCACHE)
    2.15  
    2.16  #if defined(__i386__) 
    2.17 @@ -57,7 +50,7 @@
    2.18  struct map_cache {
    2.19      unsigned long paddr_index;
    2.20      uint8_t      *vaddr_base;
    2.21 -    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>PAGE_SHIFT);
    2.22 +    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>XC_PAGE_SHIFT);
    2.23  };
    2.24  
    2.25  static struct map_cache *mapcache_entry;
    2.26 @@ -71,9 +64,9 @@ static int qemu_map_cache_init(void)
    2.27  {
    2.28      unsigned long size;
    2.29  
    2.30 -    nr_buckets = (((MAX_MCACHE_SIZE >> PAGE_SHIFT) +
    2.31 -                   (1UL << (MCACHE_BUCKET_SHIFT - PAGE_SHIFT)) - 1) >>
    2.32 -                  (MCACHE_BUCKET_SHIFT - PAGE_SHIFT));
    2.33 +    nr_buckets = (((MAX_MCACHE_SIZE >> XC_PAGE_SHIFT) +
    2.34 +                   (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
    2.35 +                  (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));
    2.36  
    2.37      /*
    2.38       * Use mmap() directly: lets us allocate a big hash table with no up-front
    2.39 @@ -81,7 +74,7 @@ static int qemu_map_cache_init(void)
    2.40       * that we actually use. All others will contain all zeroes.
    2.41       */
    2.42      size = nr_buckets * sizeof(struct map_cache);
    2.43 -    size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
    2.44 +    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    2.45      fprintf(logfile, "qemu_map_cache_init nr_buckets = %lx size %lu\n", nr_buckets, size);
    2.46      mapcache_entry = mmap(NULL, size, PROT_READ|PROT_WRITE,
    2.47                            MAP_SHARED|MAP_ANON, -1, 0);
    2.48 @@ -97,7 +90,7 @@ static void qemu_remap_bucket(struct map
    2.49                                unsigned long address_index)
    2.50  {
    2.51      uint8_t *vaddr_base;
    2.52 -    unsigned long pfns[MCACHE_BUCKET_SIZE >> PAGE_SHIFT];
    2.53 +    unsigned long pfns[MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT];
    2.54      unsigned int i, j;
    2.55  
    2.56      if (entry->vaddr_base != NULL) {
    2.57 @@ -108,11 +101,11 @@ static void qemu_remap_bucket(struct map
    2.58          }
    2.59      }
    2.60  
    2.61 -    for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i++)
    2.62 -        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-PAGE_SHIFT)) + i;
    2.63 +    for (i = 0; i < MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT; i++)
    2.64 +        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
    2.65  
    2.66      vaddr_base = xc_map_foreign_batch(xc_handle, domid, PROT_READ|PROT_WRITE,
    2.67 -                                      pfns, MCACHE_BUCKET_SIZE >> PAGE_SHIFT);
    2.68 +                                      pfns, MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT);
    2.69      if (vaddr_base == NULL) {
    2.70          fprintf(logfile, "xc_map_foreign_batch error %d\n", errno);
    2.71          exit(-1);
    2.72 @@ -121,10 +114,10 @@ static void qemu_remap_bucket(struct map
    2.73      entry->vaddr_base  = vaddr_base;
    2.74      entry->paddr_index = address_index;
    2.75  
    2.76 -    for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i += BITS_PER_LONG) {
    2.77 +    for (i = 0; i < MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT; i += BITS_PER_LONG) {
    2.78          unsigned long word = 0;
    2.79 -        j = ((i + BITS_PER_LONG) > (MCACHE_BUCKET_SIZE >> PAGE_SHIFT)) ?
    2.80 -            (MCACHE_BUCKET_SIZE >> PAGE_SHIFT) % BITS_PER_LONG : BITS_PER_LONG;
    2.81 +        j = ((i + BITS_PER_LONG) > (MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT)) ?
    2.82 +            (MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT) % BITS_PER_LONG : BITS_PER_LONG;
    2.83          while (j > 0)
    2.84              word = (word << 1) | (((pfns[i + --j] >> 28) & 0xf) != 0xf);
    2.85          entry->valid_mapping[i / BITS_PER_LONG] = word;
    2.86 @@ -143,10 +136,10 @@ uint8_t *qemu_map_cache(target_phys_addr
    2.87      entry = &mapcache_entry[address_index % nr_buckets];
    2.88  
    2.89      if (entry->vaddr_base == NULL || entry->paddr_index != address_index ||
    2.90 -        !test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
    2.91 +        !test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping))
    2.92          qemu_remap_bucket(entry, address_index);
    2.93  
    2.94 -    if (!test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
    2.95 +    if (!test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping))
    2.96          return NULL;
    2.97  
    2.98      last_address_index = address_index;
    2.99 @@ -215,7 +208,7 @@ static void xen_init_fv(uint64_t ram_siz
   2.100  
   2.101      xc_get_hvm_param(xc_handle, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
   2.102      fprintf(logfile, "shared page at pfn %lx\n", ioreq_pfn);
   2.103 -    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
   2.104 +    shared_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
   2.105                                         PROT_READ|PROT_WRITE, ioreq_pfn);
   2.106      if (shared_page == NULL) {
   2.107          fprintf(logfile, "map shared IO page returned error %d\n", errno);
   2.108 @@ -224,7 +217,7 @@ static void xen_init_fv(uint64_t ram_siz
   2.109  
   2.110      xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
   2.111      fprintf(logfile, "buffered io page at pfn %lx\n", ioreq_pfn);
   2.112 -    buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
   2.113 +    buffered_io_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
   2.114                                              PROT_READ|PROT_WRITE, ioreq_pfn);
   2.115      if (buffered_io_page == NULL) {
   2.116          fprintf(logfile, "map buffered IO page returned error %d\n", errno);
   2.117 @@ -233,7 +226,7 @@ static void xen_init_fv(uint64_t ram_siz
   2.118  
   2.119  #elif defined(__ia64__)
   2.120  
   2.121 -    nr_pages = ram_size/PAGE_SIZE;
   2.122 +    nr_pages = ram_size/XC_PAGE_SIZE;
   2.123  
   2.124      page_array = (xen_pfn_t *)malloc(nr_pages * sizeof(xen_pfn_t));
   2.125      if (page_array == NULL) {
   2.126 @@ -241,17 +234,17 @@ static void xen_init_fv(uint64_t ram_siz
   2.127          exit(-1);
   2.128      }
   2.129  
   2.130 -    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
   2.131 +    shared_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
   2.132                                         PROT_READ|PROT_WRITE,
   2.133 -                                       IO_PAGE_START >> PAGE_SHIFT);
    2.134 +                                       IO_PAGE_START >> XC_PAGE_SHIFT);
   2.135  
   2.136 -    buffered_io_page =xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
    2.137 +    buffered_io_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
   2.138                                         PROT_READ|PROT_WRITE,
   2.139 -                                       BUFFER_IO_PAGE_START >> PAGE_SHIFT);
    2.140 +                                       BUFFER_IO_PAGE_START >> XC_PAGE_SHIFT);
   2.141  
   2.142 -    buffered_pio_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
   2.143 +    buffered_pio_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
   2.144                                         PROT_READ|PROT_WRITE,
   2.145 -                                       BUFFER_PIO_PAGE_START >> PAGE_SHIFT);
    2.146 +                                       BUFFER_PIO_PAGE_START >> XC_PAGE_SHIFT);
   2.147  
   2.148      for (i = 0; i < nr_pages; i++)
   2.149          page_array[i] = i;
   2.150 @@ -259,9 +252,9 @@ static void xen_init_fv(uint64_t ram_siz
   2.151      /* VTI will not use memory between 3G~4G, so we just pass a legal pfn
   2.152         to make QEMU map continuous virtual memory space */
   2.153      if (ram_size > MMIO_START) {	
   2.154 -        for (i = 0 ; i < (MEM_G >> PAGE_SHIFT); i++)
   2.155 -            page_array[(MMIO_START >> PAGE_SHIFT) + i] =
   2.156 -                (STORE_PAGE_START >> PAGE_SHIFT); 
   2.157 +        for (i = 0 ; i < (MEM_G >> XC_PAGE_SHIFT); i++)
   2.158 +            page_array[(MMIO_START >> XC_PAGE_SHIFT) + i] =
    2.159 +                (STORE_PAGE_START >> XC_PAGE_SHIFT);
   2.160      }
   2.161  
   2.162      phys_ram_base = xc_map_foreign_batch(xc_handle, domid,