ia64/xen-unstable

changeset 11207:078bfd250677

[qemu] Support HVM guests with more than 3.75G memory.
Changes are:
1) The M2P table and e820 table are changed to skip the address space
from HVM_RAM_LIMIT_BELOW_4G to 4G.
2) Shared io page location: with less than HVM_RAM_LIMIT_BELOW_4G of
memory it is the last page of RAM, as today; otherwise it is the last
page of the first HVM_RAM_LIMIT_BELOW_4G of RAM.
3) In qemu-dm, the address space from HVM_RAM_LIMIT_BELOW_4G to 4G is
stuffed with the mfns that start at 4G, so the 1:1 mapping still works.
This is ugly, but another limit-check patch, like changeset 10757, will
prevent qemu-dm from accessing this range. This ugly stuffing will be
removed once the patch that removes the 1:1 mapping from qemu-dm is
accepted in the future.
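
For illustration, here is a minimal standalone sketch (not part of the
changeset) of the guest-physical layout the message describes: RAM pages
below HVM_BELOW_4G_RAM_END keep their pfns, pages beyond it are relocated
above 4G, and the shared io page sits either at the end of RAM or at the
end of the below-4G RAM. The helpers ram_page_to_gpfn() and shared_io_pfn()
are illustrative names, not functions in the tree.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT                  12
#define HVM_BELOW_4G_RAM_END        0xF0000000UL
#define HVM_BELOW_4G_MMIO_LENGTH    ((1ULL << 32) - HVM_BELOW_4G_RAM_END)

/* A RAM page keeps its index below the MMIO hole and is shifted above
 * 4G otherwise, matching the M2P/e820 changes in this patch. */
static unsigned long ram_page_to_gpfn(unsigned long count)
{
    if (count >= (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT))
        count += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
    return count;
}

/* The shared io page is the last RAM page for small guests, or the last
 * page below the MMIO hole when RAM exceeds HVM_BELOW_4G_RAM_END. */
static unsigned long shared_io_pfn(unsigned long long ram_size)
{
    if (ram_size > HVM_BELOW_4G_RAM_END)
        return (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT) - 1;
    return (unsigned long)(ram_size >> PAGE_SHIFT) - 1;
}

int main(void)
{
    /* A 6G guest: the first RAM page past the hole lands at 4G. */
    printf("gpfn of RAM page 0xf0000: %#lx\n", ram_page_to_gpfn(0xf0000));
    printf("shared io pfn (6G guest): %#lx\n", shared_io_pfn(6ULL << 30));
    return 0;
}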

Signed-off-by: Xin Li <xin.b.li@intel.com>
Signed-off-by: Christian Limpach <Christian.Limpach@xensource.com>
author Christian Limpach <Christian.Limpach@xensource.com>
date Thu Aug 17 20:31:13 2006 +0100 (2006-08-17)
parents 28e3132b9f93
children a7dcafb540be
files tools/ioemu/hw/piix_pci.c tools/ioemu/vl.c tools/ioemu/vl.h tools/libxc/xc_hvm_build.c xen/include/public/hvm/e820.h
line diff
     1.1 --- a/tools/ioemu/hw/piix_pci.c	Thu Aug 17 20:30:05 2006 +0100
     1.2 +++ b/tools/ioemu/hw/piix_pci.c	Thu Aug 17 20:31:13 2006 +0100
     1.3 @@ -415,7 +415,7 @@ void pci_bios_init(void)
     1.4      uint8_t elcr[2];
     1.5  
     1.6      pci_bios_io_addr = 0xc000;
     1.7 -    pci_bios_mem_addr = 0xf0000000;
     1.8 +    pci_bios_mem_addr = HVM_BELOW_4G_MMIO_START;
     1.9  
    1.10      /* activate IRQ mappings */
    1.11      elcr[0] = 0x00;
     2.1 --- a/tools/ioemu/vl.c	Thu Aug 17 20:30:05 2006 +0100
     2.2 +++ b/tools/ioemu/vl.c	Thu Aug 17 20:31:13 2006 +0100
     2.3 @@ -5835,7 +5835,7 @@ int main(int argc, char **argv)
     2.4      QEMUMachine *machine;
     2.5      char usb_devices[MAX_USB_CMDLINE][128];
     2.6      int usb_devices_index;
     2.7 -    unsigned long nr_pages;
     2.8 +    unsigned long nr_pages, tmp_nr_pages, shared_page_nr;
     2.9      xen_pfn_t *page_array;
    2.10      extern void *shared_page;
    2.11      extern void *buffered_io_page;
    2.12 @@ -6366,17 +6366,27 @@ int main(int argc, char **argv)
    2.13      /* init the memory */
    2.14      phys_ram_size = ram_size + vga_ram_size + bios_size;
    2.15  
    2.16 +#ifdef CONFIG_DM
    2.17 +
    2.18 +    xc_handle = xc_interface_open();
    2.19 +
    2.20  #if defined (__ia64__)
    2.21      if (ram_size > MMIO_START)
    2.22 -	ram_size += 1 * MEM_G; /* skip 3G-4G MMIO, LEGACY_IO_SPACE etc. */
    2.23 +        ram_size += 1 * MEM_G; /* skip 3G-4G MMIO, LEGACY_IO_SPACE etc. */
    2.24  #endif
    2.25  
    2.26 -#ifdef CONFIG_DM
    2.27 -
    2.28      nr_pages = ram_size/PAGE_SIZE;
    2.29 -    xc_handle = xc_interface_open();
    2.30 -
    2.31 -    page_array = (xen_pfn_t *)malloc(nr_pages * sizeof(xen_pfn_t));
    2.32 +    tmp_nr_pages = nr_pages;
    2.33 +
    2.34 +#if defined(__i386__) || defined(__x86_64__)
    2.35 +    if (ram_size > HVM_BELOW_4G_RAM_END) {
    2.36 +        tmp_nr_pages += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
    2.37 +        shared_page_nr = (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT) - 1;
    2.38 +    } else
    2.39 +        shared_page_nr = nr_pages - 1;
    2.40 +#endif
    2.41 +
    2.42 +    page_array = (xen_pfn_t *)malloc(tmp_nr_pages * sizeof(xen_pfn_t));
    2.43      if (page_array == NULL) {
    2.44          fprintf(logfile, "malloc returned error %d\n", errno);
    2.45          exit(-1);
    2.46 @@ -6388,25 +6398,40 @@ int main(int argc, char **argv)
    2.47          exit(-1);
    2.48      }
    2.49  
    2.50 +    if (ram_size > HVM_BELOW_4G_RAM_END)
    2.51 +        for (i = 0; i < nr_pages - (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT); i++)
    2.52 +            page_array[tmp_nr_pages - 1 - i] = page_array[nr_pages - 1 - i];
    2.53 +
    2.54      phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
    2.55                                           PROT_READ|PROT_WRITE, page_array,
    2.56 -                                         nr_pages - 3);
    2.57 -    if (phys_ram_base == 0) {
    2.58 -        fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
    2.59 +                                         tmp_nr_pages);
    2.60 +    if (phys_ram_base == NULL) {
    2.61 +        fprintf(logfile, "batch map guest memory returned error %d\n", errno);
    2.62          exit(-1);
    2.63      }
    2.64  
    2.65 +    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
    2.66 +                                       PROT_READ|PROT_WRITE,
    2.67 +                                       page_array[shared_page_nr]);
    2.68 +    if (shared_page == NULL) {
    2.69 +        fprintf(logfile, "map shared IO page returned error %d\n", errno);
    2.70 +        exit(-1);
    2.71 +    }
    2.72 +
    2.73 +    fprintf(logfile, "shared page at pfn:%lx, mfn: %"PRIx64"\n",
    2.74 +            shared_page_nr, (uint64_t)(page_array[shared_page_nr]));
    2.75 +
    2.76      /* not yet add for IA64 */
    2.77      buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
    2.78 -                                       PROT_READ|PROT_WRITE,
    2.79 -                                       page_array[nr_pages - 3]);
    2.80 -
    2.81 -    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
    2.82 -                                       PROT_READ|PROT_WRITE,
    2.83 -                                       page_array[nr_pages - 1]);
    2.84 -
    2.85 -    fprintf(logfile, "shared page at pfn:%lx, mfn: %"PRIx64"\n", nr_pages - 1,
    2.86 -            (uint64_t)(page_array[nr_pages - 1]));
    2.87 +                                            PROT_READ|PROT_WRITE,
    2.88 +                                            page_array[shared_page_nr - 2]);
    2.89 +    if (buffered_io_page == NULL) {
    2.90 +        fprintf(logfile, "map buffered IO page returned error %d\n", errno);
    2.91 +        exit(-1);
    2.92 +    }
    2.93 +
    2.94 +    fprintf(logfile, "buffered io page at pfn:%lx, mfn: %"PRIx64"\n",
    2.95 +            shared_page_nr - 2, (uint64_t)(page_array[shared_page_nr - 2]));
    2.96  
    2.97      free(page_array);
    2.98  
    2.99 @@ -6432,9 +6457,9 @@ int main(int argc, char **argv)
   2.100      }
   2.101  
   2.102      if (ram_size > MMIO_START) {	
   2.103 -	for (i = 0 ; i < MEM_G >> PAGE_SHIFT; i++)
   2.104 -	    page_array[MMIO_START >> PAGE_SHIFT + i] =
   2.105 -		page_array[IO_PAGE_START >> PAGE_SHIFT + 1];
   2.106 +        for (i = 0 ; i < MEM_G >> PAGE_SHIFT; i++)
   2.107 +            page_array[MMIO_START >> PAGE_SHIFT + i] =
   2.108 +                page_array[IO_PAGE_START >> PAGE_SHIFT + 1];
   2.109      }
   2.110  
   2.111      phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
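
The page_array reshuffle in the vl.c hunk above is the "stuffing" that keeps
qemu-dm's 1:1, pfn-indexed mapping intact. A tiny standalone sketch with
made-up sizes (below_4g_end_pfn, hole_pfns and the fake mfns are purely
illustrative) shows the effect: the mfns backing RAM above the hole move to
the slots for pfns above 4G, while the hole's slots keep stale entries that
qemu-dm must never touch.

#include <stdio.h>

int main(void)
{
    /* Pretend the below-4G RAM end is at pfn 6 and the hole is 2 pfns. */
    unsigned long below_4g_end_pfn = 6, hole_pfns = 2;
    unsigned long nr_pages = 8;                         /* guest RAM pages */
    unsigned long tmp_nr_pages = nr_pages + hole_pfns;  /* pfn-indexed span */
    unsigned long page_array[10];
    unsigned long i;

    for (i = 0; i < nr_pages; i++)
        page_array[i] = 0x1000 + i;                     /* fake mfns */

    /* Shift the tail so that slot index == guest pfn above the hole too. */
    for (i = 0; i < nr_pages - below_4g_end_pfn; i++)
        page_array[tmp_nr_pages - 1 - i] = page_array[nr_pages - 1 - i];

    for (i = 0; i < tmp_nr_pages; i++)
        printf("pfn %lu -> mfn %#lx%s\n", i, page_array[i],
               (i >= below_4g_end_pfn && i < below_4g_end_pfn + hole_pfns)
                   ? "  (MMIO hole, stale entry)" : "");
    return 0;
}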
     3.1 --- a/tools/ioemu/vl.h	Thu Aug 17 20:30:05 2006 +0100
     3.2 +++ b/tools/ioemu/vl.h	Thu Aug 17 20:31:13 2006 +0100
     3.3 @@ -39,6 +39,7 @@
     3.4  #include <sys/stat.h>
     3.5  #include "xenctrl.h"
     3.6  #include "xs.h"
     3.7 +#include <xen/hvm/e820.h>
     3.8  
     3.9  #ifndef O_LARGEFILE
    3.10  #define O_LARGEFILE 0
     4.1 --- a/tools/libxc/xc_hvm_build.c	Thu Aug 17 20:30:05 2006 +0100
     4.2 +++ b/tools/libxc/xc_hvm_build.c	Thu Aug 17 20:31:13 2006 +0100
     4.3 @@ -54,9 +54,19 @@ static void build_e820map(void *e820_pag
     4.4  {
     4.5      struct e820entry *e820entry =
     4.6          (struct e820entry *)(((unsigned char *)e820_page) + E820_MAP_OFFSET);
     4.7 +    unsigned long long extra_mem_size = 0;
     4.8      unsigned char nr_map = 0;
     4.9  
    4.10 -    /* XXX: Doesn't work for > 4GB yet */
    4.11 +    /*
    4.12 +     * physical address space from HVM_BELOW_4G_RAM_END to 4G is reserved
    4.13 +     * for PCI devices MMIO. So if HVM has more than HVM_BELOW_4G_RAM_END
    4.14 +     * RAM, memory beyond HVM_BELOW_4G_RAM_END will go to 4G above.
    4.15 +     */
    4.16 +    if ( mem_size > HVM_BELOW_4G_RAM_END ) {
    4.17 +        extra_mem_size = mem_size - HVM_BELOW_4G_RAM_END;
    4.18 +        mem_size = HVM_BELOW_4G_RAM_END;
    4.19 +    }
    4.20 +
    4.21      e820entry[nr_map].addr = 0x0;
    4.22      e820entry[nr_map].size = 0x9F000;
    4.23      e820entry[nr_map].type = E820_RAM;
    4.24 @@ -77,54 +87,87 @@ static void build_e820map(void *e820_pag
    4.25      e820entry[nr_map].type = E820_RESERVED;
    4.26      nr_map++;
    4.27  
    4.28 -#define STATIC_PAGES    3
    4.29 -    /* 3 static pages:
    4.30 -     * - ioreq buffer.
    4.31 -     * - xenstore.
    4.32 -     * - shared_page.
    4.33 -     */
    4.34 +/* ACPI data: 10 pages. */
    4.35 +#define ACPI_DATA_PAGES     10
    4.36 +/* ACPI NVS: 3 pages.   */
    4.37 +#define ACPI_NVS_PAGES      3
    4.38 +/* buffered io page.    */
    4.39 +#define BUFFERED_IO_PAGES   1
    4.40 +/* xenstore page.       */
    4.41 +#define XENSTORE_PAGES      1
    4.42 +/* shared io page.      */
    4.43 +#define SHARED_IO_PAGES     1
    4.44 +/* totally 16 static pages are reserved in E820 table */
    4.45  
    4.46      /* Most of the ram goes here */
    4.47      e820entry[nr_map].addr = 0x100000;
    4.48 -    e820entry[nr_map].size = mem_size - 0x100000 - STATIC_PAGES * PAGE_SIZE;
    4.49 +    e820entry[nr_map].size = mem_size - 0x100000 - PAGE_SIZE *
    4.50 +                                                (ACPI_DATA_PAGES +
    4.51 +                                                 ACPI_NVS_PAGES +
    4.52 +                                                 BUFFERED_IO_PAGES +
    4.53 +                                                 XENSTORE_PAGES +
    4.54 +                                                 SHARED_IO_PAGES);
    4.55      e820entry[nr_map].type = E820_RAM;
    4.56      nr_map++;
    4.57  
    4.58      /* Statically allocated special pages */
    4.59  
    4.60 +    /* For ACPI data */
    4.61 +    e820entry[nr_map].addr = mem_size - PAGE_SIZE *
    4.62 +                                        (ACPI_DATA_PAGES +
    4.63 +                                         ACPI_NVS_PAGES +
    4.64 +                                         BUFFERED_IO_PAGES +
    4.65 +                                         XENSTORE_PAGES +
    4.66 +                                         SHARED_IO_PAGES);
    4.67 +    e820entry[nr_map].size = PAGE_SIZE * ACPI_DATA_PAGES;
    4.68 +    e820entry[nr_map].type = E820_ACPI;
    4.69 +    nr_map++;
    4.70 +
    4.71 +    /* For ACPI NVS */
    4.72 +    e820entry[nr_map].addr = mem_size - PAGE_SIZE *
    4.73 +                                        (ACPI_NVS_PAGES +
    4.74 +                                         BUFFERED_IO_PAGES +
    4.75 +                                         XENSTORE_PAGES +
    4.76 +                                         SHARED_IO_PAGES);
    4.77 +    e820entry[nr_map].size = PAGE_SIZE * ACPI_NVS_PAGES;
    4.78 +    e820entry[nr_map].type = E820_NVS;
    4.79 +    nr_map++;
    4.80 +
    4.81      /* For buffered IO requests */
    4.82 -    e820entry[nr_map].addr = mem_size - 3 * PAGE_SIZE;
    4.83 -    e820entry[nr_map].size = PAGE_SIZE;
    4.84 +    e820entry[nr_map].addr = mem_size - PAGE_SIZE *
    4.85 +                                        (BUFFERED_IO_PAGES +
    4.86 +                                         XENSTORE_PAGES +
    4.87 +                                         SHARED_IO_PAGES);
    4.88 +    e820entry[nr_map].size = PAGE_SIZE * BUFFERED_IO_PAGES;
    4.89      e820entry[nr_map].type = E820_BUFFERED_IO;
    4.90      nr_map++;
    4.91  
    4.92      /* For xenstore */
    4.93 -    e820entry[nr_map].addr = mem_size - 2 * PAGE_SIZE;
    4.94 -    e820entry[nr_map].size = PAGE_SIZE;
    4.95 +    e820entry[nr_map].addr = mem_size - PAGE_SIZE *
    4.96 +                                        (XENSTORE_PAGES +
    4.97 +                                         SHARED_IO_PAGES);
    4.98 +    e820entry[nr_map].size = PAGE_SIZE * XENSTORE_PAGES;
    4.99      e820entry[nr_map].type = E820_XENSTORE;
   4.100      nr_map++;
   4.101  
   4.102      /* Shared ioreq_t page */
   4.103 -    e820entry[nr_map].addr = mem_size - PAGE_SIZE;
   4.104 -    e820entry[nr_map].size = PAGE_SIZE;
   4.105 +    e820entry[nr_map].addr = mem_size - PAGE_SIZE * SHARED_IO_PAGES;
   4.106 +    e820entry[nr_map].size = PAGE_SIZE * SHARED_IO_PAGES;
   4.107      e820entry[nr_map].type = E820_SHARED_PAGE;
   4.108      nr_map++;
   4.109  
   4.110 -    e820entry[nr_map].addr = mem_size;
   4.111 -    e820entry[nr_map].size = 0x3 * PAGE_SIZE;
   4.112 -    e820entry[nr_map].type = E820_NVS;
   4.113 -    nr_map++;
   4.114 -
   4.115 -    e820entry[nr_map].addr = mem_size + 0x3 * PAGE_SIZE;
   4.116 -    e820entry[nr_map].size = 0xA * PAGE_SIZE;
   4.117 -    e820entry[nr_map].type = E820_ACPI;
   4.118 -    nr_map++;
   4.119 -
   4.120      e820entry[nr_map].addr = 0xFEC00000;
   4.121      e820entry[nr_map].size = 0x1400000;
   4.122      e820entry[nr_map].type = E820_IO;
   4.123      nr_map++;
   4.124  
   4.125 +    if ( extra_mem_size ) {
   4.126 +        e820entry[nr_map].addr = (1ULL << 32);
   4.127 +        e820entry[nr_map].size = extra_mem_size;
   4.128 +        e820entry[nr_map].type = E820_RAM;
   4.129 +        nr_map++;
   4.130 +    }
   4.131 +
   4.132      *(((unsigned char *)e820_page) + E820_MAP_NR_OFFSET) = nr_map;
   4.133  }
   4.134  
   4.135 @@ -147,7 +190,7 @@ static void set_hvm_info_checksum(struct
   4.136   */
   4.137  static int set_hvm_info(int xc_handle, uint32_t dom,
   4.138                          xen_pfn_t *pfn_list, unsigned int vcpus,
   4.139 -                        unsigned int acpi, unsigned int apic)
   4.140 +                        unsigned int acpi)
   4.141  {
   4.142      char *va_map;
   4.143      struct hvm_info_table *va_hvm;
   4.144 @@ -171,8 +214,6 @@ static int set_hvm_info(int xc_handle, u
   4.145  
   4.146      munmap(va_map, PAGE_SIZE);
   4.147  
   4.148 -    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_APIC_ENABLED, apic);
   4.149 -
   4.150      return 0;
   4.151  }
   4.152  
   4.153 @@ -200,11 +241,7 @@ static int setup_guest(int xc_handle,
   4.154      struct domain_setup_info dsi;
   4.155      uint64_t v_end;
   4.156  
   4.157 -    unsigned long shared_page_frame = 0;
   4.158 -    shared_iopage_t *sp;
   4.159 -
   4.160 -    unsigned long ioreq_buffer_frame = 0;
   4.161 -    void *ioreq_buffer_page;
   4.162 +    unsigned long shared_page_nr;
   4.163  
   4.164      memset(&dsi, 0, sizeof(struct domain_setup_info));
   4.165  
   4.166 @@ -256,23 +293,38 @@ static int setup_guest(int xc_handle,
   4.167      /* Write the machine->phys table entries. */
   4.168      for ( count = 0; count < nr_pages; count++ )
   4.169      {
   4.170 +        unsigned long gpfn_count_skip;
   4.171 +
   4.172          ptr = (unsigned long long)page_array[count] << PAGE_SHIFT;
   4.173 +
   4.174 +        gpfn_count_skip = 0;
   4.175 +
   4.176 +        /*
   4.177 +         * physical address space from HVM_BELOW_4G_RAM_END to 4G is reserved
   4.178 +         * for PCI devices MMIO. So if HVM has more than HVM_BELOW_4G_RAM_END
   4.179 +         * RAM, memory beyond HVM_BELOW_4G_RAM_END will go to 4G above.
   4.180 +         */
   4.181 +        if ( count >= (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT) )
   4.182 +            gpfn_count_skip = HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
   4.183 +
   4.184          if ( xc_add_mmu_update(xc_handle, mmu,
   4.185 -                               ptr | MMU_MACHPHYS_UPDATE, count) )
   4.186 +                               ptr | MMU_MACHPHYS_UPDATE,
   4.187 +                               count + gpfn_count_skip) )
   4.188              goto error_out;
   4.189      }
   4.190  
   4.191 -    if ( set_hvm_info(xc_handle, dom, page_array, vcpus, acpi, apic) )
   4.192 +    if ( set_hvm_info(xc_handle, dom, page_array, vcpus, acpi) )
   4.193      {
   4.194          ERROR("Couldn't set hvm info for HVM guest.\n");
   4.195          goto error_out;
   4.196      }
   4.197  
   4.198      xc_set_hvm_param(xc_handle, dom, HVM_PARAM_PAE_ENABLED, pae);
   4.199 +    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_APIC_ENABLED, apic);
   4.200  
   4.201      if ( (e820_page = xc_map_foreign_range(
   4.202                xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
   4.203 -              page_array[E820_MAP_PAGE >> PAGE_SHIFT])) == 0 )
   4.204 +              page_array[E820_MAP_PAGE >> PAGE_SHIFT])) == NULL )
   4.205          goto error_out;
   4.206      memset(e820_page, 0, PAGE_SIZE);
   4.207      build_e820map(e820_page, v_end);
   4.208 @@ -281,7 +333,7 @@ static int setup_guest(int xc_handle,
   4.209      /* shared_info page starts its life empty. */
   4.210      if ( (shared_info = xc_map_foreign_range(
   4.211                xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
   4.212 -              shared_info_frame)) == 0 )
   4.213 +              shared_info_frame)) == NULL )
   4.214          goto error_out;
   4.215      memset(shared_info, 0, PAGE_SIZE);
   4.216      /* Mask all upcalls... */
   4.217 @@ -289,32 +341,25 @@ static int setup_guest(int xc_handle,
   4.218          shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
   4.219      munmap(shared_info, PAGE_SIZE);
   4.220  
   4.221 +    if ( v_end > HVM_BELOW_4G_RAM_END )
   4.222 +        shared_page_nr = (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT) - 1;
   4.223 +    else
   4.224 +        shared_page_nr = (v_end >> PAGE_SHIFT) - 1;
   4.225 +
   4.226 +    *store_mfn = page_array[shared_page_nr - 1];
   4.227 +
   4.228 +    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, *store_mfn);
   4.229 +    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);
   4.230 +
   4.231      /* Paranoia */
   4.232 -    shared_page_frame = page_array[(v_end >> PAGE_SHIFT) - 1];
   4.233 -    if ( (sp = (shared_iopage_t *) xc_map_foreign_range(
   4.234 -              xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
   4.235 -              shared_page_frame)) == 0 )
   4.236 +    /* clean the shared IO requests page */
   4.237 +    if ( xc_clear_domain_page(xc_handle, dom, page_array[shared_page_nr]) )
   4.238          goto error_out;
   4.239 -    memset(sp, 0, PAGE_SIZE);
   4.240 -    munmap(sp, PAGE_SIZE);
   4.241  
   4.242      /* clean the buffered IO requests page */
   4.243 -    ioreq_buffer_frame = page_array[(v_end >> PAGE_SHIFT) - 3];
   4.244 -    ioreq_buffer_page = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   4.245 -                                             PROT_READ | PROT_WRITE,
   4.246 -                                             ioreq_buffer_frame);
   4.247 -
   4.248 -    if ( ioreq_buffer_page == NULL )
   4.249 +    if ( xc_clear_domain_page(xc_handle, dom, page_array[shared_page_nr - 2]) )
   4.250          goto error_out;
   4.251  
   4.252 -    memset(ioreq_buffer_page, 0, PAGE_SIZE);
   4.253 -
   4.254 -    munmap(ioreq_buffer_page, PAGE_SIZE);
   4.255 -
   4.256 -    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, (v_end >> PAGE_SHIFT) - 2);
   4.257 -    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);
   4.258 -
   4.259 -    *store_mfn = page_array[(v_end >> PAGE_SHIFT) - 2];
   4.260      if ( xc_clear_domain_page(xc_handle, dom, *store_mfn) )
   4.261          goto error_out;
   4.262  
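
To see the arithmetic of the reworked build_e820map() in one place, here is a
small sketch (assuming a 6G guest and 4K pages; the program and its output
format are illustrative, not code from the tree) that prints where the 16
reserved special pages and the above-4G RAM entry end up.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE               0x1000ULL
#define HVM_BELOW_4G_RAM_END    0xF0000000ULL
#define ACPI_DATA_PAGES         10
#define ACPI_NVS_PAGES          3
#define BUFFERED_IO_PAGES       1
#define XENSTORE_PAGES          1
#define SHARED_IO_PAGES         1

int main(void)
{
    uint64_t mem_size = 6ULL << 30;       /* 6G guest */
    uint64_t extra_mem_size = 0;

    if (mem_size > HVM_BELOW_4G_RAM_END) {
        extra_mem_size = mem_size - HVM_BELOW_4G_RAM_END;
        mem_size = HVM_BELOW_4G_RAM_END;
    }

    /* 16 statically reserved pages at the top of below-4G RAM. */
    uint64_t acpi_data = mem_size - PAGE_SIZE * (ACPI_DATA_PAGES +
                             ACPI_NVS_PAGES + BUFFERED_IO_PAGES +
                             XENSTORE_PAGES + SHARED_IO_PAGES);
    uint64_t acpi_nvs  = acpi_data + PAGE_SIZE * ACPI_DATA_PAGES;
    uint64_t buf_io    = acpi_nvs  + PAGE_SIZE * ACPI_NVS_PAGES;
    uint64_t xenstore  = buf_io    + PAGE_SIZE * BUFFERED_IO_PAGES;
    uint64_t shared_io = xenstore  + PAGE_SIZE * XENSTORE_PAGES;

    printf("RAM (low)   0x00100000 - %#llx\n", (unsigned long long)acpi_data);
    printf("ACPI data   %#llx\n", (unsigned long long)acpi_data);
    printf("ACPI NVS    %#llx\n", (unsigned long long)acpi_nvs);
    printf("buffered io %#llx\n", (unsigned long long)buf_io);
    printf("xenstore    %#llx\n", (unsigned long long)xenstore);
    printf("shared io   %#llx\n", (unsigned long long)shared_io);
    if (extra_mem_size)
        printf("RAM (>4G)   %#llx - %#llx\n", 1ULL << 32,
               (1ULL << 32) + (unsigned long long)extra_mem_size);
    return 0;
}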
     5.1 --- a/xen/include/public/hvm/e820.h	Thu Aug 17 20:30:05 2006 +0100
     5.2 +++ b/xen/include/public/hvm/e820.h	Thu Aug 17 20:31:13 2006 +0100
     5.3 @@ -24,4 +24,9 @@ struct e820entry {
     5.4      uint32_t type;
     5.5  } __attribute__((packed));
     5.6  
     5.7 +#define HVM_BELOW_4G_RAM_END        0xF0000000
     5.8 +
     5.9 +#define HVM_BELOW_4G_MMIO_START     HVM_BELOW_4G_RAM_END
    5.10 +#define HVM_BELOW_4G_MMIO_LENGTH    ((1ULL << 32) - HVM_BELOW_4G_MMIO_START)
    5.11 +
    5.12  #endif /* __XEN_PUBLIC_HVM_E820_H__ */
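
As a quick sanity check on the new constants (a standalone sketch, not part of
the header), the below-4G RAM region and the MMIO window must tile the low 4G
exactly, which is also why pci_bios_mem_addr in piix_pci.c now starts at
HVM_BELOW_4G_MMIO_START:

#include <assert.h>

#define HVM_BELOW_4G_RAM_END        0xF0000000UL
#define HVM_BELOW_4G_MMIO_START     HVM_BELOW_4G_RAM_END
#define HVM_BELOW_4G_MMIO_LENGTH    ((1ULL << 32) - HVM_BELOW_4G_MMIO_START)

int main(void)
{
    /* 3.75G of RAM plus a 256M MMIO window ends exactly at 4G. */
    assert(HVM_BELOW_4G_MMIO_LENGTH == 0x10000000ULL);
    assert(HVM_BELOW_4G_MMIO_START + HVM_BELOW_4G_MMIO_LENGTH == (1ULL << 32));
    return 0;
}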