ia64/xen-unstable

changeset 11025:b16252dbcb1f

merge
author Ian.Campbell@xensource.com
date Wed Aug 09 11:29:06 2006 +0100 (2006-08-09)
parents c1850c659e40 01043d543878
children 54550e85f25a bd04004865ba
files	tools/ioemu/patches/series tools/ioemu/patches/xen-support-buffered-ioreqs tools/ioemu/target-i386-dm/helper2.c tools/ioemu/vl.c tools/libxc/xc_hvm_build.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/intercept.c xen/arch/x86/hvm/platform.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/e820.h xen/include/asm-x86/hvm/domain.h xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/support.h xen/include/public/hvm/ioreq.h
line diff
     1.1 --- a/tools/ioemu/patches/series	Wed Aug 09 11:27:28 2006 +0100
     1.2 +++ b/tools/ioemu/patches/series	Wed Aug 09 11:29:06 2006 +0100
     1.3 @@ -39,3 +39,4 @@ xenstore-write-vnc-port
     1.4  qemu-allow-disable-sdl
     1.5  qemu-fix-memset-args
     1.6  qemu-fix-write-to-disk-synchronous
     1.7 +xen-support-buffered-ioreqs
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/tools/ioemu/patches/xen-support-buffered-ioreqs	Wed Aug 09 11:29:06 2006 +0100
     2.3 @@ -0,0 +1,150 @@
     2.4 +Index: ioemu/vl.c
     2.5 +===================================================================
     2.6 +--- ioemu.orig/vl.c	2006-08-08 14:33:30.000000000 +0100
     2.7 ++++ ioemu/vl.c	2006-08-08 14:43:34.000000000 +0100
     2.8 +@@ -5834,6 +5834,7 @@
     2.9 +     unsigned long nr_pages;
    2.10 +     xen_pfn_t *page_array;
    2.11 +     extern void *shared_page;
    2.12 ++    extern void *buffered_io_page;
    2.13 + 
    2.14 +     char qemu_dm_logfilename[64];
    2.15 + 
    2.16 +@@ -6378,12 +6379,17 @@
    2.17 + 
    2.18 +     phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
    2.19 +                                          PROT_READ|PROT_WRITE, page_array,
    2.20 +-                                         nr_pages - 1);
    2.21 ++                                         nr_pages - 3);
    2.22 +     if (phys_ram_base == 0) {
    2.23 +         fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
    2.24 +         exit(-1);
    2.25 +     }
    2.26 + 
     2.27 ++    /* Buffered IO is not yet supported on IA64. */
    2.28 ++    buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
    2.29 ++                                       PROT_READ|PROT_WRITE,
    2.30 ++                                       page_array[nr_pages - 3]);
    2.31 ++
    2.32 +     shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
    2.33 +                                        PROT_READ|PROT_WRITE,
    2.34 +                                        page_array[nr_pages - 1]);
    2.35 +Index: ioemu/target-i386-dm/helper2.c
    2.36 +===================================================================
    2.37 +--- ioemu.orig/target-i386-dm/helper2.c	2006-08-08 14:33:30.000000000 +0100
    2.38 ++++ ioemu/target-i386-dm/helper2.c	2006-08-09 10:03:40.558744653 +0100
    2.39 +@@ -76,6 +76,10 @@
    2.40 + 
    2.41 + shared_iopage_t *shared_page = NULL;
    2.42 + 
    2.43 ++#define BUFFER_IO_MAX_DELAY  100
    2.44 ++buffered_iopage_t *buffered_io_page = NULL;
    2.45 ++QEMUTimer *buffered_io_timer;
    2.46 ++
    2.47 + /* the evtchn fd for polling */
    2.48 + int xce_handle = -1;
    2.49 + 
    2.50 +@@ -419,36 +423,68 @@
    2.51 +     req->u.data = tmp1;
    2.52 + }
    2.53 + 
    2.54 ++void __handle_ioreq(CPUState *env, ioreq_t *req)
    2.55 ++{
    2.56 ++    if (!req->pdata_valid && req->dir == IOREQ_WRITE && req->size != 4)
    2.57 ++	req->u.data &= (1UL << (8 * req->size)) - 1;
    2.58 ++
    2.59 ++    switch (req->type) {
    2.60 ++    case IOREQ_TYPE_PIO:
    2.61 ++        cpu_ioreq_pio(env, req);
    2.62 ++        break;
    2.63 ++    case IOREQ_TYPE_COPY:
    2.64 ++        cpu_ioreq_move(env, req);
    2.65 ++        break;
    2.66 ++    case IOREQ_TYPE_AND:
    2.67 ++        cpu_ioreq_and(env, req);
    2.68 ++        break;
    2.69 ++    case IOREQ_TYPE_OR:
    2.70 ++        cpu_ioreq_or(env, req);
    2.71 ++        break;
    2.72 ++    case IOREQ_TYPE_XOR:
    2.73 ++        cpu_ioreq_xor(env, req);
    2.74 ++        break;
    2.75 ++    default:
    2.76 ++        hw_error("Invalid ioreq type 0x%x\n", req->type);
    2.77 ++    }
    2.78 ++}
    2.79 ++
    2.80 ++void __handle_buffered_iopage(CPUState *env)
    2.81 ++{
    2.82 ++    ioreq_t *req = NULL;
    2.83 ++
    2.84 ++    if (!buffered_io_page)
    2.85 ++        return;
    2.86 ++
    2.87 ++    while (buffered_io_page->read_pointer !=
    2.88 ++           buffered_io_page->write_pointer) {
    2.89 ++        req = &buffered_io_page->ioreq[buffered_io_page->read_pointer %
    2.90 ++				       IOREQ_BUFFER_SLOT_NUM];
    2.91 ++
    2.92 ++        __handle_ioreq(env, req);
    2.93 ++
    2.94 ++        mb();
    2.95 ++        buffered_io_page->read_pointer++;
    2.96 ++    }
    2.97 ++}
    2.98 ++
    2.99 ++void handle_buffered_io(void *opaque)
   2.100 ++{
   2.101 ++    CPUState *env = opaque;
   2.102 ++
   2.103 ++    __handle_buffered_iopage(env);
   2.104 ++    qemu_mod_timer(buffered_io_timer, BUFFER_IO_MAX_DELAY +
   2.105 ++		   qemu_get_clock(rt_clock));
   2.106 ++}
   2.107 ++
   2.108 + void cpu_handle_ioreq(void *opaque)
   2.109 + {
   2.110 +     CPUState *env = opaque;
   2.111 +     ioreq_t *req = cpu_get_ioreq();
   2.112 + 
   2.113 ++    handle_buffered_io(env);
   2.114 +     if (req) {
   2.115 +-        if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
   2.116 +-            if (req->size != 4)
   2.117 +-                req->u.data &= (1UL << (8 * req->size))-1;
   2.118 +-        }
   2.119 +-
   2.120 +-        switch (req->type) {
   2.121 +-        case IOREQ_TYPE_PIO:
   2.122 +-            cpu_ioreq_pio(env, req);
   2.123 +-            break;
   2.124 +-        case IOREQ_TYPE_COPY:
   2.125 +-            cpu_ioreq_move(env, req);
   2.126 +-            break;
   2.127 +-        case IOREQ_TYPE_AND:
   2.128 +-            cpu_ioreq_and(env, req);
   2.129 +-            break;
   2.130 +-        case IOREQ_TYPE_OR:
   2.131 +-            cpu_ioreq_or(env, req);
   2.132 +-            break;
   2.133 +-        case IOREQ_TYPE_XOR:
   2.134 +-            cpu_ioreq_xor(env, req);
   2.135 +-            break;
   2.136 +-        default:
   2.137 +-            hw_error("Invalid ioreq type 0x%x\n", req->type);
   2.138 +-        }
   2.139 ++        __handle_ioreq(env, req);
   2.140 + 
   2.141 +         /* No state change if state = STATE_IORESP_HOOK */
   2.142 +         if (req->state == STATE_IOREQ_INPROCESS) {
   2.143 +@@ -466,6 +502,10 @@
   2.144 +     CPUState *env = cpu_single_env;
   2.145 +     int evtchn_fd = xc_evtchn_fd(xce_handle);
   2.146 + 
   2.147 ++    buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
   2.148 ++				       cpu_single_env);
   2.149 ++    qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));
   2.150 ++
   2.151 +     qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
   2.152 + 
   2.153 +     env->send_event = 0;
     3.1 --- a/tools/ioemu/target-i386-dm/helper2.c	Wed Aug 09 11:27:28 2006 +0100
     3.2 +++ b/tools/ioemu/target-i386-dm/helper2.c	Wed Aug 09 11:29:06 2006 +0100
     3.3 @@ -76,6 +76,10 @@ int xc_handle;
     3.4  
     3.5  shared_iopage_t *shared_page = NULL;
     3.6  
     3.7 +#define BUFFER_IO_MAX_DELAY  100
     3.8 +buffered_iopage_t *buffered_io_page = NULL;
     3.9 +QEMUTimer *buffered_io_timer;
    3.10 +
    3.11  /* the evtchn fd for polling */
    3.12  int xce_handle = -1;
    3.13  
    3.14 @@ -419,36 +423,68 @@ void cpu_ioreq_xor(CPUState *env, ioreq_
    3.15      req->u.data = tmp1;
    3.16  }
    3.17  
    3.18 +void __handle_ioreq(CPUState *env, ioreq_t *req)
    3.19 +{
    3.20 +    if (!req->pdata_valid && req->dir == IOREQ_WRITE && req->size != 4)
    3.21 +	req->u.data &= (1UL << (8 * req->size)) - 1;
    3.22 +
    3.23 +    switch (req->type) {
    3.24 +    case IOREQ_TYPE_PIO:
    3.25 +        cpu_ioreq_pio(env, req);
    3.26 +        break;
    3.27 +    case IOREQ_TYPE_COPY:
    3.28 +        cpu_ioreq_move(env, req);
    3.29 +        break;
    3.30 +    case IOREQ_TYPE_AND:
    3.31 +        cpu_ioreq_and(env, req);
    3.32 +        break;
    3.33 +    case IOREQ_TYPE_OR:
    3.34 +        cpu_ioreq_or(env, req);
    3.35 +        break;
    3.36 +    case IOREQ_TYPE_XOR:
    3.37 +        cpu_ioreq_xor(env, req);
    3.38 +        break;
    3.39 +    default:
    3.40 +        hw_error("Invalid ioreq type 0x%x\n", req->type);
    3.41 +    }
    3.42 +}
    3.43 +
    3.44 +void __handle_buffered_iopage(CPUState *env)
    3.45 +{
    3.46 +    ioreq_t *req = NULL;
    3.47 +
    3.48 +    if (!buffered_io_page)
    3.49 +        return;
    3.50 +
    3.51 +    while (buffered_io_page->read_pointer !=
    3.52 +           buffered_io_page->write_pointer) {
    3.53 +        req = &buffered_io_page->ioreq[buffered_io_page->read_pointer %
    3.54 +				       IOREQ_BUFFER_SLOT_NUM];
    3.55 +
    3.56 +        __handle_ioreq(env, req);
    3.57 +
    3.58 +        mb();
    3.59 +        buffered_io_page->read_pointer++;
    3.60 +    }
    3.61 +}
    3.62 +
    3.63 +void handle_buffered_io(void *opaque)
    3.64 +{
    3.65 +    CPUState *env = opaque;
    3.66 +
    3.67 +    __handle_buffered_iopage(env);
    3.68 +    qemu_mod_timer(buffered_io_timer, BUFFER_IO_MAX_DELAY +
    3.69 +		   qemu_get_clock(rt_clock));
    3.70 +}
    3.71 +
    3.72  void cpu_handle_ioreq(void *opaque)
    3.73  {
    3.74      CPUState *env = opaque;
    3.75      ioreq_t *req = cpu_get_ioreq();
    3.76  
    3.77 +    handle_buffered_io(env);
    3.78      if (req) {
    3.79 -        if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
    3.80 -            if (req->size != 4)
    3.81 -                req->u.data &= (1UL << (8 * req->size))-1;
    3.82 -        }
    3.83 -
    3.84 -        switch (req->type) {
    3.85 -        case IOREQ_TYPE_PIO:
    3.86 -            cpu_ioreq_pio(env, req);
    3.87 -            break;
    3.88 -        case IOREQ_TYPE_COPY:
    3.89 -            cpu_ioreq_move(env, req);
    3.90 -            break;
    3.91 -        case IOREQ_TYPE_AND:
    3.92 -            cpu_ioreq_and(env, req);
    3.93 -            break;
    3.94 -        case IOREQ_TYPE_OR:
    3.95 -            cpu_ioreq_or(env, req);
    3.96 -            break;
    3.97 -        case IOREQ_TYPE_XOR:
    3.98 -            cpu_ioreq_xor(env, req);
    3.99 -            break;
   3.100 -        default:
   3.101 -            hw_error("Invalid ioreq type 0x%x\n", req->type);
   3.102 -        }
   3.103 +        __handle_ioreq(env, req);
   3.104  
   3.105          /* No state change if state = STATE_IORESP_HOOK */
   3.106          if (req->state == STATE_IOREQ_INPROCESS) {
   3.107 @@ -466,6 +502,10 @@ int main_loop(void)
   3.108      CPUState *env = cpu_single_env;
   3.109      int evtchn_fd = xc_evtchn_fd(xce_handle);
   3.110  
   3.111 +    buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
   3.112 +				       cpu_single_env);
   3.113 +    qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));
   3.114 +
   3.115      qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
   3.116  
   3.117      env->send_event = 0;
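
The consumer added above follows a single-producer/single-consumer ring discipline: qemu-dm drains every slot between read_pointer and write_pointer, and the mb() before advancing read_pointer guarantees a slot has been fully read before the hypervisor may reuse it. A minimal stand-alone sketch of that discipline, assuming illustrative types and using __sync_synchronize() as a stand-in for mb(), rather than the Xen public headers:

/* Minimal user-space model of the buffered_iopage ring, consumer side.
 * The struct and barrier are illustrative stand-ins, not Xen headers. */
#include <stdio.h>

#define SLOT_NUM 80                        /* mirrors IOREQ_BUFFER_SLOT_NUM */

struct demo_ring {
    volatile unsigned long read_pointer;   /* advanced only by the consumer */
    volatile unsigned long write_pointer;  /* advanced only by the producer */
    int slot[SLOT_NUM];                    /* stands in for ioreq_t[] */
};

/* Drain every published slot, as __handle_buffered_iopage() does. */
static void drain(struct demo_ring *r)
{
    while (r->read_pointer != r->write_pointer) {
        int req = r->slot[r->read_pointer % SLOT_NUM];
        printf("consumed slot %lu: %d\n", r->read_pointer % SLOT_NUM, req);
        /* Barrier before advancing read_pointer (the mb() above): the
         * slot must be fully read before the producer may reuse it. */
        __sync_synchronize();
        r->read_pointer++;
    }
}

int main(void)
{
    struct demo_ring r = { 0, 0, { 0 } };
    for (int i = 0; i < 3; i++) {
        r.slot[r.write_pointer % SLOT_NUM] = i * 10;  /* fill slot ... */
        __sync_synchronize();                         /* ... then publish */
        r.write_pointer++;
    }
    drain(&r);
    return 0;
}
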
     4.1 --- a/tools/ioemu/vl.c	Wed Aug 09 11:27:28 2006 +0100
     4.2 +++ b/tools/ioemu/vl.c	Wed Aug 09 11:29:06 2006 +0100
     4.3 @@ -5834,6 +5834,7 @@ int main(int argc, char **argv)
     4.4      unsigned long nr_pages;
     4.5      xen_pfn_t *page_array;
     4.6      extern void *shared_page;
     4.7 +    extern void *buffered_io_page;
     4.8  
     4.9      char qemu_dm_logfilename[64];
    4.10  
    4.11 @@ -6378,12 +6379,17 @@ int main(int argc, char **argv)
    4.12  
    4.13      phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
    4.14                                           PROT_READ|PROT_WRITE, page_array,
    4.15 -                                         nr_pages - 1);
    4.16 +                                         nr_pages - 3);
    4.17      if (phys_ram_base == 0) {
    4.18          fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
    4.19          exit(-1);
    4.20      }
    4.21  
     4.22 +    /* Buffered IO is not yet supported on IA64. */
    4.23 +    buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
    4.24 +                                       PROT_READ|PROT_WRITE,
    4.25 +                                       page_array[nr_pages - 3]);
    4.26 +
    4.27      shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
    4.28                                         PROT_READ|PROT_WRITE,
    4.29                                         page_array[nr_pages - 1]);
     5.1 --- a/tools/libxc/xc_hvm_build.c	Wed Aug 09 11:27:28 2006 +0100
     5.2 +++ b/tools/libxc/xc_hvm_build.c	Wed Aug 09 11:29:06 2006 +0100
     5.3 @@ -26,6 +26,7 @@
     5.4  #define E820_IO          16
     5.5  #define E820_SHARED_PAGE 17
     5.6  #define E820_XENSTORE    18
     5.7 +#define E820_BUFFERED_IO 19
     5.8  
     5.9  #define E820_MAP_PAGE       0x00090000
    5.10  #define E820_MAP_NR_OFFSET  0x000001E8
    5.11 @@ -96,7 +97,13 @@ static void build_e820map(void *e820_pag
    5.12      e820entry[nr_map].type = E820_RESERVED;
    5.13      nr_map++;
    5.14  
    5.15 -#define STATIC_PAGES    2       /* for ioreq_t and store_mfn */
    5.16 +#define STATIC_PAGES    3
     5.17 +    /* Three statically allocated pages:
     5.18 +     * - buffered ioreq page.
     5.19 +     * - xenstore page.
     5.20 +     * - shared ioreq page.
     5.21 +     */
    5.22 +
    5.23      /* Most of the ram goes here */
    5.24      e820entry[nr_map].addr = 0x100000;
    5.25      e820entry[nr_map].size = mem_size - 0x100000 - STATIC_PAGES * PAGE_SIZE;
    5.26 @@ -105,6 +112,12 @@ static void build_e820map(void *e820_pag
    5.27  
    5.28      /* Statically allocated special pages */
    5.29  
    5.30 +    /* For buffered IO requests */
    5.31 +    e820entry[nr_map].addr = mem_size - 3 * PAGE_SIZE;
    5.32 +    e820entry[nr_map].size = PAGE_SIZE;
    5.33 +    e820entry[nr_map].type = E820_BUFFERED_IO;
    5.34 +    nr_map++;
    5.35 +
    5.36      /* For xenstore */
    5.37      e820entry[nr_map].addr = mem_size - 2 * PAGE_SIZE;
    5.38      e820entry[nr_map].size = PAGE_SIZE;
    5.39 @@ -213,6 +226,9 @@ static int setup_guest(int xc_handle,
    5.40      unsigned long shared_page_frame = 0;
    5.41      shared_iopage_t *sp;
    5.42  
    5.43 +    unsigned long ioreq_buffer_frame = 0;
    5.44 +    void *ioreq_buffer_page;
    5.45 +
    5.46      memset(&dsi, 0, sizeof(struct domain_setup_info));
    5.47  
    5.48      if ( (parseelfimage(image, image_size, &dsi)) != 0 )
    5.49 @@ -303,6 +319,19 @@ static int setup_guest(int xc_handle,
    5.50      memset(sp, 0, PAGE_SIZE);
    5.51      munmap(sp, PAGE_SIZE);
    5.52  
     5.53 +    /* Zero the buffered ioreq page. */
    5.54 +    ioreq_buffer_frame = page_array[(v_end >> PAGE_SHIFT) - 3];
    5.55 +    ioreq_buffer_page = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
    5.56 +                                             PROT_READ | PROT_WRITE,
    5.57 +                                             ioreq_buffer_frame);
    5.58 +
    5.59 +    if ( ioreq_buffer_page == NULL )
    5.60 +        goto error_out;
    5.61 +
    5.62 +    memset(ioreq_buffer_page, 0, PAGE_SIZE);
    5.63 +
    5.64 +    munmap(ioreq_buffer_page, PAGE_SIZE);
    5.65 +
    5.66      xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, (v_end >> PAGE_SHIFT) - 2);
    5.67      xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);
    5.68  
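
Together with the vl.c hunk above, this fixes the meaning of the guest's top three frames: nr_pages - 1 is the shared (synchronous) ioreq page, nr_pages - 2 the xenstore page, and nr_pages - 3 the new buffered ioreq page. A small sketch of the resulting layout, assuming an illustrative PAGE_SIZE and guest size:

/* Sketch of the guest-physical layout of the three static pages that
 * build_e820map() now reserves; PAGE_SIZE and the 128 MiB guest size
 * are illustrative. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    unsigned long mem_size = 128UL << 20;            /* 0x8000000 */

    printf("buffered ioreq page: %#lx\n", mem_size - 3 * PAGE_SIZE);
    printf("xenstore page:       %#lx\n", mem_size - 2 * PAGE_SIZE);
    printf("shared ioreq page:   %#lx\n", mem_size - 1 * PAGE_SIZE);

    /* qemu-dm maps the same frames by index:
     *   page_array[nr_pages - 3] -> buffered_io_page
     *   page_array[nr_pages - 2] -> xenstore page
     *   page_array[nr_pages - 1] -> shared_page
     * which is why xc_map_foreign_batch() now stops at nr_pages - 3. */
    return 0;
}
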
     6.1 --- a/xen/arch/x86/hvm/hvm.c	Wed Aug 09 11:27:28 2006 +0100
     6.2 +++ b/xen/arch/x86/hvm/hvm.c	Wed Aug 09 11:29:06 2006 +0100
     6.3 @@ -134,15 +134,28 @@ static void e820_map_io_shared_callback(
     6.4      }
     6.5  }
     6.6  
     6.7 -void hvm_map_io_shared_page(struct vcpu *v)
     6.8 +static void e820_map_buffered_io_callback(struct domain *d,
     6.9 +                                          struct e820entry *e,
    6.10 +                                          void *data)
    6.11  {
    6.12 -    unsigned long mfn = INVALID_MFN;
    6.13 +    unsigned long *mfn = data;
    6.14 +    if ( e->type == E820_BUFFERED_IO ) {
    6.15 +        ASSERT(*mfn == INVALID_MFN);
    6.16 +        *mfn = gmfn_to_mfn(d, e->addr >> PAGE_SHIFT);
    6.17 +    }
    6.18 +}
    6.19 +
    6.20 +void hvm_map_io_shared_pages(struct vcpu *v)
    6.21 +{
    6.22 +    unsigned long mfn;
    6.23      void *p;
    6.24      struct domain *d = v->domain;
    6.25  
    6.26 -    if ( d->arch.hvm_domain.shared_page_va )
    6.27 +    if ( d->arch.hvm_domain.shared_page_va ||
    6.28 +         d->arch.hvm_domain.buffered_io_va )
    6.29          return;
    6.30  
    6.31 +    mfn = INVALID_MFN;
    6.32      e820_foreach(d, e820_map_io_shared_callback, &mfn);
    6.33  
    6.34      if ( mfn == INVALID_MFN )
    6.35 @@ -159,6 +172,14 @@ void hvm_map_io_shared_page(struct vcpu 
    6.36      }
    6.37  
    6.38      d->arch.hvm_domain.shared_page_va = (unsigned long)p;
    6.39 +
    6.40 +    mfn = INVALID_MFN;
    6.41 +    e820_foreach(d, e820_map_buffered_io_callback, &mfn);
    6.42 +    if ( mfn != INVALID_MFN ) {
    6.43 +        p = map_domain_page_global(mfn);
    6.44 +        if ( p )
    6.45 +            d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
    6.46 +    }
    6.47  }
    6.48  
    6.49  void hvm_create_event_channels(struct vcpu *v)
    6.50 @@ -210,6 +231,8 @@ void hvm_setup_platform(struct domain* d
    6.51          hvm_vioapic_init(d);
    6.52      }
    6.53  
    6.54 +    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
    6.55 +
    6.56      init_timer(&platform->pl_time.periodic_tm.timer,
    6.57                 pt_timer_fn, v, v->processor);
    6.58      pit_init(v, cpu_khz);
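
hvm_map_io_shared_pages() locates each special page by walking the guest's e820 map with a callback, as the new e820_map_buffered_io_callback() does for E820_BUFFERED_IO entries. A stand-alone model of that idiom, with e820_foreach() and the frame arithmetic simplified to illustrative stand-ins:

/* Stand-alone model of the e820-walk idiom used by
 * hvm_map_io_shared_pages(); e820_foreach() and the 4 KiB frame
 * arithmetic are simplified stand-ins for the Xen internals. */
#include <stdio.h>

#define E820_BUFFERED_IO 19
#define INVALID_MFN      (~0UL)

struct e820entry { unsigned long addr, size; unsigned int type; };

typedef void (*e820_cb)(const struct e820entry *e, void *data);

/* Stand-in for e820_foreach(): apply cb to every map entry. */
static void for_each_entry(const struct e820entry *map, int n,
                           e820_cb cb, void *data)
{
    for (int i = 0; i < n; i++)
        cb(&map[i], data);
}

/* Mirrors e820_map_buffered_io_callback(): record the frame of the
 * E820_BUFFERED_IO entry. */
static void find_buffered_io(const struct e820entry *e, void *data)
{
    unsigned long *mfn = data;
    if (e->type == E820_BUFFERED_IO)
        *mfn = e->addr >> 12;
}

int main(void)
{
    const struct e820entry map[] = {
        { 0x100000,  0x7EFD000, 1 },                /* guest RAM */
        { 0x7FFD000, 0x1000, E820_BUFFERED_IO },    /* buffered ioreq page */
    };
    unsigned long mfn = INVALID_MFN;

    for_each_entry(map, 2, find_buffered_io, &mfn);
    printf("buffered ioreq frame: %#lx\n", mfn);    /* prints 0x7ffd */
    return 0;
}
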
     7.1 --- a/xen/arch/x86/hvm/intercept.c	Wed Aug 09 11:27:28 2006 +0100
     7.2 +++ b/xen/arch/x86/hvm/intercept.c	Wed Aug 09 11:29:06 2006 +0100
     7.3 @@ -36,12 +36,26 @@ extern struct hvm_mmio_handler vioapic_m
     7.4  
     7.5  #define HVM_MMIO_HANDLER_NR 2
     7.6  
     7.7 -struct hvm_mmio_handler *hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
     7.8 +static struct hvm_mmio_handler *hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
     7.9  {
    7.10      &vlapic_mmio_handler,
    7.11      &vioapic_mmio_handler
    7.12  };
    7.13  
    7.14 +struct hvm_buffered_io_range {
    7.15 +    unsigned long start_addr;
    7.16 +    unsigned long length;
    7.17 +};
    7.18 +
    7.19 +#define HVM_BUFFERED_IO_RANGE_NR 1
    7.20 +
    7.21 +static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
    7.22 +static struct hvm_buffered_io_range
    7.23 +*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
    7.24 +{
    7.25 +    &buffered_stdvga_range
    7.26 +};
    7.27 +
    7.28  static inline void hvm_mmio_access(struct vcpu *v,
    7.29                                     ioreq_t *p,
    7.30                                     hvm_mmio_read_t read_handler,
    7.31 @@ -140,6 +154,56 @@ static inline void hvm_mmio_access(struc
    7.32      }
    7.33  }
    7.34  
    7.35 +int hvm_buffered_io_intercept(ioreq_t *p)
    7.36 +{
    7.37 +    struct vcpu *v = current;
    7.38 +    spinlock_t  *buffered_io_lock;
    7.39 +    buffered_iopage_t *buffered_iopage =
    7.40 +        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
    7.41 +    unsigned long tmp_write_pointer = 0;
    7.42 +    int i;
    7.43 +
     7.44 +    /* Ignore reads: they need a synchronous response and cannot be buffered. */
    7.45 +    if ( p->dir == IOREQ_READ )
    7.46 +        return 0;
    7.47 +
    7.48 +    for ( i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++ ) {
    7.49 +        if ( p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
    7.50 +             p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
    7.51 +                                     hvm_buffered_io_ranges[i]->length )
    7.52 +            break;
    7.53 +    }
    7.54 +
    7.55 +    if ( i == HVM_BUFFERED_IO_RANGE_NR )
    7.56 +        return 0;
    7.57 +
    7.58 +    buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
    7.59 +    spin_lock(buffered_io_lock);
    7.60 +
    7.61 +    if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
    7.62 +         (unsigned long)IOREQ_BUFFER_SLOT_NUM ) {
     7.63 +        /* The queue is full, so send this ioreq through the
     7.64 +         * normal synchronous path.
     7.65 +         * NOTE: unsigned arithmetic keeps this fill-level test
     7.66 +         * correct even when write_pointer overflows.
     7.67 +         */
    7.68 +        spin_unlock(buffered_io_lock);
    7.69 +        return 0;
    7.70 +    }
    7.71 +
    7.72 +    tmp_write_pointer = buffered_iopage->write_pointer % IOREQ_BUFFER_SLOT_NUM;
    7.73 +
    7.74 +    memcpy(&buffered_iopage->ioreq[tmp_write_pointer], p, sizeof(ioreq_t));
    7.75 +
     7.76 +    /* Make the ioreq_t visible before updating write_pointer. */
    7.77 +    wmb();
    7.78 +    buffered_iopage->write_pointer++;
    7.79 +
    7.80 +    spin_unlock(buffered_io_lock);
    7.81 +
    7.82 +    return 1;
    7.83 +}
    7.84 +
    7.85  int hvm_mmio_intercept(ioreq_t *p)
    7.86  {
    7.87      struct vcpu *v = current;
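
The full-ring test in hvm_buffered_io_intercept() leans on unsigned wraparound: write_pointer - read_pointer is the number of unconsumed slots modulo 2^BITS_PER_LONG, which is exact as long as the ring never holds more than IOREQ_BUFFER_SLOT_NUM entries. A tiny demonstration with illustrative constants:

/* Demonstration that the full-ring test above survives write_pointer
 * overflow: the unsigned difference stays equal to the fill level. */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define SLOT_NUM 80UL

int main(void)
{
    unsigned long wp = ULONG_MAX - 2;   /* producer about to wrap */
    unsigned long rp = wp - 5;          /* five entries pending */

    for (int i = 0; i < 10; i++) {
        assert(wp - rp == 5);           /* fill level stays exact ... */
        assert(wp - rp < SLOT_NUM);     /* ... so the full test is sound */
        wp++;                           /* producer queues one (wraps) */
        rp++;                           /* consumer drains one */
    }
    printf("fill level correct across wraparound\n");
    return 0;
}
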
     8.1 --- a/xen/arch/x86/hvm/platform.c	Wed Aug 09 11:27:28 2006 +0100
     8.2 +++ b/xen/arch/x86/hvm/platform.c	Wed Aug 09 11:29:06 2006 +0100
     8.3 @@ -779,7 +779,7 @@ void send_mmio_req(
     8.4      } else
     8.5          p->u.data = value;
     8.6  
     8.7 -    if (hvm_mmio_intercept(p)){
     8.8 +    if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) ) {
     8.9          p->state = STATE_IORESP_READY;
    8.10          hvm_io_assist(v);
    8.11          return;
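
With this change, a request accepted by either hvm_mmio_intercept() or hvm_buffered_io_intercept() is marked STATE_IORESP_READY at once, so the vcpu never waits on qemu-dm for buffered writes. A toy model of the dispatch; the address predicates are illustrative and, unlike the real code, ignore the read/write direction:

/* Toy model of the new dispatch in send_mmio_req(): requests taken by
 * either intercept complete immediately; only the fall-through case
 * blocks the vcpu on the device model. */
#include <stdbool.h>
#include <stdio.h>

static bool mmio_intercept(unsigned long addr)
{
    return addr < 0xA0000;                     /* e.g. vlapic/vioapic */
}

static bool buffered_io_intercept(unsigned long addr)
{
    return addr >= 0xA0000 && addr < 0xC0000;  /* stdvga write range */
}

static void send_req(unsigned long addr)
{
    if (mmio_intercept(addr)) {
        printf("%#lx: handled inside the hypervisor\n", addr);
        return;
    }
    if (buffered_io_intercept(addr)) {
        printf("%#lx: queued on the ring, vcpu resumes at once\n", addr);
        return;
    }
    printf("%#lx: synchronous ioreq, vcpu waits for qemu-dm\n", addr);
}

int main(void)
{
    send_req(0x9F000);   /* in-hypervisor MMIO handler */
    send_req(0xA1234);   /* VGA write: buffered */
    send_req(0xF0000);   /* neither: slow path */
    return 0;
}
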
     9.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed Aug 09 11:27:28 2006 +0100
     9.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Aug 09 11:29:06 2006 +0100
     9.3 @@ -810,6 +810,9 @@ static void svm_relinquish_guest_resourc
     9.4          unmap_domain_page_global(
     9.5              (void *)d->arch.hvm_domain.shared_page_va);
     9.6  
     9.7 +    if ( d->arch.hvm_domain.buffered_io_va )
     9.8 +        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
     9.9 +
    9.10      shadow_direct_map_clean(d);
    9.11  }
    9.12  
    10.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Aug 09 11:27:28 2006 +0100
    10.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Aug 09 11:29:06 2006 +0100
    10.3 @@ -151,6 +151,9 @@ static void vmx_relinquish_guest_resourc
    10.4          unmap_domain_page_global(
    10.5  	        (void *)d->arch.hvm_domain.shared_page_va);
    10.6  
    10.7 +    if ( d->arch.hvm_domain.buffered_io_va )
    10.8 +        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
    10.9 +
   10.10      shadow_direct_map_clean(d);
   10.11  }
   10.12  
    11.1 --- a/xen/include/asm-x86/e820.h	Wed Aug 09 11:27:28 2006 +0100
    11.2 +++ b/xen/include/asm-x86/e820.h	Wed Aug 09 11:29:06 2006 +0100
    11.3 @@ -12,6 +12,7 @@
    11.4  #define E820_IO          16
    11.5  #define E820_SHARED_PAGE 17
    11.6  #define E820_XENSTORE    18
    11.7 +#define E820_BUFFERED_IO 19
    11.8  
    11.9  #define E820_MAP_PAGE        0x00090000
   11.10  #define E820_MAP_NR_OFFSET   0x000001E8
    12.1 --- a/xen/include/asm-x86/hvm/domain.h	Wed Aug 09 11:27:28 2006 +0100
    12.2 +++ b/xen/include/asm-x86/hvm/domain.h	Wed Aug 09 11:29:06 2006 +0100
    12.3 @@ -33,6 +33,8 @@
    12.4  
    12.5  struct hvm_domain {
    12.6      unsigned long          shared_page_va;
    12.7 +    unsigned long          buffered_io_va;
    12.8 +    spinlock_t             buffered_io_lock;
    12.9      s64                    tsc_frequency;
   12.10      struct pl_time         pl_time;
   12.11  
    13.1 --- a/xen/include/asm-x86/hvm/hvm.h	Wed Aug 09 11:27:28 2006 +0100
    13.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Wed Aug 09 11:29:06 2006 +0100
    13.3 @@ -78,7 +78,7 @@ hvm_disable(void)
    13.4  }
    13.5  
    13.6  void hvm_create_event_channels(struct vcpu *v);
    13.7 -void hvm_map_io_shared_page(struct vcpu *v);
    13.8 +void hvm_map_io_shared_pages(struct vcpu *v);
    13.9  
   13.10  static inline int
   13.11  hvm_initialize_guest_resources(struct vcpu *v)
   13.12 @@ -87,7 +87,7 @@ hvm_initialize_guest_resources(struct vc
   13.13      if ( hvm_funcs.initialize_guest_resources )
   13.14          ret = hvm_funcs.initialize_guest_resources(v);
   13.15      if ( ret == 1 ) {
   13.16 -        hvm_map_io_shared_page(v);
   13.17 +        hvm_map_io_shared_pages(v);
   13.18          hvm_create_event_channels(v);
   13.19      }
   13.20      return ret;
    14.1 --- a/xen/include/asm-x86/hvm/support.h	Wed Aug 09 11:27:28 2006 +0100
    14.2 +++ b/xen/include/asm-x86/hvm/support.h	Wed Aug 09 11:29:06 2006 +0100
    14.3 @@ -139,6 +139,7 @@ extern int hvm_copy(void *buf, unsigned 
    14.4  extern void hvm_setup_platform(struct domain* d);
    14.5  extern int hvm_mmio_intercept(ioreq_t *p);
    14.6  extern int hvm_io_intercept(ioreq_t *p, int type);
    14.7 +extern int hvm_buffered_io_intercept(ioreq_t *p);
    14.8  extern void hvm_hooks_assist(struct vcpu *v);
    14.9  extern void hvm_print_line(struct vcpu *v, const char c);
   14.10  extern void hlt_timer_fn(void *data);
    15.1 --- a/xen/include/public/hvm/ioreq.h	Wed Aug 09 11:27:28 2006 +0100
    15.2 +++ b/xen/include/public/hvm/ioreq.h	Wed Aug 09 11:29:06 2006 +0100
    15.3 @@ -78,6 +78,14 @@ struct shared_iopage {
    15.4  };
    15.5  typedef struct shared_iopage shared_iopage_t;
    15.6  
    15.7 +#define IOREQ_BUFFER_SLOT_NUM     80
    15.8 +struct buffered_iopage {
    15.9 +    unsigned long   read_pointer;
   15.10 +    unsigned long   write_pointer;
   15.11 +    ioreq_t         ioreq[IOREQ_BUFFER_SLOT_NUM];
    15.12 +};            /* sizeof(struct buffered_iopage) must not exceed one page */
   15.13 +typedef struct buffered_iopage buffered_iopage_t;
   15.14 +
   15.15  #endif /* _IOREQ_H_ */
   15.16  
   15.17  /*
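
The trailing comment on struct buffered_iopage is a hard constraint: two unsigned longs plus IOREQ_BUFFER_SLOT_NUM ioreq slots must fit in one page. A sketch of a compile-time check in the BUILD_BUG_ON style, assuming a stand-in ioreq layout rather than the real ioreq_t from this header:

/* Hedged compile-time check for the one-page constraint above;
 * demo_ioreq is an illustrative stand-in for ioreq_t. */
#include <stdint.h>

#define PAGE_SIZE             4096
#define IOREQ_BUFFER_SLOT_NUM 80

struct demo_ioreq { uint64_t w[4]; };            /* stand-in for ioreq_t */

struct demo_buffered_iopage {
    unsigned long     read_pointer;
    unsigned long     write_pointer;
    struct demo_ioreq ioreq[IOREQ_BUFFER_SLOT_NUM];
};

/* Fails to compile if the structure outgrows one page. */
typedef char demo_fits_in_page
    [sizeof(struct demo_buffered_iopage) <= PAGE_SIZE ? 1 : -1];

int main(void) { return 0; }
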