ia64/xen-unstable

changeset 11021:dc7b56b8cfb5

[hvm/qemu] Add an HVM buffered IO write framework to accelerate stdvga emulation.
With this patch, a win2k VMX guest boots fine on Xen.
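
The mechanism: instead of trapping to qemu-dm synchronously for every stdvga MMIO
write, Xen appends the write request to a ring of ioreq_t slots in a dedicated
shared page, and qemu-dm drains that ring from a periodic timer. Below is a minimal
sketch of the producer/consumer protocol; it is illustrative only and not part of
this changeset. It reuses the buffered_iopage_t layout, IOREQ_BUFFER_SLOT_NUM and
the wmb()/mb() barriers introduced by the patch, while buffer_ioreq(),
drain_buffered_ioreqs() and handle_one_ioreq() are hypothetical names
(handle_one_ioreq() stands in for the dispatch done by __handle_ioreq()).

    /* Producer side (Xen, called with buffered_io_lock held). */
    static int buffer_ioreq(buffered_iopage_t *pg, const ioreq_t *p)
    {
        /* Unsigned subtraction copes with write_pointer wrapping around. */
        if ( pg->write_pointer - pg->read_pointer == IOREQ_BUFFER_SLOT_NUM )
            return 0;            /* ring full: caller falls back to the normal path */
        pg->ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM] = *p;
        wmb();                   /* publish the slot contents before the index */
        pg->write_pointer++;
        return 1;
    }

    /* Consumer side (qemu-dm, run from a timer and before each ordinary ioreq). */
    static void drain_buffered_ioreqs(buffered_iopage_t *pg)
    {
        while ( pg->read_pointer != pg->write_pointer )
        {
            handle_one_ioreq(&pg->ioreq[pg->read_pointer % IOREQ_BUFFER_SLOT_NUM]);
            mb();                /* finish the request before releasing the slot */
            pg->read_pointer++;
        }
    }

Only requests whose completion the guest never observes (stdvga VRAM writes in the
0xA0000-0xBFFFF range here) are safe to buffer; reads and accesses outside the
registered ranges still go through the synchronous shared page.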

Signed-off-by: Zhu Han <zhu.han@intel.com>
Signed-off-by: Eddie Dong <eddie.dong@intel.com>
Signed-off-by: Xin Li <xin.b.li@intel.com>
Signed-off-by: Christian Limpach <Christian.Limpach@xensource.com>
author chris@kneesaa.uk.xensource.com
date Wed Aug 09 11:25:33 2006 +0100 (2006-08-09)
parents 3a2fee0d84f7
children 01043d543878
files tools/ioemu/target-i386-dm/helper2.c tools/ioemu/vl.c tools/libxc/xc_hvm_build.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/intercept.c xen/arch/x86/hvm/platform.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/e820.h xen/include/asm-x86/hvm/domain.h xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/support.h xen/include/public/hvm/ioreq.h
line diff
     1.1 --- a/tools/ioemu/target-i386-dm/helper2.c	Wed Aug 09 10:53:16 2006 +0100
     1.2 +++ b/tools/ioemu/target-i386-dm/helper2.c	Wed Aug 09 11:25:33 2006 +0100
     1.3 @@ -76,6 +76,10 @@ int xc_handle;
     1.4  
     1.5  shared_iopage_t *shared_page = NULL;
     1.6  
     1.7 +#define BUFFER_IO_MAX_DELAY  100
     1.8 +buffered_iopage_t *buffered_io_page = NULL;
     1.9 +QEMUTimer *buffered_io_timer;
    1.10 +
    1.11  /* the evtchn fd for polling */
    1.12  int xce_handle = -1;
    1.13  
    1.14 @@ -419,36 +423,68 @@ void cpu_ioreq_xor(CPUState *env, ioreq_
    1.15      req->u.data = tmp1;
    1.16  }
    1.17  
    1.18 +void __handle_ioreq(CPUState *env, ioreq_t *req)
    1.19 +{
    1.20 +    if (!req->pdata_valid && req->dir == IOREQ_WRITE && req->size != 4)
    1.21 +	req->u.data &= (1UL << (8 * req->size)) - 1;
    1.22 +
    1.23 +    switch (req->type) {
    1.24 +    case IOREQ_TYPE_PIO:
    1.25 +        cpu_ioreq_pio(env, req);
    1.26 +        break;
    1.27 +    case IOREQ_TYPE_COPY:
    1.28 +        cpu_ioreq_move(env, req);
    1.29 +        break;
    1.30 +    case IOREQ_TYPE_AND:
    1.31 +        cpu_ioreq_and(env, req);
    1.32 +        break;
    1.33 +    case IOREQ_TYPE_OR:
    1.34 +        cpu_ioreq_or(env, req);
    1.35 +        break;
    1.36 +    case IOREQ_TYPE_XOR:
    1.37 +        cpu_ioreq_xor(env, req);
    1.38 +        break;
    1.39 +    default:
    1.40 +        hw_error("Invalid ioreq type 0x%x\n", req->type);
    1.41 +    }
    1.42 +}
    1.43 +
    1.44 +void __handle_buffered_iopage(CPUState *env)
    1.45 +{
    1.46 +    ioreq_t *req = NULL;
    1.47 +
    1.48 +    if (!buffered_io_page)
    1.49 +        return;
    1.50 +
    1.51 +    while (buffered_io_page->read_pointer !=
    1.52 +           buffered_io_page->write_pointer) {
    1.53 +        req = &buffered_io_page->ioreq[buffered_io_page->read_pointer %
    1.54 +				       IOREQ_BUFFER_SLOT_NUM];
    1.55 +
    1.56 +        __handle_ioreq(env, req);
    1.57 +
    1.58 +        mb();
    1.59 +        buffered_io_page->read_pointer++;
    1.60 +    }
    1.61 +}
    1.62 +
    1.63 +void handle_buffered_io(void *opaque)
    1.64 +{
    1.65 +    CPUState *env = opaque;
    1.66 +
    1.67 +    __handle_buffered_iopage(env);
    1.68 +    qemu_mod_timer(buffered_io_timer, BUFFER_IO_MAX_DELAY +
    1.69 +		   qemu_get_clock(rt_clock));
    1.70 +}
    1.71 +
    1.72  void cpu_handle_ioreq(void *opaque)
    1.73  {
    1.74      CPUState *env = opaque;
    1.75      ioreq_t *req = cpu_get_ioreq();
    1.76  
    1.77 +    handle_buffered_io(env);
    1.78      if (req) {
    1.79 -        if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
    1.80 -            if (req->size != 4)
    1.81 -                req->u.data &= (1UL << (8 * req->size))-1;
    1.82 -        }
    1.83 -
    1.84 -        switch (req->type) {
    1.85 -        case IOREQ_TYPE_PIO:
    1.86 -            cpu_ioreq_pio(env, req);
    1.87 -            break;
    1.88 -        case IOREQ_TYPE_COPY:
    1.89 -            cpu_ioreq_move(env, req);
    1.90 -            break;
    1.91 -        case IOREQ_TYPE_AND:
    1.92 -            cpu_ioreq_and(env, req);
    1.93 -            break;
    1.94 -        case IOREQ_TYPE_OR:
    1.95 -            cpu_ioreq_or(env, req);
    1.96 -            break;
    1.97 -        case IOREQ_TYPE_XOR:
    1.98 -            cpu_ioreq_xor(env, req);
    1.99 -            break;
   1.100 -        default:
   1.101 -            hw_error("Invalid ioreq type 0x%x\n", req->type);
   1.102 -        }
   1.103 +        __handle_ioreq(env, req);
   1.104  
   1.105          /* No state change if state = STATE_IORESP_HOOK */
   1.106          if (req->state == STATE_IOREQ_INPROCESS) {
   1.107 @@ -466,6 +502,10 @@ int main_loop(void)
   1.108      CPUState *env = cpu_single_env;
   1.109      int evtchn_fd = xc_evtchn_fd(xce_handle);
   1.110  
   1.111 +    buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
   1.112 +				       cpu_single_env);
   1.113 +    qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));
   1.114 +
   1.115      qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
   1.116  
   1.117      env->send_event = 0;
     2.1 --- a/tools/ioemu/vl.c	Wed Aug 09 10:53:16 2006 +0100
     2.2 +++ b/tools/ioemu/vl.c	Wed Aug 09 11:25:33 2006 +0100
     2.3 @@ -5834,6 +5834,7 @@ int main(int argc, char **argv)
     2.4      unsigned long nr_pages;
     2.5      xen_pfn_t *page_array;
     2.6      extern void *shared_page;
     2.7 +    extern void *buffered_io_page;
     2.8  
     2.9      char qemu_dm_logfilename[64];
    2.10  
    2.11 @@ -6378,12 +6379,17 @@ int main(int argc, char **argv)
    2.12  
    2.13      phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
    2.14                                           PROT_READ|PROT_WRITE, page_array,
    2.15 -                                         nr_pages - 1);
    2.16 +                                         nr_pages - 3);
    2.17      if (phys_ram_base == 0) {
    2.18          fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
    2.19          exit(-1);
    2.20      }
    2.21  
     2.22 +    /* not yet added for IA64 */
    2.23 +    buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
    2.24 +                                       PROT_READ|PROT_WRITE,
    2.25 +                                       page_array[nr_pages - 3]);
    2.26 +
    2.27      shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
    2.28                                         PROT_READ|PROT_WRITE,
    2.29                                         page_array[nr_pages - 1]);
     3.1 --- a/tools/libxc/xc_hvm_build.c	Wed Aug 09 10:53:16 2006 +0100
     3.2 +++ b/tools/libxc/xc_hvm_build.c	Wed Aug 09 11:25:33 2006 +0100
     3.3 @@ -26,6 +26,7 @@
     3.4  #define E820_IO          16
     3.5  #define E820_SHARED_PAGE 17
     3.6  #define E820_XENSTORE    18
     3.7 +#define E820_BUFFERED_IO 19
     3.8  
     3.9  #define E820_MAP_PAGE       0x00090000
    3.10  #define E820_MAP_NR_OFFSET  0x000001E8
    3.11 @@ -96,7 +97,13 @@ static void build_e820map(void *e820_pag
    3.12      e820entry[nr_map].type = E820_RESERVED;
    3.13      nr_map++;
    3.14  
    3.15 -#define STATIC_PAGES    2       /* for ioreq_t and store_mfn */
    3.16 +#define STATIC_PAGES    3
    3.17 +    /* 3 static pages:
    3.18 +     * - ioreq buffer.
    3.19 +     * - xenstore.
    3.20 +     * - shared_page.
    3.21 +     */
    3.22 +
    3.23      /* Most of the ram goes here */
    3.24      e820entry[nr_map].addr = 0x100000;
    3.25      e820entry[nr_map].size = mem_size - 0x100000 - STATIC_PAGES * PAGE_SIZE;
    3.26 @@ -105,6 +112,12 @@ static void build_e820map(void *e820_pag
    3.27  
    3.28      /* Statically allocated special pages */
    3.29  
    3.30 +    /* For buffered IO requests */
    3.31 +    e820entry[nr_map].addr = mem_size - 3 * PAGE_SIZE;
    3.32 +    e820entry[nr_map].size = PAGE_SIZE;
    3.33 +    e820entry[nr_map].type = E820_BUFFERED_IO;
    3.34 +    nr_map++;
    3.35 +
    3.36      /* For xenstore */
    3.37      e820entry[nr_map].addr = mem_size - 2 * PAGE_SIZE;
    3.38      e820entry[nr_map].size = PAGE_SIZE;
    3.39 @@ -213,6 +226,9 @@ static int setup_guest(int xc_handle,
    3.40      unsigned long shared_page_frame = 0;
    3.41      shared_iopage_t *sp;
    3.42  
    3.43 +    unsigned long ioreq_buffer_frame = 0;
    3.44 +    void *ioreq_buffer_page;
    3.45 +
    3.46      memset(&dsi, 0, sizeof(struct domain_setup_info));
    3.47  
    3.48      if ( (parseelfimage(image, image_size, &dsi)) != 0 )
    3.49 @@ -303,6 +319,19 @@ static int setup_guest(int xc_handle,
    3.50      memset(sp, 0, PAGE_SIZE);
    3.51      munmap(sp, PAGE_SIZE);
    3.52  
    3.53 +    /* clean the buffered IO requests page */
    3.54 +    ioreq_buffer_frame = page_array[(v_end >> PAGE_SHIFT) - 3];
    3.55 +    ioreq_buffer_page = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
    3.56 +                                             PROT_READ | PROT_WRITE,
    3.57 +                                             ioreq_buffer_frame);
    3.58 +
    3.59 +    if ( ioreq_buffer_page == NULL )
    3.60 +        goto error_out;
    3.61 +
    3.62 +    memset(ioreq_buffer_page, 0, PAGE_SIZE);
    3.63 +
    3.64 +    munmap(ioreq_buffer_page, PAGE_SIZE);
    3.65 +
    3.66      xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, (v_end >> PAGE_SHIFT) - 2);
    3.67      xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);
    3.68  
     4.1 --- a/xen/arch/x86/hvm/hvm.c	Wed Aug 09 10:53:16 2006 +0100
     4.2 +++ b/xen/arch/x86/hvm/hvm.c	Wed Aug 09 11:25:33 2006 +0100
     4.3 @@ -134,15 +134,28 @@ static void e820_map_io_shared_callback(
     4.4      }
     4.5  }
     4.6  
     4.7 -void hvm_map_io_shared_page(struct vcpu *v)
     4.8 +static void e820_map_buffered_io_callback(struct domain *d,
     4.9 +                                          struct e820entry *e,
    4.10 +                                          void *data)
    4.11  {
    4.12 -    unsigned long mfn = INVALID_MFN;
    4.13 +    unsigned long *mfn = data;
    4.14 +    if ( e->type == E820_BUFFERED_IO ) {
    4.15 +        ASSERT(*mfn == INVALID_MFN);
    4.16 +        *mfn = gmfn_to_mfn(d, e->addr >> PAGE_SHIFT);
    4.17 +    }
    4.18 +}
    4.19 +
    4.20 +void hvm_map_io_shared_pages(struct vcpu *v)
    4.21 +{
    4.22 +    unsigned long mfn;
    4.23      void *p;
    4.24      struct domain *d = v->domain;
    4.25  
    4.26 -    if ( d->arch.hvm_domain.shared_page_va )
    4.27 +    if ( d->arch.hvm_domain.shared_page_va ||
    4.28 +         d->arch.hvm_domain.buffered_io_va )
    4.29          return;
    4.30  
    4.31 +    mfn = INVALID_MFN;
    4.32      e820_foreach(d, e820_map_io_shared_callback, &mfn);
    4.33  
    4.34      if ( mfn == INVALID_MFN )
    4.35 @@ -159,6 +172,14 @@ void hvm_map_io_shared_page(struct vcpu 
    4.36      }
    4.37  
    4.38      d->arch.hvm_domain.shared_page_va = (unsigned long)p;
    4.39 +
    4.40 +    mfn = INVALID_MFN;
    4.41 +    e820_foreach(d, e820_map_buffered_io_callback, &mfn);
    4.42 +    if ( mfn != INVALID_MFN ) {
    4.43 +        p = map_domain_page_global(mfn);
    4.44 +        if ( p )
    4.45 +            d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
    4.46 +    }
    4.47  }
    4.48  
    4.49  void hvm_create_event_channels(struct vcpu *v)
    4.50 @@ -210,6 +231,8 @@ void hvm_setup_platform(struct domain* d
    4.51          hvm_vioapic_init(d);
    4.52      }
    4.53  
    4.54 +    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
    4.55 +
    4.56      init_timer(&platform->pl_time.periodic_tm.timer,
    4.57                 pt_timer_fn, v, v->processor);
    4.58      pit_init(v, cpu_khz);
     5.1 --- a/xen/arch/x86/hvm/intercept.c	Wed Aug 09 10:53:16 2006 +0100
     5.2 +++ b/xen/arch/x86/hvm/intercept.c	Wed Aug 09 11:25:33 2006 +0100
     5.3 @@ -36,12 +36,26 @@ extern struct hvm_mmio_handler vioapic_m
     5.4  
     5.5  #define HVM_MMIO_HANDLER_NR 2
     5.6  
     5.7 -struct hvm_mmio_handler *hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
     5.8 +static struct hvm_mmio_handler *hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
     5.9  {
    5.10      &vlapic_mmio_handler,
    5.11      &vioapic_mmio_handler
    5.12  };
    5.13  
    5.14 +struct hvm_buffered_io_range {
    5.15 +    unsigned long start_addr;
    5.16 +    unsigned long length;
    5.17 +};
    5.18 +
    5.19 +#define HVM_BUFFERED_IO_RANGE_NR 1
    5.20 +
    5.21 +static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
    5.22 +static struct hvm_buffered_io_range
    5.23 +*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
    5.24 +{
    5.25 +    &buffered_stdvga_range
    5.26 +};
    5.27 +
    5.28  static inline void hvm_mmio_access(struct vcpu *v,
    5.29                                     ioreq_t *p,
    5.30                                     hvm_mmio_read_t read_handler,
    5.31 @@ -140,6 +154,56 @@ static inline void hvm_mmio_access(struc
    5.32      }
    5.33  }
    5.34  
    5.35 +int hvm_buffered_io_intercept(ioreq_t *p)
    5.36 +{
    5.37 +    struct vcpu *v = current;
    5.38 +    spinlock_t  *buffered_io_lock;
    5.39 +    buffered_iopage_t *buffered_iopage =
    5.40 +        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
    5.41 +    unsigned long tmp_write_pointer = 0;
    5.42 +    int i;
    5.43 +
     5.44 +    /* Reads cannot be buffered; let them take the synchronous path. */
    5.45 +    if ( p->dir == IOREQ_READ )
    5.46 +        return 0;
    5.47 +
    5.48 +    for ( i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++ ) {
    5.49 +        if ( p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
    5.50 +             p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
    5.51 +                                     hvm_buffered_io_ranges[i]->length )
    5.52 +            break;
    5.53 +    }
    5.54 +
    5.55 +    if ( i == HVM_BUFFERED_IO_RANGE_NR )
    5.56 +        return 0;
    5.57 +
    5.58 +    buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
    5.59 +    spin_lock(buffered_io_lock);
    5.60 +
    5.61 +    if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
    5.62 +         (unsigned long)IOREQ_BUFFER_SLOT_NUM ) {
     5.63 +        /* The queue is full:
     5.64 +         * send this ioreq through the normal, synchronous path.
     5.65 +         * NOTE: the unsigned arithmetic above handles
     5.66 +         * write_pointer overflow correctly.
     5.67 +         */
    5.68 +        spin_unlock(buffered_io_lock);
    5.69 +        return 0;
    5.70 +    }
    5.71 +
    5.72 +    tmp_write_pointer = buffered_iopage->write_pointer % IOREQ_BUFFER_SLOT_NUM;
    5.73 +
    5.74 +    memcpy(&buffered_iopage->ioreq[tmp_write_pointer], p, sizeof(ioreq_t));
    5.75 +
     5.76 +    /* make the ioreq_t visible before advancing write_pointer */
    5.77 +    wmb();
    5.78 +    buffered_iopage->write_pointer++;
    5.79 +
    5.80 +    spin_unlock(buffered_io_lock);
    5.81 +
    5.82 +    return 1;
    5.83 +}
    5.84 +
    5.85  int hvm_mmio_intercept(ioreq_t *p)
    5.86  {
    5.87      struct vcpu *v = current;
     6.1 --- a/xen/arch/x86/hvm/platform.c	Wed Aug 09 10:53:16 2006 +0100
     6.2 +++ b/xen/arch/x86/hvm/platform.c	Wed Aug 09 11:25:33 2006 +0100
     6.3 @@ -779,7 +779,7 @@ void send_mmio_req(
     6.4      } else
     6.5          p->u.data = value;
     6.6  
     6.7 -    if (hvm_mmio_intercept(p)){
     6.8 +    if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) ) {
     6.9          p->state = STATE_IORESP_READY;
    6.10          hvm_io_assist(v);
    6.11          return;
     7.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed Aug 09 10:53:16 2006 +0100
     7.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Aug 09 11:25:33 2006 +0100
     7.3 @@ -810,6 +810,9 @@ static void svm_relinquish_guest_resourc
     7.4          unmap_domain_page_global(
     7.5              (void *)d->arch.hvm_domain.shared_page_va);
     7.6  
     7.7 +    if ( d->arch.hvm_domain.buffered_io_va )
     7.8 +        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
     7.9 +
    7.10      shadow_direct_map_clean(d);
    7.11  }
    7.12  
     8.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Aug 09 10:53:16 2006 +0100
     8.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Aug 09 11:25:33 2006 +0100
     8.3 @@ -151,6 +151,9 @@ static void vmx_relinquish_guest_resourc
     8.4          unmap_domain_page_global(
     8.5  	        (void *)d->arch.hvm_domain.shared_page_va);
     8.6  
     8.7 +    if ( d->arch.hvm_domain.buffered_io_va )
     8.8 +        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
     8.9 +
    8.10      shadow_direct_map_clean(d);
    8.11  }
    8.12  
     9.1 --- a/xen/include/asm-x86/e820.h	Wed Aug 09 10:53:16 2006 +0100
     9.2 +++ b/xen/include/asm-x86/e820.h	Wed Aug 09 11:25:33 2006 +0100
     9.3 @@ -12,6 +12,7 @@
     9.4  #define E820_IO          16
     9.5  #define E820_SHARED_PAGE 17
     9.6  #define E820_XENSTORE    18
     9.7 +#define E820_BUFFERED_IO 19
     9.8  
     9.9  #define E820_MAP_PAGE        0x00090000
    9.10  #define E820_MAP_NR_OFFSET   0x000001E8
    10.1 --- a/xen/include/asm-x86/hvm/domain.h	Wed Aug 09 10:53:16 2006 +0100
    10.2 +++ b/xen/include/asm-x86/hvm/domain.h	Wed Aug 09 11:25:33 2006 +0100
    10.3 @@ -33,6 +33,8 @@
    10.4  
    10.5  struct hvm_domain {
    10.6      unsigned long          shared_page_va;
    10.7 +    unsigned long          buffered_io_va;
    10.8 +    spinlock_t             buffered_io_lock;
    10.9      s64                    tsc_frequency;
   10.10      struct pl_time         pl_time;
   10.11  
    11.1 --- a/xen/include/asm-x86/hvm/hvm.h	Wed Aug 09 10:53:16 2006 +0100
    11.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Wed Aug 09 11:25:33 2006 +0100
    11.3 @@ -78,7 +78,7 @@ hvm_disable(void)
    11.4  }
    11.5  
    11.6  void hvm_create_event_channels(struct vcpu *v);
    11.7 -void hvm_map_io_shared_page(struct vcpu *v);
    11.8 +void hvm_map_io_shared_pages(struct vcpu *v);
    11.9  
   11.10  static inline int
   11.11  hvm_initialize_guest_resources(struct vcpu *v)
   11.12 @@ -87,7 +87,7 @@ hvm_initialize_guest_resources(struct vc
   11.13      if ( hvm_funcs.initialize_guest_resources )
   11.14          ret = hvm_funcs.initialize_guest_resources(v);
   11.15      if ( ret == 1 ) {
   11.16 -        hvm_map_io_shared_page(v);
   11.17 +        hvm_map_io_shared_pages(v);
   11.18          hvm_create_event_channels(v);
   11.19      }
   11.20      return ret;
    12.1 --- a/xen/include/asm-x86/hvm/support.h	Wed Aug 09 10:53:16 2006 +0100
    12.2 +++ b/xen/include/asm-x86/hvm/support.h	Wed Aug 09 11:25:33 2006 +0100
    12.3 @@ -139,6 +139,7 @@ extern int hvm_copy(void *buf, unsigned 
    12.4  extern void hvm_setup_platform(struct domain* d);
    12.5  extern int hvm_mmio_intercept(ioreq_t *p);
    12.6  extern int hvm_io_intercept(ioreq_t *p, int type);
    12.7 +extern int hvm_buffered_io_intercept(ioreq_t *p);
    12.8  extern void hvm_hooks_assist(struct vcpu *v);
    12.9  extern void hvm_print_line(struct vcpu *v, const char c);
   12.10  extern void hlt_timer_fn(void *data);
    13.1 --- a/xen/include/public/hvm/ioreq.h	Wed Aug 09 10:53:16 2006 +0100
    13.2 +++ b/xen/include/public/hvm/ioreq.h	Wed Aug 09 11:25:33 2006 +0100
    13.3 @@ -78,6 +78,14 @@ struct shared_iopage {
    13.4  };
    13.5  typedef struct shared_iopage shared_iopage_t;
    13.6  
    13.7 +#define IOREQ_BUFFER_SLOT_NUM     80
    13.8 +struct buffered_iopage {
    13.9 +    unsigned long   read_pointer;
   13.10 +    unsigned long   write_pointer;
   13.11 +    ioreq_t         ioreq[IOREQ_BUFFER_SLOT_NUM];
    13.12 +};            /* the size of this structure must not exceed one page */
   13.13 +typedef struct buffered_iopage buffered_iopage_t;
   13.14 +
   13.15  #endif /* _IOREQ_H_ */
   13.16  
   13.17  /*