ia64/xen-unstable

changeset 16248:cae485f682aa

x86, hvm: Improve standard VGA performance

This patch improves the performance of Standard VGA,
the mode used during Windows boot and by the Linux
splash screen.

It does so by buffering all the stdvga programmed output ops
and memory mapped ops (both reads and writes) that are sent to QEMU.

We maintain locally essential VGA state so we can respond
immediately to input and read ops without waiting for
QEMU. We snoop output and write ops to keep our state
up-to-date.

PIO input ops are satisfied from cached state without
bothering QEMU.

PIO output and mmio ops are passed through to QEMU, including
mmio read ops. This is necessary because mmio reads
can have side effects.

I have changed the format of the buffered_iopage.
It used to contain 80 elements of type ioreq_t (48 bytes each).
Now it contains 672 elements of type buf_ioreq_t (6 bytes each).
Being able to pipeline 8 times as many ops improves
VGA performance by a factor of 8.

I changed hvm_buffered_io_intercept to use the same
registration and callback mechanism as hvm_portio_intercept
rather than the hacky hardcoding it used before.

In platform.c, I fixed send_timeoffset_req() to set its
ioreq size to 8 (rather than 4), and its count to 1 (which
was missing).

Signed-off-by: Ben Guthro <bguthro@virtualron.com>
Signed-off-by: Robert Phillips <rphillips@virtualiron.com>
author Keir Fraser <keir@xensource.com>
date Fri Oct 26 10:32:20 2007 +0100 (2007-10-26)
parents 2d238ca6d51a
children 413107fa49a5
files tools/ioemu/target-i386-dm/helper2.c tools/ioemu/xenstore.c xen/arch/x86/hvm/Makefile xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/intercept.c xen/arch/x86/hvm/platform.c xen/arch/x86/hvm/stdvga.c xen/include/asm-x86/hvm/domain.h xen/include/asm-x86/hvm/io.h xen/include/public/hvm/ioreq.h
line diff
     1.1 --- a/tools/ioemu/target-i386-dm/helper2.c	Fri Oct 26 10:00:10 2007 +0100
     1.2 +++ b/tools/ioemu/target-i386-dm/helper2.c	Fri Oct 26 10:32:20 2007 +0100
     1.3 @@ -478,6 +478,7 @@ void cpu_ioreq_timeoffset(CPUState *env,
     1.4  
     1.5      time_offset += (ulong)req->data;
     1.6  
     1.7 +    fprintf(logfile, "Time offset set %ld, added offset %ld\n", time_offset, req->data);
     1.8      sprintf(b, "%ld", time_offset);
     1.9      xenstore_vm_write(domid, "rtc/timeoffset", b);
    1.10  }
    1.11 @@ -538,20 +539,39 @@ void __handle_ioreq(CPUState *env, ioreq
    1.12  
    1.13  void __handle_buffered_iopage(CPUState *env)
    1.14  {
    1.15 -    ioreq_t *req = NULL;
    1.16 +    buf_ioreq_t *buf_req = NULL;
    1.17 +    ioreq_t req;
    1.18 +    int qw = 0;
    1.19  
    1.20      if (!buffered_io_page)
    1.21          return;
    1.22  
    1.23      while (buffered_io_page->read_pointer !=
    1.24             buffered_io_page->write_pointer) {
    1.25 -        req = &buffered_io_page->ioreq[buffered_io_page->read_pointer %
    1.26 +        memset(&req, 0, sizeof(req));
    1.27 +        buf_req = &buffered_io_page->buf_ioreq[buffered_io_page->read_pointer %
    1.28  				       IOREQ_BUFFER_SLOT_NUM];
    1.29 +        req.size = 1UL << buf_req->size;
    1.30 +        req.count = 1;
    1.31 +        req.data = buf_req->data;
    1.32 +        req.state = STATE_IOREQ_READY;
    1.33 +        req.dir  = buf_req->dir;
    1.34 +        req.type = buf_req->type;
    1.35 +        qw = req.size == 8;
    1.36 +        if (qw) {
    1.37 +            req.data |= ((uint64_t)buf_req->addr) << 16;
    1.38 +            buf_req = &buffered_io_page->buf_ioreq[(buffered_io_page->read_pointer+1) %
    1.39 +                                               IOREQ_BUFFER_SLOT_NUM];
    1.40 +            req.data |= ((uint64_t)buf_req->data) << 32;
    1.41 +            req.data |= ((uint64_t)buf_req->addr) << 48;
    1.42 +        }
    1.43 +        else
    1.44 +            req.addr = buf_req->addr;
    1.45  
    1.46 -        __handle_ioreq(env, req);
    1.47 +        __handle_ioreq(env, &req);
    1.48  
    1.49          mb();
    1.50 -        buffered_io_page->read_pointer++;
    1.51 +        buffered_io_page->read_pointer += qw ? 2 : 1;
    1.52      }
    1.53  }
    1.54  
     2.1 --- a/tools/ioemu/xenstore.c	Fri Oct 26 10:00:10 2007 +0100
     2.2 +++ b/tools/ioemu/xenstore.c	Fri Oct 26 10:32:20 2007 +0100
     2.3 @@ -734,7 +734,7 @@ int xenstore_vm_write(int domid, char *k
     2.4  
     2.5      pasprintf(&buf, "%s/%s", path, key);
     2.6      rc = xs_write(xsh, XBT_NULL, buf, value, strlen(value));
     2.7 -    if (rc) {
     2.8 +    if (rc == 0) {
     2.9          fprintf(logfile, "xs_write(%s, %s): write error\n", buf, key);
    2.10          goto out;
    2.11      }
     3.1 --- a/xen/arch/x86/hvm/Makefile	Fri Oct 26 10:00:10 2007 +0100
     3.2 +++ b/xen/arch/x86/hvm/Makefile	Fri Oct 26 10:32:20 2007 +0100
     3.3 @@ -17,3 +17,4 @@ obj-y += vioapic.o
     3.4  obj-y += vlapic.o
     3.5  obj-y += vpic.o
     3.6  obj-y += save.o
     3.7 +obj-y += stdvga.o
     4.1 --- a/xen/arch/x86/hvm/hvm.c	Fri Oct 26 10:00:10 2007 +0100
     4.2 +++ b/xen/arch/x86/hvm/hvm.c	Fri Oct 26 10:32:20 2007 +0100
     4.3 @@ -241,6 +241,8 @@ int hvm_domain_initialise(struct domain 
     4.4      if ( rc != 0 )
     4.5          goto fail1;
     4.6  
     4.7 +    stdvga_init(d);
     4.8 +
     4.9      hvm_init_ioreq_page(d, &d->arch.hvm_domain.ioreq);
    4.10      hvm_init_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
    4.11  
    4.12 @@ -266,6 +268,7 @@ void hvm_domain_relinquish_resources(str
    4.13      rtc_deinit(d);
    4.14      pmtimer_deinit(d);
    4.15      hpet_deinit(d);
    4.16 +    stdvga_deinit(d);
    4.17  }
    4.18  
    4.19  void hvm_domain_destroy(struct domain *d)
     5.1 --- a/xen/arch/x86/hvm/intercept.c	Fri Oct 26 10:00:10 2007 +0100
     5.2 +++ b/xen/arch/x86/hvm/intercept.c	Fri Oct 26 10:32:20 2007 +0100
     5.3 @@ -45,20 +45,6 @@ static struct hvm_mmio_handler *hvm_mmio
     5.4      &vioapic_mmio_handler
     5.5  };
     5.6  
     5.7 -struct hvm_buffered_io_range {
     5.8 -    unsigned long start_addr;
     5.9 -    unsigned long length;
    5.10 -};
    5.11 -
    5.12 -#define HVM_BUFFERED_IO_RANGE_NR 1
    5.13 -
    5.14 -static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
    5.15 -static struct hvm_buffered_io_range
    5.16 -*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
    5.17 -{
    5.18 -    &buffered_stdvga_range
    5.19 -};
    5.20 -
    5.21  static inline void hvm_mmio_access(struct vcpu *v,
    5.22                                     ioreq_t *p,
    5.23                                     hvm_mmio_read_t read_handler,
    5.24 @@ -170,49 +156,70 @@ int hvm_buffered_io_send(ioreq_t *p)
    5.25      struct vcpu *v = current;
    5.26      struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq;
    5.27      buffered_iopage_t *pg = iorp->va;
    5.28 +    buf_ioreq_t bp;
    5.29 +    /* Timeoffset sends 64b data, but no address.  Use two consecutive slots. */
    5.30 +    int qw = 0;
    5.31  
    5.32 +    /* Ensure buffered_iopage fits in a page */
    5.33 +    BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
    5.34 +
    5.35 +    /* Return 0 for the cases we can't deal with. */
    5.36 +    if (p->addr > 0xffffful || p->data_is_ptr || p->df || p->count != 1)
    5.37 +        return 0;
    5.38 +
    5.39 +    bp.type = p->type;
    5.40 +    bp.dir  = p->dir;
    5.41 +    switch (p->size) {
    5.42 +    case 1:
    5.43 +        bp.size = 0;
    5.44 +        break;
    5.45 +    case 2:
    5.46 +        bp.size = 1;
    5.47 +        break;
    5.48 +    case 4:
    5.49 +        bp.size = 2;
    5.50 +        break;
    5.51 +    case 8:
    5.52 +        bp.size = 3;
    5.53 +        qw = 1;
    5.54 +        gdprintk(XENLOG_INFO, "quadword ioreq type:%d data:%ld\n", p->type, p->data);
    5.55 +        break;
    5.56 +    default:
    5.57 +        gdprintk(XENLOG_WARNING, "unexpected ioreq size:%ld\n", p->size);
    5.58 +        return 0;
    5.59 +    }
    5.60 +    
    5.61 +    bp.data = p->data;
    5.62 +    bp.addr = qw ? ((p->data >> 16) & 0xfffful) : (p->addr & 0xffffful);
    5.63 +    
    5.64      spin_lock(&iorp->lock);
    5.65  
    5.66 -    if ( (pg->write_pointer - pg->read_pointer) == IOREQ_BUFFER_SLOT_NUM )
    5.67 +    if ( (pg->write_pointer - pg->read_pointer) >= IOREQ_BUFFER_SLOT_NUM - (qw ? 1 : 0))
    5.68      {
    5.69          /* The queue is full: send the iopacket through the normal path. */
    5.70          spin_unlock(&iorp->lock);
    5.71          return 0;
    5.72      }
    5.73 -
    5.74 -    memcpy(&pg->ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
    5.75 -           p, sizeof(ioreq_t));
    5.76 +    
    5.77 +    memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
    5.78 +           &bp, sizeof(bp));
    5.79 +    
    5.80 +    if (qw) {
    5.81 +        bp.data = p->data >> 32;
    5.82 +        bp.addr = (p->data >> 48) & 0xfffful;
    5.83 +        memcpy(&pg->buf_ioreq[(pg->write_pointer+1) % IOREQ_BUFFER_SLOT_NUM],
    5.84 +               &bp, sizeof(bp));
    5.85 +    }
    5.86  
    5.87      /* Make the ioreq_t visible /before/ write_pointer. */
    5.88      wmb();
    5.89 -    pg->write_pointer++;
    5.90 -
    5.91 +    pg->write_pointer += qw ? 2 : 1;
    5.92 +    
    5.93      spin_unlock(&iorp->lock);
    5.94 -
    5.95 +    
    5.96      return 1;
    5.97  }
    5.98  
    5.99 -int hvm_buffered_io_intercept(ioreq_t *p)
   5.100 -{
   5.101 -    int i;
   5.102 -
   5.103 -    /* ignore READ ioreq_t! */
   5.104 -    if ( p->dir == IOREQ_READ )
   5.105 -        return 0;
   5.106 -
   5.107 -    for ( i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++ ) {
   5.108 -        if ( p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
   5.109 -             p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
   5.110 -                                     hvm_buffered_io_ranges[i]->length )
   5.111 -            break;
   5.112 -    }
   5.113 -
   5.114 -    if ( i == HVM_BUFFERED_IO_RANGE_NR )
   5.115 -        return 0;
   5.116 -
   5.117 -    return hvm_buffered_io_send(p);
   5.118 -}
   5.119 -
   5.120  int hvm_mmio_intercept(ioreq_t *p)
   5.121  {
   5.122      struct vcpu *v = current;
   5.123 @@ -253,7 +260,7 @@ int hvm_io_intercept(ioreq_t *p, int typ
   5.124          addr = handler->hdl_list[i].addr;
   5.125          size = handler->hdl_list[i].size;
   5.126          if (p->addr >= addr &&
   5.127 -            p->addr <  addr + size)
   5.128 +            p->addr + p->size <=  addr + size)
   5.129              return handler->hdl_list[i].action(p);
   5.130      }
   5.131      return 0;
     6.1 --- a/xen/arch/x86/hvm/platform.c	Fri Oct 26 10:00:10 2007 +0100
     6.2 +++ b/xen/arch/x86/hvm/platform.c	Fri Oct 26 10:32:20 2007 +0100
     6.3 @@ -944,7 +944,8 @@ void send_timeoffset_req(unsigned long t
     6.4      memset(p, 0, sizeof(*p));
     6.5  
     6.6      p->type = IOREQ_TYPE_TIMEOFFSET;
     6.7 -    p->size = 4;
     6.8 +    p->size = 8;
     6.9 +    p->count = 1;
    6.10      p->dir = IOREQ_WRITE;
    6.11      p->data = timeoff;
    6.12  
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xen/arch/x86/hvm/stdvga.c	Fri Oct 26 10:32:20 2007 +0100
     7.3 @@ -0,0 +1,712 @@
     7.4 +/*
     7.5 + *  Copyright (c) 2003-2007, Virtual Iron Software, Inc.
     7.6 + *
     7.7 + *  Portions have been modified by Virtual Iron Software, Inc.
     7.8 + *  (c) 2007. This file and the modifications can be redistributed and/or
     7.9 + *  modified under the terms and conditions of the GNU General Public
    7.10 + *  License, version 2.1 and not any later version of the GPL, as published
    7.11 + *  by the Free Software Foundation. 
    7.12 + *
    7.13 + *
    7.14 + *
    7.15 + *  This improves the performance of Standard VGA,
    7.16 + *  the mode used during Windows boot and by the Linux
    7.17 + *  splash screen.
    7.18 + *
    7.19 + *  It does so by buffering all the stdvga programmed output ops
    7.20 + *  and memory mapped ops (both reads and writes) that are sent to QEMU.
    7.21 + *
    7.22 + *  We maintain locally essential VGA state so we can respond
    7.23 + *  immediately to input and read ops without waiting for
    7.24 + *  QEMU.  We snoop output and write ops to keep our state
    7.25 + *  up-to-date.
    7.26 + *
    7.27 + *  PIO input ops are satisfied from cached state without
    7.28 + *  bothering QEMU.
    7.29 + *
     7.30 + *  PIO output and mmio ops are passed through to QEMU, including
    7.31 + *  mmio read ops.  This is necessary because mmio reads
    7.32 + *  can have side effects.
    7.33 + */
    7.34 +
    7.35 +#include <xen/config.h>
    7.36 +#include <xen/types.h>
    7.37 +#include <xen/sched.h>
    7.38 +#include <asm/hvm/support.h>
    7.39 +
    7.40 +#define vram_b(_s, _a) (((uint8_t*) (_s)->vram_ptr[((_a)>>12)&0x3f])[(_a)&0xfff])
    7.41 +#define vram_w(_s, _a) (((uint16_t*)(_s)->vram_ptr[((_a)>>11)&0x3f])[(_a)&0x7ff])
    7.42 +#define vram_l(_s, _a) (((uint32_t*)(_s)->vram_ptr[((_a)>>10)&0x3f])[(_a)&0x3ff])
    7.43 +
    7.44 +#ifdef STDVGA_STATS
    7.45 +#define UPDATE_STATS(x) x
    7.46 +#else
    7.47 +#define UPDATE_STATS(x)
    7.48 +#endif
    7.49 +
    7.50 +#define PAT(x) (x)
    7.51 +static const uint32_t mask16[16] = {
    7.52 +    PAT(0x00000000),
    7.53 +    PAT(0x000000ff),
    7.54 +    PAT(0x0000ff00),
    7.55 +    PAT(0x0000ffff),
    7.56 +    PAT(0x00ff0000),
    7.57 +    PAT(0x00ff00ff),
    7.58 +    PAT(0x00ffff00),
    7.59 +    PAT(0x00ffffff),
    7.60 +    PAT(0xff000000),
    7.61 +    PAT(0xff0000ff),
    7.62 +    PAT(0xff00ff00),
    7.63 +    PAT(0xff00ffff),
    7.64 +    PAT(0xffff0000),
    7.65 +    PAT(0xffff00ff),
    7.66 +    PAT(0xffffff00),
    7.67 +    PAT(0xffffffff),
    7.68 +};
    7.69 +
    7.70 +/* force some bits to zero */
    7.71 +const uint8_t sr_mask[8] = {
    7.72 +    (uint8_t)~0xfc,
    7.73 +    (uint8_t)~0xc2,
    7.74 +    (uint8_t)~0xf0,
    7.75 +    (uint8_t)~0xc0,
    7.76 +    (uint8_t)~0xf1,
    7.77 +    (uint8_t)~0xff,
    7.78 +    (uint8_t)~0xff,
    7.79 +    (uint8_t)~0x00,
    7.80 +};
    7.81 +
    7.82 +const uint8_t gr_mask[16] = {
    7.83 +    (uint8_t)~0xf0, /* 0x00 */
    7.84 +    (uint8_t)~0xf0, /* 0x01 */
    7.85 +    (uint8_t)~0xf0, /* 0x02 */
    7.86 +    (uint8_t)~0xe0, /* 0x03 */
    7.87 +    (uint8_t)~0xfc, /* 0x04 */
    7.88 +    (uint8_t)~0x84, /* 0x05 */
    7.89 +    (uint8_t)~0xf0, /* 0x06 */
    7.90 +    (uint8_t)~0xf0, /* 0x07 */
    7.91 +    (uint8_t)~0x00, /* 0x08 */
    7.92 +};
    7.93 +
    7.94 +static uint64_t stdvga_inb(uint64_t addr)
    7.95 +{
    7.96 +    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    7.97 +    uint8_t val = 0;
    7.98 +    switch (addr) {
    7.99 +    case 0x3c4:                 /* sequencer address register */
   7.100 +        val = s->sr_index;
   7.101 +        break;
   7.102 +
   7.103 +    case 0x3c5:                 /* sequencer data register */
   7.104 +        if (s->sr_index < sizeof(s->sr))
   7.105 +            val = s->sr[s->sr_index];
   7.106 +        break;
   7.107 +
   7.108 +    case 0x3ce:                 /* graphics address register */
   7.109 +        val = s->gr_index;
   7.110 +        break;
   7.111 +
   7.112 +    case 0x3cf:                 /* graphics data register */
   7.113 +        val = s->gr[s->gr_index];
   7.114 +        break;
   7.115 +
   7.116 +    default:
   7.117 +        gdprintk(XENLOG_WARNING, "unexpected io addr 0x%04x\n", (int)addr);
   7.118 +    }
   7.119 +    return val;
   7.120 +}
   7.121 +
   7.122 +static uint64_t stdvga_in(ioreq_t *p)
   7.123 +{
   7.124 +    /* Satisfy reads from sequence and graphics registers using local values */
   7.125 +    uint64_t data = 0;
   7.126 +    switch (p->size) {
   7.127 +    case 1:
   7.128 +        data = stdvga_inb(p->addr);
   7.129 +        break;
   7.130 +
   7.131 +    case 2:
   7.132 +        data = stdvga_inb(p->addr);
   7.133 +        data |= stdvga_inb(p->addr + 1) << 8;
   7.134 +        break;
   7.135 +
   7.136 +    case 4:
   7.137 +        data = stdvga_inb(p->addr);
   7.138 +        data |= stdvga_inb(p->addr + 1) << 8;
   7.139 +        data |= stdvga_inb(p->addr + 2) << 16;
   7.140 +        data |= stdvga_inb(p->addr + 3) << 24;
   7.141 +        break;
   7.142 +
   7.143 +    case 8:
   7.144 +        data = stdvga_inb(p->addr);
   7.145 +        data |= stdvga_inb(p->addr + 1) << 8;
   7.146 +        data |= stdvga_inb(p->addr + 2) << 16;
   7.147 +        data |= stdvga_inb(p->addr + 3) << 24;
   7.148 +        data |= stdvga_inb(p->addr + 4) << 32;
   7.149 +        data |= stdvga_inb(p->addr + 5) << 40;
   7.150 +        data |= stdvga_inb(p->addr + 6) << 48;
   7.151 +        data |= stdvga_inb(p->addr + 7) << 56;
   7.152 +        break;
   7.153 +
   7.154 +    default:
   7.155 +        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", (int)p->size);
   7.156 +    }
   7.157 +    return data;
   7.158 +}
   7.159 +
   7.160 +static void stdvga_outb(uint64_t addr, uint8_t val)
   7.161 +{
   7.162 +    /* Bookkeep (via snooping) the sequencer and graphics registers */
   7.163 +
   7.164 +    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
   7.165 +    int prev_stdvga = s->stdvga;
   7.166 +
   7.167 +    switch (addr) {
   7.168 +    case 0x3c4:                 /* sequencer address register */
   7.169 +        s->sr_index = val;
   7.170 +        break;
   7.171 +
   7.172 +    case 0x3c5:                 /* sequencer data register */
   7.173 +        switch (s->sr_index) {
   7.174 +        case 0x00 ... 0x05:
   7.175 +        case 0x07:
   7.176 +            s->sr[s->sr_index] = val & sr_mask[s->sr_index];
   7.177 +            break;
   7.178 +        case 0x06:
   7.179 +            s->sr[s->sr_index] = ((val & 0x17) == 0x12) ? 0x12 : 0x0f;
   7.180 +            break;
   7.181 +        default:
   7.182 +            if (s->sr_index < sizeof(s->sr))
   7.183 +                s->sr[s->sr_index] = val;
   7.184 +            break;
   7.185 +        }
   7.186 +        break;
   7.187 +
   7.188 +    case 0x3ce:                 /* graphics address register */
   7.189 +        s->gr_index = val;
   7.190 +        break;
   7.191 +
   7.192 +    case 0x3cf:                 /* graphics data register */
   7.193 +        if (s->gr_index < sizeof(gr_mask)) {
   7.194 +            s->gr[s->gr_index] = val & gr_mask[s->gr_index];
   7.195 +        }
   7.196 +        else if (s->gr_index == 0xff && s->vram_ptr != NULL) {
   7.197 +            uint32_t addr;
   7.198 +            for (addr = 0xa0000; addr < 0xa4000; addr += 2)
   7.199 +                vram_w(s, addr) = (val << 8) | s->gr[0xfe];
   7.200 +        }
   7.201 +        else
   7.202 +            s->gr[s->gr_index] = val;
   7.203 +        break;
   7.204 +    }
   7.205 +
   7.206 +    /* When in standard vga mode, emulate here all writes to the vram buffer
   7.207 +     * so we can immediately satisfy reads without waiting for qemu. */
   7.208 +    s->stdvga =
   7.209 +        s->sr[0x07] == 0 &&          /* standard vga mode */
   7.210 +        s->gr[6] == 0x05;            /* misc graphics register w/ MemoryMapSelect=1  0xa0000-0xaffff (64K region) and AlphaDis=1 */
   7.211 +
   7.212 +    if (!prev_stdvga && s->stdvga) {
   7.213 +        s->cache = 1;       /* (re)start caching video buffer */
   7.214 +        gdprintk(XENLOG_INFO, "entering stdvga and caching modes\n");
   7.215 +    }
   7.216 +    else
   7.217 +    if (prev_stdvga && !s->stdvga)
   7.218 +        gdprintk(XENLOG_INFO, "leaving  stdvga\n");
   7.219 +}
   7.220 +
   7.221 +static void stdvga_outv(uint64_t addr, uint64_t data, uint32_t size)
   7.222 +{
   7.223 +    switch (size) {
   7.224 +    case 1:
   7.225 +        stdvga_outb(addr, data);
   7.226 +        break;
   7.227 +
   7.228 +    case 2:
   7.229 +        stdvga_outb(addr+0, data >>  0);
   7.230 +        stdvga_outb(addr+1, data >>  8);
   7.231 +        break;
   7.232 +
   7.233 +    case 4:
   7.234 +        stdvga_outb(addr+0, data >>  0);
   7.235 +        stdvga_outb(addr+1, data >>  8);
   7.236 +        stdvga_outb(addr+2, data >> 16);
   7.237 +        stdvga_outb(addr+3, data >> 24);
   7.238 +        break;
   7.239 +
   7.240 +    case 8:
   7.241 +        stdvga_outb(addr+0, data >>  0);
   7.242 +        stdvga_outb(addr+1, data >>  8);
   7.243 +        stdvga_outb(addr+2, data >> 16);
   7.244 +        stdvga_outb(addr+3, data >> 24);
   7.245 +        stdvga_outb(addr+4, data >> 32);
   7.246 +        stdvga_outb(addr+5, data >> 40);
   7.247 +        stdvga_outb(addr+6, data >> 48);
   7.248 +        stdvga_outb(addr+7, data >> 56);
   7.249 +        break;
   7.250 +
   7.251 +    default:
   7.252 +        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
   7.253 +    }
   7.254 +}
   7.255 +
   7.256 +static void stdvga_out(ioreq_t *p)
   7.257 +{
   7.258 +    if (p->data_is_ptr) {
   7.259 +        int i, sign = p->df ? -1 : 1;
   7.260 +        uint64_t addr = p->addr, data = p->data, tmp;
   7.261 +        for (i = 0; i < p->count; i++) {
   7.262 +            hvm_copy_from_guest_phys(&tmp, data, p->size);
   7.263 +            stdvga_outv(addr, tmp, p->size);
   7.264 +            data += sign * p->size;
   7.265 +            addr += sign * p->size;
   7.266 +        }
   7.267 +    }
   7.268 +    else
   7.269 +        stdvga_outv(p->addr, p->data, p->size);
   7.270 +}
   7.271 +
   7.272 +int stdvga_intercept_pio(ioreq_t *p)
   7.273 +{
   7.274 +    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
   7.275 +    int buf = 0;
   7.276 +
   7.277 +    if (p->size > 8) {
   7.278 +        gdprintk(XENLOG_WARNING, "stdvga bad access size %d\n", (int)p->size);
   7.279 +        return 0;
   7.280 +    }
   7.281 +
   7.282 +    spin_lock(&s->lock);
   7.283 +    if ( p->dir == IOREQ_READ ) {
   7.284 +        if (p->size != 1)
   7.285 +            gdprintk(XENLOG_WARNING, "unexpected io size:%d\n", (int)p->size);
   7.286 +        if (!(p->addr == 0x3c5 && s->sr_index >= sizeof(sr_mask)) &&
   7.287 +            !(p->addr == 0x3cf && s->gr_index >= sizeof(gr_mask)))
   7.288 +        {
   7.289 +            p->data = stdvga_in(p);
   7.290 +            buf = 1;
   7.291 +        }
   7.292 +    }
   7.293 +    else {
   7.294 +        stdvga_out(p);
   7.295 +        buf = 1;
   7.296 +    }
   7.297 +
   7.298 +    if (buf && hvm_buffered_io_send(p)) {
   7.299 +        UPDATE_STATS(s->stats.nr_pio_buffered_wr++);
   7.300 +        spin_unlock(&s->lock);
   7.301 +        return 1;
   7.302 +    }
   7.303 +    else {
   7.304 +        UPDATE_STATS(s->stats.nr_pio_unbuffered_wr++);
   7.305 +        spin_unlock(&s->lock);
   7.306 +        return 0;
   7.307 +    }
   7.308 +}
   7.309 +
   7.310 +#define GET_PLANE(data, p) (((data) >> ((p) * 8)) & 0xff)
   7.311 +
   7.312 +static uint8_t stdvga_mem_readb(uint64_t addr)
   7.313 +{
   7.314 +    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
   7.315 +    int plane;
   7.316 +    uint32_t ret;
   7.317 +
   7.318 +    addr &= 0x1ffff;
   7.319 +    if (addr >= 0x10000)
   7.320 +        return 0xff;
   7.321 +
   7.322 +    if (s->sr[4] & 0x08) {
   7.323 +        /* chain 4 mode : simplest access */
   7.324 +        ret = vram_b(s, addr);
   7.325 +    } else if (s->gr[5] & 0x10) {
   7.326 +        /* odd/even mode (aka text mode mapping) */
   7.327 +        plane = (s->gr[4] & 2) | (addr & 1);
   7.328 +        ret = vram_b(s, ((addr & ~1) << 1) | plane);
   7.329 +    } else {
   7.330 +        /* standard VGA latched access */
   7.331 +        s->latch = vram_l(s, addr);
   7.332 +
   7.333 +        if (!(s->gr[5] & 0x08)) {
   7.334 +            /* read mode 0 */
   7.335 +            plane = s->gr[4];
   7.336 +            ret = GET_PLANE(s->latch, plane);
   7.337 +        } else {
   7.338 +            /* read mode 1 */
   7.339 +            ret = (s->latch ^ mask16[s->gr[2]]) & mask16[s->gr[7]];
   7.340 +            ret |= ret >> 16;
   7.341 +            ret |= ret >> 8;
   7.342 +            ret = (~ret) & 0xff;
   7.343 +        }
   7.344 +    }
   7.345 +    return ret;
   7.346 +}
   7.347 +
   7.348 +static uint32_t stdvga_mem_read(uint32_t addr, uint32_t size)
   7.349 +{
   7.350 +    uint32_t data = 0;
   7.351 +
   7.352 +    switch (size) {
   7.353 +    case 1:
   7.354 +        data = stdvga_mem_readb(addr);
   7.355 +        break;
   7.356 +
   7.357 +    case 2:
   7.358 +        data = stdvga_mem_readb(addr);
   7.359 +        data |= stdvga_mem_readb(addr + 1) << 8;
   7.360 +        break;
   7.361 +
   7.362 +    case 4:
   7.363 +        data = stdvga_mem_readb(addr);
   7.364 +        data |= stdvga_mem_readb(addr + 1) << 8;
   7.365 +        data |= stdvga_mem_readb(addr + 2) << 16;
   7.366 +        data |= stdvga_mem_readb(addr + 3) << 24;
   7.367 +        break;
   7.368 +
   7.369 +    default:
   7.370 +        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
   7.371 +    }
   7.372 +    return data;
   7.373 +}
   7.374 +
   7.375 +static void stdvga_mem_writeb(uint64_t addr, uint32_t val)
   7.376 +{
   7.377 +    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
   7.378 +    int plane, write_mode, b, func_select, mask;
   7.379 +    uint32_t write_mask, bit_mask, set_mask;
   7.380 +
   7.381 +    addr &= 0x1ffff;
   7.382 +    if (addr >= 0x10000)
   7.383 +        return;
   7.384 +
   7.385 +    if (s->sr[4] & 0x08) {
   7.386 +        /* chain 4 mode : simplest access */
   7.387 +        plane = addr & 3;
   7.388 +        mask = (1 << plane);
   7.389 +        if (s->sr[2] & mask) {
   7.390 +            vram_b(s, addr) = val;
   7.391 +        }
   7.392 +    } else if (s->gr[5] & 0x10) {
   7.393 +        /* odd/even mode (aka text mode mapping) */
   7.394 +        plane = (s->gr[4] & 2) | (addr & 1);
   7.395 +        mask = (1 << plane);
   7.396 +        if (s->sr[2] & mask) {
   7.397 +            addr = ((addr & ~1) << 1) | plane;
   7.398 +            vram_b(s, addr) = val;
   7.399 +        }
   7.400 +    } else {
   7.401 +        write_mode = s->gr[5] & 3;
   7.402 +        switch(write_mode) {
   7.403 +        default:
   7.404 +        case 0:
   7.405 +            /* rotate */
   7.406 +            b = s->gr[3] & 7;
   7.407 +            val = ((val >> b) | (val << (8 - b))) & 0xff;
   7.408 +            val |= val << 8;
   7.409 +            val |= val << 16;
   7.410 +
   7.411 +            /* apply set/reset mask */
   7.412 +            set_mask = mask16[s->gr[1]];
   7.413 +            val = (val & ~set_mask) | (mask16[s->gr[0]] & set_mask);
   7.414 +            bit_mask = s->gr[8];
   7.415 +            break;
   7.416 +        case 1:
   7.417 +            val = s->latch;
   7.418 +            goto do_write;
   7.419 +        case 2:
   7.420 +            val = mask16[val & 0x0f];
   7.421 +            bit_mask = s->gr[8];
   7.422 +            break;
   7.423 +        case 3:
   7.424 +            /* rotate */
   7.425 +            b = s->gr[3] & 7;
   7.426 +            val = (val >> b) | (val << (8 - b));
   7.427 +
   7.428 +            bit_mask = s->gr[8] & val;
   7.429 +            val = mask16[s->gr[0]];
   7.430 +            break;
   7.431 +        }
   7.432 +
   7.433 +        /* apply logical operation */
   7.434 +        func_select = s->gr[3] >> 3;
   7.435 +        switch(func_select) {
   7.436 +        case 0:
   7.437 +        default:
   7.438 +            /* nothing to do */
   7.439 +            break;
   7.440 +        case 1:
   7.441 +            /* and */
   7.442 +            val &= s->latch;
   7.443 +            break;
   7.444 +        case 2:
   7.445 +            /* or */
   7.446 +            val |= s->latch;
   7.447 +            break;
   7.448 +        case 3:
   7.449 +            /* xor */
   7.450 +            val ^= s->latch;
   7.451 +            break;
   7.452 +        }
   7.453 +
   7.454 +        /* apply bit mask */
   7.455 +        bit_mask |= bit_mask << 8;
   7.456 +        bit_mask |= bit_mask << 16;
   7.457 +        val = (val & bit_mask) | (s->latch & ~bit_mask);
   7.458 +
   7.459 +    do_write:
   7.460 +        /* mask data according to sr[2] */
   7.461 +        mask = s->sr[2];
   7.462 +        write_mask = mask16[mask];
   7.463 +        vram_l(s, addr) =
   7.464 +            (vram_l(s, addr) & ~write_mask) |
   7.465 +            (val & write_mask);
   7.466 +    }
   7.467 +}
   7.468 +
   7.469 +static void stdvga_mem_write(uint32_t addr, uint32_t data, uint32_t size)
   7.470 +{
   7.471 +    /* Intercept mmio write */
   7.472 +    switch (size) {
   7.473 +    case 1:
   7.474 +        stdvga_mem_writeb(addr, (data >>  0) & 0xff);
   7.475 +        break;
   7.476 +
   7.477 +    case 2:
   7.478 +        stdvga_mem_writeb(addr+0, (data >>  0) & 0xff);
   7.479 +        stdvga_mem_writeb(addr+1, (data >>  8) & 0xff);
   7.480 +        break;
   7.481 +
   7.482 +    case 4:
   7.483 +        stdvga_mem_writeb(addr+0, (data >>  0) & 0xff);
   7.484 +        stdvga_mem_writeb(addr+1, (data >>  8) & 0xff);
   7.485 +        stdvga_mem_writeb(addr+2, (data >> 16) & 0xff);
   7.486 +        stdvga_mem_writeb(addr+3, (data >> 24) & 0xff);
   7.487 +        break;
   7.488 +
   7.489 +    default:
   7.490 +        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
   7.491 +    }
   7.492 +}
   7.493 +
   7.494 +static uint32_t read_data;
   7.495 +
   7.496 +static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
   7.497 +{
   7.498 +    int i;
   7.499 +    int sign = p->df ? -1 : 1;
   7.500 +
   7.501 +    if (p->data_is_ptr) {
   7.502 +        if (p->dir == IOREQ_READ ) {
   7.503 +            uint32_t addr = p->addr, data = p->data, tmp;
   7.504 +            for (i = 0; i < p->count; i++) {
   7.505 +                tmp = stdvga_mem_read(addr, p->size);
   7.506 +                hvm_copy_to_guest_phys(data, &tmp, p->size);
   7.507 +                data += sign * p->size;
   7.508 +                addr += sign * p->size;
   7.509 +            }
   7.510 +        }
   7.511 +        else {
   7.512 +            uint32_t addr = p->addr, data = p->data, tmp;
   7.513 +            for (i = 0; i < p->count; i++) {
   7.514 +                hvm_copy_from_guest_phys(&tmp, data, p->size);
   7.515 +                stdvga_mem_write(addr, tmp, p->size);
   7.516 +                data += sign * p->size;
   7.517 +                addr += sign * p->size;
   7.518 +            }
   7.519 +        }
   7.520 +    }
   7.521 +    else {
   7.522 +        if (p->dir == IOREQ_READ ) {
   7.523 +            uint32_t addr = p->addr;
   7.524 +            for (i = 0; i < p->count; i++) {
   7.525 +                p->data = stdvga_mem_read(addr, p->size);
   7.526 +                addr += sign * p->size;
   7.527 +            }
   7.528 +        }
   7.529 +        else {
   7.530 +            uint32_t addr = p->addr;
   7.531 +            for (i = 0; i < p->count; i++) {
   7.532 +                stdvga_mem_write(addr, p->data, p->size);
   7.533 +                addr += sign * p->size;
   7.534 +            }
   7.535 +        }
   7.536 +    }
   7.537 +
   7.538 +    read_data = p->data;
   7.539 +    return 1;
   7.540 +}
   7.541 +
   7.542 +static uint32_t op_and(uint32_t a, uint32_t b) { return a & b; }
   7.543 +static uint32_t op_or (uint32_t a, uint32_t b) { return a | b; }
   7.544 +static uint32_t op_xor(uint32_t a, uint32_t b) { return a ^ b; }
   7.545 +static uint32_t op_add(uint32_t a, uint32_t b) { return a + b; }
   7.546 +static uint32_t op_sub(uint32_t a, uint32_t b) { return a - b; }
   7.547 +static uint32_t (*op_array[])(uint32_t, uint32_t) = {
   7.548 +    [IOREQ_TYPE_AND] = op_and,
   7.549 +    [IOREQ_TYPE_OR ] = op_or,
   7.550 +    [IOREQ_TYPE_XOR] = op_xor,
   7.551 +    [IOREQ_TYPE_ADD] = op_add,
   7.552 +    [IOREQ_TYPE_SUB] = op_sub
   7.553 +};
   7.554 +
   7.555 +static int mmio_op(struct hvm_hw_stdvga *s, ioreq_t *p)
   7.556 +{
   7.557 +    uint32_t orig, mod = 0;
   7.558 +    orig = stdvga_mem_read(p->addr, p->size);
   7.559 +    if (p->dir == IOREQ_WRITE) {
   7.560 +        mod = (op_array[p->type])(orig, p->data);
   7.561 +        stdvga_mem_write(p->addr, mod, p->size);
   7.562 +    }
   7.563 +    // p->data = orig; // Can't modify p->data yet.  QEMU still needs to use it.  So return zero below.
   7.564 +    return 0; /* Don't try to buffer these operations */
   7.565 +}
   7.566 +
   7.567 +int stdvga_intercept_mmio(ioreq_t *p)
   7.568 +{
   7.569 +    struct domain *d = current->domain;
   7.570 +    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
   7.571 +    int buf = 0;
   7.572 +
   7.573 +    if (p->size > 8) {
   7.574 +        gdprintk(XENLOG_WARNING, "invalid mmio size %d\n", (int)p->size);
   7.575 +        return 0;
   7.576 +    }
   7.577 +
   7.578 +    spin_lock(&s->lock);
   7.579 +
   7.580 +    if (s->stdvga && s->cache) {
   7.581 +        switch (p->type) {
   7.582 +        case IOREQ_TYPE_COPY:
   7.583 +            buf = mmio_move(s, p);
   7.584 +            break;
   7.585 +        case IOREQ_TYPE_AND:
   7.586 +        case IOREQ_TYPE_OR:
   7.587 +        case IOREQ_TYPE_XOR:
   7.588 +        case IOREQ_TYPE_ADD:
   7.589 +        case IOREQ_TYPE_SUB:
   7.590 +            buf = mmio_op(s, p);
   7.591 +            break;
   7.592 +        default:
   7.593 +            gdprintk(XENLOG_ERR, "unsupported mmio request type:%d "
   7.594 +                     "addr:0x%04x data:0x%04x size:%d count:%d state:%d isptr:%d dir:%d df:%d\n",
   7.595 +                     p->type,
   7.596 +                     (int)p->addr, (int)p->data, (int)p->size, (int)p->count, p->state,
   7.597 +                     p->data_is_ptr, p->dir, p->df);
   7.598 +            s->cache = 0;
   7.599 +        }
   7.600 +    }
   7.601 +    if (buf && hvm_buffered_io_send(p)) {
   7.602 +        UPDATE_STATS(p->dir == IOREQ_READ ? s->stats.nr_mmio_buffered_rd++ : s->stats.nr_mmio_buffered_wr++);
   7.603 +        spin_unlock(&s->lock);
   7.604 +        return 1;
   7.605 +    }
   7.606 +    else {
   7.607 +        UPDATE_STATS(p->dir == IOREQ_READ ? s->stats.nr_mmio_unbuffered_rd++ : s->stats.nr_mmio_unbuffered_wr++);
   7.608 +        spin_unlock(&s->lock);
   7.609 +        return 0;
   7.610 +    }
   7.611 +}
   7.612 +
   7.613 +void stdvga_init(struct domain *d)
   7.614 +{
   7.615 +    int i;
   7.616 +    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
   7.617 +    memset(s, 0, sizeof(*s));
   7.618 +    spin_lock_init(&s->lock);
   7.619 +    
   7.620 +    for (i = 0; i != ARRAY_SIZE(s->vram_ptr); i++) {
   7.621 +        struct page_info *vram_page;
   7.622 +        vram_page = alloc_domheap_page(NULL);
   7.623 +        if (!vram_page)
   7.624 +            break;
   7.625 +        s->vram_ptr[i] = page_to_virt(vram_page);
   7.626 +        memset(s->vram_ptr[i], 0, PAGE_SIZE);
   7.627 +    }
   7.628 +    if (i == ARRAY_SIZE(s->vram_ptr)) {
   7.629 +        register_portio_handler(d, 0x3c4, 2, stdvga_intercept_pio); /* sequencer registers */
   7.630 +        register_portio_handler(d, 0x3ce, 2, stdvga_intercept_pio); /* graphics registers */
   7.631 +        register_buffered_io_handler(d, 0xa0000, 0x10000, stdvga_intercept_mmio); /* mmio */
   7.632 +    }
   7.633 +}
   7.634 +
   7.635 +void stdvga_deinit(struct domain *d)
   7.636 +{
   7.637 +    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
   7.638 +    int i;
   7.639 +    for (i = 0; i != ARRAY_SIZE(s->vram_ptr); i++) {
   7.640 +        struct page_info *vram_page;
   7.641 +        if (s->vram_ptr[i] == NULL)
   7.642 +            continue;
   7.643 +        vram_page = virt_to_page(s->vram_ptr[i]);
   7.644 +        free_domheap_page(vram_page);
   7.645 +        s->vram_ptr[i] = NULL;
   7.646 +    }
   7.647 +}
   7.648 +
   7.649 +#ifdef STDVGA_STATS
   7.650 +static void stdvga_stats_dump(unsigned char key)
   7.651 +{
   7.652 +    struct domain *d;
   7.653 +
   7.654 +    printk("%s: key '%c' pressed\n", __FUNCTION__, key);
   7.655 +
   7.656 +    rcu_read_lock(&domlist_read_lock);
   7.657 +
   7.658 +    for_each_domain ( d )
   7.659 +    {
   7.660 +        struct hvm_hw_stdvga *s;
   7.661 +        int i;
   7.662 +
   7.663 +        if ( !is_hvm_domain(d) )
   7.664 +            continue;
   7.665 +
   7.666 +        s = &d->arch.hvm_domain.stdvga;
   7.667 +        spin_lock(&s->lock);
   7.668 +        printk("\n>>> Domain %d <<<\n", d->domain_id);
   7.669 +        printk("    modes: stdvga:%d caching:%d\n", s->stdvga, s->cache);
   7.670 +        printk("                       %8s %8s\n", "read", "write");
   7.671 +        printk("    nr_mmio_buffered:  %8u %8u\n", s->stats.nr_mmio_buffered_rd, s->stats.nr_mmio_buffered_wr);
   7.672 +        printk("    nr_mmio_unbuffered:%8u %8u\n", s->stats.nr_mmio_unbuffered_rd, s->stats.nr_mmio_unbuffered_wr);
   7.673 +        printk("    nr_pio_buffered:   %8u %8u\n", s->stats.nr_pio_buffered_rd, s->stats.nr_pio_buffered_wr);
   7.674 +        printk("    nr_pio_unbuffered: %8u %8u\n", s->stats.nr_pio_unbuffered_rd, s->stats.nr_pio_unbuffered_wr);
   7.675 +
   7.676 +        for (i = 0; i != sizeof(s->sr); i++) {
   7.677 +            if (i % 8 == 0)
   7.678 +                printk("    sr[0x%02x] ", i);
   7.679 +            printk("%02x ", s->sr[i]);
   7.680 +            if (i % 8 == 7)
   7.681 +                printk("\n");
   7.682 +        }
   7.683 +        if (i % 8 != 7)
   7.684 +            printk("\n");
   7.685 +
   7.686 +        for (i = 0; i != sizeof(s->gr); i++) {
   7.687 +            if (i % 8 == 0)
   7.688 +                printk("    gr[0x%02x] ", i);
   7.689 +            printk("%02x ", s->gr[i]);
   7.690 +            if (i % 8 == 7)
   7.691 +                printk("\n");
   7.692 +        }
   7.693 +        if (i % 8 != 7)
   7.694 +            printk("\n");
   7.695 +
   7.696 +        memset(&s->stats, 0, sizeof(s->stats));
   7.697 +
   7.698 +        spin_unlock(&s->lock);
   7.699 +    }
   7.700 +
   7.701 +    rcu_read_unlock(&domlist_read_lock);
   7.702 +}
   7.703 +
   7.704 +#include <xen/keyhandler.h>
   7.705 +
   7.706 +static int __init setup_stdvga_stats_dump(void)
   7.707 +{
   7.708 +    register_keyhandler('<', stdvga_stats_dump, "dump stdvga stats");
   7.709 +    return 0;
   7.710 +}
   7.711 +
   7.712 +__initcall(setup_stdvga_stats_dump);
   7.713 +
   7.714 +#endif
   7.715 +
     8.1 --- a/xen/include/asm-x86/hvm/domain.h	Fri Oct 26 10:00:10 2007 +0100
     8.2 +++ b/xen/include/asm-x86/hvm/domain.h	Fri Oct 26 10:32:20 2007 +0100
     8.3 @@ -51,6 +51,7 @@ struct hvm_domain {
     8.4      struct hvm_irq         irq;
     8.5      struct hvm_hw_vpic     vpic[2]; /* 0=master; 1=slave */
     8.6      struct hvm_vioapic    *vioapic;
     8.7 +    struct hvm_hw_stdvga   stdvga;
     8.8  
     8.9      /* hvm_print_line() logging. */
    8.10      char                   pbuf[80];
     9.1 --- a/xen/include/asm-x86/hvm/io.h	Fri Oct 26 10:00:10 2007 +0100
     9.2 +++ b/xen/include/asm-x86/hvm/io.h	Fri Oct 26 10:32:20 2007 +0100
     9.3 @@ -80,10 +80,11 @@ struct hvm_io_op {
     9.4      struct cpu_user_regs    io_context; /* current context */
     9.5  };
     9.6  
     9.7 -#define MAX_IO_HANDLER              9
     9.8 +#define MAX_IO_HANDLER             12
     9.9  
    9.10  #define HVM_PORTIO                  0
    9.11  #define HVM_MMIO                    1
    9.12 +#define HVM_BUFFERED_IO             2
    9.13  
    9.14  typedef int (*intercept_action_t)(ioreq_t *);
    9.15  typedef unsigned long (*hvm_mmio_read_t)(struct vcpu *v,
    9.16 @@ -126,9 +127,13 @@ static inline int hvm_portio_intercept(i
    9.17      return hvm_io_intercept(p, HVM_PORTIO);
    9.18  }
    9.19  
    9.20 +static inline int hvm_buffered_io_intercept(ioreq_t *p)
    9.21 +{
    9.22 +    return hvm_io_intercept(p, HVM_BUFFERED_IO);
    9.23 +}
    9.24 +
    9.25  extern int hvm_mmio_intercept(ioreq_t *p);
    9.26  extern int hvm_buffered_io_send(ioreq_t *p);
    9.27 -extern int hvm_buffered_io_intercept(ioreq_t *p);
    9.28  
    9.29  static inline int register_portio_handler(
    9.30      struct domain *d, unsigned long addr,
    9.31 @@ -137,6 +142,13 @@ static inline int register_portio_handle
    9.32      return register_io_handler(d, addr, size, action, HVM_PORTIO);
    9.33  }
    9.34  
    9.35 +static inline int register_buffered_io_handler(
    9.36 +    struct domain *d, unsigned long addr,
    9.37 +    unsigned long size, intercept_action_t action)
    9.38 +{
    9.39 +    return register_io_handler(d, addr, size, action, HVM_BUFFERED_IO);
    9.40 +}
    9.41 +
    9.42  #if defined(__i386__) || defined(__x86_64__)
    9.43  static inline int irq_masked(unsigned long eflags)
    9.44  {
    9.45 @@ -154,5 +166,38 @@ extern void hvm_io_assist(void);
    9.46  extern void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
    9.47                           union vioapic_redir_entry *ent);
    9.48  
    9.49 +
    9.50 +#undef  STDVGA_STATS /* #define to enable stdvga statistics */
    9.51 +#undef  STDVGA_CHECK /* debug: ensure cached value matches qemu value */
    9.52 +
    9.53 +struct hvm_hw_stdvga {
    9.54 +    uint8_t sr_index;
    9.55 +    uint8_t sr[0x18];
    9.56 +    uint8_t gr_index;
    9.57 +    uint8_t gr[256];
    9.58 +    uint32_t latch;
    9.59 +    int stdvga;
    9.60 +    int cache;
    9.61 +    uint8_t *vram_ptr[64];  /* shadow of 0xa0000-0xaffff */
    9.62 +    spinlock_t lock;
    9.63 +    
    9.64 +#ifdef STDVGA_STATS
    9.65 +    struct {
    9.66 +        uint32_t nr_mmio_buffered_rd;
    9.67 +        uint32_t nr_mmio_buffered_wr;
    9.68 +        uint32_t nr_mmio_unbuffered_rd;
    9.69 +        uint32_t nr_mmio_unbuffered_wr;
    9.70 +        uint32_t nr_pio_buffered_rd;
    9.71 +        uint32_t nr_pio_buffered_wr;
    9.72 +        uint32_t nr_pio_unbuffered_rd;
    9.73 +        uint32_t nr_pio_unbuffered_wr;
    9.74 +    } stats;
    9.75 +#endif
    9.76 +};
    9.77 +
    9.78 +extern void stdvga_init(struct domain *d);
    9.79 +extern void stdvga_deinit(struct domain *d);
    9.80 +extern void stdvga_check_cached_value(ioreq_t *p);
    9.81 +
    9.82  #endif /* __ASM_X86_HVM_IO_H__ */
    9.83  
    10.1 --- a/xen/include/public/hvm/ioreq.h	Fri Oct 26 10:00:10 2007 +0100
    10.2 +++ b/xen/include/public/hvm/ioreq.h	Fri Oct 26 10:32:20 2007 +0100
    10.3 @@ -77,14 +77,27 @@ struct shared_iopage {
    10.4  };
    10.5  typedef struct shared_iopage shared_iopage_t;
    10.6  
    10.7 -#define IOREQ_BUFFER_SLOT_NUM     80
    10.8 +#pragma pack(push,2)
    10.9 +
   10.10 +struct buf_ioreq {
   10.11 +    uint8_t  type;   /*  I/O type                    */
   10.12 +    uint8_t  dir:1;  /*  1=read, 0=write             */
    10.13 +    uint8_t  size:2; /*  0=>1, 1=>2, 2=>4, 3=>8. If 8 then use two contig buf_ioreqs */
   10.14 +    uint32_t addr:20; /*  physical address or high-order data */
   10.15 +    uint16_t data;   /*  (low order) data            */
   10.16 +};
   10.17 +typedef struct buf_ioreq buf_ioreq_t;
   10.18 +
   10.19 +#define IOREQ_BUFFER_SLOT_NUM     672
   10.20  struct buffered_iopage {
   10.21 -    unsigned int    read_pointer;
   10.22 -    unsigned int    write_pointer;
   10.23 -    ioreq_t         ioreq[IOREQ_BUFFER_SLOT_NUM];
   10.24 +    volatile unsigned int read_pointer;
   10.25 +    volatile unsigned int write_pointer;
   10.26 +    buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM];
   10.27  }; /* NB. Size of this structure must be no greater than one page. */
   10.28  typedef struct buffered_iopage buffered_iopage_t;
   10.29  
   10.30 +#pragma pack(pop)
   10.31 +
   10.32  #if defined(__ia64__)
   10.33  struct pio_buffer {
   10.34      uint32_t page_offset;