ia64/xen-unstable

changeset 17454:0fd09ab6043c

x86, hvm: I/O emulation handlers return X86EMUL_* return codes.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Apr 15 13:28:02 2008 +0100 (2008-04-15)
parents 592e3ab73459
children 107340347279
files xen/arch/x86/hvm/emulate.c xen/arch/x86/hvm/hpet.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/i8254.c xen/arch/x86/hvm/intercept.c xen/arch/x86/hvm/io.c xen/arch/x86/hvm/pmtimer.c xen/arch/x86/hvm/rtc.c xen/arch/x86/hvm/stdvga.c xen/arch/x86/hvm/vioapic.c xen/arch/x86/hvm/vlapic.c xen/arch/x86/hvm/vpic.c xen/include/asm-x86/hvm/io.h
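For reference, the conversion below makes every port and MMIO intercept handler
report an X86EMUL_* code (X86EMUL_OKAY, X86EMUL_RETRY, X86EMUL_UNHANDLEABLE)
instead of the old 0/1 or void conventions, with read handlers returning data
through a pointer argument. A minimal port I/O handler under the new convention
might look like the following sketch; it is not part of the changeset, the
handler name and port number are hypothetical, and only the portio_action_t
signature and return codes are taken from the diff itself.

/* Hypothetical port I/O handler following the new return-code convention. */
static int handle_example_io(
    int dir, uint32_t port, uint32_t bytes, uint32_t *val)
{
    if ( bytes != 1 )
        return X86EMUL_UNHANDLEABLE;  /* forward to the external device model */

    if ( dir == IOREQ_READ )
        *val = 0xff;                  /* placeholder device state */
    /* else IOREQ_WRITE: a real model would latch *val into device state */

    return X86EMUL_OKAY;              /* emulation completed inside Xen */
}

/* Registration is unchanged apart from register_portio_handler() now
 * returning void, e.g.:
 *     register_portio_handler(d, 0x510, 2, handle_example_io);
 * (0x510 is an arbitrary example port.) */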
     1.1 --- a/xen/arch/x86/hvm/emulate.c	Tue Apr 15 10:24:55 2008 +0100
     1.2 +++ b/xen/arch/x86/hvm/emulate.c	Tue Apr 15 13:28:02 2008 +0100
     1.3 @@ -20,12 +20,13 @@
     1.4  #include <asm/hvm/support.h>
     1.5  
     1.6  static int hvmemul_do_io(
     1.7 -    int is_mmio, paddr_t addr, unsigned long count, int size,
     1.8 +    int is_mmio, paddr_t addr, unsigned long *reps, int size,
     1.9      paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
    1.10  {
    1.11      struct vcpu *curr = current;
    1.12      vcpu_iodata_t *vio = get_ioreq(curr);
    1.13      ioreq_t *p = &vio->vp_ioreq;
    1.14 +    int rc;
    1.15  
    1.16      switch ( curr->arch.hvm_vcpu.io_state )
    1.17      {
    1.18 @@ -56,40 +57,58 @@ static int hvmemul_do_io(
    1.19      p->type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO;
    1.20      p->size = size;
    1.21      p->addr = addr;
    1.22 -    p->count = count;
    1.23 +    p->count = *reps;
    1.24      p->df = df;
    1.25      p->data = value;
    1.26      p->io_count++;
    1.27  
    1.28 -    if ( is_mmio
    1.29 -         ? (hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p))
    1.30 -         : hvm_portio_intercept(p) )
    1.31 +    if ( is_mmio )
    1.32 +    {
    1.33 +        rc = hvm_mmio_intercept(p);
    1.34 +        if ( rc == X86EMUL_UNHANDLEABLE )
    1.35 +            rc = hvm_buffered_io_intercept(p);
    1.36 +    }
    1.37 +    else
    1.38      {
    1.39 +        rc = hvm_portio_intercept(p);
    1.40 +    }
    1.41 +
    1.42 +    switch ( rc )
    1.43 +    {
    1.44 +    case X86EMUL_OKAY:
    1.45 +        *reps = p->count;
    1.46          p->state = STATE_IORESP_READY;
    1.47          hvm_io_assist();
    1.48          if ( val != NULL )
    1.49              *val = curr->arch.hvm_vcpu.io_data;
    1.50          curr->arch.hvm_vcpu.io_state = HVMIO_none;
    1.51 -        return X86EMUL_OKAY;
    1.52 +        break;
    1.53 +    case X86EMUL_UNHANDLEABLE:
    1.54 +        hvm_send_assist_req(curr);
    1.55 +        rc = (val != NULL) ? X86EMUL_RETRY : X86EMUL_OKAY;
    1.56 +        break;
    1.57 +    case X86EMUL_RETRY:
    1.58 +        break;
    1.59 +    default:
    1.60 +        BUG();
    1.61      }
    1.62  
    1.63 -    hvm_send_assist_req(curr);
    1.64 -    return (val != NULL) ? X86EMUL_RETRY : X86EMUL_OKAY;
    1.65 +    return rc;
    1.66  }
    1.67  
    1.68  static int hvmemul_do_pio(
    1.69 -    unsigned long port, unsigned long count, int size,
    1.70 +    unsigned long port, unsigned long *reps, int size,
    1.71      paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
    1.72  {
    1.73 -    return hvmemul_do_io(0, port, count, size, value,
    1.74 +    return hvmemul_do_io(0, port, reps, size, value,
    1.75                           dir, df, value_is_ptr, val);
    1.76  }
    1.77  
    1.78  static int hvmemul_do_mmio(
    1.79 -    paddr_t gpa, unsigned long count, int size,
    1.80 +    paddr_t gpa, unsigned long *reps, int size,
    1.81      paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
    1.82  {
    1.83 -    return hvmemul_do_io(1, gpa, count, size, value,
    1.84 +    return hvmemul_do_io(1, gpa, reps, size, value,
    1.85                           dir, df, value_is_ptr, val);
    1.86  }
    1.87  
    1.88 @@ -209,7 +228,7 @@ static int __hvmemul_read(
    1.89      struct hvm_emulate_ctxt *hvmemul_ctxt)
    1.90  {
    1.91      struct vcpu *curr = current;
    1.92 -    unsigned long addr;
    1.93 +    unsigned long addr, reps = 1;
    1.94      uint32_t pfec = PFEC_page_present;
    1.95      paddr_t gpa;
    1.96      int rc;
    1.97 @@ -229,7 +248,8 @@ static int __hvmemul_read(
    1.98              return X86EMUL_UNHANDLEABLE;
    1.99          gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
   1.100          if ( (off + bytes) <= PAGE_SIZE )
   1.101 -            return hvmemul_do_mmio(gpa, 1, bytes, 0, IOREQ_READ, 0, 0, val);
   1.102 +            return hvmemul_do_mmio(gpa, &reps, bytes, 0,
   1.103 +                                   IOREQ_READ, 0, 0, val);
   1.104      }
   1.105  
   1.106      if ( (seg != x86_seg_none) &&
   1.107 @@ -254,7 +274,7 @@ static int __hvmemul_read(
   1.108          if ( rc != X86EMUL_OKAY )
   1.109              return rc;
   1.110  
   1.111 -        return hvmemul_do_mmio(gpa, 1, bytes, 0, IOREQ_READ, 0, 0, val);
   1.112 +        return hvmemul_do_mmio(gpa, &reps, bytes, 0, IOREQ_READ, 0, 0, val);
   1.113      }
   1.114  
   1.115      return X86EMUL_OKAY;
   1.116 @@ -305,7 +325,7 @@ static int hvmemul_write(
   1.117      struct hvm_emulate_ctxt *hvmemul_ctxt =
   1.118          container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
   1.119      struct vcpu *curr = current;
   1.120 -    unsigned long addr;
   1.121 +    unsigned long addr, reps = 1;
   1.122      uint32_t pfec = PFEC_page_present | PFEC_write_access;
   1.123      paddr_t gpa;
   1.124      int rc;
   1.125 @@ -321,8 +341,8 @@ static int hvmemul_write(
   1.126          unsigned int off = addr & (PAGE_SIZE - 1);
   1.127          gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
   1.128          if ( (off + bytes) <= PAGE_SIZE )
   1.129 -            return hvmemul_do_mmio(gpa, 1, bytes, val, IOREQ_WRITE,
   1.130 -                                   0, 0, NULL);
   1.131 +            return hvmemul_do_mmio(gpa, &reps, bytes, val,
   1.132 +                                   IOREQ_WRITE, 0, 0, NULL);
   1.133      }
   1.134  
   1.135      if ( (seg != x86_seg_none) &&
   1.136 @@ -342,7 +362,8 @@ static int hvmemul_write(
   1.137          if ( rc != X86EMUL_OKAY )
   1.138              return rc;
   1.139  
   1.140 -        return hvmemul_do_mmio(gpa, 1, bytes, val, IOREQ_WRITE, 0, 0, NULL);
   1.141 +        return hvmemul_do_mmio(gpa, &reps, bytes, val,
   1.142 +                               IOREQ_WRITE, 0, 0, NULL);
   1.143      }
   1.144  
   1.145      return X86EMUL_OKAY;
   1.146 @@ -389,7 +410,7 @@ static int hvmemul_rep_ins(
   1.147      if ( rc != X86EMUL_OKAY )
   1.148          return rc;
   1.149  
   1.150 -    return hvmemul_do_pio(src_port, *reps, bytes_per_rep, gpa, IOREQ_READ,
   1.151 +    return hvmemul_do_pio(src_port, reps, bytes_per_rep, gpa, IOREQ_READ,
   1.152                            !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
   1.153  }
   1.154  
   1.155 @@ -422,7 +443,7 @@ static int hvmemul_rep_outs(
   1.156      if ( rc != X86EMUL_OKAY )
   1.157          return rc;
   1.158  
   1.159 -    return hvmemul_do_pio(dst_port, *reps, bytes_per_rep, gpa, IOREQ_WRITE,
   1.160 +    return hvmemul_do_pio(dst_port, reps, bytes_per_rep, gpa, IOREQ_WRITE,
   1.161                            !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
   1.162  }
   1.163  
   1.164 @@ -472,14 +493,14 @@ static int hvmemul_rep_movs(
   1.165      (void)gfn_to_mfn_current(sgpa >> PAGE_SHIFT, &p2mt);
   1.166      if ( !p2m_is_ram(p2mt) )
   1.167          return hvmemul_do_mmio(
   1.168 -            sgpa, *reps, bytes_per_rep, dgpa, IOREQ_READ,
   1.169 +            sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ,
   1.170              !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
   1.171  
   1.172      (void)gfn_to_mfn_current(dgpa >> PAGE_SHIFT, &p2mt);
   1.173      if ( p2m_is_ram(p2mt) )
   1.174          return X86EMUL_UNHANDLEABLE;
   1.175      return hvmemul_do_mmio(
   1.176 -        dgpa, *reps, bytes_per_rep, sgpa, IOREQ_WRITE,
   1.177 +        dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE,
   1.178          !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
   1.179  }
   1.180  
   1.181 @@ -516,7 +537,8 @@ static int hvmemul_read_io(
   1.182      unsigned long *val,
   1.183      struct x86_emulate_ctxt *ctxt)
   1.184  {
   1.185 -    return hvmemul_do_pio(port, 1, bytes, 0, IOREQ_READ, 0, 0, val);
   1.186 +    unsigned long reps = 1;
   1.187 +    return hvmemul_do_pio(port, &reps, bytes, 0, IOREQ_READ, 0, 0, val);
   1.188  }
   1.189  
   1.190  static int hvmemul_write_io(
   1.191 @@ -525,7 +547,8 @@ static int hvmemul_write_io(
   1.192      unsigned long val,
   1.193      struct x86_emulate_ctxt *ctxt)
   1.194  {
   1.195 -    return hvmemul_do_pio(port, 1, bytes, val, IOREQ_WRITE, 0, 0, NULL);
   1.196 +    unsigned long reps = 1;
   1.197 +    return hvmemul_do_pio(port, &reps, bytes, val, IOREQ_WRITE, 0, 0, NULL);
   1.198  }
   1.199  
   1.200  static int hvmemul_read_cr(
     2.1 --- a/xen/arch/x86/hvm/hpet.c	Tue Apr 15 10:24:55 2008 +0100
     2.2 +++ b/xen/arch/x86/hvm/hpet.c	Tue Apr 15 13:28:02 2008 +0100
     2.3 @@ -150,8 +150,9 @@ static inline uint64_t hpet_read_maincou
     2.4          return h->hpet.mc64;
     2.5  }
     2.6  
     2.7 -static unsigned long hpet_read(
     2.8 -    struct vcpu *v, unsigned long addr, unsigned long length)
     2.9 +static int hpet_read(
    2.10 +    struct vcpu *v, unsigned long addr, unsigned long length,
    2.11 +    unsigned long *pval)
    2.12  {
    2.13      HPETState *h = &v->domain->arch.hvm_domain.pl_time.vhpet;
    2.14      unsigned long result;
    2.15 @@ -160,7 +161,10 @@ static unsigned long hpet_read(
    2.16      addr &= HPET_MMAP_SIZE-1;
    2.17  
    2.18      if ( hpet_check_access_length(addr, length) != 0 )
    2.19 -        return ~0UL;
    2.20 +    {
    2.21 +        result = ~0ul;
    2.22 +        goto out;
    2.23 +    }
    2.24  
    2.25      spin_lock(&h->lock);
    2.26  
    2.27 @@ -174,7 +178,9 @@ static unsigned long hpet_read(
    2.28  
    2.29      spin_unlock(&h->lock);
    2.30  
    2.31 -    return result;
    2.32 + out:
    2.33 +    *pval = result;
    2.34 +    return X86EMUL_OKAY;
    2.35  }
    2.36  
    2.37  static void hpet_stop_timer(HPETState *h, unsigned int tn)
    2.38 @@ -234,7 +240,7 @@ static inline uint64_t hpet_fixup_reg(
    2.39      return new;
    2.40  }
    2.41  
    2.42 -static void hpet_write(
    2.43 +static int hpet_write(
    2.44      struct vcpu *v, unsigned long addr,
    2.45      unsigned long length, unsigned long val)
    2.46  {
    2.47 @@ -245,7 +251,7 @@ static void hpet_write(
    2.48      addr &= HPET_MMAP_SIZE-1;
    2.49  
    2.50      if ( hpet_check_access_length(addr, length) != 0 )
    2.51 -        return;
    2.52 +        goto out;
    2.53  
    2.54      spin_lock(&h->lock);
    2.55  
    2.56 @@ -349,6 +355,9 @@ static void hpet_write(
    2.57      }
    2.58  
    2.59      spin_unlock(&h->lock);
    2.60 +
    2.61 + out:
    2.62 +    return X86EMUL_OKAY;
    2.63  }
    2.64  
    2.65  static int hpet_range(struct vcpu *v, unsigned long addr)
     3.1 --- a/xen/arch/x86/hvm/hvm.c	Tue Apr 15 10:24:55 2008 +0100
     3.2 +++ b/xen/arch/x86/hvm/hvm.c	Tue Apr 15 13:28:02 2008 +0100
     3.3 @@ -277,7 +277,7 @@ static int hvm_print_line(
     3.4      }
     3.5      spin_unlock(&hd->pbuf_lock);
     3.6  
     3.7 -    return 1;
     3.8 +    return X86EMUL_OKAY;
     3.9  }
    3.10  
    3.11  int hvm_domain_initialise(struct domain *d)
     4.1 --- a/xen/arch/x86/hvm/i8254.c	Tue Apr 15 10:24:55 2008 +0100
     4.2 +++ b/xen/arch/x86/hvm/i8254.c	Tue Apr 15 13:28:02 2008 +0100
     4.3 @@ -487,7 +487,7 @@ static int handle_pit_io(
     4.4      if ( bytes != 1 )
     4.5      {
     4.6          gdprintk(XENLOG_WARNING, "PIT bad access\n");
     4.7 -        return 1;
     4.8 +        return X86EMUL_OKAY;
     4.9      }
    4.10  
    4.11      if ( dir == IOREQ_WRITE )
    4.12 @@ -502,7 +502,7 @@ static int handle_pit_io(
    4.13              gdprintk(XENLOG_WARNING, "PIT: read A1:A0=3!\n");
    4.14      }
    4.15  
    4.16 -    return 1;
    4.17 +    return X86EMUL_OKAY;
    4.18  }
    4.19  
    4.20  static void speaker_ioport_write(
    4.21 @@ -526,11 +526,7 @@ static int handle_speaker_io(
    4.22  {
    4.23      struct PITState *vpit = vcpu_vpit(current);
    4.24  
    4.25 -    if ( bytes != 1 )
    4.26 -    {
    4.27 -        gdprintk(XENLOG_WARNING, "PIT_SPEAKER bad access\n");
    4.28 -        return 1;
    4.29 -    }
    4.30 +    BUG_ON(bytes != 1);
    4.31  
    4.32      spin_lock(&vpit->lock);
    4.33  
    4.34 @@ -541,7 +537,7 @@ static int handle_speaker_io(
    4.35  
    4.36      spin_unlock(&vpit->lock);
    4.37  
    4.38 -    return 1;
    4.39 +    return X86EMUL_OKAY;
    4.40  }
    4.41  
    4.42  int pv_pit_handler(int port, int data, int write)
     5.1 --- a/xen/arch/x86/hvm/intercept.c	Tue Apr 15 10:24:55 2008 +0100
     5.2 +++ b/xen/arch/x86/hvm/intercept.c	Tue Apr 15 13:28:02 2008 +0100
     5.3 @@ -45,53 +45,63 @@ static struct hvm_mmio_handler *hvm_mmio
     5.4      &vioapic_mmio_handler
     5.5  };
     5.6  
     5.7 -static inline void hvm_mmio_access(struct vcpu *v,
     5.8 -                                   ioreq_t *p,
     5.9 -                                   hvm_mmio_read_t read_handler,
    5.10 -                                   hvm_mmio_write_t write_handler)
    5.11 +static int hvm_mmio_access(struct vcpu *v,
    5.12 +                           ioreq_t *p,
    5.13 +                           hvm_mmio_read_t read_handler,
    5.14 +                           hvm_mmio_write_t write_handler)
    5.15  {
    5.16      unsigned long data;
    5.17 +    int rc = X86EMUL_OKAY, i, sign = p->df ? -1 : 1;
    5.18  
    5.19      if ( !p->data_is_ptr )
    5.20      {
    5.21          if ( p->dir == IOREQ_READ )
    5.22 -            p->data = read_handler(v, p->addr, p->size);
    5.23 -        else    /* p->dir == IOREQ_WRITE */
    5.24 -            write_handler(v, p->addr, p->size, p->data);
    5.25 +        {
    5.26 +            rc = read_handler(v, p->addr, p->size, &data);
    5.27 +            p->data = data;
    5.28 +        }
    5.29 +        else /* p->dir == IOREQ_WRITE */
    5.30 +            rc = write_handler(v, p->addr, p->size, p->data);
    5.31 +        return rc;
    5.32 +    }
    5.33 +
    5.34 +    if ( p->dir == IOREQ_READ )
    5.35 +    {
    5.36 +        for ( i = 0; i < p->count; i++ )
    5.37 +        {
    5.38 +            rc = read_handler(
    5.39 +                v,
    5.40 +                p->addr + (sign * i * p->size),
    5.41 +                p->size, &data);
    5.42 +            if ( rc != X86EMUL_OKAY )
    5.43 +                break;
    5.44 +            (void)hvm_copy_to_guest_phys(
    5.45 +                p->data + (sign * i * p->size),
    5.46 +                &data,
    5.47 +                p->size);
    5.48 +        }
    5.49      }
    5.50      else
    5.51      {
    5.52 -        int i, sign = (p->df) ? -1 : 1;
    5.53 -
    5.54 -        if ( p->dir == IOREQ_READ )
    5.55 +        for ( i = 0; i < p->count; i++ )
    5.56          {
    5.57 -            for ( i = 0; i < p->count; i++ )
    5.58 -            {
    5.59 -                data = read_handler(
    5.60 -                    v,
    5.61 -                    p->addr + (sign * i * p->size),
    5.62 -                    p->size);
    5.63 -                (void)hvm_copy_to_guest_phys(
    5.64 -                    p->data + (sign * i * p->size),
    5.65 -                    &data,
    5.66 -                    p->size);
    5.67 -            }
    5.68 -        }
    5.69 -        else
    5.70 -        {
    5.71 -            for ( i = 0; i < p->count; i++ )
    5.72 -            {
    5.73 -                (void)hvm_copy_from_guest_phys(
    5.74 -                    &data,
    5.75 -                    p->data + (sign * i * p->size),
    5.76 -                    p->size);
    5.77 -                write_handler(
    5.78 -                    v,
    5.79 -                    p->addr + (sign * i * p->size),
    5.80 -                    p->size, data);
    5.81 -            }
    5.82 +            (void)hvm_copy_from_guest_phys(
    5.83 +                &data,
    5.84 +                p->data + (sign * i * p->size),
    5.85 +                p->size);
    5.86 +            rc = write_handler(
    5.87 +                v,
    5.88 +                p->addr + (sign * i * p->size),
    5.89 +                p->size, data);
    5.90 +            if ( rc != X86EMUL_OKAY )
    5.91 +                break;
    5.92          }
    5.93      }
    5.94 +
    5.95 +    if ( (p->count = i) != 0 )
    5.96 +        rc = X86EMUL_OKAY;
    5.97 +
    5.98 +    return rc;
    5.99  }
   5.100  
   5.101  int hvm_mmio_intercept(ioreq_t *p)
   5.102 @@ -100,60 +110,62 @@ int hvm_mmio_intercept(ioreq_t *p)
   5.103      int i;
   5.104  
   5.105      for ( i = 0; i < HVM_MMIO_HANDLER_NR; i++ )
   5.106 -    {
   5.107          if ( hvm_mmio_handlers[i]->check_handler(v, p->addr) )
   5.108 -        {
   5.109 -            hvm_mmio_access(v, p,
   5.110 -                            hvm_mmio_handlers[i]->read_handler,
   5.111 -                            hvm_mmio_handlers[i]->write_handler);
   5.112 -            return 1;
   5.113 -        }
   5.114 -    }
   5.115 +            return hvm_mmio_access(
   5.116 +                v, p,
   5.117 +                hvm_mmio_handlers[i]->read_handler,
   5.118 +                hvm_mmio_handlers[i]->write_handler);
   5.119  
   5.120 -    return 0;
   5.121 +    return X86EMUL_UNHANDLEABLE;
   5.122  }
   5.123  
   5.124  static int process_portio_intercept(portio_action_t action, ioreq_t *p)
   5.125  {
   5.126 -    int rc = 1, i, sign = p->df ? -1 : 1;
   5.127 +    int rc = X86EMUL_OKAY, i, sign = p->df ? -1 : 1;
   5.128      uint32_t data;
   5.129  
   5.130 -    if ( p->dir == IOREQ_READ )
   5.131 +    if ( !p->data_is_ptr )
   5.132      {
   5.133 -        if ( !p->data_is_ptr )
   5.134 +        if ( p->dir == IOREQ_READ )
   5.135          {
   5.136              rc = action(IOREQ_READ, p->addr, p->size, &data);
   5.137              p->data = data;
   5.138          }
   5.139          else
   5.140          {
   5.141 -            for ( i = 0; i < p->count; i++ )
   5.142 -            {
   5.143 -                rc = action(IOREQ_READ, p->addr, p->size, &data);
   5.144 -                (void)hvm_copy_to_guest_phys(p->data + sign*i*p->size,
   5.145 -                                             &data, p->size);
   5.146 -            }
   5.147 +            data = p->data;
   5.148 +            rc = action(IOREQ_WRITE, p->addr, p->size, &data);
   5.149 +        }
   5.150 +        return rc;
   5.151 +    }
   5.152 +
   5.153 +    if ( p->dir == IOREQ_READ )
   5.154 +    {
   5.155 +        for ( i = 0; i < p->count; i++ )
   5.156 +        {
   5.157 +            rc = action(IOREQ_READ, p->addr, p->size, &data);
   5.158 +            if ( rc != X86EMUL_OKAY )
   5.159 +                break;
   5.160 +            (void)hvm_copy_to_guest_phys(p->data + sign*i*p->size,
   5.161 +                                         &data, p->size);
   5.162          }
   5.163      }
   5.164      else /* p->dir == IOREQ_WRITE */
   5.165      {
   5.166 -        if ( !p->data_is_ptr )
   5.167 -        {
   5.168 -            data = p->data;
   5.169 -            rc = action(IOREQ_WRITE, p->addr, p->size, &data);
   5.170 -        }
   5.171 -        else
   5.172 +        for ( i = 0; i < p->count; i++ )
   5.173          {
   5.174 -            for ( i = 0; i < p->count; i++ )
   5.175 -            {
   5.176 -                data = 0;
   5.177 -                (void)hvm_copy_from_guest_phys(&data, p->data + sign*i*p->size,
   5.178 -                                               p->size);
   5.179 -                rc = action(IOREQ_WRITE, p->addr, p->size, &data);
   5.180 -            }
   5.181 +            data = 0;
   5.182 +            (void)hvm_copy_from_guest_phys(&data, p->data + sign*i*p->size,
   5.183 +                                           p->size);
   5.184 +            rc = action(IOREQ_WRITE, p->addr, p->size, &data);
   5.185 +            if ( rc != X86EMUL_OKAY )
   5.186 +                break;
   5.187          }
   5.188      }
   5.189  
   5.190 +    if ( (p->count = i) != 0 )
   5.191 +        rc = X86EMUL_OKAY;
   5.192 +
   5.193      return rc;
   5.194  }
   5.195  
   5.196 @@ -170,7 +182,7 @@ int hvm_io_intercept(ioreq_t *p, int typ
   5.197      unsigned long addr, size;
   5.198  
   5.199      if ( (type == HVM_PORTIO) && (dpci_ioport_intercept(p)) )
   5.200 -        return 1;
   5.201 +        return X86EMUL_OKAY;
   5.202  
   5.203      for ( i = 0; i < handler->num_slot; i++ )
   5.204      {
   5.205 @@ -188,10 +200,10 @@ int hvm_io_intercept(ioreq_t *p, int typ
   5.206          }
   5.207      }
   5.208  
   5.209 -    return 0;
   5.210 +    return X86EMUL_UNHANDLEABLE;
   5.211  }
   5.212  
   5.213 -int register_io_handler(
   5.214 +void register_io_handler(
   5.215      struct domain *d, unsigned long addr, unsigned long size,
   5.216      void *action, int type)
   5.217  {
   5.218 @@ -207,9 +219,8 @@ int register_io_handler(
   5.219      else
   5.220          handler->hdl_list[num].action.mmio = action;
   5.221      handler->num_slot++;
   5.222 +}
   5.223  
   5.224 -    return 1;
   5.225 -}
   5.226  /*
   5.227   * Local variables:
   5.228   * mode: C
     6.1 --- a/xen/arch/x86/hvm/io.c	Tue Apr 15 10:24:55 2008 +0100
     6.2 +++ b/xen/arch/x86/hvm/io.c	Tue Apr 15 13:28:02 2008 +0100
     6.3 @@ -246,74 +246,59 @@ void hvm_io_assist(void)
     6.4  
     6.5  void dpci_ioport_read(uint32_t mport, ioreq_t *p)
     6.6  {
     6.7 -    uint64_t i;
     6.8 -    uint64_t z_data;
     6.9 -    uint64_t length = (p->count * p->size);
    6.10 +    int i, sign = p->df ? -1 : 1;
    6.11 +    uint32_t data = 0;
    6.12  
    6.13 -    for ( i = 0; i < length; i += p->size )
    6.14 +    for ( i = 0; i < p->count; i++ )
    6.15      {
    6.16 -        z_data = ~0ULL;
    6.17 -        
    6.18          switch ( p->size )
    6.19          {
    6.20          case 1:
    6.21 -            z_data = (uint64_t)inb(mport);
    6.22 +            data = inb(mport);
    6.23              break;
    6.24          case 2:
    6.25 -            z_data = (uint64_t)inw(mport);
    6.26 +            data = inw(mport);
    6.27              break;
    6.28          case 4:
    6.29 -            z_data = (uint64_t)inl(mport);
    6.30 +            data = inl(mport);
    6.31              break;
    6.32          default:
    6.33 -            gdprintk(XENLOG_ERR, "Error: unable to handle size: %"
    6.34 -                     PRId64 "\n", p->size);
    6.35 -            return;
    6.36 +            BUG();
    6.37          }
    6.38  
    6.39 -        p->data = z_data;
    6.40 -        if ( p->data_is_ptr &&
    6.41 -             hvm_copy_to_guest_phys(p->data + i, (void *)&z_data,
    6.42 -                                    (int)p->size) )
    6.43 -        {
    6.44 -            gdprintk(XENLOG_ERR, "Error: couldn't copy to hvm phys\n");
    6.45 -            return;
    6.46 -        }
    6.47 +        if ( p->data_is_ptr )
    6.48 +            (void)hvm_copy_to_guest_phys(
    6.49 +                p->data + (sign * i * p->size), &data, p->size);
    6.50 +        else
    6.51 +            p->data = data;
    6.52      }
    6.53  }
    6.54  
    6.55  void dpci_ioport_write(uint32_t mport, ioreq_t *p)
    6.56  {
    6.57 -    uint64_t i;
    6.58 -    uint64_t z_data = 0;
    6.59 -    uint64_t length = (p->count * p->size);
    6.60 +    int i, sign = p->df ? -1 : 1;
    6.61 +    uint32_t data;
    6.62  
    6.63 -    for ( i = 0; i < length; i += p->size )
    6.64 +    for ( i = 0; i < p->count; i++ )
    6.65      {
    6.66 -        z_data = p->data;
    6.67 -        if ( p->data_is_ptr &&
    6.68 -             hvm_copy_from_guest_phys((void *)&z_data,
    6.69 -                                      p->data + i, (int)p->size) )
    6.70 -        {
    6.71 -            gdprintk(XENLOG_ERR, "Error: couldn't copy from hvm phys\n");
    6.72 -            return;
    6.73 -        }
    6.74 +        data = p->data;
    6.75 +        if ( p->data_is_ptr )
    6.76 +            (void)hvm_copy_from_guest_phys(
    6.77 +                &data, p->data + (sign * i * p->size), p->size);
    6.78  
    6.79          switch ( p->size )
    6.80          {
    6.81          case 1:
    6.82 -            outb((uint8_t) z_data, mport);
    6.83 +            outb(data, mport);
    6.84              break;
    6.85          case 2:
    6.86 -            outw((uint16_t) z_data, mport);
    6.87 +            outw(data, mport);
    6.88              break;
    6.89          case 4:
    6.90 -            outl((uint32_t) z_data, mport);
    6.91 +            outl(data, mport);
    6.92              break;
    6.93          default:
    6.94 -            gdprintk(XENLOG_ERR, "Error: unable to handle size: %"
    6.95 -                     PRId64 "\n", p->size);
    6.96 -            break;
    6.97 +            BUG();
    6.98          }
    6.99      }
   6.100  }
     7.1 --- a/xen/arch/x86/hvm/pmtimer.c	Tue Apr 15 10:24:55 2008 +0100
     7.2 +++ b/xen/arch/x86/hvm/pmtimer.c	Tue Apr 15 13:28:02 2008 +0100
     7.3 @@ -169,7 +169,7 @@ static int handle_evt_io(
     7.4  
     7.5      spin_unlock(&s->lock);
     7.6  
     7.7 -    return 1;
     7.8 +    return X86EMUL_OKAY;
     7.9  }
    7.10  
    7.11  
    7.12 @@ -183,7 +183,7 @@ static int handle_pmt_io(
    7.13      if ( bytes != 4 )
    7.14      {
    7.15          gdprintk(XENLOG_WARNING, "HVM_PMT bad access\n");
    7.16 -        return 1;
    7.17 +        return X86EMUL_OKAY;
    7.18      }
    7.19      
    7.20      if ( dir == IOREQ_READ )
    7.21 @@ -192,10 +192,10 @@ static int handle_pmt_io(
    7.22          pmt_update_time(s);
    7.23          *val = s->pm.tmr_val;
    7.24          spin_unlock(&s->lock);
    7.25 -        return 1;
    7.26 +        return X86EMUL_OKAY;
    7.27      }
    7.28  
    7.29 -    return 0;
    7.30 +    return X86EMUL_UNHANDLEABLE;
    7.31  }
    7.32  
    7.33  static int pmtimer_save(struct domain *d, hvm_domain_context_t *h)
     8.1 --- a/xen/arch/x86/hvm/rtc.c	Tue Apr 15 10:24:55 2008 +0100
     8.2 +++ b/xen/arch/x86/hvm/rtc.c	Tue Apr 15 13:28:02 2008 +0100
     8.3 @@ -403,21 +403,21 @@ static int handle_rtc_io(
     8.4      if ( bytes != 1 )
     8.5      {
     8.6          gdprintk(XENLOG_WARNING, "HVM_RTC bad access\n");
     8.7 -        return 1;
     8.8 +        return X86EMUL_OKAY;
     8.9      }
    8.10      
    8.11      if ( dir == IOREQ_WRITE )
    8.12      {
    8.13          if ( rtc_ioport_write(vrtc, port, (uint8_t)*val) )
    8.14 -            return 1;
    8.15 +            return X86EMUL_OKAY;
    8.16      }
    8.17      else if ( vrtc->hw.cmos_index < RTC_CMOS_SIZE )
    8.18      {
    8.19          *val = rtc_ioport_read(vrtc, port);
    8.20 -        return 1;
    8.21 +        return X86EMUL_OKAY;
    8.22      }
    8.23  
    8.24 -    return 0;
    8.25 +    return X86EMUL_UNHANDLEABLE;
    8.26  }
    8.27  
    8.28  void rtc_migrate_timers(struct vcpu *v)
     9.1 --- a/xen/arch/x86/hvm/stdvga.c	Tue Apr 15 10:24:55 2008 +0100
     9.2 +++ b/xen/arch/x86/hvm/stdvga.c	Tue Apr 15 13:28:02 2008 +0100
     9.3 @@ -167,19 +167,19 @@ static void stdvga_out(uint32_t port, ui
     9.4      }
     9.5  }
     9.6  
     9.7 -int stdvga_intercept_pio(
     9.8 +static int stdvga_intercept_pio(
     9.9      int dir, uint32_t port, uint32_t bytes, uint32_t *val)
    9.10  {
    9.11      struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    9.12  
    9.13 -    if ( dir == IOREQ_READ )
    9.14 -        return 0;
    9.15 +    if ( dir == IOREQ_WRITE )
    9.16 +    {
    9.17 +        spin_lock(&s->lock);
    9.18 +        stdvga_out(port, bytes, *val);
    9.19 +        spin_unlock(&s->lock);
    9.20 +    }
    9.21  
    9.22 -    spin_lock(&s->lock);
    9.23 -    stdvga_out(port, bytes, *val);
    9.24 -    spin_unlock(&s->lock);
    9.25 -
    9.26 -    return 0; /* propagate to external ioemu */
    9.27 +    return X86EMUL_UNHANDLEABLE; /* propagate to external ioemu */
    9.28  }
    9.29  
    9.30  #define GET_PLANE(data, p) (((data) >> ((p) * 8)) & 0xff)
    9.31 @@ -459,7 +459,7 @@ static int mmio_move(struct hvm_hw_stdvg
    9.32      return 1;
    9.33  }
    9.34  
    9.35 -int stdvga_intercept_mmio(ioreq_t *p)
    9.36 +static int stdvga_intercept_mmio(ioreq_t *p)
    9.37  {
    9.38      struct domain *d = current->domain;
    9.39      struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
    9.40 @@ -468,7 +468,7 @@ int stdvga_intercept_mmio(ioreq_t *p)
    9.41      if ( p->size > 8 )
    9.42      {
    9.43          gdprintk(XENLOG_WARNING, "invalid mmio size %d\n", (int)p->size);
    9.44 -        return 0;
    9.45 +        return X86EMUL_UNHANDLEABLE;
    9.46      }
    9.47  
    9.48      spin_lock(&s->lock);
    9.49 @@ -499,7 +499,7 @@ int stdvga_intercept_mmio(ioreq_t *p)
    9.50  
    9.51      spin_unlock(&s->lock);
    9.52  
    9.53 -    return rc;
    9.54 +    return rc ? X86EMUL_OKAY : X86EMUL_UNHANDLEABLE;
    9.55  }
    9.56  
    9.57  void stdvga_init(struct domain *d)
    10.1 --- a/xen/arch/x86/hvm/vioapic.c	Tue Apr 15 10:24:55 2008 +0100
    10.2 +++ b/xen/arch/x86/hvm/vioapic.c	Tue Apr 15 13:28:02 2008 +0100
    10.3 @@ -88,9 +88,9 @@ static unsigned long vioapic_read_indire
    10.4      return result;
    10.5  }
    10.6  
    10.7 -static unsigned long vioapic_read(struct vcpu *v,
    10.8 -                                  unsigned long addr,
    10.9 -                                  unsigned long length)
   10.10 +static int vioapic_read(
   10.11 +    struct vcpu *v, unsigned long addr,
   10.12 +    unsigned long length, unsigned long *pval)
   10.13  {
   10.14      struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain);
   10.15      uint32_t result;
   10.16 @@ -114,11 +114,13 @@ static unsigned long vioapic_read(struct
   10.17          break;
   10.18      }
   10.19  
   10.20 -    return result;
   10.21 +    *pval = result;
   10.22 +    return X86EMUL_OKAY;
   10.23  }
   10.24  
   10.25  static void vioapic_write_redirent(
   10.26 -    struct hvm_hw_vioapic *vioapic, unsigned int idx, int top_word, uint32_t val)
   10.27 +    struct hvm_hw_vioapic *vioapic, unsigned int idx,
   10.28 +    int top_word, uint32_t val)
   10.29  {
   10.30      struct domain *d = vioapic_domain(vioapic);
   10.31      struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
   10.32 @@ -196,10 +198,9 @@ static void vioapic_write_indirect(
   10.33      }
   10.34  }
   10.35  
   10.36 -static void vioapic_write(struct vcpu *v,
   10.37 -                          unsigned long addr,
   10.38 -                          unsigned long length,
   10.39 -                          unsigned long val)
   10.40 +static int vioapic_write(
   10.41 +    struct vcpu *v, unsigned long addr,
   10.42 +    unsigned long length, unsigned long val)
   10.43  {
   10.44      struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain);
   10.45  
   10.46 @@ -224,6 +225,8 @@ static void vioapic_write(struct vcpu *v
   10.47      default:
   10.48          break;
   10.49      }
   10.50 +
   10.51 +    return X86EMUL_OKAY;
   10.52  }
   10.53  
   10.54  static int vioapic_range(struct vcpu *v, unsigned long addr)
    11.1 --- a/xen/arch/x86/hvm/vlapic.c	Tue Apr 15 10:24:55 2008 +0100
    11.2 +++ b/xen/arch/x86/hvm/vlapic.c	Tue Apr 15 13:28:02 2008 +0100
    11.3 @@ -466,17 +466,18 @@ static void vlapic_read_aligned(
    11.4      }
    11.5  }
    11.6  
    11.7 -static unsigned long vlapic_read(struct vcpu *v, unsigned long address,
    11.8 -                                 unsigned long len)
    11.9 +static int vlapic_read(
   11.10 +    struct vcpu *v, unsigned long address,
   11.11 +    unsigned long len, unsigned long *pval)
   11.12  {
   11.13      unsigned int alignment;
   11.14      unsigned int tmp;
   11.15 -    unsigned long result;
   11.16 +    unsigned long result = 0;
   11.17      struct vlapic *vlapic = vcpu_vlapic(v);
   11.18      unsigned int offset = address - vlapic_base_address(vlapic);
   11.19  
   11.20      if ( offset > (APIC_TDCR + 0x3) )
   11.21 -        return 0;
   11.22 +        goto out;
   11.23  
   11.24      alignment = offset & 0x3;
   11.25  
   11.26 @@ -508,14 +509,16 @@ static unsigned long vlapic_read(struct 
   11.27      HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset 0x%x with length 0x%lx, "
   11.28                  "and the result is 0x%lx", offset, len, result);
   11.29  
   11.30 -    return result;
   11.31 + out:
   11.32 +    *pval = result;
   11.33 +    return X86EMUL_OKAY;
   11.34  
   11.35   unaligned_exit_and_crash:
   11.36      gdprintk(XENLOG_ERR, "Unaligned LAPIC read len=0x%lx at offset=0x%x.\n",
   11.37               len, offset);
   11.38   exit_and_crash:
   11.39      domain_crash(v->domain);
   11.40 -    return 0;
   11.41 +    return X86EMUL_OKAY;
   11.42  }
   11.43  
   11.44  void vlapic_pt_cb(struct vcpu *v, void *data)
   11.45 @@ -523,8 +526,8 @@ void vlapic_pt_cb(struct vcpu *v, void *
   11.46      *(s_time_t *)data = hvm_get_guest_time(v);
   11.47  }
   11.48  
   11.49 -static void vlapic_write(struct vcpu *v, unsigned long address,
   11.50 -                         unsigned long len, unsigned long val)
   11.51 +static int vlapic_write(struct vcpu *v, unsigned long address,
   11.52 +                        unsigned long len, unsigned long val)
   11.53  {
   11.54      struct vlapic *vlapic = vcpu_vlapic(v);
   11.55      unsigned int offset = address - vlapic_base_address(vlapic);
   11.56 @@ -541,13 +544,13 @@ static void vlapic_write(struct vcpu *v,
   11.57      val = (uint32_t)val;
   11.58      if ( len != 4 )
   11.59      {
   11.60 -        unsigned int tmp;
   11.61 +        unsigned long tmp;
   11.62          unsigned char alignment;
   11.63  
   11.64          gdprintk(XENLOG_INFO, "Notice: Local APIC write with len = %lx\n",len);
   11.65  
   11.66          alignment = offset & 0x3;
   11.67 -        tmp = vlapic_read(v, offset & ~0x3, 4);
   11.68 +        (void)vlapic_read(v, offset & ~0x3, 4, &tmp);
   11.69  
   11.70          switch ( len )
   11.71          {
   11.72 @@ -670,13 +673,14 @@ static void vlapic_write(struct vcpu *v,
   11.73          break;
   11.74      }
   11.75  
   11.76 -    return;
   11.77 +    return X86EMUL_OKAY;
   11.78  
   11.79   unaligned_exit_and_crash:
   11.80      gdprintk(XENLOG_ERR, "Unaligned LAPIC write len=0x%lx at offset=0x%x.\n",
   11.81               len, offset);
   11.82   exit_and_crash:
   11.83      domain_crash(v->domain);
   11.84 +    return X86EMUL_OKAY;
   11.85  }
   11.86  
   11.87  static int vlapic_range(struct vcpu *v, unsigned long addr)
    12.1 --- a/xen/arch/x86/hvm/vpic.c	Tue Apr 15 10:24:55 2008 +0100
    12.2 +++ b/xen/arch/x86/hvm/vpic.c	Tue Apr 15 13:28:02 2008 +0100
    12.3 @@ -319,7 +319,7 @@ static int vpic_intercept_pic_io(
    12.4      if ( bytes != 1 )
    12.5      {
    12.6          gdprintk(XENLOG_WARNING, "PIC_IO bad access size %d\n", bytes);
    12.7 -        return 1;
    12.8 +        return X86EMUL_OKAY;
    12.9      }
   12.10  
   12.11      vpic = &current->domain->arch.hvm_domain.vpic[port >> 7];
   12.12 @@ -329,7 +329,7 @@ static int vpic_intercept_pic_io(
   12.13      else
   12.14          *val = (uint8_t)vpic_ioport_read(vpic, port);
   12.15  
   12.16 -    return 1;
   12.17 +    return X86EMUL_OKAY;
   12.18  }
   12.19  
   12.20  static int vpic_intercept_elcr_io(
   12.21 @@ -338,11 +338,7 @@ static int vpic_intercept_elcr_io(
   12.22      struct hvm_hw_vpic *vpic;
   12.23      uint32_t data;
   12.24  
   12.25 -    if ( bytes != 1 )
   12.26 -    {
   12.27 -        gdprintk(XENLOG_WARNING, "PIC_IO bad access size %d\n", bytes);
   12.28 -        return 1;
   12.29 -    }
   12.30 +    BUG_ON(bytes != 1);
   12.31  
   12.32      vpic = &current->domain->arch.hvm_domain.vpic[port & 1];
   12.33  
   12.34 @@ -360,7 +356,7 @@ static int vpic_intercept_elcr_io(
   12.35          *val = vpic->elcr & vpic_elcr_mask(vpic);
   12.36      }
   12.37  
   12.38 -    return 1;
   12.39 +    return X86EMUL_OKAY;
   12.40  }
   12.41  
   12.42  static int vpic_save(struct domain *d, hvm_domain_context_t *h)
    13.1 --- a/xen/include/asm-x86/hvm/io.h	Tue Apr 15 10:24:55 2008 +0100
    13.2 +++ b/xen/include/asm-x86/hvm/io.h	Tue Apr 15 13:28:02 2008 +0100
    13.3 @@ -30,13 +30,14 @@
    13.4  #define HVM_PORTIO                  0
    13.5  #define HVM_BUFFERED_IO             2
    13.6  
    13.7 -typedef unsigned long (*hvm_mmio_read_t)(struct vcpu *v,
    13.8 -                                         unsigned long addr,
    13.9 -                                         unsigned long length);
   13.10 -typedef void (*hvm_mmio_write_t)(struct vcpu *v,
   13.11 +typedef int (*hvm_mmio_read_t)(struct vcpu *v,
   13.12                                 unsigned long addr,
   13.13                                 unsigned long length,
   13.14 -                               unsigned long val);
   13.15 +                               unsigned long *val);
   13.16 +typedef int (*hvm_mmio_write_t)(struct vcpu *v,
   13.17 +                                unsigned long addr,
   13.18 +                                unsigned long length,
   13.19 +                                unsigned long val);
   13.20  typedef int (*hvm_mmio_check_t)(struct vcpu *v, unsigned long addr);
   13.21  
   13.22  typedef int (*portio_action_t)(
   13.23 @@ -64,7 +65,7 @@ struct hvm_mmio_handler {
   13.24  };
   13.25  
   13.26  int hvm_io_intercept(ioreq_t *p, int type);
   13.27 -int register_io_handler(
   13.28 +void register_io_handler(
   13.29      struct domain *d, unsigned long addr, unsigned long size,
   13.30      void *action, int type);
   13.31  
   13.32 @@ -81,18 +82,18 @@ static inline int hvm_buffered_io_interc
   13.33  int hvm_mmio_intercept(ioreq_t *p);
   13.34  int hvm_buffered_io_send(ioreq_t *p);
   13.35  
   13.36 -static inline int register_portio_handler(
   13.37 +static inline void register_portio_handler(
   13.38      struct domain *d, unsigned long addr,
   13.39      unsigned long size, portio_action_t action)
   13.40  {
   13.41 -    return register_io_handler(d, addr, size, action, HVM_PORTIO);
   13.42 +    register_io_handler(d, addr, size, action, HVM_PORTIO);
   13.43  }
   13.44  
   13.45 -static inline int register_buffered_io_handler(
   13.46 +static inline void register_buffered_io_handler(
   13.47      struct domain *d, unsigned long addr,
   13.48      unsigned long size, mmio_action_t action)
   13.49  {
   13.50 -    return register_io_handler(d, addr, size, action, HVM_BUFFERED_IO);
   13.51 +    register_io_handler(d, addr, size, action, HVM_BUFFERED_IO);
   13.52  }
   13.53  
   13.54  void send_timeoffset_req(unsigned long timeoff);
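The io.h hunk above likewise converts hvm_mmio_read_t and hvm_mmio_write_t to
return X86EMUL_* codes, with reads handing their result back through a pointer.
A minimal handler pair written against the new typedefs might look like this
sketch; the device and register contents are hypothetical, only the signatures
and return codes come from this changeset.

/* Hypothetical MMIO read handler: data goes out via *pval, status via the
 * X86EMUL_* return value. */
static int exdev_mmio_read(
    struct vcpu *v, unsigned long addr,
    unsigned long length, unsigned long *pval)
{
    if ( length != 4 )
    {
        *pval = ~0UL;       /* unsupported width reads as all-ones */
        return X86EMUL_OKAY;
    }
    *pval = 0;              /* placeholder register value */
    return X86EMUL_OKAY;
}

/* Hypothetical MMIO write handler under the new int-returning typedef. */
static int exdev_mmio_write(
    struct vcpu *v, unsigned long addr,
    unsigned long length, unsigned long val)
{
    /* A real device model would decode addr and update its state here. */
    return X86EMUL_OKAY;
}

/* As with the existing devices, the pair would be exposed through a
 * struct hvm_mmio_handler providing check_handler, read_handler and
 * write_handler callbacks. */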