ia64/xen-unstable

changeset 17931:c33a40b4c22b

x86_emulate: the read/write/insn_fetch emulation hooks now all take a
pointer to an emulator data buffer, and an arbitrary byte count (up to
the size of a page of memory).

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Jun 30 14:19:09 2008 +0100 (2008-06-30)
parents 51b392ab1912
children 20966aa89739
files tools/tests/test_x86_emulator.c xen/arch/x86/hvm/emulate.c xen/arch/x86/mm.c xen/arch/x86/mm/shadow/common.c xen/arch/x86/x86_emulate/x86_emulate.c xen/arch/x86/x86_emulate/x86_emulate.h
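
For orientation before the diff: the sketch below is not part of the changeset. It shows the shape of a hook after this change (a void * data buffer plus a byte count) and a caller-side zero-extending wrapper in the style of the read_ulong() helper added to x86_emulate.c. The demo_* names and the flat-memory backend are illustrative only, loosely modelled on tools/tests/test_x86_emulator.c.

    /* Minimal user-space sketch of the new hook shape (illustrative only). */
    #include <stdio.h>
    #include <string.h>

    #define X86EMUL_OKAY 0

    struct x86_emulate_ctxt;                   /* opaque for this sketch */
    enum x86_segment { x86_seg_none };         /* trimmed to one value */

    /* New-style read hook: fills an arbitrary-length caller buffer. */
    static int demo_read(enum x86_segment seg, unsigned long offset,
                         void *p_data, unsigned int bytes,
                         struct x86_emulate_ctxt *ctxt)
    {
        (void)seg; (void)ctxt;
        memcpy(p_data, (void *)offset, bytes); /* flat-memory toy backend */
        return X86EMUL_OKAY;
    }

    /*
     * Zero-extending wrapper, analogous to read_ulong() in the patch:
     * clear the destination, then let the hook fill the low bytes.
     */
    static int demo_read_ulong(enum x86_segment seg, unsigned long offset,
                               unsigned long *val, unsigned int bytes,
                               struct x86_emulate_ctxt *ctxt)
    {
        *val = 0;
        return demo_read(seg, offset, val, bytes, ctxt);
    }

    int main(void)
    {
        unsigned char buf[8] = { 0x11, 0x22, 0x33, 0x44 };
        unsigned long v;

        demo_read_ulong(x86_seg_none, (unsigned long)buf, &v, 2, NULL);
        printf("2-byte read -> %#lx\n", v);    /* 0x2211 on a little-endian host */
        return 0;
    }

As in the patch, zero-extension now lives with the caller (read_ulong) rather than in every hook implementation, which is what allows the hooks themselves to accept byte counts larger than sizeof(long).
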
line diff
     1.1 --- a/tools/tests/test_x86_emulator.c	Mon Jun 30 11:39:10 2008 +0100
     1.2 +++ b/tools/tests/test_x86_emulator.c	Mon Jun 30 14:19:09 2008 +0100
     1.3 @@ -22,23 +22,22 @@
     1.4  static int read(
     1.5      unsigned int seg,
     1.6      unsigned long offset,
     1.7 -    unsigned long *val,
     1.8 +    void *p_data,
     1.9      unsigned int bytes,
    1.10      struct x86_emulate_ctxt *ctxt)
    1.11  {
    1.12 -    *val = 0;
    1.13 -    memcpy(val, (void *)offset, bytes);
    1.14 +    memcpy(p_data, (void *)offset, bytes);
    1.15      return X86EMUL_OKAY;
    1.16  }
    1.17  
    1.18  static int write(
    1.19      unsigned int seg,
    1.20      unsigned long offset,
    1.21 -    unsigned long val,
    1.22 +    void *p_data,
    1.23      unsigned int bytes,
    1.24      struct x86_emulate_ctxt *ctxt)
    1.25  {
    1.26 -    memcpy((void *)offset, &val, bytes);
    1.27 +    memcpy((void *)offset, p_data, bytes);
    1.28      return X86EMUL_OKAY;
    1.29  }
    1.30  
     2.1 --- a/xen/arch/x86/hvm/emulate.c	Mon Jun 30 11:39:10 2008 +0100
     2.2 +++ b/xen/arch/x86/hvm/emulate.c	Mon Jun 30 14:19:09 2008 +0100
     2.3 @@ -21,15 +21,33 @@
     2.4  
     2.5  static int hvmemul_do_io(
     2.6      int is_mmio, paddr_t addr, unsigned long *reps, int size,
     2.7 -    paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
     2.8 +    paddr_t ram_gpa, int dir, int df, void *p_data)
     2.9  {
    2.10 +    paddr_t value = ram_gpa;
    2.11 +    int value_is_ptr = (p_data == NULL);
    2.12      struct vcpu *curr = current;
    2.13      vcpu_iodata_t *vio = get_ioreq(curr);
    2.14      ioreq_t *p = &vio->vp_ioreq;
    2.15      int rc;
    2.16  
    2.17 -    /* Only retrieve the value from singleton (non-REP) reads. */
    2.18 -    ASSERT((val == NULL) || ((dir == IOREQ_READ) && !value_is_ptr));
    2.19 +    /*
    2.20 +     * Weird-sized accesses have undefined behaviour: we discard writes
    2.21 +     * and read all-ones.
    2.22 +     */
    2.23 +    if ( unlikely((size > sizeof(long)) || (size & (size - 1))) )
    2.24 +    {
    2.25 +        gdprintk(XENLOG_WARNING, "bad mmio size %d\n", size);
    2.26 +        ASSERT(p_data != NULL); /* cannot happen with a REP prefix */
    2.27 +        if ( dir == IOREQ_READ )
    2.28 +            memset(p_data, ~0, size);
    2.29 +        return X86EMUL_UNHANDLEABLE;
    2.30 +    }
    2.31 +
    2.32 +    if ( (p_data != NULL) && (dir == IOREQ_WRITE) )
    2.33 +    {
    2.34 +        memcpy(&value, p_data, size);
    2.35 +        p_data = NULL;
    2.36 +    }
    2.37  
    2.38      if ( is_mmio && !value_is_ptr )
    2.39      {
    2.40 @@ -47,8 +65,7 @@ static int hvmemul_do_io(
    2.41              unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_read_bytes;
    2.42              if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
    2.43              {
    2.44 -                *val = 0;
    2.45 -                memcpy(val, &curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
    2.46 +                memcpy(p_data, &curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
    2.47                         size);
    2.48                  return X86EMUL_OKAY;
    2.49              }
    2.50 @@ -61,7 +78,7 @@ static int hvmemul_do_io(
    2.51          break;
    2.52      case HVMIO_completed:
    2.53          curr->arch.hvm_vcpu.io_state = HVMIO_none;
    2.54 -        if ( val == NULL )
    2.55 +        if ( p_data == NULL )
    2.56              return X86EMUL_UNHANDLEABLE;
    2.57          goto finish_access;
    2.58      case HVMIO_dispatched:
    2.59 @@ -82,7 +99,7 @@ static int hvmemul_do_io(
    2.60      }
    2.61  
    2.62      curr->arch.hvm_vcpu.io_state =
    2.63 -        (val == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
    2.64 +        (p_data == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
    2.65  
    2.66      p->dir = dir;
    2.67      p->data_is_ptr = value_is_ptr;
    2.68 @@ -116,7 +133,7 @@ static int hvmemul_do_io(
    2.69          break;
    2.70      case X86EMUL_UNHANDLEABLE:
    2.71          hvm_send_assist_req(curr);
    2.72 -        rc = (val != NULL) ? X86EMUL_RETRY : X86EMUL_OKAY;
    2.73 +        rc = (p_data != NULL) ? X86EMUL_RETRY : X86EMUL_OKAY;
    2.74          break;
    2.75      default:
    2.76          BUG();
    2.77 @@ -126,8 +143,8 @@ static int hvmemul_do_io(
    2.78          return rc;
    2.79  
    2.80   finish_access:
    2.81 -    if ( val != NULL )
    2.82 -        *val = curr->arch.hvm_vcpu.io_data;
    2.83 +    if ( p_data != NULL )
    2.84 +        memcpy(p_data, &curr->arch.hvm_vcpu.io_data, size);
    2.85  
    2.86      if ( is_mmio && !value_is_ptr )
    2.87      {
    2.88 @@ -152,7 +169,7 @@ static int hvmemul_do_io(
    2.89                    sizeof(curr->arch.hvm_vcpu.mmio_large_read)) )
    2.90              {
    2.91                  memcpy(&curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
    2.92 -                       val, size);
    2.93 +                       p_data, size);
    2.94                  curr->arch.hvm_vcpu.mmio_large_read_bytes += size;
    2.95              }
    2.96          }
    2.97 @@ -163,18 +180,16 @@ static int hvmemul_do_io(
    2.98  
    2.99  static int hvmemul_do_pio(
   2.100      unsigned long port, unsigned long *reps, int size,
   2.101 -    paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
   2.102 +    paddr_t ram_gpa, int dir, int df, void *p_data)
   2.103  {
   2.104 -    return hvmemul_do_io(0, port, reps, size, value,
   2.105 -                         dir, df, value_is_ptr, val);
   2.106 +    return hvmemul_do_io(0, port, reps, size, ram_gpa, dir, df, p_data);
   2.107  }
   2.108  
   2.109  static int hvmemul_do_mmio(
   2.110      paddr_t gpa, unsigned long *reps, int size,
   2.111 -    paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
   2.112 +    paddr_t ram_gpa, int dir, int df, void *p_data)
   2.113  {
   2.114 -    return hvmemul_do_io(1, gpa, reps, size, value,
   2.115 -                         dir, df, value_is_ptr, val);
   2.116 +    return hvmemul_do_io(1, gpa, reps, size, ram_gpa, dir, df, p_data);
   2.117  }
   2.118  
   2.119  /*
   2.120 @@ -287,7 +302,7 @@ static int hvmemul_virtual_to_linear(
   2.121  static int __hvmemul_read(
   2.122      enum x86_segment seg,
   2.123      unsigned long offset,
   2.124 -    unsigned long *val,
   2.125 +    void *p_data,
   2.126      unsigned int bytes,
   2.127      enum hvm_access_type access_type,
   2.128      struct hvm_emulate_ctxt *hvmemul_ctxt)
   2.129 @@ -303,8 +318,6 @@ static int __hvmemul_read(
   2.130      if ( rc != X86EMUL_OKAY )
   2.131          return rc;
   2.132  
   2.133 -    *val = 0;
   2.134 -
   2.135      if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
   2.136           curr->arch.hvm_vcpu.mmio_gva )
   2.137      {
   2.138 @@ -314,7 +327,7 @@ static int __hvmemul_read(
   2.139          gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
   2.140          if ( (off + bytes) <= PAGE_SIZE )
   2.141              return hvmemul_do_mmio(gpa, &reps, bytes, 0,
   2.142 -                                   IOREQ_READ, 0, 0, val);
   2.143 +                                   IOREQ_READ, 0, p_data);
   2.144      }
   2.145  
   2.146      if ( (seg != x86_seg_none) &&
   2.147 @@ -322,15 +335,13 @@ static int __hvmemul_read(
   2.148          pfec |= PFEC_user_mode;
   2.149  
   2.150      rc = ((access_type == hvm_access_insn_fetch) ?
   2.151 -          hvm_fetch_from_guest_virt(val, addr, bytes, pfec) :
   2.152 -          hvm_copy_from_guest_virt(val, addr, bytes, pfec));
   2.153 +          hvm_fetch_from_guest_virt(p_data, addr, bytes, pfec) :
   2.154 +          hvm_copy_from_guest_virt(p_data, addr, bytes, pfec));
   2.155      if ( rc == HVMCOPY_bad_gva_to_gfn )
   2.156          return X86EMUL_EXCEPTION;
   2.157  
   2.158      if ( rc == HVMCOPY_bad_gfn_to_mfn )
   2.159      {
   2.160 -        unsigned long reps = 1;
   2.161 -
   2.162          if ( access_type == hvm_access_insn_fetch )
   2.163              return X86EMUL_UNHANDLEABLE;
   2.164  
   2.165 @@ -339,7 +350,7 @@ static int __hvmemul_read(
   2.166          if ( rc != X86EMUL_OKAY )
   2.167              return rc;
   2.168  
   2.169 -        return hvmemul_do_mmio(gpa, &reps, bytes, 0, IOREQ_READ, 0, 0, val);
   2.170 +        return hvmemul_do_mmio(gpa, &reps, bytes, 0, IOREQ_READ, 0, p_data);
   2.171      }
   2.172  
   2.173      return X86EMUL_OKAY;
   2.174 @@ -348,19 +359,19 @@ static int __hvmemul_read(
   2.175  static int hvmemul_read(
   2.176      enum x86_segment seg,
   2.177      unsigned long offset,
   2.178 -    unsigned long *val,
   2.179 +    void *p_data,
   2.180      unsigned int bytes,
   2.181      struct x86_emulate_ctxt *ctxt)
   2.182  {
   2.183      return __hvmemul_read(
   2.184 -        seg, offset, val, bytes, hvm_access_read,
   2.185 +        seg, offset, p_data, bytes, hvm_access_read,
   2.186          container_of(ctxt, struct hvm_emulate_ctxt, ctxt));
   2.187  }
   2.188  
   2.189  static int hvmemul_insn_fetch(
   2.190      enum x86_segment seg,
   2.191      unsigned long offset,
   2.192 -    unsigned long *val,
   2.193 +    void *p_data,
   2.194      unsigned int bytes,
   2.195      struct x86_emulate_ctxt *ctxt)
   2.196  {
   2.197 @@ -371,19 +382,18 @@ static int hvmemul_insn_fetch(
   2.198      /* Fall back if requested bytes are not in the prefetch cache. */
   2.199      if ( unlikely((insn_off + bytes) > hvmemul_ctxt->insn_buf_bytes) )
   2.200          return __hvmemul_read(
   2.201 -            seg, offset, val, bytes,
   2.202 +            seg, offset, p_data, bytes,
   2.203              hvm_access_insn_fetch, hvmemul_ctxt);
   2.204  
   2.205      /* Hit the cache. Simple memcpy. */
   2.206 -    *val = 0;
   2.207 -    memcpy(val, &hvmemul_ctxt->insn_buf[insn_off], bytes);
   2.208 +    memcpy(p_data, &hvmemul_ctxt->insn_buf[insn_off], bytes);
   2.209      return X86EMUL_OKAY;
   2.210  }
   2.211  
   2.212  static int hvmemul_write(
   2.213      enum x86_segment seg,
   2.214      unsigned long offset,
   2.215 -    unsigned long val,
   2.216 +    void *p_data,
   2.217      unsigned int bytes,
   2.218      struct x86_emulate_ctxt *ctxt)
   2.219  {
   2.220 @@ -406,29 +416,27 @@ static int hvmemul_write(
   2.221          unsigned int off = addr & (PAGE_SIZE - 1);
   2.222          gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
   2.223          if ( (off + bytes) <= PAGE_SIZE )
   2.224 -            return hvmemul_do_mmio(gpa, &reps, bytes, val,
   2.225 -                                   IOREQ_WRITE, 0, 0, NULL);
   2.226 +            return hvmemul_do_mmio(gpa, &reps, bytes, 0,
   2.227 +                                   IOREQ_WRITE, 0, p_data);
   2.228      }
   2.229  
   2.230      if ( (seg != x86_seg_none) &&
   2.231           (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
   2.232          pfec |= PFEC_user_mode;
   2.233  
   2.234 -    rc = hvm_copy_to_guest_virt(addr, &val, bytes, pfec);
   2.235 +    rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec);
   2.236      if ( rc == HVMCOPY_bad_gva_to_gfn )
   2.237          return X86EMUL_EXCEPTION;
   2.238  
   2.239      if ( rc == HVMCOPY_bad_gfn_to_mfn )
   2.240      {
   2.241 -        unsigned long reps = 1;
   2.242 -
   2.243          rc = hvmemul_linear_to_phys(
   2.244              addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt);
   2.245          if ( rc != X86EMUL_OKAY )
   2.246              return rc;
   2.247  
   2.248 -        return hvmemul_do_mmio(gpa, &reps, bytes, val,
   2.249 -                               IOREQ_WRITE, 0, 0, NULL);
   2.250 +        return hvmemul_do_mmio(gpa, &reps, bytes, 0,
   2.251 +                               IOREQ_WRITE, 0, p_data);
   2.252      }
   2.253  
   2.254      return X86EMUL_OKAY;
   2.255 @@ -442,12 +450,8 @@ static int hvmemul_cmpxchg(
   2.256      unsigned int bytes,
   2.257      struct x86_emulate_ctxt *ctxt)
   2.258  {
   2.259 -    unsigned long new = 0;
   2.260 -    if ( bytes > sizeof(new) )
   2.261 -        return X86EMUL_UNHANDLEABLE;
   2.262 -    memcpy(&new, p_new, bytes);
   2.263      /* Fix this in case the guest is really relying on r-m-w atomicity. */
   2.264 -    return hvmemul_write(seg, offset, new, bytes, ctxt);
   2.265 +    return hvmemul_write(seg, offset, p_new, bytes, ctxt);
   2.266  }
   2.267  
   2.268  static int hvmemul_rep_ins(
   2.269 @@ -480,7 +484,7 @@ static int hvmemul_rep_ins(
   2.270          return rc;
   2.271  
   2.272      return hvmemul_do_pio(src_port, reps, bytes_per_rep, gpa, IOREQ_READ,
   2.273 -                          !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
   2.274 +                          !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
   2.275  }
   2.276  
   2.277  static int hvmemul_rep_outs(
   2.278 @@ -513,7 +517,7 @@ static int hvmemul_rep_outs(
   2.279          return rc;
   2.280  
   2.281      return hvmemul_do_pio(dst_port, reps, bytes_per_rep, gpa, IOREQ_WRITE,
   2.282 -                          !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
   2.283 +                          !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
   2.284  }
   2.285  
   2.286  static int hvmemul_rep_movs(
   2.287 @@ -563,14 +567,14 @@ static int hvmemul_rep_movs(
   2.288      if ( !p2m_is_ram(p2mt) )
   2.289          return hvmemul_do_mmio(
   2.290              sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ,
   2.291 -            !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
   2.292 +            !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
   2.293  
   2.294      (void)gfn_to_mfn_current(dgpa >> PAGE_SHIFT, &p2mt);
   2.295      if ( p2m_is_ram(p2mt) )
   2.296          return X86EMUL_UNHANDLEABLE;
   2.297      return hvmemul_do_mmio(
   2.298          dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE,
   2.299 -        !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
   2.300 +        !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
   2.301  }
   2.302  
   2.303  static int hvmemul_read_segment(
   2.304 @@ -607,7 +611,8 @@ static int hvmemul_read_io(
   2.305      struct x86_emulate_ctxt *ctxt)
   2.306  {
   2.307      unsigned long reps = 1;
   2.308 -    return hvmemul_do_pio(port, &reps, bytes, 0, IOREQ_READ, 0, 0, val);
   2.309 +    *val = 0;
   2.310 +    return hvmemul_do_pio(port, &reps, bytes, 0, IOREQ_READ, 0, val);
   2.311  }
   2.312  
   2.313  static int hvmemul_write_io(
   2.314 @@ -617,7 +622,7 @@ static int hvmemul_write_io(
   2.315      struct x86_emulate_ctxt *ctxt)
   2.316  {
   2.317      unsigned long reps = 1;
   2.318 -    return hvmemul_do_pio(port, &reps, bytes, val, IOREQ_WRITE, 0, 0, NULL);
   2.319 +    return hvmemul_do_pio(port, &reps, bytes, 0, IOREQ_WRITE, 0, &val);
   2.320  }
   2.321  
   2.322  static int hvmemul_read_cr(
     3.1 --- a/xen/arch/x86/mm.c	Mon Jun 30 11:39:10 2008 +0100
     3.2 +++ b/xen/arch/x86/mm.c	Mon Jun 30 14:19:09 2008 +0100
     3.3 @@ -3539,15 +3539,14 @@ struct ptwr_emulate_ctxt {
     3.4  static int ptwr_emulated_read(
     3.5      enum x86_segment seg,
     3.6      unsigned long offset,
     3.7 -    unsigned long *val,
     3.8 +    void *p_data,
     3.9      unsigned int bytes,
    3.10      struct x86_emulate_ctxt *ctxt)
    3.11  {
    3.12      unsigned int rc;
    3.13      unsigned long addr = offset;
    3.14  
    3.15 -    *val = 0;
    3.16 -    if ( (rc = copy_from_user((void *)val, (void *)addr, bytes)) != 0 )
    3.17 +    if ( (rc = copy_from_user(p_data, (void *)addr, bytes)) != 0 )
    3.18      {
    3.19          propagate_page_fault(addr + bytes - rc, 0); /* read fault */
    3.20          return X86EMUL_EXCEPTION;
    3.21 @@ -3574,7 +3573,7 @@ static int ptwr_emulated_update(
    3.22      /* Only allow naturally-aligned stores within the original %cr2 page. */
    3.23      if ( unlikely(((addr^ptwr_ctxt->cr2) & PAGE_MASK) || (addr & (bytes-1))) )
    3.24      {
    3.25 -        MEM_LOG("Bad ptwr access (cr2=%lx, addr=%lx, bytes=%u)",
    3.26 +        MEM_LOG("ptwr_emulate: bad access (cr2=%lx, addr=%lx, bytes=%u)",
    3.27                  ptwr_ctxt->cr2, addr, bytes);
    3.28          return X86EMUL_UNHANDLEABLE;
    3.29      }
    3.30 @@ -3682,10 +3681,21 @@ static int ptwr_emulated_update(
    3.31  static int ptwr_emulated_write(
    3.32      enum x86_segment seg,
    3.33      unsigned long offset,
    3.34 -    unsigned long val,
    3.35 +    void *p_data,
    3.36      unsigned int bytes,
    3.37      struct x86_emulate_ctxt *ctxt)
    3.38  {
    3.39 +    paddr_t val = 0;
    3.40 +
    3.41 +    if ( (bytes > sizeof(paddr_t)) || (bytes & (bytes -1)) )
    3.42 +    {
    3.43 +        MEM_LOG("ptwr_emulate: bad write size (addr=%lx, bytes=%u)",
    3.44 +                offset, bytes);
    3.45 +        return X86EMUL_UNHANDLEABLE;
    3.46 +    }
    3.47 +
    3.48 +    memcpy(&val, p_data, bytes);
    3.49 +
    3.50      return ptwr_emulated_update(
    3.51          offset, 0, val, bytes, 0,
    3.52          container_of(ctxt, struct ptwr_emulate_ctxt, ctxt));
    3.53 @@ -3700,10 +3710,17 @@ static int ptwr_emulated_cmpxchg(
    3.54      struct x86_emulate_ctxt *ctxt)
    3.55  {
    3.56      paddr_t old = 0, new = 0;
    3.57 -    if ( bytes > sizeof(paddr_t) )
    3.58 +
    3.59 +    if ( (bytes > sizeof(paddr_t)) || (bytes & (bytes -1)) )
    3.60 +    {
    3.61 +        MEM_LOG("ptwr_emulate: bad cmpxchg size (addr=%lx, bytes=%u)",
    3.62 +                offset, bytes);
    3.63          return X86EMUL_UNHANDLEABLE;
    3.64 +    }
    3.65 +
    3.66      memcpy(&old, p_old, bytes);
    3.67      memcpy(&new, p_new, bytes);
    3.68 +
    3.69      return ptwr_emulated_update(
    3.70          offset, old, new, bytes, 1,
    3.71          container_of(ctxt, struct ptwr_emulate_ctxt, ctxt));
     4.1 --- a/xen/arch/x86/mm/shadow/common.c	Mon Jun 30 11:39:10 2008 +0100
     4.2 +++ b/xen/arch/x86/mm/shadow/common.c	Mon Jun 30 14:19:09 2008 +0100
     4.3 @@ -145,7 +145,7 @@ static int hvm_translate_linear_addr(
     4.4  static int
     4.5  hvm_read(enum x86_segment seg,
     4.6           unsigned long offset,
     4.7 -         unsigned long *val,
     4.8 +         void *p_data,
     4.9           unsigned int bytes,
    4.10           enum hvm_access_type access_type,
    4.11           struct sh_emulate_ctxt *sh_ctxt)
    4.12 @@ -158,12 +158,10 @@ hvm_read(enum x86_segment seg,
    4.13      if ( rc )
    4.14          return rc;
    4.15  
    4.16 -    *val = 0;
    4.17 -
    4.18      if ( access_type == hvm_access_insn_fetch )
    4.19 -        rc = hvm_fetch_from_guest_virt(val, addr, bytes, 0);
    4.20 +        rc = hvm_fetch_from_guest_virt(p_data, addr, bytes, 0);
    4.21      else
    4.22 -        rc = hvm_copy_from_guest_virt(val, addr, bytes, 0);
    4.23 +        rc = hvm_copy_from_guest_virt(p_data, addr, bytes, 0);
    4.24  
    4.25      switch ( rc )
    4.26      {
    4.27 @@ -181,20 +179,20 @@ hvm_read(enum x86_segment seg,
    4.28  static int
    4.29  hvm_emulate_read(enum x86_segment seg,
    4.30                   unsigned long offset,
    4.31 -                 unsigned long *val,
    4.32 +                 void *p_data,
    4.33                   unsigned int bytes,
    4.34                   struct x86_emulate_ctxt *ctxt)
    4.35  {
    4.36      if ( !is_x86_user_segment(seg) )
    4.37          return X86EMUL_UNHANDLEABLE;
    4.38 -    return hvm_read(seg, offset, val, bytes, hvm_access_read,
    4.39 +    return hvm_read(seg, offset, p_data, bytes, hvm_access_read,
    4.40                      container_of(ctxt, struct sh_emulate_ctxt, ctxt));
    4.41  }
    4.42  
    4.43  static int
    4.44  hvm_emulate_insn_fetch(enum x86_segment seg,
    4.45                         unsigned long offset,
    4.46 -                       unsigned long *val,
    4.47 +                       void *p_data,
    4.48                         unsigned int bytes,
    4.49                         struct x86_emulate_ctxt *ctxt)
    4.50  {
    4.51 @@ -206,19 +204,18 @@ hvm_emulate_insn_fetch(enum x86_segment 
    4.52  
    4.53      /* Fall back if requested bytes are not in the prefetch cache. */
    4.54      if ( unlikely((insn_off + bytes) > sh_ctxt->insn_buf_bytes) )
    4.55 -        return hvm_read(seg, offset, val, bytes,
    4.56 +        return hvm_read(seg, offset, p_data, bytes,
    4.57                          hvm_access_insn_fetch, sh_ctxt);
    4.58  
    4.59      /* Hit the cache. Simple memcpy. */
    4.60 -    *val = 0;
    4.61 -    memcpy(val, &sh_ctxt->insn_buf[insn_off], bytes);
    4.62 +    memcpy(p_data, &sh_ctxt->insn_buf[insn_off], bytes);
    4.63      return X86EMUL_OKAY;
    4.64  }
    4.65  
    4.66  static int
    4.67  hvm_emulate_write(enum x86_segment seg,
    4.68                    unsigned long offset,
    4.69 -                  unsigned long val,
    4.70 +                  void *p_data,
    4.71                    unsigned int bytes,
    4.72                    struct x86_emulate_ctxt *ctxt)
    4.73  {
    4.74 @@ -241,7 +238,7 @@ hvm_emulate_write(enum x86_segment seg,
    4.75          return rc;
    4.76  
    4.77      return v->arch.paging.mode->shadow.x86_emulate_write(
    4.78 -        v, addr, &val, bytes, sh_ctxt);
    4.79 +        v, addr, p_data, bytes, sh_ctxt);
    4.80  }
    4.81  
    4.82  static int 
    4.83 @@ -293,7 +290,7 @@ static struct x86_emulate_ops hvm_shadow
    4.84  static int
    4.85  pv_emulate_read(enum x86_segment seg,
    4.86                  unsigned long offset,
    4.87 -                unsigned long *val,
    4.88 +                void *p_data,
    4.89                  unsigned int bytes,
    4.90                  struct x86_emulate_ctxt *ctxt)
    4.91  {
    4.92 @@ -302,8 +299,7 @@ pv_emulate_read(enum x86_segment seg,
    4.93      if ( !is_x86_user_segment(seg) )
    4.94          return X86EMUL_UNHANDLEABLE;
    4.95  
    4.96 -    *val = 0;
    4.97 -    if ( (rc = copy_from_user((void *)val, (void *)offset, bytes)) != 0 )
    4.98 +    if ( (rc = copy_from_user(p_data, (void *)offset, bytes)) != 0 )
    4.99      {
   4.100          propagate_page_fault(offset + bytes - rc, 0); /* read fault */
   4.101          return X86EMUL_EXCEPTION;
   4.102 @@ -315,7 +311,7 @@ pv_emulate_read(enum x86_segment seg,
   4.103  static int
   4.104  pv_emulate_write(enum x86_segment seg,
   4.105                   unsigned long offset,
   4.106 -                 unsigned long val,
   4.107 +                 void *p_data,
   4.108                   unsigned int bytes,
   4.109                   struct x86_emulate_ctxt *ctxt)
   4.110  {
   4.111 @@ -325,7 +321,7 @@ pv_emulate_write(enum x86_segment seg,
   4.112      if ( !is_x86_user_segment(seg) )
   4.113          return X86EMUL_UNHANDLEABLE;
   4.114      return v->arch.paging.mode->shadow.x86_emulate_write(
   4.115 -        v, offset, &val, bytes, sh_ctxt);
   4.116 +        v, offset, p_data, bytes, sh_ctxt);
   4.117  }
   4.118  
   4.119  static int 
     5.1 --- a/xen/arch/x86/x86_emulate/x86_emulate.c	Mon Jun 30 11:39:10 2008 +0100
     5.2 +++ b/xen/arch/x86/x86_emulate/x86_emulate.c	Mon Jun 30 14:19:09 2008 +0100
     5.3 @@ -466,7 +466,7 @@ do{ asm volatile (                      
     5.4  
     5.5  /* Fetch next part of the instruction being emulated. */
     5.6  #define insn_fetch_bytes(_size)                                         \
     5.7 -({ unsigned long _x, _eip = _regs.eip;                                  \
     5.8 +({ unsigned long _x = 0, _eip = _regs.eip;                              \
     5.9     if ( !mode_64bit() ) _eip = (uint32_t)_eip; /* ignore upper dword */ \
    5.10     _regs.eip += (_size); /* real hardware doesn't truncate */           \
    5.11     generate_exception_if((uint8_t)(_regs.eip - ctxt->regs->eip) > 15,   \
    5.12 @@ -655,6 +655,19 @@ static void __put_rep_prefix(
    5.13          __put_rep_prefix(&_regs, ctxt->regs, ad_bytes, reps_completed); \
    5.14  })
    5.15  
    5.16 +/* Compatibility function: read guest memory, zero-extend result to a ulong. */
    5.17 +static int read_ulong(
    5.18 +        enum x86_segment seg,
    5.19 +        unsigned long offset,
    5.20 +        unsigned long *val,
    5.21 +        unsigned int bytes,
    5.22 +        struct x86_emulate_ctxt *ctxt,
    5.23 +        struct x86_emulate_ops *ops)
    5.24 +{
    5.25 +    *val = 0;
    5.26 +    return ops->read(seg, offset, val, bytes, ctxt);
    5.27 +}
    5.28 +
    5.29  /*
    5.30   * Unsigned multiplication with double-word result.
    5.31   * IN:  Multiplicand=m[0], Multiplier=m[1]
    5.32 @@ -841,7 +854,8 @@ static int ioport_access_check(
    5.33           (tr.limit < 0x67) )
    5.34          goto raise_exception;
    5.35  
    5.36 -    if ( (rc = ops->read(x86_seg_none, tr.base + 0x66, &iobmp, 2, ctxt)) )
    5.37 +    if ( (rc = read_ulong(x86_seg_none, tr.base + 0x66,
    5.38 +                          &iobmp, 2, ctxt, ops)) )
    5.39          return rc;
    5.40  
    5.41      /* Ensure TSS includes two bytes including byte containing first port. */
    5.42 @@ -849,7 +863,8 @@ static int ioport_access_check(
    5.43      if ( tr.limit <= iobmp )
    5.44          goto raise_exception;
    5.45  
    5.46 -    if ( (rc = ops->read(x86_seg_none, tr.base + iobmp, &iobmp, 2, ctxt)) )
    5.47 +    if ( (rc = read_ulong(x86_seg_none, tr.base + iobmp,
    5.48 +                          &iobmp, 2, ctxt, ops)) )
    5.49          return rc;
    5.50      if ( (iobmp & (((1<<bytes)-1) << (first_port&7))) != 0 )
    5.51          goto raise_exception;
    5.52 @@ -941,12 +956,12 @@ protmode_load_seg(
    5.53          goto raise_exn;
    5.54  
    5.55      do {
    5.56 -        if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8),
    5.57 -                             &val, 4, ctxt)) )
    5.58 +        if ( (rc = read_ulong(x86_seg_none, desctab.base + (sel & 0xfff8),
    5.59 +                              &val, 4, ctxt, ops)) )
    5.60              return rc;
    5.61          desc.a = val;
    5.62 -        if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
    5.63 -                             &val, 4, ctxt)) )
    5.64 +        if ( (rc = read_ulong(x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
    5.65 +                              &val, 4, ctxt, ops)) )
    5.66              return rc;
    5.67          desc.b = val;
    5.68  
    5.69 @@ -1402,8 +1417,8 @@ x86_emulate(
    5.70              case 8: src.val = *(uint64_t *)src.reg; break;
    5.71              }
    5.72          }
    5.73 -        else if ( (rc = ops->read(src.mem.seg, src.mem.off,
    5.74 -                                  &src.val, src.bytes, ctxt)) )
    5.75 +        else if ( (rc = read_ulong(src.mem.seg, src.mem.off,
    5.76 +                                   &src.val, src.bytes, ctxt, ops)) )
    5.77              goto done;
    5.78          break;
    5.79      case SrcImm:
    5.80 @@ -1494,8 +1509,8 @@ x86_emulate(
    5.81          }
    5.82          else if ( !(d & Mov) ) /* optimisation - avoid slow emulated read */
    5.83          {
    5.84 -            if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
    5.85 -                                 &dst.val, dst.bytes, ctxt)) )
    5.86 +            if ( (rc = read_ulong(dst.mem.seg, dst.mem.off,
    5.87 +                                  &dst.val, dst.bytes, ctxt, ops)) )
    5.88                  goto done;
    5.89              dst.orig_val = dst.val;
    5.90          }
    5.91 @@ -1571,8 +1586,8 @@ x86_emulate(
    5.92          int lb, ub, idx;
    5.93          generate_exception_if(mode_64bit() || (src.type != OP_MEM),
    5.94                                EXC_UD, -1);
    5.95 -        if ( (rc = ops->read(src.mem.seg, src.mem.off + op_bytes,
    5.96 -                             &src_val2, op_bytes, ctxt)) )
    5.97 +        if ( (rc = read_ulong(src.mem.seg, src.mem.off + op_bytes,
    5.98 +                              &src_val2, op_bytes, ctxt, ops)) )
    5.99              goto done;
   5.100          ub  = (op_bytes == 2) ? (int16_t)src_val2 : (int32_t)src_val2;
   5.101          lb  = (op_bytes == 2) ? (int16_t)src.val  : (int32_t)src.val;
   5.102 @@ -1588,8 +1603,8 @@ x86_emulate(
   5.103              /* movsxd */
   5.104              if ( src.type == OP_REG )
   5.105                  src.val = *(int32_t *)src.reg;
   5.106 -            else if ( (rc = ops->read(src.mem.seg, src.mem.off,
   5.107 -                                      &src.val, 4, ctxt)) )
   5.108 +            else if ( (rc = read_ulong(src.mem.seg, src.mem.off,
   5.109 +                                       &src.val, 4, ctxt, ops)) )
   5.110                  goto done;
   5.111              dst.val = (int32_t)src.val;
   5.112          }
   5.113 @@ -1613,8 +1628,8 @@ x86_emulate(
   5.114          unsigned long src1; /* ModR/M source operand */
   5.115          if ( ea.type == OP_REG )
   5.116              src1 = *ea.reg;
   5.117 -        else if ( (rc = ops->read(ea.mem.seg, ea.mem.off,
   5.118 -                                  &src1, op_bytes, ctxt)) )
   5.119 +        else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off,
   5.120 +                                   &src1, op_bytes, ctxt, ops)) )
   5.121              goto done;
   5.122          _regs.eflags &= ~(EFLG_OF|EFLG_CF);
   5.123          switch ( dst.bytes )
   5.124 @@ -1720,8 +1735,8 @@ x86_emulate(
   5.125          /* 64-bit mode: POP defaults to a 64-bit operand. */
   5.126          if ( mode_64bit() && (dst.bytes == 4) )
   5.127              dst.bytes = 8;
   5.128 -        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes),
   5.129 -                             &dst.val, dst.bytes, ctxt)) != 0 )
   5.130 +        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes),
   5.131 +                              &dst.val, dst.bytes, ctxt, ops)) != 0 )
   5.132              goto done;
   5.133          break;
   5.134  
   5.135 @@ -1773,8 +1788,8 @@ x86_emulate(
   5.136          dst.val = x86_seg_es;
   5.137      les: /* dst.val identifies the segment */
   5.138          generate_exception_if(src.type != OP_MEM, EXC_UD, -1);
   5.139 -        if ( (rc = ops->read(src.mem.seg, src.mem.off + src.bytes,
   5.140 -                             &sel, 2, ctxt)) != 0 )
   5.141 +        if ( (rc = read_ulong(src.mem.seg, src.mem.off + src.bytes,
   5.142 +                              &sel, 2, ctxt, ops)) != 0 )
   5.143              goto done;
   5.144          if ( (rc = load_seg(dst.val, (uint16_t)sel, ctxt, ops)) != 0 )
   5.145              goto done;
   5.146 @@ -2020,8 +2035,8 @@ x86_emulate(
   5.147                  dst.bytes = op_bytes = 8;
   5.148                  if ( dst.type == OP_REG )
   5.149                      dst.val = *dst.reg;
   5.150 -                else if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
   5.151 -                                          &dst.val, 8, ctxt)) != 0 )
   5.152 +                else if ( (rc = read_ulong(dst.mem.seg, dst.mem.off,
   5.153 +                                           &dst.val, 8, ctxt, ops)) != 0 )
   5.154                      goto done;
   5.155              }
   5.156              src.val = _regs.eip;
   5.157 @@ -2036,8 +2051,8 @@ x86_emulate(
   5.158  
   5.159              generate_exception_if(dst.type != OP_MEM, EXC_UD, -1);
   5.160  
   5.161 -            if ( (rc = ops->read(dst.mem.seg, dst.mem.off+dst.bytes,
   5.162 -                                 &sel, 2, ctxt)) )
   5.163 +            if ( (rc = read_ulong(dst.mem.seg, dst.mem.off+dst.bytes,
   5.164 +                                  &sel, 2, ctxt, ops)) )
   5.165                  goto done;
   5.166  
   5.167              if ( (modrm_reg & 7) == 3 ) /* call */
   5.168 @@ -2046,9 +2061,9 @@ x86_emulate(
   5.169                  fail_if(ops->read_segment == NULL);
   5.170                  if ( (rc = ops->read_segment(x86_seg_cs, &reg, ctxt)) ||
   5.171                       (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
   5.172 -                                      reg.sel, op_bytes, ctxt)) ||
   5.173 +                                      &reg.sel, op_bytes, ctxt)) ||
   5.174                       (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
   5.175 -                                      _regs.eip, op_bytes, ctxt)) )
   5.176 +                                      &_regs.eip, op_bytes, ctxt)) )
   5.177                      goto done;
   5.178              }
   5.179  
   5.180 @@ -2066,12 +2081,12 @@ x86_emulate(
   5.181                  dst.bytes = 8;
   5.182                  if ( dst.type == OP_REG )
   5.183                      dst.val = *dst.reg;
   5.184 -                else if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
   5.185 -                                          &dst.val, 8, ctxt)) != 0 )
   5.186 +                else if ( (rc = read_ulong(dst.mem.seg, dst.mem.off,
   5.187 +                                           &dst.val, 8, ctxt, ops)) != 0 )
   5.188                      goto done;
   5.189              }
   5.190              if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
   5.191 -                                  dst.val, dst.bytes, ctxt)) != 0 )
   5.192 +                                  &dst.val, dst.bytes, ctxt)) != 0 )
   5.193                  goto done;
   5.194              dst.type = OP_NONE;
   5.195              break;
   5.196 @@ -2106,7 +2121,7 @@ x86_emulate(
   5.197                  &dst.val, dst.bytes, ctxt);
   5.198          else
   5.199              rc = ops->write(
   5.200 -                dst.mem.seg, dst.mem.off, dst.val, dst.bytes, ctxt);
   5.201 +                dst.mem.seg, dst.mem.off, &dst.val, dst.bytes, ctxt);
   5.202          if ( rc != 0 )
   5.203              goto done;
   5.204      default:
   5.205 @@ -2153,7 +2168,7 @@ x86_emulate(
   5.206          if ( mode_64bit() && (op_bytes == 4) )
   5.207              op_bytes = 8;
   5.208          if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
   5.209 -                              reg.sel, op_bytes, ctxt)) != 0 )
   5.210 +                              &reg.sel, op_bytes, ctxt)) != 0 )
   5.211              goto done;
   5.212          break;
   5.213      }
   5.214 @@ -2165,8 +2180,8 @@ x86_emulate(
   5.215          /* 64-bit mode: POP defaults to a 64-bit operand. */
   5.216          if ( mode_64bit() && (op_bytes == 4) )
   5.217              op_bytes = 8;
   5.218 -        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
   5.219 -                             &dst.val, op_bytes, ctxt)) != 0 )
   5.220 +        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
   5.221 +                              &dst.val, op_bytes, ctxt, ops)) != 0 )
   5.222              goto done;
   5.223          if ( (rc = load_seg(src.val, (uint16_t)dst.val, ctxt, ops)) != 0 )
   5.224              return rc;
   5.225 @@ -2275,8 +2290,8 @@ x86_emulate(
   5.226          dst.bytes = op_bytes;
   5.227          if ( mode_64bit() && (dst.bytes == 4) )
   5.228              dst.bytes = 8;
   5.229 -        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes),
   5.230 -                             &dst.val, dst.bytes, ctxt)) != 0 )
   5.231 +        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes),
   5.232 +                              &dst.val, dst.bytes, ctxt, ops)) != 0 )
   5.233              goto done;
   5.234          break;
   5.235  
   5.236 @@ -2288,7 +2303,7 @@ x86_emulate(
   5.237          generate_exception_if(mode_64bit(), EXC_UD, -1);
   5.238          for ( i = 0; i < 8; i++ )
   5.239              if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
   5.240 -                                  regs[i], op_bytes, ctxt)) != 0 )
   5.241 +                                  &regs[i], op_bytes, ctxt)) != 0 )
   5.242              goto done;
   5.243          break;
   5.244      }
   5.245 @@ -2303,8 +2318,8 @@ x86_emulate(
   5.246          generate_exception_if(mode_64bit(), EXC_UD, -1);
   5.247          for ( i = 0; i < 8; i++ )
   5.248          {
   5.249 -            if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
   5.250 -                                 &dst.val, op_bytes, ctxt)) != 0 )
   5.251 +            if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
   5.252 +                                  &dst.val, op_bytes, ctxt, ops)) != 0 )
   5.253                  goto done;
   5.254              switch ( op_bytes )
   5.255              {
   5.256 @@ -2382,8 +2397,8 @@ x86_emulate(
   5.257          }
   5.258          else
   5.259          {
   5.260 -            if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi),
   5.261 -                                 &dst.val, dst.bytes, ctxt)) != 0 )
   5.262 +            if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.esi),
   5.263 +                                  &dst.val, dst.bytes, ctxt, ops)) != 0 )
   5.264                  goto done;
   5.265              fail_if(ops->write_io == NULL);
   5.266              if ( (rc = ops->write_io(port, dst.bytes, dst.val, ctxt)) != 0 )
   5.267 @@ -2455,9 +2470,9 @@ x86_emulate(
   5.268  
   5.269          if ( (rc = ops->read_segment(x86_seg_cs, &reg, ctxt)) ||
   5.270               (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
   5.271 -                              reg.sel, op_bytes, ctxt)) ||
   5.272 +                              &reg.sel, op_bytes, ctxt)) ||
   5.273               (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
   5.274 -                              _regs.eip, op_bytes, ctxt)) )
   5.275 +                              &_regs.eip, op_bytes, ctxt)) )
   5.276              goto done;
   5.277  
   5.278          if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
   5.279 @@ -2483,8 +2498,8 @@ x86_emulate(
   5.280          /* 64-bit mode: POP defaults to a 64-bit operand. */
   5.281          if ( mode_64bit() && (op_bytes == 4) )
   5.282              op_bytes = 8;
   5.283 -        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
   5.284 -                             &dst.val, op_bytes, ctxt)) != 0 )
   5.285 +        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
   5.286 +                              &dst.val, op_bytes, ctxt, ops)) != 0 )
   5.287              goto done;
   5.288          if ( op_bytes == 2 )
   5.289              dst.val = (uint16_t)dst.val | (_regs.eflags & 0xffff0000u);
   5.290 @@ -2507,8 +2522,8 @@ x86_emulate(
   5.291          dst.type  = OP_REG;
   5.292          dst.reg   = (unsigned long *)&_regs.eax;
   5.293          dst.bytes = (d & ByteOp) ? 1 : op_bytes;
   5.294 -        if ( (rc = ops->read(ea.mem.seg, insn_fetch_bytes(ad_bytes),
   5.295 -                             &dst.val, dst.bytes, ctxt)) != 0 )
   5.296 +        if ( (rc = read_ulong(ea.mem.seg, insn_fetch_bytes(ad_bytes),
   5.297 +                              &dst.val, dst.bytes, ctxt, ops)) != 0 )
   5.298              goto done;
   5.299          break;
   5.300  
   5.301 @@ -2536,8 +2551,8 @@ x86_emulate(
   5.302          }
   5.303          else
   5.304          {
   5.305 -            if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi),
   5.306 -                                 &dst.val, dst.bytes, ctxt)) != 0 )
   5.307 +            if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.esi),
   5.308 +                                  &dst.val, dst.bytes, ctxt, ops)) != 0 )
   5.309                  goto done;
   5.310              dst.type = OP_MEM;
   5.311              nr_reps = 1;
   5.312 @@ -2556,10 +2571,10 @@ x86_emulate(
   5.313          unsigned long next_eip = _regs.eip;
   5.314          get_rep_prefix();
   5.315          src.bytes = dst.bytes = (d & ByteOp) ? 1 : op_bytes;
   5.316 -        if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi),
   5.317 -                             &dst.val, dst.bytes, ctxt)) ||
   5.318 -             (rc = ops->read(x86_seg_es, truncate_ea(_regs.edi),
   5.319 -                             &src.val, src.bytes, ctxt)) )
   5.320 +        if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.esi),
   5.321 +                              &dst.val, dst.bytes, ctxt, ops)) ||
   5.322 +             (rc = read_ulong(x86_seg_es, truncate_ea(_regs.edi),
   5.323 +                              &src.val, src.bytes, ctxt, ops)) )
   5.324              goto done;
   5.325          register_address_increment(
   5.326              _regs.esi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
   5.327 @@ -2592,8 +2607,8 @@ x86_emulate(
   5.328          dst.type  = OP_REG;
   5.329          dst.bytes = (d & ByteOp) ? 1 : op_bytes;
   5.330          dst.reg   = (unsigned long *)&_regs.eax;
   5.331 -        if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi),
   5.332 -                             &dst.val, dst.bytes, ctxt)) != 0 )
   5.333 +        if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.esi),
   5.334 +                              &dst.val, dst.bytes, ctxt, ops)) != 0 )
   5.335              goto done;
   5.336          register_address_increment(
   5.337              _regs.esi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
   5.338 @@ -2606,8 +2621,8 @@ x86_emulate(
   5.339          get_rep_prefix();
   5.340          src.bytes = dst.bytes = (d & ByteOp) ? 1 : op_bytes;
   5.341          dst.val = _regs.eax;
   5.342 -        if ( (rc = ops->read(x86_seg_es, truncate_ea(_regs.edi),
   5.343 -                             &src.val, src.bytes, ctxt)) != 0 )
   5.344 +        if ( (rc = read_ulong(x86_seg_es, truncate_ea(_regs.edi),
   5.345 +                              &src.val, src.bytes, ctxt, ops)) != 0 )
   5.346              goto done;
   5.347          register_address_increment(
   5.348              _regs.edi, (_regs.eflags & EFLG_DF) ? -src.bytes : src.bytes);
   5.349 @@ -2624,8 +2639,8 @@ x86_emulate(
   5.350      case 0xc3: /* ret (near) */ {
   5.351          int offset = (b == 0xc2) ? insn_fetch_type(uint16_t) : 0;
   5.352          op_bytes = mode_64bit() ? 8 : op_bytes;
   5.353 -        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes + offset),
   5.354 -                             &dst.val, op_bytes, ctxt)) != 0 )
   5.355 +        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes + offset),
   5.356 +                              &dst.val, op_bytes, ctxt, ops)) != 0 )
   5.357              goto done;
   5.358          _regs.eip = dst.val;
   5.359          break;
   5.360 @@ -2640,7 +2655,7 @@ x86_emulate(
   5.361          dst.bytes = (mode_64bit() && (op_bytes == 4)) ? 8 : op_bytes;
   5.362          dst.reg = (unsigned long *)&_regs.ebp;
   5.363          if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
   5.364 -                              _regs.ebp, dst.bytes, ctxt)) )
   5.365 +                              &_regs.ebp, dst.bytes, ctxt)) )
   5.366              goto done;
   5.367          dst.val = _regs.esp;
   5.368  
   5.369 @@ -2650,14 +2665,14 @@ x86_emulate(
   5.370              {
   5.371                  unsigned long ebp, temp_data;
   5.372                  ebp = truncate_word(_regs.ebp - i*dst.bytes, ctxt->sp_size/8);
   5.373 -                if ( (rc = ops->read(x86_seg_ss, ebp,
   5.374 -                                     &temp_data, dst.bytes, ctxt)) ||
   5.375 +                if ( (rc = read_ulong(x86_seg_ss, ebp,
   5.376 +                                      &temp_data, dst.bytes, ctxt, ops)) ||
   5.377                       (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
   5.378 -                                      temp_data, dst.bytes, ctxt)) )
   5.379 +                                      &temp_data, dst.bytes, ctxt)) )
   5.380                      goto done;
   5.381              }
   5.382              if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
   5.383 -                                  dst.val, dst.bytes, ctxt)) )
   5.384 +                                  &dst.val, dst.bytes, ctxt)) )
   5.385                  goto done;
   5.386          }
   5.387  
   5.388 @@ -2683,8 +2698,8 @@ x86_emulate(
   5.389  
   5.390          /* Second writeback, to %%ebp. */
   5.391          dst.reg = (unsigned long *)&_regs.ebp;
   5.392 -        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes),
   5.393 -                             &dst.val, dst.bytes, ctxt)) )
   5.394 +        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes),
   5.395 +                              &dst.val, dst.bytes, ctxt, ops)) )
   5.396              goto done;
   5.397          break;
   5.398  
   5.399 @@ -2692,10 +2707,10 @@ x86_emulate(
   5.400      case 0xcb: /* ret (far) */ {
   5.401          int offset = (b == 0xca) ? insn_fetch_type(uint16_t) : 0;
   5.402          op_bytes = mode_64bit() ? 8 : op_bytes;
   5.403 -        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
   5.404 -                             &dst.val, op_bytes, ctxt)) || 
   5.405 -             (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes + offset),
   5.406 -                             &src.val, op_bytes, ctxt)) ||
   5.407 +        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
   5.408 +                              &dst.val, op_bytes, ctxt, ops)) || 
   5.409 +             (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes + offset),
   5.410 +                              &src.val, op_bytes, ctxt, ops)) ||
   5.411               (rc = load_seg(x86_seg_cs, (uint16_t)src.val, ctxt, ops)) )
   5.412              goto done;
   5.413          _regs.eip = dst.val;
   5.414 @@ -2729,12 +2744,12 @@ x86_emulate(
   5.415          if ( !mode_iopl() )
   5.416              mask |= EFLG_IF;
   5.417          fail_if(!in_realmode(ctxt, ops));
   5.418 -        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
   5.419 -                             &eip, op_bytes, ctxt)) ||
   5.420 -             (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
   5.421 -                             &cs, op_bytes, ctxt)) ||
   5.422 -             (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
   5.423 -                             &eflags, op_bytes, ctxt)) )
   5.424 +        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
   5.425 +                              &eip, op_bytes, ctxt, ops)) ||
   5.426 +             (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
   5.427 +                              &cs, op_bytes, ctxt, ops)) ||
   5.428 +             (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
   5.429 +                              &eflags, op_bytes, ctxt, ops)) )
   5.430              goto done;
   5.431          if ( op_bytes == 2 )
   5.432              eflags = (uint16_t)eflags | (_regs.eflags & 0xffff0000u);
   5.433 @@ -2779,8 +2794,8 @@ x86_emulate(
   5.434  
   5.435      case 0xd7: /* xlat */ {
   5.436          unsigned long al = (uint8_t)_regs.eax;
   5.437 -        if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.ebx + al),
   5.438 -                             &al, 1, ctxt)) != 0 )
   5.439 +        if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.ebx + al),
   5.440 +                              &al, 1, ctxt, ops)) != 0 )
   5.441              goto done;
   5.442          *(uint8_t *)&_regs.eax = al;
   5.443          break;
   5.444 @@ -3242,9 +3257,9 @@ x86_emulate(
   5.445              if ( op_bytes == 2 )
   5.446                  reg.base &= 0xffffff;
   5.447              if ( (rc = ops->write(ea.mem.seg, ea.mem.off+0,
   5.448 -                                  reg.limit, 2, ctxt)) ||
   5.449 +                                  &reg.limit, 2, ctxt)) ||
   5.450                   (rc = ops->write(ea.mem.seg, ea.mem.off+2,
   5.451 -                                  reg.base, mode_64bit() ? 8 : 4, ctxt)) )
   5.452 +                                  &reg.base, mode_64bit() ? 8 : 4, ctxt)) )
   5.453                  goto done;
   5.454              break;
   5.455          case 2: /* lgdt */
   5.456 @@ -3252,10 +3267,10 @@ x86_emulate(
   5.457              generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
   5.458              fail_if(ops->write_segment == NULL);
   5.459              memset(&reg, 0, sizeof(reg));
   5.460 -            if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0,
   5.461 -                                 &limit, 2, ctxt)) ||
   5.462 -                 (rc = ops->read(ea.mem.seg, ea.mem.off+2,
   5.463 -                                 &base, mode_64bit() ? 8 : 4, ctxt)) )
   5.464 +            if ( (rc = read_ulong(ea.mem.seg, ea.mem.off+0,
   5.465 +                                  &limit, 2, ctxt, ops)) ||
   5.466 +                 (rc = read_ulong(ea.mem.seg, ea.mem.off+2,
   5.467 +                                  &base, mode_64bit() ? 8 : 4, ctxt, ops)) )
   5.468                  goto done;
   5.469              reg.base = base;
   5.470              reg.limit = limit;
   5.471 @@ -3282,8 +3297,8 @@ x86_emulate(
   5.472                  goto done;
   5.473              if ( ea.type == OP_REG )
   5.474                  cr0w = *ea.reg;
   5.475 -            else if ( (rc = ops->read(ea.mem.seg, ea.mem.off,
   5.476 -                                      &cr0w, 2, ctxt)) )
   5.477 +            else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off,
   5.478 +                                       &cr0w, 2, ctxt, ops)) )
   5.479                  goto done;
   5.480              /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
   5.481              cr0 = (cr0 & ~0xe) | (cr0w & 0xf);
   5.482 @@ -3405,8 +3420,10 @@ x86_emulate(
   5.483          if ( ea.type == OP_MEM )
   5.484          {
   5.485              unsigned long lval, hval;
   5.486 -            if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0, &lval, 4, ctxt)) ||
   5.487 -                 (rc = ops->read(ea.mem.seg, ea.mem.off+4, &hval, 4, ctxt)) )
   5.488 +            if ( (rc = read_ulong(ea.mem.seg, ea.mem.off+0,
   5.489 +                                  &lval, 4, ctxt, ops)) ||
   5.490 +                 (rc = read_ulong(ea.mem.seg, ea.mem.off+4,
   5.491 +                                  &hval, 4, ctxt, ops)) )
   5.492                  goto done;
   5.493              val = ((uint64_t)hval << 32) | (uint32_t)lval;
   5.494              stub[2] = modrm & 0x38; /* movq (%eax),%mmN */
   5.495 @@ -3429,8 +3446,8 @@ x86_emulate(
   5.496          if ( ea.type == OP_MEM )
   5.497          {
   5.498              unsigned long lval = (uint32_t)val, hval = (uint32_t)(val >> 32);
   5.499 -            if ( (rc = ops->write(ea.mem.seg, ea.mem.off+0, lval, 4, ctxt)) ||
   5.500 -                 (rc = ops->write(ea.mem.seg, ea.mem.off+4, hval, 4, ctxt)) )
   5.501 +            if ( (rc = ops->write(ea.mem.seg, ea.mem.off+0, &lval, 4, ctxt)) ||
   5.502 +                 (rc = ops->write(ea.mem.seg, ea.mem.off+4, &hval, 4, ctxt)) )
   5.503                  goto done;
   5.504          }
   5.505          break;
   5.506 @@ -3482,8 +3499,8 @@ x86_emulate(
   5.507  
   5.508          /* Get actual old value. */
   5.509          for ( i = 0; i < (op_bytes/sizeof(long)); i++ )
   5.510 -            if ( (rc = ops->read(ea.mem.seg, ea.mem.off + i*sizeof(long),
   5.511 -                                 &old[i], sizeof(long), ctxt)) != 0 )
   5.512 +            if ( (rc = read_ulong(ea.mem.seg, ea.mem.off + i*sizeof(long),
   5.513 +                                  &old[i], sizeof(long), ctxt, ops)) != 0 )
   5.514                  goto done;
   5.515  
   5.516          /* Get expected and proposed values. */
     6.1 --- a/xen/arch/x86/x86_emulate/x86_emulate.h	Mon Jun 30 11:39:10 2008 +0100
     6.2 +++ b/xen/arch/x86/x86_emulate/x86_emulate.h	Mon Jun 30 14:19:09 2008 +0100
     6.3 @@ -102,7 +102,8 @@ enum x86_emulate_fpu_type {
     6.4  };
     6.5  
     6.6  /*
     6.7 - * These operations represent the instruction emulator's interface to memory.
     6.8 + * These operations represent the instruction emulator's interface to memory,
     6.9 + * I/O ports, privileged state... pretty much everything other than GPRs.
    6.10   * 
    6.11   * NOTES:
    6.12   *  1. If the access fails (cannot emulate, or a standard access faults) then
    6.13 @@ -110,8 +111,7 @@ enum x86_emulate_fpu_type {
    6.14   *     some out-of-band mechanism, unknown to the emulator. The memop signals
    6.15   *     failure by returning X86EMUL_EXCEPTION to the emulator, which will
    6.16   *     then immediately bail.
    6.17 - *  2. Valid access sizes are 1, 2, 4 and 8 (x86/64 only) bytes.
    6.18 - *  3. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
    6.19 + *  2. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
    6.20   */
    6.21  struct x86_emulate_ops
    6.22  {
    6.23 @@ -121,19 +121,25 @@ struct x86_emulate_ops
    6.24       * All memory-access functions:
    6.25       *  @seg:   [IN ] Segment being dereferenced (specified as x86_seg_??).
    6.26       *  @offset:[IN ] Offset within segment.
    6.27 +     *  @p_data:[IN ] Pointer to i/o data buffer (length is @bytes)
    6.28       * Read functions:
    6.29       *  @val:   [OUT] Value read, zero-extended to 'ulong'.
    6.30       * Write functions:
    6.31       *  @val:   [IN ] Value to write (low-order bytes used as req'd).
    6.32       * Variable-length access functions:
    6.33 -     *  @bytes: [IN ] Number of bytes to read or write.
    6.34 +     *  @bytes: [IN ] Number of bytes to read or write. Valid access sizes are
    6.35 +     *                1, 2, 4 and 8 (x86/64 only) bytes, unless otherwise
    6.36 +     *                stated.
    6.37       */
    6.38  
    6.39 -    /* read: Emulate a memory read. */
    6.40 +    /*
    6.41 +     * read: Emulate a memory read.
    6.42 +     *  @bytes: Access length (0 < @bytes < 4096).
    6.43 +     */
    6.44      int (*read)(
    6.45          enum x86_segment seg,
    6.46          unsigned long offset,
    6.47 -        unsigned long *val,
    6.48 +        void *p_data,
    6.49          unsigned int bytes,
    6.50          struct x86_emulate_ctxt *ctxt);
    6.51  
    6.52 @@ -144,15 +150,18 @@ struct x86_emulate_ops
    6.53      int (*insn_fetch)(
    6.54          enum x86_segment seg,
    6.55          unsigned long offset,
    6.56 -        unsigned long *val,
    6.57 +        void *p_data,
    6.58          unsigned int bytes,
    6.59          struct x86_emulate_ctxt *ctxt);
    6.60  
    6.61 -    /* write: Emulate a memory write. */
    6.62 +    /*
    6.63 +     * write: Emulate a memory write.
    6.64 +     *  @bytes: Access length (0 < @bytes < 4096).
    6.65 +     */
    6.66      int (*write)(
    6.67          enum x86_segment seg,
    6.68          unsigned long offset,
    6.69 -        unsigned long val,
    6.70 +        void *p_data,
    6.71          unsigned int bytes,
    6.72          struct x86_emulate_ctxt *ctxt);
    6.73
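
One design point worth calling out from the emulate.c hunks above: hvmemul_do_io() now infers data_is_ptr from p_data == NULL. The REP string handlers (rep_ins/rep_outs/rep_movs) pass NULL and supply the guest-RAM gpa in ram_gpa, while single accesses pass a buffer; buffered writes are folded into the inline value before the ioreq is issued, and buffered reads are copied back on completion. A toy restating that logic, with placeholder names and constants (not Xen definitions):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    typedef uint64_t paddr_t;
    #define IOREQ_WRITE 0
    #define IOREQ_READ  1

    static void demo_do_io(int dir, paddr_t ram_gpa, void *p_data, int size)
    {
        paddr_t value = ram_gpa;             /* RAM-side gpa for REP ops */
        int data_is_ptr = (p_data == NULL);  /* REP callers pass no buffer */

        if ( (p_data != NULL) && (dir == IOREQ_WRITE) )
        {
            /* Buffered (non-REP) write: payload is folded into 'value'. */
            memcpy(&value, p_data, size);
            p_data = NULL;
        }

        printf("%s: data_is_ptr=%d value=%#llx\n",
               (dir == IOREQ_WRITE) ? "write" : "read",
               data_is_ptr, (unsigned long long)value);

        if ( (dir == IOREQ_READ) && (p_data != NULL) )
        {
            /* Buffered read: result is copied back into the buffer. */
            uint64_t io_data = 0xabcd;       /* stand-in for the completed io_data */
            memcpy(p_data, &io_data, size);
        }
    }

    int main(void)
    {
        uint32_t w = 0xdeadbeef, r = 0;
        demo_do_io(IOREQ_WRITE, 0, &w, sizeof(w));    /* single MMIO write */
        demo_do_io(IOREQ_READ,  0, &r, sizeof(r));    /* single MMIO read  */
        demo_do_io(IOREQ_WRITE, 0x12345000, NULL, 4); /* REP op, data in guest RAM */
        printf("buffered read returned %#x\n", r);    /* 0xabcd on a little-endian host */
        return 0;
    }

This convention is also why the weird-size check at the top of hvmemul_do_io() can assert p_data != NULL: a REP access always uses the pointer form, so an odd size can only come from a buffered single access.
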