ia64/xen-unstable

changeset 12895:f5121d001d1a

[XEN] Shadow-mode-refcount PTE update fix.

Add back support for emulated PTE updates, which is critical for
shadow_refcount operation.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@localhost.localdomain
date Sat Dec 09 16:29:52 2006 +0000 (2006-12-09)
parents 245f7ce8763e
children e948333c2c38 57b36b893a8d
files xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/arch/x86/mm/shadow/private.h xen/include/asm-x86/guest_access.h
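
For orientation before the diff: the heart of the change is that
shadow_init_emulation() now returns an emulator ops table chosen at
runtime, instead of callers always using the (removed) HVM-only
shadow_emulator_ops. A minimal sketch of the new dispatch, condensed
from the common.c hunks below (segment-cache priming and the
instruction prefetch are elided; see the full patch for those):

    /* Condensed sketch, not the full patch: see the common.c diff below. */
    struct x86_emulate_ops *shadow_init_emulation(
        struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs)
    {
        sh_ctxt->ctxt.regs = regs;

        if ( !is_hvm_vcpu(current) )
        {
            /* PV guest: emulate against host addresses, using the
             * copy_from_user()-based pv_shadow_emulator_ops. */
            sh_ctxt->ctxt.mode = X86EMUL_MODE_HOST;
            return &pv_shadow_emulator_ops;
        }

        /* HVM guest: prime the segment cache and insn_buf here ... */
        return &hvm_shadow_emulator_ops;
    }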
line diff
     1.1 --- a/xen/arch/x86/mm/shadow/common.c	Sat Dec 09 15:04:27 2006 +0000
     1.2 +++ b/xen/arch/x86/mm/shadow/common.c	Sat Dec 09 16:29:52 2006 +0000
     1.3 @@ -185,15 +185,7 @@ hvm_read(enum x86_segment seg,
     1.4      //        In this case, that is only a user vs supervisor access check.
     1.5      //
     1.6      if ( (rc = hvm_copy_from_guest_virt(val, addr, bytes)) == 0 )
     1.7 -    {
     1.8 -#if 0
     1.9 -        struct vcpu *v = current;
    1.10 -        SHADOW_PRINTK("d=%u v=%u a=%#lx v=%#lx bytes=%u\n",
    1.11 -                       v->domain->domain_id, v->vcpu_id, 
    1.12 -                       addr, *val, bytes);
    1.13 -#endif
    1.14          return X86EMUL_CONTINUE;
    1.15 -    }
    1.16  
    1.17      /* If we got here, there was nothing mapped here, or a bad GFN 
    1.18       * was mapped here.  This should never happen: we're here because
    1.19 @@ -206,8 +198,190 @@ hvm_read(enum x86_segment seg,
    1.20      return X86EMUL_PROPAGATE_FAULT;
    1.21  }
    1.22  
    1.23 -void shadow_init_emulation(struct sh_emulate_ctxt *sh_ctxt, 
    1.24 -                           struct cpu_user_regs *regs)
    1.25 +static int
    1.26 +hvm_emulate_read(enum x86_segment seg,
    1.27 +                 unsigned long offset,
    1.28 +                 unsigned long *val,
    1.29 +                 unsigned int bytes,
    1.30 +                 struct x86_emulate_ctxt *ctxt)
    1.31 +{
    1.32 +    return hvm_read(seg, offset, val, bytes, hvm_access_read,
    1.33 +                    container_of(ctxt, struct sh_emulate_ctxt, ctxt));
    1.34 +}
    1.35 +
    1.36 +static int
    1.37 +hvm_emulate_insn_fetch(enum x86_segment seg,
    1.38 +                       unsigned long offset,
    1.39 +                       unsigned long *val,
    1.40 +                       unsigned int bytes,
    1.41 +                       struct x86_emulate_ctxt *ctxt)
    1.42 +{
    1.43 +    struct sh_emulate_ctxt *sh_ctxt =
    1.44 +        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    1.45 +    unsigned int insn_off = offset - ctxt->regs->eip;
    1.46 +
    1.47 +    /* Fall back if requested bytes are not in the prefetch cache. */
    1.48 +    if ( unlikely((insn_off + bytes) > sh_ctxt->insn_buf_bytes) )
    1.49 +        return hvm_read(seg, offset, val, bytes,
    1.50 +                        hvm_access_insn_fetch, sh_ctxt);
    1.51 +
    1.52 +    /* Hit the cache. Simple memcpy. */
    1.53 +    *val = 0;
    1.54 +    memcpy(val, &sh_ctxt->insn_buf[insn_off], bytes);
    1.55 +    return X86EMUL_CONTINUE;
    1.56 +}
    1.57 +
    1.58 +static int
    1.59 +hvm_emulate_write(enum x86_segment seg,
    1.60 +                  unsigned long offset,
    1.61 +                  unsigned long val,
    1.62 +                  unsigned int bytes,
    1.63 +                  struct x86_emulate_ctxt *ctxt)
    1.64 +{
    1.65 +    struct sh_emulate_ctxt *sh_ctxt =
    1.66 +        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    1.67 +    struct vcpu *v = current;
    1.68 +    unsigned long addr;
    1.69 +    int rc;
    1.70 +
    1.71 +    rc = hvm_translate_linear_addr(
    1.72 +        seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
    1.73 +    if ( rc )
    1.74 +        return rc;
    1.75 +
    1.76 +    return v->arch.shadow.mode->x86_emulate_write(
    1.77 +        v, addr, &val, bytes, sh_ctxt);
    1.78 +}
    1.79 +
    1.80 +static int 
    1.81 +hvm_emulate_cmpxchg(enum x86_segment seg,
    1.82 +                    unsigned long offset,
    1.83 +                    unsigned long old,
    1.84 +                    unsigned long new,
    1.85 +                    unsigned int bytes,
    1.86 +                    struct x86_emulate_ctxt *ctxt)
    1.87 +{
    1.88 +    struct sh_emulate_ctxt *sh_ctxt =
    1.89 +        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    1.90 +    struct vcpu *v = current;
    1.91 +    unsigned long addr;
    1.92 +    int rc;
    1.93 +
    1.94 +    rc = hvm_translate_linear_addr(
    1.95 +        seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
    1.96 +    if ( rc )
    1.97 +        return rc;
    1.98 +
    1.99 +    return v->arch.shadow.mode->x86_emulate_cmpxchg(
   1.100 +        v, addr, old, new, bytes, sh_ctxt);
   1.101 +}
   1.102 +
   1.103 +static int 
   1.104 +hvm_emulate_cmpxchg8b(enum x86_segment seg,
   1.105 +                      unsigned long offset,
   1.106 +                      unsigned long old_lo,
   1.107 +                      unsigned long old_hi,
   1.108 +                      unsigned long new_lo,
   1.109 +                      unsigned long new_hi,
   1.110 +                      struct x86_emulate_ctxt *ctxt)
   1.111 +{
   1.112 +    struct sh_emulate_ctxt *sh_ctxt =
   1.113 +        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
   1.114 +    struct vcpu *v = current;
   1.115 +    unsigned long addr;
   1.116 +    int rc;
   1.117 +
   1.118 +    rc = hvm_translate_linear_addr(
   1.119 +        seg, offset, 8, hvm_access_write, sh_ctxt, &addr);
   1.120 +    if ( rc )
   1.121 +        return rc;
   1.122 +
   1.123 +    return v->arch.shadow.mode->x86_emulate_cmpxchg8b(
   1.124 +        v, addr, old_lo, old_hi, new_lo, new_hi, sh_ctxt);
   1.125 +}
   1.126 +
   1.127 +static struct x86_emulate_ops hvm_shadow_emulator_ops = {
   1.128 +    .read       = hvm_emulate_read,
   1.129 +    .insn_fetch = hvm_emulate_insn_fetch,
   1.130 +    .write      = hvm_emulate_write,
   1.131 +    .cmpxchg    = hvm_emulate_cmpxchg,
   1.132 +    .cmpxchg8b  = hvm_emulate_cmpxchg8b,
   1.133 +};
   1.134 +
   1.135 +static int
   1.136 +pv_emulate_read(enum x86_segment seg,
   1.137 +                unsigned long offset,
   1.138 +                unsigned long *val,
   1.139 +                unsigned int bytes,
   1.140 +                struct x86_emulate_ctxt *ctxt)
   1.141 +{
   1.142 +    unsigned int rc;
   1.143 +
   1.144 +    *val = 0;
   1.145 +    if ( (rc = copy_from_user((void *)val, (void *)offset, bytes)) != 0 )
   1.146 +    {
   1.147 +        propagate_page_fault(offset + bytes - rc, 0); /* read fault */
   1.148 +        return X86EMUL_PROPAGATE_FAULT;
   1.149 +    }
   1.150 +
   1.151 +    return X86EMUL_CONTINUE;
   1.152 +}
   1.153 +
   1.154 +static int
   1.155 +pv_emulate_write(enum x86_segment seg,
   1.156 +                 unsigned long offset,
   1.157 +                 unsigned long val,
   1.158 +                 unsigned int bytes,
   1.159 +                 struct x86_emulate_ctxt *ctxt)
   1.160 +{
   1.161 +    struct sh_emulate_ctxt *sh_ctxt =
   1.162 +        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
   1.163 +    struct vcpu *v = current;
   1.164 +    return v->arch.shadow.mode->x86_emulate_write(
   1.165 +        v, offset, &val, bytes, sh_ctxt);
   1.166 +}
   1.167 +
   1.168 +static int 
   1.169 +pv_emulate_cmpxchg(enum x86_segment seg,
   1.170 +                   unsigned long offset,
   1.171 +                   unsigned long old,
   1.172 +                   unsigned long new,
   1.173 +                   unsigned int bytes,
   1.174 +                   struct x86_emulate_ctxt *ctxt)
   1.175 +{
   1.176 +    struct sh_emulate_ctxt *sh_ctxt =
   1.177 +        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
   1.178 +    struct vcpu *v = current;
   1.179 +    return v->arch.shadow.mode->x86_emulate_cmpxchg(
   1.180 +        v, offset, old, new, bytes, sh_ctxt);
   1.181 +}
   1.182 +
   1.183 +static int 
   1.184 +pv_emulate_cmpxchg8b(enum x86_segment seg,
   1.185 +                     unsigned long offset,
   1.186 +                     unsigned long old_lo,
   1.187 +                     unsigned long old_hi,
   1.188 +                     unsigned long new_lo,
   1.189 +                     unsigned long new_hi,
   1.190 +                     struct x86_emulate_ctxt *ctxt)
   1.191 +{
   1.192 +    struct sh_emulate_ctxt *sh_ctxt =
   1.193 +        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
   1.194 +    struct vcpu *v = current;
   1.195 +    return v->arch.shadow.mode->x86_emulate_cmpxchg8b(
   1.196 +        v, offset, old_lo, old_hi, new_lo, new_hi, sh_ctxt);
   1.197 +}
   1.198 +
   1.199 +static struct x86_emulate_ops pv_shadow_emulator_ops = {
   1.200 +    .read       = pv_emulate_read,
   1.201 +    .insn_fetch = pv_emulate_read,
   1.202 +    .write      = pv_emulate_write,
   1.203 +    .cmpxchg    = pv_emulate_cmpxchg,
   1.204 +    .cmpxchg8b  = pv_emulate_cmpxchg8b,
   1.205 +};
   1.206 +
   1.207 +struct x86_emulate_ops *shadow_init_emulation(
   1.208 +    struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs)
   1.209  {
   1.210      struct segment_register *creg;
   1.211      struct vcpu *v = current;
   1.212 @@ -215,6 +389,12 @@ void shadow_init_emulation(struct sh_emu
   1.213  
   1.214      sh_ctxt->ctxt.regs = regs;
   1.215  
   1.216 +    if ( !is_hvm_vcpu(v) )
   1.217 +    {
   1.218 +        sh_ctxt->ctxt.mode = X86EMUL_MODE_HOST;
   1.219 +        return &pv_shadow_emulator_ops;
   1.220 +    }
   1.221 +
   1.222      /* Segment cache initialisation. Primed with CS. */
   1.223      sh_ctxt->valid_seg_regs = 0;
   1.224      creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
   1.225 @@ -237,132 +417,10 @@ void shadow_init_emulation(struct sh_emu
   1.226           !hvm_copy_from_guest_virt(
   1.227               sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
   1.228          ? sizeof(sh_ctxt->insn_buf) : 0;
   1.229 -}
   1.230 -
   1.231 -static int
   1.232 -sh_x86_emulate_read(enum x86_segment seg,
   1.233 -                    unsigned long offset,
   1.234 -                    unsigned long *val,
   1.235 -                    unsigned int bytes,
   1.236 -                    struct x86_emulate_ctxt *ctxt)
   1.237 -{
   1.238 -    return hvm_read(seg, offset, val, bytes, hvm_access_read,
   1.239 -                    container_of(ctxt, struct sh_emulate_ctxt, ctxt));
   1.240 -}
   1.241 -
   1.242 -static int
   1.243 -sh_x86_emulate_insn_fetch(enum x86_segment seg,
   1.244 -                          unsigned long offset,
   1.245 -                          unsigned long *val,
   1.246 -                          unsigned int bytes,
   1.247 -                          struct x86_emulate_ctxt *ctxt)
   1.248 -{
   1.249 -    struct sh_emulate_ctxt *sh_ctxt =
   1.250 -        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
   1.251 -    unsigned int insn_off = offset - ctxt->regs->eip;
   1.252 -
   1.253 -    /* Fall back if requested bytes are not in the prefetch cache. */
   1.254 -    if ( unlikely((insn_off + bytes) > sh_ctxt->insn_buf_bytes) )
   1.255 -        return hvm_read(seg, offset, val, bytes,
   1.256 -                        hvm_access_insn_fetch, sh_ctxt);
   1.257 -
   1.258 -    /* Hit the cache. Simple memcpy. */
   1.259 -    *val = 0;
   1.260 -    memcpy(val, &sh_ctxt->insn_buf[insn_off], bytes);
   1.261 -    return X86EMUL_CONTINUE;
   1.262 -}
   1.263 -
   1.264 -static int
   1.265 -sh_x86_emulate_write(enum x86_segment seg,
   1.266 -                     unsigned long offset,
   1.267 -                     unsigned long val,
   1.268 -                     unsigned int bytes,
   1.269 -                     struct x86_emulate_ctxt *ctxt)
   1.270 -{
   1.271 -    struct sh_emulate_ctxt *sh_ctxt =
   1.272 -        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
   1.273 -    struct vcpu *v = current;
   1.274 -    unsigned long addr;
   1.275 -    int rc;
   1.276 -
   1.277 -    rc = hvm_translate_linear_addr(
   1.278 -        seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
   1.279 -    if ( rc )
   1.280 -        return rc;
   1.281 -
   1.282 -#if 0
   1.283 -    SHADOW_PRINTK("d=%u v=%u a=%#lx v=%#lx bytes=%u\n",
   1.284 -                  v->domain->domain_id, v->vcpu_id, addr, val, bytes);
   1.285 -#endif
   1.286 -    return v->arch.shadow.mode->x86_emulate_write(
   1.287 -        v, addr, &val, bytes, sh_ctxt);
   1.288 +
   1.289 +    return &hvm_shadow_emulator_ops;
   1.290  }
   1.291  
   1.292 -static int 
   1.293 -sh_x86_emulate_cmpxchg(enum x86_segment seg,
   1.294 -                       unsigned long offset,
   1.295 -                       unsigned long old,
   1.296 -                       unsigned long new,
   1.297 -                       unsigned int bytes,
   1.298 -                       struct x86_emulate_ctxt *ctxt)
   1.299 -{
   1.300 -    struct sh_emulate_ctxt *sh_ctxt =
   1.301 -        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
   1.302 -    struct vcpu *v = current;
   1.303 -    unsigned long addr;
   1.304 -    int rc;
   1.305 -
   1.306 -    rc = hvm_translate_linear_addr(
   1.307 -        seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
   1.308 -    if ( rc )
   1.309 -        return rc;
   1.310 -
   1.311 -#if 0
   1.312 -    SHADOW_PRINTK("d=%u v=%u a=%#lx o?=%#lx n:=%#lx bytes=%u\n",
   1.313 -                   v->domain->domain_id, v->vcpu_id, addr, old, new, bytes);
   1.314 -#endif
   1.315 -    return v->arch.shadow.mode->x86_emulate_cmpxchg(
   1.316 -        v, addr, old, new, bytes, sh_ctxt);
   1.317 -}
   1.318 -
   1.319 -static int 
   1.320 -sh_x86_emulate_cmpxchg8b(enum x86_segment seg,
   1.321 -                         unsigned long offset,
   1.322 -                         unsigned long old_lo,
   1.323 -                         unsigned long old_hi,
   1.324 -                         unsigned long new_lo,
   1.325 -                         unsigned long new_hi,
   1.326 -                         struct x86_emulate_ctxt *ctxt)
   1.327 -{
   1.328 -    struct sh_emulate_ctxt *sh_ctxt =
   1.329 -        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
   1.330 -    struct vcpu *v = current;
   1.331 -    unsigned long addr;
   1.332 -    int rc;
   1.333 -
   1.334 -    rc = hvm_translate_linear_addr(
   1.335 -        seg, offset, 8, hvm_access_write, sh_ctxt, &addr);
   1.336 -    if ( rc )
   1.337 -        return rc;
   1.338 -
   1.339 -#if 0
   1.340 -    SHADOW_PRINTK("d=%u v=%u a=%#lx o?=%#lx:%lx n:=%#lx:%lx\n",
   1.341 -                   v->domain->domain_id, v->vcpu_id, addr, old_hi, old_lo,
   1.342 -                   new_hi, new_lo, ctxt);
   1.343 -#endif
   1.344 -    return v->arch.shadow.mode->x86_emulate_cmpxchg8b(
   1.345 -        v, addr, old_lo, old_hi, new_lo, new_hi, sh_ctxt);
   1.346 -}
   1.347 -
   1.348 -
   1.349 -struct x86_emulate_ops shadow_emulator_ops = {
   1.350 -    .read       = sh_x86_emulate_read,
   1.351 -    .insn_fetch = sh_x86_emulate_insn_fetch,
   1.352 -    .write      = sh_x86_emulate_write,
   1.353 -    .cmpxchg    = sh_x86_emulate_cmpxchg,
   1.354 -    .cmpxchg8b  = sh_x86_emulate_cmpxchg8b,
   1.355 -};
   1.356 -
   1.357  /**************************************************************************/
   1.358  /* Code for "promoting" a guest page to the point where the shadow code is
   1.359   * willing to let it be treated as a guest page table.  This generally
     2.1 --- a/xen/arch/x86/mm/shadow/multi.c	Sat Dec 09 15:04:27 2006 +0000
     2.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Sat Dec 09 16:29:52 2006 +0000
     2.3 @@ -2585,6 +2585,7 @@ static int sh_page_fault(struct vcpu *v,
     2.4      shadow_l1e_t sl1e, *ptr_sl1e;
     2.5      paddr_t gpa;
     2.6      struct sh_emulate_ctxt emul_ctxt;
     2.7 +    struct x86_emulate_ops *emul_ops;
     2.8      int r, mmio;
     2.9      fetch_type_t ft = 0;
    2.10  
    2.11 @@ -2811,13 +2812,14 @@ static int sh_page_fault(struct vcpu *v,
    2.12      return EXCRET_fault_fixed;
    2.13  
    2.14   emulate:
    2.15 -    if ( !is_hvm_domain(d) || !guest_mode(regs) )
    2.16 +    if ( !shadow_mode_refcounts(d) || !guest_mode(regs) )
    2.17          goto not_a_shadow_fault;
    2.18  
    2.19 -    hvm_store_cpu_guest_regs(v, regs, NULL);
    2.20 +    if ( is_hvm_domain(d) )
    2.21 +        hvm_store_cpu_guest_regs(v, regs, NULL);
    2.22      SHADOW_PRINTK("emulate: eip=%#lx\n", regs->eip);
    2.23  
    2.24 -    shadow_init_emulation(&emul_ctxt, regs);
    2.25 +    emul_ops = shadow_init_emulation(&emul_ctxt, regs);
    2.26  
    2.27      /*
    2.28       * We do not emulate user writes. Instead we use them as a hint that the
    2.29 @@ -2825,7 +2827,7 @@ static int sh_page_fault(struct vcpu *v,
    2.30       * it seems very unlikely that any OS grants user access to page tables.
    2.31       */
    2.32      if ( (regs->error_code & PFEC_user_mode) ||
    2.33 -         x86_emulate_memop(&emul_ctxt.ctxt, &shadow_emulator_ops) )
    2.34 +         x86_emulate_memop(&emul_ctxt.ctxt, emul_ops) )
    2.35      {
    2.36          SHADOW_PRINTK("emulator failure, unshadowing mfn %#lx\n", 
    2.37                         mfn_x(gmfn));
    2.38 @@ -2837,7 +2839,8 @@ static int sh_page_fault(struct vcpu *v,
    2.39      }
    2.40  
    2.41      /* Emulator has changed the user registers: write back */
    2.42 -    hvm_load_cpu_guest_regs(v, regs);
    2.43 +    if ( is_hvm_domain(d) )
    2.44 +        hvm_load_cpu_guest_regs(v, regs);
    2.45      goto done;
    2.46  
    2.47   mmio:
    2.48 @@ -3814,9 +3817,10 @@ static inline void * emulate_map_dest(st
    2.49  
    2.50   page_fault:
    2.51      errcode |= PFEC_write_access;
    2.52 -    if ( ring_3(sh_ctxt->ctxt.regs) )
    2.53 -        errcode |= PFEC_user_mode;
    2.54 -    hvm_inject_exception(TRAP_page_fault, errcode, vaddr);
    2.55 +    if ( is_hvm_vcpu(v) )
    2.56 +        hvm_inject_exception(TRAP_page_fault, errcode, vaddr);
    2.57 +    else
    2.58 +        propagate_page_fault(vaddr, errcode);
    2.59      return NULL;
    2.60  }
    2.61  
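Condensing the sh_page_fault() hunks just above: the emulate path now
triggers for any domain with refcounted shadows (PV or HVM), and only
HVM vcpus pay for the guest-register save/restore around the emulator.
A rough sketch of the resulting control flow (the failure label is
abbreviated here; the real path unshadows gmfn and retries):

     emulate:
        if ( !shadow_mode_refcounts(d) || !guest_mode(regs) )
            goto not_a_shadow_fault;
        if ( is_hvm_domain(d) )
            hvm_store_cpu_guest_regs(v, regs, NULL);
        emul_ops = shadow_init_emulation(&emul_ctxt, regs);
        /* User-mode writes are a hint to unshadow, not something to emulate. */
        if ( (regs->error_code & PFEC_user_mode) ||
             x86_emulate_memop(&emul_ctxt.ctxt, emul_ops) )
            goto unshadow_and_retry;   /* abbreviated failure path */
        if ( is_hvm_domain(d) )
            hvm_load_cpu_guest_regs(v, regs);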
     3.1 --- a/xen/arch/x86/mm/shadow/private.h	Sat Dec 09 15:04:27 2006 +0000
     3.2 +++ b/xen/arch/x86/mm/shadow/private.h	Sat Dec 09 16:29:52 2006 +0000
     3.3 @@ -257,9 +257,6 @@ static inline int sh_type_is_pinnable(st
     3.4   * Various function declarations 
     3.5   */
     3.6  
     3.7 -/* x86 emulator support */
     3.8 -extern struct x86_emulate_ops shadow_emulator_ops;
     3.9 -
    3.10  /* Hash table functions */
    3.11  mfn_t shadow_hash_lookup(struct vcpu *v, unsigned long n, unsigned int t);
    3.12  void  shadow_hash_insert(struct vcpu *v, 
    3.13 @@ -513,17 +510,17 @@ static inline void sh_unpin(struct vcpu 
    3.14  struct sh_emulate_ctxt {
    3.15      struct x86_emulate_ctxt ctxt;
    3.16  
    3.17 -    /* Cache of up to 15 bytes of instruction. */
    3.18 +    /* [HVM] Cache of up to 15 bytes of instruction. */
    3.19      uint8_t insn_buf[15];
    3.20      uint8_t insn_buf_bytes;
    3.21  
    3.22 -    /* Cache of segment registers already gathered for this emulation. */
    3.23 +    /* [HVM] Cache of segment registers already gathered for this emulation. */
    3.24      unsigned int valid_seg_regs;
    3.25      struct segment_register seg_reg[6];
    3.26  };
    3.27  
    3.28 -void shadow_init_emulation(struct sh_emulate_ctxt *sh_ctxt,
    3.29 -                           struct cpu_user_regs *regs);
    3.30 +struct x86_emulate_ops *shadow_init_emulation(
    3.31 +    struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
    3.32  
    3.33  #endif /* _XEN_SHADOW_PRIVATE_H */
    3.34  
     4.1 --- a/xen/include/asm-x86/guest_access.h	Sat Dec 09 15:04:27 2006 +0000
     4.2 +++ b/xen/include/asm-x86/guest_access.h	Sat Dec 09 16:29:52 2006 +0000
     4.3 @@ -34,7 +34,7 @@
     4.4  #define copy_to_guest_offset(hnd, off, ptr, nr) ({      \
     4.5      const typeof(ptr) _x = (hnd).p;                     \
     4.6      const typeof(ptr) _y = (ptr);                       \
     4.7 -    shadow_mode_translate(current->domain) ?            \
     4.8 +    is_hvm_vcpu(current) ?                              \
     4.9      copy_to_user_hvm(_x+(off), _y, sizeof(*_x)*(nr)) :  \
    4.10      copy_to_user(_x+(off), _y, sizeof(*_x)*(nr));       \
    4.11  })
    4.12 @@ -46,7 +46,7 @@
    4.13  #define copy_from_guest_offset(ptr, hnd, off, nr) ({    \
    4.14      const typeof(ptr) _x = (hnd).p;                     \
    4.15      const typeof(ptr) _y = (ptr);                       \
    4.16 -    shadow_mode_translate(current->domain) ?            \
    4.17 +    is_hvm_vcpu(current) ?                              \
    4.18      copy_from_user_hvm(_y, _x+(off), sizeof(*_x)*(nr)) :\
    4.19      copy_from_user(_y, _x+(off), sizeof(*_x)*(nr));     \
    4.20  })
    4.21 @@ -55,7 +55,7 @@
    4.22  #define copy_field_to_guest(hnd, ptr, field) ({         \
    4.23      const typeof(&(ptr)->field) _x = &(hnd).p->field;   \
    4.24      const typeof(&(ptr)->field) _y = &(ptr)->field;     \
    4.25 -    shadow_mode_translate(current->domain) ?            \
    4.26 +    is_hvm_vcpu(current) ?                              \
    4.27      copy_to_user_hvm(_x, _y, sizeof(*_x)) :             \
    4.28      copy_to_user(_x, _y, sizeof(*_x));                  \
    4.29  })
    4.30 @@ -64,7 +64,7 @@
    4.31  #define copy_field_from_guest(ptr, hnd, field) ({       \
    4.32      const typeof(&(ptr)->field) _x = &(hnd).p->field;   \
    4.33      const typeof(&(ptr)->field) _y = &(ptr)->field;     \
    4.34 -    shadow_mode_translate(current->domain) ?            \
    4.35 +    is_hvm_vcpu(current) ?                              \
    4.36      copy_from_user_hvm(_y, _x, sizeof(*_x)) :           \
    4.37      copy_from_user(_y, _x, sizeof(*_x));                \
    4.38  })
    4.39 @@ -80,7 +80,7 @@
    4.40  #define __copy_to_guest_offset(hnd, off, ptr, nr) ({    \
    4.41      const typeof(ptr) _x = (hnd).p;                     \
    4.42      const typeof(ptr) _y = (ptr);                       \
    4.43 -    shadow_mode_translate(current->domain) ?            \
    4.44 +    is_hvm_vcpu(current) ?                              \
    4.45      copy_to_user_hvm(_x+(off), _y, sizeof(*_x)*(nr)) :  \
    4.46      __copy_to_user(_x+(off), _y, sizeof(*_x)*(nr));     \
    4.47  })
    4.48 @@ -88,7 +88,7 @@
    4.49  #define __copy_from_guest_offset(ptr, hnd, off, nr) ({  \
    4.50      const typeof(ptr) _x = (hnd).p;                     \
    4.51      const typeof(ptr) _y = (ptr);                       \
    4.52 -    shadow_mode_translate(current->domain) ?            \
    4.53 +    is_hvm_vcpu(current) ?                              \
    4.54      copy_from_user_hvm(_y, _x+(off),sizeof(*_x)*(nr)) : \
    4.55      __copy_from_user(_y, _x+(off), sizeof(*_x)*(nr));   \
    4.56  })
    4.57 @@ -96,7 +96,7 @@
    4.58  #define __copy_field_to_guest(hnd, ptr, field) ({       \
    4.59      const typeof(&(ptr)->field) _x = &(hnd).p->field;   \
    4.60      const typeof(&(ptr)->field) _y = &(ptr)->field;     \
    4.61 -    shadow_mode_translate(current->domain) ?            \
    4.62 +    is_hvm_vcpu(current) ?                              \
    4.63      copy_to_user_hvm(_x, _y, sizeof(*_x)) :             \
    4.64      __copy_to_user(_x, _y, sizeof(*_x));                \
    4.65  })
    4.66 @@ -104,7 +104,7 @@
    4.67  #define __copy_field_from_guest(ptr, hnd, field) ({     \
    4.68      const typeof(&(ptr)->field) _x = &(hnd).p->field;   \
    4.69      const typeof(&(ptr)->field) _y = &(ptr)->field;     \
    4.70 -    shadow_mode_translate(current->domain) ?            \
    4.71 +    is_hvm_vcpu(current) ?                              \
    4.72      copy_from_user_hvm(_x, _y, sizeof(*_x)) :           \
    4.73      __copy_from_user(_y, _x, sizeof(*_x));              \
    4.74  })
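
The guest_access.h hunks all make the same substitution: the HVM copy
path is now selected by is_hvm_vcpu(current) rather than
shadow_mode_translate(current->domain), so a PV guest running with
shadow refcounts takes the ordinary copy_to_user()/copy_from_user()
path. An illustrative caller, assuming a hypothetical hypercall-handler
fragment (the handle and struct names are for illustration only):

    /* Hypothetical handler fragment: the macro below now routes through
     * copy_to_user_hvm() only when current is an HVM vcpu; a shadowed
     * PV domain uses the direct copy_to_user() path. */
    struct xen_domctl ctl;
    /* ... fill in ctl ... */
    if ( copy_field_to_guest(u_domctl, &ctl, domain) )
        return -EFAULT;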