changeset 18340:a3fe573a0e1e

x86, hvm: Observe EFLAGS.DF when performing segmentation checks and
address translations on multi-iteration string instructions.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Aug 19 15:57:19 2008 +0100 (2008-08-19)
parents 6e3c97f43f9c
children e6a4f6a682ba
files xen/arch/x86/hvm/emulate.c
line diff
     1.1 --- a/xen/arch/x86/hvm/emulate.c	Tue Aug 19 15:56:31 2008 +0100
     1.2 +++ b/xen/arch/x86/hvm/emulate.c	Tue Aug 19 15:57:19 2008 +0100
     1.3 @@ -208,6 +208,7 @@ static int hvmemul_linear_to_phys(
     1.4  {
     1.5      struct vcpu *curr = current;
     1.6      unsigned long pfn, npfn, done, todo, i;
     1.7 +    int reverse;
     1.8  
     1.9      /* Clip repetitions to a sensible maximum. */
    1.10      *reps = min_t(unsigned long, *reps, 4096);
    1.11 @@ -221,41 +222,53 @@ static int hvmemul_linear_to_phys(
    1.12  
    1.13      *paddr = addr & ~PAGE_MASK;
    1.14  
    1.15 -    /* Get the first PFN in the range. */
    1.16 -    if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == INVALID_GFN )
    1.17 +    /* Reverse mode if this is a backwards multi-iteration string operation. */
    1.18 +    reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);
    1.19 +
    1.20 +    if ( reverse && ((-addr & ~PAGE_MASK) < bytes_per_rep) )
    1.21 +    {
    1.22 +        /* Do page-straddling first iteration forwards via recursion. */
    1.23 +        unsigned long _paddr, one_rep = 1;
    1.24 +        int rc = hvmemul_linear_to_phys(
    1.25 +            addr, &_paddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt);
    1.26 +        if ( rc != X86EMUL_OKAY )
    1.27 +            return rc;
    1.28 +        pfn = _paddr >> PAGE_SHIFT;
    1.29 +    }
    1.30 +    else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == INVALID_GFN )
    1.31      {
    1.32          hvm_inject_exception(TRAP_page_fault, pfec, addr);
    1.33          return X86EMUL_EXCEPTION;
    1.34      }
    1.35  
    1.36      /* If the range does not straddle a page boundary then we're done. */
    1.37 -    done = PAGE_SIZE - (addr & ~PAGE_MASK);
    1.38 +    done = reverse ? bytes_per_rep + (addr & ~PAGE_MASK) : -addr & ~PAGE_MASK;
    1.39      todo = *reps * bytes_per_rep;
    1.40      if ( done >= todo )
    1.41          goto done;
    1.42  
    1.43 -    addr += done;
    1.44      for ( i = 1; done < todo; i++ )
    1.45      {
    1.46          /* Get the next PFN in the range. */
    1.47 +        addr += reverse ? -PAGE_SIZE : PAGE_SIZE;
    1.48          npfn = paging_gva_to_gfn(curr, addr, &pfec);
    1.49  
    1.50          /* Is it contiguous with the preceding PFNs? If not then we're done. */
    1.51 -        if ( (npfn == INVALID_GFN) || (npfn != (pfn + i)) )
    1.52 +        if ( (npfn == INVALID_GFN) || (npfn != (pfn + (reverse ? -i : i))) )
    1.53          {
    1.54              done /= bytes_per_rep;
    1.55              if ( done == 0 )
    1.56              {
    1.57 +                ASSERT(!reverse);
    1.58                  if ( npfn != INVALID_GFN )
    1.59                      return X86EMUL_UNHANDLEABLE;
    1.60 -                hvm_inject_exception(TRAP_page_fault, pfec, addr);
    1.61 +                hvm_inject_exception(TRAP_page_fault, pfec, addr & PAGE_MASK);
    1.62                  return X86EMUL_EXCEPTION;
    1.63              }
    1.64              *reps = done;
    1.65              break;
    1.66          }
    1.67  
    1.68 -        addr += PAGE_SIZE;
    1.69          done += PAGE_SIZE;
    1.70      }
    1.71  
    1.72 @@ -268,7 +281,8 @@ static int hvmemul_linear_to_phys(
    1.73  static int hvmemul_virtual_to_linear(
    1.74      enum x86_segment seg,
    1.75      unsigned long offset,
    1.76 -    unsigned int bytes,
    1.77 +    unsigned int bytes_per_rep,
    1.78 +    unsigned long *reps,
    1.79      enum hvm_access_type access_type,
    1.80      struct hvm_emulate_ctxt *hvmemul_ctxt,
    1.81      unsigned long *paddr)
    1.82 @@ -282,21 +296,40 @@ static int hvmemul_virtual_to_linear(
    1.83          return X86EMUL_OKAY;
    1.84      }
    1.85  
    1.86 +    *reps = min_t(unsigned long, *reps, 4096);
    1.87      reg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
    1.88 -    okay = hvm_virtual_to_linear_addr(
    1.89 -        seg, reg, offset, bytes, access_type,
    1.90 -        hvmemul_ctxt->ctxt.addr_size, paddr);
    1.91  
    1.92 -    if ( !okay )
    1.93 +    if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) )
    1.94      {
    1.95 -        hvmemul_ctxt->exn_pending = 1;
    1.96 -        hvmemul_ctxt->exn_vector = TRAP_gp_fault;
    1.97 -        hvmemul_ctxt->exn_error_code = 0;
    1.98 -        hvmemul_ctxt->exn_insn_len = 0;
    1.99 -        return X86EMUL_EXCEPTION;
   1.100 +        ASSERT(offset >= ((*reps - 1) * bytes_per_rep));
   1.101 +        okay = hvm_virtual_to_linear_addr(
   1.102 +            seg, reg, offset - (*reps - 1) * bytes_per_rep,
   1.103 +            *reps * bytes_per_rep, access_type,
   1.104 +            hvmemul_ctxt->ctxt.addr_size, paddr);
   1.105 +        *paddr += (*reps - 1) * bytes_per_rep;
   1.106 +        if ( hvmemul_ctxt->ctxt.addr_size != 64 )
   1.107 +            *paddr = (uint32_t)*paddr;
   1.108 +    }
   1.109 +    else
   1.110 +    {
   1.111 +        okay = hvm_virtual_to_linear_addr(
   1.112 +            seg, reg, offset, *reps * bytes_per_rep, access_type,
   1.113 +            hvmemul_ctxt->ctxt.addr_size, paddr);
   1.114      }
   1.115  
   1.116 -    return X86EMUL_OKAY;
   1.117 +    if ( okay )
   1.118 +        return X86EMUL_OKAY;
   1.119 +
   1.120 +    /* If this is a string operation, emulate each iteration separately. */
   1.121 +    if ( *reps != 1 )
   1.122 +        return X86EMUL_UNHANDLEABLE;
   1.123 +
   1.124 +    /* This is a singleton operation: fail it with an exception. */
   1.125 +    hvmemul_ctxt->exn_pending = 1;
   1.126 +    hvmemul_ctxt->exn_vector = TRAP_gp_fault;
   1.127 +    hvmemul_ctxt->exn_error_code = 0;
   1.128 +    hvmemul_ctxt->exn_insn_len = 0;
   1.129 +    return X86EMUL_EXCEPTION;
   1.130  }
   1.131  
   1.132  static int __hvmemul_read(
   1.133 @@ -314,7 +347,7 @@ static int __hvmemul_read(
   1.134      int rc;
   1.135  
   1.136      rc = hvmemul_virtual_to_linear(
   1.137 -        seg, offset, bytes, access_type, hvmemul_ctxt, &addr);
   1.138 +        seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
   1.139      if ( rc != X86EMUL_OKAY )
   1.140          return rc;
   1.141  
   1.142 @@ -406,7 +439,7 @@ static int hvmemul_write(
   1.143      int rc;
   1.144  
   1.145      rc = hvmemul_virtual_to_linear(
   1.146 -        seg, offset, bytes, hvm_access_write, hvmemul_ctxt, &addr);
   1.147 +        seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
   1.148      if ( rc != X86EMUL_OKAY )
   1.149          return rc;
   1.150  
   1.151 @@ -470,7 +503,7 @@ static int hvmemul_rep_ins(
   1.152      int rc;
   1.153  
   1.154      rc = hvmemul_virtual_to_linear(
   1.155 -        dst_seg, dst_offset, *reps * bytes_per_rep, hvm_access_write,
   1.156 +        dst_seg, dst_offset, bytes_per_rep, reps, hvm_access_write,
   1.157          hvmemul_ctxt, &addr);
   1.158      if ( rc != X86EMUL_OKAY )
   1.159          return rc;
   1.160 @@ -503,7 +536,7 @@ static int hvmemul_rep_outs(
   1.161      int rc;
   1.162  
   1.163      rc = hvmemul_virtual_to_linear(
   1.164 -        src_seg, src_offset, *reps * bytes_per_rep, hvm_access_read,
   1.165 +        src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
   1.166          hvmemul_ctxt, &addr);
   1.167      if ( rc != X86EMUL_OKAY )
   1.168          return rc;
   1.169 @@ -538,13 +571,13 @@ static int hvmemul_rep_movs(
   1.170      int rc;
   1.171  
   1.172      rc = hvmemul_virtual_to_linear(
   1.173 -        src_seg, src_offset, *reps * bytes_per_rep, hvm_access_read,
   1.174 +        src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
   1.175          hvmemul_ctxt, &saddr);
   1.176      if ( rc != X86EMUL_OKAY )
   1.177          return rc;
   1.178  
   1.179      rc = hvmemul_virtual_to_linear(
   1.180 -        dst_seg, dst_offset, *reps * bytes_per_rep, hvm_access_write,
   1.181 +        dst_seg, dst_offset, bytes_per_rep, reps, hvm_access_write,
   1.182          hvmemul_ctxt, &daddr);
   1.183      if ( rc != X86EMUL_OKAY )
   1.184          return rc;
   1.185 @@ -792,11 +825,11 @@ static int hvmemul_invlpg(
   1.186  {
   1.187      struct hvm_emulate_ctxt *hvmemul_ctxt =
   1.188          container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
   1.189 -    unsigned long addr;
   1.190 +    unsigned long addr, reps = 1;
   1.191      int rc;
   1.192  
   1.193      rc = hvmemul_virtual_to_linear(
   1.194 -        seg, offset, 1, hvm_access_none, hvmemul_ctxt, &addr);
   1.195 +        seg, offset, 1, &reps, hvm_access_none, hvmemul_ctxt, &addr);
   1.196  
   1.197      if ( rc == X86EMUL_OKAY )
   1.198          hvm_funcs.invlpg_intercept(addr);
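
The change encodes a simple addressing rule: with EFLAGS.DF clear, a REP string
instruction walks memory upwards from its initial offset, while with DF set
(and more than one iteration) it walks downwards, so the first iteration is the
highest-addressed one and the lowest byte touched lies
(*reps - 1) * bytes_per_rep below the initial offset. A minimal standalone
sketch of that rule follows; it is illustrative only (the helper name and
struct are not part of the patch).

/*
 * Illustrative sketch, not part of the changeset: compute the linear range
 * touched by a multi-iteration string operation, given EFLAGS.DF.
 */
#include <stdint.h>

struct str_range {
    uint64_t start;   /* lowest offset accessed across all iterations */
    uint64_t len;     /* total bytes accessed across all iterations */
};

static struct str_range string_op_range(
    uint64_t offset, uint64_t bytes_per_rep, uint64_t reps, int df)
{
    struct str_range r;

    if ( df && (reps > 1) )
        r.start = offset - (reps - 1) * bytes_per_rep;  /* backwards walk */
    else
        r.start = offset;                               /* forwards walk */

    r.len = reps * bytes_per_rep;
    return r;
}

The patch applies this rule in two places: hvmemul_virtual_to_linear() now
performs the segmentation check over the whole downward range (passing the
lowest offset and the total length to hvm_virtual_to_linear_addr()) and then
adds back (*reps - 1) * bytes_per_rep so *paddr still refers to the first
iteration, while hvmemul_linear_to_phys() steps its page walk by -PAGE_SIZE in
reverse mode and handles a page-straddling first iteration by recursing
forwards for a single rep.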