ia64/xen-unstable

changeset 17582:01aa7c088e98

SVM: clean up __get_instruction_length_from_list()

Remove unused arguments, fix its behaviour near page boundaries,
inject appropriate page faults, and inject #GP if the instruction is
not decodable or %eip is not pointing to valid RAM.

Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue May 06 13:32:18 2008 +0100 (2008-05-06)
parents e6f20d5ed5fe
children a3bddc22d2f5
files xen/arch/x86/hvm/svm/emulate.c xen/arch/x86/hvm/svm/svm.c xen/include/asm-x86/hvm/svm/emulate.h
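
A minimal standalone sketch of the page-boundary strategy the rework adopts
(illustration only, not code from the patch; PAGE_SIZE, is_prefix_byte() and
the fetch callback here are simplified stand-ins for Xen's own versions):
the first fetch stops at the end of the page containing the current %rip,
and bytes from the following page are requested only if decoding actually
runs past the break, so a #PF is injected only when the guest itself would
have touched that page.

    #include <stdint.h>

    #define MAX_INST_LEN 15
    #define PAGE_SIZE    4096UL
    #define PAGE_MASK    (~(PAGE_SIZE - 1))

    /* Stand-in for the hvm_fetch_from_guest_virt() wrapper: returns 1 on
     * success, 0 if the copy failed (after injecting #PF/#GP itself). */
    typedef int (*fetch_fn)(uint8_t *buf, unsigned long addr, unsigned int len);

    /* Simplified prefix test; the real is_prefix() in emulate.c covers all
     * legacy and REX prefixes. */
    static int is_prefix_byte(uint8_t b)
    {
        return (b == 0x66) || (b == 0x67) || (b >= 0x40 && b <= 0x4f);
    }

    /* Count prefix bytes at rip, fetching lazily across the page break. */
    static int count_prefixes(unsigned long rip, uint8_t buf[MAX_INST_LEN],
                              fetch_fn fetch)
    {
        unsigned int len = 0;
        /* Bytes available before the page break, capped at MAX_INST_LEN. */
        unsigned int fetched = PAGE_SIZE - (rip & ~PAGE_MASK);

        if ( fetched > MAX_INST_LEN )
            fetched = MAX_INST_LEN;

        if ( !fetch(buf, rip, fetched) )
            return -1;                  /* fault already injected */

        while ( (len < MAX_INST_LEN) && is_prefix_byte(buf[len]) )
        {
            if ( (++len >= fetched) && (fetched < MAX_INST_LEN) )
            {
                /* Only now are bytes from the next page actually needed. */
                if ( !fetch(buf + fetched, rip + fetched,
                            MAX_INST_LEN - fetched) )
                    return -1;
                fetched = MAX_INST_LEN;
            }
        }
        return len;
    }
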
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/emulate.c	Tue May 06 11:05:00 2008 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/emulate.c	Tue May 06 13:32:18 2008 +0100
     1.3 @@ -29,18 +29,6 @@
     1.4  
     1.5  #define MAX_INST_LEN 15
     1.6  
     1.7 -static int inst_copy_from_guest(
     1.8 -    unsigned char *buf, unsigned long guest_eip, int inst_len)
     1.9 -{
    1.10 -    struct vmcb_struct *vmcb = current->arch.hvm_svm.vmcb;
    1.11 -    uint32_t pfec = (vmcb->cpl == 3) ? PFEC_user_mode : 0;
    1.12 -    if ( (inst_len > MAX_INST_LEN) || (inst_len <= 0) )
    1.13 -        return 0;
    1.14 -    if ( hvm_fetch_from_guest_virt_nofault(buf, guest_eip, inst_len, pfec) )
    1.15 -        return 0;
    1.16 -    return inst_len;
    1.17 -}
    1.18 -
    1.19  static unsigned int is_prefix(u8 opc)
    1.20  {
    1.21      switch ( opc )
    1.22 @@ -73,12 +61,7 @@ static unsigned long svm_rip2pointer(str
    1.23      return p;
    1.24  }
    1.25  
    1.26 -/* 
    1.27 - * Here's how it works:
    1.28 - * First byte: Length. 
    1.29 - * Following bytes: Opcode bytes. 
    1.30 - * Special case: Last byte, if zero, doesn't need to match. 
    1.31 - */
    1.32 +/* First byte: Length. Following bytes: Opcode bytes. */
    1.33  #define MAKE_INSTR(nm, ...) static const u8 OPCODE_##nm[] = { __VA_ARGS__ }
    1.34  MAKE_INSTR(INVD,   2, 0x0f, 0x08);
    1.35  MAKE_INSTR(WBINVD, 2, 0x0f, 0x09);
    1.36 @@ -101,70 +84,90 @@ static const u8 *opc_bytes[INSTR_MAX_COU
    1.37      [INSTR_INT3]   = OPCODE_INT3
    1.38  };
    1.39  
    1.40 +static int fetch(struct vcpu *v, u8 *buf, unsigned long addr, int len)
    1.41 +{
    1.42 +    uint32_t pfec = (v->arch.hvm_svm.vmcb->cpl == 3) ? PFEC_user_mode : 0;
    1.43 +
    1.44 +    switch ( hvm_fetch_from_guest_virt(buf, addr, len, pfec) )
    1.45 +    {
    1.46 +    case HVMCOPY_okay:
    1.47 +        return 1;
    1.48 +    case HVMCOPY_bad_gva_to_gfn:
    1.49 +        /* OK just to give up; we'll have injected #PF already */
    1.50 +        return 0;
    1.51 +    case HVMCOPY_bad_gfn_to_mfn:
    1.52 +    default:
    1.53 +        /* Not OK: fetches from non-RAM pages are not supportable. */
    1.54 +        gdprintk(XENLOG_WARNING, "Bad instruction fetch at %#lx (%#lx)\n",
    1.55 +                 (unsigned long) guest_cpu_user_regs()->eip, addr);
    1.56 +        hvm_inject_exception(TRAP_gp_fault, 0, 0);
    1.57 +        return 0;
    1.58 +    }
    1.59 +}
    1.60 +
    1.61  int __get_instruction_length_from_list(struct vcpu *v,
    1.62 -        enum instruction_index *list, unsigned int list_count, 
    1.63 -        u8 *guest_eip_buf, enum instruction_index *match)
    1.64 +        enum instruction_index *list, unsigned int list_count)
    1.65  {
    1.66      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    1.67      unsigned int i, j, inst_len = 0;
    1.68 -    int found = 0;
    1.69      enum instruction_index instr = 0;
    1.70 -    u8 buffer[MAX_INST_LEN];
    1.71 -    u8 *buf;
    1.72 +    u8 buf[MAX_INST_LEN];
    1.73      const u8 *opcode = NULL;
    1.74 +    unsigned long fetch_addr;
    1.75 +    unsigned int fetch_len;
    1.76  
    1.77 -    if ( guest_eip_buf )
    1.78 +    /* Fetch up to the next page break; we'll fetch from the next page
    1.79 +     * later if we have to. */
    1.80 +    fetch_addr = svm_rip2pointer(v);
    1.81 +    fetch_len = min_t(unsigned int, MAX_INST_LEN,
    1.82 +                      PAGE_SIZE - (fetch_addr & ~PAGE_MASK));
    1.83 +    if ( !fetch(v, buf, fetch_addr, fetch_len) )
    1.84 +        return 0;
    1.85 +
    1.86 +    while ( (inst_len < MAX_INST_LEN) && is_prefix(buf[inst_len]) )
    1.87      {
    1.88 -        buf = guest_eip_buf;
    1.89 -    }
    1.90 -    else
    1.91 -    {
    1.92 -        if ( inst_copy_from_guest(buffer, svm_rip2pointer(v), MAX_INST_LEN)
    1.93 -             != MAX_INST_LEN )
    1.94 -            return 0;
    1.95 -        buf = buffer;
    1.96 +        inst_len++;
    1.97 +        if ( inst_len >= fetch_len )
    1.98 +        {
    1.99 +            if ( !fetch(v, buf + fetch_len, fetch_addr + fetch_len,
   1.100 +                        MAX_INST_LEN - fetch_len) )
   1.101 +                return 0;
   1.102 +            fetch_len = MAX_INST_LEN;
   1.103 +        }
   1.104      }
   1.105  
   1.106      for ( j = 0; j < list_count; j++ )
   1.107      {
   1.108          instr = list[j];
   1.109          opcode = opc_bytes[instr];
   1.110 -        ASSERT(opcode);
   1.111  
   1.112 -        while ( (inst_len < MAX_INST_LEN) && 
   1.113 -                is_prefix(buf[inst_len]) && 
   1.114 -                !is_prefix(opcode[1]) )
   1.115 -            inst_len++;
   1.116 -
   1.117 -        ASSERT(opcode[0] <= 15);    /* Make sure the table is correct. */
   1.118 -        found = 1;
   1.119 -
   1.120 -        for ( i = 0; i < opcode[0]; i++ )
   1.121 +        for ( i = 0; (i < opcode[0]) && ((inst_len + i) < MAX_INST_LEN); i++ )
   1.122          {
   1.123 -            /* If the last byte is zero, we just accept it without checking */
   1.124 -            if ( (i == (opcode[0]-1)) && (opcode[i+1] == 0) )
   1.125 -                break;
   1.126 +            if ( (inst_len + i) >= fetch_len ) 
   1.127 +            { 
   1.128 +                if ( !fetch(v, buf + fetch_len, 
   1.129 +                            fetch_addr + fetch_len, 
   1.130 +                            MAX_INST_LEN - fetch_len) ) 
   1.131 +                    return 0;
   1.132 +                fetch_len = MAX_INST_LEN;
   1.133 +            }
   1.134  
   1.135              if ( buf[inst_len+i] != opcode[i+1] )
   1.136 -            {
   1.137 -                found = 0;
   1.138 -                break;
   1.139 -            }
   1.140 +                goto mismatch;
   1.141          }
   1.142 -
   1.143 -        if ( found )
   1.144 -            goto done;
   1.145 +        goto done;
   1.146 +    mismatch: ;
   1.147      }
   1.148  
   1.149 -    printk("%s: Mismatch between expected and actual instruction bytes: "
   1.150 -            "eip = %lx\n",  __func__, (unsigned long)vmcb->rip);
   1.151 +    gdprintk(XENLOG_WARNING,
   1.152 +             "%s: Mismatch between expected and actual instruction bytes: "
   1.153 +             "eip = %lx\n",  __func__, (unsigned long)vmcb->rip);
   1.154 +    hvm_inject_exception(TRAP_gp_fault, 0, 0);
   1.155      return 0;
   1.156  
   1.157   done:
   1.158      inst_len += opcode[0];
   1.159      ASSERT(inst_len <= MAX_INST_LEN);
   1.160 -    if ( match )
   1.161 -        *match = instr;
   1.162      return inst_len;
   1.163  }
   1.164  
     2.1 --- a/xen/arch/x86/hvm/svm/svm.c	Tue May 06 11:05:00 2008 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Tue May 06 13:32:18 2008 +0100
     2.3 @@ -84,7 +84,10 @@ static void inline __update_guest_eip(
     2.4  {
     2.5      struct vcpu *curr = current;
     2.6  
     2.7 -    if ( unlikely((inst_len == 0) || (inst_len > 15)) )
     2.8 +    if ( unlikely(inst_len == 0) )
     2.9 +        return;
    2.10 +
    2.11 +    if ( unlikely(inst_len > 15) )
    2.12      {
    2.13          gdprintk(XENLOG_ERR, "Bad instruction length %u\n", inst_len);
    2.14          domain_crash(curr->domain);
    2.15 @@ -907,8 +910,7 @@ static void svm_vmexit_do_cpuid(struct c
    2.16  {
    2.17      unsigned int eax, ebx, ecx, edx, inst_len;
    2.18  
    2.19 -    inst_len = __get_instruction_length(current, INSTR_CPUID, NULL);
    2.20 -    if ( inst_len == 0 ) 
    2.21 +    if ( (inst_len = __get_instruction_length(current, INSTR_CPUID)) == 0 )
    2.22          return;
    2.23  
    2.24      eax = regs->eax;
    2.25 @@ -1083,13 +1085,15 @@ static void svm_do_msr_access(struct cpu
    2.26  
    2.27      if ( vmcb->exitinfo1 == 0 )
    2.28      {
    2.29 +        if ( (inst_len = __get_instruction_length(v, INSTR_RDMSR)) == 0 )
    2.30 +            return;
    2.31          rc = hvm_msr_read_intercept(regs);
    2.32 -        inst_len = __get_instruction_length(v, INSTR_RDMSR, NULL);
    2.33      }
    2.34      else
    2.35      {
    2.36 +        if ( (inst_len = __get_instruction_length(v, INSTR_WRMSR)) == 0 )
    2.37 +            return;
    2.38          rc = hvm_msr_write_intercept(regs);
    2.39 -        inst_len = __get_instruction_length(v, INSTR_WRMSR, NULL);
    2.40      }
    2.41  
    2.42      if ( rc == X86EMUL_OKAY )
    2.43 @@ -1101,8 +1105,7 @@ static void svm_vmexit_do_hlt(struct vmc
    2.44  {
    2.45      unsigned int inst_len;
    2.46  
    2.47 -    inst_len = __get_instruction_length(current, INSTR_HLT, NULL);
    2.48 -    if ( inst_len == 0 )
    2.49 +    if ( (inst_len = __get_instruction_length(current, INSTR_HLT)) == 0 )
    2.50          return;
    2.51      __update_guest_eip(regs, inst_len);
    2.52  
    2.53 @@ -1125,10 +1128,13 @@ static void svm_vmexit_do_invalidate_cac
    2.54      enum instruction_index list[] = { INSTR_INVD, INSTR_WBINVD };
    2.55      int inst_len;
    2.56  
    2.57 +    inst_len = __get_instruction_length_from_list(
    2.58 +        current, list, ARRAY_SIZE(list));
    2.59 +    if ( inst_len == 0 )
    2.60 +        return;
    2.61 +
    2.62      svm_wbinvd_intercept();
    2.63  
    2.64 -    inst_len = __get_instruction_length_from_list(
    2.65 -        current, list, ARRAY_SIZE(list), NULL, NULL);
    2.66      __update_guest_eip(regs, inst_len);
    2.67  }
    2.68  
    2.69 @@ -1204,7 +1210,8 @@ asmlinkage void svm_vmexit_handler(struc
    2.70          if ( !v->domain->debugger_attached )
    2.71              goto exit_and_crash;
    2.72          /* AMD Vol2, 15.11: INT3, INTO, BOUND intercepts do not update RIP. */
    2.73 -        inst_len = __get_instruction_length(v, INSTR_INT3, NULL);
    2.74 +        if ( (inst_len = __get_instruction_length(v, INSTR_INT3)) == 0 )
    2.75 +            break;
    2.76          __update_guest_eip(regs, inst_len);
    2.77          domain_pause_for_debugger();
    2.78          break;
    2.79 @@ -1281,8 +1288,7 @@ asmlinkage void svm_vmexit_handler(struc
    2.80          break;
    2.81  
    2.82      case VMEXIT_VMMCALL:
    2.83 -        inst_len = __get_instruction_length(v, INSTR_VMCALL, NULL);
    2.84 -        if ( inst_len == 0 )
    2.85 +        if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
    2.86              break;
    2.87          HVMTRACE_1D(VMMCALL, v, regs->eax);
    2.88          rc = hvm_do_hypercall(regs);
     3.1 --- a/xen/include/asm-x86/hvm/svm/emulate.h	Tue May 06 11:05:00 2008 +0100
     3.2 +++ b/xen/include/asm-x86/hvm/svm/emulate.h	Tue May 06 13:32:18 2008 +0100
     3.3 @@ -34,15 +34,12 @@ enum instruction_index {
     3.4  };
     3.5  
     3.6  int __get_instruction_length_from_list(
     3.7 -    struct vcpu *v,
     3.8 -    enum instruction_index *list, unsigned int list_count, 
     3.9 -    u8 *guest_eip_buf, enum instruction_index *match);
    3.10 +    struct vcpu *v, enum instruction_index *list, unsigned int list_count);
    3.11  
    3.12 -static inline int __get_instruction_length(struct vcpu *v, 
    3.13 -        enum instruction_index instr, u8 *guest_eip_buf)
    3.14 +static inline int __get_instruction_length(
    3.15 +    struct vcpu *v, enum instruction_index instr)
    3.16  {
    3.17 -    return __get_instruction_length_from_list(
    3.18 -        v, &instr, 1, guest_eip_buf, NULL);
    3.19 +    return __get_instruction_length_from_list(v, &instr, 1);
    3.20  }
    3.21  
    3.22  #endif /* __ASM_X86_HVM_SVM_EMULATE_H__ */
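
For context on how the trimmed interface above is meant to be used
(illustration only: INSTR_RDTSC, OPCODE_RDTSC and the handler below are
hypothetical, not part of this changeset), wiring up a new decodable
intercept takes an opcode-table entry in emulate.c, an enum value in
emulate.h, and a handler that treats a zero return as "fault already
injected, do not advance %rip":

    /* emulate.c: hypothetical table entry (first byte is the length). */
    MAKE_INSTR(RDTSC, 2, 0x0f, 0x31);
    /* ...plus, in opc_bytes[]:  [INSTR_RDTSC] = OPCODE_RDTSC, */

    /* svm.c: hypothetical handler following the zero-means-fault contract. */
    static void svm_vmexit_do_rdtsc(struct cpu_user_regs *regs)
    {
        unsigned int inst_len;

        if ( (inst_len = __get_instruction_length(current, INSTR_RDTSC)) == 0 )
            return;    /* #PF or #GP already injected; the guest retries */

        /* ... emulate RDTSC into regs->eax / regs->edx here ... */

        __update_guest_eip(regs, inst_len);
    }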