ia64/xen-unstable

changeset 10822:f42039dcdc81

[HVM][VMX] Fix injection of software exceptions (#BP, #OF)
From: George Dunlap <dunlapg@umich.edu>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Jul 27 13:05:33 2006 +0100 (2006-07-27)
parents ed20a5addce4
children 5fa2cd68d059
files xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/vmx/vmx.h
line diff
     1.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Jul 27 12:59:36 2006 +0100
     1.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Jul 27 13:05:33 2006 +0100
     1.3 @@ -286,7 +286,7 @@ static inline int long_mode_do_msr_write
     1.4          if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
     1.5          {
     1.6              printk("trying to set reserved bit in EFER\n");
     1.7 -            vmx_inject_exception(v, TRAP_gp_fault, 0);
     1.8 +            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
     1.9              return 0;
    1.10          }
    1.11  
    1.12 @@ -300,7 +300,7 @@ static inline int long_mode_do_msr_write
    1.13              {
    1.14                  printk("trying to set LME bit when "
    1.15                         "in paging mode or PAE bit is not set\n");
    1.16 -                vmx_inject_exception(v, TRAP_gp_fault, 0);
    1.17 +                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
    1.18                  return 0;
    1.19              }
    1.20  
    1.21 @@ -318,7 +318,7 @@ static inline int long_mode_do_msr_write
    1.22          if ( !IS_CANO_ADDRESS(msr_content) )
    1.23          {
    1.24              HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
    1.25 -            vmx_inject_exception(v, TRAP_gp_fault, 0);
    1.26 +            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
    1.27              return 0;
    1.28          }
    1.29  
    1.30 @@ -1438,7 +1438,7 @@ static int vmx_set_cr0(unsigned long val
    1.31                         &v->arch.hvm_vmx.cpu_state) )
    1.32          {
    1.33              HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enabled\n");
    1.34 -            vmx_inject_exception(v, TRAP_gp_fault, 0);
    1.35 +            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
    1.36          }
    1.37  
    1.38          if ( test_bit(VMX_CPU_STATE_LME_ENABLED,
    1.39 @@ -1520,7 +1520,7 @@ static int vmx_set_cr0(unsigned long val
    1.40      {
    1.41          if ( value & X86_CR0_PG ) {
    1.42              /* inject GP here */
    1.43 -            vmx_inject_exception(v, TRAP_gp_fault, 0);
    1.44 +            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
    1.45              return 0;
    1.46          } else {
    1.47              /*
    1.48 @@ -1764,7 +1764,7 @@ static int mov_to_cr(int gp, int cr, str
    1.49          else
    1.50          {
    1.51              if ( test_bit(VMX_CPU_STATE_LMA_ENABLED, &v->arch.hvm_vmx.cpu_state) )
    1.52 -                vmx_inject_exception(v, TRAP_gp_fault, 0);
    1.53 +                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
    1.54  
    1.55              clear_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
    1.56          }
    1.57 @@ -2192,7 +2192,7 @@ asmlinkage void vmx_vmexit_handler(struc
    1.58              if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) )
    1.59                  domain_pause_for_debugger();
    1.60              else 
    1.61 -                vmx_inject_exception(v, TRAP_int3, VMX_DELIVER_NO_ERROR_CODE);
    1.62 +                vmx_reflect_exception(v);
    1.63              break;
    1.64          }
    1.65  #endif
    1.66 @@ -2219,7 +2219,7 @@ asmlinkage void vmx_vmexit_handler(struc
    1.67                  /*
    1.68                   * Inject #PG using Interruption-Information Fields
    1.69                   */
    1.70 -                vmx_inject_exception(v, TRAP_page_fault, regs.error_code);
    1.71 +                vmx_inject_hw_exception(v, TRAP_page_fault, regs.error_code);
    1.72                  v->arch.hvm_vmx.cpu_cr2 = va;
    1.73                  TRACE_3D(TRC_VMX_INT, v->domain->domain_id, TRAP_page_fault, va);
    1.74              }
    1.75 @@ -2335,7 +2335,7 @@ asmlinkage void vmx_vmexit_handler(struc
    1.76      case EXIT_REASON_VMON:
    1.77          /* Report invalid opcode exception when a VMX guest tries to execute 
    1.78              any of the VMX instructions */
    1.79 -        vmx_inject_exception(v, TRAP_invalid_op, VMX_DELIVER_NO_ERROR_CODE);
    1.80 +        vmx_inject_hw_exception(v, TRAP_invalid_op, VMX_DELIVER_NO_ERROR_CODE);
    1.81          break;
    1.82  
    1.83      default:
     2.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Thu Jul 27 12:59:36 2006 +0100
     2.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Thu Jul 27 13:05:33 2006 +0100
     2.3 @@ -143,11 +143,12 @@ extern unsigned int cpu_rev;
     2.4   */
     2.5  #define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
     2.6  #define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
     2.7 -#define INTR_INFO_DELIEVER_CODE_MASK    0x800           /* 11 */
     2.8 +#define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
     2.9  #define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
    2.10  
    2.11  #define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
    2.12 -#define INTR_TYPE_EXCEPTION             (3 << 8) /* processor exception */
    2.13 +#define INTR_TYPE_HW_EXCEPTION          (3 << 8) /* hardware exception */
    2.14 +#define INTR_TYPE_SW_EXCEPTION          (6 << 8) /* software exception */
    2.15  
    2.16  /*
    2.17   * Exit Qualifications for MOV for Control Register Access
    2.18 @@ -421,7 +422,7 @@ static inline int vmx_pgbit_test(struct 
    2.19  }
    2.20  
    2.21  static inline int __vmx_inject_exception(struct vcpu *v, int trap, int type, 
    2.22 -                                         int error_code)
    2.23 +                                         int error_code, int ilen)
    2.24  {
    2.25      unsigned long intr_fields;
    2.26  
    2.27 @@ -429,22 +430,33 @@ static inline int __vmx_inject_exception
    2.28      intr_fields = (INTR_INFO_VALID_MASK | type | trap);
    2.29      if (error_code != VMX_DELIVER_NO_ERROR_CODE) {
    2.30          __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
    2.31 -        intr_fields |= INTR_INFO_DELIEVER_CODE_MASK;
    2.32 +        intr_fields |= INTR_INFO_DELIVER_CODE_MASK;
    2.33       }
    2.34 -    
    2.35 +
    2.36 +    if ( ilen )
    2.37 +        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, ilen);
    2.38 +
    2.39      __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
    2.40      return 0;
    2.41  }
    2.42  
    2.43 -static inline int vmx_inject_exception(struct vcpu *v, int trap, int error_code)
    2.44 +static inline int vmx_inject_hw_exception(struct vcpu *v, int trap, int error_code)
    2.45  {
    2.46      v->arch.hvm_vmx.vector_injected = 1;
    2.47 -    return __vmx_inject_exception(v, trap, INTR_TYPE_EXCEPTION, error_code);
    2.48 +    return __vmx_inject_exception(v, trap, INTR_TYPE_HW_EXCEPTION,
    2.49 +                                  error_code, 0);
    2.50 +}
    2.51 +
    2.52 +static inline int vmx_inject_sw_exception(struct vcpu *v, int trap, int instruction_len)
    2.53 +{
    2.54 +    v->arch.hvm_vmx.vector_injected = 1;
    2.55 +    return __vmx_inject_exception(v, trap, INTR_TYPE_SW_EXCEPTION,
    2.56 +                                  VMX_DELIVER_NO_ERROR_CODE, instruction_len);
    2.57 +}
    2.58  
    2.59  static inline int vmx_inject_extint(struct vcpu *v, int trap, int error_code)
    2.60  {
    2.61 -    __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR, error_code);
    2.62 +    __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR, error_code, 0);
    2.63      __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
    2.64  
    2.65      return 0;
    2.66 @@ -452,14 +464,14 @@ static inline int vmx_inject_extint(stru
    2.67  
    2.68  static inline int vmx_reflect_exception(struct vcpu *v)
    2.69  {
    2.70 -    int error_code, vector;
    2.71 +    int error_code, intr_info, vector;
    2.72  
    2.73 -    __vmread(VM_EXIT_INTR_INFO, &vector);
    2.74 -    if (vector & INTR_INFO_DELIEVER_CODE_MASK)
    2.75 +    __vmread(VM_EXIT_INTR_INFO, &intr_info);
    2.76 +    vector = intr_info & 0xff;
    2.77 +    if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
    2.78          __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
    2.79      else
    2.80          error_code = VMX_DELIVER_NO_ERROR_CODE;
    2.81 -    vector &= 0xff;
    2.82  
    2.83  #ifndef NDEBUG
    2.84      {
    2.85 @@ -472,7 +484,19 @@ static inline int vmx_reflect_exception(
    2.86      }
    2.87  #endif /* NDEBUG */
    2.88  
    2.89 -    vmx_inject_exception(v, vector, error_code);
    2.90 +    /* According to the Intel Virtualization Technology Specification
    2.91 +       for the IA-32 Intel Architecture (C97063-002, April 2005), section
    2.92 +       2.8.3, SW_EXCEPTION should be used for #BP and #OF, and
    2.93 +       HW_EXCEPTION for everything else.  The main difference is that
    2.94 +       for SW_EXCEPTION the EIP/RIP is advanced by
    2.95 +       VM_ENTRY_INSTRUCTION_LEN bytes on VM entry, whereas for
    2.96 +       HW_EXCEPTION it is not.  */
    2.97 +    if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_SW_EXCEPTION ) {
    2.98 +        int ilen;
    2.99 +        __vmread(VM_EXIT_INSTRUCTION_LEN, &ilen);
   2.100 +        vmx_inject_sw_exception(v, vector, ilen);
   2.101 +    } else
   2.102 +        vmx_inject_hw_exception(v, vector, error_code);
   2.103      return 0;
   2.104  }
   2.105
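
A usage note on the two paths above: the #BP exit handler in vmx.c now calls vmx_reflect_exception() instead of injecting TRAP_int3 directly, and with the new helpers that boils down to the fragment below. This is a hypothetical sketch for illustration only; the helper names, trap number and VMCS fields are taken from the hunks above, while the surrounding function is not part of the change.

    /* Hypothetical #BP exit-handler fragment, for illustration only. */
    static void example_reinject_guest_bp(struct vcpu *v)
    {
        int ilen;

        /* INT3 raises a software exception, so the instruction length is
         * handed back to the processor; per the comment added to
         * vmx_reflect_exception(), VM entry then advances the guest
         * EIP/RIP by that many bytes before delivering the #BP. */
        __vmread(VM_EXIT_INSTRUCTION_LEN, &ilen);
        vmx_inject_sw_exception(v, TRAP_int3, ilen);
    }

Hypervisor-generated faults, such as the #GP(0) raised for an invalid EFER write earlier in the patch, stay on the hardware path, vmx_inject_hw_exception(v, TRAP_gp_fault, 0), and carry an error code but no instruction length.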