direct-io.hg

changeset 10971:986cf8354d11

[HVM][VMX] Clean up the VMX vmexit handler.

The current code does not handle vmexits caused by triple fault, INIT,
SIPI, etc., which are listed in the IA32 Architecture System Programming
Guide 2, Appendix I, "VMX Basic Exit Reasons"; it calls __hvm_bug() when
it hits such cases, whereas simply crashing the domain is good enough.

Also various coding style cleanups.
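For illustration only, a minimal C sketch (not the complete handler) of the
policy this patch adopts: exit reasons the handler cannot or need not
service now crash the offending guest via domain_crash_synchronous()
instead of calling __hvm_bug() and taking down the hypervisor. All names
below appear in the diff that follows.

    asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
    {
        unsigned int exit_reason;

        __vmread(VM_EXIT_REASON, &exit_reason);  /* why did the guest exit? */

        switch ( exit_reason ) {
        case EXIT_REASON_TRIPLE_FAULT:       /* guest is beyond recovery */
        case EXIT_REASON_TASK_SWITCH:        /* not emulated by this handler */
        case EXIT_REASON_MWAIT_INSTRUCTION:  /* MWAIT exiting is forced on */
            domain_crash_synchronous();      /* kill the guest, not Xen */
            break;
        /* ... the handled exit reasons are elided ... */
        default:
            domain_crash_synchronous();      /* unknown exit: same policy */
        }
    }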

Signed-off-by: Xin Li <xin.b.li@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Aug 08 10:29:58 2006 +0100 (2006-08-08)
parents ae14b5b77938
children abd14c5c5496
files xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/vmx/vmx.h
     1.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Aug 08 10:21:51 2006 +0100
     1.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Tue Aug 08 10:29:58 2006 +0100
     1.3 @@ -2128,12 +2128,10 @@ void restore_cpu_user_regs(struct cpu_us
     1.4  asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
     1.5  {
     1.6      unsigned int exit_reason;
     1.7 -    unsigned long exit_qualification, eip, inst_len = 0;
     1.8 +    unsigned long exit_qualification, rip, inst_len = 0;
     1.9      struct vcpu *v = current;
    1.10 -    int error;
    1.11  
    1.12 -    error = __vmread(VM_EXIT_REASON, &exit_reason);
    1.13 -    BUG_ON(error);
    1.14 +    __vmread(VM_EXIT_REASON, &exit_reason);
    1.15  
    1.16      perfc_incra(vmexits, exit_reason);
    1.17  
    1.18 @@ -2172,11 +2170,9 @@ asmlinkage void vmx_vmexit_handler(struc
    1.19          domain_crash_synchronous();
    1.20      }
    1.21  
    1.22 -    __vmread(GUEST_RIP, &eip);
    1.23      TRACE_VMEXIT(0,exit_reason);
    1.24  
    1.25 -    switch ( exit_reason )
    1.26 -    {
    1.27 +    switch ( exit_reason ) {
    1.28      case EXIT_REASON_EXCEPTION_NMI:
    1.29      {
    1.30          /*
    1.31 @@ -2187,15 +2183,15 @@ asmlinkage void vmx_vmexit_handler(struc
    1.32          unsigned int vector;
    1.33          unsigned long va;
    1.34  
    1.35 -        if (__vmread(VM_EXIT_INTR_INFO, &vector)
    1.36 -            || !(vector & INTR_INFO_VALID_MASK))
    1.37 -            __hvm_bug(&regs);
    1.38 +        if ( __vmread(VM_EXIT_INTR_INFO, &vector) ||
    1.39 +             !(vector & INTR_INFO_VALID_MASK) )
    1.40 +            domain_crash_synchronous();
    1.41          vector &= INTR_INFO_VECTOR_MASK;
    1.42  
    1.43          TRACE_VMEXIT(1,vector);
    1.44          perfc_incra(cause_vector, vector);
    1.45  
    1.46 -        switch (vector) {
    1.47 +        switch ( vector ) {
    1.48  #ifdef XEN_DEBUGGER
    1.49          case TRAP_debug:
    1.50          {
    1.51 @@ -2236,7 +2232,7 @@ asmlinkage void vmx_vmexit_handler(struc
    1.52          {
    1.53              if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) )
    1.54                  domain_pause_for_debugger();
    1.55 -            else 
    1.56 +            else
    1.57                  vmx_reflect_exception(v);
    1.58              break;
    1.59          }
    1.60 @@ -2260,7 +2256,7 @@ asmlinkage void vmx_vmexit_handler(struc
    1.61                          (unsigned long)regs.ecx, (unsigned long)regs.edx,
    1.62                          (unsigned long)regs.esi, (unsigned long)regs.edi);
    1.63  
    1.64 -            if (!vmx_do_page_fault(va, &regs)) {
    1.65 +            if ( !vmx_do_page_fault(va, &regs) ) {
    1.66                  /*
    1.67                   * Inject #PG using Interruption-Information Fields
    1.68                   */
    1.69 @@ -2282,6 +2278,9 @@ asmlinkage void vmx_vmexit_handler(struc
    1.70      case EXIT_REASON_EXTERNAL_INTERRUPT:
    1.71          vmx_vmexit_do_extint(&regs);
    1.72          break;
    1.73 +    case EXIT_REASON_TRIPLE_FAULT:
    1.74 +        domain_crash_synchronous();
    1.75 +        break;
    1.76      case EXIT_REASON_PENDING_INTERRUPT:
    1.77          /*
    1.78           * Not sure exactly what the purpose of this is.  The only bits set
    1.79 @@ -2296,7 +2295,7 @@ asmlinkage void vmx_vmexit_handler(struc
    1.80                    v->arch.hvm_vcpu.u.vmx.exec_control);
    1.81          break;
    1.82      case EXIT_REASON_TASK_SWITCH:
    1.83 -        __hvm_bug(&regs);
    1.84 +        domain_crash_synchronous();
    1.85          break;
    1.86      case EXIT_REASON_CPUID:
    1.87          vmx_vmexit_do_cpuid(&regs);
    1.88 @@ -2321,7 +2320,7 @@ asmlinkage void vmx_vmexit_handler(struc
    1.89      case EXIT_REASON_VMCALL:
    1.90      {
    1.91          __get_instruction_length(inst_len);
    1.92 -        __vmread(GUEST_RIP, &eip);
    1.93 +        __vmread(GUEST_RIP, &rip);
    1.94          __vmread(EXIT_QUALIFICATION, &exit_qualification);
    1.95  
    1.96          hvm_do_hypercall(&regs);
    1.97 @@ -2330,13 +2329,13 @@ asmlinkage void vmx_vmexit_handler(struc
    1.98      }
    1.99      case EXIT_REASON_CR_ACCESS:
   1.100      {
   1.101 -        __vmread(GUEST_RIP, &eip);
   1.102 +        __vmread(GUEST_RIP, &rip);
   1.103          __get_instruction_length(inst_len);
   1.104          __vmread(EXIT_QUALIFICATION, &exit_qualification);
   1.105  
   1.106 -        HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len =%lx, exit_qualification = %lx",
   1.107 -                    eip, inst_len, exit_qualification);
   1.108 -        if (vmx_cr_access(exit_qualification, &regs))
   1.109 +        HVM_DBG_LOG(DBG_LEVEL_1, "rip = %lx, inst_len =%lx, exit_qualification = %lx",
   1.110 +                    rip, inst_len, exit_qualification);
   1.111 +        if ( vmx_cr_access(exit_qualification, &regs) )
   1.112              __update_guest_eip(inst_len);
   1.113          TRACE_VMEXIT(3,regs.error_code);
   1.114          TRACE_VMEXIT(4,exit_qualification);
   1.115 @@ -2360,13 +2359,14 @@ asmlinkage void vmx_vmexit_handler(struc
   1.116          __update_guest_eip(inst_len);
   1.117          break;
   1.118      case EXIT_REASON_MSR_WRITE:
   1.119 -        __vmread(GUEST_RIP, &eip);
   1.120          vmx_do_msr_write(&regs);
   1.121          __get_instruction_length(inst_len);
   1.122          __update_guest_eip(inst_len);
   1.123          break;
   1.124      case EXIT_REASON_MWAIT_INSTRUCTION:
   1.125 -        __hvm_bug(&regs);
   1.126 +    case EXIT_REASON_MONITOR_INSTRUCTION:
   1.127 +    case EXIT_REASON_PAUSE_INSTRUCTION:
   1.128 +        domain_crash_synchronous();
   1.129          break;
   1.130      case EXIT_REASON_VMCLEAR:
   1.131      case EXIT_REASON_VMLAUNCH:
   1.132 @@ -2375,15 +2375,15 @@ asmlinkage void vmx_vmexit_handler(struc
   1.133      case EXIT_REASON_VMREAD:
   1.134      case EXIT_REASON_VMRESUME:
   1.135      case EXIT_REASON_VMWRITE:
   1.136 -    case EXIT_REASON_VMOFF:
   1.137 -    case EXIT_REASON_VMON:
   1.138 -        /* Report invalid opcode exception when a VMX guest tries to execute 
   1.139 +    case EXIT_REASON_VMXOFF:
   1.140 +    case EXIT_REASON_VMXON:
   1.141 +        /* Report invalid opcode exception when a VMX guest tries to execute
   1.142              any of the VMX instructions */
   1.143          vmx_inject_hw_exception(v, TRAP_invalid_op, VMX_DELIVER_NO_ERROR_CODE);
   1.144          break;
   1.145  
   1.146      default:
   1.147 -        __hvm_bug(&regs);       /* should not happen */
   1.148 +        domain_crash_synchronous();     /* should not happen */
   1.149      }
   1.150  }
   1.151  
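One recurring idiom in the handler above deserves a note before the header
changes: for instruction-style exits (CPUID, HLT, VMCALL, MSR access) the
handler reads the VM-exit instruction length from the VMCS and advances the
guest RIP past the exiting instruction before resuming. A hypothetical,
self-contained sketch of that idiom follows; the helper name
skip_guest_instruction is invented here, while the real code uses
__get_instruction_length() and __update_guest_eip():

    /* Hypothetical sketch only: resume the guest after the instruction
     * that caused the exit. GUEST_RIP and VM_EXIT_INSTRUCTION_LEN are
     * genuine VMCS fields used elsewhere in this patch. */
    static void skip_guest_instruction(void)
    {
        unsigned long rip, inst_len;

        __vmread(GUEST_RIP, &rip);                    /* RIP at the exit */
        __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len); /* bytes to skip */
        __vmwrite(GUEST_RIP, rip + inst_len);         /* resume past it */
    }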
     2.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Tue Aug 08 10:21:51 2006 +0100
     2.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Tue Aug 08 10:29:58 2006 +0100
     2.3 @@ -40,82 +40,91 @@ extern unsigned int cpu_rev;
     2.4   * Need fill bits for SENTER
     2.5   */
     2.6  
     2.7 -#define MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE         0x00000016
     2.8 +#define MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE  0x00000016
     2.9  
    2.10 -#define MONITOR_PIN_BASED_EXEC_CONTROLS       \
    2.11 -    ( \
    2.12 -    MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE |   \
    2.13 -    PIN_BASED_EXT_INTR_MASK |   \
    2.14 -    PIN_BASED_NMI_EXITING \
    2.15 +#define MONITOR_PIN_BASED_EXEC_CONTROLS                 \
    2.16 +    (                                                   \
    2.17 +    MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE |    \
    2.18 +    PIN_BASED_EXT_INTR_MASK |                           \
    2.19 +    PIN_BASED_NMI_EXITING                               \
    2.20      )
    2.21  
    2.22 -#define MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE         0x0401e172
    2.23 +#define MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE  0x0401e172
    2.24  
    2.25 -#define _MONITOR_CPU_BASED_EXEC_CONTROLS \
    2.26 -    ( \
    2.27 +#define _MONITOR_CPU_BASED_EXEC_CONTROLS                \
    2.28 +    (                                                   \
    2.29      MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE |    \
    2.30 -    CPU_BASED_HLT_EXITING | \
    2.31 -    CPU_BASED_INVDPG_EXITING | \
    2.32 -    CPU_BASED_MWAIT_EXITING | \
    2.33 -    CPU_BASED_MOV_DR_EXITING | \
    2.34 -    CPU_BASED_ACTIVATE_IO_BITMAP | \
    2.35 -    CPU_BASED_USE_TSC_OFFSETING  \
    2.36 +    CPU_BASED_HLT_EXITING |                             \
    2.37 +    CPU_BASED_INVDPG_EXITING |                          \
    2.38 +    CPU_BASED_MWAIT_EXITING |                           \
    2.39 +    CPU_BASED_MOV_DR_EXITING |                          \
    2.40 +    CPU_BASED_ACTIVATE_IO_BITMAP |                      \
    2.41 +    CPU_BASED_USE_TSC_OFFSETING                         \
    2.42      )
    2.43  
    2.44 -#define MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE \
    2.45 -    ( \
    2.46 -    CPU_BASED_CR8_LOAD_EXITING | \
    2.47 -    CPU_BASED_CR8_STORE_EXITING \
    2.48 +#define MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE      \
    2.49 +    (                                                   \
    2.50 +    CPU_BASED_CR8_LOAD_EXITING |                        \
    2.51 +    CPU_BASED_CR8_STORE_EXITING                         \
    2.52      )
    2.53  
    2.54 -#define MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE   0x0003edff
    2.55 +#define MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE         0x0003edff
    2.56  
    2.57 -#define MONITOR_VM_EXIT_CONTROLS_IA32E_MODE       0x00000200
    2.58 +#define MONITOR_VM_EXIT_CONTROLS_IA32E_MODE             0x00000200
    2.59  
    2.60 -#define _MONITOR_VM_EXIT_CONTROLS                \
    2.61 -    ( \
    2.62 -    MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE |\
    2.63 -    VM_EXIT_ACK_INTR_ON_EXIT \
    2.64 +#define _MONITOR_VM_EXIT_CONTROLS                       \
    2.65 +    (                                                   \
    2.66 +    MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE |           \
    2.67 +    VM_EXIT_ACK_INTR_ON_EXIT                            \
    2.68      )
    2.69  
    2.70  #if defined (__x86_64__)
    2.71 -#define MONITOR_CPU_BASED_EXEC_CONTROLS \
    2.72 -    ( \
    2.73 -    _MONITOR_CPU_BASED_EXEC_CONTROLS | \
    2.74 -    MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE \
    2.75 +#define MONITOR_CPU_BASED_EXEC_CONTROLS                 \
    2.76 +    (                                                   \
    2.77 +    _MONITOR_CPU_BASED_EXEC_CONTROLS |                  \
    2.78 +    MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE          \
    2.79      )
    2.80 -#define MONITOR_VM_EXIT_CONTROLS \
    2.81 -    ( \
    2.82 -    _MONITOR_VM_EXIT_CONTROLS | \
    2.83 -    MONITOR_VM_EXIT_CONTROLS_IA32E_MODE  \
    2.84 +#define MONITOR_VM_EXIT_CONTROLS                        \
    2.85 +    (                                                   \
    2.86 +    _MONITOR_VM_EXIT_CONTROLS |                         \
    2.87 +    MONITOR_VM_EXIT_CONTROLS_IA32E_MODE                 \
    2.88      )
    2.89  #else
    2.90 -#define MONITOR_CPU_BASED_EXEC_CONTROLS \
    2.91 -    _MONITOR_CPU_BASED_EXEC_CONTROLS 
    2.92 +#define MONITOR_CPU_BASED_EXEC_CONTROLS                 \
    2.93 +    _MONITOR_CPU_BASED_EXEC_CONTROLS
    2.94  
    2.95 -#define MONITOR_VM_EXIT_CONTROLS \
    2.96 +#define MONITOR_VM_EXIT_CONTROLS                        \
    2.97      _MONITOR_VM_EXIT_CONTROLS
    2.98  #endif
    2.99  
   2.100 -#define VM_ENTRY_CONTROLS_RESERVED_VALUE        0x000011ff
   2.101 -#define VM_ENTRY_CONTROLS_IA32E_MODE            0x00000200
   2.102 -#define MONITOR_VM_ENTRY_CONTROLS       VM_ENTRY_CONTROLS_RESERVED_VALUE 
   2.103 +#define VM_ENTRY_CONTROLS_RESERVED_VALUE                0x000011ff
   2.104 +#define VM_ENTRY_CONTROLS_IA32E_MODE                    0x00000200
   2.105 +
   2.106 +#define MONITOR_VM_ENTRY_CONTROLS                       \
   2.107 +    VM_ENTRY_CONTROLS_RESERVED_VALUE
   2.108 +
   2.109  /*
   2.110   * Exit Reasons
   2.111   */
   2.112 -#define VMX_EXIT_REASONS_FAILED_VMENTRY         0x80000000
   2.113 +#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
   2.114  
   2.115  #define EXIT_REASON_EXCEPTION_NMI       0
   2.116  #define EXIT_REASON_EXTERNAL_INTERRUPT  1
   2.117 -
   2.118 +#define EXIT_REASON_TRIPLE_FAULT        2
   2.119 +#define EXIT_REASON_INIT                3
   2.120 +#define EXIT_REASON_SIPI                4
   2.121 +#define EXIT_REASON_IO_SMI              5
   2.122 +#define EXIT_REASON_OTHER_SMI           6
   2.123  #define EXIT_REASON_PENDING_INTERRUPT   7
   2.124  
   2.125  #define EXIT_REASON_TASK_SWITCH         9
   2.126  #define EXIT_REASON_CPUID               10
   2.127  #define EXIT_REASON_HLT                 12
   2.128 +#define EXIT_REASON_INVD                13
   2.129  #define EXIT_REASON_INVLPG              14
   2.130  #define EXIT_REASON_RDPMC               15
   2.131  #define EXIT_REASON_RDTSC               16
   2.132 +#define EXIT_REASON_RSM                 17
   2.133  #define EXIT_REASON_VMCALL              18
   2.134  #define EXIT_REASON_VMCLEAR             19
   2.135  #define EXIT_REASON_VMLAUNCH            20
   2.136 @@ -124,19 +133,24 @@ extern unsigned int cpu_rev;
   2.137  #define EXIT_REASON_VMREAD              23
   2.138  #define EXIT_REASON_VMRESUME            24
   2.139  #define EXIT_REASON_VMWRITE             25
   2.140 -#define EXIT_REASON_VMOFF               26
   2.141 -#define EXIT_REASON_VMON                27
   2.142 +#define EXIT_REASON_VMXOFF              26
   2.143 +#define EXIT_REASON_VMXON               27
   2.144  #define EXIT_REASON_CR_ACCESS           28
   2.145  #define EXIT_REASON_DR_ACCESS           29
   2.146  #define EXIT_REASON_IO_INSTRUCTION      30
   2.147  #define EXIT_REASON_MSR_READ            31
   2.148  #define EXIT_REASON_MSR_WRITE           32
   2.149 -#define EXIT_REASON_MWAIT_INSTRUCTION   36
   2.150  
   2.151  #define EXIT_REASON_INVALID_GUEST_STATE 33
   2.152  #define EXIT_REASON_MSR_LOADING         34
   2.153 +
   2.154 +#define EXIT_REASON_MWAIT_INSTRUCTION   36
   2.155 +#define EXIT_REASON_MONITOR_INSTRUCTION 39
   2.156 +#define EXIT_REASON_PAUSE_INSTRUCTION   40
   2.157 +
   2.158  #define EXIT_REASON_MACHINE_CHECK       41
   2.159  
   2.160 +#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
   2.161  
   2.162  /*
   2.163   * Interruption-information format
   2.164 @@ -146,9 +160,9 @@ extern unsigned int cpu_rev;
   2.165  #define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
   2.166  #define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
   2.167  
   2.168 -#define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
   2.169 -#define INTR_TYPE_HW_EXCEPTION             (3 << 8) /* hardware exception */
   2.170 -#define INTR_TYPE_SW_EXCEPTION             (6 << 8) /* software exception */
   2.171 +#define INTR_TYPE_EXT_INTR              (0 << 8)    /* external interrupt */
   2.172 +#define INTR_TYPE_HW_EXCEPTION          (3 << 8)    /* hardware exception */
   2.173 +#define INTR_TYPE_SW_EXCEPTION          (6 << 8)    /* software exception */
   2.174  
   2.175  /*
   2.176   * Exit Qualifications for MOV for Control Register Access
   2.177 @@ -156,33 +170,33 @@ extern unsigned int cpu_rev;
   2.178  #define CONTROL_REG_ACCESS_NUM          0xf     /* 3:0, number of control register */
   2.179  #define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
   2.180  #define CONTROL_REG_ACCESS_REG          0xf00   /* 10:8, general purpose register */
   2.181 -#define LMSW_SOURCE_DATA  (0xFFFF << 16) /* 16:31 lmsw source */
   2.182 -#define REG_EAX                         (0 << 8) 
   2.183 -#define REG_ECX                         (1 << 8) 
   2.184 -#define REG_EDX                         (2 << 8) 
   2.185 -#define REG_EBX                         (3 << 8) 
   2.186 -#define REG_ESP                         (4 << 8) 
   2.187 -#define REG_EBP                         (5 << 8) 
   2.188 -#define REG_ESI                         (6 << 8) 
   2.189 -#define REG_EDI                         (7 << 8) 
   2.190 -#define REG_R8                         (8 << 8)
   2.191 -#define REG_R9                         (9 << 8)
   2.192 -#define REG_R10                        (10 << 8)
   2.193 -#define REG_R11                        (11 << 8)
   2.194 -#define REG_R12                        (12 << 8)
   2.195 -#define REG_R13                        (13 << 8)
   2.196 -#define REG_R14                        (14 << 8)
   2.197 -#define REG_R15                        (15 << 8)
   2.198 +#define LMSW_SOURCE_DATA                (0xFFFF << 16)  /* 16:31 lmsw source */
   2.199 +#define REG_EAX                         (0 << 8)
   2.200 +#define REG_ECX                         (1 << 8)
   2.201 +#define REG_EDX                         (2 << 8)
   2.202 +#define REG_EBX                         (3 << 8)
   2.203 +#define REG_ESP                         (4 << 8)
   2.204 +#define REG_EBP                         (5 << 8)
   2.205 +#define REG_ESI                         (6 << 8)
   2.206 +#define REG_EDI                         (7 << 8)
   2.207 +#define REG_R8                          (8 << 8)
   2.208 +#define REG_R9                          (9 << 8)
   2.209 +#define REG_R10                         (10 << 8)
   2.210 +#define REG_R11                         (11 << 8)
   2.211 +#define REG_R12                         (12 << 8)
   2.212 +#define REG_R13                         (13 << 8)
   2.213 +#define REG_R14                         (14 << 8)
   2.214 +#define REG_R15                         (15 << 8)
   2.215  
   2.216  /*
   2.217   * Exit Qualifications for MOV for Debug Register Access
   2.218   */
   2.219  #define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug register */
   2.220  #define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
   2.221 -#define TYPE_MOV_TO_DR                  (0 << 4) 
   2.222 +#define TYPE_MOV_TO_DR                  (0 << 4)
   2.223  #define TYPE_MOV_FROM_DR                (1 << 4)
   2.224  #define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose register */
   2.225 - 
   2.226 +
   2.227  /* These bits in the CR4 are owned by the host */
   2.228  #if CONFIG_PAGING_LEVELS >= 3
   2.229  #define VMX_CR4_HOST_MASK (X86_CR4_VMXE | X86_CR4_PAE)
   2.230 @@ -212,7 +226,7 @@ static inline void __vmptrld(u64 addr)
   2.231                             /* CF==1 or ZF==1 --> crash (ud2) */
   2.232                             "ja 1f ; ud2 ; 1:\n"
   2.233                             :
   2.234 -                           : "a" (&addr) 
   2.235 +                           : "a" (&addr)
   2.236                             : "memory");
   2.237  }
   2.238  
   2.239 @@ -221,7 +235,7 @@ static inline void __vmptrst(u64 addr)
   2.240      __asm__ __volatile__ ( VMPTRST_OPCODE
   2.241                             MODRM_EAX_07
   2.242                             :
   2.243 -                           : "a" (&addr) 
   2.244 +                           : "a" (&addr)
   2.245                             : "memory");
   2.246  }
   2.247  
   2.248 @@ -232,7 +246,7 @@ static inline void __vmpclear(u64 addr)
   2.249                             /* CF==1 or ZF==1 --> crash (ud2) */
   2.250                             "ja 1f ; ud2 ; 1:\n"
   2.251                             :
   2.252 -                           : "a" (&addr) 
   2.253 +                           : "a" (&addr)
   2.254                             : "memory");
   2.255  }
   2.256  
   2.257 @@ -252,7 +266,7 @@ static always_inline int ___vmread(
   2.258                             : "0" (0), "a" (field)
   2.259                             : "memory");
   2.260  
   2.261 -    switch (size) {
   2.262 +    switch ( size ) {
   2.263      case 1:
   2.264          *((u8 *) (ptr)) = ecx;
   2.265          break;
   2.266 @@ -274,43 +288,45 @@ static always_inline int ___vmread(
   2.267  }
   2.268  
   2.269  
   2.270 -static always_inline void __vmwrite_vcpu(struct vcpu *v, unsigned long field, unsigned long value)
   2.271 +static always_inline void __vmwrite_vcpu(
   2.272 +    struct vcpu *v, unsigned long field, unsigned long value)
   2.273  {
   2.274 -    switch(field) {
   2.275 +    switch ( field ) {
   2.276      case CR0_READ_SHADOW:
   2.277 -	v->arch.hvm_vmx.cpu_shadow_cr0 = value;
   2.278 -	break;
   2.279 +        v->arch.hvm_vmx.cpu_shadow_cr0 = value;
   2.280 +        break;
   2.281      case GUEST_CR0:
   2.282 -	v->arch.hvm_vmx.cpu_cr0 = value;
   2.283 -	break;
   2.284 +        v->arch.hvm_vmx.cpu_cr0 = value;
   2.285 +        break;
   2.286      case CPU_BASED_VM_EXEC_CONTROL:
   2.287 -	v->arch.hvm_vmx.cpu_based_exec_control = value;
   2.288 -	break;
   2.289 +        v->arch.hvm_vmx.cpu_based_exec_control = value;
   2.290 +        break;
   2.291      default:
   2.292 -	printk("__vmwrite_cpu: invalid field %lx\n", field);
   2.293 -	break;
   2.294 +        printk("__vmwrite_cpu: invalid field %lx\n", field);
   2.295 +        break;
   2.296      }
   2.297  }
   2.298  
   2.299 -static always_inline void __vmread_vcpu(struct vcpu *v, unsigned long field, unsigned long *value)
   2.300 +static always_inline void __vmread_vcpu(
   2.301 +    struct vcpu *v, unsigned long field, unsigned long *value)
   2.302  {
   2.303 -    switch(field) {
   2.304 +    switch ( field ) {
   2.305      case CR0_READ_SHADOW:
   2.306 -	*value = v->arch.hvm_vmx.cpu_shadow_cr0;
   2.307 -	break;
   2.308 +        *value = v->arch.hvm_vmx.cpu_shadow_cr0;
   2.309 +        break;
   2.310      case GUEST_CR0:
   2.311 -	*value = v->arch.hvm_vmx.cpu_cr0;
   2.312 -	break;
   2.313 +        *value = v->arch.hvm_vmx.cpu_cr0;
   2.314 +        break;
   2.315      case CPU_BASED_VM_EXEC_CONTROL:
   2.316 -	*value = v->arch.hvm_vmx.cpu_based_exec_control;
   2.317 -	break;
   2.318 +        *value = v->arch.hvm_vmx.cpu_based_exec_control;
   2.319 +        break;
   2.320      default:
   2.321 -	printk("__vmread_cpu: invalid field %lx\n", field);
   2.322 -	break;
   2.323 +        printk("__vmread_cpu: invalid field %lx\n", field);
   2.324 +        break;
   2.325      }
   2.326  }
   2.327  
   2.328 -static inline int __vmwrite (unsigned long field, unsigned long value)
   2.329 +static inline int __vmwrite(unsigned long field, unsigned long value)
   2.330  {
   2.331      struct vcpu *v = current;
   2.332      int rc;
   2.333 @@ -323,12 +339,12 @@ static inline int __vmwrite (unsigned lo
   2.334                             : "0" (0), "a" (field) , "c" (value)
   2.335                             : "memory");
   2.336  
   2.337 -    switch(field) {
   2.338 +    switch ( field ) {
   2.339      case CR0_READ_SHADOW:
   2.340      case GUEST_CR0:
   2.341      case CPU_BASED_VM_EXEC_CONTROL:
   2.342 -	__vmwrite_vcpu(v, field, value);
   2.343 -	break;
   2.344 +        __vmwrite_vcpu(v, field, value);
   2.345 +        break;
   2.346      }
   2.347  
   2.348      return rc;
   2.349 @@ -336,31 +352,31 @@ static inline int __vmwrite (unsigned lo
   2.350  
   2.351  static inline int __vm_set_bit(unsigned long field, unsigned long mask)
   2.352  {
   2.353 -        unsigned long tmp;
   2.354 -        int err = 0;
   2.355 +    unsigned long tmp;
   2.356 +    int err = 0;
   2.357  
   2.358 -        err |= __vmread(field, &tmp);
   2.359 -        tmp |= mask;
   2.360 -        err |= __vmwrite(field, tmp);
   2.361 +    err |= __vmread(field, &tmp);
   2.362 +    tmp |= mask;
   2.363 +    err |= __vmwrite(field, tmp);
   2.364  
   2.365 -        return err;
   2.366 +    return err;
   2.367  }
   2.368  
   2.369  static inline int __vm_clear_bit(unsigned long field, unsigned long mask)
   2.370  {
   2.371 -        unsigned long tmp;
   2.372 -        int err = 0;
   2.373 +    unsigned long tmp;
   2.374 +    int err = 0;
   2.375  
   2.376 -        err |= __vmread(field, &tmp);
   2.377 -        tmp &= ~mask;
   2.378 -        err |= __vmwrite(field, tmp);
   2.379 +    err |= __vmread(field, &tmp);
   2.380 +    tmp &= ~mask;
   2.381 +    err |= __vmwrite(field, tmp);
   2.382  
   2.383 -        return err;
   2.384 +    return err;
   2.385  }
   2.386  
   2.387  static inline void __vmxoff (void)
   2.388  {
   2.389 -    __asm__ __volatile__ ( VMXOFF_OPCODE 
   2.390 +    __asm__ __volatile__ ( VMXOFF_OPCODE
   2.391                             ::: "memory");
   2.392  }
   2.393  
   2.394 @@ -373,7 +389,7 @@ static inline int __vmxon (u64 addr)
   2.395                             /* CF==1 or ZF==1 --> rc = -1 */
   2.396                             "setna %b0 ; neg %0"
   2.397                             : "=q" (rc)
   2.398 -                           : "0" (0), "a" (&addr) 
   2.399 +                           : "0" (0), "a" (&addr)
   2.400                             : "memory");
   2.401  
   2.402      return rc;
   2.403 @@ -390,9 +406,9 @@ static inline void vmx_stts(void)
   2.404          return;
   2.405  
   2.406      /*
   2.407 -     * If the guest does not have TS enabled then we must cause and handle an 
   2.408 -     * exception on first use of the FPU. If the guest *does* have TS enabled 
   2.409 -     * then this is not necessary: no FPU activity can occur until the guest 
   2.410 +     * If the guest does not have TS enabled then we must cause and handle an
   2.411 +     * exception on first use of the FPU. If the guest *does* have TS enabled
   2.412 +     * then this is not necessary: no FPU activity can occur until the guest
   2.413       * clears CR0.TS, and we will initialise the FPU when that happens.
   2.414       */
   2.415      __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
   2.416 @@ -421,66 +437,64 @@ static inline int vmx_pgbit_test(struct 
   2.417      return (cr0 & X86_CR0_PG);
   2.418  }
   2.419  
   2.420 -static inline int __vmx_inject_exception(struct vcpu *v, int trap, int type, 
   2.421 +static inline void __vmx_inject_exception(struct vcpu *v, int trap, int type,
   2.422                                           int error_code, int ilen)
   2.423  {
   2.424      unsigned long intr_fields;
   2.425  
   2.426      /* Reflect it back into the guest */
   2.427      intr_fields = (INTR_INFO_VALID_MASK | type | trap);
   2.428 -    if (error_code != VMX_DELIVER_NO_ERROR_CODE) {
   2.429 +    if ( error_code != VMX_DELIVER_NO_ERROR_CODE ) {
   2.430          __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
   2.431          intr_fields |= INTR_INFO_DELIVER_CODE_MASK;
   2.432       }
   2.433  
   2.434 -    if(ilen)
   2.435 +    if ( ilen )
   2.436        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, ilen);
   2.437  
   2.438      __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
   2.439 -    return 0;
   2.440  }
   2.441  
   2.442 -static inline int vmx_inject_hw_exception(struct vcpu *v, int trap, int error_code)
   2.443 +static inline void vmx_inject_hw_exception(
   2.444 +    struct vcpu *v, int trap, int error_code)
   2.445  {
   2.446      v->arch.hvm_vmx.vector_injected = 1;
   2.447 -    return __vmx_inject_exception(v, trap, INTR_TYPE_HW_EXCEPTION,
   2.448 -				  error_code, 0);
   2.449 +    __vmx_inject_exception(v, trap, INTR_TYPE_HW_EXCEPTION, error_code, 0);
   2.450  }
   2.451  
   2.452 -static inline int vmx_inject_sw_exception(struct vcpu *v, int trap, int instruction_len) {
   2.453 -     v->arch.hvm_vmx.vector_injected=1;
   2.454 -     return __vmx_inject_exception(v, trap, INTR_TYPE_SW_EXCEPTION,
   2.455 -				   VMX_DELIVER_NO_ERROR_CODE,
   2.456 -				   instruction_len);
   2.457 +static inline void vmx_inject_sw_exception(
   2.458 +    struct vcpu *v, int trap, int instruction_len)
   2.459 +{
   2.460 +    v->arch.hvm_vmx.vector_injected = 1;
   2.461 +    __vmx_inject_exception(v, trap, INTR_TYPE_SW_EXCEPTION,
   2.462 +                           VMX_DELIVER_NO_ERROR_CODE,
   2.463 +                           instruction_len);
   2.464  }
   2.465  
   2.466 -static inline int vmx_inject_extint(struct vcpu *v, int trap, int error_code)
   2.467 +static inline void vmx_inject_extint(struct vcpu *v, int trap, int error_code)
   2.468  {
   2.469      __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR, error_code, 0);
   2.470      __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
   2.471 -
   2.472 -    return 0;
   2.473  }
   2.474  
   2.475 -static inline int vmx_reflect_exception(struct vcpu *v)
   2.476 +static inline void vmx_reflect_exception(struct vcpu *v)
   2.477  {
   2.478      int error_code, intr_info, vector;
   2.479  
   2.480      __vmread(VM_EXIT_INTR_INFO, &intr_info);
   2.481      vector = intr_info & 0xff;
   2.482 -    if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
   2.483 +    if ( intr_info & INTR_INFO_DELIVER_CODE_MASK )
   2.484          __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
   2.485      else
   2.486          error_code = VMX_DELIVER_NO_ERROR_CODE;
   2.487  
   2.488  #ifndef NDEBUG
   2.489      {
   2.490 -        unsigned long eip;
   2.491 +        unsigned long rip;
   2.492  
   2.493 -        __vmread(GUEST_RIP, &eip);
   2.494 -        HVM_DBG_LOG(DBG_LEVEL_1,
   2.495 -                    "vmx_reflect_exception: eip = %lx, error_code = %x",
   2.496 -                    eip, error_code);
   2.497 +        __vmread(GUEST_RIP, &rip);
   2.498 +        HVM_DBG_LOG(DBG_LEVEL_1, "rip = %lx, error_code = %x",
   2.499 +                    rip, error_code);
   2.500      }
   2.501  #endif /* NDEBUG */
   2.502  
   2.503 @@ -489,15 +503,14 @@ static inline int vmx_reflect_exception(
   2.504         2.8.3, SW_EXCEPTION should be used for #BP and #OV, and
   2.505         HW_EXCPEPTION used for everything else.  The main difference
   2.506         appears to be that for SW_EXCEPTION, the EIP/RIP is incremented
   2.507 -       by VM_ENTER_INSTRUCTION_LEN bytes, whereas for HW_EXCEPTION, 
   2.508 +       by VM_ENTER_INSTRUCTION_LEN bytes, whereas for HW_EXCEPTION,
   2.509         it is not.  */
   2.510 -    if((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_SW_EXCEPTION) {
   2.511 -      int ilen;
   2.512 -      __vmread(VM_EXIT_INSTRUCTION_LEN, &ilen);
   2.513 -      vmx_inject_sw_exception(v, vector, ilen);
   2.514 +    if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_SW_EXCEPTION ) {
   2.515 +        int ilen;
   2.516 +        __vmread(VM_EXIT_INSTRUCTION_LEN, &ilen);
   2.517 +        vmx_inject_sw_exception(v, vector, ilen);
   2.518      } else
   2.519 -      vmx_inject_hw_exception(v, vector, error_code);
   2.520 -    return 0;
   2.521 +        vmx_inject_hw_exception(v, vector, error_code);
   2.522  }
   2.523  
   2.524  #endif /* __ASM_X86_HVM_VMX_VMX_H__ */
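
As a closing usage note, the read-modify-write helpers __vm_set_bit() and
__vm_clear_bit() defined above are how callers toggle individual VMCS
execution controls. A hedged example, assuming CPU_BASED_VIRTUAL_INTR_PENDING
is defined in the companion vmcs.h header (only CPU_BASED_VM_EXEC_CONTROL
itself appears in this patch):

    /* Illustrative only: open or close the interrupt window by flipping
     * one bit in the CPU-based execution controls. Error codes from the
     * underlying __vmread()/__vmwrite() are propagated to the caller. */
    static inline int set_intr_window_exiting(int enable)
    {
        return enable
            ? __vm_set_bit(CPU_BASED_VM_EXEC_CONTROL,
                           CPU_BASED_VIRTUAL_INTR_PENDING)
            : __vm_clear_bit(CPU_BASED_VM_EXEC_CONTROL,
                             CPU_BASED_VIRTUAL_INTR_PENDING);
    }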