direct-io.hg

changeset 15466:e6d5e4709466

hvm vmx: Support 'virtual NMI' feature of VMX.
Signed-off-by: Haitao Shan <haitao.shan@intel.com>
Signed-off-by: Eddie Dong <eddie.dong@intel.com>
Signed-off-by: Dexuan Cui <dexuan.cui@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Tue Jul 03 18:46:00 2007 +0100 (2007-07-03)
parents 9fa9346e1c70
children e7d40fadbf17
files xen/arch/x86/hvm/vmx/intr.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/vmx/vmcs.h xen/include/asm-x86/hvm/vmx/vmx.h
line diff
     1.1 --- a/xen/arch/x86/hvm/vmx/intr.c	Tue Jul 03 17:22:17 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/vmx/intr.c	Tue Jul 03 18:46:00 2007 +0100
     1.3 @@ -71,13 +71,39 @@
     1.4   * the effect is cleared. (i.e., MOV-SS-blocking 'dominates' STI-blocking).
     1.5   */
     1.6  
     1.7 -static void enable_irq_window(struct vcpu *v)
     1.8 +static void enable_intr_window(struct vcpu *v, enum hvm_intack intr_source)
     1.9  {
    1.10 -    u32  *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
    1.11 -    
    1.12 -    if ( !(*cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) )
    1.13 +    u32 *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
    1.14 +    u32 ctl = CPU_BASED_VIRTUAL_INTR_PENDING;
    1.15 +
    1.16 +    if ( unlikely(intr_source == hvm_intack_none) )
    1.17 +        return;
    1.18 +
    1.19 +    if ( unlikely(intr_source == hvm_intack_nmi) && cpu_has_vmx_vnmi )
    1.20      {
    1.21 -        *cpu_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
    1.22 +        /*
    1.23 +         * We set MOV-SS blocking in lieu of STI blocking when delivering an
    1.24 +         * NMI. This is because it is processor-specific whether STI-blocking
    1.25 +         * blocks NMIs. Hence we *must* check for STI-blocking on NMI delivery
    1.26 +         * (otherwise vmentry will fail on processors that check for STI-
    1.27 +         * blocking) but if the processor does not check for STI-blocking then
     1.28 +         * we may immediately vmexit and hence make no progress!
    1.29 +         * (see SDM 3B 21.3, "Other Causes of VM Exits").
    1.30 +         */
    1.31 +        u32 intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
    1.32 +        if ( intr_shadow & VMX_INTR_SHADOW_STI )
    1.33 +        {
    1.34 +            /* Having both STI-blocking and MOV-SS-blocking fails vmentry. */
    1.35 +            intr_shadow &= ~VMX_INTR_SHADOW_STI;
     1.36 +            intr_shadow |= VMX_INTR_SHADOW_MOV_SS;
          +            __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
     1.37 +        }
    1.38 +        ctl = CPU_BASED_VIRTUAL_NMI_PENDING;
    1.39 +    }
    1.40 +
    1.41 +    if ( !(*cpu_exec_control & ctl) )
    1.42 +    {
    1.43 +        *cpu_exec_control |= ctl;
    1.44          __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
    1.45      }
    1.46  }
     1.47 @@ -120,8 +146,7 @@ asmlinkage void vmx_intr_assist(void)
    1.48          if ( unlikely(v->arch.hvm_vmx.vector_injected) )
    1.49          {
    1.50              v->arch.hvm_vmx.vector_injected = 0;
    1.51 -            if ( unlikely(intr_source != hvm_intack_none) )
    1.52 -                enable_irq_window(v);
    1.53 +            enable_intr_window(v, intr_source);
    1.54              return;
    1.55          }
    1.56  
     1.57 @@ -129,7 +154,9 @@ asmlinkage void vmx_intr_assist(void)
    1.58          idtv_info_field = __vmread(IDT_VECTORING_INFO_FIELD);
    1.59          if ( unlikely(idtv_info_field & INTR_INFO_VALID_MASK) )
    1.60          {
    1.61 -            __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
    1.62 +            /* See SDM 3B 25.7.1.1 and .2 for info about masking resvd bits. */
    1.63 +            __vmwrite(VM_ENTRY_INTR_INFO_FIELD,
    1.64 +                      idtv_info_field & ~INTR_INFO_RESVD_BITS_MASK);
    1.65  
    1.66              /*
    1.67               * Safe: the length will only be interpreted for software
     1.68 @@ -143,8 +170,16 @@ asmlinkage void vmx_intr_assist(void)
    1.69              if ( unlikely(idtv_info_field & 0x800) ) /* valid error code */
    1.70                  __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
    1.71                            __vmread(IDT_VECTORING_ERROR_CODE));
    1.72 -            if ( unlikely(intr_source != hvm_intack_none) )
    1.73 -                enable_irq_window(v);
    1.74 +            enable_intr_window(v, intr_source);
    1.75 +
    1.76 +            /*
    1.77 +             * Clear NMI-blocking interruptibility info if an NMI delivery
    1.78 +             * faulted. Re-delivery will re-set it (see SDM 3B 25.7.1.2).
    1.79 +             */
    1.80 +            if ( (idtv_info_field&INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI )
    1.81 +                __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
    1.82 +                          __vmread(GUEST_INTERRUPTIBILITY_INFO) &
    1.83 +                          ~VMX_INTR_SHADOW_NMI);
    1.84  
    1.85              HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
    1.86              return;
     1.87 @@ -153,14 +188,9 @@ asmlinkage void vmx_intr_assist(void)
    1.88          if ( likely(intr_source == hvm_intack_none) )
    1.89              return;
    1.90  
    1.91 -        /*
    1.92 -         * TODO: Better NMI handling. Shouldn't wait for EFLAGS.IF==1, but
    1.93 -         * should wait for exit from 'NMI blocking' window (NMI injection to
    1.94 -         * next IRET). This requires us to use the new 'virtual NMI' support.
    1.95 -         */
    1.96          if ( !hvm_interrupts_enabled(v, intr_source) )
    1.97          {
    1.98 -            enable_irq_window(v);
    1.99 +            enable_intr_window(v, intr_source);
   1.100              return;
   1.101          }
   1.102      } while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );
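
To make the new window-selection logic easy to trace outside a hypervisor, here is a minimal, compilable C model of enable_intr_window() as patched above. It is a sketch only: the VMCS is replaced by plain variables, model_enable_intr_window() and has_vnmi are names invented for the example, and the constants copy the Xen/SDM bit positions.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004u /* interrupt-window exiting */
#define CPU_BASED_VIRTUAL_NMI_PENDING  0x00400000u /* NMI-window exiting */
#define VMX_INTR_SHADOW_STI            0x00000001u
#define VMX_INTR_SHADOW_MOV_SS         0x00000002u

enum intack { intack_none, intack_pic, intack_lapic, intack_nmi };

static uint32_t exec_control;    /* stands in for CPU_BASED_VM_EXEC_CONTROL */
static uint32_t intr_shadow;     /* stands in for GUEST_INTERRUPTIBILITY_INFO */
static bool     has_vnmi = true; /* assume PIN_BASED_VIRTUAL_NMIS is enabled */

static void model_enable_intr_window(enum intack src)
{
    uint32_t ctl = CPU_BASED_VIRTUAL_INTR_PENDING;

    if (src == intack_none)
        return;

    if (src == intack_nmi && has_vnmi) {
        /* Swap STI-blocking for MOV-SS-blocking before requesting an
         * NMI window, exactly as the comment in the patch explains. */
        if (intr_shadow & VMX_INTR_SHADOW_STI) {
            intr_shadow &= ~VMX_INTR_SHADOW_STI;
            intr_shadow |= VMX_INTR_SHADOW_MOV_SS;
        }
        ctl = CPU_BASED_VIRTUAL_NMI_PENDING;
    }

    exec_control |= ctl;
}

int main(void)
{
    intr_shadow = VMX_INTR_SHADOW_STI;  /* guest just executed STI */
    model_enable_intr_window(intack_nmi);
    printf("exec_control=%#x intr_shadow=%#x\n",
           (unsigned)exec_control, (unsigned)intr_shadow);
    /* Prints exec_control=0x400000 intr_shadow=0x2: an NMI window is
     * requested and the shadow reports MOV-SS, not STI, blocking. */
    return 0;
}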
     2.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Tue Jul 03 17:22:17 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Tue Jul 03 18:46:00 2007 +0100
     2.3 @@ -75,7 +75,7 @@ void vmx_init_vmcs_config(void)
     2.4  
     2.5      min = (PIN_BASED_EXT_INTR_MASK |
     2.6             PIN_BASED_NMI_EXITING);
     2.7 -    opt = 0; /*PIN_BASED_VIRTUAL_NMIS*/
     2.8 +    opt = PIN_BASED_VIRTUAL_NMIS;
     2.9      _vmx_pin_based_exec_control = adjust_vmx_controls(
    2.10          min, opt, MSR_IA32_VMX_PINBASED_CTLS);
    2.11  
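
The one-line change above is safe on older CPUs because of how adjust_vmx_controls() treats 'min' versus 'opt' bits. A sketch of that pattern (adjust_controls_sketch() is a hypothetical stand-in; the real function reads MSR_IA32_VMX_PINBASED_CTLS, whose low half gives must-be-one bits and high half gives may-be-one bits, and treats an unattainable 'min' bit as fatal):

#include <stdio.h>
#include <stdint.h>

static uint32_t adjust_controls_sketch(uint32_t min, uint32_t opt,
                                       uint32_t msr_low, uint32_t msr_high)
{
    uint32_t ctl = min | opt;
    ctl &= msr_high; /* drop bits the CPU cannot set to 1 (lost 'opt' bits) */
    ctl |= msr_low;  /* force bits the CPU requires to be 1 */
    /* The real adjust_vmx_controls() treats (min & ~ctl) != 0 as fatal. */
    return ctl;
}

int main(void)
{
    uint32_t min = 0x01 /* PIN_BASED_EXT_INTR_MASK */ |
                   0x08 /* PIN_BASED_NMI_EXITING   */;
    uint32_t opt = 0x20 /* PIN_BASED_VIRTUAL_NMIS  */;

    /* Example MSR halves: allowed-0 = 0x16; allowed-1 with and without
     * virtual-NMI support (illustrative values, not from a real CPU). */
    printf("vNMI supported:   %#x\n",
           (unsigned)adjust_controls_sketch(min, opt, 0x16, 0x7f));
    printf("vNMI unsupported: %#x\n",
           (unsigned)adjust_controls_sketch(min, opt, 0x16, 0x5f));
    return 0;
}

On a CPU without virtual-NMI support the optional bit is silently dropped (0x1f versus 0x3f above), and the cpu_has_vmx_vnmi predicate added below then evaluates false.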
     3.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Jul 03 17:22:17 2007 +0100
     3.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Tue Jul 03 18:46:00 2007 +0100
     3.3 @@ -1106,15 +1106,17 @@ static int vmx_interrupts_enabled(struct
     3.4  
     3.5      ASSERT(v == current);
     3.6  
     3.7 -    intr_shadow  = __vmread(GUEST_INTERRUPTIBILITY_INFO);
     3.8 -    intr_shadow &= VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS;
     3.9 +    intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
    3.10  
    3.11      if ( type == hvm_intack_nmi )
    3.12 -        return !intr_shadow;
    3.13 +        return !(intr_shadow & (VMX_INTR_SHADOW_STI|
    3.14 +                                VMX_INTR_SHADOW_MOV_SS|
    3.15 +                                VMX_INTR_SHADOW_NMI));
    3.16  
    3.17      ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
    3.18      eflags = __vmread(GUEST_RFLAGS);
    3.19 -    return !irq_masked(eflags) && !intr_shadow;
    3.20 +    return (!irq_masked(eflags) &&
    3.21 +            !(intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS)));
    3.22  }
    3.23  
    3.24  static void vmx_update_host_cr3(struct vcpu *v)
    3.25 @@ -2911,6 +2913,17 @@ asmlinkage void vmx_vmexit_handler(struc
    3.26  
    3.27          vector = intr_info & INTR_INFO_VECTOR_MASK;
    3.28  
    3.29 +        /*
    3.30 +         * Re-set the NMI shadow if vmexit caused by a guest IRET fault (see 3B
    3.31 +         * 25.7.1.2, "Resuming Guest Software after Handling an Exception").
    3.32 +         * (NB. If we emulate this IRET for any reason, we should re-clear!)
    3.33 +         */
    3.34 +        if ( unlikely(intr_info & INTR_INFO_NMI_UNBLOCKED_BY_IRET) &&
    3.35 +             !(__vmread(IDT_VECTORING_INFO_FIELD) & INTR_INFO_VALID_MASK) &&
    3.36 +             (vector != TRAP_double_fault) )
    3.37 +            __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
    3.38 +                    __vmread(GUEST_INTERRUPTIBILITY_INFO)|VMX_INTR_SHADOW_NMI);
    3.39 +
    3.40          perfc_incra(cause_vector, vector);
    3.41  
    3.42          switch ( vector )
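
The three-part guard on the shadow re-set can be read as a single predicate. A condensed, compilable restatement (nmi_shadow_should_be_reset() is a name invented for illustration; TRAP_double_fault is vector 8):

#include <stdbool.h>
#include <stdint.h>

#define INTR_INFO_VALID_MASK            0x80000000u
#define INTR_INFO_NMI_UNBLOCKED_BY_IRET 0x00001000u
#define TRAP_double_fault               8

static bool nmi_shadow_should_be_reset(uint32_t exit_intr_info,
                                       uint32_t idtv_info,
                                       unsigned int vector)
{
    /* Bit 12 reports that the exception was taken part-way through an
     * IRET which had already lifted NMI blocking. Since the guest will
     * re-execute that IRET once the fault is handled, software must
     * restore VMX_INTR_SHADOW_NMI -- unless an event delivery was in
     * progress (IDT-vectoring valid) or the fault is #DF. */
    return (exit_intr_info & INTR_INFO_NMI_UNBLOCKED_BY_IRET) &&
           !(idtv_info & INTR_INFO_VALID_MASK) &&
           (vector != TRAP_double_fault);
}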
     4.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Tue Jul 03 17:22:17 2007 +0100
     4.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Tue Jul 03 18:46:00 2007 +0100
     4.3 @@ -137,6 +137,8 @@ extern bool_t cpu_has_vmx_ins_outs_instr
     4.4      (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
     4.5  #define cpu_has_vmx_tpr_shadow \
     4.6      (vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)
     4.7 +#define cpu_has_vmx_vnmi \
     4.8 +    (vmx_pin_based_exec_control & PIN_BASED_VIRTUAL_NMIS)
     4.9  #define cpu_has_vmx_msr_bitmap \
    4.10      (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP)
    4.11  extern char *vmx_msr_bitmap;
     5.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Tue Jul 03 17:22:17 2007 +0100
     5.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Tue Jul 03 18:46:00 2007 +0100
     5.3 @@ -90,7 +90,9 @@ void vmx_vlapic_msr_changed(struct vcpu 
     5.4  #define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
     5.5  #define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
     5.6  #define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
     5.7 +#define INTR_INFO_NMI_UNBLOCKED_BY_IRET 0x1000          /* 12 */
     5.8  #define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
     5.9 +#define INTR_INFO_RESVD_BITS_MASK       0x7ffff000
    5.10  
    5.11  #define INTR_TYPE_EXT_INTR              (0 << 8)    /* external interrupt */
    5.12  #define INTR_TYPE_NMI                   (2 << 8)    /* NMI                */
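
A worked example of the field layout these masks describe, and of why re-injection masks with INTR_INFO_RESVD_BITS_MASK (bits 30:12, which include the new IRET-unblocking bit). decode_intr_info() is a name invented for this sketch:

#include <stdio.h>
#include <stdint.h>

#define INTR_INFO_VECTOR_MASK           0x000000ffu  /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK        0x00000700u  /* 10:8 */
#define INTR_INFO_DELIVER_CODE_MASK     0x00000800u  /* 11 */
#define INTR_INFO_NMI_UNBLOCKED_BY_IRET 0x00001000u  /* 12 */
#define INTR_INFO_VALID_MASK            0x80000000u  /* 31 */
#define INTR_INFO_RESVD_BITS_MASK       0x7ffff000u  /* 30:12 */

static void decode_intr_info(uint32_t info)
{
    if (!(info & INTR_INFO_VALID_MASK)) {
        puts("event not valid");
        return;
    }
    printf("vector=%u type=%u error-code=%s iret-unblocked-nmi=%s\n",
           (unsigned)(info & INTR_INFO_VECTOR_MASK),
           (unsigned)((info & INTR_INFO_INTR_TYPE_MASK) >> 8),
           (info & INTR_INFO_DELIVER_CODE_MASK) ? "yes" : "no",
           (info & INTR_INFO_NMI_UNBLOCKED_BY_IRET) ? "yes" : "no");
    /* Bits 30:12 must be clear in VM_ENTRY_INTR_INFO_FIELD, which is why
     * intr.c now masks with ~INTR_INFO_RESVD_BITS_MASK on re-injection. */
    printf("re-injectable value=%#x\n",
           (unsigned)(info & ~INTR_INFO_RESVD_BITS_MASK));
}

int main(void)
{
    /* #PF (vector 14), hardware exception (type 3), error code valid,
     * taken while an IRET had unblocked NMIs (bit 12 set). */
    decode_intr_info(0x80001b0e);
    return 0;
}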