xenbits.xensource.com Git - xen.git/commitdiff
hvm: Correctly combine hardware exceptions when one is raised during
attempted delivery of another.

author: Keir Fraser <keir.fraser@citrix.com>
        Wed, 26 Mar 2008 11:04:06 +0000 (11:04 +0000)
committer: Keir Fraser <keir.fraser@citrix.com>
        Wed, 26 Mar 2008 11:04:06 +0000 (11:04 +0000)

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/include/asm-x86/hvm/hvm.h
xen/include/asm-x86/hvm/vmx/vmx.h

index f1aa4fed34df9f50647d5d49bee2a25c4d84dd3c..c25fbf2d2708d4a82d693f3145f5bd7affb4dfff 100644 (file)
@@ -81,6 +81,58 @@ void hvm_enable(struct hvm_function_table *fns)
         printk("HVM: Hardware Assisted Paging detected.\n");
 }
 
+/*
+ * Need to re-inject a given event? We avoid re-injecting software exceptions
+ * and interrupts because the faulting/trapping instruction can simply be
+ * re-executed (neither VMX nor SVM update RIP when they VMEXIT during
+ * INT3/INTO/INTn).
+ */
+int hvm_event_needs_reinjection(uint8_t type, uint8_t vector)
+{
+    switch ( type )
+    {
+    case X86_EVENTTYPE_EXT_INTR:
+    case X86_EVENTTYPE_NMI:
+        /* External interrupts and NMIs are asynchronous: if delivery was
+         * interrupted by a VMEXIT they are lost unless re-injected. */
+        return 1;
+    case X86_EVENTTYPE_HW_EXCEPTION:
+        /*
+         * SVM uses type 3 ("HW Exception") for #OF and #BP. We explicitly
+         * check for these vectors, as they are really SW Exceptions. SVM has
+         * not updated RIP to point after the trapping instruction (INT3/INTO).
+         */
+        return (vector != 3) && (vector != 4);
+    default:
+        /* Software exceptions/interrupts can be re-executed (e.g., INT n). */
+        break;
+    }
+    return 0;
+}
+
+/*
+ * Combine two hardware exceptions: @vec2 was raised during delivery of @vec1.
+ * This means we can assume that @vec2 is contributory or a page fault.
+ */
+uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2)
+{
+    /* Exception during double-fault delivery always causes a triple fault. */
+    if ( vec1 == TRAP_double_fault )
+    {
+        hvm_triple_fault();
+        return TRAP_double_fault; /* dummy return */
+    }
+
+    /* Exception during page-fault delivery always causes a double fault. */
+    if ( vec1 == TRAP_page_fault )
+        return TRAP_double_fault;
+
+    /* Discard the first exception if it's benign or if we now have a #PF. */
+    /*
+     * Mask 0x7c01 selects bit 0 (#DE) and bits 10-14 (#TS/#NP/#SS/#GP/#PF):
+     * the contributory-class vectors plus page fault.  Any other vec1 is
+     * benign and may simply be discarded in favour of vec2.
+     */
+    if ( !((1u << vec1) & 0x7c01u) || (vec2 == TRAP_page_fault) )
+        return vec2;
+
+    /* Cannot combine the exceptions: double fault. */
+    return TRAP_double_fault;
+}
+
 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
 {
     u64 host_tsc;
index 225c9893c9bd30ff068a1936242b938871608bb8..be166a868cc9fa106cdc79d57dbd6f73e25a2ace 100644 (file)
@@ -725,7 +725,15 @@ static void svm_inject_exception(
 {
     struct vcpu *curr = current;
     struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
-    eventinj_t event;
+    eventinj_t event = vmcb->eventinj;
+
+    if ( unlikely(event.fields.v) &&
+         (event.fields.type == X86_EVENTTYPE_HW_EXCEPTION) )
+    {
+        trapnr = hvm_combine_hw_exceptions(event.fields.vector, trapnr);
+        if ( trapnr == TRAP_double_fault )
+            errcode = 0;
+    }
 
     event.bytes = 0;
     event.fields.v = 1;
index b78ca6451c7d5850da4a41834cc6c13fcaa79e48..efda084fa4151f586e157551a891c4a43b9b7be7 100644 (file)
@@ -983,6 +983,62 @@ static void vmx_flush_guest_tlbs(void)
      * because VMRESUME will flush it for us. */
 }
 
+
+
+/*
+ * Program the VM-entry event-injection fields to deliver event @trap of
+ * X86_EVENTTYPE_* @type to @v on the next VM entry, with @error_code
+ * (pass HVM_DELIVER_NO_ERROR_CODE to inject without an error code), and
+ * emit the matching HVM trace record.
+ */
+static void __vmx_inject_exception(
+    struct vcpu *v, int trap, int type, int error_code)
+{
+    unsigned long intr_fields;
+
+    /*
+     * NB. Callers do not need to worry about clearing STI/MOV-SS blocking:
+     *  "If the VM entry is injecting, there is no blocking by STI or by
+     *   MOV SS following the VM entry, regardless of the contents of the
+     *   interruptibility-state field [in the guest-state area before the
+     *   VM entry]", PRM Vol. 3, 22.6.1 (Interruptibility State).
+     */
+
+    /* Vector in bits 7:0, type in bits 10:8, valid bit set. */
+    intr_fields = (INTR_INFO_VALID_MASK | (type<<8) | trap);
+    if ( error_code != HVM_DELIVER_NO_ERROR_CODE ) {
+        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
+        intr_fields |= INTR_INFO_DELIVER_CODE_MASK;
+    }
+
+    __vmwrite(VM_ENTRY_INTR_INFO, intr_fields);
+
+    /* #PF traces include the faulting address (guest CR2). */
+    if ( trap == TRAP_page_fault )
+        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
+    else
+        HVMTRACE_2D(INJ_EXC, v, trap, error_code);
+}
+
+/*
+ * Inject hardware exception @trap with @error_code into @v.  If an event
+ * is already pending in VM_ENTRY_INTR_INFO and is itself a hardware
+ * exception, combine the two via hvm_combine_hw_exceptions() (possibly
+ * escalating to #DF, or a triple fault) instead of silently overwriting
+ * the pending event.
+ */
+void vmx_inject_hw_exception(struct vcpu *v, int trap, int error_code)
+{
+    unsigned long intr_info = __vmread(VM_ENTRY_INTR_INFO);
+
+    /* Bits 10:8 of intr_info hold the pending event's type. */
+    if ( unlikely(intr_info & INTR_INFO_VALID_MASK) &&
+         (((intr_info >> 8) & 7) == X86_EVENTTYPE_HW_EXCEPTION) )
+    {
+        /* The low 8 bits of intr_info are the pending event's vector. */
+        trap = hvm_combine_hw_exceptions((uint8_t)intr_info, trap);
+        if ( trap == TRAP_double_fault )
+            error_code = 0; /* #DF always pushes an error code of zero */
+    }
+
+    __vmx_inject_exception(v, trap, X86_EVENTTYPE_HW_EXCEPTION, error_code);
+}
+
+/* Inject external interrupt vector @trap into @v (no error code). */
+void vmx_inject_extint(struct vcpu *v, int trap)
+{
+    __vmx_inject_exception(v, trap, X86_EVENTTYPE_EXT_INTR,
+                           HVM_DELIVER_NO_ERROR_CODE);
+}
+
+/* Inject an NMI into @v: architectural vector 2, no error code. */
+void vmx_inject_nmi(struct vcpu *v)
+{
+    __vmx_inject_exception(v, 2, X86_EVENTTYPE_NMI,
+                           HVM_DELIVER_NO_ERROR_CODE);
+}
+
 static void vmx_inject_exception(
     unsigned int trapnr, int errcode, unsigned long cr2)
 {
index 9224cbe79864d5ef37f562fa9c4798dd0dc77649..fe58567263d4910520f63364ac6451f6bda7cc32 100644 (file)
@@ -270,32 +270,9 @@ static inline int hvm_do_pmu_interrupt(struct cpu_user_regs *regs)
 #define X86_EVENTTYPE_SW_INTERRUPT          4    /* software interrupt */
 #define X86_EVENTTYPE_SW_EXCEPTION          6    /* software exception */
 
-/*
- * Need to re-inject a given event? We avoid re-injecting software exceptions
- * and interrupts because the faulting/trapping instruction can simply be
- * re-executed (neither VMX nor SVM update RIP when they VMEXIT during
- * INT3/INTO/INTn).
- */
-static inline int hvm_event_needs_reinjection(uint8_t type, uint8_t vector)
-{
-    switch ( type )
-    {
-    case X86_EVENTTYPE_EXT_INTR:
-    case X86_EVENTTYPE_NMI:
-        return 1;
-    case X86_EVENTTYPE_HW_EXCEPTION:
-        /*
-         * SVM uses type 3 ("HW Exception") for #OF and #BP. We explicitly
-         * check for these vectors, as they are really SW Exceptions. SVM has
-         * not updated RIP to point after the trapping instruction (INT3/INTO).
-         */
-        return (vector != 3) && (vector != 4);
-    default:
-        /* Software exceptions/interrupts can be re-executed (e.g., INT n). */
-        break;
-    }
-    return 0;
-}
+int hvm_event_needs_reinjection(uint8_t type, uint8_t vector);
+
+uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);
 
 static inline int hvm_cpu_up(void)
 {
index 94e4168fd7fda698e579f4542bf9d2b74f9fe9c4..acf523cca48275c800f134e111e28fb6721d1bb2 100644 (file)
@@ -264,49 +264,8 @@ static inline int __vmxon(u64 addr)
     return rc;
 }
 
-static inline void __vmx_inject_exception(
-    struct vcpu *v, int trap, int type, int error_code)
-{
-    unsigned long intr_fields;
-
-    /*
-     * NB. Callers do not need to worry about clearing STI/MOV-SS blocking:
-     *  "If the VM entry is injecting, there is no blocking by STI or by
-     *   MOV SS following the VM entry, regardless of the contents of the
-     *   interruptibility-state field [in the guest-state area before the
-     *   VM entry]", PRM Vol. 3, 22.6.1 (Interruptibility State).
-     */
-
-    intr_fields = (INTR_INFO_VALID_MASK | (type<<8) | trap);
-    if ( error_code != HVM_DELIVER_NO_ERROR_CODE ) {
-        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
-        intr_fields |= INTR_INFO_DELIVER_CODE_MASK;
-    }
-
-    __vmwrite(VM_ENTRY_INTR_INFO, intr_fields);
-
-    if ( trap == TRAP_page_fault )
-        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
-    else
-        HVMTRACE_2D(INJ_EXC, v, trap, error_code);
-}
-
-static inline void vmx_inject_hw_exception(
-    struct vcpu *v, int trap, int error_code)
-{
-    __vmx_inject_exception(v, trap, X86_EVENTTYPE_HW_EXCEPTION, error_code);
-}
-
-static inline void vmx_inject_extint(struct vcpu *v, int trap)
-{
-    __vmx_inject_exception(v, trap, X86_EVENTTYPE_EXT_INTR,
-                           HVM_DELIVER_NO_ERROR_CODE);
-}
-
-static inline void vmx_inject_nmi(struct vcpu *v)
-{
-    __vmx_inject_exception(v, 2, X86_EVENTTYPE_NMI,
-                           HVM_DELIVER_NO_ERROR_CODE);
-}
+void vmx_inject_hw_exception(struct vcpu *v, int trap, int error_code);
+void vmx_inject_extint(struct vcpu *v, int trap);
+void vmx_inject_nmi(struct vcpu *v);
 
 #endif /* __ASM_X86_HVM_VMX_VMX_H__ */