xenbits.xensource.com Git - people/vhanquez/xen.git/commitdiff
svm: Better handling of NMI injection -- avoid nested NMIs.
author: Keir Fraser <keir.fraser@citrix.com>
Fri, 23 May 2008 10:09:05 +0000 (11:09 +0100)
committer: Keir Fraser <keir.fraser@citrix.com>
Fri, 23 May 2008 10:09:05 +0000 (11:09 +0100)
We do this by emulating the NMI mask which blocks NMI delivery until
next IRET on native hardware.

Signed-off-by: Gianluca Guida <gianluca.guida@eu.citrix.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen-unstable changeset:   17655:2ada81810ddb73f29dfd1eb00de466eec2881ce6
xen-unstable date:        Mon May 19 10:03:26 2008 +0100

xen/arch/x86/hvm/svm/intr.c
xen/arch/x86/hvm/svm/svm.c

index d2ba799a426e85ca88de170182ea080829beb50e..bacc31e9e6d5cbfc90da3312ed02d400625fd337 100644 (file)
@@ -51,6 +51,12 @@ static void svm_inject_nmi(struct vcpu *v)
 
     ASSERT(vmcb->eventinj.fields.v == 0);
     vmcb->eventinj = event;
+
+    /*
+     * SVM does not virtualise the NMI mask, so we emulate it by intercepting
+     * the next IRET and blocking NMI injection until the intercept triggers.
+     */
+    vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IRET;
 }
     
 static void svm_inject_extint(struct vcpu *v, int vector)
index cde129d24c286b5042b76a5230b2623bd4f56b76..206a8dbdd0a6cc2200321e6e6a08404d8b22e97f 100644 (file)
@@ -438,7 +438,8 @@ static enum hvm_intblk svm_interrupt_blocked(
         return hvm_intblk_shadow;
 
     if ( intack.source == hvm_intsrc_nmi )
-        return hvm_intblk_none;
+        return ((vmcb->general1_intercepts & GENERAL1_INTERCEPT_IRET) ?
+                hvm_intblk_nmi_iret : hvm_intblk_none);
 
     ASSERT((intack.source == hvm_intsrc_pic) ||
            (intack.source == hvm_intsrc_lapic));
@@ -2112,6 +2113,15 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
             reason = TSW_call_or_int;
         if ( (vmcb->exitinfo2 >> 44) & 1 )
             errcode = (uint32_t)vmcb->exitinfo2;
+
+        /*
+         * Some processors set the EXITINTINFO field when the task switch
+         * is caused by a task gate in the IDT. In this case we will be
+         * emulating the event injection, so we do not want the processor
+         * to re-inject the original event!
+         */
+        vmcb->eventinj.bytes = 0;
+
         hvm_task_switch((uint16_t)vmcb->exitinfo1, reason, errcode);
         break;
     }
@@ -2191,6 +2201,19 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
         svm_do_nested_pgfault(vmcb->exitinfo2, regs);
         break;
 
+    case VMEXIT_IRET:
+        /*
+         * IRET clears the NMI mask. However because we clear the mask
+         * /before/ executing IRET, we set the interrupt shadow to prevent
+         * a pending NMI from being injected immediately. This will work
+         * perfectly unless the IRET instruction faults: in that case we
+         * may inject an NMI before the NMI handler's IRET instruction is
+         * retired.
+         */
+        vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
+        vmcb->interrupt_shadow = 1;
+        break;
+
     default:
     exit_and_crash:
         gdprintk(XENLOG_ERR, "unexpected VMEXIT: exit reason = 0x%x, "