     ASSERT(vmcb->eventinj.fields.v == 0);
     vmcb->eventinj = event;
+
+    /*
+     * SVM does not virtualise the NMI mask, so we emulate it by intercepting
+     * the next IRET and blocking NMI injection until the intercept triggers.
+     */
+    vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IRET;
 }
 static void svm_inject_extint(struct vcpu *v, int vector)
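
To make the trick concrete, here is a minimal user-space model of the state machine this hunk sets up. It is a sketch only: `struct vnmi_state`, `vnmi_assert()` and the rest are invented names, and the real code re-injects a held NMI through the normal interrupt-assist path rather than directly from the IRET handler as this toy does.

    /* Toy model of NMI-mask emulation via IRET interception. */
    #include <stdbool.h>
    #include <stdio.h>

    struct vnmi_state {
        bool iret_intercept;   /* stands in for GENERAL1_INTERCEPT_IRET */
        bool nmi_pending;      /* an NMI held back by the virtual mask  */
    };

    /* Injecting an NMI arms the IRET intercept, which then acts as the
     * virtual NMI mask until the guest's handler executes IRET. */
    static void vnmi_inject(struct vnmi_state *s)
    {
        printf("NMI delivered to guest\n");
        s->iret_intercept = true;
    }

    /* An NMI arriving while the intercept is armed must be held back. */
    static void vnmi_assert(struct vnmi_state *s)
    {
        if ( s->iret_intercept )
            s->nmi_pending = true;
        else
            vnmi_inject(s);
    }

    /* The IRET intercept fires: the mask clears and a held NMI becomes
     * deliverable again. */
    static void vnmi_iret(struct vnmi_state *s)
    {
        s->iret_intercept = false;
        if ( s->nmi_pending )
        {
            s->nmi_pending = false;
            vnmi_inject(s);
        }
    }

    int main(void)
    {
        struct vnmi_state s = { 0 };
        vnmi_assert(&s);   /* delivered immediately, mask raised */
        vnmi_assert(&s);   /* held: previous NMI still in flight */
        vnmi_iret(&s);     /* IRET seen: held NMI delivered      */
        return 0;
    }
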
         return hvm_intblk_shadow;
     if ( intack.source == hvm_intsrc_nmi )
-        return hvm_intblk_none;
+        return ((vmcb->general1_intercepts & GENERAL1_INTERCEPT_IRET) ?
+                hvm_intblk_nmi_iret : hvm_intblk_none);
     ASSERT((intack.source == hvm_intsrc_pic) ||
            (intack.source == hvm_intsrc_lapic));
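
Read the new return value like this: the armed IRET intercept doubles as the virtual NMI-mask bit, so the blocking decision is a pure function of the intercept word. A stand-alone sketch, with illustrative types and a stand-in bit value rather than the real GENERAL1_INTERCEPT_IRET definition:

    #include <stdint.h>

    enum intblk { intblk_none, intblk_nmi_iret };  /* mirrors hvm_intblk_* */

    struct vmcb_model { uint32_t general1_intercepts; };
    #define INTERCEPT_IRET (1u << 20)  /* stand-in intercept bit */

    /* While the intercept is armed, an earlier NMI is still being
     * handled, so further NMIs must wait for the IRET. */
    static enum intblk nmi_blocked(const struct vmcb_model *vmcb)
    {
        return (vmcb->general1_intercepts & INTERCEPT_IRET)
               ? intblk_nmi_iret : intblk_none;
    }

    int main(void)
    {
        struct vmcb_model v = { .general1_intercepts = INTERCEPT_IRET };
        return (nmi_blocked(&v) == intblk_nmi_iret) ? 0 : 1;
    }
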
         reason = TSW_call_or_int;
         if ( (vmcb->exitinfo2 >> 44) & 1 )
             errcode = (uint32_t)vmcb->exitinfo2;
+
+        /*
+         * Some processors set the EXITINTINFO field when the task switch
+         * is caused by a task gate in the IDT. In this case we will be
+         * emulating the event injection, so we do not want the processor
+         * to re-inject the original event!
+         */
+        vmcb->eventinj.bytes = 0;
+
         hvm_task_switch((uint16_t)vmcb->exitinfo1, reason, errcode);
         break;
     }
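
The hazard here is an ordering problem: an event that was mid-delivery at the VMEXIT is reported in EXITINTINFO and would normally be queued back into EVENTINJ for hardware re-injection, but hvm_task_switch() delivers the original event itself as part of the emulation. A hedged sketch of that ordering; the model struct and function names are invented, not the actual Xen flow:

    #include <stdint.h>

    struct vmcb_model {
        uint64_t exitintinfo;  /* event latched by hardware at VMEXIT    */
        uint64_t eventinj;     /* event to be injected on the next VMRUN */
    };

    /* Placeholder for hvm_task_switch(): the emulation walks the task
     * gate and delivers the original event in software. */
    static void emulate_task_switch(void) { }

    static void handle_task_switch_exit(struct vmcb_model *vmcb)
    {
        /* Generic exit handling may already have re-queued the event... */
        vmcb->eventinj = vmcb->exitintinfo;

        /* ...so drop it, as the patch does, to avoid a double delivery. */
        vmcb->eventinj = 0;

        emulate_task_switch();
    }

    int main(void)
    {
        struct vmcb_model v = { .exitintinfo = 1 };
        handle_task_switch_exit(&v);
        return (int)v.eventinj;  /* 0: nothing left for hardware */
    }
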
         svm_do_nested_pgfault(vmcb->exitinfo2, regs);
         break;
+    case VMEXIT_IRET:
+        /*
+         * IRET clears the NMI mask. However, because we clear the mask
+         * /before/ executing IRET, we set the interrupt shadow to prevent
+         * a pending NMI from being injected immediately. This will work
+         * perfectly unless the IRET instruction faults: in that case we
+         * may inject an NMI before the NMI handler's IRET instruction is
+         * retired.
+         */
+        vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
+        vmcb->interrupt_shadow = 1;
+        break;
+
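
The subtlety this case trades on is that the intercept fires /before/ the guest's IRET executes, so the virtual NMI mask is dropped one instruction early; the interrupt shadow closes that one-instruction window, except in the fault case the comment concedes. A minimal model of the two gates, with hypothetical names throughout:

    #include <stdbool.h>

    struct nmi_gate {
        bool iret_intercept;  /* the virtual NMI mask                 */
        bool intr_shadow;     /* blocks injection for one instruction */
    };

    /* VMEXIT_IRET: clear the mask early, raise the shadow to cover
     * the gap until the IRET actually retires. */
    static void on_iret_intercept(struct nmi_gate *g)
    {
        g->iret_intercept = false;
        g->intr_shadow = true;
    }

    /* A new NMI may be injected only once both gates are open. */
    static bool nmi_injectable(const struct nmi_gate *g)
    {
        return !g->iret_intercept && !g->intr_shadow;
    }

    int main(void)
    {
        struct nmi_gate g = { .iret_intercept = true };
        on_iret_intercept(&g);              /* mask down, shadow up   */
        return nmi_injectable(&g) ? 1 : 0;  /* still blocked: returns 0 */
    }
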
     default:
     exit_and_crash:
         gdprintk(XENLOG_ERR, "unexpected VMEXIT: exit reason = 0x%x, "