Implement SVM specific interrupt handling
author cegger <none@none>
Wed, 9 Mar 2011 11:36:17 +0000 (12:36 +0100)
committer cegger <none@none>
Wed, 9 Mar 2011 11:36:17 +0000 (12:36 +0100)
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
Acked-by: Tim Deegan <Tim.Deegan@citrix.com>
Committed-by: Tim Deegan <Tim.Deegan@citrix.com>
xen/arch/x86/hvm/svm/intr.c
xen/arch/x86/hvm/svm/nestedsvm.c
xen/arch/x86/hvm/svm/svm.c
xen/include/asm-x86/hvm/svm/nestedsvm.h
xen/include/asm-x86/hvm/svm/svm.h
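
As an orientation aid, here is a minimal, self-contained C model of the NSVM_INTR_* dispatch contract this patch introduces in nestedsvm.h and consumes in svm_intr_assist(). Only the NSVM_INTR_* values are taken from the patch; every model_* name and the stub logic are illustrative assumptions, not Xen code.

#include <stdio.h>

/* Return codes copied from the patch (nestedsvm.h). */
#define NSVM_INTR_NOTHANDLED     3
#define NSVM_INTR_NOTINTERCEPTED 2
#define NSVM_INTR_FORCEVMEXIT    1
#define NSVM_INTR_MASKED         0

/* Stand-in for nestedsvm_vcpu_interrupt(): picks a return code from
 * whether the l2 guest masks the event and whether the l1 guest
 * intercepts the corresponding exitcode. */
static int model_vcpu_interrupt(int l2_masked, int l1_intercepts)
{
    if (l2_masked)
        return NSVM_INTR_MASKED;        /* interrupt window already open */
    if (l1_intercepts)
        return NSVM_INTR_FORCEVMEXIT;   /* defer a VMEXIT to l1 */
    return NSVM_INTR_NOTINTERCEPTED;    /* inject directly into l2 */
}

int main(void)
{
    /* Mirrors the switch the patch adds to svm_intr_assist(). */
    switch (model_vcpu_interrupt(0, 1)) {
    case NSVM_INTR_NOTINTERCEPTED:
        puts("inject the interrupt into the l2 guest directly");
        break;
    case NSVM_INTR_NOTHANDLED:
    case NSVM_INTR_FORCEVMEXIT:
        puts("return; a VMEXIT to the l1 guest has been deferred");
        break;
    case NSVM_INTR_MASKED:
        puts("return; the guest already enabled an interrupt window");
        break;
    }
    return 0;
}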

diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index 0a69d38142463359b175f8fb646e2e969988eed7..973373d7ab1a6f1a41de2b6d7056e7a3b0163919 100644
@@ -33,6 +33,7 @@
 #include <asm/hvm/vlapic.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/intr.h>
+#include <asm/hvm/nestedhvm.h> /* for nestedhvm_vcpu_in_guestmode */
 #include <xen/event.h>
 #include <xen/kernel.h>
 #include <public/hvm/ioreq.h>
@@ -74,15 +75,30 @@ static void svm_inject_extint(struct vcpu *v, int vector)
     ASSERT(vmcb->eventinj.fields.v == 0);
     vmcb->eventinj = event;
 }
-    
+
 static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+    uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
     vintr_t intr;
 
     ASSERT(intack.source != hvm_intsrc_none);
 
+    if ( nestedhvm_enabled(v->domain) ) {
+        struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+        if ( nv->nv_vmentry_pending ) {
+            struct vmcb_struct *gvmcb = nv->nv_vvmcx;
+
+            /* Check whether the l1 guest injects an interrupt into
+             * the l2 guest via vintr. Return here, otherwise the l2
+             * guest loses interrupts. */
+            ASSERT(gvmcb != NULL);
+            intr = vmcb_get_vintr(gvmcb);
+            if ( intr.fields.irq )
+                return;
+        }
+    }
+
     HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
                 vmcb->eventinj.fields.v?vmcb->eventinj.fields.vector:-1);
 
@@ -121,6 +137,7 @@ asmlinkage void svm_intr_assist(void)
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     struct hvm_intack intack;
+    enum hvm_intblk intblk;
 
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
@@ -130,6 +147,39 @@ asmlinkage void svm_intr_assist(void)
         if ( likely(intack.source == hvm_intsrc_none) )
             return;
 
+        intblk = hvm_interrupt_blocked(v, intack);
+        if ( intblk == hvm_intblk_svm_gif ) {
+            ASSERT(nestedhvm_enabled(v->domain));
+            return;
+        }
+
+        /* Interrupts for the nested guest are already
+         * in the vmcb.
+         */
+        if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
+        {
+            int rc;
+
+            /* The l2 guest was running when an interrupt for
+             * the l1 guest occurred.
+             */
+            rc = nestedsvm_vcpu_interrupt(v, intack);
+            switch (rc) {
+            case NSVM_INTR_NOTINTERCEPTED:
+                /* Inject interrupt into 2nd level guest directly. */
+                break;
+            case NSVM_INTR_NOTHANDLED:
+            case NSVM_INTR_FORCEVMEXIT:
+                return;
+            case NSVM_INTR_MASKED:
+                /* Guest already enabled an interrupt window. */
+                return;
+            default:
+                panic("%s: nestedsvm_vcpu_interrupt can't handle value 0x%x\n",
+                    __func__, rc);
+            }
+        }
+
         /*
          * Pending IRQs must be delayed if:
          * 1. An event is already pending. This is despite the fact that SVM
@@ -144,8 +194,7 @@ asmlinkage void svm_intr_assist(void)
          *      have cleared the interrupt out of the IRR.
          * 2. The IRQ is masked.
          */
-        if ( unlikely(vmcb->eventinj.fields.v) ||
-             hvm_interrupt_blocked(v, intack) )
+        if ( unlikely(vmcb->eventinj.fields.v) || intblk )
         {
             enable_intr_window(v, intack);
             return;
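
For reference, a self-contained sketch of the early-return added to enable_intr_window() above: when a nested vmentry is pending and the l1 guest has already programmed a virtual interrupt (V_IRQ) in its vmcb, opening another interrupt window must be skipped or the l2 guest loses that injection. All model_* names are assumptions for illustration, not Xen code.

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins for vintr_t and the nested-vmentry state. */
struct model_vintr { bool irq; };
struct model_nested {
    bool vmentry_pending;            /* nv_vmentry_pending */
    struct model_vintr gvmcb_vintr;  /* V_IRQ in the guest's vmcb */
};

/* Returns true when the window may be opened, false when the pending
 * l1->l2 entry already carries a V_IRQ that must not be overwritten. */
static bool may_open_intr_window(const struct model_nested *nv)
{
    if (nv->vmentry_pending && nv->gvmcb_vintr.irq)
        return false; /* l1 injects into l2 via vintr; do not disturb */
    return true;
}

int main(void)
{
    struct model_nested nv = { .vmentry_pending = true,
                               .gvmcb_vintr = { .irq = true } };
    printf("open window: %d\n", may_open_intr_window(&nv)); /* prints 0 */
    return 0;
}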
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 44c359134cdd050c27ea59f6576fbe7d52030761..d9cd851351e03585ad8e4482974fd26058f02a2e 100644
 #include <asm/hvm/svm/nestedsvm.h>
 #include <asm/hvm/svm/svmdebug.h>
 #include <asm/paging.h> /* paging_mode_hap */
+#include <asm/event.h> /* for local_event_delivery_(en|dis)able */
+
+static void
+nestedsvm_vcpu_clgi(struct vcpu *v)
+{
+    /* clear gif flag */
+    vcpu_nestedsvm(v).ns_gif = 0;
+    local_event_delivery_disable(); /* mask events for PV drivers */
+}
+
+static void
+nestedsvm_vcpu_stgi(struct vcpu *v)
+{
+    /* enable gif flag */
+    vcpu_nestedsvm(v).ns_gif = 1;
+    local_event_delivery_enable(); /* unmask events for PV drivers */
+}
 
 static int
 nestedsvm_vmcb_isvalid(struct vcpu *v, uint64_t vmcxaddr)
@@ -145,6 +162,7 @@ int nsvm_vcpu_reset(struct vcpu *v)
     if (svm->ns_iomap)
         svm->ns_iomap = NULL;
 
+    nestedsvm_vcpu_stgi(v);
     return 0;
 }
 
@@ -601,6 +619,7 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
         return ret;
     }
 
+    nestedsvm_vcpu_stgi(v);
     return 0;
 }
 
@@ -646,6 +665,7 @@ nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs,
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
     struct vmcb_struct *ns_vmcb;
 
+    ASSERT(svm->ns_gif == 0);
     ns_vmcb = nv->nv_vvmcx;
 
     if (nv->nv_vmexit_pending) {
@@ -1035,6 +1055,32 @@ nsvm_vmcb_hap_enabled(struct vcpu *v)
     return vcpu_nestedsvm(v).ns_hap_enabled;
 }
 
+enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
+{
+    struct nestedsvm *svm = &vcpu_nestedsvm(v);
+    struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+
+    ASSERT(nestedhvm_enabled(v->domain));
+
+    if ( !nestedsvm_gif_isset(v) )
+        return hvm_intblk_svm_gif;
+
+    if ( nestedhvm_vcpu_in_guestmode(v) ) {
+        if ( svm->ns_hostflags.fields.vintrmask )
+            if ( !svm->ns_hostflags.fields.rflagsif )
+                return hvm_intblk_rflags_ie;
+    }
+
+    if ( nv->nv_vmexit_pending ) {
+        /* hvm_inject_exception() must have run before this point;
+         * exceptions have higher priority than interrupts.
+         */
+        return hvm_intblk_rflags_ie;
+    }
+
+    return hvm_intblk_none;
+}
+
 /* MSR handling */
 int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content)
 {
@@ -1090,6 +1136,7 @@ nestedsvm_vmexit_defer(struct vcpu *v,
 {
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
 
+    nestedsvm_vcpu_clgi(v);
     svm->ns_vmexit.exitcode = exitcode;
     svm->ns_vmexit.exitinfo1 = exitinfo1;
     svm->ns_vmexit.exitinfo2 = exitinfo2;
@@ -1276,4 +1323,98 @@ asmlinkage void nsvm_vcpu_switch(struct cpu_user_regs *regs)
     }
 }
 
+/* Interrupts, Virtual GIF */
+int
+nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack)
+{
+    int ret;
+    enum hvm_intblk intr;
+    uint64_t exitcode = VMEXIT_INTR;
+    uint64_t exitinfo2 = 0;
+    ASSERT(nestedhvm_vcpu_in_guestmode(v));
+
+    intr = nhvm_interrupt_blocked(v);
+    if ( intr != hvm_intblk_none )
+        return NSVM_INTR_MASKED;
+
+    switch (intack.source) {
+    case hvm_intsrc_pic:
+    case hvm_intsrc_lapic:
+        exitcode = VMEXIT_INTR;
+        exitinfo2 = intack.vector;
+        break;
+    case hvm_intsrc_nmi:
+        exitcode = VMEXIT_NMI;
+        exitinfo2 = intack.vector;
+        break;
+    case hvm_intsrc_mce:
+        exitcode = VMEXIT_EXCEPTION_MC;
+        exitinfo2 = intack.vector;
+        break;
+    case hvm_intsrc_none:
+        return NSVM_INTR_NOTHANDLED;
+    default:
+        BUG();
+    }
+
+    ret = nsvm_vmcb_guest_intercepts_exitcode(v,
+                                     guest_cpu_user_regs(), exitcode);
+    if (ret) {
+        nestedsvm_vmexit_defer(v, exitcode, intack.source, exitinfo2);
+        return NSVM_INTR_FORCEVMEXIT;
+    }
+
+    return NSVM_INTR_NOTINTERCEPTED;
+}
+
+bool_t
+nestedsvm_gif_isset(struct vcpu *v)
+{
+    struct nestedsvm *svm = &vcpu_nestedsvm(v);
+
+    return (!!svm->ns_gif);
+}
+
+void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v)
+{
+    unsigned int inst_len;
+
+    if ( !nestedhvm_enabled(v->domain) ) {
+        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        return;
+    }
+
+    if ( (inst_len = __get_instruction_length(v, INSTR_STGI)) == 0 )
+        return;
+
+    nestedsvm_vcpu_stgi(v);
+
+    __update_guest_eip(regs, inst_len);
+}
+
+void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    unsigned int inst_len;
+    uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+    vintr_t intr;
+
+    if ( !nestedhvm_enabled(v->domain) ) {
+        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        return;
+    }
+
+    if ( (inst_len = __get_instruction_length(v, INSTR_CLGI)) == 0 )
+        return;
+
+    nestedsvm_vcpu_clgi(v);
+
+    /* After a CLGI, no interrupts should be delivered. */
+    intr = vmcb_get_vintr(vmcb);
+    intr.fields.irq = 0;
+    general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
+    vmcb_set_vintr(vmcb, intr);
+    vmcb_set_general1_intercepts(vmcb, general1_intercepts);
+
+    __update_guest_eip(regs, inst_len);
+}
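
To summarize the priority order that nsvm_intr_blocked() implements above, here is a compact standalone model: a clear GIF blocks everything, then an intercepted-but-masked vintr while in guest mode, then a pending VMEXIT, which must be delivered before ordinary interrupts. All model_* names are assumptions for illustration, not Xen code.

#include <stdbool.h>
#include <stdio.h>

enum model_intblk { BLK_NONE, BLK_RFLAGS_IE, BLK_SVM_GIF };

struct model_vcpu {
    bool gif;            /* ns_gif: set by STGI, cleared by CLGI */
    bool in_guestmode;   /* currently running the l2 guest */
    bool vintrmask;      /* ns_hostflags.fields.vintrmask */
    bool rflagsif;       /* ns_hostflags.fields.rflagsif */
    bool vmexit_pending; /* nv_vmexit_pending */
};

static enum model_intblk model_intr_blocked(const struct model_vcpu *v)
{
    if (!v->gif)
        return BLK_SVM_GIF;       /* GIF clear blocks everything */
    if (v->in_guestmode && v->vintrmask && !v->rflagsif)
        return BLK_RFLAGS_IE;     /* l1 masked interrupts for l2 */
    if (v->vmexit_pending)
        return BLK_RFLAGS_IE;     /* deliver the exception/VMEXIT first */
    return BLK_NONE;
}

int main(void)
{
    struct model_vcpu v = { .gif = false };
    printf("%d\n", model_intr_blocked(&v)); /* BLK_SVM_GIF */
    v.gif = true;
    v.vmexit_pending = true;
    printf("%d\n", model_intr_blocked(&v)); /* BLK_RFLAGS_IE */
    return 0;
}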
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 2a34145398fd62533b4c9fca5f10f2120b148b2f..55e70e6d6be5c8a074d096d01cdb9496aa58fae3 100644
@@ -78,8 +78,7 @@ static DEFINE_PER_CPU_READ_MOSTLY(void *, root_vmcb);
 
 static bool_t amd_erratum383_found __read_mostly;
 
-static void inline __update_guest_eip(
-    struct cpu_user_regs *regs, unsigned int inst_len)
+void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
 {
     struct vcpu *curr = current;
 
@@ -1618,6 +1617,7 @@ static struct hvm_function_table __read_mostly svm_function_table = {
     .nhvm_vcpu_asid = nsvm_vcpu_asid,
     .nhvm_vmcx_guest_intercepts_trap = nsvm_vmcb_guest_intercepts_trap,
     .nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled,
+    .nhvm_intr_blocked = nsvm_intr_blocked,
 };
 
 asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
@@ -1929,7 +1929,11 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
         svm_vmexit_do_vmsave(vmcb, regs, v, regs->eax);
         break;
     case VMEXIT_STGI:
+        svm_vmexit_do_stgi(regs, v);
+        break;
     case VMEXIT_CLGI:
+        svm_vmexit_do_clgi(regs, v);
+        break;
     case VMEXIT_SKINIT:
         hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
         break;
diff --git a/xen/include/asm-x86/hvm/svm/nestedsvm.h b/xen/include/asm-x86/hvm/svm/nestedsvm.h
index 5f6fd6cfd25f2923893c8361a836bf6c846d0d74..876802e521c05ac07e1f945b6711b9e322486c24 100644
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/svm/vmcb.h>
 
+/* SVM-specific intblk types; cannot be an enum because gcc 4.5 complains */
+/* GIF cleared */
+#define hvm_intblk_svm_gif      hvm_intblk_arch
+
 struct nestedsvm {
+    bool_t ns_gif;
     uint64_t ns_msr_hsavepa; /* MSR HSAVE_PA value */
 
     /* l1 guest physical address of virtual vmcb used by prior VMRUN.
@@ -111,11 +116,23 @@ int nsvm_vmcb_guest_intercepts_exitcode(struct vcpu *v,
     struct cpu_user_regs *regs, uint64_t exitcode);
 int nsvm_vmcb_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr);
 bool_t nsvm_vmcb_hap_enabled(struct vcpu *v);
+enum hvm_intblk nsvm_intr_blocked(struct vcpu *v);
 
 /* MSRs */
 int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
 int nsvm_wrmsr(struct vcpu *v, unsigned int msr, uint64_t msr_content);
 
+/* Interrupts, vGIF */
+void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v);
+void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v);
+bool_t nestedsvm_gif_isset(struct vcpu *v);
+
+#define NSVM_INTR_NOTHANDLED     3
+#define NSVM_INTR_NOTINTERCEPTED 2
+#define NSVM_INTR_FORCEVMEXIT    1
+#define NSVM_INTR_MASKED         0
+int nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack);
+
 #endif /* ASM_X86_HVM_SVM_NESTEDSVM_H__ */
 
 /*
diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
index eb3b01f1cde52b203ae85e4efe35df5b3668f71e..920c9e29d35a30163b8d3b06c12a23f28c7c944f 100644
@@ -61,6 +61,7 @@ static inline void svm_vmsave(void *vmcb)
 }
 
 unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
+void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len);
 
 extern u32 svm_feature_flags;