ia64/xen-unstable: view xen/arch/x86/hvm/svm/intr.c @ 15675:66147ca8f9c4

hvm: Define common (across VMX and SVM) set of event types.
Signed-off-by: Keir Fraser <keir@xensource.com>

author    kfraser@localhost.localdomain
date      Tue Jul 31 10:11:47 2007 +0100
parents   50358c4b37f4
children  0636f262ecd8

/*
 * intr.c: Interrupt handling for SVM.
 * Copyright (c) 2005, AMD Inc.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/errno.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/paging.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/svm/svm.h>
#include <asm/hvm/svm/intr.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <public/hvm/ioreq.h>
#include <xen/domain_page.h>
#include <asm/hvm/trace.h>
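
/*
 * Injecting a 'dummy' virtual interrupt does not deliver anything to the
 * guest: it sets V_IRQ in the VMCB's virtual-interrupt control field so
 * that, together with the VINTR intercept enabled in svm_intr_assist(),
 * the processor exits back to Xen as soon as the guest is able to take
 * an interrupt. The real pending interrupt is then injected on that
 * later pass.
 */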
static void svm_inject_dummy_vintr(struct vcpu *v)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    vintr_t intr = vmcb->vintr;

    intr.fields.irq = 1;
    intr.fields.intr_masking = 1;
    intr.fields.vector = 0;
    intr.fields.prio = 0xF;
    intr.fields.ign_tpr = 1;
    vmcb->vintr = intr;
}
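
/*
 * NMIs are injected via the VMCB event-injection (EVENTINJ) field.
 * Vector 2 is the architectural NMI vector. The ASSERT guards against
 * clobbering an event that is still pending delivery.
 */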
static void svm_inject_nmi(struct vcpu *v)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    eventinj_t event;

    event.bytes = 0;
    event.fields.v = 1;
    event.fields.type = X86_EVENTTYPE_NMI;
    event.fields.vector = 2;

    ASSERT(vmcb->eventinj.fields.v == 0);
    vmcb->eventinj = event;
}
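
/*
 * External interrupts are likewise injected through EVENTINJ, with the
 * vector obtained from the virtual PIC/APIC by the caller.
 */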
static void svm_inject_extint(struct vcpu *v, int vector)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    eventinj_t event;

    event.bytes = 0;
    event.fields.v = 1;
    event.fields.type = X86_EVENTTYPE_EXT_INTR;
    event.fields.vector = vector;

    ASSERT(vmcb->eventinj.fields.v == 0);
    vmcb->eventinj = event;
}
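
/*
 * svm_intr_assist() runs on the VM-entry path: it decides whether a
 * pending virtual interrupt or NMI can be injected now and, if not,
 * arranges (via the VINTR intercept and a dummy virtual interrupt) to
 * be called back as soon as injection becomes possible.
 */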
asmlinkage void svm_intr_assist(void)
{
    struct vcpu *v = current;
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    enum hvm_intack intr_source;
    int intr_vector;

    /*
     * Did a previous event delivery cause this intercept?
     * This will happen if the injection was latched by the processor (hence
     * clearing vintr.fields.irq or eventinj.v) but a fault then occurred
     * during delivery (e.g., due to lack of shadow mapping of the guest IDT
     * or the guest-kernel stack).
     */
    if ( vmcb->exitintinfo.fields.v )
    {
        vmcb->eventinj = vmcb->exitintinfo;
        vmcb->exitintinfo.bytes = 0;
        /* intr_vector is not yet set here; trace the re-injected vector. */
        HVMTRACE_1D(REINJ_VIRQ, v, vmcb->eventinj.fields.vector);
        return;
    }

    /* Crank the handle on interrupt state. */
    pt_update_irq(v);
    hvm_set_callback_irq_level();

    do {
        intr_source = hvm_vcpu_has_pending_irq(v);
        if ( likely(intr_source == hvm_intack_none) )
            return;

        /*
         * If the guest can't take an interrupt right now, create a 'fake'
         * virtual interrupt on which to intercept as soon as the guest
         * _can_ take interrupts. Do not obtain the next interrupt from the
         * vlapic/pic if unable to inject.
         *
         * Also do this if there is an injection already pending. This is
         * because event delivery can arbitrarily delay the injection of the
         * vintr (for example, if the exception is handled via an interrupt
         * gate, hence zeroing RFLAGS.IF). In the meantime:
         * - the vTPR could be modified upwards, so we need to wait until
         *   the exception is delivered before we can safely decide that an
         *   interrupt is deliverable; and
         * - the guest might look at the APIC/PIC state, so we ought not to
         *   have cleared the interrupt out of the IRR.
         *
         * TODO: Better NMI handling. We need a way to skip a MOV SS
         * interrupt shadow. This is hard to do without hardware support.
         * We should also track 'NMI blocking' from NMI injection until
         * IRET. This can be done quite easily in software by intercepting
         * the unblocking IRET.
         */
        if ( !hvm_interrupts_enabled(v, intr_source) ||
             vmcb->eventinj.fields.v )
        {
            vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
            HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
            svm_inject_dummy_vintr(v);
            return;
        }
    } while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );

    if ( intr_source == hvm_intack_nmi )
    {
        svm_inject_nmi(v);
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
        svm_inject_extint(v, intr_vector);
        pt_intr_post(v, intr_vector, intr_source);
    }
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */