direct-io.hg: view xen/arch/x86/hvm/vmx/intr.c @ 15388:50358c4b37f4

hvm: Support injection of virtual NMIs and clean up ExtInt handling in general.
Signed-off-by: Keir Fraser <keir@xensource.com>

author    kfraser@localhost.localdomain
date      Wed Jun 20 11:50:16 2007 +0100
parents   4d8381679606
children  e6d5e4709466

/*
 * intr.c: handling I/O, interrupts related VMX entry/exit
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2004-2007, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/trace.h>
#include <xen/event.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>
#include <public/hvm/ioreq.h>
#include <asm/hvm/trace.h>

/*
 * A few notes on virtual NMI and INTR delivery, and interactions with
 * interruptibility states:
 *
 * We can only inject an ExtInt if EFLAGS.IF = 1 and there is no blocking by
 * STI or MOV SS; otherwise the VM entry fails. The 'virtual interrupt
 * pending' control causes a VM exit when all these checks succeed. It will
 * exit immediately after VM entry if the checks succeed at that point.
 *
 * We can only inject an NMI if there is no blocking by MOV SS (and also,
 * depending on the implementation, no blocking by STI). If the pin-based
 * 'virtual NMIs' control is specified then the NMI-blocking interruptibility
 * flag is also checked. The 'virtual NMI pending' control (available only in
 * conjunction with 'virtual NMIs') causes a VM exit when all these checks
 * succeed. It will exit immediately after VM entry if the checks succeed at
 * that point.
 *
 * Because a processor may or may not check blocking-by-STI when injecting a
 * virtual NMI, it is necessary to convert blocking-by-STI to
 * blocking-by-MOV-SS before specifying the 'virtual NMI pending' control.
 * Otherwise we could enter an infinite loop where we check blocking-by-STI
 * in software and thus delay delivery of a virtual NMI, but the processor
 * causes an immediate VM exit because it does not check blocking-by-STI.
 *
 * Injecting a virtual NMI sets the NMI-blocking interruptibility flag only
 * if the 'virtual NMIs' control is set. Injecting *any* kind of event clears
 * the STI- and MOV-SS-blocking interruptibility-state flags.
 *
 * If MOV/POP SS is executed while MOV-SS-blocking is in effect, the effect
 * is cleared. If STI is executed while MOV-SS- or STI-blocking is in effect,
 * the effect is cleared (i.e., MOV-SS-blocking 'dominates' STI-blocking).
 */
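
/*
 * Illustrative sketch (not part of this changeset): one way to fold
 * blocking-by-STI into blocking-by-MOV-SS before requesting a 'virtual
 * NMI pending' exit, per the note above. The bit positions follow the
 * Intel SDM encoding of the interruptibility-state field (bit 0 =
 * blocking by STI, bit 1 = blocking by MOV SS); the helper name is
 * hypothetical and symbolic names for these bits may differ in this tree.
 */
static void nmi_window_convert_sti_blocking(void)
{
    unsigned long intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);

    if ( intr_shadow & (1ul << 0) )          /* blocking by STI */
    {
        /*
         * Re-express STI-blocking as MOV-SS-blocking, which the processor
         * always honours for NMI injection, so the 'virtual NMI pending'
         * exit is not taken while software still considers NMIs blocked.
         */
        intr_shadow &= ~(1ul << 0);
        intr_shadow |= (1ul << 1);           /* blocking by MOV SS */
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
    }
}

/*
 * CPU_BASED_VIRTUAL_INTR_PENDING enables 'interrupt-window exiting': we get
 * a VM exit as soon as the guest is able to accept an ExtInt injection.
 */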
static void enable_irq_window(struct vcpu *v)
{
    u32 *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;

    if ( !(*cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) )
    {
        *cpu_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
    }
}

static void update_tpr_threshold(struct vlapic *vlapic)
{
    int max_irr, tpr;

    if ( !cpu_has_vmx_tpr_shadow )
        return;

    if ( !vlapic_enabled(vlapic) ||
         ((max_irr = vlapic_find_highest_irr(vlapic)) == -1) )
    {
        __vmwrite(TPR_THRESHOLD, 0);
        return;
    }

    tpr = vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xF0;
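
    /*
     * TPR_THRESHOLD holds a 4-bit priority class: a 'TPR below threshold'
     * exit fires when the guest lowers VTPR[7:4] below it. Writing
     * min(tpr, max_irr) >> 4 therefore gives us a VM exit as soon as the
     * highest pending vector becomes deliverable.
     */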
    __vmwrite(TPR_THRESHOLD, (max_irr > tpr) ? (tpr >> 4) : (max_irr >> 4));
}
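
/*
 * Decide what to inject on the next VM entry: replay any event whose
 * delivery was interrupted by this exit, otherwise take the highest-priority
 * pending interrupt source, opening an interrupt window if it cannot be
 * delivered yet.
 */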
asmlinkage void vmx_intr_assist(void)
{
    int intr_vector;
    enum hvm_intack intr_source;
    struct vcpu *v = current;
    unsigned int idtv_info_field;
    unsigned long inst_len;

    pt_update_irq(v);

    hvm_set_callback_irq_level();

    update_tpr_threshold(vcpu_vlapic(v));

    do {
        intr_source = hvm_vcpu_has_pending_irq(v);

        if ( unlikely(v->arch.hvm_vmx.vector_injected) )
        {
            v->arch.hvm_vmx.vector_injected = 0;
            if ( unlikely(intr_source != hvm_intack_none) )
                enable_irq_window(v);
            return;
        }
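
        /*
         * If this VM exit interrupted delivery of an event through the IDT,
         * the details are latched in the IDT-vectoring fields; re-inject
         * that event on the next entry before considering new interrupts.
         */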
        /* This could be moved earlier in the VMX resume sequence. */
        idtv_info_field = __vmread(IDT_VECTORING_INFO_FIELD);
        if ( unlikely(idtv_info_field & INTR_INFO_VALID_MASK) )
        {
            __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);

            /*
             * Safe: the length will only be interpreted for software
             * exceptions and interrupts. If we get here then delivery of some
             * event caused a fault, and this always results in defined
             * VM_EXIT_INSTRUCTION_LEN.
             */
            inst_len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe */
            __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);

            if ( unlikely(idtv_info_field & 0x800) ) /* valid error code */
                __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
                          __vmread(IDT_VECTORING_ERROR_CODE));

            if ( unlikely(intr_source != hvm_intack_none) )
                enable_irq_window(v);

            HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
            return;
        }

        if ( likely(intr_source == hvm_intack_none) )
            return;

        /*
         * TODO: Better NMI handling. Shouldn't wait for EFLAGS.IF==1, but
         * should wait for exit from 'NMI blocking' window (NMI injection to
         * next IRET). This requires us to use the new 'virtual NMI' support.
         */
        if ( !hvm_interrupts_enabled(v, intr_source) )
        {
            enable_irq_window(v);
            return;
        }
    } while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );

    if ( intr_source == hvm_intack_nmi )
    {
        vmx_inject_nmi(v);
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
        vmx_inject_extint(v, intr_vector);
        pt_intr_post(v, intr_vector, intr_source);
    }
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */