ia64/xen-unstable

xen/arch/x86/hvm/vmx/intr.c @ 16692:9865d5e82802

hvm: Fix evtchn-to-fake-pci interrupt propagation.

Previously the evtchn_upcall_pending flag would only ever be sampled
on VCPU0, possibly leading to long delays in deasserting the
fake-pci-device INTx line if the interrupt is actually delivered to
a VCPU other than VCPU0.

Diagnosed by Ian Jackson <ian.jackson@eu.citrix.com>

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author:   Keir Fraser <keir.fraser@citrix.com>
date:     Tue Jan 08 15:55:29 2008 +0000 (2008-01-08)
parents:  81e63d66a64d
children: e4fd457a3dd5
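
The shape of the fix: the deassert check must sample the
evtchn_upcall_pending flag of whichever VCPU is entering the guest, not
unconditionally VCPU0's. Below is a minimal illustrative sketch of such a
hook. The real entry point is hvm_maybe_deassert_evtchn_irq(), called from
vmx_intr_assist() in this file; the body and the helper
hvm_deassert_fake_intx() shown here are assumptions for illustration, not
the actual Xen implementation.

/* Illustrative sketch only -- not the actual Xen implementation. */
static void sketch_maybe_deassert_evtchn_irq(void)
{
    struct vcpu *v = current;  /* the VCPU now entering the guest */

    /*
     * Sample the upcall-pending flag of the *current* VCPU. Before this
     * changeset only VCPU0's flag was ever consulted, so an event-channel
     * upcall delivered to another VCPU could leave the fake PCI INTx line
     * asserted long after the guest had serviced the event.
     */
    if ( !v->vcpu_info->evtchn_upcall_pending )
        hvm_deassert_fake_intx(v->domain);  /* hypothetical helper */
}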
/*
 * intr.c: handling I/O, interrupts related VMX entry/exit
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2004-2007, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/trace.h>
#include <xen/event.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>
#include <public/hvm/ioreq.h>
#include <asm/hvm/trace.h>

/*
 * A few notes on virtual NMI and INTR delivery, and interactions with
 * interruptibility states:
 *
 * We can only inject an ExtInt if EFLAGS.IF = 1 and there is no blocking by
 * STI or MOV SS. Otherwise the VM entry fails. The 'virtual interrupt
 * pending' control causes a VM exit when all these checks succeed. It will
 * exit immediately after VM entry if the checks succeed at that point.
 *
 * We can only inject an NMI if there is no blocking by MOV SS (also,
 * depending on implementation, if there is no blocking by STI). If the
 * pin-based 'virtual NMIs' control is specified then the NMI-blocking
 * interruptibility flag is also checked. The 'virtual NMI pending' control
 * (available only in conjunction with 'virtual NMIs') causes a VM exit when
 * all these checks succeed. It will exit immediately after VM entry if the
 * checks succeed at that point.
 *
 * Because a processor may or may not check blocking-by-STI when injecting
 * a virtual NMI, it will be necessary to convert that to block-by-MOV-SS
 * before specifying the 'virtual NMI pending' control. Otherwise we could
 * enter an infinite loop where we check blocking-by-STI in software and
 * thus delay delivery of a virtual NMI, but the processor causes immediate
 * VM exit because it does not check blocking-by-STI.
 *
 * Injecting a virtual NMI sets the NMI-blocking interruptibility flag only
 * if the 'virtual NMIs' control is set. Injecting *any* kind of event clears
 * the STI- and MOV-SS-blocking interruptibility-state flags.
 *
 * If MOV/POP SS is executed while MOV-SS-blocking is in effect, the effect
 * is cleared. If STI is executed while MOV-SS- or STI-blocking is in effect,
 * the effect is cleared. (i.e., MOV-SS-blocking 'dominates' STI-blocking.)
 */

static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
{
    u32 *cpu_exec_control = &v->arch.hvm_vmx.exec_control;
    u32 ctl = CPU_BASED_VIRTUAL_INTR_PENDING;

    ASSERT(intack.source != hvm_intsrc_none);

    if ( (intack.source == hvm_intsrc_nmi) && cpu_has_vmx_vnmi )
    {
        /*
         * We set MOV-SS blocking in lieu of STI blocking when delivering an
         * NMI. This is because it is processor-specific whether STI-blocking
         * blocks NMIs. Hence we *must* check for STI-blocking on NMI delivery
         * (otherwise vmentry will fail on processors that check for STI-
         * blocking) but if the processor does not check for STI-blocking then
         * we may immediately vmexit and hence make no progress!
         * (see SDM 3B 21.3, "Other Causes of VM Exits").
         */
        u32 intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
        if ( intr_shadow & VMX_INTR_SHADOW_STI )
        {
            /* Having both STI-blocking and MOV-SS-blocking fails vmentry. */
            intr_shadow &= ~VMX_INTR_SHADOW_STI;
            intr_shadow |= VMX_INTR_SHADOW_MOV_SS;
            __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
        }
        ctl = CPU_BASED_VIRTUAL_NMI_PENDING;
    }

    if ( !(*cpu_exec_control & ctl) )
    {
        *cpu_exec_control |= ctl;
        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
    }
}

static void vmx_dirq_assist(struct vcpu *v)
{
    unsigned int irq;
    uint32_t device, intx;
    struct domain *d = v->domain;
    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
    struct dev_intx_gsi_link *digl;

    if ( !vtd_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
        return;

    for ( irq = find_first_bit(hvm_irq_dpci->dirq_mask, NR_IRQS);
          irq < NR_IRQS;
          irq = find_next_bit(hvm_irq_dpci->dirq_mask, NR_IRQS, irq + 1) )
    {
        stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)]);
        clear_bit(irq, &hvm_irq_dpci->dirq_mask);

        list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
        {
            device = digl->device;
            intx = digl->intx;
            hvm_pci_intx_assert(d, device, intx);
            spin_lock(&hvm_irq_dpci->dirq_lock);
            hvm_irq_dpci->mirq[irq].pending++;
            spin_unlock(&hvm_irq_dpci->dirq_lock);
        }

        /*
         * Set a timer to see whether the guest finishes the interrupt. For
         * example, the guest OS may unmask the PIC during boot, before the
         * guest driver is loaded. hvm_pci_intx_assert() may succeed, but the
         * guest will never deal with the irq; the physical interrupt line
         * would then never be deasserted.
         */
        set_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)],
                  NOW() + PT_IRQ_TIME_OUT);
    }
}

asmlinkage void vmx_intr_assist(void)
{
    struct hvm_intack intack;
    struct vcpu *v = current;
    unsigned int tpr_threshold = 0;
    enum hvm_intblk intblk;

    /* Crank the handle on interrupt state. */
    pt_update_irq(v);
    vmx_dirq_assist(v);
    hvm_maybe_deassert_evtchn_irq();

    do {
        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            goto out;

        intblk = hvm_interrupt_blocked(v, intack);
        if ( intblk == hvm_intblk_tpr )
        {
            ASSERT(vlapic_enabled(vcpu_vlapic(v)));
            ASSERT(intack.source == hvm_intsrc_lapic);
            tpr_threshold = intack.vector >> 4;
            goto out;
        }

        if ( (intblk != hvm_intblk_none) ||
             (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK) )
        {
            enable_intr_window(v, intack);
            goto out;
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );

    if ( intack.source == hvm_intsrc_nmi )
    {
        vmx_inject_nmi(v);
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, v, intack.vector, /*fake=*/ 0);
        vmx_inject_extint(v, intack.vector);
        pt_intr_post(v, intack);
    }

    /* Is there another IRQ to queue up behind this one? */
    intack = hvm_vcpu_has_pending_irq(v);
    if ( unlikely(intack.source != hvm_intsrc_none) )
        enable_intr_window(v, intack);

 out:
    if ( cpu_has_vmx_tpr_shadow )
        __vmwrite(TPR_THRESHOLD, tpr_threshold);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */