ia64/xen-unstable

view xen/arch/x86/hvm/irq.c @ 16692:9865d5e82802

hvm: Fix evtchn-to-fake-pci interrupt propagation.

Previously the evtchn_upcall_pending flag would only ever be sampled
on VCPU0, possibly leading to long delays in deasserting the
fake-pci-device INTx line if the interrupt is actually delivered to
a VCPU other than VCPU0.
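
The two entry points involved are hvm_assert_evtchn_irq() and
hvm_maybe_deassert_evtchn_irq(), both defined in the listing below. As a
rough sketch of the intended usage (the caller shown here is hypothetical
and not part of this file), every VCPU re-checks the callback line on its
way back into guest context, rather than leaving that entirely to VCPU0:

    /* Hypothetical caller, for illustration only. */
    void vcpu_reenter_guest_sketch(void)
    {
        /*
         * Whichever VCPU delivered and handled the event may have cleared
         * VCPU0's evtchn_upcall_pending flag, so any VCPU must be able to
         * lower the fake-PCI INTx (or GSI) callback line promptly.
         */
        hvm_maybe_deassert_evtchn_irq();
    }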

Diagnosed by Ian Jackson <ian.jackson@eu.citrix.com>

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jan 08 15:55:29 2008 +0000 (2008-01-08)
parents 4553bc1087d9
children c6eeb71a85cf
line source
/******************************************************************************
 * irq.c
 *
 * Interrupt distribution and delivery logic.
 *
 * Copyright (c) 2006, K A Fraser, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/event.h>
#include <xen/sched.h>
#include <asm/hvm/domain.h>
#include <asm/hvm/support.h>

static void __hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    if ( hvm_irq->gsi_assert_count[gsi]++ == 0 )
        vioapic_irq_positive_edge(d, gsi);

    link = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
         (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
    {
        vioapic_irq_positive_edge(d, isa_irq);
        vpic_irq_positive_edge(d, isa_irq);
    }
}

void hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    spin_lock(&d->arch.hvm_domain.irq_lock);
    __hvm_pci_intx_assert(d, device, intx);
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

static void __hvm_pci_intx_deassert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    if ( !__test_and_clear_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    --hvm_irq->gsi_assert_count[gsi];

    link = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
         (--hvm_irq->gsi_assert_count[isa_irq] == 0) )
        vpic_irq_negative_edge(d, isa_irq);
}

void hvm_pci_intx_deassert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    spin_lock(&d->arch.hvm_domain.irq_lock);
    __hvm_pci_intx_deassert(d, device, intx);
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
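
#if 0 /* Illustrative model, not part of the original file. */
/*
 * The PCI INTx and ISA lines are level-triggered, so the code above keeps
 * a per-GSI assertion count rather than a single flag: only a 0->1
 * transition raises the virtual line and only a 1->0 transition lowers it.
 * The stand-alone sketch below (names invented for this sketch; it is not
 * a Xen interface) shows just that counting rule.
 */
#include <stdio.h>

static unsigned int model_gsi_assert_count[64];

static void model_assert(unsigned int gsi)
{
    if ( model_gsi_assert_count[gsi]++ == 0 )   /* 0 -> 1: raise the line */
        printf("GSI %u: positive edge\n", gsi);
}

static void model_deassert(unsigned int gsi)
{
    if ( --model_gsi_assert_count[gsi] == 0 )   /* 1 -> 0: lower the line */
        printf("GSI %u: negative edge\n", gsi);
}

int main(void)
{
    model_assert(20);    /* first source asserts: line goes high        */
    model_assert(20);    /* second source shares the GSI: no new edge   */
    model_deassert(20);  /* first source deasserts: line stays high     */
    model_deassert(20);  /* second source deasserts: line finally drops */
    return 0;
}
#endif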

void hvm_isa_irq_assert(
    struct domain *d, unsigned int isa_irq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
         (hvm_irq->gsi_assert_count[gsi]++ == 0) )
    {
        vioapic_irq_positive_edge(d, gsi);
        vpic_irq_positive_edge(d, isa_irq);
    }

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

void hvm_isa_irq_deassert(
    struct domain *d, unsigned int isa_irq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
         (--hvm_irq->gsi_assert_count[gsi] == 0) )
        vpic_irq_negative_edge(d, isa_irq);

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

static void hvm_set_callback_irq_level(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi, pdev, pintx, asserted;

    ASSERT(v->vcpu_id == 0);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    /* NB. Do not check the evtchn_upcall_mask. It is not used in HVM mode. */
    asserted = !!vcpu_info(v, evtchn_upcall_pending);
    if ( hvm_irq->callback_via_asserted == asserted )
        goto out;
    hvm_irq->callback_via_asserted = asserted;

    /* Callback status has changed. Update the callback via. */
    switch ( hvm_irq->callback_via_type )
    {
    case HVMIRQ_callback_gsi:
        gsi = hvm_irq->callback_via.gsi;
        if ( asserted && (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        {
            vioapic_irq_positive_edge(d, gsi);
            if ( gsi <= 15 )
                vpic_irq_positive_edge(d, gsi);
        }
        else if ( !asserted && (--hvm_irq->gsi_assert_count[gsi] == 0) )
        {
            if ( gsi <= 15 )
                vpic_irq_negative_edge(d, gsi);
        }
        break;
    case HVMIRQ_callback_pci_intx:
        pdev = hvm_irq->callback_via.pci.dev;
        pintx = hvm_irq->callback_via.pci.intx;
        if ( asserted )
            __hvm_pci_intx_assert(d, pdev, pintx);
        else
            __hvm_pci_intx_deassert(d, pdev, pintx);
    default:
        break;
    }

 out:
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
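
/*
 * Lower the callback-via line if it is currently asserted but VCPU0's
 * evtchn_upcall_pending flag has since been cleared.  Unlike
 * hvm_assert_evtchn_irq() below, this may be called on any VCPU of the
 * domain: the VCPU that actually handled the event-channel upcall is not
 * necessarily VCPU0.
 */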
void hvm_maybe_deassert_evtchn_irq(void)
{
    struct domain *d = current->domain;
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;

    if ( hvm_irq->callback_via_asserted &&
         !vcpu_info(d->vcpu[0], evtchn_upcall_pending) )
        hvm_set_callback_irq_level(d->vcpu[0]);
}

void hvm_assert_evtchn_irq(struct vcpu *v)
{
    if ( v->vcpu_id == 0 )
        hvm_set_callback_irq_level(v);
}

void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    u8 old_isa_irq;
    int i;

    ASSERT((link <= 3) && (isa_irq <= 15));

    spin_lock(&d->arch.hvm_domain.irq_lock);

    old_isa_irq = hvm_irq->pci_link.route[link];
    if ( old_isa_irq == isa_irq )
        goto out;
    hvm_irq->pci_link.route[link] = isa_irq;

    /* PCI pass-through fixup. */
    if ( hvm_irq->dpci )
    {
        if ( old_isa_irq )
            clear_bit(old_isa_irq, &hvm_irq->dpci->isairq_map);

        for ( i = 0; i < NR_LINK; i++ )
            if ( test_bit(i, &hvm_irq->dpci->link_map) &&
                 hvm_irq->pci_link.route[i] )
                set_bit(hvm_irq->pci_link.route[i],
                        &hvm_irq->dpci->isairq_map);
    }

    if ( hvm_irq->pci_link_assert_count[link] == 0 )
        goto out;

    if ( old_isa_irq && (--hvm_irq->gsi_assert_count[old_isa_irq] == 0) )
        vpic_irq_negative_edge(d, old_isa_irq);

    if ( isa_irq && (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
    {
        vioapic_irq_positive_edge(d, isa_irq);
        vpic_irq_positive_edge(d, isa_irq);
    }

 out:
    spin_unlock(&d->arch.hvm_domain.irq_lock);

    dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n",
            d->domain_id, link, old_isa_irq, isa_irq);
}

void hvm_set_callback_via(struct domain *d, uint64_t via)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi=0, pdev=0, pintx=0;
    uint8_t via_type;

    via_type = (uint8_t)(via >> 56) + 1;
    if ( ((via_type == HVMIRQ_callback_gsi) && (via == 0)) ||
         (via_type > HVMIRQ_callback_pci_intx) )
        via_type = HVMIRQ_callback_none;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    /* Tear down old callback via. */
    if ( hvm_irq->callback_via_asserted )
    {
        switch ( hvm_irq->callback_via_type )
        {
        case HVMIRQ_callback_gsi:
            gsi = hvm_irq->callback_via.gsi;
            if ( (--hvm_irq->gsi_assert_count[gsi] == 0) && (gsi <= 15) )
                vpic_irq_negative_edge(d, gsi);
            break;
        case HVMIRQ_callback_pci_intx:
            pdev = hvm_irq->callback_via.pci.dev;
            pintx = hvm_irq->callback_via.pci.intx;
            __hvm_pci_intx_deassert(d, pdev, pintx);
            break;
        default:
            break;
        }
    }

    /* Set up new callback via. */
    switch ( hvm_irq->callback_via_type = via_type )
    {
    case HVMIRQ_callback_gsi:
        gsi = hvm_irq->callback_via.gsi = (uint8_t)via;
        if ( (gsi == 0) || (gsi >= ARRAY_SIZE(hvm_irq->gsi_assert_count)) )
            hvm_irq->callback_via_type = HVMIRQ_callback_none;
        else if ( hvm_irq->callback_via_asserted &&
                  (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        {
            vioapic_irq_positive_edge(d, gsi);
            if ( gsi <= 15 )
                vpic_irq_positive_edge(d, gsi);
        }
        break;
    case HVMIRQ_callback_pci_intx:
        pdev = hvm_irq->callback_via.pci.dev = (uint8_t)(via >> 11) & 31;
        pintx = hvm_irq->callback_via.pci.intx = (uint8_t)via & 3;
        if ( hvm_irq->callback_via_asserted )
            __hvm_pci_intx_assert(d, pdev, pintx);
        break;
    default:
        break;
    }

    spin_unlock(&d->arch.hvm_domain.irq_lock);

    dprintk(XENLOG_G_INFO, "Dom%u callback via changed to ", d->domain_id);
    switch ( via_type )
    {
    case HVMIRQ_callback_gsi:
        printk("GSI %u\n", gsi);
        break;
    case HVMIRQ_callback_pci_intx:
        printk("PCI INTx Dev 0x%02x Int%c\n", pdev, 'A' + pintx);
        break;
    default:
        printk("None\n");
        break;
    }
}
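
#if 0 /* Illustrative only, not part of the original file. */
/*
 * Layout of the 'via' argument to hvm_set_callback_via(), derived from the
 * decode above: bits 63:56 select the type (0 = GSI, 1 = PCI INTx); a GSI
 * callback carries the GSI number in its low 8 bits; a PCI INTx callback
 * carries the device in bits 15:11 and the INTx pin in bits 1:0.  The
 * helpers below are examples only; the guest-visible plumbing that supplies
 * this value (an HVM parameter) lives outside this file.
 */
static uint64_t example_callback_via_gsi(uint8_t gsi)
{
    return ((uint64_t)0 << 56) | gsi;
}

static uint64_t example_callback_via_pci_intx(uint8_t dev, uint8_t intx)
{
    return ((uint64_t)1 << 56) | ((uint64_t)(dev & 31) << 11) | (intx & 3);
}
#endif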

struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
{
    struct hvm_domain *plat = &v->domain->arch.hvm_domain;
    int vector;

    if ( unlikely(v->nmi_pending) )
        return hvm_intack_nmi;

    if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
        return hvm_intack_pic(0);

    vector = vlapic_has_pending_irq(v);
    if ( vector != -1 )
        return hvm_intack_lapic(vector);

    return hvm_intack_none;
}

struct hvm_intack hvm_vcpu_ack_pending_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    int vector;

    switch ( intack.source )
    {
    case hvm_intsrc_nmi:
        if ( !test_and_clear_bool(v->nmi_pending) )
            intack = hvm_intack_none;
        break;
    case hvm_intsrc_pic:
        ASSERT(v->vcpu_id == 0);
        if ( (vector = vpic_ack_pending_irq(v)) == -1 )
            intack = hvm_intack_none;
        else
            intack.vector = (uint8_t)vector;
        break;
    case hvm_intsrc_lapic:
        if ( !vlapic_ack_pending_irq(v, intack.vector) )
            intack = hvm_intack_none;
        break;
    default:
        intack = hvm_intack_none;
        break;
    }

    return intack;
}
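
#if 0 /* Illustrative only, not part of the original file. */
/*
 * Rough sketch of how the two routines above are meant to pair up in an
 * interrupt-injection path.  The real callers are the hardware-specific
 * (VMX/SVM) interrupt-assist code; the function below is a simplified
 * stand-in, not the actual injection logic.
 */
static void interrupt_assist_sketch(struct vcpu *v)
{
    struct hvm_intack intack = hvm_vcpu_has_pending_irq(v);

    if ( intack.source == hvm_intsrc_none )
        return;

    if ( hvm_interrupt_blocked(v, intack) )
        return;  /* cannot inject now; try again on the next VM entry */

    intack = hvm_vcpu_ack_pending_irq(v, intack);
    if ( intack.source == hvm_intsrc_none )
        return;

    /* At this point intack.vector from intack.source would be injected
     * into the guest (hardware-specific, not shown here). */
}
#endif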

int hvm_local_events_need_delivery(struct vcpu *v)
{
    struct hvm_intack intack = hvm_vcpu_has_pending_irq(v);

    if ( likely(intack.source == hvm_intsrc_none) )
        return 0;

    return !hvm_interrupt_blocked(v, intack);
}

#if 0 /* Keep for debugging */
static void irq_dump(struct domain *d)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int i;
    printk("PCI 0x%16.16"PRIx64"%16.16"PRIx64
           " ISA 0x%8.8"PRIx32" ROUTE %u %u %u %u\n",
           hvm_irq->pci_intx.pad[0], hvm_irq->pci_intx.pad[1],
           (uint32_t) hvm_irq->isa_irq.pad[0],
           hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1],
           hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]);
    for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 )
        printk("GSI %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8
               " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
               hvm_irq->gsi_assert_count[i+0],
               hvm_irq->gsi_assert_count[i+1],
               hvm_irq->gsi_assert_count[i+2],
               hvm_irq->gsi_assert_count[i+3],
               hvm_irq->gsi_assert_count[i+4],
               hvm_irq->gsi_assert_count[i+5],
               hvm_irq->gsi_assert_count[i+6],
               hvm_irq->gsi_assert_count[i+7]);
    printk("Link %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
           hvm_irq->pci_link_assert_count[0],
           hvm_irq->pci_link_assert_count[1],
           hvm_irq->pci_link_assert_count[2],
           hvm_irq->pci_link_assert_count[3]);
    printk("Callback via %i:0x%"PRIx32",%s asserted\n",
           hvm_irq->callback_via_type, hvm_irq->callback_via.gsi,
           hvm_irq->callback_via_asserted ? "" : " not");
}
#endif

static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int asserted, pdev, pintx;
    int rc;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    pdev = hvm_irq->callback_via.pci.dev;
    pintx = hvm_irq->callback_via.pci.intx;
    asserted = (hvm_irq->callback_via_asserted &&
                (hvm_irq->callback_via_type == HVMIRQ_callback_pci_intx));

    /*
     * Deassert the virtual interrupt via the PCI INTx line. The virtual
     * interrupt status is not saved/restored, so the INTx line must be
     * deasserted in the restore context.
     */
    if ( asserted )
        __hvm_pci_intx_deassert(d, pdev, pintx);

    /* Save PCI IRQ lines */
    rc = hvm_save_entry(PCI_IRQ, 0, h, &hvm_irq->pci_intx);

    if ( asserted )
        __hvm_pci_intx_assert(d, pdev, pintx);

    spin_unlock(&d->arch.hvm_domain.irq_lock);

    return rc;
}

static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;

    /* Save ISA IRQ lines */
    return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) );
}

static int irq_save_link(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;

    /* Save PCI-ISA link state */
    return ( hvm_save_entry(PCI_LINK, 0, h, &hvm_irq->pci_link) );
}

static int irq_load_pci(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int link, dev, intx, gsi;

    /* Load the PCI IRQ lines */
    if ( hvm_load_entry(PCI_IRQ, h, &hvm_irq->pci_intx) != 0 )
        return -EINVAL;

    /* Clear the PCI link assert counts */
    for ( link = 0; link < 4; link++ )
        hvm_irq->pci_link_assert_count[link] = 0;

    /* Clear the GSI link assert counts */
    for ( gsi = 0; gsi < VIOAPIC_NUM_PINS; gsi++ )
        hvm_irq->gsi_assert_count[gsi] = 0;

    /* Recalculate the counts from the IRQ line state */
    for ( dev = 0; dev < 32; dev++ )
        for ( intx = 0; intx < 4; intx++ )
            if ( test_bit(dev*4 + intx, &hvm_irq->pci_intx.i) )
            {
                /* Direct GSI assert */
                gsi = hvm_pci_intx_gsi(dev, intx);
                hvm_irq->gsi_assert_count[gsi]++;
                /* PCI-ISA bridge assert */
                link = hvm_pci_intx_link(dev, intx);
                hvm_irq->pci_link_assert_count[link]++;
            }

    return 0;
}

static int irq_load_isa(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int irq;

    /* Load the ISA IRQ lines */
    if ( hvm_load_entry(ISA_IRQ, h, &hvm_irq->isa_irq) != 0 )
        return -EINVAL;

    /* Adjust the GSI assert counts for the ISA IRQ line state.
     * This relies on the PCI IRQ state being loaded first. */
    for ( irq = 0; irq < 16; irq++ )
        if ( test_bit(irq, &hvm_irq->isa_irq.i) )
            hvm_irq->gsi_assert_count[hvm_isa_irq_to_gsi(irq)]++;

    return 0;
}

static int irq_load_link(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int link, gsi;

    /* Load the PCI-ISA IRQ link routing table */
    if ( hvm_load_entry(PCI_LINK, h, &hvm_irq->pci_link) != 0 )
        return -EINVAL;

    /* Sanity check */
    for ( link = 0; link < 4; link++ )
        if ( hvm_irq->pci_link.route[link] > 15 )
        {
            gdprintk(XENLOG_ERR,
                     "HVM restore: PCI-ISA link %u out of range (%u)\n",
                     link, hvm_irq->pci_link.route[link]);
            return -EINVAL;
        }

    /* Adjust the GSI assert counts for the link outputs.
     * This relies on the PCI and ISA IRQ state being loaded first */
    for ( link = 0; link < 4; link++ )
    {
        if ( hvm_irq->pci_link_assert_count[link] != 0 )
        {
            gsi = hvm_irq->pci_link.route[link];
            if ( gsi != 0 )
                hvm_irq->gsi_assert_count[gsi]++;
        }
    }

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci,
                          1, HVMSR_PER_DOM);
HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa,
                          1, HVMSR_PER_DOM);
HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link,
                          1, HVMSR_PER_DOM);