ia64/xen-unstable: xen/arch/x86/hvm/irq.c @ 16316:28487ba2ea1e

x86, hvm: Minor fix of hvm_set_pci_link_route().
Signed-off-by: Weidong Han <weidong.han@intel.com>

author   Keir Fraser <keir@xensource.com>
date     Mon Nov 05 10:16:30 2007 +0000
parents  007ff84be506
children 4fd6610949f1

/******************************************************************************
 * irq.c
 *
 * Interrupt distribution and delivery logic.
 *
 * Copyright (c) 2006, K A Fraser, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/event.h>
#include <xen/sched.h>
#include <asm/hvm/domain.h>
#include <asm/hvm/support.h>
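
/*
 * PCI INTx model: bit device*4 + intx of pci_intx tracks the raw state
 * of each (device, pin) line. Several lines may share a GSI, and each
 * PCI link may route to a shared ISA IRQ, so the level actually seen by
 * the vIOAPIC/vPIC is the reference count in gsi_assert_count[] (and in
 * pci_link_assert_count[] for the links): an edge is injected only when
 * a count transitions between zero and non-zero.
 */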
static void __hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    if ( hvm_irq->gsi_assert_count[gsi]++ == 0 )
        vioapic_irq_positive_edge(d, gsi);

    link    = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
         (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
    {
        vioapic_irq_positive_edge(d, isa_irq);
        vpic_irq_positive_edge(d, isa_irq);
    }
}

void hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    spin_lock(&d->arch.hvm_domain.irq_lock);
    __hvm_pci_intx_assert(d, device, intx);
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

static void __hvm_pci_intx_deassert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    if ( !__test_and_clear_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    --hvm_irq->gsi_assert_count[gsi];

    link    = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
         (--hvm_irq->gsi_assert_count[isa_irq] == 0) )
        vpic_irq_negative_edge(d, isa_irq);
}

void hvm_pci_intx_deassert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    spin_lock(&d->arch.hvm_domain.irq_lock);
    __hvm_pci_intx_deassert(d, device, intx);
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
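
/*
 * ISA IRQs are delivered through both legacy controllers: directly to
 * the vPIC and, via hvm_isa_irq_to_gsi(), to the vIOAPIC. The shared
 * gsi_assert_count[] again ensures an edge is injected only when the
 * net line state changes.
 */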
void hvm_isa_irq_assert(
    struct domain *d, unsigned int isa_irq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
         (hvm_irq->gsi_assert_count[gsi]++ == 0) )
    {
        vioapic_irq_positive_edge(d, gsi);
        vpic_irq_positive_edge(d, isa_irq);
    }

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

void hvm_isa_irq_deassert(
    struct domain *d, unsigned int isa_irq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
         (--hvm_irq->gsi_assert_count[gsi] == 0) )
        vpic_irq_negative_edge(d, isa_irq);

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
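
/*
 * Propagate VCPU0's event-channel upcall-pending flag to the configured
 * callback "via" (a plain GSI or a PCI INTx line). The lock-free checks
 * at the top keep the common no-callback case cheap.
 */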
void hvm_set_callback_irq_level(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi, pdev, pintx, asserted;

    /* Fast lock-free tests. */
    if ( (v->vcpu_id != 0) ||
         (hvm_irq->callback_via_type == HVMIRQ_callback_none) )
        return;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    /* NB. Do not check the evtchn_upcall_mask. It is not used in HVM mode. */
    asserted = !!vcpu_info(v, evtchn_upcall_pending);
    if ( hvm_irq->callback_via_asserted == asserted )
        goto out;
    hvm_irq->callback_via_asserted = asserted;

    /* Callback status has changed. Update the callback via. */
    switch ( hvm_irq->callback_via_type )
    {
    case HVMIRQ_callback_gsi:
        gsi = hvm_irq->callback_via.gsi;
        if ( asserted && (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        {
            vioapic_irq_positive_edge(d, gsi);
            if ( gsi <= 15 )
                vpic_irq_positive_edge(d, gsi);
        }
        else if ( !asserted && (--hvm_irq->gsi_assert_count[gsi] == 0) )
        {
            if ( gsi <= 15 )
                vpic_irq_negative_edge(d, gsi);
        }
        break;
    case HVMIRQ_callback_pci_intx:
        pdev  = hvm_irq->callback_via.pci.dev;
        pintx = hvm_irq->callback_via.pci.intx;
        if ( asserted )
            __hvm_pci_intx_assert(d, pdev, pintx);
        else
            __hvm_pci_intx_deassert(d, pdev, pintx);
    default:
        break;
    }

 out:
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
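
/*
 * Reroute a PCI link to a new ISA IRQ; this is the function adjusted by
 * this changeset. If the link is currently asserted, the old IRQ must be
 * lowered and the new one raised so gsi_assert_count[] stays consistent
 * with the routing table. An IRQ value of zero means the link is not
 * routed.
 */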
void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    u8 old_isa_irq;

    ASSERT((link <= 3) && (isa_irq <= 15));

    spin_lock(&d->arch.hvm_domain.irq_lock);

    old_isa_irq = hvm_irq->pci_link.route[link];
    if ( old_isa_irq == isa_irq )
        goto out;
    hvm_irq->pci_link.route[link] = isa_irq;

    /* PCI pass-through fixup. */
    if ( hvm_irq->dpci && hvm_irq->dpci->girq[old_isa_irq].valid )
    {
        uint32_t device = hvm_irq->dpci->girq[old_isa_irq].device;
        uint32_t intx = hvm_irq->dpci->girq[old_isa_irq].intx;
        if ( link == hvm_pci_intx_link(device, intx) )
        {
            hvm_irq->dpci->girq[isa_irq] = hvm_irq->dpci->girq[old_isa_irq];
            hvm_irq->dpci->girq[old_isa_irq].valid = 0;
        }
    }

    if ( hvm_irq->pci_link_assert_count[link] == 0 )
        goto out;

    if ( old_isa_irq && (--hvm_irq->gsi_assert_count[old_isa_irq] == 0) )
        vpic_irq_negative_edge(d, old_isa_irq);

    if ( isa_irq && (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
    {
        vioapic_irq_positive_edge(d, isa_irq);
        vpic_irq_positive_edge(d, isa_irq);
    }

 out:
    spin_unlock(&d->arch.hvm_domain.irq_lock);

    dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n",
            d->domain_id, link, old_isa_irq, isa_irq);
}
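
/*
 * Install a new callback via. In the 64-bit 'via' argument, bits 56-63
 * select the delivery type (0 = GSI, 1 = PCI INTx, per the +1 mapping
 * below), the low byte carries the GSI number, and for PCI INTx bits
 * 11-15 carry the device and bits 0-1 the pin. An asserted old via is
 * torn down before the new one takes effect.
 */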
void hvm_set_callback_via(struct domain *d, uint64_t via)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi=0, pdev=0, pintx=0;
    uint8_t via_type;

    via_type = (uint8_t)(via >> 56) + 1;
    if ( ((via_type == HVMIRQ_callback_gsi) && (via == 0)) ||
         (via_type > HVMIRQ_callback_pci_intx) )
        via_type = HVMIRQ_callback_none;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    /* Tear down old callback via. */
    if ( hvm_irq->callback_via_asserted )
    {
        switch ( hvm_irq->callback_via_type )
        {
        case HVMIRQ_callback_gsi:
            gsi = hvm_irq->callback_via.gsi;
            if ( (--hvm_irq->gsi_assert_count[gsi] == 0) && (gsi <= 15) )
                vpic_irq_negative_edge(d, gsi);
            break;
        case HVMIRQ_callback_pci_intx:
            pdev  = hvm_irq->callback_via.pci.dev;
            pintx = hvm_irq->callback_via.pci.intx;
            __hvm_pci_intx_deassert(d, pdev, pintx);
            break;
        default:
            break;
        }
    }

    /* Set up new callback via. */
    switch ( hvm_irq->callback_via_type = via_type )
    {
    case HVMIRQ_callback_gsi:
        gsi = hvm_irq->callback_via.gsi = (uint8_t)via;
        if ( (gsi == 0) || (gsi >= ARRAY_SIZE(hvm_irq->gsi_assert_count)) )
            hvm_irq->callback_via_type = HVMIRQ_callback_none;
        else if ( hvm_irq->callback_via_asserted &&
                  (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        {
            vioapic_irq_positive_edge(d, gsi);
            if ( gsi <= 15 )
                vpic_irq_positive_edge(d, gsi);
        }
        break;
    case HVMIRQ_callback_pci_intx:
        pdev  = hvm_irq->callback_via.pci.dev  = (uint8_t)(via >> 11) & 31;
        pintx = hvm_irq->callback_via.pci.intx = (uint8_t)via & 3;
        if ( hvm_irq->callback_via_asserted )
            __hvm_pci_intx_assert(d, pdev, pintx);
        break;
    default:
        break;
    }

    spin_unlock(&d->arch.hvm_domain.irq_lock);

    dprintk(XENLOG_G_INFO, "Dom%u callback via changed to ", d->domain_id);
    switch ( via_type )
    {
    case HVMIRQ_callback_gsi:
        printk("GSI %u\n", gsi);
        break;
    case HVMIRQ_callback_pci_intx:
        printk("PCI INTx Dev 0x%02x Int%c\n", pdev, 'A' + pintx);
        break;
    default:
        printk("None\n");
        break;
    }
}
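
/*
 * Interrupt sources are polled in fixed priority order: NMI, then the
 * vPIC (ExtINT, accepted only when vlapic_accept_pic_intr() allows it),
 * then the local APIC. The ack path below mirrors this order and may
 * still return hvm_intack_none if the interrupt was retracted meanwhile.
 */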
struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
{
    struct hvm_domain *plat = &v->domain->arch.hvm_domain;
    int vector;

    if ( unlikely(v->nmi_pending) )
        return hvm_intack_nmi;

    if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
        return hvm_intack_pic(0);

    vector = vlapic_has_pending_irq(v);
    if ( vector != -1 )
        return hvm_intack_lapic(vector);

    return hvm_intack_none;
}

struct hvm_intack hvm_vcpu_ack_pending_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    int vector;

    switch ( intack.source )
    {
    case hvm_intsrc_nmi:
        if ( !test_and_clear_bool(v->nmi_pending) )
            intack = hvm_intack_none;
        break;
    case hvm_intsrc_pic:
        ASSERT(v->vcpu_id == 0);
        if ( (vector = vpic_ack_pending_irq(v)) == -1 )
            intack = hvm_intack_none;
        else
            intack.vector = (uint8_t)vector;
        break;
    case hvm_intsrc_lapic:
        if ( !vlapic_ack_pending_irq(v, intack.vector) )
            intack = hvm_intack_none;
        break;
    default:
        intack = hvm_intack_none;
        break;
    }

    return intack;
}
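
/*
 * Query helpers: report the vector an ISA IRQ would be delivered with
 * (vPIC vector base or vIOAPIC redirection entry) and whether the line
 * is currently masked.
 */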
int get_isa_irq_vector(struct vcpu *v, int isa_irq, enum hvm_intsrc src)
{
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    if ( src == hvm_intsrc_pic )
        return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
                + (isa_irq & 7));

    ASSERT(src == hvm_intsrc_lapic);
    return domain_vioapic(v->domain)->redirtbl[gsi].fields.vector;
}

int is_isa_irq_masked(struct vcpu *v, int isa_irq)
{
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    if ( is_lvtt(v, isa_irq) )
        return !is_lvtt_enabled(v);

    return ((v->domain->arch.hvm_domain.vpic[isa_irq >> 3].imr &
             (1 << (isa_irq & 7))) &&
            domain_vioapic(v->domain)->redirtbl[gsi].fields.mask);
}

int hvm_local_events_need_delivery(struct vcpu *v)
{
    struct hvm_intack intack;

    /* TODO: Get rid of event-channel special case. */
    if ( vcpu_info(v, evtchn_upcall_pending) )
        intack = hvm_intack_pic(0);
    else
        intack = hvm_vcpu_has_pending_irq(v);

    if ( likely(intack.source == hvm_intsrc_none) )
        return 0;

    return !hvm_interrupt_blocked(v, intack);
}

#if 0 /* Keep for debugging */
static void irq_dump(struct domain *d)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int i;
    printk("PCI 0x%16.16"PRIx64"%16.16"PRIx64
           " ISA 0x%8.8"PRIx32" ROUTE %u %u %u %u\n",
           hvm_irq->pci_intx.pad[0], hvm_irq->pci_intx.pad[1],
           (uint32_t) hvm_irq->isa_irq.pad[0],
           hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1],
           hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]);
    for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 )
        printk("GSI %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8
               " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
               hvm_irq->gsi_assert_count[i+0],
               hvm_irq->gsi_assert_count[i+1],
               hvm_irq->gsi_assert_count[i+2],
               hvm_irq->gsi_assert_count[i+3],
               hvm_irq->gsi_assert_count[i+4],
               hvm_irq->gsi_assert_count[i+5],
               hvm_irq->gsi_assert_count[i+6],
               hvm_irq->gsi_assert_count[i+7]);
    printk("Link %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
           hvm_irq->pci_link_assert_count[0],
           hvm_irq->pci_link_assert_count[1],
           hvm_irq->pci_link_assert_count[2],
           hvm_irq->pci_link_assert_count[3]);
    printk("Callback via %i:0x%"PRIx32",%s asserted\n",
           hvm_irq->callback_via_type, hvm_irq->callback_via.gsi,
           hvm_irq->callback_via_asserted ? "" : " not");
}
#endif
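
/*
 * Save/restore: only the raw line state (PCI INTx bits, ISA IRQ bits and
 * the PCI link routes) is saved. The derived counts in gsi_assert_count[]
 * and pci_link_assert_count[] are recomputed at load time, which is why
 * the load handlers below rely on running in PCI, ISA, LINK order.
 */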
static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int asserted, pdev, pintx;
    int rc;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    pdev  = hvm_irq->callback_via.pci.dev;
    pintx = hvm_irq->callback_via.pci.intx;
    asserted = (hvm_irq->callback_via_asserted &&
                (hvm_irq->callback_via_type == HVMIRQ_callback_pci_intx));

    /*
     * Deassert virtual interrupt via PCI INTx line. The virtual interrupt
     * status is not saved/restored, so the INTx line must be deasserted in
     * the restore context.
     */
    if ( asserted )
        __hvm_pci_intx_deassert(d, pdev, pintx);

    /* Save PCI IRQ lines */
    rc = hvm_save_entry(PCI_IRQ, 0, h, &hvm_irq->pci_intx);

    if ( asserted )
        __hvm_pci_intx_assert(d, pdev, pintx);

    spin_unlock(&d->arch.hvm_domain.irq_lock);

    return rc;
}

static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;

    /* Save ISA IRQ lines */
    return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) );
}

static int irq_save_link(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;

    /* Save PCI-ISA link state */
    return ( hvm_save_entry(PCI_LINK, 0, h, &hvm_irq->pci_link) );
}

static int irq_load_pci(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int link, dev, intx, gsi;

    /* Load the PCI IRQ lines */
    if ( hvm_load_entry(PCI_IRQ, h, &hvm_irq->pci_intx) != 0 )
        return -EINVAL;

    /* Clear the PCI link assert counts */
    for ( link = 0; link < 4; link++ )
        hvm_irq->pci_link_assert_count[link] = 0;

    /* Clear the GSI link assert counts */
    for ( gsi = 0; gsi < VIOAPIC_NUM_PINS; gsi++ )
        hvm_irq->gsi_assert_count[gsi] = 0;

    /* Recalculate the counts from the IRQ line state */
    for ( dev = 0; dev < 32; dev++ )
        for ( intx = 0; intx < 4; intx++ )
            if ( test_bit(dev*4 + intx, &hvm_irq->pci_intx.i) )
            {
                /* Direct GSI assert */
                gsi = hvm_pci_intx_gsi(dev, intx);
                hvm_irq->gsi_assert_count[gsi]++;
                /* PCI-ISA bridge assert */
                link = hvm_pci_intx_link(dev, intx);
                hvm_irq->pci_link_assert_count[link]++;
            }

    return 0;
}

static int irq_load_isa(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int irq;

    /* Load the ISA IRQ lines */
    if ( hvm_load_entry(ISA_IRQ, h, &hvm_irq->isa_irq) != 0 )
        return -EINVAL;

    /* Adjust the GSI assert counts for the ISA IRQ line state.
     * This relies on the PCI IRQ state being loaded first. */
    for ( irq = 0; irq < 16; irq++ )
        if ( test_bit(irq, &hvm_irq->isa_irq.i) )
            hvm_irq->gsi_assert_count[hvm_isa_irq_to_gsi(irq)]++;

    return 0;
}

static int irq_load_link(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int link, gsi;

    /* Load the PCI-ISA IRQ link routing table */
    if ( hvm_load_entry(PCI_LINK, h, &hvm_irq->pci_link) != 0 )
        return -EINVAL;

    /* Sanity check */
    for ( link = 0; link < 4; link++ )
        if ( hvm_irq->pci_link.route[link] > 15 )
        {
            gdprintk(XENLOG_ERR,
                     "HVM restore: PCI-ISA link %u out of range (%u)\n",
                     link, hvm_irq->pci_link.route[link]);
            return -EINVAL;
        }

    /* Adjust the GSI assert counts for the link outputs.
     * This relies on the PCI and ISA IRQ state being loaded first. */
    for ( link = 0; link < 4; link++ )
    {
        if ( hvm_irq->pci_link_assert_count[link] != 0 )
        {
            gsi = hvm_irq->pci_link.route[link];
            if ( gsi != 0 )
                hvm_irq->gsi_assert_count[gsi]++;
        }
    }

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci,
                          1, HVMSR_PER_DOM);
HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa,
                          1, HVMSR_PER_DOM);
HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link,
                          1, HVMSR_PER_DOM);