ia64/xen-unstable: xen/arch/x86/hvm/irq.c @ 15833:447db1235adf

hvm: Ignore NMI deliveries for now, until hardware taskswitch is emulated.
Signed-off-by: Keir Fraser <keir@xensource.com>

Author: kfraser@localhost.localdomain
Date:   Thu Sep 06 11:34:38 2007 +0100

/******************************************************************************
 * irq.c
 *
 * Interrupt distribution and delivery logic.
 *
 * Copyright (c) 2006, K A Fraser, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/event.h>
#include <xen/sched.h>
#include <asm/hvm/domain.h>
#include <asm/hvm/support.h>

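/*
 * Assert a PCI INTx line (caller must hold irq_lock). The line's GSI is
 * raised at the vIOAPIC on a 0->1 transition of its assertion count, and the
 * PCI-ISA link it routes through may additionally raise the routed ISA IRQ
 * at the vIOAPIC and vPIC.
 */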
static void __hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    if ( hvm_irq->gsi_assert_count[gsi]++ == 0 )
        vioapic_irq_positive_edge(d, gsi);

    link    = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
         (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
    {
        vioapic_irq_positive_edge(d, isa_irq);
        vpic_irq_positive_edge(d, isa_irq);
    }
}

void hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    spin_lock(&d->arch.hvm_domain.irq_lock);
    __hvm_pci_intx_assert(d, device, intx);
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

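/*
 * Deassert a PCI INTx line (caller must hold irq_lock): the mirror image of
 * __hvm_pci_intx_assert(), dropping the GSI and PCI-ISA link assertion
 * counts and lowering the vPIC input when the routed ISA IRQ falls idle.
 */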
static void __hvm_pci_intx_deassert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    if ( !__test_and_clear_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    --hvm_irq->gsi_assert_count[gsi];

    link    = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
         (--hvm_irq->gsi_assert_count[isa_irq] == 0) )
        vpic_irq_negative_edge(d, isa_irq);
}

void hvm_pci_intx_deassert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    spin_lock(&d->arch.hvm_domain.irq_lock);
    __hvm_pci_intx_deassert(d, device, intx);
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

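/*
 * Assert an ISA IRQ (0-15) directly: raise the corresponding GSI at the
 * vIOAPIC and the IRQ at the vPIC on a 0->1 transition of the line.
 */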
void hvm_isa_irq_assert(
    struct domain *d, unsigned int isa_irq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
         (hvm_irq->gsi_assert_count[gsi]++ == 0) )
    {
        vioapic_irq_positive_edge(d, gsi);
        vpic_irq_positive_edge(d, isa_irq);
    }

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

void hvm_isa_irq_deassert(
    struct domain *d, unsigned int isa_irq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
         (--hvm_irq->gsi_assert_count[gsi] == 0) )
        vpic_irq_negative_edge(d, isa_irq);

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

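/*
 * Recompute the event-channel callback interrupt for the current domain.
 * The callback line mirrors VCPU0's evtchn_upcall_pending flag and is
 * delivered either as a plain GSI or as an emulated PCI INTx pin, depending
 * on how the callback via has been configured.
 */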
void hvm_set_callback_irq_level(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi, pdev, pintx, asserted;

    /* Fast lock-free tests. */
    if ( (v->vcpu_id != 0) ||
         (hvm_irq->callback_via_type == HVMIRQ_callback_none) )
        return;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    /* NB. Do not check the evtchn_upcall_mask. It is not used in HVM mode. */
    asserted = !!vcpu_info(v, evtchn_upcall_pending);
    if ( hvm_irq->callback_via_asserted == asserted )
        goto out;
    hvm_irq->callback_via_asserted = asserted;

    /* Callback status has changed. Update the callback via. */
    switch ( hvm_irq->callback_via_type )
    {
    case HVMIRQ_callback_gsi:
        gsi = hvm_irq->callback_via.gsi;
        if ( asserted && (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        {
            vioapic_irq_positive_edge(d, gsi);
            if ( gsi <= 15 )
                vpic_irq_positive_edge(d, gsi);
        }
        else if ( !asserted && (--hvm_irq->gsi_assert_count[gsi] == 0) )
        {
            if ( gsi <= 15 )
                vpic_irq_negative_edge(d, gsi);
        }
        break;
    case HVMIRQ_callback_pci_intx:
        pdev  = hvm_irq->callback_via.pci.dev;
        pintx = hvm_irq->callback_via.pci.intx;
        if ( asserted )
            __hvm_pci_intx_assert(d, pdev, pintx);
        else
            __hvm_pci_intx_deassert(d, pdev, pintx);
    default:
        break;
    }

 out:
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

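/*
 * Re-route PCI interrupt link <link> (0-3) to ISA IRQ <isa_irq>. If the link
 * output is currently asserted, the assertion is transferred from the old
 * ISA IRQ to the new one.
 */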
void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    u8 old_isa_irq;

    ASSERT((link <= 3) && (isa_irq <= 15));

    spin_lock(&d->arch.hvm_domain.irq_lock);

    old_isa_irq = hvm_irq->pci_link.route[link];
    if ( old_isa_irq == isa_irq )
        goto out;
    hvm_irq->pci_link.route[link] = isa_irq;

    if ( hvm_irq->pci_link_assert_count[link] == 0 )
        goto out;

    if ( old_isa_irq && (--hvm_irq->gsi_assert_count[old_isa_irq] == 0) )
        vpic_irq_negative_edge(d, old_isa_irq);

    if ( isa_irq && (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
    {
        vioapic_irq_positive_edge(d, isa_irq);
        vpic_irq_positive_edge(d, isa_irq);
    }

 out:
    spin_unlock(&d->arch.hvm_domain.irq_lock);

    dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n",
            d->domain_id, link, old_isa_irq, isa_irq);
}

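/*
 * (Re)configure the callback via from its 64-bit encoding. As decoded below:
 * bits 63:56 select the delivery type (GSI or PCI INTx); for GSI the low
 * byte carries the GSI number, for PCI INTx bits 15:11 carry the device and
 * bits 1:0 the INTx pin. Any currently asserted callback is moved from the
 * old via to the new one.
 */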
void hvm_set_callback_via(struct domain *d, uint64_t via)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi=0, pdev=0, pintx=0;
    uint8_t via_type;

    via_type = (uint8_t)(via >> 56) + 1;
    if ( ((via_type == HVMIRQ_callback_gsi) && (via == 0)) ||
         (via_type > HVMIRQ_callback_pci_intx) )
        via_type = HVMIRQ_callback_none;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    /* Tear down old callback via. */
    if ( hvm_irq->callback_via_asserted )
    {
        switch ( hvm_irq->callback_via_type )
        {
        case HVMIRQ_callback_gsi:
            gsi = hvm_irq->callback_via.gsi;
            if ( (--hvm_irq->gsi_assert_count[gsi] == 0) && (gsi <= 15) )
                vpic_irq_negative_edge(d, gsi);
            break;
        case HVMIRQ_callback_pci_intx:
            pdev  = hvm_irq->callback_via.pci.dev;
            pintx = hvm_irq->callback_via.pci.intx;
            __hvm_pci_intx_deassert(d, pdev, pintx);
            break;
        default:
            break;
        }
    }

    /* Set up new callback via. */
    switch ( hvm_irq->callback_via_type = via_type )
    {
    case HVMIRQ_callback_gsi:
        gsi = hvm_irq->callback_via.gsi = (uint8_t)via;
        if ( (gsi == 0) || (gsi >= ARRAY_SIZE(hvm_irq->gsi_assert_count)) )
            hvm_irq->callback_via_type = HVMIRQ_callback_none;
        else if ( hvm_irq->callback_via_asserted &&
                  (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        {
            vioapic_irq_positive_edge(d, gsi);
            if ( gsi <= 15 )
                vpic_irq_positive_edge(d, gsi);
        }
        break;
    case HVMIRQ_callback_pci_intx:
        pdev  = hvm_irq->callback_via.pci.dev  = (uint8_t)(via >> 11) & 31;
        pintx = hvm_irq->callback_via.pci.intx = (uint8_t)via & 3;
        if ( hvm_irq->callback_via_asserted )
            __hvm_pci_intx_assert(d, pdev, pintx);
        break;
    default:
        break;
    }

    spin_unlock(&d->arch.hvm_domain.irq_lock);

    dprintk(XENLOG_G_INFO, "Dom%u callback via changed to ", d->domain_id);
    switch ( via_type )
    {
    case HVMIRQ_callback_gsi:
        printk("GSI %u\n", gsi);
        break;
    case HVMIRQ_callback_pci_intx:
        printk("PCI INTx Dev 0x%02x Int%c\n", pdev, 'A' + pintx);
        break;
    default:
        printk("None\n");
        break;
    }
}

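/*
 * Return the highest-priority interrupt source currently pending for this
 * VCPU: NMI first, then the local APIC, then the PIC (whose output is only
 * visible if the LAPIC will accept PIC interrupts).
 */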
enum hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
{
    struct hvm_domain *plat = &v->domain->arch.hvm_domain;

    if ( unlikely(v->nmi_pending) )
        return hvm_intack_nmi;

    if ( vlapic_has_interrupt(v) != -1 )
        return hvm_intack_lapic;

    if ( !vlapic_accept_pic_intr(v) )
        return hvm_intack_none;

    return plat->vpic[0].int_output ? hvm_intack_pic : hvm_intack_none;
}

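/*
 * Acknowledge a pending interrupt of the given type and return its vector.
 * NMIs are currently dropped with a warning rather than injected, pending
 * hardware task-switch emulation; the disabled #if 0 branch is the intended
 * eventual behaviour.
 */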
int hvm_vcpu_ack_pending_irq(struct vcpu *v, enum hvm_intack type, int *vector)
{
    switch ( type )
    {
    case hvm_intack_nmi:
#if 0
        return test_and_clear_bool(v->nmi_pending);
#else
        if ( test_and_clear_bool(v->nmi_pending) )
            gdprintk(XENLOG_WARNING, "Dropping NMI delivery to %d:%d\n",
                     v->domain->domain_id, v->vcpu_id);
        break;
#endif
    case hvm_intack_lapic:
        return ((*vector = cpu_get_apic_interrupt(v)) != -1);
    case hvm_intack_pic:
        ASSERT(v->vcpu_id == 0);
        return ((*vector = cpu_get_pic_interrupt(v)) != -1);
    default:
        break;
    }

    return 0;
}

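/*
 * Translate an ISA IRQ into the vector the guest will observe: the vPIC
 * base vector for PIC delivery, or the vIOAPIC redirection entry's vector
 * for LAPIC delivery.
 */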
int get_isa_irq_vector(struct vcpu *v, int isa_irq, enum hvm_intack src)
{
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    if ( src == hvm_intack_pic )
        return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
                + (isa_irq & 7));

    ASSERT(src == hvm_intack_lapic);
    return domain_vioapic(v->domain)->redirtbl[gsi].fields.vector;
}

int is_isa_irq_masked(struct vcpu *v, int isa_irq)
{
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    if ( is_lvtt(v, isa_irq) )
        return !is_lvtt_enabled(v);

    return ((v->domain->arch.hvm_domain.vpic[isa_irq >> 3].imr &
             (1 << (isa_irq & 7))) &&
            domain_vioapic(v->domain)->redirtbl[gsi].fields.mask);
}

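/*
 * Return whether this VCPU has an interrupt that could be delivered right
 * now, i.e. a pending source that is not blocked by the current interrupt
 * enable state.
 */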
int hvm_local_events_need_delivery(struct vcpu *v)
{
    enum hvm_intack type;

    /* TODO: Get rid of event-channel special case. */
    if ( vcpu_info(v, evtchn_upcall_pending) )
        type = hvm_intack_pic;
    else
        type = hvm_vcpu_has_pending_irq(v);

    if ( likely(type == hvm_intack_none) )
        return 0;

    return hvm_interrupts_enabled(v, type);
}

#if 0 /* Keep for debugging */
static void irq_dump(struct domain *d)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int i;
    printk("PCI 0x%16.16"PRIx64"%16.16"PRIx64
           " ISA 0x%8.8"PRIx32" ROUTE %u %u %u %u\n",
           hvm_irq->pci_intx.pad[0], hvm_irq->pci_intx.pad[1],
           (uint32_t) hvm_irq->isa_irq.pad[0],
           hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1],
           hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]);
    for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 )
        printk("GSI %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8
               " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
               hvm_irq->gsi_assert_count[i+0],
               hvm_irq->gsi_assert_count[i+1],
               hvm_irq->gsi_assert_count[i+2],
               hvm_irq->gsi_assert_count[i+3],
               hvm_irq->gsi_assert_count[i+4],
               hvm_irq->gsi_assert_count[i+5],
               hvm_irq->gsi_assert_count[i+6],
               hvm_irq->gsi_assert_count[i+7]);
    printk("Link %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
           hvm_irq->pci_link_assert_count[0],
           hvm_irq->pci_link_assert_count[1],
           hvm_irq->pci_link_assert_count[2],
           hvm_irq->pci_link_assert_count[3]);
    printk("Callback via %i:0x%"PRIx32",%s asserted\n",
           hvm_irq->callback_via_type, hvm_irq->callback_via.gsi,
           hvm_irq->callback_via_asserted ? "" : " not");
}
#endif

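/*
 * Save/restore handlers for the PCI_IRQ, ISA_IRQ and PCI_LINK state records,
 * registered via HVM_REGISTER_SAVE_RESTORE at the bottom of this file. Only
 * the raw line/route state is saved; the derived assertion counts are
 * recomputed on load.
 */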
static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int asserted, pdev, pintx;
    int rc;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    pdev  = hvm_irq->callback_via.pci.dev;
    pintx = hvm_irq->callback_via.pci.intx;
    asserted = (hvm_irq->callback_via_asserted &&
                (hvm_irq->callback_via_type == HVMIRQ_callback_pci_intx));

    /*
     * Deassert the virtual interrupt via the PCI INTx line. The virtual
     * interrupt status is not saved/restored, so the INTx line must be
     * deasserted in the restore context.
     */
    if ( asserted )
        __hvm_pci_intx_deassert(d, pdev, pintx);

    /* Save PCI IRQ lines */
    rc = hvm_save_entry(PCI_IRQ, 0, h, &hvm_irq->pci_intx);

    if ( asserted )
        __hvm_pci_intx_assert(d, pdev, pintx);

    spin_unlock(&d->arch.hvm_domain.irq_lock);

    return rc;
}

static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;

    /* Save ISA IRQ lines */
    return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) );
}

static int irq_save_link(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;

    /* Save PCI-ISA link state */
    return ( hvm_save_entry(PCI_LINK, 0, h, &hvm_irq->pci_link) );
}

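/*
 * Load the PCI INTx line state and rebuild the GSI and PCI-ISA link
 * assertion counts from it; those counters are not part of the save record.
 */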
static int irq_load_pci(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int link, dev, intx, gsi;

    /* Load the PCI IRQ lines */
    if ( hvm_load_entry(PCI_IRQ, h, &hvm_irq->pci_intx) != 0 )
        return -EINVAL;

    /* Clear the PCI link assert counts */
    for ( link = 0; link < 4; link++ )
        hvm_irq->pci_link_assert_count[link] = 0;

    /* Clear the GSI link assert counts */
    for ( gsi = 0; gsi < VIOAPIC_NUM_PINS; gsi++ )
        hvm_irq->gsi_assert_count[gsi] = 0;

    /* Recalculate the counts from the IRQ line state */
    for ( dev = 0; dev < 32; dev++ )
        for ( intx = 0; intx < 4; intx++ )
            if ( test_bit(dev*4 + intx, &hvm_irq->pci_intx.i) )
            {
                /* Direct GSI assert */
                gsi = hvm_pci_intx_gsi(dev, intx);
                hvm_irq->gsi_assert_count[gsi]++;
                /* PCI-ISA bridge assert */
                link = hvm_pci_intx_link(dev, intx);
                hvm_irq->pci_link_assert_count[link]++;
            }

    return 0;
}

static int irq_load_isa(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int irq;

    /* Load the ISA IRQ lines */
    if ( hvm_load_entry(ISA_IRQ, h, &hvm_irq->isa_irq) != 0 )
        return -EINVAL;

    /* Adjust the GSI assert counts for the ISA IRQ line state.
     * This relies on the PCI IRQ state being loaded first. */
    for ( irq = 0; irq < 16; irq++ )
        if ( test_bit(irq, &hvm_irq->isa_irq.i) )
            hvm_irq->gsi_assert_count[hvm_isa_irq_to_gsi(irq)]++;

    return 0;
}

static int irq_load_link(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int link, gsi;

    /* Load the PCI-ISA IRQ link routing table */
    if ( hvm_load_entry(PCI_LINK, h, &hvm_irq->pci_link) != 0 )
        return -EINVAL;

    /* Sanity check */
    for ( link = 0; link < 4; link++ )
        if ( hvm_irq->pci_link.route[link] > 15 )
        {
            gdprintk(XENLOG_ERR,
                     "HVM restore: PCI-ISA link %u out of range (%u)\n",
                     link, hvm_irq->pci_link.route[link]);
            return -EINVAL;
        }

    /* Adjust the GSI assert counts for the link outputs.
     * This relies on the PCI and ISA IRQ state being loaded first. */
    for ( link = 0; link < 4; link++ )
    {
        if ( hvm_irq->pci_link_assert_count[link] != 0 )
        {
            gsi = hvm_irq->pci_link.route[link];
            if ( gsi != 0 )
                hvm_irq->gsi_assert_count[gsi]++;
        }
    }

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci,
                          1, HVMSR_PER_DOM);
HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa,
                          1, HVMSR_PER_DOM);
HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link,
                          1, HVMSR_PER_DOM);