direct-io.hg

view xen/arch/x86/hvm/vpic.c @ 15388:50358c4b37f4

hvm: Support injection of virtual NMIs and clean up ExtInt handling in general.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Jun 20 11:50:16 2007 +0100 (2007-06-20)
parents 656b8175f4f2
children d7e3224b661a

/*
 * i8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2005 Intel Corporation
 * Copyright (c) 2006 Keir Fraser, XenSource Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/event.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>

#define vpic_domain(v) (container_of((v), struct domain, \
                        arch.hvm_domain.vpic[!(v)->is_master]))
#define __vpic_lock(v) &container_of((v), struct hvm_domain, \
                                     vpic[!(v)->is_master])->irq_lock
#define vpic_lock(v)   spin_lock(__vpic_lock(v))
#define vpic_unlock(v) spin_unlock(__vpic_lock(v))
#define vpic_is_locked(v) spin_is_locked(__vpic_lock(v))
#define vpic_elcr_mask(v) ((v)->is_master ? (uint8_t)0xf8 : (uint8_t)0xde)
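
/*
 * Note on the ELCR masks above: only IRQs whose trigger mode the guest may
 * change have writable ELCR bits. Master mask 0xf8 keeps IRQ0-2 (timer,
 * keyboard, cascade) edge-triggered; slave mask 0xde keeps IRQ8 (RTC) and
 * IRQ13 (FPU) edge-triggered, matching the usual PC chipset behaviour.
 */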

/* Return the highest priority found in mask. Return 8 if none. */
#define VPIC_PRIO_NONE 8
static int vpic_get_priority(struct hvm_hw_vpic *vpic, uint8_t mask)
{
    int prio;

    ASSERT(vpic_is_locked(vpic));

    if ( mask == 0 )
        return VPIC_PRIO_NONE;

    /* prio = ffs(mask ROR vpic->priority_add); */
    asm ( "ror %%cl,%b1 ; bsf %1,%0"
          : "=r" (prio) : "r" ((uint32_t)mask), "c" (vpic->priority_add) );
    return prio;
}
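
/*
 * Worked example for the rotate-and-scan above: with priority_add == 3 and
 * only IR1 pending (mask == 0x02), rotating right by 3 moves bit 1 to bit 6,
 * so bsf yields priority 6; callers recover the IRQ number as
 * (priority + priority_add) & 7 == 1.
 */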

/* Return the PIC's highest priority pending interrupt. Return -1 if none. */
static int vpic_get_highest_priority_irq(struct hvm_hw_vpic *vpic)
{
    int cur_priority, priority, irq;
    uint8_t mask;

    ASSERT(vpic_is_locked(vpic));

    mask = vpic->irr & ~vpic->imr;
    priority = vpic_get_priority(vpic, mask);
    if ( priority == VPIC_PRIO_NONE )
        return -1;

    irq = (priority + vpic->priority_add) & 7;

    /*
     * Compute current priority. If special fully nested mode on the master,
     * the IRQ coming from the slave is not taken into account for the
     * priority computation. In special mask mode, masked interrupts do not
     * block lower-priority interrupts even if their IS bit is set.
     */
    mask = vpic->isr;
    if ( vpic->special_fully_nested_mode && vpic->is_master && (irq == 2) )
        mask &= ~(1 << 2);
    if ( vpic->special_mask_mode )
        mask &= ~vpic->imr;
    cur_priority = vpic_get_priority(vpic, mask);

    /* If a higher priority is found then an irq should be generated. */
    return (priority < cur_priority) ? irq : -1;
}

static void vpic_update_int_output(struct hvm_hw_vpic *vpic)
{
    int irq;

    ASSERT(vpic_is_locked(vpic));

    irq = vpic_get_highest_priority_irq(vpic);
    if ( vpic->int_output == (irq >= 0) )
        return;

    /* INT line transition L->H or H->L. */
    vpic->int_output = !vpic->int_output;

    if ( vpic->int_output )
    {
        if ( vpic->is_master )
        {
            /* Master INT line is connected to VCPU0's VLAPIC LVT0. */
            struct vcpu *v = vpic_domain(vpic)->vcpu[0];
            if ( (v != NULL) && vlapic_accept_pic_intr(v) )
                vcpu_kick(v);
        }
        else
        {
            /* Assert slave line in master PIC. */
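            /*
             * The master's state lives at vpic[-1]: the two PICs are
             * adjacent entries of the domain's vpic[] array, master at
             * index 0 (see vpic_init()).
             */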
            (--vpic)->irr |= 1 << 2;
            vpic_update_int_output(vpic);
        }
    }
    else if ( !vpic->is_master )
    {
        /* Clear slave line in master PIC. */
        (--vpic)->irr &= ~(1 << 2);
        vpic_update_int_output(vpic);
    }
}

static void __vpic_intack(struct hvm_hw_vpic *vpic, int irq)
{
    uint8_t mask = 1 << irq;

    ASSERT(vpic_is_locked(vpic));

    /* Edge-triggered: clear the IRR (forget the edge). */
    if ( !(vpic->elcr & mask) )
        vpic->irr &= ~mask;

    if ( !vpic->auto_eoi )
        vpic->isr |= mask;
    else if ( vpic->rotate_on_auto_eoi )
        vpic->priority_add = (irq + 1) & 7;

    vpic_update_int_output(vpic);
}

static int vpic_intack(struct hvm_hw_vpic *vpic)
{
    int irq = -1;

    vpic_lock(vpic);

    if ( !vpic->int_output )
        goto out;

    irq = vpic_get_highest_priority_irq(vpic);
    BUG_ON(irq < 0);
    __vpic_intack(vpic, irq);

    if ( (irq == 2) && vpic->is_master )
    {
        vpic++; /* Slave PIC */
        irq = vpic_get_highest_priority_irq(vpic);
        BUG_ON(irq < 0);
        __vpic_intack(vpic, irq);
        irq += 8;
    }

 out:
    vpic_unlock(vpic);
    return irq;
}

static void vpic_ioport_write(
    struct hvm_hw_vpic *vpic, uint32_t addr, uint32_t val)
{
    int priority, cmd, irq;
    uint8_t mask;

    vpic_lock(vpic);

    addr &= 1;
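    /*
     * Even port (0x20/0xa0): ICW1, OCW2 and OCW3.
     * Odd port (0x21/0xa1): ICW2-ICW4 and OCW1 (the interrupt mask).
     */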
    if ( addr == 0 )
    {
        if ( val & 0x10 )
        {
            /* ICW1 */
            /* Clear edge-sensing logic. */
            vpic->irr &= vpic->elcr;

            /* No interrupts masked or in service. */
            vpic->imr = vpic->isr = 0;

            /* IR7 is lowest priority. */
            vpic->priority_add = 0;
            vpic->rotate_on_auto_eoi = 0;

            vpic->special_mask_mode = 0;
            vpic->readsel_isr = 0;
            vpic->poll = 0;

            if ( !(val & 1) )
            {
                /* No ICW4: ICW4 features are cleared. */
                vpic->auto_eoi = 0;
                vpic->special_fully_nested_mode = 0;
            }
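
            /*
             * init_state: bits 1:0 index the next expected ICW (1 => ICW2),
             * bit 2 latches ICW1.IC4 (ICW4 needed), bit 3 latches ICW1.SNGL
             * (single mode, i.e. no ICW3).
             */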
            vpic->init_state = ((val & 3) << 2) | 1;
        }
        else if ( val & 0x08 )
        {
            /* OCW3 */
            if ( val & 0x04 )
                vpic->poll = 1;
            if ( val & 0x02 )
                vpic->readsel_isr = val & 1;
            if ( val & 0x40 )
                vpic->special_mask_mode = (val >> 5) & 1;
        }
        else
        {
            /* OCW2 */
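            /* Bits 7:5 of OCW2 are R (rotate), SL (specific level), EOI. */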
            cmd = val >> 5;
            switch ( cmd )
            {
            case 0: /* Rotate in AEOI Mode (Clear) */
            case 4: /* Rotate in AEOI Mode (Set)   */
                vpic->rotate_on_auto_eoi = cmd >> 2;
                break;
            case 1: /* Non-Specific EOI            */
            case 5: /* Non-Specific EOI & Rotate   */
                mask = vpic->isr;
                if ( vpic->special_mask_mode )
                    mask &= ~vpic->imr; /* SMM: ignore masked IRs. */
                priority = vpic_get_priority(vpic, mask);
                if ( priority == VPIC_PRIO_NONE )
                    break;
                irq = (priority + vpic->priority_add) & 7;
                vpic->isr &= ~(1 << irq);
                if ( cmd == 5 )
                    vpic->priority_add = (irq + 1) & 7;
                break;
            case 3: /* Specific EOI                */
            case 7: /* Specific EOI & Rotate       */
                irq = val & 7;
                vpic->isr &= ~(1 << irq);
                if ( cmd == 7 )
                    vpic->priority_add = (irq + 1) & 7;
                break;
            case 6: /* Set Priority                */
                vpic->priority_add = (val + 1) & 7;
                break;
            }
        }
    }
    else
    {
        switch ( vpic->init_state & 3 )
        {
        case 0:
            /* OCW1 */
            vpic->imr = val;
            break;
        case 1:
#if 1 /* Delete me when vmxassist is retired. */
            /* Which mode is irqbase programmed in? */
            current->arch.hvm_vmx.irqbase_mode =
                current->arch.hvm_vmx.vmxassist_enabled;
#endif
            /* ICW2 */
            vpic->irq_base = val & 0xf8;
            vpic->init_state++;
            if ( !(vpic->init_state & 8) )
                break; /* CASCADE mode: wait for write to ICW3. */
            /* SNGL mode: fall through (no ICW3). */
        case 2:
            /* ICW3 */
            vpic->init_state++;
            if ( !(vpic->init_state & 4) )
                vpic->init_state = 0; /* No ICW4: init done. */
            break;
        case 3:
            /* ICW4 */
            vpic->special_fully_nested_mode = (val >> 4) & 1;
            vpic->auto_eoi = (val >> 1) & 1;
            vpic->init_state = 0;
            break;
        }
    }

    vpic_update_int_output(vpic);

    vpic_unlock(vpic);
}

static uint32_t vpic_ioport_read(struct hvm_hw_vpic *vpic, uint32_t addr)
{
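    /*
     * Poll mode (armed via OCW3): the next read is treated as an interrupt
     * acknowledge and returns the IRQ that was acknowledged.
     */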
    if ( vpic->poll )
    {
        vpic->poll = 0;
        return vpic_intack(vpic);
    }

    if ( (addr & 1) == 0 )
        return (vpic->readsel_isr ? vpic->isr : vpic->irr);

    return vpic->imr;
}

static int vpic_intercept_pic_io(ioreq_t *p)
{
    struct hvm_hw_vpic *vpic;
    uint32_t data;

    if ( (p->size != 1) || (p->count != 1) )
    {
        gdprintk(XENLOG_WARNING, "PIC_IO bad access size %d\n", (int)p->size);
        return 1;
    }
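
    /* Port address bit 7 selects the PIC: 0x20/0x21 => master, 0xa0/0xa1 => slave. */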
    vpic = &current->domain->arch.hvm_domain.vpic[p->addr >> 7];

    if ( p->dir == IOREQ_WRITE )
    {
        if ( p->data_is_ptr )
            (void)hvm_copy_from_guest_phys(&data, p->data, p->size);
        else
            data = p->data;
        vpic_ioport_write(vpic, (uint32_t)p->addr, (uint8_t)data);
    }
    else
    {
        data = vpic_ioport_read(vpic, (uint32_t)p->addr);
        if ( p->data_is_ptr )
            (void)hvm_copy_to_guest_phys(p->data, &data, p->size);
        else
            p->data = (u64)data;
    }

    return 1;
}

static int vpic_intercept_elcr_io(ioreq_t *p)
{
    struct hvm_hw_vpic *vpic;
    uint32_t data;

    if ( (p->size != 1) || (p->count != 1) )
    {
        gdprintk(XENLOG_WARNING, "ELCR_IO bad access size %d\n", (int)p->size);
        return 1;
    }
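
    /* Port 0x4d0 is the master's ELCR, 0x4d1 the slave's. */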
    vpic = &current->domain->arch.hvm_domain.vpic[p->addr & 1];

    if ( p->dir == IOREQ_WRITE )
    {
        if ( p->data_is_ptr )
            (void)hvm_copy_from_guest_phys(&data, p->data, p->size);
        else
            data = p->data;

        /* Some IRs are always edge-triggered. Slave IR is always level-triggered. */
        data &= vpic_elcr_mask(vpic);
        if ( vpic->is_master )
            data |= 1 << 2;
        vpic->elcr = data;
    }
    else
    {
        /* Reader should not see hardcoded level-triggered slave IR. */
        data = vpic->elcr & vpic_elcr_mask(vpic);

        if ( p->data_is_ptr )
            (void)hvm_copy_to_guest_phys(p->data, &data, p->size);
        else
            p->data = data;
    }

    return 1;
}

#ifdef HVM_DEBUG_SUSPEND
static void vpic_info(struct hvm_hw_vpic *s)
{
    printk("*****pic state:*****\n");
    printk("pic irr 0x%x.\n", s->irr);
    printk("pic imr 0x%x.\n", s->imr);
    printk("pic isr 0x%x.\n", s->isr);
    printk("pic irq_base 0x%x.\n", s->irq_base);
    printk("pic init_state 0x%x.\n", s->init_state);
    printk("pic priority_add 0x%x.\n", s->priority_add);
    printk("pic readsel_isr 0x%x.\n", s->readsel_isr);
    printk("pic poll 0x%x.\n", s->poll);
    printk("pic auto_eoi 0x%x.\n", s->auto_eoi);
    printk("pic rotate_on_auto_eoi 0x%x.\n", s->rotate_on_auto_eoi);
    printk("pic special_fully_nested_mode 0x%x.\n", s->special_fully_nested_mode);
    printk("pic special_mask_mode 0x%x.\n", s->special_mask_mode);
    printk("pic elcr 0x%x.\n", s->elcr);
    printk("pic int_output 0x%x.\n", s->int_output);
    printk("pic is_master 0x%x.\n", s->is_master);
}
#else
static void vpic_info(struct hvm_hw_vpic *s)
{
}
#endif

static int vpic_save(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_vpic *s;
    int i;

    /* Save the state of both PICs. */
    for ( i = 0; i < 2; i++ )
    {
        s = &d->arch.hvm_domain.vpic[i];
        vpic_info(s);
        if ( hvm_save_entry(PIC, i, h, s) )
            return 1;
    }

    return 0;
}

static int vpic_load(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_vpic *s;
    uint16_t inst;

    /* Which PIC is this? */
    inst = hvm_load_instance(h);
    if ( inst > 1 )
        return -EINVAL;
    s = &d->arch.hvm_domain.vpic[inst];

    /* Load the state. */
    if ( hvm_load_entry(PIC, h, s) != 0 )
        return -EINVAL;

    vpic_info(s);
    return 0;
}

HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load, 2, HVMSR_PER_DOM);

void vpic_init(struct domain *d)
{
    struct hvm_hw_vpic *vpic;

    /* Master PIC. */
    vpic = &d->arch.hvm_domain.vpic[0];
    memset(vpic, 0, sizeof(*vpic));
    vpic->is_master = 1;
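    /* The cascade input (IRQ2) on the master is hardwired level-triggered. */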
    vpic->elcr = 1 << 2;
    register_portio_handler(d, 0x20, 2, vpic_intercept_pic_io);
    register_portio_handler(d, 0x4d0, 1, vpic_intercept_elcr_io);

    /* Slave PIC. */
    vpic++;
    memset(vpic, 0, sizeof(*vpic));
    register_portio_handler(d, 0xa0, 2, vpic_intercept_pic_io);
    register_portio_handler(d, 0x4d1, 1, vpic_intercept_elcr_io);
}

void vpic_irq_positive_edge(struct domain *d, int irq)
{
    struct hvm_hw_vpic *vpic = &d->arch.hvm_domain.vpic[irq >> 3];
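    /* IRQs 0-7 route to the master PIC, IRQs 8-15 to the slave. */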
    uint8_t mask = 1 << (irq & 7);

    ASSERT(irq <= 15);
    ASSERT(vpic_is_locked(vpic));

    if ( irq == 2 )
        return;

    vpic->irr |= mask;
    if ( !(vpic->imr & mask) )
        vpic_update_int_output(vpic);
}

void vpic_irq_negative_edge(struct domain *d, int irq)
{
    struct hvm_hw_vpic *vpic = &d->arch.hvm_domain.vpic[irq >> 3];
    uint8_t mask = 1 << (irq & 7);

    ASSERT(irq <= 15);
    ASSERT(vpic_is_locked(vpic));

    if ( irq == 2 )
        return;

    vpic->irr &= ~mask;
    if ( !(vpic->imr & mask) )
        vpic_update_int_output(vpic);
}

int cpu_get_pic_interrupt(struct vcpu *v)
{
    int irq, vector;
    struct hvm_hw_vpic *vpic = &v->domain->arch.hvm_domain.vpic[0];

    if ( !vlapic_accept_pic_intr(v) || !vpic->int_output )
        return -1;

    irq = vpic_intack(vpic);
    if ( irq == -1 )
        return -1;
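
    /* The vector is the acknowledging PIC's ICW2 base plus the IR line. */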
    vector = vpic[irq >> 3].irq_base + (irq & 7);
    return vector;
}