
view xen/arch/x86/hvm/vpic.c @ 14440:90d6fe6de04d

hvm vpic: Fix IRQ priority calculation in 8259 device model.
The priority shift should be a right-rotation, not a left-rotation.

From: Trolle Selander <trolle.selander@gmail.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Mar 16 16:19:35 2007 +0000 (2007-03-16)
parents 4d7ee9f4336a
children 656b8175f4f2
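
This changeset fixes vpic_get_priority(): the pending mask must be rotated right by priority_add before scanning for its lowest set bit, so that priority 0 always corresponds to whichever IRQ is currently designated highest priority. A minimal standalone sketch of the corrected calculation (plain C standing in for the inline asm below; the helper name prio_of is invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Rotate the 8-bit pending mask right by 'priority_add', then return the
 * index of its lowest set bit (8 if the mask is empty), mirroring the
 * "ror ; bsf" pair in vpic_get_priority(). */
static int prio_of(uint8_t mask, uint8_t priority_add)
{
    uint8_t rotated;
    int prio;

    if ( mask == 0 )
        return 8;

    rotated = (mask >> priority_add) | (mask << (8 - priority_add));
    for ( prio = 0; !(rotated & (1 << prio)); prio++ )
        ;
    return prio;
}

int main(void)
{
    /* priority_add == 3 makes IRQ3 the highest-priority input.  With IRQ1
     * and IRQ5 pending (mask 0x22), the right rotation yields priority 2,
     * i.e. IRQ5; a left rotation would instead report priority 0 (IRQ3),
     * which is not pending at all -- the bug this changeset fixes. */
    int prio = prio_of(0x22, 3);
    printf("priority %d -> IRQ%d\n", prio, (prio + 3) & 7);
    return 0;
}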

/*
 * i8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2005 Intel Corporation
 * Copyright (c) 2006 Keir Fraser, XenSource Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/event.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>

#define vpic_domain(v) (container_of((v), struct domain, \
                                     arch.hvm_domain.vpic[!vpic->is_master]))
#define __vpic_lock(v) &container_of((v), struct hvm_domain, \
                                     vpic[!(v)->is_master])->irq_lock
#define vpic_lock(v)   spin_lock(__vpic_lock(v))
#define vpic_unlock(v) spin_unlock(__vpic_lock(v))
#define vpic_is_locked(v) spin_is_locked(__vpic_lock(v))
#define vpic_elcr_mask(v) (vpic->is_master ? (uint8_t)0xf8 : (uint8_t)0xde);

/* Return the highest priority found in mask. Return 8 if none. */
#define VPIC_PRIO_NONE 8
static int vpic_get_priority(struct hvm_hw_vpic *vpic, uint8_t mask)
{
    int prio;

    ASSERT(vpic_is_locked(vpic));

    if ( mask == 0 )
        return VPIC_PRIO_NONE;

    /* prio = ffs(mask ROR vpic->priority_add); */
    asm ( "ror %%cl,%b1 ; bsf %1,%0"
          : "=r" (prio) : "r" ((uint32_t)mask), "c" (vpic->priority_add) );
    return prio;
}
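
/*
 * Worked example of the calculation above: with priority_add == 3 (IR3 has
 * the highest priority) and mask == 0x22 (IR1 and IR5 pending), the right
 * rotation by 3 yields 0x44, whose lowest set bit is bit 2.  Hence prio == 2,
 * and the caller maps it back with (prio + priority_add) & 7 == 5, i.e. IR5
 * wins over IR1, as expected from the rotated order IR3 > IR4 > IR5 > ... > IR2.
 */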

/* Return the PIC's highest priority pending interrupt. Return -1 if none. */
static int vpic_get_highest_priority_irq(struct hvm_hw_vpic *vpic)
{
    int cur_priority, priority, irq;
    uint8_t mask;

    ASSERT(vpic_is_locked(vpic));

    mask = vpic->irr & ~vpic->imr;
    priority = vpic_get_priority(vpic, mask);
    if ( priority == VPIC_PRIO_NONE )
        return -1;

    irq = (priority + vpic->priority_add) & 7;

    /*
     * Compute current priority. If special fully nested mode on the master,
     * the IRQ coming from the slave is not taken into account for the
     * priority computation. In special mask mode, masked interrupts do not
     * block lower-priority interrupts even if their IS bit is set.
     */
    mask = vpic->isr;
    if ( vpic->special_fully_nested_mode && vpic->is_master && (irq == 2) )
        mask &= ~(1 << 2);
    if ( vpic->special_mask_mode )
        mask &= ~vpic->imr;
    cur_priority = vpic_get_priority(vpic, mask);

    /* If a higher priority is found then an irq should be generated. */
    return (priority < cur_priority) ? irq : -1;
}
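
/*
 * For example, with priority_add == 0 and IR4 in service (isr == 0x10), a
 * pending IR6 (priority 6) loses to cur_priority 4 and no INT is asserted,
 * whereas a pending IR3 (priority 3) wins.  Note also that
 * vpic_update_int_output() below relies on the master and slave hvm_hw_vpic
 * structures being adjacent in hvm_domain.vpic[], so a slave reaches its
 * master simply via (--vpic).
 */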

static void vpic_update_int_output(struct hvm_hw_vpic *vpic)
{
    int irq;

    ASSERT(vpic_is_locked(vpic));

    irq = vpic_get_highest_priority_irq(vpic);
    if ( vpic->int_output == (irq >= 0) )
        return;

    /* INT line transition L->H or H->L. */
    vpic->int_output = !vpic->int_output;

    if ( vpic->int_output )
    {
        if ( vpic->is_master )
        {
            /* Master INT line is connected to VCPU0's VLAPIC LVT0. */
            struct vcpu *v = vpic_domain(vpic)->vcpu[0];
            if ( (v != NULL) && vlapic_accept_pic_intr(v) )
                vcpu_kick(v);
        }
        else
        {
            /* Assert slave line in master PIC. */
            (--vpic)->irr |= 1 << 2;
            vpic_update_int_output(vpic);
        }
    }
    else if ( !vpic->is_master )
    {
        /* Clear slave line in master PIC. */
        (--vpic)->irr &= ~(1 << 2);
        vpic_update_int_output(vpic);
    }
}

static void __vpic_intack(struct hvm_hw_vpic *vpic, int irq)
{
    uint8_t mask = 1 << irq;

    ASSERT(vpic_is_locked(vpic));

    /* Edge-triggered: clear the IRR (forget the edge). */
    if ( !(vpic->elcr & mask) )
        vpic->irr &= ~mask;

    if ( !vpic->auto_eoi )
        vpic->isr |= mask;
    else if ( vpic->rotate_on_auto_eoi )
        vpic->priority_add = (irq + 1) & 7;

    vpic_update_int_output(vpic);
}

static int vpic_intack(struct hvm_hw_vpic *vpic)
{
    int irq = -1;

    vpic_lock(vpic);

    if ( !vpic->int_output )
        goto out;

    irq = vpic_get_highest_priority_irq(vpic);
    BUG_ON(irq < 0);
    __vpic_intack(vpic, irq);

    if ( (irq == 2) && vpic->is_master )
    {
        vpic++; /* Slave PIC */
        irq = vpic_get_highest_priority_irq(vpic);
        BUG_ON(irq < 0);
        __vpic_intack(vpic, irq);
        irq += 8;
    }

 out:
    vpic_unlock(vpic);
    return irq;
}
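
/*
 * Example acknowledge cycle through the cascade: with a slave input such as
 * IRQ12 pending, the master's winning IRQ is the cascade line (IRQ2), so the
 * slave is acknowledged in turn; it reports its own IR4 and the function
 * returns 4 + 8 == 12.
 */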

static void vpic_ioport_write(struct hvm_hw_vpic *vpic, uint32_t addr, uint32_t val)
{
    int priority, cmd, irq;
    uint8_t mask;

    vpic_lock(vpic);

    addr &= 1;
    if ( addr == 0 )
    {
        if ( val & 0x10 )
        {
            /* ICW1 */
            /* Clear edge-sensing logic. */
            vpic->irr &= vpic->elcr;

            /* No interrupts masked or in service. */
            vpic->imr = vpic->isr = 0;

            /* IR7 is lowest priority. */
            vpic->priority_add = 0;
            vpic->rotate_on_auto_eoi = 0;

            vpic->special_mask_mode = 0;
            vpic->readsel_isr = 0;
            vpic->poll = 0;

            if ( !(val & 1) )
            {
                /* NO ICW4: ICW4 features are cleared. */
                vpic->auto_eoi = 0;
                vpic->special_fully_nested_mode = 0;
            }

            vpic->init_state = ((val & 3) << 2) | 1;
        }
        else if ( val & 0x08 )
        {
            /* OCW3 */
            if ( val & 0x04 )
                vpic->poll = 1;
            if ( val & 0x02 )
                vpic->readsel_isr = val & 1;
            if ( val & 0x40 )
                vpic->special_mask_mode = (val >> 5) & 1;
        }
        else
        {
            /* OCW2 */
            cmd = val >> 5;
            switch ( cmd )
            {
            case 0: /* Rotate in AEOI Mode (Clear) */
            case 4: /* Rotate in AEOI Mode (Set) */
                vpic->rotate_on_auto_eoi = cmd >> 2;
                break;
            case 1: /* Non-Specific EOI */
            case 5: /* Non-Specific EOI & Rotate */
                mask = vpic->isr;
                if ( vpic->special_mask_mode )
                    mask &= ~vpic->imr; /* SMM: ignore masked IRs. */
                priority = vpic_get_priority(vpic, mask);
                if ( priority == VPIC_PRIO_NONE )
                    break;
                irq = (priority + vpic->priority_add) & 7;
                vpic->isr &= ~(1 << irq);
                if ( cmd == 5 )
                    vpic->priority_add = (irq + 1) & 7;
                break;
            case 3: /* Specific EOI */
            case 7: /* Specific EOI & Rotate */
                irq = val & 7;
                vpic->isr &= ~(1 << irq);
                if ( cmd == 7 )
                    vpic->priority_add = (irq + 1) & 7;
                break;
            case 6: /* Set Priority */
                vpic->priority_add = (val + 1) & 7;
                break;
            }
        }
    }
    else
    {
        switch ( vpic->init_state & 3 )
        {
        case 0:
            /* OCW1 */
            vpic->imr = val;
            break;
        case 1:
            /* ICW2 */
            vpic->irq_base = val & 0xf8;
            vpic->init_state++;
            if ( !(vpic->init_state & 8) )
                break; /* CASCADE mode: wait for write to ICW3. */
            /* SNGL mode: fall through (no ICW3). */
        case 2:
            /* ICW3 */
            vpic->init_state++;
            if ( !(vpic->init_state & 4) )
                vpic->init_state = 0; /* No ICW4: init done */
            break;
        case 3:
            /* ICW4 */
            vpic->special_fully_nested_mode = (val >> 4) & 1;
            vpic->auto_eoi = (val >> 1) & 1;
            vpic->init_state = 0;
            break;
        }
    }

    vpic_update_int_output(vpic);

    vpic_unlock(vpic);
}
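
/*
 * Illustrative guest programming sequence decoded by vpic_ioport_write()
 * above (a typical configuration, not the only valid one):
 *   port 0x20 <- 0x11   ICW1: edge-triggered, cascade, ICW4 needed
 *   port 0x21 <- 0x20   ICW2: master vector base 0x20
 *   port 0x21 <- 0x04   ICW3: slave cascaded on IR2
 *   port 0x21 <- 0x01   ICW4: 8086 mode, normal EOI
 *   port 0x21 <- 0xfb   OCW1: mask everything except the cascade line
 */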

static uint32_t vpic_ioport_read(struct hvm_hw_vpic *vpic, uint32_t addr)
{
    if ( vpic->poll )
    {
        vpic->poll = 0;
        return vpic_intack(vpic);
    }

    if ( (addr & 1) == 0 )
        return (vpic->readsel_isr ? vpic->isr : vpic->irr);

    return vpic->imr;
}

static int vpic_intercept_pic_io(ioreq_t *p)
{
    struct hvm_hw_vpic *vpic;
    uint32_t data;

    if ( (p->size != 1) || (p->count != 1) )
    {
        gdprintk(XENLOG_WARNING, "PIC_IO bad access size %d\n", (int)p->size);
        return 1;
    }
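
    /*
     * The port number selects the PIC: the master lives at 0x20/0x21 and the
     * slave at 0xa0/0xa1, so bit 7 of the address indexes hvm_domain.vpic[].
     */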
    vpic = &current->domain->arch.hvm_domain.vpic[p->addr >> 7];

    if ( p->dir == IOREQ_WRITE )
    {
        if ( p->data_is_ptr )
            (void)hvm_copy_from_guest_phys(&data, p->data, p->size);
        else
            data = p->data;
        vpic_ioport_write(vpic, (uint32_t)p->addr, (uint8_t)data);
    }
    else
    {
        data = vpic_ioport_read(vpic, (uint32_t)p->addr);
        if ( p->data_is_ptr )
            (void)hvm_copy_to_guest_phys(p->data, &data, p->size);
        else
            p->data = (u64)data;
    }

    return 1;
}

static int vpic_intercept_elcr_io(ioreq_t *p)
{
    struct hvm_hw_vpic *vpic;
    uint32_t data;

    if ( (p->size != 1) || (p->count != 1) )
    {
        gdprintk(XENLOG_WARNING, "PIC_IO bad access size %d\n", (int)p->size);
        return 1;
    }

    vpic = &current->domain->arch.hvm_domain.vpic[p->addr & 1];

    if ( p->dir == IOREQ_WRITE )
    {
        if ( p->data_is_ptr )
            (void)hvm_copy_from_guest_phys(&data, p->data, p->size);
        else
            data = p->data;

        /* Some IRs are always edge trig. Slave IR is always level trig. */
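        /*
         * Concretely (masks from vpic_elcr_mask() above): on the master the
         * guest cannot set bits 0-2, so IRQ0-IRQ2 stay edge-triggered, and
         * the cascade input IRQ2 is then forced to level below; on the
         * slave, bits 0 and 5 (IRQ8 and IRQ13) likewise stay edge-triggered.
         */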
        data &= vpic_elcr_mask(vpic);
        if ( vpic->is_master )
            data |= 1 << 2;
        vpic->elcr = data;
    }
    else
    {
        /* Reader should not see hardcoded level-triggered slave IR. */
        data = vpic->elcr & vpic_elcr_mask(vpic);

        if ( p->data_is_ptr )
            (void)hvm_copy_to_guest_phys(p->data, &data, p->size);
        else
            p->data = data;
    }

    return 1;
}

#ifdef HVM_DEBUG_SUSPEND
static void vpic_info(struct hvm_hw_vpic *s)
{
    printk("*****pic state:*****\n");
    printk("pic irr 0x%x.\n", s->irr);
    printk("pic imr 0x%x.\n", s->imr);
    printk("pic isr 0x%x.\n", s->isr);
    printk("pic irq_base 0x%x.\n", s->irq_base);
    printk("pic init_state 0x%x.\n", s->init_state);
    printk("pic priority_add 0x%x.\n", s->priority_add);
    printk("pic readsel_isr 0x%x.\n", s->readsel_isr);
    printk("pic poll 0x%x.\n", s->poll);
    printk("pic auto_eoi 0x%x.\n", s->auto_eoi);
    printk("pic rotate_on_auto_eoi 0x%x.\n", s->rotate_on_auto_eoi);
    printk("pic special_fully_nested_mode 0x%x.\n", s->special_fully_nested_mode);
    printk("pic special_mask_mode 0x%x.\n", s->special_mask_mode);
    printk("pic elcr 0x%x.\n", s->elcr);
    printk("pic int_output 0x%x.\n", s->int_output);
    printk("pic is_master 0x%x.\n", s->is_master);
}
#else
static void vpic_info(struct hvm_hw_vpic *s)
{
}
#endif

static int vpic_save(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_vpic *s;
    int i;

    /* Save the state of both PICs */
    for ( i = 0; i < 2 ; i++ )
    {
        s = &d->arch.hvm_domain.vpic[i];
        vpic_info(s);
        if ( hvm_save_entry(PIC, i, h, s) )
            return 1;
    }

    return 0;
}

static int vpic_load(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_vpic *s;
    uint16_t inst;

    /* Which PIC is this? */
    inst = hvm_load_instance(h);
    if ( inst > 1 )
        return -EINVAL;
    s = &d->arch.hvm_domain.vpic[inst];

    /* Load the state */
    if ( hvm_load_entry(PIC, h, s) != 0 )
        return -EINVAL;

    vpic_info(s);
    return 0;
}

HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load, 2, HVMSR_PER_DOM);
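
/*
 * I/O port layout registered below: master PIC at 0x20-0x21, slave PIC at
 * 0xa0-0xa1, and the edge/level control registers (ELCR) at 0x4d0 (master)
 * and 0x4d1 (slave).
 */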

void vpic_init(struct domain *d)
{
    struct hvm_hw_vpic *vpic;

    /* Master PIC. */
    vpic = &d->arch.hvm_domain.vpic[0];
    memset(vpic, 0, sizeof(*vpic));
    vpic->is_master = 1;
    vpic->elcr = 1 << 2;
    register_portio_handler(d, 0x20, 2, vpic_intercept_pic_io);
    register_portio_handler(d, 0x4d0, 1, vpic_intercept_elcr_io);

    /* Slave PIC. */
    vpic++;
    memset(vpic, 0, sizeof(*vpic));
    register_portio_handler(d, 0xa0, 2, vpic_intercept_pic_io);
    register_portio_handler(d, 0x4d1, 1, vpic_intercept_elcr_io);
}

void vpic_irq_positive_edge(struct domain *d, int irq)
{
    struct hvm_hw_vpic *vpic = &d->arch.hvm_domain.vpic[irq >> 3];
    uint8_t mask = 1 << (irq & 7);

    ASSERT(irq <= 15);
    ASSERT(vpic_is_locked(vpic));

    if ( irq == 2 )
        return;

    vpic->irr |= mask;
    if ( !(vpic->imr & mask) )
        vpic_update_int_output(vpic);
}

void vpic_irq_negative_edge(struct domain *d, int irq)
{
    struct hvm_hw_vpic *vpic = &d->arch.hvm_domain.vpic[irq >> 3];
    uint8_t mask = 1 << (irq & 7);

    ASSERT(irq <= 15);
    ASSERT(vpic_is_locked(vpic));

    if ( irq == 2 )
        return;

    vpic->irr &= ~mask;
    if ( !(vpic->imr & mask) )
        vpic_update_int_output(vpic);
}

int cpu_get_pic_interrupt(struct vcpu *v, int *type)
{
    int irq, vector;
    struct hvm_hw_vpic *vpic = &v->domain->arch.hvm_domain.vpic[0];

    if ( !vlapic_accept_pic_intr(v) || !vpic->int_output )
        return -1;

    irq = vpic_intack(vpic);
    if ( irq == -1 )
        return -1;

    vector = vpic[irq >> 3].irq_base + (irq & 7);
    *type = APIC_DM_EXTINT;
    return vector;
}
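
/*
 * For example, a slave interrupt such as IRQ12 acknowledged above maps to
 * vector vpic[1].irq_base + 4 (0x74 with the conventional slave base of
 * 0x70) and is delivered to the vCPU as an ExtINT.
 */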