ia64/xen-unstable

view xen/arch/x86/hvm/i8259.c @ 10908:a6cb8ba24a91

[HVM] Place all APIC registers into one page in native format.
With this change we can re-use code at include/asm-x86/apicdef.h,
making the code much cleaner. It also helps with future enhancements.

This patch does not change any logic except the change to
CONTROL_REG_ACCESS_NUM, which should be 0xf for CR8 access.

Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
author kfraser@localhost.localdomain
date Wed Aug 02 10:07:03 2006 +0100 (2006-08-02)
parents 7cbc1fc8dbea
children d20e1835c24b
line source
1 /*
2 * QEMU 8259 interrupt controller emulation
3 *
4 * Copyright (c) 2003-2004 Fabrice Bellard
5 * Copyright (c) 2005 Intel Corperation
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25 #include <xen/config.h>
26 #include <xen/types.h>
27 #include <xen/mm.h>
28 #include <xen/xmalloc.h>
29 #include <xen/lib.h>
30 #include <xen/errno.h>
31 #include <xen/sched.h>
32 #include <asm/hvm/hvm.h>
33 #include <asm/hvm/io.h>
34 #include <asm/hvm/support.h>
35 #include <asm/current.h>
37 /* set irq level. If an edge is detected, then the IRR is set to 1 */
38 /* Caller must hold vpic lock */
39 static inline void pic_set_irq1(PicState *s, int irq, int level)
40 {
41 int mask;
43 BUG_ON(!spin_is_locked(&s->pics_state->lock));
45 mask = 1 << irq;
46 if (s->elcr & mask) {
47 /* level triggered */
48 if (level) {
49 s->irr |= mask;
50 s->last_irr |= mask;
51 } else {
52 s->irr &= ~mask;
53 s->last_irr &= ~mask;
54 }
55 } else {
56 /* edge triggered */
57 if (level) {
58 if ((s->last_irr & mask) == 0) {
59 s->irr |= mask;
60 }
61 s->last_irr |= mask;
62 } else {
63 s->last_irr &= ~mask;
64 }
65 }
66 }
68 /* return the highest priority found in mask (highest = smallest
69 number). Return 8 if no irq */
70 /* Caller must hold vpic lock */
71 static inline int get_priority(PicState *s, int mask)
72 {
73 int priority;
75 BUG_ON(!spin_is_locked(&s->pics_state->lock));
77 if (mask == 0)
78 return 8;
79 priority = 0;
80 while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
81 priority++;
82 return priority;
83 }
85 /* return the pic wanted interrupt. return -1 if none */
86 /* Caller must hold vpic lock */
87 static int pic_get_irq(PicState *s)
88 {
89 int mask, cur_priority, priority;
91 BUG_ON(!spin_is_locked(&s->pics_state->lock));
93 mask = s->irr & ~s->imr;
94 priority = get_priority(s, mask);
95 if (priority == 8)
96 return -1;
97 /* compute current priority. If special fully nested mode on the
98 master, the IRQ coming from the slave is not taken into account
99 for the priority computation. */
100 mask = s->isr;
101 if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
102 mask &= ~(1 << 2);
103 cur_priority = get_priority(s, mask);
104 if (priority < cur_priority) {
105 /* higher priority found: an irq should be generated */
106 return (priority + s->priority_add) & 7;
107 } else {
108 return -1;
109 }
110 }
112 /* raise irq to CPU if necessary. must be called every time the active
113 irq may change */
114 /* XXX: should not export it, but it is needed for an APIC kludge */
115 /* Caller must hold vpic lock */
116 void pic_update_irq(struct hvm_virpic *s)
117 {
118 int irq2, irq;
120 BUG_ON(!spin_is_locked(&s->lock));
122 /* first look at slave pic */
123 irq2 = pic_get_irq(&s->pics[1]);
124 if (irq2 >= 0) {
125 /* if irq request by slave pic, signal master PIC */
126 pic_set_irq1(&s->pics[0], 2, 1);
127 pic_set_irq1(&s->pics[0], 2, 0);
128 }
129 /* look at requested irq */
130 irq = pic_get_irq(&s->pics[0]);
131 if (irq >= 0) {
132 s->irq_request(s->irq_request_opaque, 1);
133 }
134 }
136 void pic_set_irq_new(void *opaque, int irq, int level)
137 {
138 struct hvm_virpic *s = opaque;
139 unsigned long flags;
141 spin_lock_irqsave(&s->lock, flags);
142 hvm_vioapic_set_irq(current->domain, irq, level);
143 pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
144 /* used for IOAPIC irqs */
145 if (s->alt_irq_func)
146 s->alt_irq_func(s->alt_irq_opaque, irq, level);
147 pic_update_irq(s);
148 spin_unlock_irqrestore(&s->lock, flags);
149 }
151 void do_pic_irqs (struct hvm_virpic *s, uint16_t irqs)
152 {
153 unsigned long flags;
155 spin_lock_irqsave(&s->lock, flags);
156 s->pics[1].irr |= (uint8_t)(irqs >> 8);
157 s->pics[0].irr |= (uint8_t) irqs;
158 hvm_vioapic_do_irqs(current->domain, irqs);
159 pic_update_irq(s);
160 spin_unlock_irqrestore(&s->lock, flags);
161 }
163 void do_pic_irqs_clear (struct hvm_virpic *s, uint16_t irqs)
164 {
165 unsigned long flags;
167 spin_lock_irqsave(&s->lock, flags);
168 s->pics[1].irr &= ~(uint8_t)(irqs >> 8);
169 s->pics[0].irr &= ~(uint8_t) irqs;
170 hvm_vioapic_do_irqs_clear(current->domain, irqs);
171 pic_update_irq(s);
172 spin_unlock_irqrestore(&s->lock, flags);
173 }
/* Obsolete entry point kept for old callers; forwards to the new API. */
void pic_set_irq(struct hvm_virpic *isa_pic, int irq, int level)
{
    pic_set_irq_new(isa_pic, irq, level);
}
181 /* acknowledge interrupt 'irq' */
182 /* Caller must hold vpic lock */
183 static inline void pic_intack(PicState *s, int irq)
184 {
185 BUG_ON(!spin_is_locked(&s->pics_state->lock));
187 if (s->auto_eoi) {
188 if (s->rotate_on_auto_eoi)
189 s->priority_add = (irq + 1) & 7;
190 } else {
191 s->isr |= (1 << irq);
192 }
193 /* We don't clear a level sensitive interrupt here */
194 if (!(s->elcr & (1 << irq)))
195 s->irr &= ~(1 << irq);
196 }
198 int pic_read_irq(struct hvm_virpic *s)
199 {
200 int irq, irq2, intno;
201 unsigned long flags;
203 spin_lock_irqsave(&s->lock, flags);
204 irq = pic_get_irq(&s->pics[0]);
205 if (irq >= 0) {
206 pic_intack(&s->pics[0], irq);
207 if (irq == 2) {
208 irq2 = pic_get_irq(&s->pics[1]);
209 if (irq2 >= 0) {
210 pic_intack(&s->pics[1], irq2);
211 } else {
212 /* spurious IRQ on slave controller */
213 irq2 = 7;
214 }
215 intno = s->pics[1].irq_base + irq2;
216 irq = irq2 + 8;
217 } else {
218 intno = s->pics[0].irq_base + irq;
219 }
220 } else {
221 /* spurious IRQ on host controller */
222 printk("spurious IRQ irq got=%d\n",irq);
223 irq = 7;
224 intno = s->pics[0].irq_base + irq;
225 }
226 pic_update_irq(s);
227 spin_unlock_irqrestore(&s->lock, flags);
229 return intno;
230 }
232 /* Caller must hold vpic lock */
233 static void update_shared_irr(struct hvm_virpic *s, PicState *c)
234 {
235 uint8_t *pl, *pe;
237 BUG_ON(!spin_is_locked(&s->lock));
239 get_sp(current->domain)->sp_global.pic_elcr =
240 s->pics[0].elcr | ((u16)s->pics[1].elcr << 8);
241 pl =(uint8_t*)&get_sp(current->domain)->sp_global.pic_last_irr;
242 pe =(uint8_t*)&get_sp(current->domain)->sp_global.pic_elcr;
243 if ( c == &s->pics[0] ) {
244 *pl = c->last_irr;
245 *pe = c->elcr;
246 }
247 else {
248 *(pl+1) = c->last_irr;
249 *(pe+1) = c->elcr;
250 }
251 }
253 /* Caller must hold vpic lock */
254 static void pic_reset(void *opaque)
255 {
256 PicState *s = opaque;
258 BUG_ON(!spin_is_locked(&s->pics_state->lock));
260 s->last_irr = 0;
261 s->irr = 0;
262 s->imr = 0;
263 s->isr = 0;
264 s->priority_add = 0;
265 s->irq_base = 0;
266 s->read_reg_select = 0;
267 s->poll = 0;
268 s->special_mask = 0;
269 s->init_state = 0;
270 s->auto_eoi = 0;
271 s->rotate_on_auto_eoi = 0;
272 s->special_fully_nested_mode = 0;
273 s->init4 = 0;
274 s->elcr = 0;
275 }
/*
 * Handle a guest write to one of this PIC's two I/O ports.
 * Port bit 0 == 0 (0x20/0xa0): ICW1 starts initialisation, OCW3 sets
 * poll/read-select/special-mask, OCW2 issues EOI and rotation commands.
 * Port bit 0 == 1 (0x21/0xa1): the ICW2..ICW4 init sequence while
 * init_state != 0, otherwise OCW1 (the interrupt mask).
 */
/* Caller must hold vpic lock */
static void pic_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    PicState *s = opaque;
    int priority, cmd, irq;

    BUG_ON(!spin_is_locked(&s->pics_state->lock));

    addr &= 1;
    if (addr == 0) {
        if (val & 0x10) {
            /* ICW1: begin (re)initialisation; full reset of this PIC. */
            pic_reset(s);
            update_shared_irr(s->pics_state, s);
            /* deassert a pending interrupt */
            s->pics_state->irq_request(s->pics_state->irq_request_opaque, 0);
            s->init_state = 1;
            s->init4 = val & 1;  /* IC4 bit: will an ICW4 be sent? */
            if (val & 0x02)
                hw_error("single mode not supported");
            if (val & 0x08)
                hw_error("level sensitive irq not supported");
        } else if (val & 0x08) {
            /* OCW3: poll command / register read select / special mask. */
            if (val & 0x04)
                s->poll = 1;   /* next read acts as a poll */
            if (val & 0x02)
                s->read_reg_select = val & 1;  /* 0 = IRR, 1 = ISR */
            if (val & 0x40)
                s->special_mask = (val >> 5) & 1;
        } else {
            /* OCW2: command encoded in bits 7:5. */
            cmd = val >> 5;
            switch(cmd) {
            case 0: /* clear (0) / set (4) rotate-in-auto-EOI mode */
            case 4:
                s->rotate_on_auto_eoi = cmd >> 2;
                break;
            case 1: /* end of interrupt */
            case 5: /* non-specific EOI, 5 also rotates priority */
                priority = get_priority(s, s->isr);
                if (priority != 8) {
                    irq = (priority + s->priority_add) & 7;
                    s->isr &= ~(1 << irq);
                    if (cmd == 5)
                        s->priority_add = (irq + 1) & 7;
                    pic_update_irq(s->pics_state);
                }
                break;
            case 3: /* specific EOI: clear the named in-service bit */
                irq = val & 7;
                s->isr &= ~(1 << irq);
                pic_update_irq(s->pics_state);
                break;
            case 6: /* set priority: rotate so 'val' becomes lowest */
                s->priority_add = (val + 1) & 7;
                pic_update_irq(s->pics_state);
                break;
            case 7: /* specific EOI + rotate priority */
                irq = val & 7;
                s->isr &= ~(1 << irq);
                s->priority_add = (irq + 1) & 7;
                pic_update_irq(s->pics_state);
                break;
            default:
                /* no operation */
                break;
            }
        }
    } else {
        switch(s->init_state) {
        case 0:
            /* normal mode: OCW1 sets the interrupt mask register */
            s->imr = val;
            pic_update_irq(s->pics_state);
            break;
        case 1:
            /* ICW2: vector base; low 3 bits come from the IRQ number. */
            s->irq_base = val & 0xf8;
            s->init_state = 2;
            break;
        case 2:
            /* ICW3 (cascade wiring) is accepted but ignored; an ICW4
             * follows only if IC4 was set in ICW1. */
            if (s->init4) {
                s->init_state = 3;
            } else {
                s->init_state = 0;
            }
            break;
        case 3:
            /* ICW4: mode bits; initialisation complete afterwards. */
            s->special_fully_nested_mode = (val >> 4) & 1;
            s->auto_eoi = (val >> 1) & 1;
            s->init_state = 0;
            break;
        }
    }
}
/*
 * Poll-mode read: return and acknowledge the highest-priority pending
 * interrupt, or 0x07 if none is pending.  addr1 bit 7 distinguishes a
 * slave access (0xa0/0x80 range) from a master access.
 */
/* Caller must hold vpic lock */
static uint32_t pic_poll_read (PicState *s, uint32_t addr1)
{
    int ret;

    BUG_ON(!spin_is_locked(&s->pics_state->lock));

    ret = pic_get_irq(s);
    if (ret >= 0) {
        if (addr1 >> 7) {
            /* Slave poll: also clear the cascade (IRQ2) on the master. */
            s->pics_state->pics[0].isr &= ~(1 << 2);
            s->pics_state->pics[0].irr &= ~(1 << 2);
        }
        /* Acknowledge: drop the request and in-service bits. */
        s->irr &= ~(1 << ret);
        s->isr &= ~(1 << ret);
        /* For a master poll returning IRQ2 the caller will poll the
         * slave next, so defer the update in that one case. */
        if (addr1 >> 7 || ret != 2)
            pic_update_irq(s->pics_state);
    } else {
        /* Nothing pending: report the spurious value 7. */
        ret = 0x07;
        pic_update_irq(s->pics_state);
    }

    return ret;
}
396 /* Caller must hold vpic lock */
397 static uint32_t pic_ioport_read(void *opaque, uint32_t addr1)
398 {
399 PicState *s = opaque;
400 unsigned int addr;
401 int ret;
403 BUG_ON(!spin_is_locked(&s->pics_state->lock));
405 addr = addr1;
406 addr &= 1;
407 if (s->poll) {
408 ret = pic_poll_read(s, addr1);
409 s->poll = 0;
410 } else {
411 if (addr == 0) {
412 if (s->read_reg_select)
413 ret = s->isr;
414 else
415 ret = s->irr;
416 } else {
417 ret = s->imr;
418 }
419 }
420 return ret;
421 }
423 /* memory mapped interrupt status */
424 /* XXX: may be the same than pic_read_rq() */
425 uint32_t pic_intack_read(struct hvm_virpic *s)
426 {
427 int ret;
428 unsigned long flags;
430 spin_lock_irqsave(&s->lock, flags);
431 ret = pic_poll_read(&s->pics[0], 0x00);
432 if (ret == 2)
433 ret = pic_poll_read(&s->pics[1], 0x80) + 8;
434 /* Prepare for ISR read */
435 s->pics[0].read_reg_select = 1;
436 spin_unlock_irqrestore(&s->lock, flags);
438 return ret;
439 }
441 static void elcr_ioport_write(void *opaque, uint32_t addr, uint32_t val)
442 /* Caller must hold vpic lock */
443 {
444 PicState *s = opaque;
446 BUG_ON(!spin_is_locked(&s->pics_state->lock));
448 s->elcr = val & s->elcr_mask;
449 }
451 static uint32_t elcr_ioport_read(void *opaque, uint32_t addr1)
452 {
453 PicState *s = opaque;
454 return s->elcr;
455 }
457 /* XXX: add generic master/slave system */
458 /* Caller must hold vpic lock */
459 static void pic_init1(int io_addr, int elcr_addr, PicState *s)
460 {
461 BUG_ON(!spin_is_locked(&s->pics_state->lock));
463 pic_reset(s);
464 }
466 void pic_init(struct hvm_virpic *s, void (*irq_request)(void *, int),
467 void *irq_request_opaque)
468 {
469 unsigned long flags;
471 memset(s, 0, sizeof(*s));
472 spin_lock_init(&s->lock);
473 s->pics[0].pics_state = s;
474 s->pics[1].pics_state = s;
475 spin_lock_irqsave(&s->lock, flags);
476 pic_init1(0x20, 0x4d0, &s->pics[0]);
477 pic_init1(0xa0, 0x4d1, &s->pics[1]);
478 spin_unlock_irqrestore(&s->lock, flags);
479 s->pics[0].elcr_mask = 0xf8;
480 s->pics[1].elcr_mask = 0xde;
481 s->irq_request = irq_request;
482 s->irq_request_opaque = irq_request_opaque;
483 return;
484 }
486 void pic_set_alt_irq_func(struct hvm_virpic *s,
487 void (*alt_irq_func)(void *, int, int),
488 void *alt_irq_opaque)
489 {
490 unsigned long flags;
492 spin_lock_irqsave(&s->lock, flags);
493 s->alt_irq_func = alt_irq_func;
494 s->alt_irq_opaque = alt_irq_opaque;
495 spin_unlock_irqrestore(&s->lock, flags);
496 }
498 static int intercept_pic_io(ioreq_t *p)
499 {
500 struct hvm_virpic *pic;
501 struct vcpu *v = current;
502 uint32_t data;
503 unsigned long flags;
505 if ( p->size != 1 || p->count != 1) {
506 printk("PIC_IO wrong access size %d!\n", (int)p->size);
507 return 1;
508 }
509 pic = &v->domain->arch.hvm_domain.vpic;
510 if ( p->dir == 0 ) {
511 if(p->pdata_valid)
512 hvm_copy(&data, (unsigned long)p->u.pdata, p->size, HVM_COPY_IN);
513 else
514 data = p->u.data;
515 spin_lock_irqsave(&pic->lock, flags);
516 pic_ioport_write((void*)&pic->pics[p->addr>>7],
517 (uint32_t) p->addr, (uint32_t) (data & 0xff));
518 spin_unlock_irqrestore(&pic->lock, flags);
519 }
520 else {
521 spin_lock_irqsave(&pic->lock, flags);
522 data = pic_ioport_read(
523 (void*)&pic->pics[p->addr>>7], (uint32_t) p->addr);
524 spin_unlock_irqrestore(&pic->lock, flags);
525 if(p->pdata_valid)
526 hvm_copy(&data, (unsigned long)p->u.pdata, p->size, HVM_COPY_OUT);
527 else
528 p->u.data = (u64)data;
529 }
530 return 1;
531 }
533 static int intercept_elcr_io(ioreq_t *p)
534 {
535 struct hvm_virpic *s;
536 struct vcpu *v = current;
537 uint32_t data;
538 unsigned long flags;
540 if ( p->size != 1 || p->count != 1 ) {
541 printk("PIC_IO wrong access size %d!\n", (int)p->size);
542 return 1;
543 }
545 s = &v->domain->arch.hvm_domain.vpic;
546 if ( p->dir == 0 ) {
547 if(p->pdata_valid)
548 hvm_copy(&data, (unsigned long)p->u.pdata, p->size, HVM_COPY_IN);
549 else
550 data = p->u.data;
551 spin_lock_irqsave(&s->lock, flags);
552 elcr_ioport_write((void*)&s->pics[p->addr&1],
553 (uint32_t) p->addr, (uint32_t)( data & 0xff));
554 get_sp(current->domain)->sp_global.pic_elcr =
555 s->pics[0].elcr | ((u16)s->pics[1].elcr << 8);
556 spin_unlock_irqrestore(&s->lock, flags);
557 }
558 else {
559 data = (u64) elcr_ioport_read(
560 (void*)&s->pics[p->addr&1], (uint32_t) p->addr);
561 if(p->pdata_valid)
562 hvm_copy(&data, (unsigned long)p->u.pdata, p->size, HVM_COPY_OUT);
563 else
564 p->u.data = (u64)data;
566 }
567 return 1;
568 }
569 void register_pic_io_hook (void)
570 {
571 register_portio_handler(0x20, 2, intercept_pic_io);
572 register_portio_handler(0x4d0, 1, intercept_elcr_io);
573 register_portio_handler(0xa0, 2, intercept_pic_io);
574 register_portio_handler(0x4d1, 1, intercept_elcr_io);
575 }
578 /* IRQ handling */
579 int cpu_get_pic_interrupt(struct vcpu *v, int *type)
580 {
581 int intno;
582 struct hvm_virpic *s = &v->domain->arch.hvm_domain.vpic;
583 struct hvm_domain *plat = &v->domain->arch.hvm_domain;
585 if ( !vlapic_accept_pic_intr(v) )
586 return -1;
588 if (cmpxchg(&plat->interrupt_request, 1, 0) != 1)
589 return -1;
591 /* read the irq from the PIC */
592 intno = pic_read_irq(s);
593 *type = APIC_DM_EXTINT;
594 return intno;
595 }
597 int is_pit_irq(struct vcpu *v, int irq, int type)
598 {
599 int pit_vec;
601 if (type == APIC_DM_EXTINT)
602 pit_vec = v->domain->arch.hvm_domain.vpic.pics[0].irq_base;
603 else
604 pit_vec =
605 v->domain->arch.hvm_domain.vioapic.redirtbl[0].RedirForm.vector;
607 return (irq == pit_vec);
608 }
610 int is_irq_enabled(struct vcpu *v, int irq)
611 {
612 struct hvm_virpic *vpic=&v->domain->arch.hvm_domain.vpic;
614 if ( irq & 8 ) {
615 return !( (1 << (irq&7)) & vpic->pics[1].imr);
616 }
617 else {
618 return !( (1 << irq) & vpic->pics[0].imr);
619 }
620 }