ia64/xen-unstable

view xen/arch/x86/hvm/vioapic.c @ 10908:a6cb8ba24a91

[HVM] Place all APIC registers into one page in native format.
With this change we can re-use the definitions in include/asm-x86/apicdef.h,
making the code much cleaner. It also helps with future enhancements.

This patch does not change any logic except the change to
CONTROL_REG_ACCESS_NUM, which should be 0xf for CR8 access.

Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
author kfraser@localhost.localdomain
date Wed Aug 02 10:07:03 2006 +0100 (2006-08-02)
parents ab0cae84cfec
children f393ced88d14
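
A minimal sketch (not part of this changeset) of what the "one page, native
format" layout enables: with every APIC register kept at its architectural
offset inside a single page, the generic offsets from
include/asm-x86/apicdef.h (APIC_ID, APIC_LDR, APIC_DFR, ...) can index the
backing page directly. The helper name and the regs pointer below are
illustrative assumptions, not the real vlapic interface; uint32_t comes from
xen/types.h.

    /* APIC registers are 32 bits wide, aligned on 16-byte boundaries
     * within the 4K register page. */
    static inline uint32_t example_apic_read(void *regs, unsigned int off)
    {
        return *(volatile uint32_t *)((char *)regs + off);
    }

    /* e.g. example_apic_read(regs, APIC_LDR) fetches the logical
     * destination register, mirroring the vlapic_get_reg() calls below. */
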
/*
 *  Copyright (C) 2001  MandrakeSoft S.A.
 *
 *    MandrakeSoft S.A.
 *    43, rue d'Aboukir
 *    75002 Paris - France
 *    http://www.linux-mandrake.com/
 *    http://www.mandrakesoft.com/
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 *  Yunhong Jiang <yunhong.jiang@intel.com>
 *  Ported to xen by using virtual IRQ line.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/xmalloc.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <public/hvm/ioreq.h>
#include <asm/hvm/io.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/support.h>
#include <asm/current.h>

/* HACK: Route IRQ0 only to VCPU0 to prevent time jumps. */
#define IRQ0_SPECIAL_ROUTING 1

#if defined(__ia64__)
#define opt_hvm_debug_level opt_vmx_debug_level
#endif

static void ioapic_enable(hvm_vioapic_t *s, uint8_t enable)
{
    if (enable)
        s->flags |= IOAPIC_ENABLE_FLAG;
    else
        s->flags &= ~IOAPIC_ENABLE_FLAG;
}

#ifdef HVM_DOMAIN_SAVE_RESTORE
void ioapic_save(QEMUFile* f, void* opaque)
{
    printk("no implementation for ioapic_save\n");
}

int ioapic_load(QEMUFile* f, void* opaque, int version_id)
{
    printk("no implementation for ioapic_load\n");
    return 0;
}
#endif

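/*
 * The I/O APIC is programmed indirectly: the guest writes a register
 * index into IOREGSEL (IOAPIC_REG_SELECT) and then accesses the selected
 * register through the IOWIN data window (IOAPIC_REG_WINDOW).  Indices
 * 0x10 and above select the 64-bit redirection table entries, exposed as
 * two 32-bit halves per pin.
 */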
static unsigned long hvm_vioapic_read_indirect(struct hvm_vioapic *s,
                                               unsigned long addr,
                                               unsigned long length)
{
    unsigned long result = 0;

    ASSERT(s);

    switch (s->ioregsel) {
    case IOAPIC_REG_VERSION:
        result = ((((IOAPIC_NUM_PINS-1) & 0xff) << 16)
                  | (IOAPIC_VERSION_ID & 0x0f));
        break;

#ifndef __ia64__
    case IOAPIC_REG_APIC_ID:
        result = ((s->id & 0xf) << 24);
        break;

    case IOAPIC_REG_ARB_ID:
        /* XXX how arb_id used on p4? */
        result = ((s->id & 0xf) << 24);
        break;
#endif

    default:
    {
        uint32_t redir_index = 0;
        uint64_t redir_content = 0;

        redir_index = (s->ioregsel - 0x10) >> 1;

        if (redir_index >= 0 && redir_index < IOAPIC_NUM_PINS) {
            redir_content = s->redirtbl[redir_index].value;

            result = (s->ioregsel & 0x1) ?
                         (redir_content >> 32) & 0xffffffff :
                         redir_content & 0xffffffff;
        } else {
            printk("hvm_vioapic_read_indirect: undefined ioregsel %x\n",
                   s->ioregsel);
            domain_crash_synchronous();
        }
        break;
    }
    } /* switch */

    return result;
}

static unsigned long hvm_vioapic_read(struct vcpu *v,
                                      unsigned long addr,
                                      unsigned long length)
{
    struct hvm_vioapic *s = &(v->domain->arch.hvm_domain.vioapic);
    uint32_t result = 0;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "hvm_vioapic_read addr %lx\n", addr);

    ASSERT(s);

    addr &= 0xff;

    switch (addr) {
    case IOAPIC_REG_SELECT:
        result = s->ioregsel;
        break;

    case IOAPIC_REG_WINDOW:
        result = hvm_vioapic_read_indirect(s, addr, length);
        break;

    default:
        break;
    }

    return result;
}

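/* Mirror a redirection entry's mask bit into the imr bitmap, so masked
 * pins can be tested cheaply when servicing pending interrupts. */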
static void hvm_vioapic_update_imr(struct hvm_vioapic *s, int index)
{
    if (s->redirtbl[index].RedirForm.mask)
        set_bit(index, &s->imr);
    else
        clear_bit(index, &s->imr);
}

static void hvm_vioapic_write_indirect(struct hvm_vioapic *s,
                                       unsigned long addr,
                                       unsigned long length,
                                       unsigned long val)
{
    switch (s->ioregsel) {
    case IOAPIC_REG_VERSION:
        printk("hvm_vioapic_write_indirect: version register read only\n");
        break;

#ifndef __ia64__
    case IOAPIC_REG_APIC_ID:
        s->id = (val >> 24) & 0xf;
        break;

    case IOAPIC_REG_ARB_ID:
        s->arb_id = val;
        break;
#endif

    default:
    {
        uint32_t redir_index = (s->ioregsel - 0x10) >> 1;

        HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "hvm_vioapic_write_indirect "
                    "change redir index %x val %lx\n",
                    redir_index, val);

        if (redir_index >= 0 && redir_index < IOAPIC_NUM_PINS) {
            uint64_t redir_content;

            redir_content = s->redirtbl[redir_index].value;

            if (s->ioregsel & 0x1)
                redir_content = (((uint64_t)val & 0xffffffff) << 32) |
                                (redir_content & 0xffffffff);
            else
                redir_content = ((redir_content >> 32) << 32) |
                                (val & 0xffffffff);
            s->redirtbl[redir_index].value = redir_content;
            hvm_vioapic_update_imr(s, redir_index);
        } else {
            printk("hvm_vioapic_write_indirect "
                   "error register %x\n", s->ioregsel);
        }
        break;
    }
    } /* switch */
}

static void hvm_vioapic_write(struct vcpu *v,
                              unsigned long addr,
                              unsigned long length,
                              unsigned long val)
{
    hvm_vioapic_t *s = &(v->domain->arch.hvm_domain.vioapic);

    ASSERT(s);

    addr &= 0xff;

    switch (addr) {
    case IOAPIC_REG_SELECT:
        s->ioregsel = val;
        break;

    case IOAPIC_REG_WINDOW:
        hvm_vioapic_write_indirect(s, addr, length, val);
        break;

#ifdef __ia64__
    case IOAPIC_REG_EOI:
        ioapic_update_EOI(v->domain, val);
        break;
#endif

    default:
        break;
    }
}

static int hvm_vioapic_range(struct vcpu *v, unsigned long addr)
{
    hvm_vioapic_t *s = &(v->domain->arch.hvm_domain.vioapic);

    if ((s->flags & IOAPIC_ENABLE_FLAG) &&
        (addr >= s->base_address &&
         (addr <= s->base_address + IOAPIC_MEM_LENGTH)))
        return 1;
    else
        return 0;
}

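/* Glue into the generic HVM MMIO dispatcher: check_handler claims guest
 * accesses that fall inside the I/O APIC window, read/write handle them. */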
struct hvm_mmio_handler vioapic_mmio_handler = {
    .check_handler = hvm_vioapic_range,
    .read_handler = hvm_vioapic_read,
    .write_handler = hvm_vioapic_write
};

static void hvm_vioapic_reset(hvm_vioapic_t *s)
{
    int i;

    memset(s, 0, sizeof(hvm_vioapic_t));

    for (i = 0; i < IOAPIC_NUM_PINS; i++) {
        s->redirtbl[i].RedirForm.mask = 0x1;
        hvm_vioapic_update_imr(s, i);
    }
}

static void ioapic_update_config(hvm_vioapic_t *s,
                                 unsigned long address,
                                 uint8_t enable)
{
    ASSERT(s);

    ioapic_enable(s, enable);

    if (address != s->base_address)
        s->base_address = address;
}

static int ioapic_inj_irq(hvm_vioapic_t *s,
                          struct vlapic * target,
                          uint8_t vector,
                          uint8_t trig_mode,
                          uint8_t delivery_mode)
{
    int result = 0;

    ASSERT(s && target);

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_inj_irq "
                "irq %d trig %d delivery mode %d\n",
                vector, trig_mode, delivery_mode);

    switch (delivery_mode) {
    case dest_Fixed:
    case dest_LowestPrio:
        if (vlapic_set_irq(target, vector, trig_mode) && (trig_mode == 1))
            printk("<ioapic_inj_irq> level interrupt asserted before "
                   "previous one was cleared\n");
        result = 1;
        break;
    default:
        printk("<ioapic_inj_irq> error delivery mode %d\n",
               delivery_mode);
        break;
    }

    return result;
}

#ifndef __ia64__
static int ioapic_match_logical_addr(hvm_vioapic_t *s, int number, uint8_t dest)
{
    int result = 0;
    uint32_t logical_dest = vlapic_get_reg(s->lapic_info[number], APIC_LDR);

    ASSERT(s && s->lapic_info[number]);

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_match_logical_addr "
                "number %i dest %x\n",
                number, dest);

    switch (vlapic_get_reg(s->lapic_info[number], APIC_DFR))
    {
    case APIC_DFR_FLAT:
        result =
            (dest & GET_APIC_LOGICAL_ID(logical_dest)) != 0;
        break;
    case APIC_DFR_CLUSTER:
        /* Should we support flat cluster mode? */
        if ( (GET_APIC_LOGICAL_ID(logical_dest) >> 4
               == ((dest >> 0x4) & 0xf)) &&
             (logical_dest & (dest & 0xf)) )
            result = 1;
        break;
    default:
        printk("invalid DFR value for local apic %x\n", number);
        break;
    }

    return result;
}
#else
extern int ioapic_match_logical_addr(hvm_vioapic_t *s, int number, uint8_t dest);
#endif

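/*
 * Translate a redirection entry's destination into a bitmap of local
 * APICs: in physical mode the dest field is matched against each LAPIC's
 * APIC ID; in logical mode each LAPIC's LDR/DFR (flat or cluster format)
 * is consulted via ioapic_match_logical_addr().
 */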
static uint32_t ioapic_get_delivery_bitmask(hvm_vioapic_t *s,
                                            uint16_t dest,
                                            uint8_t dest_mode,
                                            uint8_t vector,
                                            uint8_t delivery_mode)
{
    uint32_t mask = 0;
    int i;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_get_delivery_bitmask "
                "dest %d dest_mode %d "
                "vector %d del_mode %d, lapic_count %d\n",
                dest, dest_mode, vector, delivery_mode, s->lapic_count);

    ASSERT(s);

    if (dest_mode == 0) { /* Physical mode */
        for (i = 0; i < s->lapic_count; i++) {
            if (VLAPIC_ID(s->lapic_info[i]) == dest) {
                mask = 1 << i;
                break;
            }
        }
    } else {
        /* logical destination. call match_logical_addr for each APIC. */
        if (dest != 0) {
            for (i = 0; i < s->lapic_count; i++) {
                if ( s->lapic_info[i] &&
                     ioapic_match_logical_addr(s, i, dest) ) {
                    mask |= (1 << i);
                }
            }
        }
    }

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_get_delivery_bitmask "
                "mask %x\n", mask);

    return mask;
}

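/*
 * Deliver one pin's interrupt according to its redirection entry.  With
 * IRQ0_SPECIAL_ROUTING enabled, the PIT interrupt (pin 0) is pinned to
 * VCPU0 instead of going through lowest-priority round-robin, to keep
 * guest time from jumping.
 */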
static void ioapic_deliver(hvm_vioapic_t *s, int irqno)
{
    uint16_t dest = s->redirtbl[irqno].RedirForm.dest_id;
    uint8_t dest_mode = s->redirtbl[irqno].RedirForm.destmode;
    uint8_t delivery_mode = s->redirtbl[irqno].RedirForm.deliver_mode;
    uint8_t vector = s->redirtbl[irqno].RedirForm.vector;
    uint8_t trig_mode = s->redirtbl[irqno].RedirForm.trigmod;
    uint32_t deliver_bitmask;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
                "dest %x dest_mode %x delivery_mode %x vector %x trig_mode %x\n",
                dest, dest_mode, delivery_mode, vector, trig_mode);

    deliver_bitmask = ioapic_get_delivery_bitmask(
        s, dest, dest_mode, vector, delivery_mode);

    if (!deliver_bitmask) {
        HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic deliver "
                    "no target on destination\n");
        return;
    }

    switch (delivery_mode) {
    case dest_LowestPrio:
    {
        struct vlapic* target;

#ifdef IRQ0_SPECIAL_ROUTING
        if (irqno == 0)
            target = s->lapic_info[0];
        else
#endif
            target = apic_round_robin(s->domain, dest_mode,
                                      vector, deliver_bitmask);
        if (target)
            ioapic_inj_irq(s, target, vector, trig_mode, delivery_mode);
        else
            HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
                        "null round robin mask %x vector %x delivery_mode %x\n",
                        deliver_bitmask, vector, delivery_mode);
        break;
    }

    case dest_Fixed:
    case dest_ExtINT:
    {
        uint8_t bit;
        for (bit = 0; bit < s->lapic_count; bit++) {
            if (deliver_bitmask & (1 << bit)) {
#ifdef IRQ0_SPECIAL_ROUTING
                if ( (irqno == 0) && (bit != 0) )
                {
                    printk("PIT irq to bit %x\n", bit);
                    domain_crash_synchronous();
                }
#endif
                if (s->lapic_info[bit]) {
                    ioapic_inj_irq(s, s->lapic_info[bit],
                                   vector, trig_mode, delivery_mode);
                }
            }
        }
        break;
    }

    case dest_SMI:
    case dest_NMI:
    case dest_INIT:
    case dest__reserved_2:
    default:
        printk("Unsupported delivery mode %d\n", delivery_mode);
        break;
    }
}

static int ioapic_get_highest_irq(hvm_vioapic_t *s)
{
    uint32_t irqs = s->irr & ~s->isr & ~s->imr;
    return fls(irqs) - 1;
}

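/*
 * Deliver every pending, unmasked interrupt, highest pin first: irr
 * latches asserted lines, isr tracks in-service level-triggered pins
 * awaiting EOI, and imr mirrors the redirection-entry mask bits.
 */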
static void service_ioapic(hvm_vioapic_t *s)
{
    int irqno;

    while ((irqno = ioapic_get_highest_irq(s)) != -1) {

        HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "service_ioapic "
                    "highest irqno %x\n", irqno);

        if (!test_bit(irqno, &s->imr)) {
            ioapic_deliver(s, irqno);
        }

        if (s->redirtbl[irqno].RedirForm.trigmod == IOAPIC_LEVEL_TRIGGER) {
            s->isr |= (1 << irqno);
        }

        s->irr &= ~(1 << irqno);
    }
}

void hvm_vioapic_do_irqs(struct domain *d, uint16_t irqs)
{
    hvm_vioapic_t *s = &(d->arch.hvm_domain.vioapic);

    if (!hvm_apic_support(d))
        return;

    s->irr |= irqs & ~s->imr;
    service_ioapic(s);
}

void hvm_vioapic_do_irqs_clear(struct domain *d, uint16_t irqs)
{
    hvm_vioapic_t *s = &(d->arch.hvm_domain.vioapic);

    if (!hvm_apic_support(d))
        return;

    s->irr &= ~irqs;
    service_ioapic(s);
}

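/*
 * Assert or deassert a single virtual IRQ line.  Level-triggered pins
 * track the line state in irr; edge-triggered pins only latch the rising
 * edge and are never cleared here.
 */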
void hvm_vioapic_set_irq(struct domain *d, int irq, int level)
{
    hvm_vioapic_t *s = &(d->arch.hvm_domain.vioapic);

    if (!hvm_apic_support(d))
        return;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_set_irq "
                "irq %x level %x\n", irq, level);

    if (irq < 0 || irq >= IOAPIC_NUM_PINS) {
        printk("ioapic_set_irq irq %x is illegal\n", irq);
        domain_crash_synchronous();
    }

    if (!IOAPICEnabled(s) || s->redirtbl[irq].RedirForm.mask)
        return;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "hvm_vioapic_set_irq entry %x "
                "vector %x deliver_mode %x destmode %x delivestatus %x "
                "polarity %x remote_irr %x trigmod %x mask %x dest_id %x\n",
                irq,
                s->redirtbl[irq].RedirForm.vector,
                s->redirtbl[irq].RedirForm.deliver_mode,
                s->redirtbl[irq].RedirForm.destmode,
                s->redirtbl[irq].RedirForm.delivestatus,
                s->redirtbl[irq].RedirForm.polarity,
                s->redirtbl[irq].RedirForm.remoteirr,
                s->redirtbl[irq].RedirForm.trigmod,
                s->redirtbl[irq].RedirForm.mask,
                s->redirtbl[irq].RedirForm.dest_id);

    if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
        uint32_t bit = 1 << irq;
        if (s->redirtbl[irq].RedirForm.trigmod == IOAPIC_LEVEL_TRIGGER) {
            if (level)
                s->irr |= bit;
            else
                s->irr &= ~bit;
        } else {
            if (level)
                /* XXX No irr clear for edge interrupt */
                s->irr |= bit;
        }
    }

    service_ioapic(s);
}

/* XXX If level interrupt, use vector->irq table for performance */
static int get_redir_num(hvm_vioapic_t *s, int vector)
{
    int i = 0;

    ASSERT(s);

    for (i = 0; i < IOAPIC_NUM_PINS; i++) {
        if (s->redirtbl[i].RedirForm.vector == vector)
            return i;
    }

    return -1;
}

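/*
 * EOI from a local APIC for a level-triggered interrupt: find the pin
 * that delivered this vector and clear its in-service bit so the line
 * can be delivered again if it is still asserted.
 */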
void ioapic_update_EOI(struct domain *d, int vector)
{
    hvm_vioapic_t *s = &(d->arch.hvm_domain.vioapic);
    int redir_num;

    if ((redir_num = get_redir_num(s, vector)) == -1) {
        printk("Can't find redir item for %d EOI\n", vector);
        return;
    }

    if (!test_and_clear_bit(redir_num, &s->isr)) {
        printk("redir %d not set for %d EOI\n", redir_num, vector);
        return;
    }
}

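/* Called as each VCPU's local APIC is created; lapic_info[] is indexed by
 * vcpu_id, so registration must happen in vcpu_id order. */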
int hvm_vioapic_add_lapic(struct vlapic *vlapic, struct vcpu *v)
{
    hvm_vioapic_t *s = &(v->domain->arch.hvm_domain.vioapic);

    if (v->vcpu_id != s->lapic_count) {
        printk("hvm_vioapic_add_lapic "
               "vcpu_id %x does not match lapic_count %x\n",
               v->vcpu_id, s->lapic_count);
        domain_crash_synchronous();
    }

    /* Update the count only after the slot is filled, to avoid racing
     * with concurrent interrupt delivery. */
    s->lapic_info[s->lapic_count] = vlapic;
    s->lapic_count++;

    return s->lapic_count;
}

hvm_vioapic_t * hvm_vioapic_init(struct domain *d)
{
    int i = 0;
    hvm_vioapic_t *s = &(d->arch.hvm_domain.vioapic);

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "hvm_vioapic_init\n");

    hvm_vioapic_reset(s);

    s->domain = d;

    for (i = 0; i < MAX_LAPIC_NUM; i++)
        s->lapic_info[i] = NULL;

    /* Remove after GFW ready */
    ioapic_update_config(s, IOAPIC_DEFAULT_BASE_ADDRESS, 1);

    return s;
}