direct-io.hg

view xen/arch/x86/hvm/vioapic.c @ 12309:6085f1e5366f

[HVM] Avoid a long delay between setting an IRQ for PV drivers and
servicing it, by calling service_ioapic() synchronously.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Nov 09 09:04:41 2006 +0000 (2006-11-09)
parents 6555ca56d844
children 452010ddef24
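
(Per the description above, the change lands at the end of vioapic_set_xen_irq()
below, which finishes with a synchronous service_ioapic() call so a pending
interrupt is delivered before the call returns; an illustrative caller sketch
follows that function.)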
/*
 * Copyright (C) 2001 MandrakeSoft S.A.
 *
 * MandrakeSoft S.A.
 * 43, rue d'Aboukir
 * 75002 Paris - France
 * http://www.linux-mandrake.com/
 * http://www.mandrakesoft.com/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Yunhong Jiang <yunhong.jiang@intel.com>
 * Ported to Xen using a virtual IRQ line.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/xmalloc.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <public/hvm/ioreq.h>
#include <asm/hvm/io.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/support.h>
#include <asm/current.h>
#include <asm/event.h>

/* HACK: Route IRQ0 only to VCPU0 to prevent time jumps. */
#define IRQ0_SPECIAL_ROUTING 1
#ifdef IRQ0_SPECIAL_ROUTING
static int redir_warning_done = 0;
#endif

#if defined(__ia64__)
#define opt_hvm_debug_level opt_vmx_debug_level
#endif

/*
 * Indirect register read: return the register selected by IOREGSEL.
 * Redirection-table entries are 64 bits wide and accessed as two 32-bit
 * halves; an odd IOREGSEL value selects the high half.
 */
static unsigned long vioapic_read_indirect(struct vioapic *vioapic,
                                           unsigned long addr,
                                           unsigned long length)
{
    unsigned long result = 0;

    switch ( vioapic->ioregsel )
    {
    case VIOAPIC_REG_VERSION:
        result = ((((VIOAPIC_NUM_PINS-1) & 0xff) << 16)
                  | (VIOAPIC_VERSION_ID & 0xff));
        break;

#if !VIOAPIC_IS_IOSAPIC
    case VIOAPIC_REG_APIC_ID:
    case VIOAPIC_REG_ARB_ID:
        result = ((vioapic->id & 0xf) << 24);
        break;
#endif

    default:
    {
        uint32_t redir_index = (vioapic->ioregsel - 0x10) >> 1;
        uint64_t redir_content;

        if ( redir_index >= VIOAPIC_NUM_PINS )
        {
            gdprintk(XENLOG_WARNING, "apic_mem_readl: undefined ioregsel %x\n",
                     vioapic->ioregsel);
            break;
        }

        redir_content = vioapic->redirtbl[redir_index].bits;
        result = (vioapic->ioregsel & 0x1) ?
                 (redir_content >> 32) & 0xffffffff :
                 redir_content & 0xffffffff;
        break;
    }
    }

    return result;
}

/* MMIO read handler: REG_SELECT is read directly, the window indirectly. */
static unsigned long vioapic_read(struct vcpu *v,
                                  unsigned long addr,
                                  unsigned long length)
{
    struct vioapic *vioapic = domain_vioapic(v->domain);
    uint32_t result;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vioapic_read addr %lx\n", addr);

    addr &= 0xff;

    switch ( addr )
    {
    case VIOAPIC_REG_SELECT:
        result = vioapic->ioregsel;
        break;

    case VIOAPIC_REG_WINDOW:
        result = vioapic_read_indirect(vioapic, addr, length);
        break;

    default:
        result = 0;
        break;
    }

    return result;
}

/* Mirror a pin's redirection-entry mask bit into the cached imr bitmap. */
static void vioapic_update_imr(struct vioapic *vioapic, int index)
{
    if ( vioapic->redirtbl[index].fields.mask )
        set_bit(index, &vioapic->imr);
    else
        clear_bit(index, &vioapic->imr);
}

/*
 * Indirect register write: update the register selected by IOREGSEL.
 * A write to a redirection-table entry replaces one 32-bit half and
 * refreshes the cached mask bit.
 */
static void vioapic_write_indirect(struct vioapic *vioapic,
                                   unsigned long addr,
                                   unsigned long length,
                                   unsigned long val)
{
    switch ( vioapic->ioregsel )
    {
    case VIOAPIC_REG_VERSION:
        /* Writes are ignored. */
        break;

#if !VIOAPIC_IS_IOSAPIC
    case VIOAPIC_REG_APIC_ID:
        vioapic->id = (val >> 24) & 0xf;
        break;

    case VIOAPIC_REG_ARB_ID:
        break;
#endif

    default:
    {
        uint32_t redir_index = (vioapic->ioregsel - 0x10) >> 1;
        uint64_t redir_content;

        HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vioapic_write_indirect "
                    "change redir index %x val %lx\n",
                    redir_index, val);

        if ( redir_index >= VIOAPIC_NUM_PINS )
        {
            gdprintk(XENLOG_WARNING, "vioapic_write_indirect: "
                     "invalid ioregsel %x\n", vioapic->ioregsel);
            break;
        }

        redir_content = vioapic->redirtbl[redir_index].bits;

        if ( vioapic->ioregsel & 0x1 )
        {
#ifdef IRQ0_SPECIAL_ROUTING
            if ( !redir_warning_done && (redir_index == 0) &&
                 ((val >> 24) != 0) )
            {
                /*
                 * Cannot yet handle delivering PIT interrupts to any VCPU !=
                 * 0. Needs proper fixing, but for now simply spit a warning
                 * that we're going to ignore the target in practice and
                 * always deliver to VCPU 0.
                 */
                printk("IO-APIC: PIT (IRQ0) redirect to VCPU %lx "
                       "will be ignored.\n", val >> 24);
                redir_warning_done = 1;
            }
#endif
            redir_content = (((uint64_t)val & 0xffffffff) << 32) |
                            (redir_content & 0xffffffff);
        }
        else
        {
            redir_content = ((redir_content >> 32) << 32) |
                            (val & 0xffffffff);
        }
        vioapic->redirtbl[redir_index].bits = redir_content;
        vioapic_update_imr(vioapic, redir_index);
        break;
    }
    } /* switch */
}

/* MMIO write handler: mirrors vioapic_read(). */
static void vioapic_write(struct vcpu *v,
                          unsigned long addr,
                          unsigned long length,
                          unsigned long val)
{
    struct vioapic *vioapic = domain_vioapic(v->domain);

    addr &= 0xff;

    switch ( addr )
    {
    case VIOAPIC_REG_SELECT:
        vioapic->ioregsel = val;
        break;

    case VIOAPIC_REG_WINDOW:
        vioapic_write_indirect(vioapic, addr, length, val);
        break;

#if VIOAPIC_IS_IOSAPIC
    case VIOAPIC_REG_EOI:
        vioapic_update_EOI(v->domain, val);
        break;
#endif

    default:
        break;
    }
}

static int vioapic_range(struct vcpu *v, unsigned long addr)
{
    struct vioapic *vioapic = domain_vioapic(v->domain);

    return ((addr >= vioapic->base_address) &&
            (addr < vioapic->base_address + VIOAPIC_MEM_LENGTH));
}

struct hvm_mmio_handler vioapic_mmio_handler = {
    .check_handler = vioapic_range,
    .read_handler = vioapic_read,
    .write_handler = vioapic_write
};

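/*
 * For context: a minimal sketch of how an MMIO dispatcher might consult
 * the handler triple above. The function below is hypothetical and not
 * part of this file; Xen's real dispatch logic lives in the generic HVM
 * MMIO intercept code and may differ.
 */
#if 0
static unsigned long example_mmio_read(struct vcpu *v, unsigned long addr,
                                       unsigned long length)
{
    /* Ask the handler whether it claims this address... */
    if ( vioapic_mmio_handler.check_handler(v, addr) )
        /* ...and if so, let it satisfy the read. */
        return vioapic_mmio_handler.read_handler(v, addr, length);
    return ~0UL; /* unclaimed: read as all-ones, like unbacked MMIO */
}
#endif
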
/* Reset the vIOAPIC: clear all state and mask every pin. */
static void vioapic_reset(struct vioapic *vioapic)
{
    int i;

    memset(vioapic, 0, sizeof(*vioapic));

    for ( i = 0; i < VIOAPIC_NUM_PINS; i++ )
    {
        vioapic->redirtbl[i].fields.mask = 0x1;
        vioapic_update_imr(vioapic, i);
    }
}

/* Inject a vector into the target VLAPIC; returns 1 on delivery. */
static int ioapic_inj_irq(struct vioapic *vioapic,
                          struct vlapic *target,
                          uint8_t vector,
                          uint8_t trig_mode,
                          uint8_t delivery_mode)
{
    int result = 0;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_inj_irq "
                "irq %d trig %d delivery mode %d\n",
                vector, trig_mode, delivery_mode);

    switch ( delivery_mode )
    {
    case dest_Fixed:
    case dest_LowestPrio:
        if ( vlapic_set_irq(target, vector, trig_mode) && (trig_mode == 1) )
            gdprintk(XENLOG_WARNING,
                     "level interrupt re-injected before being cleared\n");
        result = 1;
        break;
    default:
        gdprintk(XENLOG_WARNING, "unsupported delivery mode %d\n",
                 delivery_mode);
        break;
    }

    return result;
}

#ifndef __ia64__
/* Does @dest (a logical-mode MDA) match this VLAPIC's LDR under its DFR? */
static int vlapic_match_logical_addr(struct vlapic *vlapic, uint8_t dest)
{
    int result = 0;
    uint32_t logical_dest;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vlapic_match_logical_addr "
                "vcpu=%d vlapic_id=%x dest=%x\n",
                vlapic_vcpu(vlapic)->vcpu_id, VLAPIC_ID(vlapic), dest);

    logical_dest = vlapic_get_reg(vlapic, APIC_LDR);

    switch ( vlapic_get_reg(vlapic, APIC_DFR) )
    {
    case APIC_DFR_FLAT:
        result = ((dest & GET_APIC_LOGICAL_ID(logical_dest)) != 0);
        break;
    case APIC_DFR_CLUSTER:
        /* Should we support flat cluster mode? */
        if ( (GET_APIC_LOGICAL_ID(logical_dest) >> 4
              == ((dest >> 0x4) & 0xf)) &&
             (logical_dest & (dest & 0xf)) )
            result = 1;
        break;
    default:
        gdprintk(XENLOG_WARNING, "invalid DFR value for LAPIC of vcpu %d\n",
                 vlapic_vcpu(vlapic)->vcpu_id);
        break;
    }

    return result;
}
#else
extern int vlapic_match_logical_addr(struct vlapic *vlapic, uint16_t dest);
#endif

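/*
 * Worked example (flat mode): if the LDR's logical ID byte is 0x02, an
 * MDA of 0x03 matches (0x03 & 0x02 != 0) while 0x01 does not. In cluster
 * mode the high nibbles (cluster IDs) must be equal and the member bits
 * must intersect.
 */
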
/* Compute the set of target VCPUs (as a bitmap) for @dest/@dest_mode. */
static uint32_t ioapic_get_delivery_bitmask(struct vioapic *vioapic,
                                            uint16_t dest,
                                            uint8_t dest_mode,
                                            uint8_t vector,
                                            uint8_t delivery_mode)
{
    uint32_t mask = 0;
    struct vcpu *v;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_get_delivery_bitmask "
                "dest %d dest_mode %d vector %d del_mode %d\n",
                dest, dest_mode, vector, delivery_mode);

    if ( dest_mode == 0 ) /* Physical mode. */
    {
        if ( dest == 0xFF ) /* Broadcast. */
        {
            for_each_vcpu ( vioapic_domain(vioapic), v )
                mask |= 1 << v->vcpu_id;
            goto out;
        }

        for_each_vcpu ( vioapic_domain(vioapic), v )
        {
            if ( VLAPIC_ID(vcpu_vlapic(v)) == dest )
            {
                mask = 1 << v->vcpu_id;
                break;
            }
        }
    }
    else if ( dest != 0 ) /* Logical mode, MDA non-zero. */
    {
        for_each_vcpu ( vioapic_domain(vioapic), v )
            if ( vlapic_match_logical_addr(vcpu_vlapic(v), dest) )
                mask |= 1 << v->vcpu_id;
    }

 out:
    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_get_delivery_bitmask "
                "mask %x\n", mask);
    return mask;
}

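/*
 * Worked example: in a 4-VCPU domain, physical mode with dest == 0xFF
 * returns 0xF (broadcast), while dest == 2 returns only the bit of the
 * VCPU whose VLAPIC ID is 2 (e.g. 0x4). In logical mode every VCPU whose
 * LDR matches the MDA contributes its bit, so several bits may be set.
 */
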
/* Deliver a pin's interrupt according to its redirection-table entry. */
static void ioapic_deliver(struct vioapic *vioapic, int irq)
{
    uint16_t dest = vioapic->redirtbl[irq].fields.dest_id;
    uint8_t dest_mode = vioapic->redirtbl[irq].fields.dest_mode;
    uint8_t delivery_mode = vioapic->redirtbl[irq].fields.delivery_mode;
    uint8_t vector = vioapic->redirtbl[irq].fields.vector;
    uint8_t trig_mode = vioapic->redirtbl[irq].fields.trig_mode;
    uint32_t deliver_bitmask;
    struct vlapic *target;
    struct vcpu *v;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
                "dest=%x dest_mode=%x delivery_mode=%x "
                "vector=%x trig_mode=%x\n",
                dest, dest_mode, delivery_mode, vector, trig_mode);

    deliver_bitmask = ioapic_get_delivery_bitmask(
        vioapic, dest, dest_mode, vector, delivery_mode);
    if ( !deliver_bitmask )
    {
        HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic deliver "
                    "no target on destination\n");
        return;
    }

    switch ( delivery_mode )
    {
    case dest_LowestPrio:
    {
#ifdef IRQ0_SPECIAL_ROUTING
        /* Force round-robin to pick VCPU 0. */
        if ( irq == 0 )
        {
            v = vioapic_domain(vioapic)->vcpu[0];
            target = v ? vcpu_vlapic(v) : NULL;
        }
        else
#endif
            target = apic_round_robin(vioapic_domain(vioapic), dest_mode,
                                      vector, deliver_bitmask);
        if ( target != NULL )
        {
            ioapic_inj_irq(vioapic, target, vector, trig_mode, delivery_mode);
            vcpu_kick(vlapic_vcpu(target));
        }
        else
        {
            HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
                        "mask=%x vector=%x delivery_mode=%x\n",
                        deliver_bitmask, vector, dest_LowestPrio);
        }
        break;
    }

    case dest_Fixed:
    case dest_ExtINT:
    {
        uint8_t bit;
        for ( bit = 0; deliver_bitmask != 0; bit++ )
        {
            if ( !(deliver_bitmask & (1 << bit)) )
                continue;
            deliver_bitmask &= ~(1 << bit);
#ifdef IRQ0_SPECIAL_ROUTING
            /* Do not deliver timer interrupts to VCPU != 0. */
            if ( (irq == 0) && (bit != 0) )
                v = vioapic_domain(vioapic)->vcpu[0];
            else
#endif
                v = vioapic_domain(vioapic)->vcpu[bit];
            if ( v != NULL )
            {
                target = vcpu_vlapic(v);
                ioapic_inj_irq(vioapic, target, vector,
                               trig_mode, delivery_mode);
                vcpu_kick(vlapic_vcpu(target));
            }
        }
        break;
    }

    case dest_SMI:
    case dest_NMI:
    case dest_INIT:
    case dest__reserved_2:
    default:
        gdprintk(XENLOG_WARNING, "Unsupported delivery mode %d\n",
                 delivery_mode);
        break;
    }
}

/* Highest pending, unmasked, not-in-service pin; -1 if there is none. */
static int ioapic_get_highest_irq(struct vioapic *vioapic)
{
    uint32_t irqs = vioapic->irr | vioapic->irr_xen;
    irqs &= ~vioapic->isr & ~vioapic->imr;
    return fls(irqs) - 1;
}

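/*
 * Worked example: with irr|irr_xen == 0x0A (pins 1 and 3 pending),
 * isr == 0x02 (pin 1 in service) and imr == 0, irqs == 0x08, so
 * fls(0x08) - 1 == 3 and pin 3 is serviced first. With no candidate
 * bits set, fls(0) - 1 == -1 terminates the service loop below.
 */
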
/* Deliver every serviceable pin, highest first, updating irr/isr. */
static void service_ioapic(struct vioapic *vioapic)
{
    int irq;

    while ( (irq = ioapic_get_highest_irq(vioapic)) != -1 )
    {
        HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "service_ioapic highest irq %x\n", irq);

        if ( !test_bit(irq, &vioapic->imr) )
            ioapic_deliver(vioapic, irq);

        if ( vioapic->redirtbl[irq].fields.trig_mode == VIOAPIC_LEVEL_TRIG )
            vioapic->isr |= (1 << irq);

        vioapic->irr &= ~(1 << irq);
        vioapic->irr_xen &= ~(1 << irq);
    }
}

/*
 * Assert or deassert a Xen-internal (PV driver) interrupt line, then
 * service it immediately (see the changeset description above).
 */
void vioapic_set_xen_irq(struct domain *d, int irq, int level)
{
    struct vioapic *vioapic = domain_vioapic(d);

    if ( vioapic->redirtbl[irq].fields.mask )
        return;

    if ( vioapic->redirtbl[irq].fields.trig_mode == VIOAPIC_EDGE_TRIG )
        gdprintk(XENLOG_WARNING, "Forcing edge-triggered APIC irq %d?\n", irq);

    if ( level )
        vioapic->irr_xen |= 1 << irq;
    else
        vioapic->irr_xen &= ~(1 << irq);

    service_ioapic(vioapic);
}

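/*
 * Illustrative caller (hypothetical, not from this changeset): a PV
 * driver event bound to a level-triggered pin might be signalled as
 * below. Because vioapic_set_xen_irq() ends with a synchronous
 * service_ioapic() call, the interrupt reaches the VLAPIC before either
 * call returns; that is the delay the changeset description refers to.
 */
#if 0
static void example_signal_pv_event(struct domain *d, int pin)
{
    vioapic_set_xen_irq(d, pin, 1);  /* assert: delivered synchronously */
    /* ... guest handles and EOIs the interrupt ... */
    vioapic_set_xen_irq(d, pin, 0);  /* deassert the line */
}
#endif
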
/* Assert or deassert a guest-visible interrupt line. */
void vioapic_set_irq(struct domain *d, int irq, int level)
{
    struct vioapic *vioapic = domain_vioapic(d);

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_set_irq "
                "irq %x level %x\n", irq, level);

    if ( (irq < 0) || (irq >= VIOAPIC_NUM_PINS) )
        return;

    if ( vioapic->redirtbl[irq].fields.mask )
        return;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vioapic_set_irq entry %x "
                "vector %x delivery_mode %x dest_mode %x delivery_status %x "
                "polarity %x remote_irr %x trig_mode %x mask %x dest_id %x\n",
                irq,
                vioapic->redirtbl[irq].fields.vector,
                vioapic->redirtbl[irq].fields.delivery_mode,
                vioapic->redirtbl[irq].fields.dest_mode,
                vioapic->redirtbl[irq].fields.delivery_status,
                vioapic->redirtbl[irq].fields.polarity,
                vioapic->redirtbl[irq].fields.remote_irr,
                vioapic->redirtbl[irq].fields.trig_mode,
                vioapic->redirtbl[irq].fields.mask,
                vioapic->redirtbl[irq].fields.dest_id);

    /* Range already validated above. */
    {
        uint32_t bit = 1 << irq;
        if ( vioapic->redirtbl[irq].fields.trig_mode == VIOAPIC_LEVEL_TRIG )
        {
            /* Level-triggered: irr tracks the line state. */
            if ( level )
                vioapic->irr |= bit;
            else
                vioapic->irr &= ~bit;
        }
        else
        {
            /* Edge-triggered: latch only the rising edge. */
            if ( level )
                /* XXX No irr clear for edge interrupt */
                vioapic->irr |= bit;
        }
    }

    service_ioapic(vioapic);
}

/* XXX If level interrupt, use a vector->irq table for performance. */
/* Map a vector back to the redirection-table entry that carries it. */
static int get_redir_num(struct vioapic *vioapic, int vector)
{
    int i;

    for ( i = 0; i < VIOAPIC_NUM_PINS; i++ )
        if ( vioapic->redirtbl[i].fields.vector == vector )
            return i;

    return -1;
}

/* Handle a guest EOI for @vector: take the corresponding pin out of service. */
void vioapic_update_EOI(struct domain *d, int vector)
{
    struct vioapic *vioapic = domain_vioapic(d);
    int redir_num;

    if ( (redir_num = get_redir_num(vioapic, vector)) == -1 )
    {
        gdprintk(XENLOG_WARNING, "Can't find redir item for %d EOI\n", vector);
        return;
    }

    if ( !test_and_clear_bit(redir_num, &vioapic->isr) )
    {
        gdprintk(XENLOG_WARNING, "redir %d not set for %d EOI\n",
                 redir_num, vector);
        return;
    }
}

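/*
 * Life cycle of a level-triggered interrupt through this file:
 *   1. vioapic_set_irq(d, irq, 1) sets the pin's irr bit and services it;
 *   2. service_ioapic() delivers the pin, setting isr and clearing irr;
 *   3. the guest's EOI arrives at vioapic_update_EOI(), clearing isr;
 *   4. a later vioapic_set_irq() with level == 1 can deliver it again.
 */
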
/* One-time initialisation at domain creation. */
void vioapic_init(struct domain *d)
{
    struct vioapic *vioapic = domain_vioapic(d);

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vioapic_init\n");

    vioapic_reset(vioapic);

    vioapic->base_address = VIOAPIC_DEFAULT_BASE_ADDRESS;
}