direct-io.hg: view xen/arch/x86/hvm/vioapic.c @ 15388:50358c4b37f4

hvm: Support injection of virtual NMIs and clean up ExtInt handling in general.
Signed-off-by: Keir Fraser <keir@xensource.com>

author   kfraser@localhost.localdomain
date     Wed Jun 20 11:50:16 2007 +0100
parents  cb006eecd6f5
children d7e3224b661a

/*
 * Copyright (C) 2001 MandrakeSoft S.A.
 *
 *   MandrakeSoft S.A.
 *   43, rue d'Aboukir
 *   75002 Paris - France
 *   http://www.linux-mandrake.com/
 *   http://www.mandrakesoft.com/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Yunhong Jiang <yunhong.jiang@intel.com>
 * Ported to xen by using virtual IRQ line.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/xmalloc.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <public/hvm/ioreq.h>
#include <asm/hvm/io.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>
#include <asm/hvm/support.h>
#include <asm/current.h>
#include <asm/event.h>

/* HACK: Route IRQ0 only to VCPU0 to prevent time jumps. */
#define IRQ0_SPECIAL_ROUTING 1

#if defined(__ia64__)
#define opt_hvm_debug_level opt_vmx_debug_level
#endif

static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq);
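
/*
 * Read the register currently selected by IOREGSEL: the version, APIC ID
 * and arbitration ID registers, or one 32-bit half of a redirection table
 * entry (even window offsets select the low word, odd the high word).
 */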
static unsigned long vioapic_read_indirect(struct hvm_hw_vioapic *vioapic,
                                           unsigned long addr,
                                           unsigned long length)
{
    unsigned long result = 0;

    switch ( vioapic->ioregsel )
    {
    case VIOAPIC_REG_VERSION:
        result = ((((VIOAPIC_NUM_PINS-1) & 0xff) << 16)
                  | (VIOAPIC_VERSION_ID & 0xff));
        break;

#if !VIOAPIC_IS_IOSAPIC
    case VIOAPIC_REG_APIC_ID:
    case VIOAPIC_REG_ARB_ID:
        result = ((vioapic->id & 0xf) << 24);
        break;
#endif

    default:
    {
        uint32_t redir_index = (vioapic->ioregsel - 0x10) >> 1;
        uint64_t redir_content;

        if ( redir_index >= VIOAPIC_NUM_PINS )
        {
            gdprintk(XENLOG_WARNING, "apic_mem_readl:undefined ioregsel %x\n",
                     vioapic->ioregsel);
            break;
        }

        redir_content = vioapic->redirtbl[redir_index].bits;
        result = (vioapic->ioregsel & 0x1) ?
            (redir_content >> 32) & 0xffffffff :
            redir_content & 0xffffffff;
        break;
    }
    }

    return result;
}
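
/* MMIO read handler: dispatch on the register at offset addr & 0xff. */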
static unsigned long vioapic_read(struct vcpu *v,
                                  unsigned long addr,
                                  unsigned long length)
{
    struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain);
    uint32_t result;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "addr %lx", addr);

    addr &= 0xff;

    switch ( addr )
    {
    case VIOAPIC_REG_SELECT:
        result = vioapic->ioregsel;
        break;

    case VIOAPIC_REG_WINDOW:
        result = vioapic_read_indirect(vioapic, addr, length);
        break;

    default:
        result = 0;
        break;
    }

    return result;
}
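
/*
 * Update one 32-bit half of a redirection table entry, then deliver
 * immediately if the (level-triggered) line is asserted, unmasked and not
 * already awaiting an EOI.
 */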
static void vioapic_write_redirent(
    struct hvm_hw_vioapic *vioapic, unsigned int idx, int top_word, uint32_t val)
{
    struct domain *d = vioapic_domain(vioapic);
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    union vioapic_redir_entry *pent, ent;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    pent = &vioapic->redirtbl[idx];
    ent  = *pent;

    if ( top_word )
    {
        /* Contains only the dest_id. */
        ent.bits = (uint32_t)ent.bits | ((uint64_t)val << 32);
    }
    else
    {
        /* Remote IRR and Delivery Status are read-only. */
        ent.bits = ((ent.bits >> 32) << 32) | val;
        ent.fields.delivery_status = 0;
        ent.fields.remote_irr = pent->fields.remote_irr;
    }

    *pent = ent;

    if ( (ent.fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
         !ent.fields.mask &&
         !ent.fields.remote_irr &&
         hvm_irq->gsi_assert_count[idx] )
    {
        pent->fields.remote_irr = 1;
        vioapic_deliver(vioapic, idx);
    }

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
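
/*
 * Write the register selected by IOREGSEL; version and arbitration-ID
 * writes are ignored, everything else maps onto the redirection table.
 */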
static void vioapic_write_indirect(
    struct hvm_hw_vioapic *vioapic, unsigned long addr,
    unsigned long length, unsigned long val)
{
    switch ( vioapic->ioregsel )
    {
    case VIOAPIC_REG_VERSION:
        /* Writes are ignored. */
        break;

#if !VIOAPIC_IS_IOSAPIC
    case VIOAPIC_REG_APIC_ID:
        vioapic->id = (val >> 24) & 0xf;
        break;

    case VIOAPIC_REG_ARB_ID:
        break;
#endif

    default:
    {
        uint32_t redir_index = (vioapic->ioregsel - 0x10) >> 1;

        HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "change redir index %x val %lx",
                    redir_index, val);

        if ( redir_index >= VIOAPIC_NUM_PINS )
        {
            gdprintk(XENLOG_WARNING, "vioapic_write_indirect "
                     "error register %x\n", vioapic->ioregsel);
            break;
        }

        vioapic_write_redirent(
            vioapic, redir_index, vioapic->ioregsel & 1, val);
        break;
    }
    }
}
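
/* MMIO write handler: IOREGSEL select, window data, and (IOSAPIC) EOI. */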
static void vioapic_write(struct vcpu *v,
                          unsigned long addr,
                          unsigned long length,
                          unsigned long val)
{
    struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain);

    addr &= 0xff;

    switch ( addr )
    {
    case VIOAPIC_REG_SELECT:
        vioapic->ioregsel = val;
        break;

    case VIOAPIC_REG_WINDOW:
        vioapic_write_indirect(vioapic, addr, length, val);
        break;

#if VIOAPIC_IS_IOSAPIC
    case VIOAPIC_REG_EOI:
        vioapic_update_EOI(v->domain, val);
        break;
#endif

    default:
        break;
    }
}
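
/* Does addr fall within this domain's vIOAPIC MMIO window? */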
static int vioapic_range(struct vcpu *v, unsigned long addr)
{
    struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain);

    return ((addr >= vioapic->base_address &&
             (addr < vioapic->base_address + VIOAPIC_MEM_LENGTH)));
}

struct hvm_mmio_handler vioapic_mmio_handler = {
    .check_handler = vioapic_range,
    .read_handler = vioapic_read,
    .write_handler = vioapic_write
};
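
/* Inject the vector into the target local APIC, kicking its VCPU if the
 * IRR bit was newly set. */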
static void ioapic_inj_irq(
    struct hvm_hw_vioapic *vioapic,
    struct vlapic *target,
    uint8_t vector,
    uint8_t trig_mode,
    uint8_t delivery_mode)
{
    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %d trig %d deliv %d",
                vector, trig_mode, delivery_mode);

    ASSERT((delivery_mode == dest_Fixed) ||
           (delivery_mode == dest_LowestPrio));

    if ( vlapic_set_irq(target, vector, trig_mode) )
        vcpu_kick(vlapic_vcpu(target));
}
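
/*
 * Translate a destination ID / destination mode pair into a bitmap of
 * target VCPUs, handling physical-mode broadcast (0xFF) and logical-mode
 * address matching.
 */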
static uint32_t ioapic_get_delivery_bitmask(
    struct hvm_hw_vioapic *vioapic, uint16_t dest, uint8_t dest_mode)
{
    uint32_t mask = 0;
    struct vcpu *v;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "dest %d dest_mode %d",
                dest, dest_mode);

    if ( dest_mode == 0 ) /* Physical mode. */
    {
        if ( dest == 0xFF ) /* Broadcast. */
        {
            for_each_vcpu ( vioapic_domain(vioapic), v )
                mask |= 1 << v->vcpu_id;
            goto out;
        }

        for_each_vcpu ( vioapic_domain(vioapic), v )
        {
            if ( VLAPIC_ID(vcpu_vlapic(v)) == dest )
            {
                mask = 1 << v->vcpu_id;
                break;
            }
        }
    }
    else if ( dest != 0 ) /* Logical mode, MDA non-zero. */
    {
        for_each_vcpu ( vioapic_domain(vioapic), v )
            if ( vlapic_match_logical_addr(vcpu_vlapic(v), dest) )
                mask |= 1 << v->vcpu_id;
    }

 out:
    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "mask %x", mask);
    return mask;
}
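
/* Is channel 0 of the current domain's virtual PIT generating ticks? */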
static inline int pit_channel0_enabled(void)
{
    PITState *pit = &current->domain->arch.hvm_domain.pl_time.vpit;
    struct periodic_time *pt = &pit->pt[0];
    return pt->enabled;
}
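
/*
 * Deliver an asserted GSI according to its redirection entry. The caller
 * must hold the domain's irq_lock. With IRQ0_SPECIAL_ROUTING, PIT timer
 * interrupts are steered to VCPU0 only, to prevent guest time jumps.
 */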
static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq)
{
    uint16_t dest = vioapic->redirtbl[irq].fields.dest_id;
    uint8_t dest_mode = vioapic->redirtbl[irq].fields.dest_mode;
    uint8_t delivery_mode = vioapic->redirtbl[irq].fields.delivery_mode;
    uint8_t vector = vioapic->redirtbl[irq].fields.vector;
    uint8_t trig_mode = vioapic->redirtbl[irq].fields.trig_mode;
    uint32_t deliver_bitmask;
    struct vlapic *target;
    struct vcpu *v;

    ASSERT(spin_is_locked(&vioapic_domain(vioapic)->arch.hvm_domain.irq_lock));

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
                "dest=%x dest_mode=%x delivery_mode=%x "
                "vector=%x trig_mode=%x",
                dest, dest_mode, delivery_mode, vector, trig_mode);

    deliver_bitmask = ioapic_get_delivery_bitmask(vioapic, dest, dest_mode);
    if ( !deliver_bitmask )
    {
        HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "no target on destination");
        return;
    }

    switch ( delivery_mode )
    {
    case dest_LowestPrio:
    {
#ifdef IRQ0_SPECIAL_ROUTING
        /* Force round-robin to pick VCPU 0 */
        if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
        {
            v = vioapic_domain(vioapic)->vcpu[0];
            target = v ? vcpu_vlapic(v) : NULL;
        }
        else
#endif
            target = apic_round_robin(vioapic_domain(vioapic),
                                      vector, deliver_bitmask);
        if ( target != NULL )
        {
            ioapic_inj_irq(vioapic, target, vector, trig_mode, delivery_mode);
        }
        else
        {
            HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
                        "mask=%x vector=%x delivery_mode=%x",
                        deliver_bitmask, vector, dest_LowestPrio);
        }
        break;
    }

    case dest_Fixed:
    {
        uint8_t bit;
        for ( bit = 0; deliver_bitmask != 0; bit++ )
        {
            if ( !(deliver_bitmask & (1 << bit)) )
                continue;
            deliver_bitmask &= ~(1 << bit);
#ifdef IRQ0_SPECIAL_ROUTING
            /* Do not deliver timer interrupts to VCPU != 0 */
            if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
                v = vioapic_domain(vioapic)->vcpu[0];
            else
#endif
                v = vioapic_domain(vioapic)->vcpu[bit];
            if ( v != NULL )
            {
                target = vcpu_vlapic(v);
                ioapic_inj_irq(vioapic, target, vector,
                               trig_mode, delivery_mode);
            }
        }
        break;
    }

    case dest_NMI:
    {
        uint8_t bit;
        for ( bit = 0; deliver_bitmask != 0; bit++ )
        {
            if ( !(deliver_bitmask & (1 << bit)) )
                continue;
            deliver_bitmask &= ~(1 << bit);
            if ( ((v = vioapic_domain(vioapic)->vcpu[bit]) != NULL) &&
                 !test_and_set_bool(v->arch.hvm_vcpu.nmi_pending) )
                vcpu_kick(v);
        }
        break;
    }

    default:
        gdprintk(XENLOG_WARNING, "Unsupported delivery mode %d\n",
                 delivery_mode);
        break;
    }
}
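
/*
 * A GSI line has gone high. Edge-triggered pins deliver immediately;
 * level-triggered pins deliver only if Remote IRR is clear, and then hold
 * it set until the guest EOIs the vector.
 */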
void vioapic_irq_positive_edge(struct domain *d, unsigned int irq)
{
    struct hvm_hw_vioapic *vioapic = domain_vioapic(d);
    union vioapic_redir_entry *ent;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %x", irq);

    ASSERT(irq < VIOAPIC_NUM_PINS);
    ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));

    ent = &vioapic->redirtbl[irq];
    if ( ent->fields.mask )
        return;

    if ( ent->fields.trig_mode == VIOAPIC_EDGE_TRIG )
    {
        vioapic_deliver(vioapic, irq);
    }
    else if ( !ent->fields.remote_irr )
    {
        ent->fields.remote_irr = 1;
        vioapic_deliver(vioapic, irq);
    }
}
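
/* Find the GSI whose redirection entry carries this vector (-1 if none). */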
static int get_eoi_gsi(struct hvm_hw_vioapic *vioapic, int vector)
{
    int i;

    for ( i = 0; i < VIOAPIC_NUM_PINS; i++ )
        if ( vioapic->redirtbl[i].fields.vector == vector )
            return i;

    return -1;
}
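
/*
 * The guest has EOIed this vector: clear Remote IRR and, if the
 * level-triggered line is still asserted and unmasked, deliver it again.
 */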
void vioapic_update_EOI(struct domain *d, int vector)
{
    struct hvm_hw_vioapic *vioapic = domain_vioapic(d);
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    union vioapic_redir_entry *ent;
    int gsi;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( (gsi = get_eoi_gsi(vioapic, vector)) == -1 )
    {
        gdprintk(XENLOG_WARNING, "Can't find redir item for %d EOI\n", vector);
        goto out;
    }

    ent = &vioapic->redirtbl[gsi];

    ent->fields.remote_irr = 0;
    if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
         !ent->fields.mask &&
         hvm_irq->gsi_assert_count[gsi] )
    {
        ent->fields.remote_irr = 1;
        vioapic_deliver(vioapic, gsi);
    }

 out:
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
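
/* Dump vIOAPIC state for save/restore debugging; a no-op unless
 * HVM_DEBUG_SUSPEND is defined. */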
#ifdef HVM_DEBUG_SUSPEND
static void ioapic_info(struct hvm_hw_vioapic *s)
{
    int i;
    printk("*****ioapic state:*****\n");
    printk("ioapic 0x%x.\n", s->ioregsel);
    printk("ioapic 0x%x.\n", s->id);
    printk("ioapic 0x%lx.\n", s->base_address);
    for (i = 0; i < VIOAPIC_NUM_PINS; i++) {
        printk("ioapic redirtbl[%d]:0x%"PRIx64"\n", i, s->redirtbl[i].bits);
    }
}
#else
static void ioapic_info(struct hvm_hw_vioapic *s)
{
}
#endif
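
/* Save/restore hooks: the vIOAPIC is a single per-domain state record. */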
static int ioapic_save(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_vioapic *s = domain_vioapic(d);
    ioapic_info(s);

    /* Save io-apic state. */
    return ( hvm_save_entry(IOAPIC, 0, h, s) );
}

static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_vioapic *s = domain_vioapic(d);

    /* Restore ioapic state. */
    if ( hvm_load_entry(IOAPIC, h, s) != 0 )
        return -EINVAL;

    ioapic_info(s);
    return 0;
}

HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
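
/* Reset the vIOAPIC: all pins masked, base at the default physical address. */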
void vioapic_init(struct domain *d)
{
    struct hvm_hw_vioapic *vioapic = domain_vioapic(d);
    int i;

    memset(vioapic, 0, sizeof(*vioapic));
    for ( i = 0; i < VIOAPIC_NUM_PINS; i++ )
        vioapic->redirtbl[i].fields.mask = 1;
    vioapic->base_address = VIOAPIC_DEFAULT_BASE_ADDRESS;
}