ia64/xen-unstable

view xen/arch/x86/hvm/io.c @ 10211:5be9e927533d

[HVM] Fix a bug in the emulation of the xchg instruction.

This bug has prevented us from booting fully virtualized SMP guests
that write to the APIC using the xchg instruction (when
CONFIG_X86_GOOD_APIC is not set). On 32-bit platforms, SLES 10 kernels
are built with CONFIG_X86_GOOD_APIC not set, and hence we have had
problems booting fully virtualized SMP SLES 10 guests.

Signed-off-by: K. Y. Srinivasan <ksrinivasan@novell.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue May 30 12:30:47 2006 +0100 (2006-05-30)
parents 5765497cf75e
children 7d37df6c3247
line source
1 /*
2 * io.c: Handling I/O and interrupts.
3 *
4 * Copyright (c) 2004, Intel Corporation.
5 * Copyright (c) 2005, International Business Machines Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
21 #include <xen/config.h>
22 #include <xen/init.h>
23 #include <xen/mm.h>
24 #include <xen/lib.h>
25 #include <xen/errno.h>
26 #include <xen/trace.h>
27 #include <xen/event.h>
29 #include <xen/hypercall.h>
30 #include <asm/current.h>
31 #include <asm/cpufeature.h>
32 #include <asm/processor.h>
33 #include <asm/msr.h>
34 #include <asm/apic.h>
35 #include <asm/shadow.h>
36 #include <asm/hvm/hvm.h>
37 #include <asm/hvm/support.h>
38 #include <asm/hvm/vpit.h>
39 #include <asm/hvm/vpic.h>
40 #include <asm/hvm/vlapic.h>
42 #include <public/sched.h>
43 #include <public/hvm/ioreq.h>
45 #if defined (__i386__)
/*
 * Write 'value' into the guest register selected by (size, index),
 * preserving the bytes of the register the operand does not cover.
 *
 * size:  BYTE, WORD or LONG operand size.
 * index: register number from the instruction encoding.
 *        BYTE:      0-3 select the low byte of eax/ecx/edx/ebx
 *                   (AL/CL/DL/BL); 4-7 select bits 8-15 of the same
 *                   registers (AH/CH/DH/BH).
 *        WORD/LONG: 0-7 select eax,ecx,edx,ebx,esp,ebp,esi,edi.
 * seg:   unused here; kept for interface symmetry with get_reg_value().
 *
 * Any invalid (size, index) combination crashes the domain.
 */
static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
{
    switch (size) {
    case BYTE:
        switch (index) {
        case 0:
            regs->eax &= 0xFFFFFF00;
            regs->eax |= (value & 0xFF);
            break;
        case 1:
            regs->ecx &= 0xFFFFFF00;
            regs->ecx |= (value & 0xFF);
            break;
        case 2:
            regs->edx &= 0xFFFFFF00;
            regs->edx |= (value & 0xFF);
            break;
        case 3:
            regs->ebx &= 0xFFFFFF00;
            regs->ebx |= (value & 0xFF);
            break;
        case 4:
            /* Indices 4-7: high-byte registers (AH/CH/DH/BH). */
            regs->eax &= 0xFFFF00FF;
            regs->eax |= ((value & 0xFF) << 8);
            break;
        case 5:
            regs->ecx &= 0xFFFF00FF;
            regs->ecx |= ((value & 0xFF) << 8);
            break;
        case 6:
            regs->edx &= 0xFFFF00FF;
            regs->edx |= ((value & 0xFF) << 8);
            break;
        case 7:
            regs->ebx &= 0xFFFF00FF;
            regs->ebx |= ((value & 0xFF) << 8);
            break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        break;
    case WORD:
        switch (index) {
        case 0:
            regs->eax &= 0xFFFF0000;
            regs->eax |= (value & 0xFFFF);
            break;
        case 1:
            regs->ecx &= 0xFFFF0000;
            regs->ecx |= (value & 0xFFFF);
            break;
        case 2:
            regs->edx &= 0xFFFF0000;
            regs->edx |= (value & 0xFFFF);
            break;
        case 3:
            regs->ebx &= 0xFFFF0000;
            regs->ebx |= (value & 0xFFFF);
            break;
        case 4:
            regs->esp &= 0xFFFF0000;
            regs->esp |= (value & 0xFFFF);
            break;
        case 5:
            regs->ebp &= 0xFFFF0000;
            regs->ebp |= (value & 0xFFFF);
            break;
        case 6:
            regs->esi &= 0xFFFF0000;
            regs->esi |= (value & 0xFFFF);
            break;
        case 7:
            regs->edi &= 0xFFFF0000;
            regs->edi |= (value & 0xFFFF);
            break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        break;
    case LONG:
        /* Full 32-bit write: the whole register is replaced. */
        switch (index) {
        case 0:
            regs->eax = value;
            break;
        case 1:
            regs->ecx = value;
            break;
        case 2:
            regs->edx = value;
            break;
        case 3:
            regs->ebx = value;
            break;
        case 4:
            regs->esp = value;
            break;
        case 5:
            regs->ebp = value;
            break;
        case 6:
            regs->esi = value;
            break;
        case 7:
            regs->edi = value;
            break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        break;
    default:
        printk("Error: size:%x, index:%x are invalid!\n", size, index);
        domain_crash_synchronous();
        break;
    }
}
167 #else
168 static inline void __set_reg_value(unsigned long *reg, int size, long value)
169 {
170 switch (size) {
171 case BYTE_64:
172 *reg &= ~0xFF;
173 *reg |= (value & 0xFF);
174 break;
175 case WORD:
176 *reg &= ~0xFFFF;
177 *reg |= (value & 0xFFFF);
178 break;
179 case LONG:
180 *reg &= ~0xFFFFFFFF;
181 *reg |= (value & 0xFFFFFFFF);
182 break;
183 case QUAD:
184 *reg = value;
185 break;
186 default:
187 printk("Error: <__set_reg_value>: size:%x is invalid\n", size);
188 domain_crash_synchronous();
189 }
190 }
/*
 * Write 'value' into the guest register selected by (size, index)
 * (x86-64 build), preserving the bytes the operand does not cover.
 *
 * BYTE operands are handled inline: indices 0-3 select the low byte of
 * rax/rcx/rdx/rbx, indices 4-7 select bits 8-15 of the same registers
 * (AH/CH/DH/BH).
 * NOTE(review): with a REX prefix, byte-register encodings 4-7 mean
 * SPL/BPL/SIL/DIL, not AH..BH — assumed the decoder never hands such
 * encodings to this path; verify against the MMIO decoder.
 *
 * All other sizes are delegated to __set_reg_value() on the register
 * selected by index 0-15 (rax..rdi, r8..r15).
 *
 * seg is unused here; kept for interface symmetry with get_reg_value().
 * Invalid (size, index) combinations crash the domain.
 */
static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
{
    if (size == BYTE) {
        switch (index) {
        case 0:
            regs->rax &= ~0xFF;
            regs->rax |= (value & 0xFF);
            break;
        case 1:
            regs->rcx &= ~0xFF;
            regs->rcx |= (value & 0xFF);
            break;
        case 2:
            regs->rdx &= ~0xFF;
            regs->rdx |= (value & 0xFF);
            break;
        case 3:
            regs->rbx &= ~0xFF;
            regs->rbx |= (value & 0xFF);
            break;
        case 4:
            /* High-byte registers: merge into bits 8-15. */
            regs->rax &= 0xFFFFFFFFFFFF00FF;
            regs->rax |= ((value & 0xFF) << 8);
            break;
        case 5:
            regs->rcx &= 0xFFFFFFFFFFFF00FF;
            regs->rcx |= ((value & 0xFF) << 8);
            break;
        case 6:
            regs->rdx &= 0xFFFFFFFFFFFF00FF;
            regs->rdx |= ((value & 0xFF) << 8);
            break;
        case 7:
            regs->rbx &= 0xFFFFFFFFFFFF00FF;
            regs->rbx |= ((value & 0xFF) << 8);
            break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        return;
    }

    switch (index) {
    case 0:
        __set_reg_value(&regs->rax, size, value);
        break;
    case 1:
        __set_reg_value(&regs->rcx, size, value);
        break;
    case 2:
        __set_reg_value(&regs->rdx, size, value);
        break;
    case 3:
        __set_reg_value(&regs->rbx, size, value);
        break;
    case 4:
        __set_reg_value(&regs->rsp, size, value);
        break;
    case 5:
        __set_reg_value(&regs->rbp, size, value);
        break;
    case 6:
        __set_reg_value(&regs->rsi, size, value);
        break;
    case 7:
        __set_reg_value(&regs->rdi, size, value);
        break;
    case 8:
        __set_reg_value(&regs->r8, size, value);
        break;
    case 9:
        __set_reg_value(&regs->r9, size, value);
        break;
    case 10:
        __set_reg_value(&regs->r10, size, value);
        break;
    case 11:
        __set_reg_value(&regs->r11, size, value);
        break;
    case 12:
        __set_reg_value(&regs->r12, size, value);
        break;
    case 13:
        __set_reg_value(&regs->r13, size, value);
        break;
    case 14:
        __set_reg_value(&regs->r14, size, value);
        break;
    case 15:
        __set_reg_value(&regs->r15, size, value);
        break;
    default:
        printk("Error: <set_reg_value> Invalid index\n");
        domain_crash_synchronous();
    }
    return;
}
291 #endif
293 extern long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs);
295 static inline void set_eflags_CF(int size, unsigned long v1,
296 unsigned long v2, struct cpu_user_regs *regs)
297 {
298 unsigned long mask = (1 << (8 * size)) - 1;
300 if ((v1 & mask) > (v2 & mask))
301 regs->eflags |= X86_EFLAGS_CF;
302 else
303 regs->eflags &= ~X86_EFLAGS_CF;
304 }
306 static inline void set_eflags_OF(int size, unsigned long v1,
307 unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
308 {
309 if ((v3 ^ v2) & (v3 ^ v1) & (1 << ((8 * size) - 1)))
310 regs->eflags |= X86_EFLAGS_OF;
311 }
313 static inline void set_eflags_AF(int size, unsigned long v1,
314 unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
315 {
316 if ((v1 ^ v2 ^ v3) & 0x10)
317 regs->eflags |= X86_EFLAGS_AF;
318 }
320 static inline void set_eflags_ZF(int size, unsigned long v1,
321 struct cpu_user_regs *regs)
322 {
323 unsigned long mask = (1 << (8 * size)) - 1;
325 if ((v1 & mask) == 0)
326 regs->eflags |= X86_EFLAGS_ZF;
327 }
329 static inline void set_eflags_SF(int size, unsigned long v1,
330 struct cpu_user_regs *regs)
331 {
332 if (v1 & (1 << ((8 * size) - 1)))
333 regs->eflags |= X86_EFLAGS_SF;
334 }
/*
 * parity_table[b] is 1 when byte value b has an even number of set bits
 * (x86 PF convention), 0 otherwise.  Read-only: declared const.
 */
static const char parity_table[256] = {
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
};
355 static inline void set_eflags_PF(int size, unsigned long v1,
356 struct cpu_user_regs *regs)
357 {
358 if (parity_table[v1 & 0xFF])
359 regs->eflags |= X86_EFLAGS_PF;
360 }
/*
 * Fold the result of a completed port-I/O request back into the guest
 * register state.
 *
 * String / buffered operations (p->pdata_valid or the OVERLAP flag):
 * advance the index register(s) by the amount the device model processed,
 * decrement ecx under REP, and for an OVERLAP read copy the buffered data
 * back into guest memory.  NOTE(review): OVERLAP appears to mark a
 * transfer that could not be mapped directly and was buffered in
 * p->u.data — confirm against the decoder that sets the flag.
 *
 * Plain IN: merge p->u.data into eax according to the port size.
 * An unknown port size crashes the domain.
 */
static void hvm_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
                           struct mmio_op *mmio_opp)
{
    unsigned long old_eax;
    int sign = p->df ? -1 : 1;  /* direction flag: -1 = decrementing */

    if ( p->pdata_valid || (mmio_opp->flags & OVERLAP) )
    {
        if ( mmio_opp->flags & REPZ )
            regs->ecx -= p->count;
        if ( p->dir == IOREQ_READ )
        {
            regs->edi += sign * p->count * p->size;
            if ( mmio_opp->flags & OVERLAP )
            {
                unsigned long addr = regs->edi;
                if (hvm_realmode(current))
                    addr += regs->es << 4;  /* real mode: es:di addressing */
                if (sign > 0)
                    /* edi was already advanced; step back to the last unit. */
                    addr -= p->size;
                hvm_copy(&p->u.data, addr, p->size, HVM_COPY_OUT);
            }
        }
        else /* p->dir == IOREQ_WRITE */
        {
            ASSERT(p->dir == IOREQ_WRITE);
            regs->esi += sign * p->count * p->size;
        }
    }
    else if ( p->dir == IOREQ_READ )
    {
        /* Single IN instruction: merge the result into eax by port size. */
        old_eax = regs->eax;
        switch ( p->size )
        {
        case 1:
            regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
            break;
        case 2:
            regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
            break;
        case 4:
            regs->eax = (p->u.data & 0xffffffff);
            break;
        default:
            printk("Error: %s unknown port size\n", __FUNCTION__);
            domain_crash_synchronous();
        }
    }
}
/*
 * Complete the emulation of a decoded MMIO instruction once the device
 * model has responded: fold the response (p->u.data) back into the guest
 * register state and recompute the arithmetic flags where the
 * instruction demands it.
 *
 * mmio_opp holds the decoded instruction kind, its two operand
 * descriptors (REGISTER / IMMEDIATE / MEMORY bits plus an index) and any
 * immediate.  The updated register state is pushed back to the VMCS/VMCB
 * via hvm_load_cpu_guest_regs() at the end.
 */
static void hvm_mmio_assist(struct vcpu *v, struct cpu_user_regs *regs,
                            ioreq_t *p, struct mmio_op *mmio_opp)
{
    int sign = p->df ? -1 : 1;  /* direction flag: -1 = decrementing */
    int size = -1, index = -1;
    unsigned long value = 0, diff = 0;
    unsigned long src, dst;

    src = mmio_opp->operand[0];
    dst = mmio_opp->operand[1];
    size = operand_size(src);

    switch (mmio_opp->instr) {
    case INSTR_MOV:
        /* Load from MMIO: only a register destination needs updating. */
        if (dst & REGISTER) {
            index = operand_index(dst);
            set_reg_value(size, index, 0, regs, p->u.data);
        }
        break;

    case INSTR_MOVZX:
        /* Zero-extend the loaded value to the destination operand size. */
        if (dst & REGISTER) {
            switch (size) {
            case BYTE:
                p->u.data &= 0xFFULL;
                break;

            case WORD:
                p->u.data &= 0xFFFFULL;
                break;

            case LONG:
                p->u.data &= 0xFFFFFFFFULL;
                break;

            default:
                printk("Impossible source operand size of movzx instr: %d\n", size);
                domain_crash_synchronous();
            }
            index = operand_index(dst);
            set_reg_value(operand_size(dst), index, 0, regs, p->u.data);
        }
        break;

    case INSTR_MOVSX:
        /* Sign-extend the loaded value to the destination operand size. */
        if (dst & REGISTER) {
            switch (size) {
            case BYTE:
                p->u.data &= 0xFFULL;
                if ( p->u.data & 0x80ULL )
                    p->u.data |= 0xFFFFFFFFFFFFFF00ULL;
                break;

            case WORD:
                p->u.data &= 0xFFFFULL;
                if ( p->u.data & 0x8000ULL )
                    p->u.data |= 0xFFFFFFFFFFFF0000ULL;
                break;

            case LONG:
                p->u.data &= 0xFFFFFFFFULL;
                if ( p->u.data & 0x80000000ULL )
                    p->u.data |= 0xFFFFFFFF00000000ULL;
                break;

            default:
                printk("Impossible source operand size of movsx instr: %d\n", size);
                domain_crash_synchronous();
            }
            index = operand_index(dst);
            set_reg_value(operand_size(dst), index, 0, regs, p->u.data);
        }
        break;

    case INSTR_MOVS:
        /* String move: advance both index registers by the work done. */
        sign = p->df ? -1 : 1;
        regs->esi += sign * p->count * p->size;
        regs->edi += sign * p->count * p->size;

        if ((mmio_opp->flags & OVERLAP) && p->dir == IOREQ_READ) {
            unsigned long addr = regs->edi;

            if (sign > 0)
                /* edi was already advanced; step back to the last unit. */
                addr -= p->size;
            hvm_copy(&p->u.data, addr, p->size, HVM_COPY_OUT);
        }

        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;
        break;

    case INSTR_STOS:
        /* Store string: only edi (and ecx under REP) advance. */
        sign = p->df ? -1 : 1;
        regs->edi += sign * p->count * p->size;
        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;
        break;

    case INSTR_AND:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data & value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data & value;
        } else if (src & MEMORY) {
            /* Memory source: the register destination takes the result. */
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data & value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_OR:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data | value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data | value;
        } else if (src & MEMORY) {
            /* Memory source: the register destination takes the result. */
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data | value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_XOR:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data ^ value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data ^ value;
        } else if (src & MEMORY) {
            /* Memory source: the register destination takes the result. */
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data ^ value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_CMP:
        /*
         * Note the operand-order asymmetry below: for a MEMORY source the
         * difference is computed the other way round.
         */
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data - value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data - value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = value - (unsigned long) p->u.data;
        }

        /*
         * The CF, OF, SF, ZF, AF, and PF flags are set according
         * to the result
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_CF(size, value, (unsigned long) p->u.data, regs);
        set_eflags_OF(size, diff, value, (unsigned long) p->u.data, regs);
        set_eflags_AF(size, diff, value, (unsigned long) p->u.data, regs);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_TEST:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
        }
        diff = (unsigned long) p->u.data & value;

        /*
         * Sets the SF, ZF, and PF status flags. CF and OF are set to 0
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_BT:
        /*
         * BT: CF <- bit (value mod 32) of the loaded data.  Other flags
         * are left untouched.
         */
        index = operand_index(src);
        value = get_reg_value(size, index, 0, regs);

        if (p->u.data & (1 << (value & ((1 << 5) - 1))))
            regs->eflags |= X86_EFLAGS_CF;
        else
            regs->eflags &= ~X86_EFLAGS_CF;

        break;

    case INSTR_XCHG:
        /*
         * xchg: the value read from the MMIO location always lands in the
         * register operand, whichever side of the encoding it is on.  The
         * write half was already carried out by the device model.
         */
        if (src & REGISTER) {
            index = operand_index(src);
            set_reg_value(size, index, 0, regs, p->u.data);
        } else {
            index = operand_index(dst);
            set_reg_value(size, index, 0, regs, p->u.data);
        }
        break;
    }

    hvm_load_cpu_guest_regs(v, regs);
}
666 void hvm_io_assist(struct vcpu *v)
667 {
668 vcpu_iodata_t *vio;
669 ioreq_t *p;
670 struct cpu_user_regs *regs = guest_cpu_user_regs();
671 struct mmio_op *mmio_opp;
672 struct cpu_user_regs *inst_decoder_regs;
674 mmio_opp = &v->arch.hvm_vcpu.mmio_op;
675 inst_decoder_regs = mmio_opp->inst_decoder_regs;
677 vio = get_vio(v->domain, v->vcpu_id);
679 if (vio == 0) {
680 HVM_DBG_LOG(DBG_LEVEL_1,
681 "bad shared page: %lx", (unsigned long) vio);
682 printf("bad shared page: %lx\n", (unsigned long) vio);
683 domain_crash_synchronous();
684 }
686 p = &vio->vp_ioreq;
688 /* clear IO wait HVM flag */
689 if (test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags)) {
690 if (p->state == STATE_IORESP_READY) {
691 p->state = STATE_INVALID;
692 clear_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags);
694 if (p->type == IOREQ_TYPE_PIO)
695 hvm_pio_assist(regs, p, mmio_opp);
696 else
697 hvm_mmio_assist(v, regs, p, mmio_opp);
698 }
699 /* else an interrupt send event raced us */
700 }
701 }
703 /*
704 * On exit from hvm_wait_io, we're guaranteed not to be waiting on
705 * I/O response from the device model.
706 */
/*
 * Block the calling vcpu until the device model has responded to its
 * outstanding I/O request.  On exit we are guaranteed not to be waiting
 * on an I/O response from the device model.
 *
 * The flag-clearing order (master flag, then selector bit, then the
 * event-channel bit, with a barrier before testing) mirrors the event
 * delivery path so that no notification can be lost between the test
 * and the block.
 */
void hvm_wait_io(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int port = iopacket_port(v);

    for ( ; ; )
    {
        /* Clear master flag, selector flag, event flag each in turn. */
        v->vcpu_info->evtchn_upcall_pending = 0;
        clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
        smp_mb__after_clear_bit();
        if ( test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]) )
            hvm_io_assist(v);

        /* Need to wait for I/O responses? */
        if ( !test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
            break;

        /* Yield the CPU until the next event-channel notification. */
        do_sched_op_compat(SCHEDOP_block, 0);
    }

    /*
     * Re-set the selector and master flags in case any other notifications
     * are pending.
     */
    if ( d->shared_info->evtchn_pending[port/BITS_PER_LONG] )
        set_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
    if ( v->vcpu_info->evtchn_pending_sel )
        v->vcpu_info->evtchn_upcall_pending = 1;
}
/*
 * Block the calling vcpu until an event arrives on its I/O packet event
 * channel, without consuming the event.  Like hvm_wait_io() the flags
 * are cleared before testing so a racing notification wakes us rather
 * than being lost; on exit the pending event is reflected back into the
 * selector and master flags for normal delivery.
 */
void hvm_safe_block(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int port = iopacket_port(v);

    for ( ; ; )
    {
        /* Clear master flag & selector flag so we will wake from block. */
        v->vcpu_info->evtchn_upcall_pending = 0;
        clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
        smp_mb__after_clear_bit();

        /* Event pending already? */
        if ( test_bit(port, &d->shared_info->evtchn_pending[0]) )
            break;

        do_sched_op_compat(SCHEDOP_block, 0);
    }

    /* Reflect pending event in selector and master flags. */
    set_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
    v->vcpu_info->evtchn_upcall_pending = 1;
}
764 /*
765 * Local variables:
766 * mode: C
767 * c-set-style: "BSD"
768 * c-basic-offset: 4
769 * tab-width: 4
770 * indent-tabs-mode: nil
771 * End:
772 */