ia64/xen-unstable

view xen/arch/x86/hvm/io.c @ 9563:9bee4875a848

Rename sched_op->sched_op_compat and sched_op_new->sched_op
after Christian's interface cleanup.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Apr 01 11:08:50 2006 +0100 (2006-04-01)
parents b5bb9920bf48
children 8d08ad8256df
line source
1 /*
2 * io.c: Handling I/O and interrupts.
3 *
4 * Copyright (c) 2004, Intel Corporation.
5 * Copyright (c) 2005, International Business Machines Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
21 #include <xen/config.h>
22 #include <xen/init.h>
23 #include <xen/mm.h>
24 #include <xen/lib.h>
25 #include <xen/errno.h>
26 #include <xen/trace.h>
27 #include <xen/event.h>
29 #include <xen/hypercall.h>
30 #include <asm/current.h>
31 #include <asm/cpufeature.h>
32 #include <asm/processor.h>
33 #include <asm/msr.h>
34 #include <asm/apic.h>
35 #include <asm/shadow.h>
36 #include <asm/hvm/hvm.h>
37 #include <asm/hvm/support.h>
38 #include <asm/hvm/vpit.h>
39 #include <asm/hvm/vpic.h>
40 #include <asm/hvm/vlapic.h>
42 #include <public/sched.h>
43 #include <public/hvm/ioreq.h>
45 #if defined (__i386__)
/*
 * Write 'value' into guest general-purpose register 'index' at width
 * 'size' (BYTE/WORD/LONG), preserving the bytes of the register the
 * operand does not cover (i386 variant).
 *
 * BYTE indices follow the x86 encoding: 0-3 = AL/CL/DL/BL and
 * 4-7 = AH/CH/DH/BH (bits 8-15 of eax/ecx/edx/ebx).
 * 'seg' is accepted for interface symmetry with get_reg_value() but is
 * unused here.  Any invalid size/index combination crashes the domain.
 */
static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
{
    switch (size) {
    case BYTE:
        switch (index) {
        case 0:
            /* AL */
            regs->eax &= 0xFFFFFF00;
            regs->eax |= (value & 0xFF);
            break;
        case 1:
            /* CL */
            regs->ecx &= 0xFFFFFF00;
            regs->ecx |= (value & 0xFF);
            break;
        case 2:
            /* DL */
            regs->edx &= 0xFFFFFF00;
            regs->edx |= (value & 0xFF);
            break;
        case 3:
            /* BL */
            regs->ebx &= 0xFFFFFF00;
            regs->ebx |= (value & 0xFF);
            break;
        case 4:
            /* AH: replace bits 8-15 only. */
            regs->eax &= 0xFFFF00FF;
            regs->eax |= ((value & 0xFF) << 8);
            break;
        case 5:
            /* CH */
            regs->ecx &= 0xFFFF00FF;
            regs->ecx |= ((value & 0xFF) << 8);
            break;
        case 6:
            /* DH */
            regs->edx &= 0xFFFF00FF;
            regs->edx |= ((value & 0xFF) << 8);
            break;
        case 7:
            /* BH */
            regs->ebx &= 0xFFFF00FF;
            regs->ebx |= ((value & 0xFF) << 8);
            break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        break;
    case WORD:
        /* 16-bit operand: replace the low word, keep the high word. */
        switch (index) {
        case 0:
            regs->eax &= 0xFFFF0000;
            regs->eax |= (value & 0xFFFF);
            break;
        case 1:
            regs->ecx &= 0xFFFF0000;
            regs->ecx |= (value & 0xFFFF);
            break;
        case 2:
            regs->edx &= 0xFFFF0000;
            regs->edx |= (value & 0xFFFF);
            break;
        case 3:
            regs->ebx &= 0xFFFF0000;
            regs->ebx |= (value & 0xFFFF);
            break;
        case 4:
            regs->esp &= 0xFFFF0000;
            regs->esp |= (value & 0xFFFF);
            break;
        case 5:
            regs->ebp &= 0xFFFF0000;
            regs->ebp |= (value & 0xFFFF);
            break;
        case 6:
            regs->esi &= 0xFFFF0000;
            regs->esi |= (value & 0xFFFF);
            break;
        case 7:
            regs->edi &= 0xFFFF0000;
            regs->edi |= (value & 0xFFFF);
            break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        break;
    case LONG:
        /* 32-bit operand: the whole register is overwritten. */
        switch (index) {
        case 0:
            regs->eax = value;
            break;
        case 1:
            regs->ecx = value;
            break;
        case 2:
            regs->edx = value;
            break;
        case 3:
            regs->ebx = value;
            break;
        case 4:
            regs->esp = value;
            break;
        case 5:
            regs->ebp = value;
            break;
        case 6:
            regs->esi = value;
            break;
        case 7:
            regs->edi = value;
            break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        break;
    default:
        printk("Error: size:%x, index:%x are invalid!\n", size, index);
        domain_crash_synchronous();
        break;
    }
}
167 #else
168 static inline void __set_reg_value(unsigned long *reg, int size, long value)
169 {
170 switch (size) {
171 case BYTE_64:
172 *reg &= ~0xFF;
173 *reg |= (value & 0xFF);
174 break;
175 case WORD:
176 *reg &= ~0xFFFF;
177 *reg |= (value & 0xFFFF);
178 break;
179 case LONG:
180 *reg &= ~0xFFFFFFFF;
181 *reg |= (value & 0xFFFFFFFF);
182 break;
183 case QUAD:
184 *reg = value;
185 break;
186 default:
187 printk("Error: <__set_reg_value>: size:%x is invalid\n", size);
188 domain_crash_synchronous();
189 }
190 }
192 static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
193 {
194 if (size == BYTE) {
195 switch (index) {
196 case 0:
197 regs->rax &= ~0xFF;
198 regs->rax |= (value & 0xFF);
199 break;
200 case 1:
201 regs->rcx &= ~0xFF;
202 regs->rcx |= (value & 0xFF);
203 break;
204 case 2:
205 regs->rdx &= ~0xFF;
206 regs->rdx |= (value & 0xFF);
207 break;
208 case 3:
209 regs->rbx &= ~0xFF;
210 regs->rbx |= (value & 0xFF);
211 break;
212 case 4:
213 regs->rax &= 0xFFFFFFFFFFFF00FF;
214 regs->rax |= ((value & 0xFF) << 8);
215 break;
216 case 5:
217 regs->rcx &= 0xFFFFFFFFFFFF00FF;
218 regs->rcx |= ((value & 0xFF) << 8);
219 break;
220 case 6:
221 regs->rdx &= 0xFFFFFFFFFFFF00FF;
222 regs->rdx |= ((value & 0xFF) << 8);
223 break;
224 case 7:
225 regs->rbx &= 0xFFFFFFFFFFFF00FF;
226 regs->rbx |= ((value & 0xFF) << 8);
227 break;
228 default:
229 printk("Error: size:%x, index:%x are invalid!\n", size, index);
230 domain_crash_synchronous();
231 break;
232 }
233 return;
234 }
236 switch (index) {
237 case 0:
238 __set_reg_value(&regs->rax, size, value);
239 break;
240 case 1:
241 __set_reg_value(&regs->rcx, size, value);
242 break;
243 case 2:
244 __set_reg_value(&regs->rdx, size, value);
245 break;
246 case 3:
247 __set_reg_value(&regs->rbx, size, value);
248 break;
249 case 4:
250 __set_reg_value(&regs->rsp, size, value);
251 break;
252 case 5:
253 __set_reg_value(&regs->rbp, size, value);
254 break;
255 case 6:
256 __set_reg_value(&regs->rsi, size, value);
257 break;
258 case 7:
259 __set_reg_value(&regs->rdi, size, value);
260 break;
261 case 8:
262 __set_reg_value(&regs->r8, size, value);
263 break;
264 case 9:
265 __set_reg_value(&regs->r9, size, value);
266 break;
267 case 10:
268 __set_reg_value(&regs->r10, size, value);
269 break;
270 case 11:
271 __set_reg_value(&regs->r11, size, value);
272 break;
273 case 12:
274 __set_reg_value(&regs->r12, size, value);
275 break;
276 case 13:
277 __set_reg_value(&regs->r13, size, value);
278 break;
279 case 14:
280 __set_reg_value(&regs->r14, size, value);
281 break;
282 case 15:
283 __set_reg_value(&regs->r15, size, value);
284 break;
285 default:
286 printk("Error: <set_reg_value> Invalid index\n");
287 domain_crash_synchronous();
288 }
289 return;
290 }
291 #endif
293 extern long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs);
295 static inline void set_eflags_CF(int size, unsigned long v1,
296 unsigned long v2, struct cpu_user_regs *regs)
297 {
298 unsigned long mask = (1 << (8 * size)) - 1;
300 if ((v1 & mask) > (v2 & mask))
301 regs->eflags |= X86_EFLAGS_CF;
302 else
303 regs->eflags &= ~X86_EFLAGS_CF;
304 }
306 static inline void set_eflags_OF(int size, unsigned long v1,
307 unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
308 {
309 if ((v3 ^ v2) & (v3 ^ v1) & (1 << ((8 * size) - 1)))
310 regs->eflags |= X86_EFLAGS_OF;
311 }
313 static inline void set_eflags_AF(int size, unsigned long v1,
314 unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
315 {
316 if ((v1 ^ v2 ^ v3) & 0x10)
317 regs->eflags |= X86_EFLAGS_AF;
318 }
320 static inline void set_eflags_ZF(int size, unsigned long v1,
321 struct cpu_user_regs *regs)
322 {
323 unsigned long mask = (1 << (8 * size)) - 1;
325 if ((v1 & mask) == 0)
326 regs->eflags |= X86_EFLAGS_ZF;
327 }
329 static inline void set_eflags_SF(int size, unsigned long v1,
330 struct cpu_user_regs *regs)
331 {
332 if (v1 & (1 << ((8 * size) - 1)))
333 regs->eflags |= X86_EFLAGS_SF;
334 }
/*
 * Even-parity lookup for one byte: parity_table[b] is 1 when b has an
 * even number of set bits (the x86 PF convention), 0 otherwise.
 * Read-only data — declared const so it lands in .rodata.
 */
static const char parity_table[256] = {
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
};
355 static inline void set_eflags_PF(int size, unsigned long v1,
356 struct cpu_user_regs *regs)
357 {
358 if (parity_table[v1 & 0xFF])
359 regs->eflags |= X86_EFLAGS_PF;
360 }
/*
 * Complete an emulated port-I/O instruction once the device model has
 * filled in the ioreq: advance ESI/EDI for the string forms, decrement
 * ECX when a REP prefix was in effect, and latch an IN result into EAX.
 */
static void hvm_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
                           struct mmio_op *mmio_opp)
{
    unsigned long old_eax;
    int sign = p->df ? -1 : 1;  /* EFLAGS.DF selects string direction */

    if (p->dir == IOREQ_WRITE) {
        if (p->pdata_valid) {
            /* OUTS: the source data was already consumed; step ESI. */
            regs->esi += sign * p->count * p->size;
            if (mmio_opp->flags & REPZ)
                regs->ecx -= p->count;
        }
    } else {
        if (mmio_opp->flags & OVERLAP) {
            unsigned long addr;

            /* INS whose destination overlapped: the data came back in
             * p->u.data and must be copied out to guest memory. */
            regs->edi += sign * p->count * p->size;
            if (mmio_opp->flags & REPZ)
                regs->ecx -= p->count;

            addr = regs->edi;
            if (sign > 0)
                addr -= p->size;  /* EDI was already stepped past the slot */
            /* NOTE(review): hvm_copy() return value is ignored — a failed
             * copy-out to the guest goes unnoticed; confirm intentional. */
            hvm_copy(&p->u.data, addr, p->size, HVM_COPY_OUT);
        } else if (p->pdata_valid) {
            /* INS with data delivered directly: just step EDI. */
            regs->edi += sign * p->count * p->size;
            if (mmio_opp->flags & REPZ)
                regs->ecx -= p->count;
        } else {
            /* Plain IN: merge the result into EAX at the port width,
             * preserving the untouched high bytes for 1/2-byte ports. */
            old_eax = regs->eax;
            switch (p->size) {
            case 1:
                regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
                break;
            case 2:
                regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
                break;
            case 4:
                regs->eax = (p->u.data & 0xffffffff);
                break;
            default:
                printk("Error: %s unknown port size\n", __FUNCTION__);
                domain_crash_synchronous();
            }
        }
    }
}
/*
 * Complete an emulated MMIO instruction: fold the value the device model
 * returned in p->u.data into the destination operand, recompute the
 * arithmetic flags the instruction would have produced, then push the
 * adjusted register file back to the guest via hvm_load_cpu_guest_regs().
 */
static void hvm_mmio_assist(struct vcpu *v, struct cpu_user_regs *regs,
                            ioreq_t *p, struct mmio_op *mmio_opp)
{
    int sign = p->df ? -1 : 1;          /* EFLAGS.DF for the string forms */
    int size = -1, index = -1;
    unsigned long value = 0, diff = 0;
    unsigned long src, dst;

    src = mmio_opp->operand[0];
    dst = mmio_opp->operand[1];
    size = operand_size(src);

    switch (mmio_opp->instr) {
    case INSTR_MOV:
        /* MOV reg, mem: latch the loaded value into the register. */
        if (dst & REGISTER) {
            index = operand_index(dst);
            set_reg_value(size, index, 0, regs, p->u.data);
        }
        break;

    case INSTR_MOVZX:
        /* MOVZX: zero-extend the memory operand to the dst width. */
        if (dst & REGISTER) {
            switch (size) {
            case BYTE:
                p->u.data &= 0xFFULL;
                break;

            case WORD:
                p->u.data &= 0xFFFFULL;
                break;

            case LONG:
                p->u.data &= 0xFFFFFFFFULL;
                break;

            default:
                printk("Impossible source operand size of movzx instr: %d\n", size);
                domain_crash_synchronous();
            }
            index = operand_index(dst);
            set_reg_value(operand_size(dst), index, 0, regs, p->u.data);
        }
        break;

    case INSTR_MOVSX:
        /* MOVSX: sign-extend the memory operand to the dst width. */
        if (dst & REGISTER) {
            switch (size) {
            case BYTE:
                p->u.data &= 0xFFULL;
                if ( p->u.data & 0x80ULL )
                    p->u.data |= 0xFFFFFFFFFFFFFF00ULL;
                break;

            case WORD:
                p->u.data &= 0xFFFFULL;
                if ( p->u.data & 0x8000ULL )
                    p->u.data |= 0xFFFFFFFFFFFF0000ULL;
                break;

            case LONG:
                p->u.data &= 0xFFFFFFFFULL;
                if ( p->u.data & 0x80000000ULL )
                    p->u.data |= 0xFFFFFFFF00000000ULL;
                break;

            default:
                printk("Impossible source operand size of movsx instr: %d\n", size);
                domain_crash_synchronous();
            }
            index = operand_index(dst);
            set_reg_value(operand_size(dst), index, 0, regs, p->u.data);
        }
        break;

    case INSTR_MOVS:
        /* MOVS: both pointers advance by count*size in DF direction. */
        sign = p->df ? -1 : 1;
        regs->esi += sign * p->count * p->size;
        regs->edi += sign * p->count * p->size;

        if ((mmio_opp->flags & OVERLAP) && p->dir == IOREQ_READ) {
            /* The read half hit MMIO; the buffered element must now be
             * written out to the (RAM) destination. */
            unsigned long addr = regs->edi;

            if (sign > 0)
                addr -= p->size;  /* EDI already stepped past the slot */
            /* NOTE(review): hvm_copy() result ignored, as elsewhere. */
            hvm_copy(&p->u.data, addr, p->size, HVM_COPY_OUT);
        }

        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;
        break;

    case INSTR_STOS:
        /* STOS to MMIO: the store already happened; just step EDI. */
        sign = p->df ? -1 : 1;
        regs->edi += sign * p->count * p->size;
        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;
        break;

    case INSTR_AND:
        if (src & REGISTER) {
            /* AND mem, reg: result was written to memory by the DM;
             * only the flags need recomputing here. */
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data & value;
        } else if (src & IMMEDIATE) {
            /* AND mem, imm: likewise flags only. */
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data & value;
        } else if (src & MEMORY) {
            /* AND reg, mem: the register is the destination. */
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data & value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_OR:
        /* Same operand structure as INSTR_AND, with bitwise OR. */
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data | value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data | value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data | value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_XOR:
        /* Same operand structure as INSTR_AND, with bitwise XOR. */
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data ^ value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data ^ value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data ^ value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_CMP:
        if (src & REGISTER) {
            /* CMP mem, reg: diff = mem - reg. */
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data - value;
        } else if (src & IMMEDIATE) {
            /* CMP mem, imm: diff = mem - imm. */
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data - value;
        } else if (src & MEMORY) {
            /* CMP reg, mem: diff = reg - mem. */
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = value - (unsigned long) p->u.data;
        }

        /*
         * The CF, OF, SF, ZF, AF, and PF flags are set according
         * to the result
         */
        /* NOTE(review): CF/OF/AF below always pass (value, p->u.data) in
         * the mem-minuend order, yet the MEMORY-source branch computed
         * diff = value - data — the borrow/overflow operand order looks
         * inverted for that form; verify against the decoder. */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_CF(size, value, (unsigned long) p->u.data, regs);
        set_eflags_OF(size, diff, value, (unsigned long) p->u.data, regs);
        set_eflags_AF(size, diff, value, (unsigned long) p->u.data, regs);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_TEST:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
        }
        diff = (unsigned long) p->u.data & value;

        /*
         * Sets the SF, ZF, and PF status flags. CF and OF are set to 0
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_BT:
        /* BT mem, reg: CF := bit (reg mod 32) of the loaded value. */
        index = operand_index(src);
        value = get_reg_value(size, index, 0, regs);

        /* NOTE(review): `1 << (value & 31)` is an int; for bit 31 it
         * becomes INT_MIN and sign-extends when &-ed with the 64-bit
         * p->u.data, testing bits 31-63 rather than just bit 31. */
        if (p->u.data & (1 << (value & ((1 << 5) - 1))))
            regs->eflags |= X86_EFLAGS_CF;
        else
            regs->eflags &= ~X86_EFLAGS_CF;

        break;
        /* NOTE(review): no default case — an unrecognised instr falls
         * through silently with only the register reload below. */
    }

    hvm_load_cpu_guest_regs(v, regs);
}
/*
 * Entry point called when the device model has produced a response (or a
 * hook) for this vcpu's outstanding I/O request: pick the response up
 * from the shared ioreq page and fold the result back into guest state.
 */
void hvm_io_assist(struct vcpu *v)
{
    vcpu_iodata_t *vio;
    ioreq_t *p;
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct mmio_op *mmio_opp;
    struct cpu_user_regs *inst_decoder_regs;  /* NOTE(review): assigned but unused here */

    mmio_opp = &v->arch.hvm_vcpu.mmio_op;
    inst_decoder_regs = mmio_opp->inst_decoder_regs;

    vio = get_vio(v->domain, v->vcpu_id);

    if (vio == 0) {
        /* No shared ioreq page mapped — nothing can be recovered. */
        HVM_DBG_LOG(DBG_LEVEL_1,
                    "bad shared page: %lx", (unsigned long) vio);
        printf("bad shared page: %lx\n", (unsigned long) vio);
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;
    if (p->state == STATE_IORESP_HOOK)
        hvm_hooks_assist(v);

    /* clear IO wait HVM flag */
    if (test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags)) {
        if (p->state == STATE_IORESP_READY) {
            /* Invalidate the slot and drop IO_WAIT before consuming the
             * response, so a fresh request cannot see stale state. */
            p->state = STATE_INVALID;
            clear_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags);

            if (p->type == IOREQ_TYPE_PIO)
                hvm_pio_assist(regs, p, mmio_opp);
            else
                hvm_mmio_assist(v, regs, p, mmio_opp);
        }
        /* else an interrupt send event raced us */
    }
}
/*
 * On exit from hvm_wait_io, we're guaranteed not to be waiting on
 * I/O response from the device model.
 *
 * The loop re-implements the guest's event-channel protocol by hand:
 * the order "clear notification flags, barrier, then test the pending
 * bit" is what prevents a lost-wakeup race against the device model.
 */
void hvm_wait_io(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int port = iopacket_port(v);

    for ( ; ; )
    {
        /* Clear master flag, selector flag, event flag each in turn. */
        v->vcpu_info->evtchn_upcall_pending = 0;
        clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
        /* Flag clears must be visible before the pending-bit test. */
        smp_mb__after_clear_bit();
        if ( test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]) )
            hvm_io_assist(v);

        /* Need to wait for I/O responses? */
        if ( !test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
            break;

        /* Block until the event channel is notified again. */
        do_sched_op_compat(SCHEDOP_block, 0);
    }

    /*
     * Re-set the selector and master flags in case any other notifications
     * are pending.
     */
    if ( d->shared_info->evtchn_pending[port/BITS_PER_LONG] )
        set_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
    if ( v->vcpu_info->evtchn_pending_sel )
        v->vcpu_info->evtchn_upcall_pending = 1;
}
/*
 * Block this vcpu until its I/O event channel is notified, without ever
 * sleeping past a notification that arrived first: flags are cleared
 * and a barrier issued before the pending bit is examined, so a racing
 * event either sets the bit (seen below) or wakes the blocked vcpu.
 */
void hvm_safe_block(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int port = iopacket_port(v);

    for ( ; ; )
    {
        /* Clear master flag & selector flag so we will wake from block. */
        v->vcpu_info->evtchn_upcall_pending = 0;
        clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
        /* Flag clears must be visible before the pending-bit test. */
        smp_mb__after_clear_bit();

        /* Event pending already? */
        if ( test_bit(port, &d->shared_info->evtchn_pending[0]) )
            break;

        do_sched_op_compat(SCHEDOP_block, 0);
    }

    /* Reflect pending event in selector and master flags. */
    set_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
    v->vcpu_info->evtchn_upcall_pending = 1;
}
754 /*
755 * Local variables:
756 * mode: C
757 * c-set-style: "BSD"
758 * c-basic-offset: 4
759 * tab-width: 4
760 * indent-tabs-mode: nil
761 * End:
762 */