ia64/xen-unstable: xen/arch/x86/hvm/io.c @ 10903:822c39808e62

[HVM] Initialise full regs structure for PIO requests.
Signed-off-by: Keir Fraser <keir@xensource.com>

author   kfraser@localhost.localdomain
date     Wed Aug 02 09:52:03 2006 +0100 (2006-08-02)
parents  3fa8b914e2b5
children 415614d3a1ee
/*
 * io.c: Handling I/O and interrupts.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/trace.h>
#include <xen/event.h>

#include <xen/hypercall.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/shadow.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vpit.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>

#include <public/sched.h>
#include <public/hvm/ioreq.h>
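
/*
 * set_reg_value() writes the result of an emulated instruction back into the
 * guest register file: 'index' selects the register, 'size' the operand
 * width.  Separate implementations follow for i386 and x86-64.
 */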
#if defined (__i386__)
static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
{
    switch (size) {
    case BYTE:
        switch (index) {
        case 0:
            regs->eax &= 0xFFFFFF00;
            regs->eax |= (value & 0xFF);
            break;
        case 1:
            regs->ecx &= 0xFFFFFF00;
            regs->ecx |= (value & 0xFF);
            break;
        case 2:
            regs->edx &= 0xFFFFFF00;
            regs->edx |= (value & 0xFF);
            break;
        case 3:
            regs->ebx &= 0xFFFFFF00;
            regs->ebx |= (value & 0xFF);
            break;
        case 4:
            regs->eax &= 0xFFFF00FF;
            regs->eax |= ((value & 0xFF) << 8);
            break;
        case 5:
            regs->ecx &= 0xFFFF00FF;
            regs->ecx |= ((value & 0xFF) << 8);
            break;
        case 6:
            regs->edx &= 0xFFFF00FF;
            regs->edx |= ((value & 0xFF) << 8);
            break;
        case 7:
            regs->ebx &= 0xFFFF00FF;
            regs->ebx |= ((value & 0xFF) << 8);
            break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        break;
    case WORD:
        switch (index) {
        case 0:
            regs->eax &= 0xFFFF0000;
            regs->eax |= (value & 0xFFFF);
            break;
        case 1:
            regs->ecx &= 0xFFFF0000;
            regs->ecx |= (value & 0xFFFF);
            break;
        case 2:
            regs->edx &= 0xFFFF0000;
            regs->edx |= (value & 0xFFFF);
            break;
        case 3:
            regs->ebx &= 0xFFFF0000;
            regs->ebx |= (value & 0xFFFF);
            break;
        case 4:
            regs->esp &= 0xFFFF0000;
            regs->esp |= (value & 0xFFFF);
            break;
        case 5:
            regs->ebp &= 0xFFFF0000;
            regs->ebp |= (value & 0xFFFF);
            break;
        case 6:
            regs->esi &= 0xFFFF0000;
            regs->esi |= (value & 0xFFFF);
            break;
        case 7:
            regs->edi &= 0xFFFF0000;
            regs->edi |= (value & 0xFFFF);
            break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        break;
    case LONG:
        switch (index) {
        case 0:
            regs->eax = value;
            break;
        case 1:
            regs->ecx = value;
            break;
        case 2:
            regs->edx = value;
            break;
        case 3:
            regs->ebx = value;
            break;
        case 4:
            regs->esp = value;
            break;
        case 5:
            regs->ebp = value;
            break;
        case 6:
            regs->esi = value;
            break;
        case 7:
            regs->edi = value;
            break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        break;
    default:
        printk("Error: size:%x, index:%x are invalid!\n", size, index);
        domain_crash_synchronous();
        break;
    }
}
#else
static inline void __set_reg_value(unsigned long *reg, int size, long value)
{
    switch (size) {
    case BYTE_64:
        *reg &= ~0xFF;
        *reg |= (value & 0xFF);
        break;
    case WORD:
        *reg &= ~0xFFFF;
        *reg |= (value & 0xFFFF);
        break;
    case LONG:
        *reg &= ~0xFFFFFFFF;
        *reg |= (value & 0xFFFFFFFF);
        break;
    case QUAD:
        *reg = value;
        break;
    default:
        printk("Error: <__set_reg_value>: size:%x is invalid\n", size);
        domain_crash_synchronous();
    }
}

static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
{
    if (size == BYTE) {
        switch (index) {
        case 0:
            regs->rax &= ~0xFF;
            regs->rax |= (value & 0xFF);
            break;
        case 1:
            regs->rcx &= ~0xFF;
            regs->rcx |= (value & 0xFF);
            break;
        case 2:
            regs->rdx &= ~0xFF;
            regs->rdx |= (value & 0xFF);
            break;
        case 3:
            regs->rbx &= ~0xFF;
            regs->rbx |= (value & 0xFF);
            break;
        case 4:
            regs->rax &= 0xFFFFFFFFFFFF00FF;
            regs->rax |= ((value & 0xFF) << 8);
            break;
        case 5:
            regs->rcx &= 0xFFFFFFFFFFFF00FF;
            regs->rcx |= ((value & 0xFF) << 8);
            break;
        case 6:
            regs->rdx &= 0xFFFFFFFFFFFF00FF;
            regs->rdx |= ((value & 0xFF) << 8);
            break;
        case 7:
            regs->rbx &= 0xFFFFFFFFFFFF00FF;
            regs->rbx |= ((value & 0xFF) << 8);
            break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        return;
    }

    switch (index) {
    case 0:
        __set_reg_value(&regs->rax, size, value);
        break;
    case 1:
        __set_reg_value(&regs->rcx, size, value);
        break;
    case 2:
        __set_reg_value(&regs->rdx, size, value);
        break;
    case 3:
        __set_reg_value(&regs->rbx, size, value);
        break;
    case 4:
        __set_reg_value(&regs->rsp, size, value);
        break;
    case 5:
        __set_reg_value(&regs->rbp, size, value);
        break;
    case 6:
        __set_reg_value(&regs->rsi, size, value);
        break;
    case 7:
        __set_reg_value(&regs->rdi, size, value);
        break;
    case 8:
        __set_reg_value(&regs->r8, size, value);
        break;
    case 9:
        __set_reg_value(&regs->r9, size, value);
        break;
    case 10:
        __set_reg_value(&regs->r10, size, value);
        break;
    case 11:
        __set_reg_value(&regs->r11, size, value);
        break;
    case 12:
        __set_reg_value(&regs->r12, size, value);
        break;
    case 13:
        __set_reg_value(&regs->r13, size, value);
        break;
    case 14:
        __set_reg_value(&regs->r14, size, value);
        break;
    case 15:
        __set_reg_value(&regs->r15, size, value);
        break;
    default:
        printk("Error: <set_reg_value> Invalid index\n");
        domain_crash_synchronous();
    }
    return;
}
#endif

extern long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs);
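
/*
 * Helpers to recompute individual EFLAGS bits after the arithmetic and
 * logical instructions emulated in hvm_mmio_assist() below.  set_eflags_CF()
 * sets or clears CF; the remaining helpers only set their flag, so the
 * caller clears the affected flags in regs->eflags first.
 */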
static inline void set_eflags_CF(int size, unsigned long v1,
                                 unsigned long v2, struct cpu_user_regs *regs)
{
    unsigned long mask = (1 << (8 * size)) - 1;

    if ((v1 & mask) > (v2 & mask))
        regs->eflags |= X86_EFLAGS_CF;
    else
        regs->eflags &= ~X86_EFLAGS_CF;
}

static inline void set_eflags_OF(int size, unsigned long v1,
                                 unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
{
    if ((v3 ^ v2) & (v3 ^ v1) & (1 << ((8 * size) - 1)))
        regs->eflags |= X86_EFLAGS_OF;
}

static inline void set_eflags_AF(int size, unsigned long v1,
                                 unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
{
    if ((v1 ^ v2 ^ v3) & 0x10)
        regs->eflags |= X86_EFLAGS_AF;
}

static inline void set_eflags_ZF(int size, unsigned long v1,
                                 struct cpu_user_regs *regs)
{
    unsigned long mask = (1 << (8 * size)) - 1;

    if ((v1 & mask) == 0)
        regs->eflags |= X86_EFLAGS_ZF;
}

static inline void set_eflags_SF(int size, unsigned long v1,
                                 struct cpu_user_regs *regs)
{
    if (v1 & (1 << ((8 * size) - 1)))
        regs->eflags |= X86_EFLAGS_SF;
}
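
/* Parity of each possible low byte of a result: 1 => even parity, PF set. */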
static char parity_table[256] = {
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
};

static inline void set_eflags_PF(int size, unsigned long v1,
                                 struct cpu_user_regs *regs)
{
    if (parity_table[v1 & 0xFF])
        regs->eflags |= X86_EFLAGS_PF;
}
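
/*
 * Complete an emulated port-I/O instruction once the device model has
 * responded: for string ops advance EDI/ESI (and ECX under a REP prefix)
 * and, if the OVERLAP flag is set, copy the returned data back into guest
 * memory; for a plain IN merge the returned data into EAX.
 */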
static void hvm_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
                           struct hvm_io_op *pio_opp)
{
    unsigned long old_eax;
    int sign = p->df ? -1 : 1;

    if ( p->pdata_valid || (pio_opp->flags & OVERLAP) )
    {
        if ( pio_opp->flags & REPZ )
            regs->ecx -= p->count;
        if ( p->dir == IOREQ_READ )
        {
            regs->edi += sign * p->count * p->size;
            if ( pio_opp->flags & OVERLAP )
            {
                unsigned long addr = regs->edi;
                if (hvm_realmode(current))
                    addr += regs->es << 4;
                if (sign > 0)
                    addr -= p->size;
                hvm_copy(&p->u.data, addr, p->size, HVM_COPY_OUT);
            }
        }
        else /* p->dir == IOREQ_WRITE */
        {
            ASSERT(p->dir == IOREQ_WRITE);
            regs->esi += sign * p->count * p->size;
        }
    }
    else if ( p->dir == IOREQ_READ )
    {
        old_eax = regs->eax;
        switch ( p->size )
        {
        case 1:
            regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
            break;
        case 2:
            regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
            break;
        case 4:
            regs->eax = (p->u.data & 0xffffffff);
            break;
        default:
            printk("Error: %s unknown port size\n", __FUNCTION__);
            domain_crash_synchronous();
        }
    }
}
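
/*
 * Complete an emulated MMIO instruction once the device model has responded:
 * write the result back to the destination register where one is involved,
 * advance ESI/EDI/ECX for the string forms, and recompute EFLAGS for
 * AND/OR/XOR/CMP/TEST/BT.
 */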
static void hvm_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
                            struct hvm_io_op *mmio_opp)
{
    int sign = p->df ? -1 : 1;
    int size = -1, index = -1;
    unsigned long value = 0, diff = 0;
    unsigned long src, dst;

    src = mmio_opp->operand[0];
    dst = mmio_opp->operand[1];
    size = operand_size(src);

    switch (mmio_opp->instr) {
    case INSTR_MOV:
        if (dst & REGISTER) {
            index = operand_index(dst);
            set_reg_value(size, index, 0, regs, p->u.data);
        }
        break;

    case INSTR_MOVZX:
        if (dst & REGISTER) {
            switch (size) {
            case BYTE:
                p->u.data &= 0xFFULL;
                break;

            case WORD:
                p->u.data &= 0xFFFFULL;
                break;

            case LONG:
                p->u.data &= 0xFFFFFFFFULL;
                break;

            default:
                printk("Impossible source operand size of movzx instr: %d\n", size);
                domain_crash_synchronous();
            }
            index = operand_index(dst);
            set_reg_value(operand_size(dst), index, 0, regs, p->u.data);
        }
        break;

    case INSTR_MOVSX:
        if (dst & REGISTER) {
            switch (size) {
            case BYTE:
                p->u.data &= 0xFFULL;
                if ( p->u.data & 0x80ULL )
                    p->u.data |= 0xFFFFFFFFFFFFFF00ULL;
                break;

            case WORD:
                p->u.data &= 0xFFFFULL;
                if ( p->u.data & 0x8000ULL )
                    p->u.data |= 0xFFFFFFFFFFFF0000ULL;
                break;

            case LONG:
                p->u.data &= 0xFFFFFFFFULL;
                if ( p->u.data & 0x80000000ULL )
                    p->u.data |= 0xFFFFFFFF00000000ULL;
                break;

            default:
                printk("Impossible source operand size of movsx instr: %d\n", size);
                domain_crash_synchronous();
            }
            index = operand_index(dst);
            set_reg_value(operand_size(dst), index, 0, regs, p->u.data);
        }
        break;

    case INSTR_MOVS:
        sign = p->df ? -1 : 1;
        regs->esi += sign * p->count * p->size;
        regs->edi += sign * p->count * p->size;

        if ((mmio_opp->flags & OVERLAP) && p->dir == IOREQ_READ) {
            unsigned long addr = regs->edi;

            if (sign > 0)
                addr -= p->size;
            hvm_copy(&p->u.data, addr, p->size, HVM_COPY_OUT);
        }

        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;
        break;

    case INSTR_STOS:
        sign = p->df ? -1 : 1;
        regs->edi += sign * p->count * p->size;
        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;
        break;

    case INSTR_LODS:
        sign = p->df ? -1 : 1;
        regs->esi += sign * p->count * p->size;
        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;
        break;

    case INSTR_AND:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data & value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data & value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data & value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_OR:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data | value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data | value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data | value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_XOR:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data ^ value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data ^ value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data ^ value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_CMP:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data - value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data - value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = value - (unsigned long) p->u.data;
        }

        /*
         * The CF, OF, SF, ZF, AF, and PF flags are set according
         * to the result
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_CF(size, value, (unsigned long) p->u.data, regs);
        set_eflags_OF(size, diff, value, (unsigned long) p->u.data, regs);
        set_eflags_AF(size, diff, value, (unsigned long) p->u.data, regs);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_TEST:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
        }
        diff = (unsigned long) p->u.data & value;

        /*
         * Sets the SF, ZF, and PF status flags. CF and OF are set to 0
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_BT:
        index = operand_index(src);
        value = get_reg_value(size, index, 0, regs);

        if (p->u.data & (1 << (value & ((1 << 5) - 1))))
            regs->eflags |= X86_EFLAGS_CF;
        else
            regs->eflags &= ~X86_EFLAGS_CF;

        break;

    case INSTR_XCHG:
        if (src & REGISTER) {
            index = operand_index(src);
            set_reg_value(size, index, 0, regs, p->u.data);
        } else {
            index = operand_index(dst);
            set_reg_value(size, index, 0, regs, p->u.data);
        }
        break;
    }
}
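
/*
 * Called when the device model may have completed the outstanding I/O
 * request for this VCPU: consume the response, apply it to the saved
 * register context, and copy the updated registers back into guest state.
 */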
void hvm_io_assist(struct vcpu *v)
{
    vcpu_iodata_t *vio;
    ioreq_t *p;
    struct cpu_user_regs *regs;
    struct hvm_io_op *io_opp;

    io_opp = &v->arch.hvm_vcpu.io_op;
    regs = &io_opp->io_context;

    vio = get_vio(v->domain, v->vcpu_id);

    if ( vio == 0 ) {
        printf("bad shared page: %lx\n", (unsigned long)vio);
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;

    /* clear IO wait HVM flag */
    if ( test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) ) {
        if ( p->state == STATE_IORESP_READY ) {
            p->state = STATE_INVALID;
            clear_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags);

            if ( p->type == IOREQ_TYPE_PIO )
                hvm_pio_assist(regs, p, io_opp);
            else
                hvm_mmio_assist(regs, p, io_opp);

            /* Copy register changes back into current guest state. */
            hvm_load_cpu_guest_regs(v, regs);
            memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
        }
        /* else an interrupt send event raced us */
    }
}
/*
 * On exit from hvm_wait_io, we're guaranteed not to be waiting on
 * I/O response from the device model.
 */
void hvm_wait_io(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int port = iopacket_port(v);

    for ( ; ; )
    {
        /* Clear master flag, selector flag, event flag each in turn. */
        v->vcpu_info->evtchn_upcall_pending = 0;
        clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
        smp_mb__after_clear_bit();
        if ( test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]) )
            hvm_io_assist(v);

        /* Need to wait for I/O responses? */
        if ( !test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
            break;

        do_sched_op_compat(SCHEDOP_block, 0);
    }

    /*
     * Re-set the selector and master flags in case any other notifications
     * are pending.
     */
    if ( d->shared_info->evtchn_pending[port/BITS_PER_LONG] )
        set_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
    if ( v->vcpu_info->evtchn_pending_sel )
        v->vcpu_info->evtchn_upcall_pending = 1;
}
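
/*
 * Block the VCPU until an event is pending on its ioreq event channel,
 * then reflect that event in the selector and master upcall flags.
 */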
void hvm_safe_block(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int port = iopacket_port(v);

    for ( ; ; )
    {
        /* Clear master flag & selector flag so we will wake from block. */
        v->vcpu_info->evtchn_upcall_pending = 0;
        clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
        smp_mb__after_clear_bit();

        /* Event pending already? */
        if ( test_bit(port, &d->shared_info->evtchn_pending[0]) )
            break;

        do_sched_op_compat(SCHEDOP_block, 0);
    }

    /* Reflect pending event in selector and master flags. */
    set_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
    v->vcpu_info->evtchn_upcall_pending = 1;
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */