ia64/xen-unstable

view xen/arch/x86/hvm/io.c @ 14038:4314691c70a2

[HVM] Fix MMIO LODS emulation
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Tue Feb 20 20:02:49 2007 +0000 (2007-02-20)
parents f48553000369
children c39a6b458bd0
line source
1 /*
2 * io.c: Handling I/O and interrupts.
3 *
4 * Copyright (c) 2004, Intel Corporation.
5 * Copyright (c) 2005, International Business Machines Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
21 #include <xen/config.h>
22 #include <xen/init.h>
23 #include <xen/mm.h>
24 #include <xen/lib.h>
25 #include <xen/errno.h>
26 #include <xen/trace.h>
27 #include <xen/event.h>
29 #include <xen/hypercall.h>
30 #include <asm/current.h>
31 #include <asm/cpufeature.h>
32 #include <asm/processor.h>
33 #include <asm/msr.h>
34 #include <asm/apic.h>
35 #include <asm/paging.h>
36 #include <asm/hvm/hvm.h>
37 #include <asm/hvm/support.h>
38 #include <asm/hvm/vpt.h>
39 #include <asm/hvm/vpic.h>
40 #include <asm/hvm/vlapic.h>
42 #include <public/sched.h>
43 #include <public/hvm/ioreq.h>
45 #if defined (__i386__)
/*
 * Write an emulated result back into a 32-bit guest general-purpose
 * register (i386 build only).
 *
 * size:  operand width (BYTE/WORD/LONG).
 * index: ModRM-style register number.  For BYTE operands, indices 0-3
 *        select the low byte (AL/CL/DL/BL) and indices 4-7 select the
 *        high byte (AH/CH/DH/BH) of registers 0-3.
 * seg:   unused by this function.
 * value: value to store; only the low 'size' bytes are merged, the
 *        remaining bits of the register are preserved (except for LONG,
 *        which replaces the whole 32-bit register).
 *
 * Crashes the domain on an invalid size/index combination.
 */
static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
{
    switch (size) {
    case BYTE:
        switch (index) {
        case 0:
            regs->eax &= 0xFFFFFF00;
            regs->eax |= (value & 0xFF);
            break;
        case 1:
            regs->ecx &= 0xFFFFFF00;
            regs->ecx |= (value & 0xFF);
            break;
        case 2:
            regs->edx &= 0xFFFFFF00;
            regs->edx |= (value & 0xFF);
            break;
        case 3:
            regs->ebx &= 0xFFFFFF00;
            regs->ebx |= (value & 0xFF);
            break;
        case 4:
            /* Indices 4-7 are the high-byte registers AH/CH/DH/BH. */
            regs->eax &= 0xFFFF00FF;
            regs->eax |= ((value & 0xFF) << 8);
            break;
        case 5:
            regs->ecx &= 0xFFFF00FF;
            regs->ecx |= ((value & 0xFF) << 8);
            break;
        case 6:
            regs->edx &= 0xFFFF00FF;
            regs->edx |= ((value & 0xFF) << 8);
            break;
        case 7:
            regs->ebx &= 0xFFFF00FF;
            regs->ebx |= ((value & 0xFF) << 8);
            break;
        default:
            goto crash;
        }
        break;
    case WORD:
        switch (index) {
        case 0:
            regs->eax &= 0xFFFF0000;
            regs->eax |= (value & 0xFFFF);
            break;
        case 1:
            regs->ecx &= 0xFFFF0000;
            regs->ecx |= (value & 0xFFFF);
            break;
        case 2:
            regs->edx &= 0xFFFF0000;
            regs->edx |= (value & 0xFFFF);
            break;
        case 3:
            regs->ebx &= 0xFFFF0000;
            regs->ebx |= (value & 0xFFFF);
            break;
        case 4:
            regs->esp &= 0xFFFF0000;
            regs->esp |= (value & 0xFFFF);
            break;
        case 5:
            regs->ebp &= 0xFFFF0000;
            regs->ebp |= (value & 0xFFFF);
            break;
        case 6:
            regs->esi &= 0xFFFF0000;
            regs->esi |= (value & 0xFFFF);
            break;
        case 7:
            regs->edi &= 0xFFFF0000;
            regs->edi |= (value & 0xFFFF);
            break;
        default:
            goto crash;
        }
        break;
    case LONG:
        /* 32-bit store: full register replacement, no merging needed. */
        switch (index) {
        case 0:
            regs->eax = value;
            break;
        case 1:
            regs->ecx = value;
            break;
        case 2:
            regs->edx = value;
            break;
        case 3:
            regs->ebx = value;
            break;
        case 4:
            regs->esp = value;
            break;
        case 5:
            regs->ebp = value;
            break;
        case 6:
            regs->esi = value;
            break;
        case 7:
            regs->edi = value;
            break;
        default:
            goto crash;
        }
        break;
    default:
    crash:
        gdprintk(XENLOG_ERR, "size:%x, index:%x are invalid!\n", size, index);
        domain_crash_synchronous();
    }
}
161 #else
162 static inline void __set_reg_value(unsigned long *reg, int size, long value)
163 {
164 switch (size) {
165 case BYTE_64:
166 *reg &= ~0xFF;
167 *reg |= (value & 0xFF);
168 break;
169 case WORD:
170 *reg &= ~0xFFFF;
171 *reg |= (value & 0xFFFF);
172 break;
173 case LONG:
174 *reg &= ~0xFFFFFFFF;
175 *reg |= (value & 0xFFFFFFFF);
176 break;
177 case QUAD:
178 *reg = value;
179 break;
180 default:
181 gdprintk(XENLOG_ERR, "size:%x is invalid\n", size);
182 domain_crash_synchronous();
183 }
184 }
186 static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
187 {
188 if (size == BYTE) {
189 switch (index) {
190 case 0:
191 regs->rax &= ~0xFF;
192 regs->rax |= (value & 0xFF);
193 break;
194 case 1:
195 regs->rcx &= ~0xFF;
196 regs->rcx |= (value & 0xFF);
197 break;
198 case 2:
199 regs->rdx &= ~0xFF;
200 regs->rdx |= (value & 0xFF);
201 break;
202 case 3:
203 regs->rbx &= ~0xFF;
204 regs->rbx |= (value & 0xFF);
205 break;
206 case 4:
207 regs->rax &= 0xFFFFFFFFFFFF00FF;
208 regs->rax |= ((value & 0xFF) << 8);
209 break;
210 case 5:
211 regs->rcx &= 0xFFFFFFFFFFFF00FF;
212 regs->rcx |= ((value & 0xFF) << 8);
213 break;
214 case 6:
215 regs->rdx &= 0xFFFFFFFFFFFF00FF;
216 regs->rdx |= ((value & 0xFF) << 8);
217 break;
218 case 7:
219 regs->rbx &= 0xFFFFFFFFFFFF00FF;
220 regs->rbx |= ((value & 0xFF) << 8);
221 break;
222 default:
223 gdprintk(XENLOG_ERR, "size:%x, index:%x are invalid!\n",
224 size, index);
225 domain_crash_synchronous();
226 break;
227 }
228 return;
229 }
231 switch (index) {
232 case 0:
233 __set_reg_value(&regs->rax, size, value);
234 break;
235 case 1:
236 __set_reg_value(&regs->rcx, size, value);
237 break;
238 case 2:
239 __set_reg_value(&regs->rdx, size, value);
240 break;
241 case 3:
242 __set_reg_value(&regs->rbx, size, value);
243 break;
244 case 4:
245 __set_reg_value(&regs->rsp, size, value);
246 break;
247 case 5:
248 __set_reg_value(&regs->rbp, size, value);
249 break;
250 case 6:
251 __set_reg_value(&regs->rsi, size, value);
252 break;
253 case 7:
254 __set_reg_value(&regs->rdi, size, value);
255 break;
256 case 8:
257 __set_reg_value(&regs->r8, size, value);
258 break;
259 case 9:
260 __set_reg_value(&regs->r9, size, value);
261 break;
262 case 10:
263 __set_reg_value(&regs->r10, size, value);
264 break;
265 case 11:
266 __set_reg_value(&regs->r11, size, value);
267 break;
268 case 12:
269 __set_reg_value(&regs->r12, size, value);
270 break;
271 case 13:
272 __set_reg_value(&regs->r13, size, value);
273 break;
274 case 14:
275 __set_reg_value(&regs->r14, size, value);
276 break;
277 case 15:
278 __set_reg_value(&regs->r15, size, value);
279 break;
280 default:
281 gdprintk(XENLOG_ERR, "Invalid index\n");
282 domain_crash_synchronous();
283 }
284 return;
285 }
286 #endif
288 extern long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs);
290 static inline void set_eflags_CF(int size, unsigned long v1,
291 unsigned long v2, struct cpu_user_regs *regs)
292 {
293 unsigned long mask = (1 << (8 * size)) - 1;
295 if ((v1 & mask) > (v2 & mask))
296 regs->eflags |= X86_EFLAGS_CF;
297 else
298 regs->eflags &= ~X86_EFLAGS_CF;
299 }
301 static inline void set_eflags_OF(int size, unsigned long v1,
302 unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
303 {
304 if ((v3 ^ v2) & (v3 ^ v1) & (1 << ((8 * size) - 1)))
305 regs->eflags |= X86_EFLAGS_OF;
306 }
308 static inline void set_eflags_AF(int size, unsigned long v1,
309 unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
310 {
311 if ((v1 ^ v2 ^ v3) & 0x10)
312 regs->eflags |= X86_EFLAGS_AF;
313 }
315 static inline void set_eflags_ZF(int size, unsigned long v1,
316 struct cpu_user_regs *regs)
317 {
318 unsigned long mask = (1 << (8 * size)) - 1;
320 if ((v1 & mask) == 0)
321 regs->eflags |= X86_EFLAGS_ZF;
322 }
324 static inline void set_eflags_SF(int size, unsigned long v1,
325 struct cpu_user_regs *regs)
326 {
327 if (v1 & (1 << ((8 * size) - 1)))
328 regs->eflags |= X86_EFLAGS_SF;
329 }
/*
 * parity_table[b] == 1 iff byte value b has an even number of set bits,
 * i.e. the value EFLAGS.PF should take for a result whose low byte is b.
 */
static char parity_table[256] = {
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
};
350 static inline void set_eflags_PF(int size, unsigned long v1,
351 struct cpu_user_regs *regs)
352 {
353 if (parity_table[v1 & 0xFF])
354 regs->eflags |= X86_EFLAGS_PF;
355 }
/*
 * Complete an emulated port-I/O request: fold the device model's
 * response (in *p) back into the guest register state.
 *
 * Two shapes of request are handled:
 *  - string/overlapping ops (p->data_is_ptr or OVERLAP): advance
 *    ESI/EDI by the bytes transferred (direction per EFLAGS.DF via
 *    p->df) and, for an OVERLAP read, copy the response data to the
 *    guest-virtual/physical address recorded at decode time;
 *  - plain IN: merge the response into the low 1/2/4 bytes of EAX.
 */
static void hvm_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
                           struct hvm_io_op *pio_opp)
{
    unsigned long old_eax;
    int sign = p->df ? -1 : 1;  /* DF set => string ops count downwards */

    if ( p->data_is_ptr || (pio_opp->flags & OVERLAP) )
    {
        /* REP prefix: the whole repetition completed; consume the count. */
        if ( pio_opp->flags & REPZ )
            regs->ecx -= p->count;

        if ( p->dir == IOREQ_READ )
        {
            if ( pio_opp->flags & OVERLAP )
            {
                /* OVERLAP: destination spans a page boundary, so the
                 * non-I/O side must be copied by hand to pio_opp->addr. */
                unsigned long addr = pio_opp->addr;
                if ( hvm_paging_enabled(current) )
                {
                    int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
                    if ( rv != 0 )
                    {
                        /* Failed on the page-spanning copy. Inject PF into
                         * the guest for the address where we failed. */
                        addr += p->size - rv;
                        gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side "
                                 "of a page-spanning PIO: va=%#lx\n", addr);
                        hvm_inject_exception(TRAP_page_fault,
                                             PFEC_write_access, addr);
                        return;
                    }
                }
                else
                    /* Paging disabled: addr is a guest-physical address. */
                    (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
            }
            regs->edi += sign * p->count * p->size;
        }
        else /* p->dir == IOREQ_WRITE */
        {
            ASSERT(p->dir == IOREQ_WRITE);
            regs->esi += sign * p->count * p->size;
        }
    }
    else if ( p->dir == IOREQ_READ )
    {
        /* Plain IN: merge into EAX, preserving untouched high bytes. */
        old_eax = regs->eax;
        switch ( p->size )
        {
        case 1:
            regs->eax = (old_eax & 0xffffff00) | (p->data & 0xff);
            break;
        case 2:
            regs->eax = (old_eax & 0xffff0000) | (p->data & 0xffff);
            break;
        case 4:
            regs->eax = (p->data & 0xffffffff);
            break;
        default:
            printk("Error: %s unknown port size\n", __FUNCTION__);
            domain_crash_synchronous();
        }
        TRACE_VMEXIT(3, regs->eax);
    }
}
421 static void hvm_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
422 struct hvm_io_op *mmio_opp)
423 {
424 int sign = p->df ? -1 : 1;
425 int size = -1, index = -1;
426 unsigned long value = 0, diff = 0;
427 unsigned long src, dst;
429 src = mmio_opp->operand[0];
430 dst = mmio_opp->operand[1];
431 size = operand_size(src);
433 switch (mmio_opp->instr) {
434 case INSTR_MOV:
435 if (dst & REGISTER) {
436 index = operand_index(dst);
437 set_reg_value(size, index, 0, regs, p->data);
438 }
439 break;
441 case INSTR_MOVZX:
442 if (dst & REGISTER) {
443 switch (size) {
444 case BYTE:
445 p->data &= 0xFFULL;
446 break;
448 case WORD:
449 p->data &= 0xFFFFULL;
450 break;
452 case LONG:
453 p->data &= 0xFFFFFFFFULL;
454 break;
456 default:
457 printk("Impossible source operand size of movzx instr: %d\n", size);
458 domain_crash_synchronous();
459 }
460 index = operand_index(dst);
461 set_reg_value(operand_size(dst), index, 0, regs, p->data);
462 }
463 break;
465 case INSTR_MOVSX:
466 if (dst & REGISTER) {
467 switch (size) {
468 case BYTE:
469 p->data &= 0xFFULL;
470 if ( p->data & 0x80ULL )
471 p->data |= 0xFFFFFFFFFFFFFF00ULL;
472 break;
474 case WORD:
475 p->data &= 0xFFFFULL;
476 if ( p->data & 0x8000ULL )
477 p->data |= 0xFFFFFFFFFFFF0000ULL;
478 break;
480 case LONG:
481 p->data &= 0xFFFFFFFFULL;
482 if ( p->data & 0x80000000ULL )
483 p->data |= 0xFFFFFFFF00000000ULL;
484 break;
486 default:
487 printk("Impossible source operand size of movsx instr: %d\n", size);
488 domain_crash_synchronous();
489 }
490 index = operand_index(dst);
491 set_reg_value(operand_size(dst), index, 0, regs, p->data);
492 }
493 break;
495 case INSTR_MOVS:
496 sign = p->df ? -1 : 1;
498 if (mmio_opp->flags & REPZ)
499 regs->ecx -= p->count;
501 if ((mmio_opp->flags & OVERLAP) && p->dir == IOREQ_READ) {
502 unsigned long addr = mmio_opp->addr;
504 if (hvm_paging_enabled(current))
505 {
506 int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
507 if ( rv != 0 )
508 {
509 /* Failed on the page-spanning copy. Inject PF into
510 * the guest for the address where we failed. */
511 addr += p->size - rv;
512 gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side of "
513 "a page-spanning MMIO: va=%#lx\n", addr);
514 hvm_inject_exception(TRAP_page_fault,
515 PFEC_write_access, addr);
516 return;
517 }
518 }
519 else
520 (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
521 }
523 regs->esi += sign * p->count * p->size;
524 regs->edi += sign * p->count * p->size;
526 break;
528 case INSTR_STOS:
529 sign = p->df ? -1 : 1;
530 regs->edi += sign * p->count * p->size;
531 if (mmio_opp->flags & REPZ)
532 regs->ecx -= p->count;
533 break;
535 case INSTR_LODS:
536 set_reg_value(size, 0, 0, regs, p->data);
537 sign = p->df ? -1 : 1;
538 regs->esi += sign * p->count * p->size;
539 if (mmio_opp->flags & REPZ)
540 regs->ecx -= p->count;
541 break;
543 case INSTR_AND:
544 if (src & REGISTER) {
545 index = operand_index(src);
546 value = get_reg_value(size, index, 0, regs);
547 diff = (unsigned long) p->data & value;
548 } else if (src & IMMEDIATE) {
549 value = mmio_opp->immediate;
550 diff = (unsigned long) p->data & value;
551 } else if (src & MEMORY) {
552 index = operand_index(dst);
553 value = get_reg_value(size, index, 0, regs);
554 diff = (unsigned long) p->data & value;
555 set_reg_value(size, index, 0, regs, diff);
556 }
558 case INSTR_ADD:
559 if (src & REGISTER) {
560 index = operand_index(src);
561 value = get_reg_value(size, index, 0, regs);
562 diff = (unsigned long) p->data + value;
563 } else if (src & IMMEDIATE) {
564 value = mmio_opp->immediate;
565 diff = (unsigned long) p->data + value;
566 } else if (src & MEMORY) {
567 index = operand_index(dst);
568 value = get_reg_value(size, index, 0, regs);
569 diff = (unsigned long) p->data + value;
570 set_reg_value(size, index, 0, regs, diff);
571 }
573 /*
574 * The OF and CF flags are cleared; the SF, ZF, and PF
575 * flags are set according to the result. The state of
576 * the AF flag is undefined.
577 */
578 regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
579 X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
580 set_eflags_ZF(size, diff, regs);
581 set_eflags_SF(size, diff, regs);
582 set_eflags_PF(size, diff, regs);
583 break;
585 case INSTR_OR:
586 if (src & REGISTER) {
587 index = operand_index(src);
588 value = get_reg_value(size, index, 0, regs);
589 diff = (unsigned long) p->data | value;
590 } else if (src & IMMEDIATE) {
591 value = mmio_opp->immediate;
592 diff = (unsigned long) p->data | value;
593 } else if (src & MEMORY) {
594 index = operand_index(dst);
595 value = get_reg_value(size, index, 0, regs);
596 diff = (unsigned long) p->data | value;
597 set_reg_value(size, index, 0, regs, diff);
598 }
600 /*
601 * The OF and CF flags are cleared; the SF, ZF, and PF
602 * flags are set according to the result. The state of
603 * the AF flag is undefined.
604 */
605 regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
606 X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
607 set_eflags_ZF(size, diff, regs);
608 set_eflags_SF(size, diff, regs);
609 set_eflags_PF(size, diff, regs);
610 break;
612 case INSTR_XOR:
613 if (src & REGISTER) {
614 index = operand_index(src);
615 value = get_reg_value(size, index, 0, regs);
616 diff = (unsigned long) p->data ^ value;
617 } else if (src & IMMEDIATE) {
618 value = mmio_opp->immediate;
619 diff = (unsigned long) p->data ^ value;
620 } else if (src & MEMORY) {
621 index = operand_index(dst);
622 value = get_reg_value(size, index, 0, regs);
623 diff = (unsigned long) p->data ^ value;
624 set_reg_value(size, index, 0, regs, diff);
625 }
627 /*
628 * The OF and CF flags are cleared; the SF, ZF, and PF
629 * flags are set according to the result. The state of
630 * the AF flag is undefined.
631 */
632 regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
633 X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
634 set_eflags_ZF(size, diff, regs);
635 set_eflags_SF(size, diff, regs);
636 set_eflags_PF(size, diff, regs);
637 break;
639 case INSTR_CMP:
640 case INSTR_SUB:
641 if (src & REGISTER) {
642 index = operand_index(src);
643 value = get_reg_value(size, index, 0, regs);
644 diff = (unsigned long) p->data - value;
645 } else if (src & IMMEDIATE) {
646 value = mmio_opp->immediate;
647 diff = (unsigned long) p->data - value;
648 } else if (src & MEMORY) {
649 index = operand_index(dst);
650 value = get_reg_value(size, index, 0, regs);
651 diff = value - (unsigned long) p->data;
652 if ( mmio_opp->instr == INSTR_SUB )
653 set_reg_value(size, index, 0, regs, diff);
654 }
656 /*
657 * The CF, OF, SF, ZF, AF, and PF flags are set according
658 * to the result
659 */
660 regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
661 X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
662 set_eflags_CF(size, value, (unsigned long) p->data, regs);
663 set_eflags_OF(size, diff, value, (unsigned long) p->data, regs);
664 set_eflags_AF(size, diff, value, (unsigned long) p->data, regs);
665 set_eflags_ZF(size, diff, regs);
666 set_eflags_SF(size, diff, regs);
667 set_eflags_PF(size, diff, regs);
668 break;
670 case INSTR_TEST:
671 if (src & REGISTER) {
672 index = operand_index(src);
673 value = get_reg_value(size, index, 0, regs);
674 } else if (src & IMMEDIATE) {
675 value = mmio_opp->immediate;
676 } else if (src & MEMORY) {
677 index = operand_index(dst);
678 value = get_reg_value(size, index, 0, regs);
679 }
680 diff = (unsigned long) p->data & value;
682 /*
683 * Sets the SF, ZF, and PF status flags. CF and OF are set to 0
684 */
685 regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
686 X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
687 set_eflags_ZF(size, diff, regs);
688 set_eflags_SF(size, diff, regs);
689 set_eflags_PF(size, diff, regs);
690 break;
692 case INSTR_BT:
693 if ( src & REGISTER )
694 {
695 index = operand_index(src);
696 value = get_reg_value(size, index, 0, regs);
697 }
698 else if ( src & IMMEDIATE )
699 value = mmio_opp->immediate;
700 if (p->data & (1 << (value & ((1 << 5) - 1))))
701 regs->eflags |= X86_EFLAGS_CF;
702 else
703 regs->eflags &= ~X86_EFLAGS_CF;
705 break;
707 case INSTR_XCHG:
708 if (src & REGISTER) {
709 index = operand_index(src);
710 set_reg_value(size, index, 0, regs, p->data);
711 } else {
712 index = operand_index(dst);
713 set_reg_value(size, index, 0, regs, p->data);
714 }
715 break;
717 case INSTR_PUSH:
718 mmio_opp->addr += hvm_get_segment_base(current, x86_seg_ss);
719 {
720 unsigned long addr = mmio_opp->addr;
721 int rv = hvm_copy_to_guest_virt(addr, &p->data, size);
722 if ( rv != 0 )
723 {
724 addr += p->size - rv;
725 gdprintk(XENLOG_DEBUG, "Pagefault emulating PUSH from MMIO: "
726 "va=%#lx\n", addr);
727 hvm_inject_exception(TRAP_page_fault, PFEC_write_access, addr);
728 return;
729 }
730 }
731 break;
732 }
733 }
/*
 * Entry point run when the device model has completed an I/O request
 * for vcpu v: validate the shared ioreq slot, dispatch to the PIO or
 * MMIO completion handler, and propagate the updated register image
 * back into the guest context.
 */
void hvm_io_assist(struct vcpu *v)
{
    vcpu_iodata_t *vio;
    ioreq_t *p;
    struct cpu_user_regs *regs;
    struct hvm_io_op *io_opp;

    /* Registers were snapshotted into io_op at emulation start. */
    io_opp = &v->arch.hvm_vcpu.io_op;
    regs = &io_opp->io_context;

    /* Shared-memory ioreq page for this vcpu. */
    vio = get_vio(v->domain, v->vcpu_id);

    p = &vio->vp_ioreq;
    if ( p->state != STATE_IORESP_READY )
    {
        gdprintk(XENLOG_ERR, "Unexpected HVM iorequest state %d.\n", p->state);
        domain_crash_synchronous();
    }

    rmb(); /* see IORESP_READY /then/ read contents of ioreq */

    p->state = STATE_IOREQ_NONE;

    if ( p->type == IOREQ_TYPE_PIO )
        hvm_pio_assist(regs, p, io_opp);
    else
        hvm_mmio_assist(regs, p, io_opp);

    /* Copy register changes back into current guest state. */
    hvm_load_cpu_guest_regs(v, regs);
    memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
}
767 /*
768 * Local variables:
769 * mode: C
770 * c-set-style: "BSD"
771 * c-basic-offset: 4
772 * tab-width: 4
773 * indent-tabs-mode: nil
774 * End:
775 */