ia64/xen-unstable

view xen/arch/x86/hvm/io.c @ 14039:c39a6b458bd0

[HVM] Fix MMIO AND emulation
which was falling through into AND...
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Tue Feb 20 20:49:44 2007 +0000 (2007-02-20)
parents 4314691c70a2
children 720afbf74001
line source
1 /*
2 * io.c: Handling I/O and interrupts.
3 *
4 * Copyright (c) 2004, Intel Corporation.
5 * Copyright (c) 2005, International Business Machines Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
21 #include <xen/config.h>
22 #include <xen/init.h>
23 #include <xen/mm.h>
24 #include <xen/lib.h>
25 #include <xen/errno.h>
26 #include <xen/trace.h>
27 #include <xen/event.h>
29 #include <xen/hypercall.h>
30 #include <asm/current.h>
31 #include <asm/cpufeature.h>
32 #include <asm/processor.h>
33 #include <asm/msr.h>
34 #include <asm/apic.h>
35 #include <asm/paging.h>
36 #include <asm/hvm/hvm.h>
37 #include <asm/hvm/support.h>
38 #include <asm/hvm/vpt.h>
39 #include <asm/hvm/vpic.h>
40 #include <asm/hvm/vlapic.h>
42 #include <public/sched.h>
43 #include <public/hvm/ioreq.h>
45 #if defined (__i386__)
46 static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
47 {
48 switch (size) {
49 case BYTE:
50 switch (index) {
51 case 0:
52 regs->eax &= 0xFFFFFF00;
53 regs->eax |= (value & 0xFF);
54 break;
55 case 1:
56 regs->ecx &= 0xFFFFFF00;
57 regs->ecx |= (value & 0xFF);
58 break;
59 case 2:
60 regs->edx &= 0xFFFFFF00;
61 regs->edx |= (value & 0xFF);
62 break;
63 case 3:
64 regs->ebx &= 0xFFFFFF00;
65 regs->ebx |= (value & 0xFF);
66 break;
67 case 4:
68 regs->eax &= 0xFFFF00FF;
69 regs->eax |= ((value & 0xFF) << 8);
70 break;
71 case 5:
72 regs->ecx &= 0xFFFF00FF;
73 regs->ecx |= ((value & 0xFF) << 8);
74 break;
75 case 6:
76 regs->edx &= 0xFFFF00FF;
77 regs->edx |= ((value & 0xFF) << 8);
78 break;
79 case 7:
80 regs->ebx &= 0xFFFF00FF;
81 regs->ebx |= ((value & 0xFF) << 8);
82 break;
83 default:
84 goto crash;
85 }
86 break;
87 case WORD:
88 switch (index) {
89 case 0:
90 regs->eax &= 0xFFFF0000;
91 regs->eax |= (value & 0xFFFF);
92 break;
93 case 1:
94 regs->ecx &= 0xFFFF0000;
95 regs->ecx |= (value & 0xFFFF);
96 break;
97 case 2:
98 regs->edx &= 0xFFFF0000;
99 regs->edx |= (value & 0xFFFF);
100 break;
101 case 3:
102 regs->ebx &= 0xFFFF0000;
103 regs->ebx |= (value & 0xFFFF);
104 break;
105 case 4:
106 regs->esp &= 0xFFFF0000;
107 regs->esp |= (value & 0xFFFF);
108 break;
109 case 5:
110 regs->ebp &= 0xFFFF0000;
111 regs->ebp |= (value & 0xFFFF);
112 break;
113 case 6:
114 regs->esi &= 0xFFFF0000;
115 regs->esi |= (value & 0xFFFF);
116 break;
117 case 7:
118 regs->edi &= 0xFFFF0000;
119 regs->edi |= (value & 0xFFFF);
120 break;
121 default:
122 goto crash;
123 }
124 break;
125 case LONG:
126 switch (index) {
127 case 0:
128 regs->eax = value;
129 break;
130 case 1:
131 regs->ecx = value;
132 break;
133 case 2:
134 regs->edx = value;
135 break;
136 case 3:
137 regs->ebx = value;
138 break;
139 case 4:
140 regs->esp = value;
141 break;
142 case 5:
143 regs->ebp = value;
144 break;
145 case 6:
146 regs->esi = value;
147 break;
148 case 7:
149 regs->edi = value;
150 break;
151 default:
152 goto crash;
153 }
154 break;
155 default:
156 crash:
157 gdprintk(XENLOG_ERR, "size:%x, index:%x are invalid!\n", size, index);
158 domain_crash_synchronous();
159 }
160 }
161 #else
162 static inline void __set_reg_value(unsigned long *reg, int size, long value)
163 {
164 switch (size) {
165 case BYTE_64:
166 *reg &= ~0xFF;
167 *reg |= (value & 0xFF);
168 break;
169 case WORD:
170 *reg &= ~0xFFFF;
171 *reg |= (value & 0xFFFF);
172 break;
173 case LONG:
174 *reg &= ~0xFFFFFFFF;
175 *reg |= (value & 0xFFFFFFFF);
176 break;
177 case QUAD:
178 *reg = value;
179 break;
180 default:
181 gdprintk(XENLOG_ERR, "size:%x is invalid\n", size);
182 domain_crash_synchronous();
183 }
184 }
186 static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
187 {
188 if (size == BYTE) {
189 switch (index) {
190 case 0:
191 regs->rax &= ~0xFF;
192 regs->rax |= (value & 0xFF);
193 break;
194 case 1:
195 regs->rcx &= ~0xFF;
196 regs->rcx |= (value & 0xFF);
197 break;
198 case 2:
199 regs->rdx &= ~0xFF;
200 regs->rdx |= (value & 0xFF);
201 break;
202 case 3:
203 regs->rbx &= ~0xFF;
204 regs->rbx |= (value & 0xFF);
205 break;
206 case 4:
207 regs->rax &= 0xFFFFFFFFFFFF00FF;
208 regs->rax |= ((value & 0xFF) << 8);
209 break;
210 case 5:
211 regs->rcx &= 0xFFFFFFFFFFFF00FF;
212 regs->rcx |= ((value & 0xFF) << 8);
213 break;
214 case 6:
215 regs->rdx &= 0xFFFFFFFFFFFF00FF;
216 regs->rdx |= ((value & 0xFF) << 8);
217 break;
218 case 7:
219 regs->rbx &= 0xFFFFFFFFFFFF00FF;
220 regs->rbx |= ((value & 0xFF) << 8);
221 break;
222 default:
223 gdprintk(XENLOG_ERR, "size:%x, index:%x are invalid!\n",
224 size, index);
225 domain_crash_synchronous();
226 break;
227 }
228 return;
229 }
231 switch (index) {
232 case 0:
233 __set_reg_value(&regs->rax, size, value);
234 break;
235 case 1:
236 __set_reg_value(&regs->rcx, size, value);
237 break;
238 case 2:
239 __set_reg_value(&regs->rdx, size, value);
240 break;
241 case 3:
242 __set_reg_value(&regs->rbx, size, value);
243 break;
244 case 4:
245 __set_reg_value(&regs->rsp, size, value);
246 break;
247 case 5:
248 __set_reg_value(&regs->rbp, size, value);
249 break;
250 case 6:
251 __set_reg_value(&regs->rsi, size, value);
252 break;
253 case 7:
254 __set_reg_value(&regs->rdi, size, value);
255 break;
256 case 8:
257 __set_reg_value(&regs->r8, size, value);
258 break;
259 case 9:
260 __set_reg_value(&regs->r9, size, value);
261 break;
262 case 10:
263 __set_reg_value(&regs->r10, size, value);
264 break;
265 case 11:
266 __set_reg_value(&regs->r11, size, value);
267 break;
268 case 12:
269 __set_reg_value(&regs->r12, size, value);
270 break;
271 case 13:
272 __set_reg_value(&regs->r13, size, value);
273 break;
274 case 14:
275 __set_reg_value(&regs->r14, size, value);
276 break;
277 case 15:
278 __set_reg_value(&regs->r15, size, value);
279 break;
280 default:
281 gdprintk(XENLOG_ERR, "Invalid index\n");
282 domain_crash_synchronous();
283 }
284 return;
285 }
286 #endif
288 extern long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs);
290 static inline void set_eflags_CF(int size, unsigned long v1,
291 unsigned long v2, struct cpu_user_regs *regs)
292 {
293 unsigned long mask = (1 << (8 * size)) - 1;
295 if ((v1 & mask) > (v2 & mask))
296 regs->eflags |= X86_EFLAGS_CF;
297 else
298 regs->eflags &= ~X86_EFLAGS_CF;
299 }
301 static inline void set_eflags_OF(int size, unsigned long v1,
302 unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
303 {
304 if ((v3 ^ v2) & (v3 ^ v1) & (1 << ((8 * size) - 1)))
305 regs->eflags |= X86_EFLAGS_OF;
306 }
308 static inline void set_eflags_AF(int size, unsigned long v1,
309 unsigned long v2, unsigned long v3, struct cpu_user_regs *regs)
310 {
311 if ((v1 ^ v2 ^ v3) & 0x10)
312 regs->eflags |= X86_EFLAGS_AF;
313 }
315 static inline void set_eflags_ZF(int size, unsigned long v1,
316 struct cpu_user_regs *regs)
317 {
318 unsigned long mask = (1 << (8 * size)) - 1;
320 if ((v1 & mask) == 0)
321 regs->eflags |= X86_EFLAGS_ZF;
322 }
324 static inline void set_eflags_SF(int size, unsigned long v1,
325 struct cpu_user_regs *regs)
326 {
327 if (v1 & (1 << ((8 * size) - 1)))
328 regs->eflags |= X86_EFLAGS_SF;
329 }
/*
 * parity_table[b] is 1 when byte B has an even number of set bits
 * (x86 PF semantics), 0 otherwise.  Declared const: the table is a
 * pure lookup used only by set_eflags_PF(), so it belongs in rodata
 * and must never be written.
 */
static const char parity_table[256] = {
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
};
350 static inline void set_eflags_PF(int size, unsigned long v1,
351 struct cpu_user_regs *regs)
352 {
353 if (parity_table[v1 & 0xFF])
354 regs->eflags |= X86_EFLAGS_PF;
355 }
357 static void hvm_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
358 struct hvm_io_op *pio_opp)
359 {
360 unsigned long old_eax;
361 int sign = p->df ? -1 : 1;
363 if ( p->data_is_ptr || (pio_opp->flags & OVERLAP) )
364 {
365 if ( pio_opp->flags & REPZ )
366 regs->ecx -= p->count;
368 if ( p->dir == IOREQ_READ )
369 {
370 if ( pio_opp->flags & OVERLAP )
371 {
372 unsigned long addr = pio_opp->addr;
373 if ( hvm_paging_enabled(current) )
374 {
375 int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
376 if ( rv != 0 )
377 {
378 /* Failed on the page-spanning copy. Inject PF into
379 * the guest for the address where we failed. */
380 addr += p->size - rv;
381 gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side "
382 "of a page-spanning PIO: va=%#lx\n", addr);
383 hvm_inject_exception(TRAP_page_fault,
384 PFEC_write_access, addr);
385 return;
386 }
387 }
388 else
389 (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
390 }
391 regs->edi += sign * p->count * p->size;
392 }
393 else /* p->dir == IOREQ_WRITE */
394 {
395 ASSERT(p->dir == IOREQ_WRITE);
396 regs->esi += sign * p->count * p->size;
397 }
398 }
399 else if ( p->dir == IOREQ_READ )
400 {
401 old_eax = regs->eax;
402 switch ( p->size )
403 {
404 case 1:
405 regs->eax = (old_eax & 0xffffff00) | (p->data & 0xff);
406 break;
407 case 2:
408 regs->eax = (old_eax & 0xffff0000) | (p->data & 0xffff);
409 break;
410 case 4:
411 regs->eax = (p->data & 0xffffffff);
412 break;
413 default:
414 printk("Error: %s unknown port size\n", __FUNCTION__);
415 domain_crash_synchronous();
416 }
417 TRACE_VMEXIT(3, regs->eax);
418 }
419 }
/*
 * Complete an emulated MMIO request: apply the device model's response
 * in *p to the saved guest state, replaying the decoded instruction's
 * register and EFLAGS side effects.  The flag-setting order in the
 * arithmetic cases (clear, then CF/OF/AF/ZF/SF/PF) is deliberate: the
 * set_eflags_* helpers other than CF only ever set their flag.
 */
static void hvm_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
                            struct hvm_io_op *mmio_opp)
{
    int sign = p->df ? -1 : 1;
    int size = -1, index = -1;
    unsigned long value = 0, diff = 0;
    unsigned long src, dst;

    /* Decoded operand descriptors (REGISTER/IMMEDIATE/MEMORY bits plus
     * index/size encodings) produced by the instruction decoder. */
    src = mmio_opp->operand[0];
    dst = mmio_opp->operand[1];
    size = operand_size(src);

    switch (mmio_opp->instr) {
    case INSTR_MOV:
        /* MOV from MMIO: write the loaded data into the destination
         * register; a MOV *to* MMIO needs no register update. */
        if (dst & REGISTER) {
            index = operand_index(dst);
            set_reg_value(size, index, 0, regs, p->data);
        }
        break;

    case INSTR_MOVZX:
        /* Zero-extend: truncate the loaded data to the source width,
         * then store it at the (wider) destination width. */
        if (dst & REGISTER) {
            switch (size) {
            case BYTE:
                p->data &= 0xFFULL;
                break;

            case WORD:
                p->data &= 0xFFFFULL;
                break;

            case LONG:
                p->data &= 0xFFFFFFFFULL;
                break;

            default:
                printk("Impossible source operand size of movzx instr: %d\n", size);
                domain_crash_synchronous();
            }
            index = operand_index(dst);
            set_reg_value(operand_size(dst), index, 0, regs, p->data);
        }
        break;

    case INSTR_MOVSX:
        /* Sign-extend: truncate to the source width, replicate the sign
         * bit up through bit 63, then store at the destination width. */
        if (dst & REGISTER) {
            switch (size) {
            case BYTE:
                p->data &= 0xFFULL;
                if ( p->data & 0x80ULL )
                    p->data |= 0xFFFFFFFFFFFFFF00ULL;
                break;

            case WORD:
                p->data &= 0xFFFFULL;
                if ( p->data & 0x8000ULL )
                    p->data |= 0xFFFFFFFFFFFF0000ULL;
                break;

            case LONG:
                p->data &= 0xFFFFFFFFULL;
                if ( p->data & 0x80000000ULL )
                    p->data |= 0xFFFFFFFF00000000ULL;
                break;

            default:
                printk("Impossible source operand size of movsx instr: %d\n", size);
                domain_crash_synchronous();
            }
            index = operand_index(dst);
            set_reg_value(operand_size(dst), index, 0, regs, p->data);
        }
        break;

    case INSTR_MOVS:
        /* String move: adjust eCX for REP, copy back page-spanning read
         * data if needed, and advance both eSI and eDI. */
        sign = p->df ? -1 : 1;

        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;

        if ((mmio_opp->flags & OVERLAP) && p->dir == IOREQ_READ) {
            unsigned long addr = mmio_opp->addr;

            if (hvm_paging_enabled(current))
            {
                int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
                if ( rv != 0 )
                {
                    /* Failed on the page-spanning copy. Inject PF into
                     * the guest for the address where we failed. */
                    addr += p->size - rv;
                    gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side of "
                             "a page-spanning MMIO: va=%#lx\n", addr);
                    hvm_inject_exception(TRAP_page_fault,
                                         PFEC_write_access, addr);
                    return;
                }
            }
            else
                (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
        }

        regs->esi += sign * p->count * p->size;
        regs->edi += sign * p->count * p->size;

        break;

    case INSTR_STOS:
        /* String store: only the destination pointer and eCX move. */
        sign = p->df ? -1 : 1;
        regs->edi += sign * p->count * p->size;
        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;
        break;

    case INSTR_LODS:
        /* String load: data lands in eAX (register index 0). */
        set_reg_value(size, 0, 0, regs, p->data);
        sign = p->df ? -1 : 1;
        regs->esi += sign * p->count * p->size;
        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;
        break;

    case INSTR_AND:
        /* AND with an MMIO operand.  Only the MEMORY-source direction
         * (reg &= mem) writes a register back here.
         * NOTE(review): unlike ADD/OR/XOR below, this case updates no
         * arithmetic flags, although real AND clears CF/OF and sets
         * SF/ZF/PF -- confirm whether that omission is intentional. */
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->data & value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->data & value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->data & value;
            set_reg_value(size, index, 0, regs, diff);
        }
        break;

    case INSTR_ADD:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->data + value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->data + value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->data + value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_OR:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->data | value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->data | value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->data | value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_XOR:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->data ^ value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->data ^ value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->data ^ value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_CMP:
    case INSTR_SUB:
        /* Note the operand order flips with direction: the MEMORY-source
         * form computes reg - mem, the others mem - reg/imm.  Only SUB
         * writes the result back; CMP is flags-only. */
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->data - value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->data - value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = value - (unsigned long) p->data;
            if ( mmio_opp->instr == INSTR_SUB )
                set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The CF, OF, SF, ZF, AF, and PF flags are set according
         * to the result
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_CF(size, value, (unsigned long) p->data, regs);
        set_eflags_OF(size, diff, value, (unsigned long) p->data, regs);
        set_eflags_AF(size, diff, value, (unsigned long) p->data, regs);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_TEST:
        /* TEST never writes a register; it only ANDs for the flags. */
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
        }
        diff = (unsigned long) p->data & value;

        /*
         * Sets the SF, ZF, and PF status flags. CF and OF are set to 0
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_BT:
        /* BT: CF <- bit (value mod 32) of the loaded word.
         * NOTE(review): the "& ((1 << 5) - 1)" masks the bit offset to
         * 0-31 regardless of operand size -- confirm this is adequate
         * for 16- and 64-bit forms. */
        if ( src & REGISTER )
        {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
        }
        else if ( src & IMMEDIATE )
            value = mmio_opp->immediate;
        if (p->data & (1 << (value & ((1 << 5) - 1))))
            regs->eflags |= X86_EFLAGS_CF;
        else
            regs->eflags &= ~X86_EFLAGS_CF;

        break;

    case INSTR_XCHG:
        /* XCHG with MMIO: the memory side was handled by the device
         * model; here only the register half receives the old data. */
        if (src & REGISTER) {
            index = operand_index(src);
            set_reg_value(size, index, 0, regs, p->data);
        } else {
            index = operand_index(dst);
            set_reg_value(size, index, 0, regs, p->data);
        }
        break;

    case INSTR_PUSH:
        /* PUSH of an MMIO operand: store the loaded data at the
         * stack address computed during decode (offset + SS base).
         * NOTE(review): the copy uses `size` but the fault-address
         * adjustment uses p->size -- confirm these always agree. */
        mmio_opp->addr += hvm_get_segment_base(current, x86_seg_ss);
        {
            unsigned long addr = mmio_opp->addr;
            int rv = hvm_copy_to_guest_virt(addr, &p->data, size);
            if ( rv != 0 )
            {
                addr += p->size - rv;
                gdprintk(XENLOG_DEBUG, "Pagefault emulating PUSH from MMIO: "
                         "va=%#lx\n", addr);
                hvm_inject_exception(TRAP_page_fault, PFEC_write_access, addr);
                return;
            }
        }
        break;
    }
}
736 void hvm_io_assist(struct vcpu *v)
737 {
738 vcpu_iodata_t *vio;
739 ioreq_t *p;
740 struct cpu_user_regs *regs;
741 struct hvm_io_op *io_opp;
743 io_opp = &v->arch.hvm_vcpu.io_op;
744 regs = &io_opp->io_context;
745 vio = get_vio(v->domain, v->vcpu_id);
747 p = &vio->vp_ioreq;
748 if ( p->state != STATE_IORESP_READY )
749 {
750 gdprintk(XENLOG_ERR, "Unexpected HVM iorequest state %d.\n", p->state);
751 domain_crash_synchronous();
752 }
754 rmb(); /* see IORESP_READY /then/ read contents of ioreq */
756 p->state = STATE_IOREQ_NONE;
758 if ( p->type == IOREQ_TYPE_PIO )
759 hvm_pio_assist(regs, p, io_opp);
760 else
761 hvm_mmio_assist(regs, p, io_opp);
763 /* Copy register changes back into current guest state. */
764 hvm_load_cpu_guest_regs(v, regs);
765 memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
766 }
768 /*
769 * Local variables:
770 * mode: C
771 * c-set-style: "BSD"
772 * c-basic-offset: 4
773 * tab-width: 4
774 * indent-tabs-mode: nil
775 * End:
776 */