ia64/xen-unstable: xen/arch/x86/hvm/io.c @ 16427:fd3f6d814f6d

x86: single step after instruction emulation

Inject a single-step trap after emulating an instruction if the guest's
EFLAGS.TF is set.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir.fraser@eu.citrix.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Thu Nov 22 18:28:47 2007 +0000
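
The change itself is the two-line check visible at the end of hvm_io_assist()
below; as a minimal sketch of the pattern (same names as in this file), after
the emulated register state has been copied back into the guest:

    if ( regs->eflags & X86_EFLAGS_TF )
        hvm_inject_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE, 0);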
/*
 * io.c: Handling I/O and interrupts.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/trace.h>
#include <xen/event.h>

#include <xen/hypercall.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/paging.h>
#include <asm/shadow.h>
#include <asm/p2m.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vpt.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>
#include <asm/hvm/trace.h>

#include <public/sched.h>
#include <xen/iocap.h>
#include <public/hvm/ioreq.h>

#if defined (__i386__)
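/*
 * Store 'value' into the general-purpose register selected by the ModRM
 * register index: 0-7 name EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI.  For
 * byte-sized operands, indexes 4-7 instead name the high-byte registers
 * AH, CH, DH, BH, hence the shifted masks below.
 */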
static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
{
    switch (size) {
    case BYTE:
        switch (index) {
        case 0:
            regs->eax &= 0xFFFFFF00;
            regs->eax |= (value & 0xFF);
            break;
        case 1:
            regs->ecx &= 0xFFFFFF00;
            regs->ecx |= (value & 0xFF);
            break;
        case 2:
            regs->edx &= 0xFFFFFF00;
            regs->edx |= (value & 0xFF);
            break;
        case 3:
            regs->ebx &= 0xFFFFFF00;
            regs->ebx |= (value & 0xFF);
            break;
        case 4:
            regs->eax &= 0xFFFF00FF;
            regs->eax |= ((value & 0xFF) << 8);
            break;
        case 5:
            regs->ecx &= 0xFFFF00FF;
            regs->ecx |= ((value & 0xFF) << 8);
            break;
        case 6:
            regs->edx &= 0xFFFF00FF;
            regs->edx |= ((value & 0xFF) << 8);
            break;
        case 7:
            regs->ebx &= 0xFFFF00FF;
            regs->ebx |= ((value & 0xFF) << 8);
            break;
        default:
            goto crash;
        }
        break;
    case WORD:
        switch (index) {
        case 0:
            regs->eax &= 0xFFFF0000;
            regs->eax |= (value & 0xFFFF);
            break;
        case 1:
            regs->ecx &= 0xFFFF0000;
            regs->ecx |= (value & 0xFFFF);
            break;
        case 2:
            regs->edx &= 0xFFFF0000;
            regs->edx |= (value & 0xFFFF);
            break;
        case 3:
            regs->ebx &= 0xFFFF0000;
            regs->ebx |= (value & 0xFFFF);
            break;
        case 4:
            regs->esp &= 0xFFFF0000;
            regs->esp |= (value & 0xFFFF);
            break;
        case 5:
            regs->ebp &= 0xFFFF0000;
            regs->ebp |= (value & 0xFFFF);
            break;
        case 6:
            regs->esi &= 0xFFFF0000;
            regs->esi |= (value & 0xFFFF);
            break;
        case 7:
            regs->edi &= 0xFFFF0000;
            regs->edi |= (value & 0xFFFF);
            break;
        default:
            goto crash;
        }
        break;
    case LONG:
        switch (index) {
        case 0:
            regs->eax = value;
            break;
        case 1:
            regs->ecx = value;
            break;
        case 2:
            regs->edx = value;
            break;
        case 3:
            regs->ebx = value;
            break;
        case 4:
            regs->esp = value;
            break;
        case 5:
            regs->ebp = value;
            break;
        case 6:
            regs->esi = value;
            break;
        case 7:
            regs->edi = value;
            break;
        default:
            goto crash;
        }
        break;
    default:
    crash:
        gdprintk(XENLOG_ERR, "size:%x, index:%x are invalid!\n", size, index);
        domain_crash_synchronous();
    }
}
#else
static inline void __set_reg_value(unsigned long *reg, int size, long value)
{
    switch (size) {
    case BYTE_64:
        *reg &= ~0xFF;
        *reg |= (value & 0xFF);
        break;
    case WORD:
        *reg &= ~0xFFFF;
        *reg |= (value & 0xFFFF);
        break;
    case LONG:
        *reg &= ~0xFFFFFFFF;
        *reg |= (value & 0xFFFFFFFF);
        break;
    case QUAD:
        *reg = value;
        break;
    default:
        gdprintk(XENLOG_ERR, "size:%x is invalid\n", size);
        domain_crash_synchronous();
    }
}

static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
{
    if (size == BYTE) {
        switch (index) {
        case 0:
            regs->rax &= ~0xFF;
            regs->rax |= (value & 0xFF);
            break;
        case 1:
            regs->rcx &= ~0xFF;
            regs->rcx |= (value & 0xFF);
            break;
        case 2:
            regs->rdx &= ~0xFF;
            regs->rdx |= (value & 0xFF);
            break;
        case 3:
            regs->rbx &= ~0xFF;
            regs->rbx |= (value & 0xFF);
            break;
        case 4:
            regs->rax &= 0xFFFFFFFFFFFF00FF;
            regs->rax |= ((value & 0xFF) << 8);
            break;
        case 5:
            regs->rcx &= 0xFFFFFFFFFFFF00FF;
            regs->rcx |= ((value & 0xFF) << 8);
            break;
        case 6:
            regs->rdx &= 0xFFFFFFFFFFFF00FF;
            regs->rdx |= ((value & 0xFF) << 8);
            break;
        case 7:
            regs->rbx &= 0xFFFFFFFFFFFF00FF;
            regs->rbx |= ((value & 0xFF) << 8);
            break;
        default:
            gdprintk(XENLOG_ERR, "size:%x, index:%x are invalid!\n",
                     size, index);
            domain_crash_synchronous();
            break;
        }
        return;
    }

    switch (index) {
    case 0:
        __set_reg_value(&regs->rax, size, value);
        break;
    case 1:
        __set_reg_value(&regs->rcx, size, value);
        break;
    case 2:
        __set_reg_value(&regs->rdx, size, value);
        break;
    case 3:
        __set_reg_value(&regs->rbx, size, value);
        break;
    case 4:
        __set_reg_value(&regs->rsp, size, value);
        break;
    case 5:
        __set_reg_value(&regs->rbp, size, value);
        break;
    case 6:
        __set_reg_value(&regs->rsi, size, value);
        break;
    case 7:
        __set_reg_value(&regs->rdi, size, value);
        break;
    case 8:
        __set_reg_value(&regs->r8, size, value);
        break;
    case 9:
        __set_reg_value(&regs->r9, size, value);
        break;
    case 10:
        __set_reg_value(&regs->r10, size, value);
        break;
    case 11:
        __set_reg_value(&regs->r11, size, value);
        break;
    case 12:
        __set_reg_value(&regs->r12, size, value);
        break;
    case 13:
        __set_reg_value(&regs->r13, size, value);
        break;
    case 14:
        __set_reg_value(&regs->r14, size, value);
        break;
    case 15:
        __set_reg_value(&regs->r15, size, value);
        break;
    default:
        gdprintk(XENLOG_ERR, "Invalid index\n");
        domain_crash_synchronous();
    }
    return;
}
#endif

long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs);
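
/*
 * Helpers to recompute the guest-visible arithmetic flags (CF, OF, AF,
 * ZF, SF, PF) for the ALU instructions emulated in hvm_mmio_assist():
 * since the operation is carried out by the emulator rather than by the
 * CPU on the guest's behalf, EFLAGS must be derived by hand.
 */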
static inline void set_eflags_CF(int size,
                                 unsigned int instr,
                                 unsigned long result,
                                 unsigned long src,
                                 unsigned long dst,
                                 struct cpu_user_regs *regs)
{
    unsigned long mask;

    if ( size == BYTE_64 )
        size = BYTE;
    ASSERT((size <= sizeof(mask)) && (size > 0));

    mask = ~0UL >> (8 * (sizeof(mask) - size));

    if ( instr == INSTR_ADD )
    {
        /* CF=1 <==> result is less than the augend (and addend) */
        if ( (result & mask) < (dst & mask) )
        {
            ASSERT((result & mask) < (src & mask));
            regs->eflags |= X86_EFLAGS_CF;
        }
    }
    else
    {
        ASSERT( instr == INSTR_CMP || instr == INSTR_SUB );
        if ( (src & mask) > (dst & mask) )
            regs->eflags |= X86_EFLAGS_CF;
    }
}

static inline void set_eflags_OF(int size,
                                 unsigned int instr,
                                 unsigned long result,
                                 unsigned long src,
                                 unsigned long dst,
                                 struct cpu_user_regs *regs)
{
    unsigned long mask;

    if ( size == BYTE_64 )
        size = BYTE;
    ASSERT((size <= sizeof(mask)) && (size > 0));

    mask = 1UL << ((8*size) - 1);

    if ( instr == INSTR_ADD )
    {
        if ((src ^ result) & (dst ^ result) & mask)
            regs->eflags |= X86_EFLAGS_OF;
    }
    else
    {
        ASSERT(instr == INSTR_CMP || instr == INSTR_SUB);
        if ((dst ^ src) & (dst ^ result) & mask)
            regs->eflags |= X86_EFLAGS_OF;
    }
}

static inline void set_eflags_AF(int size,
                                 unsigned long result,
                                 unsigned long src,
                                 unsigned long dst,
                                 struct cpu_user_regs *regs)
{
    if ((result ^ src ^ dst) & 0x10)
        regs->eflags |= X86_EFLAGS_AF;
}

static inline void set_eflags_ZF(int size, unsigned long result,
                                 struct cpu_user_regs *regs)
{
    unsigned long mask;

    if ( size == BYTE_64 )
        size = BYTE;
    ASSERT((size <= sizeof(mask)) && (size > 0));

    mask = ~0UL >> (8 * (sizeof(mask) - size));

    if ((result & mask) == 0)
        regs->eflags |= X86_EFLAGS_ZF;
}

static inline void set_eflags_SF(int size, unsigned long result,
                                 struct cpu_user_regs *regs)
{
    unsigned long mask;

    if ( size == BYTE_64 )
        size = BYTE;
    ASSERT((size <= sizeof(mask)) && (size > 0));

    mask = 1UL << ((8*size) - 1);

    if (result & mask)
        regs->eflags |= X86_EFLAGS_SF;
}

static char parity_table[256] = {
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
};
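
/*
 * PF depends only on the low byte of the result: parity_table[x] is 1
 * iff x has an even number of bits set.
 */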
static inline void set_eflags_PF(int size, unsigned long result,
                                 struct cpu_user_regs *regs)
{
    if (parity_table[result & 0xFF])
        regs->eflags |= X86_EFLAGS_PF;
}
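
/*
 * Complete a port-I/O request: for string or page-spanning forms, step
 * ESI/EDI/ECX and copy data read from the device model back into the
 * guest; for a simple IN, merge the result into the low bytes of EAX.
 */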
static void hvm_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
                           struct hvm_io_op *pio_opp)
{
    if ( p->data_is_ptr || (pio_opp->flags & OVERLAP) )
    {
        int sign = p->df ? -1 : 1;

        if ( pio_opp->flags & REPZ )
            regs->ecx -= p->count;

        if ( p->dir == IOREQ_READ )
        {
            if ( pio_opp->flags & OVERLAP )
            {
                unsigned long addr = pio_opp->addr;
                if ( hvm_paging_enabled(current) )
                {
                    int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
                    if ( rv != 0 )
                    {
                        /* Failed on the page-spanning copy. Inject PF into
                         * the guest for the address where we failed. */
                        addr += p->size - rv;
                        gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side "
                                 "of a page-spanning PIO: va=%#lx\n", addr);
                        hvm_inject_exception(TRAP_page_fault,
                                             PFEC_write_access, addr);
                        return;
                    }
                }
                else
                    (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
            }
            regs->edi += sign * p->count * p->size;
        }
        else /* p->dir == IOREQ_WRITE */
        {
            ASSERT(p->dir == IOREQ_WRITE);
            regs->esi += sign * p->count * p->size;
        }
    }
    else if ( p->dir == IOREQ_READ )
    {
        unsigned long old_eax = regs->eax;

        switch ( p->size )
        {
        case 1:
            regs->eax = (old_eax & ~0xff) | (p->data & 0xff);
            break;
        case 2:
            regs->eax = (old_eax & ~0xffff) | (p->data & 0xffff);
            break;
        case 4:
            regs->eax = (p->data & 0xffffffff);
            break;
        default:
            printk("Error: %s unknown port size\n", __FUNCTION__);
            domain_crash_synchronous();
        }
        HVMTRACE_1D(IO_ASSIST, current, p->data);
    }
}

static void hvm_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
                            struct hvm_io_op *mmio_opp)
{
    int sign = p->df ? -1 : 1;
    int size = -1, index = -1;
    unsigned long value = 0, result = 0;
    unsigned long src, dst;

    src = mmio_opp->operand[0];
    dst = mmio_opp->operand[1];
    size = operand_size(src);

    HVMTRACE_1D(MMIO_ASSIST, current, p->data);

    switch (mmio_opp->instr) {
    case INSTR_MOV:
        if (dst & REGISTER) {
            index = operand_index(dst);
            set_reg_value(size, index, 0, regs, p->data);
        }
        break;

    case INSTR_MOVZX:
        if (dst & REGISTER) {
            switch (size) {
            case BYTE:
                p->data &= 0xFFULL;
                break;

            case WORD:
                p->data &= 0xFFFFULL;
                break;

            case LONG:
                p->data &= 0xFFFFFFFFULL;
                break;

            default:
                printk("Impossible source operand size of movzx instr: %d\n", size);
                domain_crash_synchronous();
            }
            index = operand_index(dst);
            set_reg_value(operand_size(dst), index, 0, regs, p->data);
        }
        break;

    case INSTR_MOVSX:
        if (dst & REGISTER) {
            switch (size) {
            case BYTE:
                p->data &= 0xFFULL;
                if ( p->data & 0x80ULL )
                    p->data |= 0xFFFFFFFFFFFFFF00ULL;
                break;

            case WORD:
                p->data &= 0xFFFFULL;
                if ( p->data & 0x8000ULL )
                    p->data |= 0xFFFFFFFFFFFF0000ULL;
                break;

            case LONG:
                p->data &= 0xFFFFFFFFULL;
                if ( p->data & 0x80000000ULL )
                    p->data |= 0xFFFFFFFF00000000ULL;
                break;

            default:
                printk("Impossible source operand size of movsx instr: %d\n", size);
                domain_crash_synchronous();
            }
            index = operand_index(dst);
            set_reg_value(operand_size(dst), index, 0, regs, p->data);
        }
        break;

    case INSTR_MOVS:
        sign = p->df ? -1 : 1;

        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;

        if ((mmio_opp->flags & OVERLAP) && p->dir == IOREQ_READ) {
            unsigned long addr = mmio_opp->addr;

            if (hvm_paging_enabled(current))
            {
                int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
                if ( rv != 0 )
                {
                    /* Failed on the page-spanning copy. Inject PF into
                     * the guest for the address where we failed. */
                    addr += p->size - rv;
                    gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side of "
                             "a page-spanning MMIO: va=%#lx\n", addr);
                    hvm_inject_exception(TRAP_page_fault,
                                         PFEC_write_access, addr);
                    return;
                }
            }
            else
                (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
        }

        regs->esi += sign * p->count * p->size;
        regs->edi += sign * p->count * p->size;

        break;

    case INSTR_STOS:
        sign = p->df ? -1 : 1;
        regs->edi += sign * p->count * p->size;
        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;
        break;

    case INSTR_LODS:
        set_reg_value(size, 0, 0, regs, p->data);
        sign = p->df ? -1 : 1;
        regs->esi += sign * p->count * p->size;
        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;
        break;

    case INSTR_AND:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            result = (unsigned long) p->data & value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            result = (unsigned long) p->data & value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            result = (unsigned long) p->data & value;
            set_reg_value(size, index, 0, regs, result);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, result, regs);
        set_eflags_SF(size, result, regs);
        set_eflags_PF(size, result, regs);
        break;

    case INSTR_ADD:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            result = (unsigned long) p->data + value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            result = (unsigned long) p->data + value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            result = (unsigned long) p->data + value;
            set_reg_value(size, index, 0, regs, result);
        }

        /*
         * The CF, OF, SF, ZF, AF, and PF flags are set according
         * to the result
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_CF(size, mmio_opp->instr, result, value,
                      (unsigned long) p->data, regs);
        set_eflags_OF(size, mmio_opp->instr, result, value,
                      (unsigned long) p->data, regs);
        set_eflags_AF(size, result, value, (unsigned long) p->data, regs);
        set_eflags_ZF(size, result, regs);
        set_eflags_SF(size, result, regs);
        set_eflags_PF(size, result, regs);
        break;

    case INSTR_OR:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            result = (unsigned long) p->data | value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            result = (unsigned long) p->data | value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            result = (unsigned long) p->data | value;
            set_reg_value(size, index, 0, regs, result);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, result, regs);
        set_eflags_SF(size, result, regs);
        set_eflags_PF(size, result, regs);
        break;

    case INSTR_XOR:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            result = (unsigned long) p->data ^ value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            result = (unsigned long) p->data ^ value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            result = (unsigned long) p->data ^ value;
            set_reg_value(size, index, 0, regs, result);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, result, regs);
        set_eflags_SF(size, result, regs);
        set_eflags_PF(size, result, regs);
        break;

    case INSTR_CMP:
    case INSTR_SUB:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            result = (unsigned long) p->data - value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            result = (unsigned long) p->data - value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            result = value - (unsigned long) p->data;
            if ( mmio_opp->instr == INSTR_SUB )
                set_reg_value(size, index, 0, regs, result);
        }

        /*
         * The CF, OF, SF, ZF, AF, and PF flags are set according
         * to the result
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        if ( src & (REGISTER | IMMEDIATE) )
        {
            set_eflags_CF(size, mmio_opp->instr, result, value,
                          (unsigned long) p->data, regs);
            set_eflags_OF(size, mmio_opp->instr, result, value,
                          (unsigned long) p->data, regs);
        }
        else
        {
            set_eflags_CF(size, mmio_opp->instr, result,
                          (unsigned long) p->data, value, regs);
            set_eflags_OF(size, mmio_opp->instr, result,
                          (unsigned long) p->data, value, regs);
        }
        set_eflags_AF(size, result, value, (unsigned long) p->data, regs);
        set_eflags_ZF(size, result, regs);
        set_eflags_SF(size, result, regs);
        set_eflags_PF(size, result, regs);
        break;

    case INSTR_TEST:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
        }
        result = (unsigned long) p->data & value;

        /*
         * Sets the SF, ZF, and PF status flags. CF and OF are set to 0
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, result, regs);
        set_eflags_SF(size, result, regs);
        set_eflags_PF(size, result, regs);
        break;

    case INSTR_BT:
        if ( src & REGISTER )
        {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
        }
        else if ( src & IMMEDIATE )
            value = mmio_opp->immediate;
        if (p->data & (1 << (value & ((1 << 5) - 1))))
            regs->eflags |= X86_EFLAGS_CF;
        else
            regs->eflags &= ~X86_EFLAGS_CF;

        break;

    case INSTR_XCHG:
        if (src & REGISTER) {
            index = operand_index(src);
            set_reg_value(size, index, 0, regs, p->data);
        } else {
            index = operand_index(dst);
            set_reg_value(size, index, 0, regs, p->data);
        }
        break;

    case INSTR_PUSH:
        mmio_opp->addr += hvm_get_segment_base(current, x86_seg_ss);
        {
            unsigned long addr = mmio_opp->addr;
            int rv = hvm_copy_to_guest_virt(addr, &p->data, size);
            if ( rv != 0 )
            {
                addr += p->size - rv;
                gdprintk(XENLOG_DEBUG, "Pagefault emulating PUSH from MMIO:"
                         " va=%#lx\n", addr);
                hvm_inject_exception(TRAP_page_fault, PFEC_write_access, addr);
                return;
            }
        }
        break;
    }
}
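
/*
 * Completion path for an I/O request: called once the device model has
 * filled in the ioreq.  Applies the result to the saved register
 * context, copies that context back into guest state, and (the point of
 * this changeset) injects a debug trap if the guest had EFLAGS.TF set.
 */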
void hvm_io_assist(void)
{
    vcpu_iodata_t *vio;
    ioreq_t *p;
    struct cpu_user_regs *regs;
    struct hvm_io_op *io_opp;
    struct vcpu *v = current;

    io_opp = &v->arch.hvm_vcpu.io_op;
    regs = &io_opp->io_context;
    vio = get_ioreq(v);

    p = &vio->vp_ioreq;
    if ( p->state != STATE_IORESP_READY )
    {
        gdprintk(XENLOG_ERR, "Unexpected HVM iorequest state %d.\n", p->state);
        domain_crash_synchronous();
    }

    rmb(); /* see IORESP_READY /then/ read contents of ioreq */

    p->state = STATE_IOREQ_NONE;

    switch ( p->type )
    {
    case IOREQ_TYPE_INVALIDATE:
        goto out;
    case IOREQ_TYPE_PIO:
        hvm_pio_assist(regs, p, io_opp);
        break;
    default:
        hvm_mmio_assist(regs, p, io_opp);
        break;
    }

    /* Copy register changes back into current guest state. */
    regs->eflags &= ~X86_EFLAGS_RF;
    memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
    if ( regs->eflags & X86_EFLAGS_TF )
        hvm_inject_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE, 0);

 out:
    vcpu_end_shutdown_deferral(v);
}
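
/*
 * Port read for a passed-through (dpci) device: perform the real
 * inb/inw/inl on the machine port and return the result through the
 * ioreq, either inline or via the guest-physical buffer for string ops.
 */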
void dpci_ioport_read(uint32_t mport, ioreq_t *p)
{
    uint64_t i;
    uint64_t z_data;
    uint64_t length = (p->count * p->size);

    for ( i = 0; i < length; i += p->size )
    {
        z_data = ~0ULL;

        switch ( p->size )
        {
        case BYTE:
            z_data = (uint64_t)inb(mport);
            break;
        case WORD:
            z_data = (uint64_t)inw(mport);
            break;
        case LONG:
            z_data = (uint64_t)inl(mport);
            break;
        default:
            gdprintk(XENLOG_ERR, "Error: unable to handle size: %"
                     PRId64 "\n", p->size);
            return;
        }

        /* For string forms p->data holds a guest-physical buffer address;
         * only overwrite it in the inline case. */
        if ( p->data_is_ptr )
        {
            if ( hvm_copy_to_guest_phys(p->data + i, (void *)&z_data,
                                        (int)p->size) )
            {
                gdprintk(XENLOG_ERR, "Error: couldn't copy to hvm phys\n");
                return;
            }
        }
        else
            p->data = z_data;
    }
}

void dpci_ioport_write(uint32_t mport, ioreq_t *p)
{
    uint64_t i;
    uint64_t z_data = 0;
    uint64_t length = (p->count * p->size);

    for ( i = 0; i < length; i += p->size )
    {
        z_data = p->data;
        if ( p->data_is_ptr &&
             hvm_copy_from_guest_phys((void *)&z_data,
                                      p->data + i, (int)p->size) )
        {
            gdprintk(XENLOG_ERR, "Error: couldn't copy from hvm phys\n");
            return;
        }

        switch ( p->size )
        {
        case BYTE:
            outb((uint8_t) z_data, mport);
            break;
        case WORD:
            outw((uint16_t) z_data, mport);
            break;
        case LONG:
            outl((uint32_t) z_data, mport);
            break;
        default:
            gdprintk(XENLOG_ERR, "Error: unable to handle size: %"
                     PRId64 "\n", p->size);
            break;
        }
    }
}
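
/*
 * Look up the guest port in the domain's guest-to-machine I/O port map;
 * if it is found and access to the machine port is permitted, satisfy
 * the request against real hardware.  Returns 1 if handled here.
 */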
int dpci_ioport_intercept(ioreq_t *p)
{
    struct domain *d = current->domain;
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct g2m_ioport *g2m_ioport;
    unsigned int mport, gport = p->addr;
    unsigned int s = 0, e = 0;

    list_for_each_entry( g2m_ioport, &hd->g2m_ioport_list, list )
    {
        s = g2m_ioport->gport;
        e = s + g2m_ioport->np;
        if ( (gport >= s) && (gport < e) )
            goto found;
    }

    return 0;

 found:
    mport = (gport - s) + g2m_ioport->mport;

    if ( !ioports_access_permitted(d, mport, mport + p->size - 1) )
    {
        gdprintk(XENLOG_ERR, "Error: access to gport=0x%x denied!\n",
                 (uint32_t)p->addr);
        return 0;
    }

    switch ( p->dir )
    {
    case IOREQ_READ:
        dpci_ioport_read(mport, p);
        break;
    case IOREQ_WRITE:
        dpci_ioport_write(mport, p);
        break;
    default:
        gdprintk(XENLOG_ERR, "Error: couldn't handle p->dir = %d\n", p->dir);
    }

    return 1;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */