xen/arch/x86/hvm/platform.c @ 16494:c76a9aa12d2e

hvm: Inject #UD for un-emulated instructions rather than crash guest

The CrashMe stress test (a process repeatedly forks child processes; each
child fills a buffer with random bytes and then executes the buffer as
code) can easily crash a 32-bit RHEL5.1 HVM guest, because handle_mmio()
does not yet emulate every instruction.

The CrashMe process runs with root privileges and can access MMIO space in
an unknown way ("strace -f" shows the random code running at CPL=3 calls
neither mmap() nor opens any special files in /dev/); the gpa may look
like 0xa**** or 0xb****, or 0xfee0****.
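
The gist of the fix, abridged from the decode-failure path of handle_mmio()
in the listing below: on DECODE_failure we now log the opcode bytes and
reflect an invalid-opcode fault into the guest rather than destroying the
domain.

    if ( mmio_decode(address_bytes, inst, mmio_op, &ad_size,
                     &op_size, &seg_sel) == DECODE_failure )
    {
        /* Undecodable instruction: log it, then inject #UD into the
         * guest instead of calling domain_crash_synchronous(). */
        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
        return;
    }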

Signed-off-by: Dexuan Cui <dexuan.cui@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Nov 28 13:04:47 2007 +0000 (2007-11-28)
parents bb961bda7eff
children 717f0dce76e7

/*
 * platform.c: handling x86 platform related MMIO instructions
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/domain_page.h>
#include <asm/page.h>
#include <xen/event.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <asm/regs.h>
#include <asm/x86_emulate.h>
#include <asm/paging.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/io.h>
#include <public/hvm/ioreq.h>

#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>

#define DECODE_success 1
#define DECODE_failure 0

#define mk_operand(size_reg, index, seg, flag) \
    (((size_reg) << 24) | ((index) << 16) | ((seg) << 8) | (flag))
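
/*
 * An operand descriptor packs four fields into 32 bits: bits 31..24 hold
 * the operand size, bits 23..16 the register index, bits 15..8 the segment,
 * and bits 7..0 the type flag (REGISTER, MEMORY or IMMEDIATE). The
 * operand_size()/operand_index() accessors used below unpack these fields.
 */
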
#if defined (__x86_64__)
static inline long __get_reg_value(unsigned long reg, int size)
{
    switch ( size ) {
    case BYTE_64:
        return (char)(reg & 0xFF);
    case WORD:
        return (short)(reg & 0xFFFF);
    case LONG:
        return (int)(reg & 0xFFFFFFFF);
    case QUAD:
        return (long)(reg);
    default:
        printk("Error: (__get_reg_value) Invalid reg size\n");
        domain_crash_synchronous();
    }
}

long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
    if ( size == BYTE ) {
        switch ( index ) {
        case 0: /* %al */
            return (char)(regs->rax & 0xFF);
        case 1: /* %cl */
            return (char)(regs->rcx & 0xFF);
        case 2: /* %dl */
            return (char)(regs->rdx & 0xFF);
        case 3: /* %bl */
            return (char)(regs->rbx & 0xFF);
        case 4: /* %ah */
            return (char)((regs->rax & 0xFF00) >> 8);
        case 5: /* %ch */
            return (char)((regs->rcx & 0xFF00) >> 8);
        case 6: /* %dh */
            return (char)((regs->rdx & 0xFF00) >> 8);
        case 7: /* %bh */
            return (char)((regs->rbx & 0xFF00) >> 8);
        default:
            printk("Error: (get_reg_value) Invalid index value\n");
            domain_crash_synchronous();
        }
        /* NOTREACHED */
    }

    switch ( index ) {
    case 0: return __get_reg_value(regs->rax, size);
    case 1: return __get_reg_value(regs->rcx, size);
    case 2: return __get_reg_value(regs->rdx, size);
    case 3: return __get_reg_value(regs->rbx, size);
    case 4: return __get_reg_value(regs->rsp, size);
    case 5: return __get_reg_value(regs->rbp, size);
    case 6: return __get_reg_value(regs->rsi, size);
    case 7: return __get_reg_value(regs->rdi, size);
    case 8: return __get_reg_value(regs->r8, size);
    case 9: return __get_reg_value(regs->r9, size);
    case 10: return __get_reg_value(regs->r10, size);
    case 11: return __get_reg_value(regs->r11, size);
    case 12: return __get_reg_value(regs->r12, size);
    case 13: return __get_reg_value(regs->r13, size);
    case 14: return __get_reg_value(regs->r14, size);
    case 15: return __get_reg_value(regs->r15, size);
    default:
        printk("Error: (get_reg_value) Invalid index value\n");
        domain_crash_synchronous();
    }
}
#elif defined (__i386__)
static inline long __get_reg_value(unsigned long reg, int size)
{
    switch ( size ) {
    case WORD:
        return (short)(reg & 0xFFFF);
    case LONG:
        return (int)(reg & 0xFFFFFFFF);
    default:
        printk("Error: (__get_reg_value) Invalid reg size\n");
        domain_crash_synchronous();
    }
}

long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
    if ( size == BYTE ) {
        switch ( index ) {
        case 0: /* %al */
            return (char)(regs->eax & 0xFF);
        case 1: /* %cl */
            return (char)(regs->ecx & 0xFF);
        case 2: /* %dl */
            return (char)(regs->edx & 0xFF);
        case 3: /* %bl */
            return (char)(regs->ebx & 0xFF);
        case 4: /* %ah */
            return (char)((regs->eax & 0xFF00) >> 8);
        case 5: /* %ch */
            return (char)((regs->ecx & 0xFF00) >> 8);
        case 6: /* %dh */
            return (char)((regs->edx & 0xFF00) >> 8);
        case 7: /* %bh */
            return (char)((regs->ebx & 0xFF00) >> 8);
        default:
            printk("Error: (get_reg_value) Invalid index value\n");
            domain_crash_synchronous();
        }
    }

    switch ( index ) {
    case 0: return __get_reg_value(regs->eax, size);
    case 1: return __get_reg_value(regs->ecx, size);
    case 2: return __get_reg_value(regs->edx, size);
    case 3: return __get_reg_value(regs->ebx, size);
    case 4: return __get_reg_value(regs->esp, size);
    case 5: return __get_reg_value(regs->ebp, size);
    case 6: return __get_reg_value(regs->esi, size);
    case 7: return __get_reg_value(regs->edi, size);
    default:
        printk("Error: (get_reg_value) Invalid index value\n");
        domain_crash_synchronous();
    }
}
#endif

static inline unsigned char *check_prefix(unsigned char *inst,
                                          struct hvm_io_op *mmio_op,
                                          unsigned char *ad_size,
                                          unsigned char *op_size,
                                          unsigned char *seg_sel,
                                          unsigned char *rex_p)
{
    while ( 1 ) {
        switch ( *inst ) {
            /* rex prefix for em64t instructions */
        case 0x40 ... 0x4f:
            *rex_p = *inst;
            break;
        case 0xf3: /* REPZ */
            mmio_op->flags = REPZ;
            break;
        case 0xf2: /* REPNZ */
            mmio_op->flags = REPNZ;
            break;
        case 0xf0: /* LOCK */
            break;
        case 0x2e: /* CS */
        case 0x36: /* SS */
        case 0x3e: /* DS */
        case 0x26: /* ES */
        case 0x64: /* FS */
        case 0x65: /* GS */
            *seg_sel = *inst;
            break;
        case 0x66: /* 32bit->16bit */
            *op_size = WORD;
            break;
        case 0x67:
            *ad_size = WORD;
            break;
        default:
            return inst;
        }
        inst++;
    }
}
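
/*
 * Fetch the immediate operand of an instruction: step over the ModR/M byte,
 * any SIB byte and any displacement implied by the addressing mode, then
 * read op_size immediate bytes (at most four, even for 64-bit operands).
 */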
static inline unsigned long get_immediate(int ad_size, const unsigned char *inst, int op_size)
{
    int mod, reg, rm;
    unsigned long val = 0;
    int i;

    mod = (*inst >> 6) & 3;
    reg = (*inst >> 3) & 7;
    rm = *inst & 7;

    inst++; /* skip ModR/M byte */
    if ( ad_size != WORD && mod != 3 && rm == 4 ) {
        rm = *inst & 7;
        inst++; /* skip SIB byte */
    }

    switch ( mod ) {
    case 0:
        if ( ad_size == WORD ) {
            if ( rm == 6 )
                inst = inst + 2; /* disp16, skip 2 bytes */
        }
        else {
            if ( rm == 5 )
                inst = inst + 4; /* disp32, skip 4 bytes */
        }
        break;
    case 1:
        inst++; /* disp8, skip 1 byte */
        break;
    case 2:
        if ( ad_size == WORD )
            inst = inst + 2; /* disp16, skip 2 bytes */
        else
            inst = inst + 4; /* disp32, skip 4 bytes */
        break;
    }

    if ( op_size == QUAD )
        op_size = LONG;

    for ( i = 0; i < op_size; i++ ) {
        val |= (*inst++ & 0xff) << (8 * i);
    }

    return val;
}

static inline unsigned long get_immediate_sign_ext(
    int ad_size, const unsigned char *inst, int op_size)
{
    unsigned long result = get_immediate(ad_size, inst, op_size);
    if ( op_size == BYTE )
        return (int8_t)result;
    if ( op_size == WORD )
        return (int16_t)result;
    return (int32_t)result;
}

static inline int get_index(const unsigned char *inst, unsigned char rex)
{
    int mod, reg, rm;
    int rex_r, rex_b;

    mod = (*inst >> 6) & 3;
    reg = (*inst >> 3) & 7;
    rm = *inst & 7;

    rex_r = (rex >> 2) & 1;
    rex_b = rex & 1;

    /* Only one operand of the instruction is a register. */
    if ( mod == 3 )
        return (rm + (rex_b << 3));
    else
        return (reg + (rex_r << 3));
}

static void init_instruction(struct hvm_io_op *mmio_op)
{
    mmio_op->instr = 0;

    mmio_op->flags = 0;

    mmio_op->operand[0] = 0;
    mmio_op->operand[1] = 0;
    mmio_op->immediate = 0;
}

#define GET_OP_SIZE_FOR_BYTE(size_reg)      \
    do {                                    \
        if ( rex )                          \
            (size_reg) = BYTE_64;           \
        else                                \
            (size_reg) = BYTE;              \
    } while( 0 )

#define GET_OP_SIZE_FOR_NONEBYTE(op_size)   \
    do {                                    \
        if ( rex & 0x8 )                    \
            (op_size) = QUAD;               \
        else if ( (op_size) != WORD )       \
            (op_size) = LONG;               \
    } while( 0 )
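
/*
 * Operand-size selection: any REX prefix widens a byte register operand to
 * BYTE_64 (so e.g. %spl..%dil decode correctly), and REX.W (bit 3) promotes
 * a non-byte operand to QUAD; otherwise a 0x66 prefix leaves WORD and the
 * default is LONG.
 */
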
/*
 * Decode mem,accumulator operands (as in <opcode> m8/m16/m32, al,ax,eax)
 */
static inline int mem_acc(unsigned char size, struct hvm_io_op *mmio)
{
    mmio->operand[0] = mk_operand(size, 0, 0, MEMORY);
    mmio->operand[1] = mk_operand(size, 0, 0, REGISTER);
    return DECODE_success;
}

/*
 * Decode accumulator,mem operands (as in <opcode> al,ax,eax, m8/m16/m32)
 */
static inline int acc_mem(unsigned char size, struct hvm_io_op *mmio)
{
    mmio->operand[0] = mk_operand(size, 0, 0, REGISTER);
    mmio->operand[1] = mk_operand(size, 0, 0, MEMORY);
    return DECODE_success;
}

/*
 * Decode mem,reg operands (as in <opcode> r32/16, m32/16)
 */
static int mem_reg(unsigned char size, unsigned char *opcode,
                   struct hvm_io_op *mmio_op, unsigned char rex)
{
    int index = get_index(opcode + 1, rex);

    mmio_op->operand[0] = mk_operand(size, 0, 0, MEMORY);
    mmio_op->operand[1] = mk_operand(size, index, 0, REGISTER);
    return DECODE_success;
}

/*
 * Decode reg,mem operands (as in <opcode> m32/16, r32/16)
 */
static int reg_mem(unsigned char size, unsigned char *opcode,
                   struct hvm_io_op *mmio_op, unsigned char rex)
{
    int index = get_index(opcode + 1, rex);

    mmio_op->operand[0] = mk_operand(size, index, 0, REGISTER);
    mmio_op->operand[1] = mk_operand(size, 0, 0, MEMORY);
    return DECODE_success;
}

static int mmio_decode(int address_bytes, unsigned char *opcode,
                       struct hvm_io_op *mmio_op,
                       unsigned char *ad_size, unsigned char *op_size,
                       unsigned char *seg_sel)
{
    unsigned char size_reg = 0;
    unsigned char rex = 0;
    int index;

    *ad_size = 0;
    *op_size = 0;
    *seg_sel = 0;
    init_instruction(mmio_op);

    opcode = check_prefix(opcode, mmio_op, ad_size, op_size, seg_sel, &rex);

    switch ( address_bytes )
    {
    case 2:
        if ( *op_size == WORD )
            *op_size = LONG;
        else if ( *op_size == LONG )
            *op_size = WORD;
        else if ( *op_size == 0 )
            *op_size = WORD;
        if ( *ad_size == WORD )
            *ad_size = LONG;
        else if ( *ad_size == LONG )
            *ad_size = WORD;
        else if ( *ad_size == 0 )
            *ad_size = WORD;
        break;
    case 4:
        if ( *op_size == 0 )
            *op_size = LONG;
        if ( *ad_size == 0 )
            *ad_size = LONG;
        break;
#ifdef __x86_64__
    case 8:
        if ( *op_size == 0 )
            *op_size = rex & 0x8 ? QUAD : LONG;
        if ( *ad_size == WORD )
            *ad_size = LONG;
        else if ( *ad_size == 0 )
            *ad_size = QUAD;
        break;
#endif
    }

    /* The operand order in the comments below follows the AT&T convention. */

    switch ( *opcode ) {

    case 0x00: /* add r8, m8 */
        mmio_op->instr = INSTR_ADD;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x03: /* add m32/16, r32/16 */
        mmio_op->instr = INSTR_ADD;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return mem_reg(*op_size, opcode, mmio_op, rex);

    case 0x08: /* or r8, m8 */
        mmio_op->instr = INSTR_OR;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x09: /* or r32/16, m32/16 */
        mmio_op->instr = INSTR_OR;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return reg_mem(*op_size, opcode, mmio_op, rex);

    case 0x0A: /* or m8, r8 */
        mmio_op->instr = INSTR_OR;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, mmio_op, rex);

    case 0x0B: /* or m32/16, r32/16 */
        mmio_op->instr = INSTR_OR;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return mem_reg(*op_size, opcode, mmio_op, rex);

    case 0x20: /* and r8, m8 */
        mmio_op->instr = INSTR_AND;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x21: /* and r32/16, m32/16 */
        mmio_op->instr = INSTR_AND;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return reg_mem(*op_size, opcode, mmio_op, rex);

    case 0x22: /* and m8, r8 */
        mmio_op->instr = INSTR_AND;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, mmio_op, rex);

    case 0x23: /* and m32/16, r32/16 */
        mmio_op->instr = INSTR_AND;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return mem_reg(*op_size, opcode, mmio_op, rex);

    case 0x2B: /* sub m32/16, r32/16 */
        mmio_op->instr = INSTR_SUB;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return mem_reg(*op_size, opcode, mmio_op, rex);

    case 0x30: /* xor r8, m8 */
        mmio_op->instr = INSTR_XOR;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x31: /* xor r32/16, m32/16 */
        mmio_op->instr = INSTR_XOR;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return reg_mem(*op_size, opcode, mmio_op, rex);

    case 0x32: /* xor m8, r8 */
        mmio_op->instr = INSTR_XOR;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, mmio_op, rex);

    case 0x38: /* cmp r8, m8 */
        mmio_op->instr = INSTR_CMP;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x39: /* cmp r32/16, m32/16 */
        mmio_op->instr = INSTR_CMP;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return reg_mem(*op_size, opcode, mmio_op, rex);

    case 0x3A: /* cmp m8, r8 */
        mmio_op->instr = INSTR_CMP;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, mmio_op, rex);

    case 0x3B: /* cmp m32/16, r32/16 */
        mmio_op->instr = INSTR_CMP;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return mem_reg(*op_size, opcode, mmio_op, rex);

    case 0x80:
    case 0x81:
    case 0x83:
    {
        unsigned char ins_subtype = (opcode[1] >> 3) & 7;

        if ( opcode[0] == 0x80 ) {
            *op_size = BYTE;
            GET_OP_SIZE_FOR_BYTE(size_reg);
        } else {
            GET_OP_SIZE_FOR_NONEBYTE(*op_size);
            size_reg = *op_size;
        }

        /* opcode 0x83 always has a single byte operand */
        if ( opcode[0] == 0x83 )
            mmio_op->immediate =
                get_immediate_sign_ext(*ad_size, opcode + 1, BYTE);
        else
            mmio_op->immediate =
                get_immediate_sign_ext(*ad_size, opcode + 1, *op_size);

        mmio_op->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
        mmio_op->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);

        switch ( ins_subtype ) {
        case 0: /* add $imm, m32/16 */
            mmio_op->instr = INSTR_ADD;
            return DECODE_success;

        case 1: /* or $imm, m32/16 */
            mmio_op->instr = INSTR_OR;
            return DECODE_success;

        case 4: /* and $imm, m32/16 */
            mmio_op->instr = INSTR_AND;
            return DECODE_success;

        case 5: /* sub $imm, m32/16 */
            mmio_op->instr = INSTR_SUB;
            return DECODE_success;

        case 6: /* xor $imm, m32/16 */
            mmio_op->instr = INSTR_XOR;
            return DECODE_success;

        case 7: /* cmp $imm, m32/16 */
            mmio_op->instr = INSTR_CMP;
            return DECODE_success;

        default:
            printk("%x/%x, This opcode isn't handled yet!\n",
                   *opcode, ins_subtype);
            return DECODE_failure;
        }
    }

    case 0x84: /* test r8, m8 */
        mmio_op->instr = INSTR_TEST;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x85: /* test r16/32, m16/32 */
        mmio_op->instr = INSTR_TEST;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return reg_mem(*op_size, opcode, mmio_op, rex);

    case 0x86: /* xchg m8, r8 */
        mmio_op->instr = INSTR_XCHG;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x87: /* xchg m16/32, r16/32 */
        mmio_op->instr = INSTR_XCHG;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return reg_mem(*op_size, opcode, mmio_op, rex);

    case 0x88: /* mov r8, m8 */
        mmio_op->instr = INSTR_MOV;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x89: /* mov r32/16, m32/16 */
        mmio_op->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return reg_mem(*op_size, opcode, mmio_op, rex);

    case 0x8A: /* mov m8, r8 */
        mmio_op->instr = INSTR_MOV;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, mmio_op, rex);

    case 0x8B: /* mov m32/16, r32/16 */
        mmio_op->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return mem_reg(*op_size, opcode, mmio_op, rex);

    case 0xA0: /* mov <addr>, al */
        mmio_op->instr = INSTR_MOV;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_acc(size_reg, mmio_op);

    case 0xA1: /* mov <addr>, ax/eax */
        mmio_op->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return mem_acc(*op_size, mmio_op);

    case 0xA2: /* mov al, <addr> */
        mmio_op->instr = INSTR_MOV;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return acc_mem(size_reg, mmio_op);

    case 0xA3: /* mov ax/eax, <addr> */
        mmio_op->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return acc_mem(*op_size, mmio_op);

    case 0xA4: /* movsb */
        mmio_op->instr = INSTR_MOVS;
        *op_size = BYTE;
        return DECODE_success;

    case 0xA5: /* movsw/movsl */
        mmio_op->instr = INSTR_MOVS;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return DECODE_success;

    case 0xAA: /* stosb */
        mmio_op->instr = INSTR_STOS;
        *op_size = BYTE;
        return DECODE_success;

    case 0xAB: /* stosw/stosl */
        mmio_op->instr = INSTR_STOS;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return DECODE_success;

    case 0xAC: /* lodsb */
        mmio_op->instr = INSTR_LODS;
        *op_size = BYTE;
        return DECODE_success;

    case 0xAD: /* lodsw/lodsl */
        mmio_op->instr = INSTR_LODS;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return DECODE_success;

    case 0xC6:
        if ( ((opcode[1] >> 3) & 7) == 0 ) { /* mov $imm8, m8 */
            mmio_op->instr = INSTR_MOV;
            *op_size = BYTE;

            mmio_op->operand[0] = mk_operand(*op_size, 0, 0, IMMEDIATE);
            mmio_op->immediate =
                get_immediate(*ad_size, opcode + 1, *op_size);
            mmio_op->operand[1] = mk_operand(*op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0xC7:
        if ( ((opcode[1] >> 3) & 7) == 0 ) { /* mov $imm16/32, m16/32 */
            mmio_op->instr = INSTR_MOV;
            GET_OP_SIZE_FOR_NONEBYTE(*op_size);

            mmio_op->operand[0] = mk_operand(*op_size, 0, 0, IMMEDIATE);
            mmio_op->immediate =
                get_immediate_sign_ext(*ad_size, opcode + 1, *op_size);
            mmio_op->operand[1] = mk_operand(*op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0xF6:
    case 0xF7:
        if ( ((opcode[1] >> 3) & 7) == 0 ) { /* test $imm8/16/32, m8/16/32 */
            mmio_op->instr = INSTR_TEST;

            if ( opcode[0] == 0xF6 ) {
                *op_size = BYTE;
                GET_OP_SIZE_FOR_BYTE(size_reg);
            } else {
                GET_OP_SIZE_FOR_NONEBYTE(*op_size);
                size_reg = *op_size;
            }

            mmio_op->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
            mmio_op->immediate =
                get_immediate_sign_ext(*ad_size, opcode + 1, *op_size);
            mmio_op->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0xFE:
    case 0xFF:
    {
        unsigned char ins_subtype = (opcode[1] >> 3) & 7;

        if ( opcode[0] == 0xFE ) {
            *op_size = BYTE;
            GET_OP_SIZE_FOR_BYTE(size_reg);
        } else {
            GET_OP_SIZE_FOR_NONEBYTE(*op_size);
            size_reg = *op_size;
        }

        mmio_op->immediate = 1;
        mmio_op->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
        mmio_op->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);

        switch ( ins_subtype ) {
        case 0: /* inc */
            mmio_op->instr = INSTR_ADD;
            return DECODE_success;

        case 1: /* dec */
            mmio_op->instr = INSTR_SUB;
            return DECODE_success;

        case 6: /* push */
            mmio_op->instr = INSTR_PUSH;
            mmio_op->operand[0] = mmio_op->operand[1];
            return DECODE_success;

        default:
            printk("%x/%x, This opcode isn't handled yet!\n",
                   *opcode, ins_subtype);
            return DECODE_failure;
        }
    }

    case 0x0F:
        break;

    default:
        printk("%x, This opcode isn't handled yet!\n", *opcode);
        return DECODE_failure;
    }

    switch ( *++opcode ) {
    case 0xB6: /* movzx m8, r16/r32/r64 */
        mmio_op->instr = INSTR_MOVZX;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        index = get_index(opcode + 1, rex);
        mmio_op->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
        mmio_op->operand[1] = mk_operand(*op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xB7: /* movzx m16, r32/r64 */
        mmio_op->instr = INSTR_MOVZX;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        index = get_index(opcode + 1, rex);
        mmio_op->operand[0] = mk_operand(WORD, 0, 0, MEMORY);
        mmio_op->operand[1] = mk_operand(*op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xBE: /* movsx m8, r16/r32/r64 */
        mmio_op->instr = INSTR_MOVSX;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        index = get_index(opcode + 1, rex);
        mmio_op->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
        mmio_op->operand[1] = mk_operand(*op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xBF: /* movsx m16, r32/r64 */
        mmio_op->instr = INSTR_MOVSX;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        index = get_index(opcode + 1, rex);
        mmio_op->operand[0] = mk_operand(WORD, 0, 0, MEMORY);
        mmio_op->operand[1] = mk_operand(*op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xA3: /* bt r32, m32 */
        mmio_op->instr = INSTR_BT;
        index = get_index(opcode + 1, rex);
        *op_size = LONG;
        mmio_op->operand[0] = mk_operand(*op_size, index, 0, REGISTER);
        mmio_op->operand[1] = mk_operand(*op_size, 0, 0, MEMORY);
        return DECODE_success;

    case 0xBA:
        if ( ((opcode[1] >> 3) & 7) == 4 ) /* BT $imm8, m16/32/64 */
        {
            mmio_op->instr = INSTR_BT;
            GET_OP_SIZE_FOR_NONEBYTE(*op_size);
            mmio_op->operand[0] = mk_operand(BYTE, 0, 0, IMMEDIATE);
            mmio_op->immediate =
                (signed char)get_immediate(*ad_size, opcode + 1, BYTE);
            mmio_op->operand[1] = mk_operand(*op_size, 0, 0, MEMORY);
            return DECODE_success;
        }
        else
        {
            printk("0f %x, This opcode subtype isn't handled yet\n", *opcode);
            return DECODE_failure;
        }

    default:
        printk("0f %x, This opcode isn't handled yet\n", *opcode);
        return DECODE_failure;
    }
}

int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
{
    if ( inst_len > MAX_INST_LEN || inst_len <= 0 )
        return 0;
    if ( hvm_fetch_from_guest_virt(buf, guest_eip, inst_len) )
        return 0;
    return inst_len;
}

void send_pio_req(unsigned long port, unsigned long count, int size,
                  paddr_t value, int dir, int df, int value_is_ptr)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    if ( size == 0 || count == 0 ) {
        printk("null pio request? port %lx, count %lx, "
               "size %d, value %"PRIpaddr", dir %d, value_is_ptr %d.\n",
               port, count, size, value, dir, value_is_ptr);
    }

    vio = get_ioreq(v);
    if ( vio == NULL ) {
        printk("bad shared page: %lx\n", (unsigned long) vio);
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;
    if ( p->state != STATE_IOREQ_NONE )
        printk("WARNING: send pio with something already pending (%d)?\n",
               p->state);

    p->dir = dir;
    p->data_is_ptr = value_is_ptr;

    p->type = IOREQ_TYPE_PIO;
    p->size = size;
    p->addr = port;
    p->count = count;
    p->df = df;

    p->io_count++;

    p->data = value;

    if ( hvm_portio_intercept(p) )
    {
        p->state = STATE_IORESP_READY;
        hvm_io_assist();
        return;
    }

    hvm_send_assist_req(v);
}

void send_mmio_req(unsigned char type, unsigned long gpa,
                   unsigned long count, int size, paddr_t value,
                   int dir, int df, int value_is_ptr)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    if ( size == 0 || count == 0 ) {
        printk("null mmio request? type %d, gpa %lx, "
               "count %lx, size %d, value %"PRIpaddr", dir %d, "
               "value_is_ptr %d.\n",
               type, gpa, count, size, value, dir, value_is_ptr);
    }

    vio = get_ioreq(v);
    if ( vio == NULL ) {
        printk("bad shared page\n");
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;

    if ( p->state != STATE_IOREQ_NONE )
        printk("WARNING: send mmio with something already pending (%d)?\n",
               p->state);
    p->dir = dir;
    p->data_is_ptr = value_is_ptr;

    p->type = type;
    p->size = size;
    p->addr = gpa;
    p->count = count;
    p->df = df;

    p->io_count++;

    p->data = value;

    if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) )
    {
        p->state = STATE_IORESP_READY;
        hvm_io_assist();
        return;
    }

    hvm_send_assist_req(v);
}

void send_timeoffset_req(unsigned long timeoff)
{
    ioreq_t p[1];

    if ( timeoff == 0 )
        return;

    memset(p, 0, sizeof(*p));

    p->type = IOREQ_TYPE_TIMEOFFSET;
    p->size = 8;
    p->count = 1;
    p->dir = IOREQ_WRITE;
    p->data = timeoff;

    p->state = STATE_IOREQ_READY;

    if ( !hvm_buffered_io_send(p) )
        printk("Unsuccessful timeoffset update\n");
}

/* Ask ioemu mapcache to invalidate mappings. */
void send_invalidate_req(void)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_ioreq(v);
    if ( vio == NULL )
    {
        printk("bad shared page: %lx\n", (unsigned long) vio);
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;
    if ( p->state != STATE_IOREQ_NONE )
        printk("WARNING: send invalidate req with something "
               "already pending (%d)?\n", p->state);

    p->type = IOREQ_TYPE_INVALIDATE;
    p->size = 4;
    p->dir = IOREQ_WRITE;
    p->data = ~0UL; /* flush all */
    p->io_count++;

    hvm_send_assist_req(v);
}

static void mmio_operands(int type, unsigned long gpa,
                          struct hvm_io_op *mmio_op,
                          unsigned char op_size)
{
    unsigned long value = 0;
    int df, index, size_reg;
    struct cpu_user_regs *regs = &mmio_op->io_context;

    df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;

    size_reg = operand_size(mmio_op->operand[0]);

    if ( mmio_op->operand[0] & REGISTER ) { /* dest is memory */
        index = operand_index(mmio_op->operand[0]);
        value = get_reg_value(size_reg, index, 0, regs);
        send_mmio_req(type, gpa, 1, op_size, value, IOREQ_WRITE, df, 0);
    } else if ( mmio_op->operand[0] & IMMEDIATE ) { /* dest is memory */
        value = mmio_op->immediate;
        send_mmio_req(type, gpa, 1, op_size, value, IOREQ_WRITE, df, 0);
    } else if ( mmio_op->operand[0] & MEMORY ) { /* dest is register */
        /* send the request and wait for the value */
        if ( (mmio_op->instr == INSTR_MOVZX) ||
             (mmio_op->instr == INSTR_MOVSX) )
            send_mmio_req(type, gpa, 1, size_reg, 0, IOREQ_READ, df, 0);
        else
            send_mmio_req(type, gpa, 1, op_size, 0, IOREQ_READ, df, 0);
    } else {
        printk("%s: invalid dest mode.\n", __func__);
        domain_crash_synchronous();
    }
}
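
/*
 * Repeat count for the string instructions: %ecx (or %cx when the address
 * size is 16-bit) if a REPZ prefix was decoded, otherwise 1.
 */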
#define GET_REPEAT_COUNT() \
     (mmio_op->flags & REPZ ? (ad_size == WORD ? regs->ecx & 0xFFFF : regs->ecx) : 1)

void handle_mmio(unsigned long gpa)
{
    unsigned long inst_addr;
    struct hvm_io_op *mmio_op;
    struct cpu_user_regs *regs;
    unsigned char inst[MAX_INST_LEN], ad_size, op_size, seg_sel;
    int i, address_bytes, df, inst_len;
    struct vcpu *v = current;

    mmio_op = &v->arch.hvm_vcpu.io_op;
    regs = &mmio_op->io_context;

    /* Copy current guest state into io instruction state structure. */
    memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);

    df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;

    address_bytes = hvm_guest_x86_mode(v);
    if ( address_bytes < 2 )
        /* real or vm86 modes */
        address_bytes = 2;
    inst_addr = hvm_get_segment_base(v, x86_seg_cs) + regs->eip;
    memset(inst, 0, MAX_INST_LEN);
    inst_len = hvm_instruction_fetch(inst_addr, address_bytes, inst);
    if ( inst_len <= 0 )
    {
        gdprintk(XENLOG_DEBUG, "handle_mmio: failed to get instruction\n");
        /* hvm_instruction_fetch() will have injected a #PF; get out now */
        return;
    }

    if ( mmio_decode(address_bytes, inst, mmio_op, &ad_size,
                     &op_size, &seg_sel) == DECODE_failure )
    {
        gdprintk(XENLOG_WARNING,
                 "handle_mmio: failed to decode instruction\n");
        gdprintk(XENLOG_WARNING,
                 "mmio opcode: gpa 0x%lx, len %d:", gpa, inst_len);
        for ( i = 0; i < inst_len; i++ )
            printk(" %02x", inst[i] & 0xFF);
        printk("\n");

        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
        return;
    }

    regs->eip += inst_len; /* advance %eip */

    switch ( mmio_op->instr ) {
    case INSTR_MOV:
        mmio_operands(IOREQ_TYPE_COPY, gpa, mmio_op, op_size);
        break;

    case INSTR_MOVS:
    {
        unsigned long count = GET_REPEAT_COUNT();
        int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
        unsigned long addr, gfn;
        paddr_t paddr;
        int dir, size = op_size;
        uint32_t pfec;

        ASSERT(count);

        /* determine non-MMIO address */
        addr = regs->edi;
        if ( ad_size == WORD )
            addr &= 0xFFFF;
        addr += hvm_get_segment_base(v, x86_seg_es);
        pfec = PFEC_page_present | PFEC_write_access;
        if ( ring_3(regs) )
            pfec |= PFEC_user_mode;
        gfn = paging_gva_to_gfn(v, addr, &pfec);
        paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
        if ( paddr == gpa )
        {
            enum x86_segment seg;

            dir = IOREQ_WRITE;
            addr = regs->esi;
            if ( ad_size == WORD )
                addr &= 0xFFFF;
            switch ( seg_sel )
            {
            case 0x26: seg = x86_seg_es; break;
            case 0x2e: seg = x86_seg_cs; break;
            case 0x36: seg = x86_seg_ss; break;
            case 0:
            case 0x3e: seg = x86_seg_ds; break;
            case 0x64: seg = x86_seg_fs; break;
            case 0x65: seg = x86_seg_gs; break;
            default: domain_crash_synchronous();
            }
            addr += hvm_get_segment_base(v, seg);
            pfec &= ~PFEC_write_access;
            gfn = paging_gva_to_gfn(v, addr, &pfec);
            paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
        }
        else
            dir = IOREQ_READ;

        if ( gfn == INVALID_GFN )
        {
            /* The guest does not have the non-mmio address mapped.
             * Need to send in a page fault */
            regs->eip -= inst_len; /* do not advance %eip */
            hvm_inject_exception(TRAP_page_fault, pfec, addr);
            return;
        }

        /*
         * In case of a movs spanning multiple pages, we break the accesses
         * up into multiple pages (the device model works with non-contiguous
         * physical guest pages). To copy just one page, we adjust %ecx and
         * do not advance %eip so that the next rep;movs copies the next page.
         * Unaligned accesses, for example movsl starting at PGSZ-2, are
         * turned into a single copy where we handle the overlapping memory
         * copy ourselves. After this copy succeeds, "rep movs" is executed
         * again.
         */
        if ( (addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK) ) {
            unsigned long value = 0;

            mmio_op->flags |= OVERLAP;

            if ( dir == IOREQ_WRITE ) {
                if ( hvm_paging_enabled(v) )
                {
                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
                    if ( rv != 0 )
                    {
                        /* Failed on the page-spanning copy. Inject PF into
                         * the guest for the address where we failed */
                        regs->eip -= inst_len; /* do not advance %eip */
                        /* Must set CR2 at the failing address */
                        addr += size - rv;
                        gdprintk(XENLOG_DEBUG, "Pagefault on non-io side of a "
                                 "page-spanning MMIO: va=%#lx\n", addr);
                        hvm_inject_exception(TRAP_page_fault, 0, addr);
                        return;
                    }
                }
                else
                    (void) hvm_copy_from_guest_phys(&value, addr, size);
            } else /* dir != IOREQ_WRITE */
                /* Remember where to write the result, as a *VA*.
                 * Must be a VA so we can handle the page overlap
                 * correctly in hvm_mmio_assist() */
                mmio_op->addr = addr;

            if ( count != 1 )
                regs->eip -= inst_len; /* do not advance %eip */

            send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, df, 0);
        } else {
            unsigned long last_addr = sign > 0 ? addr + count * size - 1
                                               : addr - (count - 1) * size;

            if ( (addr & PAGE_MASK) != (last_addr & PAGE_MASK) )
            {
                regs->eip -= inst_len; /* do not advance %eip */

                if ( sign > 0 )
                    count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
                else
                    count = (addr & ~PAGE_MASK) / size + 1;
            }

            ASSERT(count);

            send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size,
                          paddr, dir, df, 1);
        }
        break;
    }

    case INSTR_MOVZX:
    case INSTR_MOVSX:
        mmio_operands(IOREQ_TYPE_COPY, gpa, mmio_op, op_size);
        break;

    case INSTR_STOS:
        /*
         * Since the destination is always in (contiguous) mmio space we don't
         * need to break it up into pages.
         */
        send_mmio_req(IOREQ_TYPE_COPY, gpa,
                      GET_REPEAT_COUNT(), op_size, regs->eax, IOREQ_WRITE, df, 0);
        break;

    case INSTR_LODS:
        /*
         * Since the source is always in (contiguous) mmio space we don't
         * need to break it up into pages.
         */
        mmio_op->operand[0] = mk_operand(op_size, 0, 0, REGISTER);
        send_mmio_req(IOREQ_TYPE_COPY, gpa,
                      GET_REPEAT_COUNT(), op_size, 0, IOREQ_READ, df, 0);
        break;

    case INSTR_OR:
        mmio_operands(IOREQ_TYPE_OR, gpa, mmio_op, op_size);
        break;

    case INSTR_AND:
        mmio_operands(IOREQ_TYPE_AND, gpa, mmio_op, op_size);
        break;

    case INSTR_ADD:
        mmio_operands(IOREQ_TYPE_ADD, gpa, mmio_op, op_size);
        break;

    case INSTR_SUB:
        mmio_operands(IOREQ_TYPE_SUB, gpa, mmio_op, op_size);
        break;

    case INSTR_XOR:
        mmio_operands(IOREQ_TYPE_XOR, gpa, mmio_op, op_size);
        break;

    case INSTR_PUSH:
        if ( ad_size == WORD )
        {
            mmio_op->addr = (uint16_t)(regs->esp - op_size);
            regs->esp = mmio_op->addr | (regs->esp & ~0xffff);
        }
        else
        {
            regs->esp -= op_size;
            mmio_op->addr = regs->esp;
        }
        /* send the request and wait for the value */
        send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, op_size, 0, IOREQ_READ, df, 0);
        break;

    case INSTR_CMP: /* Pass through */
    case INSTR_TEST:
        /* send the request and wait for the value */
        send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, op_size, 0, IOREQ_READ, df, 0);
        break;

    case INSTR_BT:
    {
        unsigned long value = 0;
        int index, size;

        if ( mmio_op->operand[0] & REGISTER )
        {
            index = operand_index(mmio_op->operand[0]);
            size = operand_size(mmio_op->operand[0]);
            value = get_reg_value(size, index, 0, regs);
        }
        else if ( mmio_op->operand[0] & IMMEDIATE )
        {
            value = mmio_op->immediate;
        }
        send_mmio_req(IOREQ_TYPE_COPY, gpa + (value >> 5), 1,
                      op_size, 0, IOREQ_READ, df, 0);
        break;
    }

    case INSTR_XCHG:
        if ( mmio_op->operand[0] & REGISTER ) {
            long value;
            unsigned long operand = mmio_op->operand[0];
            value = get_reg_value(operand_size(operand),
                                  operand_index(operand), 0,
                                  regs);
            /* send the request and wait for the value */
            send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
                          op_size, value, IOREQ_WRITE, df, 0);
        } else {
            /* the destination is a register */
            long value;
            unsigned long operand = mmio_op->operand[1];
            value = get_reg_value(operand_size(operand),
                                  operand_index(operand), 0,
                                  regs);
            /* send the request and wait for the value */
            send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
                          op_size, value, IOREQ_WRITE, df, 0);
        }
        break;

    default:
        printk("Unhandled MMIO instruction\n");
        domain_crash_synchronous();
    }
}

DEFINE_PER_CPU(int, guest_handles_in_xen_space);

/* Note that copy_{to,from}_user_hvm require the PTE to be writable even
   when they're only trying to read from it.  The guest is expected to
   deal with this. */
unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len)
{
    if ( this_cpu(guest_handles_in_xen_space) )
    {
        memcpy(to, from, len);
        return 0;
    }

    return hvm_copy_to_guest_virt((unsigned long)to, (void *)from, len);
}

unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
{
    if ( this_cpu(guest_handles_in_xen_space) )
    {
        memcpy(to, from, len);
        return 0;
    }

    return hvm_copy_from_guest_virt(to, (unsigned long)from, len);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */