ia64/xen-unstable

xen/arch/x86/hvm/platform.c @ 14860:a9aa7c29eda8

hvm: Fix a bug in the mmio emulation of SUB instruction.
Signed-off-by: Dexuan Cui <dexuan.cui@intel.com>

author:   kfraser@localhost.localdomain
date:     Mon Apr 16 11:35:58 2007 +0100 (2007-04-16)
parents:  76f9a8e730ea
children: b96df7a4e0a7
/*
 * platform.c: handling x86 platform related MMIO instructions
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/domain_page.h>
#include <asm/page.h>
#include <xen/event.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <asm/regs.h>
#include <asm/x86_emulate.h>
#include <asm/paging.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/io.h>
#include <public/hvm/ioreq.h>

#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>
#define DECODE_success  1
#define DECODE_failure  0
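
/*
 * A decoded operand is packed into a single word: bits 24-31 hold the
 * operand size, bits 16-23 the register index, bits 8-15 the segment,
 * and bits 0-7 a type flag (REGISTER, MEMORY or IMMEDIATE).
 */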
#define mk_operand(size_reg, index, seg, flag) \
    (((size_reg) << 24) | ((index) << 16) | ((seg) << 8) | (flag))

#if defined (__x86_64__)
static inline long __get_reg_value(unsigned long reg, int size)
{
    switch ( size ) {
    case BYTE_64:
        return (char)(reg & 0xFF);
    case WORD:
        return (short)(reg & 0xFFFF);
    case LONG:
        return (int)(reg & 0xFFFFFFFF);
    case QUAD:
        return (long)(reg);
    default:
        printk("Error: (__get_reg_value) Invalid reg size\n");
        domain_crash_synchronous();
    }
}
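
/*
 * Fetch the sign-extended value of register 'index'.  For BYTE accesses
 * (no REX prefix) the legacy encoding applies: indices 4-7 name the
 * high-byte registers %ah, %ch, %dh and %bh.
 */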
long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
    if ( size == BYTE ) {
        switch ( index ) {
        case 0: /* %al */
            return (char)(regs->rax & 0xFF);
        case 1: /* %cl */
            return (char)(regs->rcx & 0xFF);
        case 2: /* %dl */
            return (char)(regs->rdx & 0xFF);
        case 3: /* %bl */
            return (char)(regs->rbx & 0xFF);
        case 4: /* %ah */
            return (char)((regs->rax & 0xFF00) >> 8);
        case 5: /* %ch */
            return (char)((regs->rcx & 0xFF00) >> 8);
        case 6: /* %dh */
            return (char)((regs->rdx & 0xFF00) >> 8);
        case 7: /* %bh */
            return (char)((regs->rbx & 0xFF00) >> 8);
        default:
            printk("Error: (get_reg_value) Invalid index value\n");
            domain_crash_synchronous();
        }
        /* NOTREACHED */
    }

    switch ( index ) {
    case 0: return __get_reg_value(regs->rax, size);
    case 1: return __get_reg_value(regs->rcx, size);
    case 2: return __get_reg_value(regs->rdx, size);
    case 3: return __get_reg_value(regs->rbx, size);
    case 4: return __get_reg_value(regs->rsp, size);
    case 5: return __get_reg_value(regs->rbp, size);
    case 6: return __get_reg_value(regs->rsi, size);
    case 7: return __get_reg_value(regs->rdi, size);
    case 8: return __get_reg_value(regs->r8, size);
    case 9: return __get_reg_value(regs->r9, size);
    case 10: return __get_reg_value(regs->r10, size);
    case 11: return __get_reg_value(regs->r11, size);
    case 12: return __get_reg_value(regs->r12, size);
    case 13: return __get_reg_value(regs->r13, size);
    case 14: return __get_reg_value(regs->r14, size);
    case 15: return __get_reg_value(regs->r15, size);
    default:
        printk("Error: (get_reg_value) Invalid index value\n");
        domain_crash_synchronous();
    }
}
#elif defined (__i386__)
static inline long __get_reg_value(unsigned long reg, int size)
{
    switch ( size ) {
    case WORD:
        return (short)(reg & 0xFFFF);
    case LONG:
        return (int)(reg & 0xFFFFFFFF);
    default:
        printk("Error: (__get_reg_value) Invalid reg size\n");
        domain_crash_synchronous();
    }
}

long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
    if ( size == BYTE ) {
        switch ( index ) {
        case 0: /* %al */
            return (char)(regs->eax & 0xFF);
        case 1: /* %cl */
            return (char)(regs->ecx & 0xFF);
        case 2: /* %dl */
            return (char)(regs->edx & 0xFF);
        case 3: /* %bl */
            return (char)(regs->ebx & 0xFF);
        case 4: /* %ah */
            return (char)((regs->eax & 0xFF00) >> 8);
        case 5: /* %ch */
            return (char)((regs->ecx & 0xFF00) >> 8);
        case 6: /* %dh */
            return (char)((regs->edx & 0xFF00) >> 8);
        case 7: /* %bh */
            return (char)((regs->ebx & 0xFF00) >> 8);
        default:
            printk("Error: (get_reg_value) Invalid index value\n");
            domain_crash_synchronous();
        }
    }

    switch ( index ) {
    case 0: return __get_reg_value(regs->eax, size);
    case 1: return __get_reg_value(regs->ecx, size);
    case 2: return __get_reg_value(regs->edx, size);
    case 3: return __get_reg_value(regs->ebx, size);
    case 4: return __get_reg_value(regs->esp, size);
    case 5: return __get_reg_value(regs->ebp, size);
    case 6: return __get_reg_value(regs->esi, size);
    case 7: return __get_reg_value(regs->edi, size);
    default:
        printk("Error: (get_reg_value) Invalid index value\n");
        domain_crash_synchronous();
    }
}
#endif
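
/*
 * Scan the instruction's legacy and REX prefixes, recording REP,
 * segment-override and operand/address-size overrides as they are
 * found, and return a pointer to the first opcode byte.
 */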
static inline unsigned char *check_prefix(unsigned char *inst,
                                          struct hvm_io_op *mmio_op,
                                          unsigned char *ad_size,
                                          unsigned char *op_size,
                                          unsigned char *seg_sel,
                                          unsigned char *rex_p)
{
    while ( 1 ) {
        switch ( *inst ) {
        /* rex prefix for em64t instructions */
        case 0x40 ... 0x4f:
            *rex_p = *inst;
            break;
        case 0xf3: /* REPZ */
            mmio_op->flags = REPZ;
            break;
        case 0xf2: /* REPNZ */
            mmio_op->flags = REPNZ;
            break;
        case 0xf0: /* LOCK */
            break;
        case 0x2e: /* CS */
        case 0x36: /* SS */
        case 0x3e: /* DS */
        case 0x26: /* ES */
        case 0x64: /* FS */
        case 0x65: /* GS */
            *seg_sel = *inst;
            break;
        case 0x66: /* 32bit->16bit */
            *op_size = WORD;
            break;
        case 0x67:
            *ad_size = WORD;
            break;
        default:
            return inst;
        }
        inst++;
    }
}
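
/*
 * Extract the immediate operand: skip the ModR/M byte, any SIB byte and
 * any displacement, then assemble the little-endian immediate that
 * follows.  Even a QUAD operation carries at most a 32-bit immediate.
 */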
static inline unsigned long get_immediate(int ad_size, const unsigned char *inst, int op_size)
{
    int mod, reg, rm;
    unsigned long val = 0;
    int i;

    mod = (*inst >> 6) & 3;
    reg = (*inst >> 3) & 7;
    rm = *inst & 7;

    inst++; //skip ModR/M byte
    if ( ad_size != WORD && mod != 3 && rm == 4 ) {
        inst++; //skip SIB byte
    }

    switch ( mod ) {
    case 0:
        if ( ad_size == WORD ) {
            if ( rm == 6 )
                inst = inst + 2; //disp16, skip 2 bytes
        }
        else {
            if ( rm == 5 )
                inst = inst + 4; //disp32, skip 4 bytes
        }
        break;
    case 1:
        inst++; //disp8, skip 1 byte
        break;
    case 2:
        if ( ad_size == WORD )
            inst = inst + 2; //disp16, skip 2 bytes
        else
            inst = inst + 4; //disp32, skip 4 bytes
        break;
    }

    if ( op_size == QUAD )
        op_size = LONG;

    for ( i = 0; i < op_size; i++ ) {
        val |= (*inst++ & 0xff) << (8 * i);
    }

    return val;
}

/* Some instructions, like "add $imm8, r/m16"/"MOV $imm32, r/m64" require
 * the src immediate operand be sign-extended before the op is executed. Here
 * we always sign-extend the operand into an "unsigned long" variable.
 *
 * Note: to simplify the logic here, the sign-extension here may be performed
 * redundantly for some instructions, like "MOV $imm16, r/m16" -- however
 * this is harmless, since we always remember the operand's size.
 */
static inline unsigned long get_immediate_sign_ext(int ad_size,
                                                   const unsigned char *inst,
                                                   int op_size)
{
    unsigned long result = get_immediate(ad_size, inst, op_size);

    if ( op_size == QUAD )
        op_size = LONG;

    ASSERT( op_size == BYTE || op_size == WORD || op_size == LONG );

    if ( result & (1UL << ((8*op_size) - 1)) )
    {
        unsigned long mask = ~0UL >> (8 * (sizeof(mask) - op_size));
        result = ~mask | (result & mask);
    }
    return result;
}
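
/*
 * Return the number of the register operand encoded in the ModR/M byte,
 * folding in the relevant REX extension bit (REX.B for a register in
 * r/m, REX.R for one in reg) so that r8-r15 are reachable on x86-64.
 */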
static inline int get_index(const unsigned char *inst, unsigned char rex)
{
    int mod, reg, rm;
    int rex_r, rex_b;

    mod = (*inst >> 6) & 3;
    reg = (*inst >> 3) & 7;
    rm = *inst & 7;

    rex_r = (rex >> 2) & 1;
    rex_b = rex & 1;

    //Only one operand in the instruction is register
    if ( mod == 3 ) {
        return (rm + (rex_b << 3));
    } else {
        return (reg + (rex_r << 3));
    }
    return 0;
}

static void init_instruction(struct hvm_io_op *mmio_op)
{
    mmio_op->instr = 0;

    mmio_op->flags = 0;

    mmio_op->operand[0] = 0;
    mmio_op->operand[1] = 0;
    mmio_op->immediate = 0;
}
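
/*
 * With a REX prefix a byte operand is tagged BYTE_64, so the register
 * index selects the low byte of any of the 16 GPRs rather than a legacy
 * high-byte register; REX.W promotes a non-byte operand to QUAD.
 */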
#define GET_OP_SIZE_FOR_BYTE(size_reg)      \
    do {                                    \
        if ( rex )                          \
            (size_reg) = BYTE_64;           \
        else                                \
            (size_reg) = BYTE;              \
    } while( 0 )

#define GET_OP_SIZE_FOR_NONEBYTE(op_size)   \
    do {                                    \
        if ( rex & 0x8 )                    \
            (op_size) = QUAD;               \
        else if ( (op_size) != WORD )       \
            (op_size) = LONG;               \
    } while( 0 )


/*
 * Decode mem,accumulator operands (as in <opcode> m8/m16/m32, al,ax,eax)
 */
static inline int mem_acc(unsigned char size, struct hvm_io_op *mmio)
{
    mmio->operand[0] = mk_operand(size, 0, 0, MEMORY);
    mmio->operand[1] = mk_operand(size, 0, 0, REGISTER);
    return DECODE_success;
}

/*
 * Decode accumulator,mem operands (as in <opcode> al,ax,eax, m8/m16/m32)
 */
static inline int acc_mem(unsigned char size, struct hvm_io_op *mmio)
{
    mmio->operand[0] = mk_operand(size, 0, 0, REGISTER);
    mmio->operand[1] = mk_operand(size, 0, 0, MEMORY);
    return DECODE_success;
}

/*
 * Decode mem,reg operands (as in <opcode> m32/16, r32/16)
 */
static int mem_reg(unsigned char size, unsigned char *opcode,
                   struct hvm_io_op *mmio_op, unsigned char rex)
{
    int index = get_index(opcode + 1, rex);

    mmio_op->operand[0] = mk_operand(size, 0, 0, MEMORY);
    mmio_op->operand[1] = mk_operand(size, index, 0, REGISTER);
    return DECODE_success;
}

/*
 * Decode reg,mem operands (as in <opcode> r32/16, m32/16)
 */
static int reg_mem(unsigned char size, unsigned char *opcode,
                   struct hvm_io_op *mmio_op, unsigned char rex)
{
    int index = get_index(opcode + 1, rex);

    mmio_op->operand[0] = mk_operand(size, index, 0, REGISTER);
    mmio_op->operand[1] = mk_operand(size, 0, 0, MEMORY);
    return DECODE_success;
}
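
/*
 * Decode the trapping MMIO instruction: strip its prefixes, establish
 * the default operand and address size for the current execution mode
 * (address_bytes is 2, 4 or 8), then dispatch on the opcode byte(s) to
 * fill in mmio_op.  Returns DECODE_success or DECODE_failure.
 */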
static int mmio_decode(int address_bytes, unsigned char *opcode,
                       struct hvm_io_op *mmio_op,
                       unsigned char *ad_size, unsigned char *op_size,
                       unsigned char *seg_sel)
{
    unsigned char size_reg = 0;
    unsigned char rex = 0;
    int index;

    *ad_size = 0;
    *op_size = 0;
    *seg_sel = 0;
    init_instruction(mmio_op);

    opcode = check_prefix(opcode, mmio_op, ad_size, op_size, seg_sel, &rex);

    switch ( address_bytes )
    {
    case 2:
        if ( *op_size == WORD )
            *op_size = LONG;
        else if ( *op_size == LONG )
            *op_size = WORD;
        else if ( *op_size == 0 )
            *op_size = WORD;
        if ( *ad_size == WORD )
            *ad_size = LONG;
        else if ( *ad_size == LONG )
            *ad_size = WORD;
        else if ( *ad_size == 0 )
            *ad_size = WORD;
        break;
    case 4:
        if ( *op_size == 0 )
            *op_size = LONG;
        if ( *ad_size == 0 )
            *ad_size = LONG;
        break;
#ifdef __x86_64__
    case 8:
        if ( *op_size == 0 )
            *op_size = rex & 0x8 ? QUAD : LONG;
        if ( *ad_size == WORD )
            *ad_size = LONG;
        else if ( *ad_size == 0 )
            *ad_size = QUAD;
        break;
#endif
    }

    /* the operand order in the comments below follows the AT&T convention */

    switch ( *opcode ) {

    case 0x00: /* add r8, m8 */
        mmio_op->instr = INSTR_ADD;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x03: /* add m32/16, r32/16 */
        mmio_op->instr = INSTR_ADD;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return mem_reg(*op_size, opcode, mmio_op, rex);

    case 0x08: /* or r8, m8 */
        mmio_op->instr = INSTR_OR;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x09: /* or r32/16, m32/16 */
        mmio_op->instr = INSTR_OR;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return reg_mem(*op_size, opcode, mmio_op, rex);

    case 0x0A: /* or m8, r8 */
        mmio_op->instr = INSTR_OR;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, mmio_op, rex);

    case 0x0B: /* or m32/16, r32/16 */
        mmio_op->instr = INSTR_OR;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return mem_reg(*op_size, opcode, mmio_op, rex);

    case 0x20: /* and r8, m8 */
        mmio_op->instr = INSTR_AND;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x21: /* and r32/16, m32/16 */
        mmio_op->instr = INSTR_AND;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return reg_mem(*op_size, opcode, mmio_op, rex);

    case 0x22: /* and m8, r8 */
        mmio_op->instr = INSTR_AND;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, mmio_op, rex);

    case 0x23: /* and m32/16, r32/16 */
        mmio_op->instr = INSTR_AND;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return mem_reg(*op_size, opcode, mmio_op, rex);

    case 0x2B: /* sub m32/16, r32/16 */
        mmio_op->instr = INSTR_SUB;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return mem_reg(*op_size, opcode, mmio_op, rex);

    case 0x30: /* xor r8, m8 */
        mmio_op->instr = INSTR_XOR;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x31: /* xor r32/16, m32/16 */
        mmio_op->instr = INSTR_XOR;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return reg_mem(*op_size, opcode, mmio_op, rex);

    case 0x32: /* xor m8, r8 */
        mmio_op->instr = INSTR_XOR;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, mmio_op, rex);

    case 0x38: /* cmp r8, m8 */
        mmio_op->instr = INSTR_CMP;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x39: /* cmp r32/16, m32/16 */
        mmio_op->instr = INSTR_CMP;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return reg_mem(*op_size, opcode, mmio_op, rex);

    case 0x3A: /* cmp m8, r8 */
        mmio_op->instr = INSTR_CMP;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, mmio_op, rex);

    case 0x3B: /* cmp m32/16, r32/16 */
        mmio_op->instr = INSTR_CMP;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return mem_reg(*op_size, opcode, mmio_op, rex);
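
    /* Group 1 (0x80, 0x81, 0x83): the operation is encoded in bits 3-5
     * of the ModR/M byte; 0x83 takes a sign-extended 8-bit immediate. */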
    case 0x80:
    case 0x81:
    case 0x83:
    {
        unsigned char ins_subtype = (opcode[1] >> 3) & 7;

        if ( opcode[0] == 0x80 ) {
            *op_size = BYTE;
            GET_OP_SIZE_FOR_BYTE(size_reg);
        } else {
            GET_OP_SIZE_FOR_NONEBYTE(*op_size);
            size_reg = *op_size;
        }

        /* opcode 0x83 always has a single byte operand */
        if ( opcode[0] == 0x83 )
            mmio_op->immediate =
                get_immediate_sign_ext(*ad_size, opcode + 1, BYTE);
        else
            mmio_op->immediate =
                get_immediate_sign_ext(*ad_size, opcode + 1, *op_size);

        mmio_op->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
        mmio_op->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);

        switch ( ins_subtype ) {
        case 0: /* add $imm, m32/16 */
            mmio_op->instr = INSTR_ADD;
            return DECODE_success;

        case 1: /* or $imm, m32/16 */
            mmio_op->instr = INSTR_OR;
            return DECODE_success;

        case 4: /* and $imm, m32/16 */
            mmio_op->instr = INSTR_AND;
            return DECODE_success;

        case 5: /* sub $imm, m32/16 */
            mmio_op->instr = INSTR_SUB;
            return DECODE_success;

        case 6: /* xor $imm, m32/16 */
            mmio_op->instr = INSTR_XOR;
            return DECODE_success;

        case 7: /* cmp $imm, m32/16 */
            mmio_op->instr = INSTR_CMP;
            return DECODE_success;

        default:
            printk("%x/%x, This opcode isn't handled yet!\n",
                   *opcode, ins_subtype);
            return DECODE_failure;
        }
    }

    case 0x84: /* test r8, m8 */
        mmio_op->instr = INSTR_TEST;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x85: /* test r16/32, m16/32 */
        mmio_op->instr = INSTR_TEST;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return reg_mem(*op_size, opcode, mmio_op, rex);

    case 0x86: /* xchg m8, r8 */
        mmio_op->instr = INSTR_XCHG;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x87: /* xchg m16/32, r16/32 */
        mmio_op->instr = INSTR_XCHG;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return reg_mem(*op_size, opcode, mmio_op, rex);

    case 0x88: /* mov r8, m8 */
        mmio_op->instr = INSTR_MOV;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, mmio_op, rex);

    case 0x89: /* mov r32/16, m32/16 */
        mmio_op->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return reg_mem(*op_size, opcode, mmio_op, rex);

    case 0x8A: /* mov m8, r8 */
        mmio_op->instr = INSTR_MOV;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, mmio_op, rex);

    case 0x8B: /* mov m32/16, r32/16 */
        mmio_op->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return mem_reg(*op_size, opcode, mmio_op, rex);

    case 0xA0: /* mov <addr>, al */
        mmio_op->instr = INSTR_MOV;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_acc(size_reg, mmio_op);

    case 0xA1: /* mov <addr>, ax/eax */
        mmio_op->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return mem_acc(*op_size, mmio_op);

    case 0xA2: /* mov al, <addr> */
        mmio_op->instr = INSTR_MOV;
        *op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return acc_mem(size_reg, mmio_op);

    case 0xA3: /* mov ax/eax, <addr> */
        mmio_op->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return acc_mem(*op_size, mmio_op);

    case 0xA4: /* movsb */
        mmio_op->instr = INSTR_MOVS;
        *op_size = BYTE;
        return DECODE_success;

    case 0xA5: /* movsw/movsl */
        mmio_op->instr = INSTR_MOVS;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return DECODE_success;

    case 0xAA: /* stosb */
        mmio_op->instr = INSTR_STOS;
        *op_size = BYTE;
        return DECODE_success;

    case 0xAB: /* stosw/stosl */
        mmio_op->instr = INSTR_STOS;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return DECODE_success;

    case 0xAC: /* lodsb */
        mmio_op->instr = INSTR_LODS;
        *op_size = BYTE;
        return DECODE_success;

    case 0xAD: /* lodsw/lodsl */
        mmio_op->instr = INSTR_LODS;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        return DECODE_success;

    case 0xC6:
        if ( ((opcode[1] >> 3) & 7) == 0 ) { /* mov $imm8, m8 */
            mmio_op->instr = INSTR_MOV;
            *op_size = BYTE;

            mmio_op->operand[0] = mk_operand(*op_size, 0, 0, IMMEDIATE);
            mmio_op->immediate =
                get_immediate(*ad_size, opcode + 1, *op_size);
            mmio_op->operand[1] = mk_operand(*op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0xC7:
        if ( ((opcode[1] >> 3) & 7) == 0 ) { /* mov $imm16/32, m16/32 */
            mmio_op->instr = INSTR_MOV;
            GET_OP_SIZE_FOR_NONEBYTE(*op_size);

            mmio_op->operand[0] = mk_operand(*op_size, 0, 0, IMMEDIATE);
            mmio_op->immediate =
                get_immediate_sign_ext(*ad_size, opcode + 1, *op_size);
            mmio_op->operand[1] = mk_operand(*op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0xF6:
    case 0xF7:
        if ( ((opcode[1] >> 3) & 7) == 0 ) { /* test $imm8/16/32, m8/16/32 */
            mmio_op->instr = INSTR_TEST;

            if ( opcode[0] == 0xF6 ) {
                *op_size = BYTE;
                GET_OP_SIZE_FOR_BYTE(size_reg);
            } else {
                GET_OP_SIZE_FOR_NONEBYTE(*op_size);
                size_reg = *op_size;
            }

            mmio_op->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
            mmio_op->immediate =
                get_immediate_sign_ext(*ad_size, opcode + 1, *op_size);
            mmio_op->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0xFE:
    case 0xFF:
    {
        unsigned char ins_subtype = (opcode[1] >> 3) & 7;

        if ( opcode[0] == 0xFE ) {
            *op_size = BYTE;
            GET_OP_SIZE_FOR_BYTE(size_reg);
        } else {
            GET_OP_SIZE_FOR_NONEBYTE(*op_size);
            size_reg = *op_size;
        }

        mmio_op->immediate = 1;
        mmio_op->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
        mmio_op->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);

        switch ( ins_subtype ) {
        case 0: /* inc */
            mmio_op->instr = INSTR_ADD;
            return DECODE_success;

        case 1: /* dec */
            mmio_op->instr = INSTR_SUB;
            return DECODE_success;

        case 6: /* push */
            mmio_op->instr = INSTR_PUSH;
            mmio_op->operand[0] = mmio_op->operand[1];
            return DECODE_success;

        default:
            printk("%x/%x, This opcode isn't handled yet!\n",
                   *opcode, ins_subtype);
            return DECODE_failure;
        }
    }

    case 0x0F:
        break;

    default:
        printk("%x, This opcode isn't handled yet!\n", *opcode);
        return DECODE_failure;
    }

    switch ( *++opcode ) {
    case 0xB6: /* movzx m8, r16/r32/r64 */
        mmio_op->instr = INSTR_MOVZX;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        index = get_index(opcode + 1, rex);
        mmio_op->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
        mmio_op->operand[1] = mk_operand(*op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xB7: /* movzx m16, r32/r64 */
        mmio_op->instr = INSTR_MOVZX;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        index = get_index(opcode + 1, rex);
        mmio_op->operand[0] = mk_operand(WORD, 0, 0, MEMORY);
        mmio_op->operand[1] = mk_operand(*op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xBE: /* movsx m8, r16/r32/r64 */
        mmio_op->instr = INSTR_MOVSX;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        index = get_index(opcode + 1, rex);
        mmio_op->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
        mmio_op->operand[1] = mk_operand(*op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xBF: /* movsx m16, r32/r64 */
        mmio_op->instr = INSTR_MOVSX;
        GET_OP_SIZE_FOR_NONEBYTE(*op_size);
        index = get_index(opcode + 1, rex);
        mmio_op->operand[0] = mk_operand(WORD, 0, 0, MEMORY);
        mmio_op->operand[1] = mk_operand(*op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xA3: /* bt r32, m32 */
        mmio_op->instr = INSTR_BT;
        index = get_index(opcode + 1, rex);
        *op_size = LONG;
        mmio_op->operand[0] = mk_operand(*op_size, index, 0, REGISTER);
        mmio_op->operand[1] = mk_operand(*op_size, 0, 0, MEMORY);
        return DECODE_success;

    case 0xBA:
        if ( ((opcode[1] >> 3) & 7) == 4 ) /* BT $imm8, m16/32/64 */
        {
            mmio_op->instr = INSTR_BT;
            GET_OP_SIZE_FOR_NONEBYTE(*op_size);
            mmio_op->operand[0] = mk_operand(BYTE, 0, 0, IMMEDIATE);
            mmio_op->immediate =
                (signed char)get_immediate(*ad_size, opcode + 1, BYTE);
            mmio_op->operand[1] = mk_operand(*op_size, 0, 0, MEMORY);
            return DECODE_success;
        }
        else
        {
            printk("0f %x, This opcode subtype isn't handled yet\n", *opcode);
            return DECODE_failure;
        }

    default:
        printk("0f %x, This opcode isn't handled yet\n", *opcode);
        return DECODE_failure;
    }
}

int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
{
    if ( inst_len > MAX_INST_LEN || inst_len <= 0 )
        return 0;
    if ( hvm_copy_from_guest_virt(buf, guest_eip, inst_len) )
        return 0;
    return inst_len;
}
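
/*
 * Build a port-I/O request and hand it to the device model.  Requests
 * that an internal handler (hvm_portio_intercept) can satisfy are
 * completed immediately, without waking the device model.
 */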
void send_pio_req(unsigned long port, unsigned long count, int size,
                  paddr_t value, int dir, int df, int value_is_ptr)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    if ( size == 0 || count == 0 ) {
        printk("null pio request? port %lx, count %lx, "
               "size %d, value %"PRIpaddr", dir %d, value_is_ptr %d.\n",
               port, count, size, value, dir, value_is_ptr);
    }

    vio = get_ioreq(v);
    if ( vio == NULL ) {
        printk("bad shared page: %lx\n", (unsigned long) vio);
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;
    if ( p->state != STATE_IOREQ_NONE )
        printk("WARNING: send pio with something already pending (%d)?\n",
               p->state);

    p->dir = dir;
    p->data_is_ptr = value_is_ptr;

    p->type = IOREQ_TYPE_PIO;
    p->size = size;
    p->addr = port;
    p->count = count;
    p->df = df;

    p->io_count++;

    p->data = value;

    if ( hvm_portio_intercept(p) )
    {
        p->state = STATE_IORESP_READY;
        hvm_io_assist();
        return;
    }

    hvm_send_assist_req(v);
}
static void send_mmio_req(unsigned char type, unsigned long gpa,
                          unsigned long count, int size, paddr_t value,
                          int dir, int df, int value_is_ptr)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    if ( size == 0 || count == 0 ) {
        printk("null mmio request? type %d, gpa %lx, "
               "count %lx, size %d, value %"PRIpaddr", dir %d, "
               "value_is_ptr %d.\n",
               type, gpa, count, size, value, dir, value_is_ptr);
    }

    vio = get_ioreq(v);
    if ( vio == NULL ) {
        printk("bad shared page\n");
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;

    if ( p->state != STATE_IOREQ_NONE )
        printk("WARNING: send mmio with something already pending (%d)?\n",
               p->state);
    p->dir = dir;
    p->data_is_ptr = value_is_ptr;

    p->type = type;
    p->size = size;
    p->addr = gpa;
    p->count = count;
    p->df = df;

    p->io_count++;

    p->data = value;

    if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) )
    {
        p->state = STATE_IORESP_READY;
        hvm_io_assist();
        return;
    }

    hvm_send_assist_req(v);
}
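
/* Tell the device model, via the buffered-I/O ring, that the guest's
 * time offset has changed. */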
void send_timeoffset_req(unsigned long timeoff)
{
    ioreq_t p[1];

    if ( timeoff == 0 )
        return;

    memset(p, 0, sizeof(*p));

    p->type = IOREQ_TYPE_TIMEOFFSET;
    p->size = 4;
    p->dir = IOREQ_WRITE;
    p->data = timeoff;

    p->state = STATE_IOREQ_READY;

    if ( !hvm_buffered_io_send(p) )
        printk("Unsuccessful timeoffset update\n");
}

/* Ask ioemu mapcache to invalidate mappings. */
void send_invalidate_req(void)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_ioreq(v);
    if ( vio == NULL )
    {
        printk("bad shared page: %lx\n", (unsigned long) vio);
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;
    if ( p->state != STATE_IOREQ_NONE )
        printk("WARNING: send invalidate req with something "
               "already pending (%d)?\n", p->state);

    p->type = IOREQ_TYPE_INVALIDATE;
    p->size = 4;
    p->dir = IOREQ_WRITE;
    p->data = ~0UL; /* flush all */
    p->io_count++;

    hvm_send_assist_req(v);
}
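
/*
 * Issue the I/O request for a decoded two-operand MMIO instruction.  A
 * register or immediate source operand means the destination is memory,
 * so the value is written out; a memory source means the destination is
 * a register, so a read is sent and the result is merged back later by
 * hvm_io_assist().
 */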
static void mmio_operands(int type, unsigned long gpa,
                          struct hvm_io_op *mmio_op,
                          unsigned char op_size)
{
    unsigned long value = 0;
    int df, index, size_reg;
    struct cpu_user_regs *regs = &mmio_op->io_context;

    df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;

    size_reg = operand_size(mmio_op->operand[0]);

    if ( mmio_op->operand[0] & REGISTER ) { /* dest is memory */
        index = operand_index(mmio_op->operand[0]);
        value = get_reg_value(size_reg, index, 0, regs);
        send_mmio_req(type, gpa, 1, op_size, value, IOREQ_WRITE, df, 0);
    } else if ( mmio_op->operand[0] & IMMEDIATE ) { /* dest is memory */
        value = mmio_op->immediate;
        send_mmio_req(type, gpa, 1, op_size, value, IOREQ_WRITE, df, 0);
    } else if ( mmio_op->operand[0] & MEMORY ) { /* dest is register */
        /* send the request and wait for the value */
        if ( (mmio_op->instr == INSTR_MOVZX) ||
             (mmio_op->instr == INSTR_MOVSX) )
            send_mmio_req(type, gpa, 1, size_reg, 0, IOREQ_READ, df, 0);
        else
            send_mmio_req(type, gpa, 1, op_size, 0, IOREQ_READ, df, 0);
    } else {
        printk("%s: invalid dest mode.\n", __func__);
        domain_crash_synchronous();
    }
}
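
/* For REP-prefixed string instructions the repeat count comes from %ecx
 * (just %cx in 16-bit address mode); everything else executes once. */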
#define GET_REPEAT_COUNT() \
     (mmio_op->flags & REPZ ? (ad_size == WORD ? regs->ecx & 0xFFFF : regs->ecx) : 1)
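
/*
 * Emulate the MMIO instruction that faulted at the current %eip: snapshot
 * the guest registers, fetch and decode the instruction, advance %eip past
 * it, and forward the access to the device model as an ioreq.
 */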
void handle_mmio(unsigned long gpa)
{
    unsigned long inst_addr;
    struct hvm_io_op *mmio_op;
    struct cpu_user_regs *regs;
    unsigned char inst[MAX_INST_LEN], ad_size, op_size, seg_sel;
    int i, address_bytes, df, inst_len;
    struct vcpu *v = current;

    mmio_op = &v->arch.hvm_vcpu.io_op;
    regs = &mmio_op->io_context;

    /* Copy current guest state into io instruction state structure. */
    memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
    hvm_store_cpu_guest_regs(v, regs, NULL);

    df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;

    address_bytes = hvm_guest_x86_mode(v);
    inst_addr = hvm_get_segment_base(v, x86_seg_cs) + regs->eip;
    inst_len = hvm_instruction_length(inst_addr, address_bytes);
    if ( inst_len <= 0 )
    {
        printk("handle_mmio: failed to get instruction length\n");
        domain_crash_synchronous();
    }

    memset(inst, 0, MAX_INST_LEN);
    if ( inst_copy_from_guest(inst, inst_addr, inst_len) != inst_len ) {
        printk("handle_mmio: failed to copy instruction\n");
        domain_crash_synchronous();
    }

    if ( mmio_decode(address_bytes, inst, mmio_op, &ad_size,
                     &op_size, &seg_sel) == DECODE_failure ) {
        printk("handle_mmio: failed to decode instruction\n");
        printk("mmio opcode: gpa 0x%lx, len %d:", gpa, inst_len);
        for ( i = 0; i < inst_len; i++ )
            printk(" %02x", inst[i] & 0xFF);
        printk("\n");
        domain_crash_synchronous();
    }

    regs->eip += inst_len; /* advance %eip */

    switch ( mmio_op->instr ) {
    case INSTR_MOV:
        mmio_operands(IOREQ_TYPE_COPY, gpa, mmio_op, op_size);
        break;

    case INSTR_MOVS:
    {
        unsigned long count = GET_REPEAT_COUNT();
        int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
        unsigned long addr, gfn;
        paddr_t paddr;
        int dir, size = op_size;

        ASSERT(count);

        /* determine non-MMIO address */
        addr = regs->edi;
        if ( ad_size == WORD )
            addr &= 0xFFFF;
        addr += hvm_get_segment_base(v, x86_seg_es);
        gfn = paging_gva_to_gfn(v, addr);
        paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
        if ( paddr == gpa )
        {
            enum x86_segment seg;

            dir = IOREQ_WRITE;
            addr = regs->esi;
            if ( ad_size == WORD )
                addr &= 0xFFFF;
            switch ( seg_sel )
            {
            case 0x26: seg = x86_seg_es; break;
            case 0x2e: seg = x86_seg_cs; break;
            case 0x36: seg = x86_seg_ss; break;
            case 0:
            case 0x3e: seg = x86_seg_ds; break;
            case 0x64: seg = x86_seg_fs; break;
            case 0x65: seg = x86_seg_gs; break;
            default: domain_crash_synchronous();
            }
            addr += hvm_get_segment_base(v, seg);
            gfn = paging_gva_to_gfn(v, addr);
            paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
        }
        else
            dir = IOREQ_READ;

        if ( gfn == INVALID_GFN )
        {
            /* The guest does not have the non-mmio address mapped.
             * Need to send in a page fault */
            int errcode = 0;
            /* IO read --> memory write */
            if ( dir == IOREQ_READ ) errcode |= PFEC_write_access;
            regs->eip -= inst_len; /* do not advance %eip */
            hvm_inject_exception(TRAP_page_fault, errcode, addr);
            return;
        }

        /*
         * In case of a movs spanning multiple pages, we break the accesses
         * up into multiple pages (the device model works with non-contiguous
         * physical guest pages). To copy just one page, we adjust %ecx and
         * do not advance %eip so that the next rep;movs copies the next page.
         * Unaligned accesses, for example movsl starting at PGSZ-2, are
         * turned into a single copy where we handle the overlapping memory
         * copy ourselves. After this copy succeeds, "rep movs" is executed
         * again.
         */
        if ( (addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK) ) {
            unsigned long value = 0;

            mmio_op->flags |= OVERLAP;

            if ( dir == IOREQ_WRITE ) {
                if ( hvm_paging_enabled(v) )
                {
                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
                    if ( rv != 0 )
                    {
                        /* Failed on the page-spanning copy. Inject PF into
                         * the guest for the address where we failed */
                        regs->eip -= inst_len; /* do not advance %eip */
                        /* Must set CR2 at the failing address */
                        addr += size - rv;
                        gdprintk(XENLOG_DEBUG, "Pagefault on non-io side of a "
                                 "page-spanning MMIO: va=%#lx\n", addr);
                        hvm_inject_exception(TRAP_page_fault, 0, addr);
                        return;
                    }
                }
                else
                    (void) hvm_copy_from_guest_phys(&value, addr, size);
            } else /* dir != IOREQ_WRITE */
                /* Remember where to write the result, as a *VA*.
                 * Must be a VA so we can handle the page overlap
                 * correctly in hvm_mmio_assist() */
                mmio_op->addr = addr;

            if ( count != 1 )
                regs->eip -= inst_len; /* do not advance %eip */

            send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, df, 0);
        } else {
            unsigned long last_addr = sign > 0 ? addr + count * size - 1
                                               : addr - (count - 1) * size;

            if ( (addr & PAGE_MASK) != (last_addr & PAGE_MASK) )
            {
                regs->eip -= inst_len; /* do not advance %eip */

                if ( sign > 0 )
                    count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
                else
                    count = (addr & ~PAGE_MASK) / size + 1;
            }

            ASSERT(count);

            send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size,
                          paddr, dir, df, 1);
        }
        break;
    }

    case INSTR_MOVZX:
    case INSTR_MOVSX:
        mmio_operands(IOREQ_TYPE_COPY, gpa, mmio_op, op_size);
        break;

    case INSTR_STOS:
        /*
         * Since the destination is always in (contiguous) mmio space we don't
         * need to break it up into pages.
         */
        send_mmio_req(IOREQ_TYPE_COPY, gpa,
                      GET_REPEAT_COUNT(), op_size, regs->eax, IOREQ_WRITE, df, 0);
        break;

    case INSTR_LODS:
        /*
         * Since the source is always in (contiguous) mmio space we don't
         * need to break it up into pages.
         */
        mmio_op->operand[0] = mk_operand(op_size, 0, 0, REGISTER);
        send_mmio_req(IOREQ_TYPE_COPY, gpa,
                      GET_REPEAT_COUNT(), op_size, 0, IOREQ_READ, df, 0);
        break;

    case INSTR_OR:
        mmio_operands(IOREQ_TYPE_OR, gpa, mmio_op, op_size);
        break;

    case INSTR_AND:
        mmio_operands(IOREQ_TYPE_AND, gpa, mmio_op, op_size);
        break;

    case INSTR_ADD:
        mmio_operands(IOREQ_TYPE_ADD, gpa, mmio_op, op_size);
        break;

    case INSTR_SUB:
        mmio_operands(IOREQ_TYPE_SUB, gpa, mmio_op, op_size);
        break;

    case INSTR_XOR:
        mmio_operands(IOREQ_TYPE_XOR, gpa, mmio_op, op_size);
        break;

    case INSTR_PUSH:
        if ( ad_size == WORD )
        {
            mmio_op->addr = (uint16_t)(regs->esp - op_size);
            regs->esp = mmio_op->addr | (regs->esp & ~0xffff);
        }
        else
        {
            regs->esp -= op_size;
            mmio_op->addr = regs->esp;
        }

        /* send the request and wait for the value */
        send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, op_size, 0, IOREQ_READ, df, 0);
        break;

    case INSTR_CMP:        /* Pass through */
    case INSTR_TEST:
        /* send the request and wait for the value */
        send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, op_size, 0, IOREQ_READ, df, 0);
        break;

    case INSTR_BT:
    {
        unsigned long value = 0;
        int index, size;

        if ( mmio_op->operand[0] & REGISTER )
        {
            index = operand_index(mmio_op->operand[0]);
            size = operand_size(mmio_op->operand[0]);
            value = get_reg_value(size, index, 0, regs);
        }
        else if ( mmio_op->operand[0] & IMMEDIATE )
            value = mmio_op->immediate;
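
        /* 'value' holds the bit offset; use it to pick the word of the
         * region to read back, then let the completion path test the bit. */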
        send_mmio_req(IOREQ_TYPE_COPY, gpa + (value >> 5), 1,
                      op_size, 0, IOREQ_READ, df, 0);
        break;
    }

    case INSTR_XCHG:
        if ( mmio_op->operand[0] & REGISTER ) {
            long value;
            unsigned long operand = mmio_op->operand[0];
            value = get_reg_value(operand_size(operand),
                                  operand_index(operand), 0,
                                  regs);
            /* send the request and wait for the value */
            send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
                          op_size, value, IOREQ_WRITE, df, 0);
        } else {
            /* the destination is a register */
            long value;
            unsigned long operand = mmio_op->operand[1];
            value = get_reg_value(operand_size(operand),
                                  operand_index(operand), 0,
                                  regs);
            /* send the request and wait for the value */
            send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
                          op_size, value, IOREQ_WRITE, df, 0);
        }
        break;

    default:
        printk("Unhandled MMIO instruction\n");
        domain_crash_synchronous();
    }
}
DEFINE_PER_CPU(int, guest_handles_in_xen_space);

/* Note that copy_{to,from}_user_hvm don't set the A and D bits on
   PTEs, and require the PTE to be writable even when they're only
   trying to read from it.  The guest is expected to deal with
   this. */
unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len)
{
    if ( this_cpu(guest_handles_in_xen_space) )
    {
        memcpy(to, from, len);
        return 0;
    }

    return hvm_copy_to_guest_virt((unsigned long)to, (void *)from, len);
}

unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
{
    if ( this_cpu(guest_handles_in_xen_space) )
    {
        memcpy(to, from, len);
        return 0;
    }

    return hvm_copy_from_guest_virt(to, (unsigned long)from, len);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */