ia64/xen-unstable

view xen/arch/x86/hvm/platform.c @ 10903:822c39808e62

[HVM] Initialise full regs structure for PIO requests.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Aug 02 09:52:03 2006 +0100 (2006-08-02)
parents 3fa8b914e2b5
children bfe12b4d45d3
line source
1 /*
2 * platform.c: handling x86 platform related MMIO instructions
3 *
4 * Copyright (c) 2004, Intel Corporation.
5 * Copyright (c) 2005, International Business Machines Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
21 #include <xen/config.h>
22 #include <xen/types.h>
23 #include <xen/mm.h>
24 #include <asm/shadow.h>
25 #include <xen/domain_page.h>
26 #include <asm/page.h>
27 #include <xen/event.h>
28 #include <xen/trace.h>
29 #include <xen/sched.h>
30 #include <asm/regs.h>
31 #include <asm/hvm/hvm.h>
32 #include <asm/hvm/support.h>
33 #include <public/hvm/ioreq.h>
35 #include <xen/lib.h>
36 #include <xen/sched.h>
37 #include <asm/current.h>
38 #if CONFIG_PAGING_LEVELS >= 3
39 #include <asm/shadow_64.h>
40 #endif
/* Return codes for the one-shot instruction decoder (hvm_decode) below. */
#define DECODE_success 1
#define DECODE_failure 0
45 #if defined (__x86_64__)
46 static inline long __get_reg_value(unsigned long reg, int size)
47 {
48 switch(size) {
49 case BYTE_64:
50 return (char)(reg & 0xFF);
51 case WORD:
52 return (short)(reg & 0xFFFF);
53 case LONG:
54 return (int)(reg & 0xFFFFFFFF);
55 case QUAD:
56 return (long)(reg);
57 default:
58 printf("Error: (__get_reg_value) Invalid reg size\n");
59 domain_crash_synchronous();
60 }
61 }
63 long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
64 {
65 if (size == BYTE) {
66 switch (index) {
67 case 0: /* %al */
68 return (char)(regs->rax & 0xFF);
69 case 1: /* %cl */
70 return (char)(regs->rcx & 0xFF);
71 case 2: /* %dl */
72 return (char)(regs->rdx & 0xFF);
73 case 3: /* %bl */
74 return (char)(regs->rbx & 0xFF);
75 case 4: /* %ah */
76 return (char)((regs->rax & 0xFF00) >> 8);
77 case 5: /* %ch */
78 return (char)((regs->rcx & 0xFF00) >> 8);
79 case 6: /* %dh */
80 return (char)((regs->rdx & 0xFF00) >> 8);
81 case 7: /* %bh */
82 return (char)((regs->rbx & 0xFF00) >> 8);
83 default:
84 printf("Error: (get_reg_value) Invalid index value\n");
85 domain_crash_synchronous();
86 }
87 /* NOTREACHED */
88 }
90 switch (index) {
91 case 0: return __get_reg_value(regs->rax, size);
92 case 1: return __get_reg_value(regs->rcx, size);
93 case 2: return __get_reg_value(regs->rdx, size);
94 case 3: return __get_reg_value(regs->rbx, size);
95 case 4: return __get_reg_value(regs->rsp, size);
96 case 5: return __get_reg_value(regs->rbp, size);
97 case 6: return __get_reg_value(regs->rsi, size);
98 case 7: return __get_reg_value(regs->rdi, size);
99 case 8: return __get_reg_value(regs->r8, size);
100 case 9: return __get_reg_value(regs->r9, size);
101 case 10: return __get_reg_value(regs->r10, size);
102 case 11: return __get_reg_value(regs->r11, size);
103 case 12: return __get_reg_value(regs->r12, size);
104 case 13: return __get_reg_value(regs->r13, size);
105 case 14: return __get_reg_value(regs->r14, size);
106 case 15: return __get_reg_value(regs->r15, size);
107 default:
108 printf("Error: (get_reg_value) Invalid index value\n");
109 domain_crash_synchronous();
110 }
111 }
112 #elif defined (__i386__)
113 static inline long __get_reg_value(unsigned long reg, int size)
114 {
115 switch(size) {
116 case WORD:
117 return (short)(reg & 0xFFFF);
118 case LONG:
119 return (int)(reg & 0xFFFFFFFF);
120 default:
121 printf("Error: (__get_reg_value) Invalid reg size\n");
122 domain_crash_synchronous();
123 }
124 }
126 long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
127 {
128 if (size == BYTE) {
129 switch (index) {
130 case 0: /* %al */
131 return (char)(regs->eax & 0xFF);
132 case 1: /* %cl */
133 return (char)(regs->ecx & 0xFF);
134 case 2: /* %dl */
135 return (char)(regs->edx & 0xFF);
136 case 3: /* %bl */
137 return (char)(regs->ebx & 0xFF);
138 case 4: /* %ah */
139 return (char)((regs->eax & 0xFF00) >> 8);
140 case 5: /* %ch */
141 return (char)((regs->ecx & 0xFF00) >> 8);
142 case 6: /* %dh */
143 return (char)((regs->edx & 0xFF00) >> 8);
144 case 7: /* %bh */
145 return (char)((regs->ebx & 0xFF00) >> 8);
146 default:
147 printf("Error: (get_reg_value) Invalid index value\n");
148 domain_crash_synchronous();
149 }
150 }
152 switch (index) {
153 case 0: return __get_reg_value(regs->eax, size);
154 case 1: return __get_reg_value(regs->ecx, size);
155 case 2: return __get_reg_value(regs->edx, size);
156 case 3: return __get_reg_value(regs->ebx, size);
157 case 4: return __get_reg_value(regs->esp, size);
158 case 5: return __get_reg_value(regs->ebp, size);
159 case 6: return __get_reg_value(regs->esi, size);
160 case 7: return __get_reg_value(regs->edi, size);
161 default:
162 printf("Error: (get_reg_value) Invalid index value\n");
163 domain_crash_synchronous();
164 }
165 }
166 #endif
168 static inline unsigned char *check_prefix(unsigned char *inst,
169 struct instruction *thread_inst, unsigned char *rex_p)
170 {
171 while (1) {
172 switch (*inst) {
173 /* rex prefix for em64t instructions */
174 case 0x40 ... 0x4e:
175 *rex_p = *inst;
176 break;
177 case 0xf3: /* REPZ */
178 thread_inst->flags = REPZ;
179 break;
180 case 0xf2: /* REPNZ */
181 thread_inst->flags = REPNZ;
182 break;
183 case 0xf0: /* LOCK */
184 break;
185 case 0x2e: /* CS */
186 case 0x36: /* SS */
187 case 0x3e: /* DS */
188 case 0x26: /* ES */
189 case 0x64: /* FS */
190 case 0x65: /* GS */
191 thread_inst->seg_sel = *inst;
192 break;
193 case 0x66: /* 32bit->16bit */
194 thread_inst->op_size = WORD;
195 break;
196 case 0x67:
197 break;
198 default:
199 return inst;
200 }
201 inst++;
202 }
203 }
205 static inline unsigned long get_immediate(int op16,const unsigned char *inst, int op_size)
206 {
207 int mod, reg, rm;
208 unsigned long val = 0;
209 int i;
211 mod = (*inst >> 6) & 3;
212 reg = (*inst >> 3) & 7;
213 rm = *inst & 7;
215 inst++; //skip ModR/M byte
216 if (mod != 3 && rm == 4) {
217 inst++; //skip SIB byte
218 }
220 switch(mod) {
221 case 0:
222 if (rm == 5 || rm == 4) {
223 if (op16)
224 inst = inst + 2; //disp16, skip 2 bytes
225 else
226 inst = inst + 4; //disp32, skip 4 bytes
227 }
228 break;
229 case 1:
230 inst++; //disp8, skip 1 byte
231 break;
232 case 2:
233 if (op16)
234 inst = inst + 2; //disp16, skip 2 bytes
235 else
236 inst = inst + 4; //disp32, skip 4 bytes
237 break;
238 }
240 if (op_size == QUAD)
241 op_size = LONG;
243 for (i = 0; i < op_size; i++) {
244 val |= (*inst++ & 0xff) << (8 * i);
245 }
247 return val;
248 }
/*
 * Return the register operand index encoded in a ModR/M byte, widened
 * to 0-15 with the relevant REX bit.  When mod == 3 both operands are
 * registers and the r/m field (extended by REX.B) is the one of
 * interest; otherwise the reg field (extended by REX.R) is used.
 */
static inline int get_index(const unsigned char *inst, unsigned char rex)
{
    int modrm = *inst;
    int mod = (modrm >> 6) & 3;

    if (mod == 3)
        return (modrm & 7) | ((rex & 1) << 3);           /* r/m + REX.B */

    return ((modrm >> 3) & 7) | (((rex >> 2) & 1) << 3); /* reg + REX.R */
}
271 static void init_instruction(struct instruction *mmio_inst)
272 {
273 mmio_inst->instr = 0;
274 mmio_inst->op_size = 0;
275 mmio_inst->immediate = 0;
276 mmio_inst->seg_sel = 0;
278 mmio_inst->operand[0] = 0;
279 mmio_inst->operand[1] = 0;
281 mmio_inst->flags = 0;
282 }
/*
 * Operand-size helpers for hvm_decode().  NB: both macros expand a
 * reference to a variable named 'rex' in the *caller's* scope (the REX
 * prefix byte, 0 if none seen) -- they are only usable inside
 * hvm_decode().
 */
/* Byte operand: BYTE_64 if any REX prefix was present, else plain BYTE. */
#define GET_OP_SIZE_FOR_BYTE(op_size)   \
    do {                                \
        if (rex)                        \
            op_size = BYTE_64;          \
        else                            \
            op_size = BYTE;             \
    } while(0)

/* Non-byte operand: QUAD if REX.W, keep WORD (0x66 prefix), else LONG. */
#define GET_OP_SIZE_FOR_NONEBYTE(op_size)   \
    do {                                    \
        if (rex & 0x8)                      \
            op_size = QUAD;                 \
        else if (op_size != WORD)           \
            op_size = LONG;                 \
    } while(0)
301 /*
302 * Decode mem,accumulator operands (as in <opcode> m8/m16/m32, al,ax,eax)
303 */
304 static int mem_acc(unsigned char size, struct instruction *instr)
305 {
306 instr->operand[0] = mk_operand(size, 0, 0, MEMORY);
307 instr->operand[1] = mk_operand(size, 0, 0, REGISTER);
308 return DECODE_success;
309 }
311 /*
312 * Decode accumulator,mem operands (as in <opcode> al,ax,eax, m8/m16/m32)
313 */
314 static int acc_mem(unsigned char size, struct instruction *instr)
315 {
316 instr->operand[0] = mk_operand(size, 0, 0, REGISTER);
317 instr->operand[1] = mk_operand(size, 0, 0, MEMORY);
318 return DECODE_success;
319 }
321 /*
322 * Decode mem,reg operands (as in <opcode> r32/16, m32/16)
323 */
324 static int mem_reg(unsigned char size, unsigned char *opcode,
325 struct instruction *instr, unsigned char rex)
326 {
327 int index = get_index(opcode + 1, rex);
329 instr->operand[0] = mk_operand(size, 0, 0, MEMORY);
330 instr->operand[1] = mk_operand(size, index, 0, REGISTER);
331 return DECODE_success;
332 }
334 /*
335 * Decode reg,mem operands (as in <opcode> m32/16, r32/16)
336 */
337 static int reg_mem(unsigned char size, unsigned char *opcode,
338 struct instruction *instr, unsigned char rex)
339 {
340 int index = get_index(opcode + 1, rex);
342 instr->operand[0] = mk_operand(size, index, 0, REGISTER);
343 instr->operand[1] = mk_operand(size, 0, 0, MEMORY);
344 return DECODE_success;
345 }
/*
 * Decode the MMIO-faulting instruction at 'opcode' into *instr.
 * 'realmode' flips the meaning of the 0x66 prefix (16-bit default
 * operand size).  Only the opcodes the MMIO emulator can forward to
 * the device model are accepted; returns DECODE_success/DECODE_failure.
 */
static int hvm_decode(int realmode, unsigned char *opcode, struct instruction *instr)
{
    unsigned char size_reg = 0;
    unsigned char rex = 0;
    int index;

    init_instruction(instr);

    /* Strip prefixes; 'rex' and instr->op_size/flags/seg_sel get set here. */
    opcode = check_prefix(opcode, instr, &rex);

    if (realmode) { /* meaning is reversed */
        if (instr->op_size == WORD)
            instr->op_size = LONG;
        else if (instr->op_size == LONG)
            instr->op_size = WORD;
        else if (instr->op_size == 0)
            instr->op_size = WORD;
    }

    switch (*opcode) {
    case 0x0A: /* or r8, m8 */
        instr->instr = INSTR_OR;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, instr, rex);

    case 0x0B: /* or m32/16, r32/16 */
        instr->instr = INSTR_OR;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_reg(instr->op_size, opcode, instr, rex);

    case 0x20: /* and r8, m8 */
        instr->instr = INSTR_AND;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, instr, rex);

    case 0x21: /* and r32/16, m32/16 */
        instr->instr = INSTR_AND;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x22: /* and m8, r8 */
        instr->instr = INSTR_AND;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, instr, rex);

    case 0x23: /* and m32/16, r32/16 */
        instr->instr = INSTR_AND;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_reg(instr->op_size, opcode, instr, rex);

    case 0x30: /* xor r8, m8 */
        instr->instr = INSTR_XOR;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, instr, rex);

    case 0x31: /* xor r32/16, m32/16 */
        instr->instr = INSTR_XOR;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x32: /* xor m8, r8 */
        instr->instr = INSTR_XOR;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, instr, rex);

    case 0x39: /* cmp r32/16, m32/16 */
        instr->instr = INSTR_CMP;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x3B: /* cmp m32/16, r32/16 */
        instr->instr = INSTR_CMP;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_reg(instr->op_size, opcode, instr, rex);

    case 0x80:
    case 0x81:
    case 0x83:
    {
        /* Group 1: immediate-to-memory ALU ops; /r field selects the op. */
        unsigned char ins_subtype = (opcode[1] >> 3) & 7;

        if (opcode[0] == 0x80) {
            GET_OP_SIZE_FOR_BYTE(size_reg);
            instr->op_size = BYTE;
        } else if (opcode[0] == 0x81) {
            GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
            size_reg = instr->op_size;
        } else if (opcode[0] == 0x83) {
            GET_OP_SIZE_FOR_NONEBYTE(size_reg);
            instr->op_size = size_reg;
        }

        /* opcode 0x83 always has a single byte operand */
        if (opcode[0] == 0x83)
            instr->immediate =
                (signed char)get_immediate(realmode, opcode+1, BYTE);
        else
            instr->immediate =
                get_immediate(realmode, opcode+1, instr->op_size);

        instr->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
        instr->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);

        switch (ins_subtype) {
        case 7: /* cmp $imm, m32/16 */
            instr->instr = INSTR_CMP;
            return DECODE_success;

        case 1: /* or $imm, m32/16 */
            instr->instr = INSTR_OR;
            return DECODE_success;

        default:
            printf("%x, This opcode isn't handled yet!\n", *opcode);
            return DECODE_failure;
        }
    }

    case 0x84: /* test m8, r8 */
        instr->instr = INSTR_TEST;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, instr, rex);

    case 0x87: /* xchg {r/m16|r/m32}, {m/r16|m/r32} */
        instr->instr = INSTR_XCHG;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        /* mod == 0, r/m == 5: rip/disp32-relative memory operand. */
        if (((*(opcode+1)) & 0xc7) == 5)
            return reg_mem(instr->op_size, opcode, instr, rex);
        else
            return mem_reg(instr->op_size, opcode, instr, rex);

    case 0x88: /* mov r8, m8 */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return reg_mem(size_reg, opcode, instr, rex);

    case 0x89: /* mov r32/16, m32/16 */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x8A: /* mov m8, r8 */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_reg(size_reg, opcode, instr, rex);

    case 0x8B: /* mov m32/16, r32/16 */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_reg(instr->op_size, opcode, instr, rex);

    case 0xA0: /* mov <addr>, al */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return mem_acc(size_reg, instr);

    case 0xA1: /* mov <addr>, ax/eax */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_acc(instr->op_size, instr);

    case 0xA2: /* mov al, <addr> */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(size_reg);
        return acc_mem(size_reg, instr);

    case 0xA3: /* mov ax/eax, <addr> */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return acc_mem(instr->op_size, instr);

    case 0xA4: /* movsb */
        instr->instr = INSTR_MOVS;
        instr->op_size = BYTE;
        return DECODE_success;

    case 0xA5: /* movsw/movsl */
        instr->instr = INSTR_MOVS;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return DECODE_success;

    case 0xAA: /* stosb */
        instr->instr = INSTR_STOS;
        instr->op_size = BYTE;
        return DECODE_success;

    case 0xAB: /* stosw/stosl */
        instr->instr = INSTR_STOS;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return DECODE_success;

    case 0xAC: /* lodsb */
        instr->instr = INSTR_LODS;
        instr->op_size = BYTE;
        return DECODE_success;

    case 0xAD: /* lodsw/lodsl */
        instr->instr = INSTR_LODS;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return DECODE_success;

    case 0xC6:
        if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm8, m8 */
            instr->instr = INSTR_MOV;
            instr->op_size = BYTE;

            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
            instr->immediate = get_immediate(realmode, opcode+1, instr->op_size);
            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0xC7:
        if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm16/32, m16/32 */
            instr->instr = INSTR_MOV;
            GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);

            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
            instr->immediate = get_immediate(realmode, opcode+1, instr->op_size);
            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0xF6:
    case 0xF7:
        if (((opcode[1] >> 3) & 7) == 0) { /* test $imm8/16/32, m8/16/32 */
            instr->instr = INSTR_TEST;

            if (opcode[0] == 0xF6) {
                GET_OP_SIZE_FOR_BYTE(size_reg);
                instr->op_size = BYTE;
            } else {
                GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
                size_reg = instr->op_size;
            }

            instr->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
            instr->immediate = get_immediate(realmode, opcode+1, instr->op_size);
            instr->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0x0F: /* two-byte opcode: fall through to the table below */
        break;

    default:
        printf("%x, This opcode isn't handled yet!\n", *opcode);
        return DECODE_failure;
    }

    /* Second byte of a 0x0F-prefixed (two-byte) opcode. */
    switch (*++opcode) {
    case 0xB6: /* movzx m8, r16/r32/r64 */
        instr->instr = INSTR_MOVZX;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        index = get_index(opcode + 1, rex);
        instr->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
        instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xB7: /* movzx m16/m32, r32/r64 */
        instr->instr = INSTR_MOVZX;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        index = get_index(opcode + 1, rex);
        if (rex & 0x8)
            instr->operand[0] = mk_operand(LONG, 0, 0, MEMORY);
        else
            instr->operand[0] = mk_operand(WORD, 0, 0, MEMORY);
        instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xBE: /* movsx m8, r16/r32/r64 */
        instr->instr = INSTR_MOVSX;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        index = get_index(opcode + 1, rex);
        instr->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
        instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xBF: /* movsx m16, r32/r64 */
        instr->instr = INSTR_MOVSX;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        index = get_index(opcode + 1, rex);
        instr->operand[0] = mk_operand(WORD, 0, 0, MEMORY);
        instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xA3: /* bt r32, m32 */
        instr->instr = INSTR_BT;
        index = get_index(opcode + 1, rex);
        instr->op_size = LONG;
        instr->operand[0] = mk_operand(instr->op_size, index, 0, REGISTER);
        instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
        return DECODE_success;

    default:
        printf("0f %x, This opcode isn't handled yet\n", *opcode);
        return DECODE_failure;
    }
}
663 int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
664 {
665 if (inst_len > MAX_INST_LEN || inst_len <= 0)
666 return 0;
667 if (!hvm_copy(buf, guest_eip, inst_len, HVM_COPY_IN))
668 return 0;
669 return inst_len;
670 }
672 void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
673 unsigned long count, int size, long value, int dir, int pvalid)
674 {
675 struct vcpu *v = current;
676 vcpu_iodata_t *vio;
677 ioreq_t *p;
679 vio = get_vio(v->domain, v->vcpu_id);
680 if (vio == NULL) {
681 printk("bad shared page: %lx\n", (unsigned long) vio);
682 domain_crash_synchronous();
683 }
685 if (test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags)) {
686 printf("HVM I/O has not yet completed\n");
687 domain_crash_synchronous();
688 }
689 set_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags);
691 p = &vio->vp_ioreq;
692 p->dir = dir;
693 p->pdata_valid = pvalid;
695 p->type = IOREQ_TYPE_PIO;
696 p->size = size;
697 p->addr = port;
698 p->count = count;
699 p->df = regs->eflags & EF_DF ? 1 : 0;
701 p->io_count++;
703 if (pvalid) {
704 if (hvm_paging_enabled(current))
705 p->u.pdata = (void *) gva_to_gpa(value);
706 else
707 p->u.pdata = (void *) value; /* guest VA == guest PA */
708 } else
709 p->u.data = value;
711 if (hvm_portio_intercept(p)) {
712 p->state = STATE_IORESP_READY;
713 hvm_io_assist(v);
714 return;
715 }
717 p->state = STATE_IOREQ_READY;
719 evtchn_send(iopacket_port(v));
720 hvm_wait_io();
721 }
/*
 * Build and dispatch an MMIO request of the given ioreq 'type' for
 * guest-physical address 'gpa'.  If 'pvalid' the data lives in guest
 * memory at 'value' (translated when paging is on); otherwise 'value'
 * is the datum itself.  Requests intercepted inside Xen complete
 * immediately; otherwise the device model is notified and we wait.
 */
void send_mmio_req(
    unsigned char type, unsigned long gpa,
    unsigned long count, int size, long value, int dir, int pvalid)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;
    struct cpu_user_regs *regs;

    /* Guest state was snapshotted into io_context by handle_mmio(). */
    regs = &current->arch.hvm_vcpu.io_op.io_context;

    vio = get_vio(v->domain, v->vcpu_id);
    if (vio == NULL) {
        printf("bad shared page\n");
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;

    /* Only one emulated I/O may be outstanding per vcpu. */
    if (test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags)) {
        printf("HVM I/O has not yet completed\n");
        domain_crash_synchronous();
    }

    set_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags);
    p->dir = dir;
    p->pdata_valid = pvalid;

    p->type = type;
    p->size = size;
    p->addr = gpa;
    p->count = count;
    p->df = regs->eflags & EF_DF ? 1 : 0;

    p->io_count++;

    if (pvalid) {
        if (hvm_paging_enabled(v))
            p->u.pdata = (void *) gva_to_gpa(value);
        else
            p->u.pdata = (void *) value; /* guest VA == guest PA */
    } else
        p->u.data = value;

    /* Regions emulated inside Xen complete without the device model. */
    if (hvm_mmio_intercept(p)){
        p->state = STATE_IORESP_READY;
        hvm_io_assist(v);
        return;
    }

    p->state = STATE_IOREQ_READY;

    evtchn_send(iopacket_port(v));
    hvm_wait_io();
}
/*
 * Issue the MMIO request for a decoded two-operand instruction.
 * operand[0] is the source and operand[1] the destination; exactly one
 * of the pair refers to the faulting MMIO location, which is why a
 * REGISTER/IMMEDIATE source implies an MMIO (memory) destination and a
 * MEMORY source implies a register destination.
 */
static void mmio_operands(int type, unsigned long gpa, struct instruction *inst,
                          struct hvm_io_op *mmio_opp, struct cpu_user_regs *regs)
{
    unsigned long value = 0;
    int index, size_reg;

    size_reg = operand_size(inst->operand[0]);

    /* Stash decode state so hvm_io_assist() can complete the op later. */
    mmio_opp->flags = inst->flags;
    mmio_opp->instr = inst->instr;
    mmio_opp->operand[0] = inst->operand[0]; /* source */
    mmio_opp->operand[1] = inst->operand[1]; /* destination */
    mmio_opp->immediate = inst->immediate;

    if (inst->operand[0] & REGISTER) { /* dest is memory */
        index = operand_index(inst->operand[0]);
        value = get_reg_value(size_reg, index, 0, regs);
        send_mmio_req(type, gpa, 1, inst->op_size, value, IOREQ_WRITE, 0);
    } else if (inst->operand[0] & IMMEDIATE) { /* dest is memory */
        value = inst->immediate;
        send_mmio_req(type, gpa, 1, inst->op_size, value, IOREQ_WRITE, 0);
    } else if (inst->operand[0] & MEMORY) { /* dest is register */
        /* send the request and wait for the value */
        if ( (inst->instr == INSTR_MOVZX) || (inst->instr == INSTR_MOVSX) )
            /* Extension ops read only the narrow source width. */
            send_mmio_req(type, gpa, 1, size_reg, 0, IOREQ_READ, 0);
        else
            send_mmio_req(type, gpa, 1, inst->op_size, 0, IOREQ_READ, 0);
    } else {
        printf("mmio_operands: invalid operand\n");
        domain_crash_synchronous();
    }
}
/*
 * Repetition count for string instructions: %ecx when a REP prefix was
 * decoded (16-bit wide in real mode), else 1.  NB: expands 'mmio_inst',
 * 'realmode' and 'regs' from the enclosing handle_mmio() scope.
 */
#define GET_REPEAT_COUNT() \
     (mmio_inst.flags & REPZ ? (realmode ? regs->ecx & 0xFFFF : regs->ecx) : 1)
815 void handle_mmio(unsigned long va, unsigned long gpa)
816 {
817 unsigned long inst_addr;
818 struct hvm_io_op *mmio_opp;
819 struct cpu_user_regs *regs;
820 struct instruction mmio_inst;
821 unsigned char inst[MAX_INST_LEN];
822 int i, realmode, ret, inst_len;
823 struct vcpu *v = current;
825 mmio_opp = &v->arch.hvm_vcpu.io_op;
826 regs = &mmio_opp->io_context;
828 /* Copy current guest state into io instruction state structure. */
829 memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
830 hvm_store_cpu_guest_regs(v, regs, NULL);
832 if ((inst_len = hvm_instruction_length(v)) <= 0) {
833 printf("handle_mmio: failed to get instruction length\n");
834 domain_crash_synchronous();
835 }
837 realmode = hvm_realmode(v);
838 if (realmode)
839 inst_addr = (regs->cs << 4) + regs->eip;
840 else
841 inst_addr = regs->eip;
843 memset(inst, 0, MAX_INST_LEN);
844 ret = inst_copy_from_guest(inst, inst_addr, inst_len);
845 if (ret != inst_len) {
846 printf("handle_mmio: failed to copy instruction\n");
847 domain_crash_synchronous();
848 }
850 init_instruction(&mmio_inst);
852 if (hvm_decode(realmode, inst, &mmio_inst) == DECODE_failure) {
853 printf("handle_mmio: failed to decode instruction\n");
854 printf("mmio opcode: va 0x%lx, gpa 0x%lx, len %d:",
855 va, gpa, inst_len);
856 for (i = 0; i < inst_len; i++)
857 printf(" %02x", inst[i] & 0xFF);
858 printf("\n");
859 domain_crash_synchronous();
860 }
862 regs->eip += inst_len; /* advance %eip */
864 switch (mmio_inst.instr) {
865 case INSTR_MOV:
866 mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mmio_opp, regs);
867 break;
869 case INSTR_MOVS:
870 {
871 unsigned long count = GET_REPEAT_COUNT();
872 unsigned long size = mmio_inst.op_size;
873 int sign = regs->eflags & EF_DF ? -1 : 1;
874 unsigned long addr = 0;
875 int dir;
877 /* determine non-MMIO address */
878 if (realmode) {
879 if (((regs->es << 4) + (regs->edi & 0xFFFF)) == va) {
880 dir = IOREQ_WRITE;
881 addr = (regs->ds << 4) + (regs->esi & 0xFFFF);
882 } else {
883 dir = IOREQ_READ;
884 addr = (regs->es << 4) + (regs->edi & 0xFFFF);
885 }
886 } else {
887 if (va == regs->edi) {
888 dir = IOREQ_WRITE;
889 addr = regs->esi;
890 } else {
891 dir = IOREQ_READ;
892 addr = regs->edi;
893 }
894 }
896 mmio_opp->flags = mmio_inst.flags;
897 mmio_opp->instr = mmio_inst.instr;
899 /*
900 * In case of a movs spanning multiple pages, we break the accesses
901 * up into multiple pages (the device model works with non-continguous
902 * physical guest pages). To copy just one page, we adjust %ecx and
903 * do not advance %eip so that the next "rep movs" copies the next page.
904 * Unaligned accesses, for example movsl starting at PGSZ-2, are
905 * turned into a single copy where we handle the overlapping memory
906 * copy ourself. After this copy succeeds, "rep movs" is executed
907 * again.
908 */
909 if ((addr & PAGE_MASK) != ((addr + sign * (size - 1)) & PAGE_MASK)) {
910 unsigned long value = 0;
912 mmio_opp->flags |= OVERLAP;
914 regs->eip -= inst_len; /* do not advance %eip */
916 if (dir == IOREQ_WRITE)
917 hvm_copy(&value, addr, size, HVM_COPY_IN);
918 send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, 0);
919 } else {
920 if ((addr & PAGE_MASK) != ((addr + sign * (count * size - 1)) & PAGE_MASK)) {
921 regs->eip -= inst_len; /* do not advance %eip */
923 if (sign > 0)
924 count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
925 else
926 count = (addr & ~PAGE_MASK) / size;
927 }
929 send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, 1);
930 }
931 break;
932 }
934 case INSTR_MOVZX:
935 case INSTR_MOVSX:
936 mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mmio_opp, regs);
937 break;
939 case INSTR_STOS:
940 /*
941 * Since the destination is always in (contiguous) mmio space we don't
942 * need to break it up into pages.
943 */
944 mmio_opp->flags = mmio_inst.flags;
945 mmio_opp->instr = mmio_inst.instr;
946 send_mmio_req(IOREQ_TYPE_COPY, gpa,
947 GET_REPEAT_COUNT(), mmio_inst.op_size, regs->eax, IOREQ_WRITE, 0);
948 break;
950 case INSTR_LODS:
951 /*
952 * Since the source is always in (contiguous) mmio space we don't
953 * need to break it up into pages.
954 */
955 mmio_opp->flags = mmio_inst.flags;
956 mmio_opp->instr = mmio_inst.instr;
957 send_mmio_req(IOREQ_TYPE_COPY, gpa,
958 GET_REPEAT_COUNT(), mmio_inst.op_size, 0, IOREQ_READ, 0);
959 break;
961 case INSTR_OR:
962 mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mmio_opp, regs);
963 break;
965 case INSTR_AND:
966 mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mmio_opp, regs);
967 break;
969 case INSTR_XOR:
970 mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mmio_opp, regs);
971 break;
973 case INSTR_CMP: /* Pass through */
974 case INSTR_TEST:
975 mmio_opp->flags = mmio_inst.flags;
976 mmio_opp->instr = mmio_inst.instr;
977 mmio_opp->operand[0] = mmio_inst.operand[0]; /* source */
978 mmio_opp->operand[1] = mmio_inst.operand[1]; /* destination */
979 mmio_opp->immediate = mmio_inst.immediate;
981 /* send the request and wait for the value */
982 send_mmio_req(IOREQ_TYPE_COPY, gpa, 1,
983 mmio_inst.op_size, 0, IOREQ_READ, 0);
984 break;
986 case INSTR_BT:
987 {
988 unsigned long value = 0;
989 int index, size;
991 mmio_opp->instr = mmio_inst.instr;
992 mmio_opp->operand[0] = mmio_inst.operand[0]; /* bit offset */
993 mmio_opp->operand[1] = mmio_inst.operand[1]; /* bit base */
995 index = operand_index(mmio_inst.operand[0]);
996 size = operand_size(mmio_inst.operand[0]);
997 value = get_reg_value(size, index, 0, regs);
999 send_mmio_req(IOREQ_TYPE_COPY, gpa + (value >> 5), 1,
1000 mmio_inst.op_size, 0, IOREQ_READ, 0);
1001 break;
1004 case INSTR_XCHG:
1005 mmio_opp->flags = mmio_inst.flags;
1006 mmio_opp->instr = mmio_inst.instr;
1007 mmio_opp->operand[0] = mmio_inst.operand[0]; /* source */
1008 mmio_opp->operand[1] = mmio_inst.operand[1]; /* destination */
1009 if ( mmio_inst.operand[0] & REGISTER ) {
1010 long value;
1011 unsigned long operand = mmio_inst.operand[0];
1012 value = get_reg_value(operand_size(operand),
1013 operand_index(operand), 0,
1014 regs);
1015 /* send the request and wait for the value */
1016 send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
1017 mmio_inst.op_size, value, IOREQ_WRITE, 0);
1018 } else {
1019 /* the destination is a register */
1020 long value;
1021 unsigned long operand = mmio_inst.operand[1];
1022 value = get_reg_value(operand_size(operand),
1023 operand_index(operand), 0,
1024 regs);
1025 /* send the request and wait for the value */
1026 send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
1027 mmio_inst.op_size, value, IOREQ_WRITE, 0);
1029 break;
1031 default:
1032 printf("Unhandled MMIO instruction\n");
1033 domain_crash_synchronous();
1037 /*
1038 * Local variables:
1039 * mode: C
1040 * c-set-style: "BSD"
1041 * c-basic-offset: 4
1042 * tab-width: 4
1043 * indent-tabs-mode: nil
1044 * End:
1045 */