
xen/arch/x86/vmx_platform.c @ 6737:a8f01a0a9559

One more instruction for the VMX MMIO decoder.
This patch handles the instruction with opcode 0x80.
Without it, a 64-bit VMX guest cannot boot.

Signed-off-by: Chengyuan Li <chengyuan.li@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Sep 10 14:19:09 2005 +0000 (2005-09-10)
parents d4d69c509371
children 21cbdb20ff4c
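
Note on the opcode being added: 0x80 is the x86 "group 1" encoding, an ALU
operation selected by the reg field of the ModR/M byte, applied to an r/m8
destination with an imm8 operand. The decoder below accepts only its
reg = 7 (CMP) form.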
/*
 * vmx_platform.c: handling x86 platform related MMIO instructions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <asm/shadow.h>
#include <xen/domain_page.h>
#include <asm/page.h>
#include <xen/event.h>
#include <xen/trace.h>
#include <asm/vmx.h>
#include <asm/vmx_platform.h>
#include <public/io/ioreq.h>

#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>
#if CONFIG_PAGING_LEVELS >= 3
#include <asm/shadow_64.h>
#endif
#ifdef CONFIG_VMX

#define DECODE_success  1
#define DECODE_failure  0

#if defined (__x86_64__)
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
    __vmread(GUEST_SS_SELECTOR, &regs->ss);
    __vmread(GUEST_RSP, &regs->rsp);
    __vmread(GUEST_RFLAGS, &regs->rflags);
    __vmread(GUEST_CS_SELECTOR, &regs->cs);
    __vmread(GUEST_DS_SELECTOR, &regs->ds);
    __vmread(GUEST_ES_SELECTOR, &regs->es);
    __vmread(GUEST_RIP, &regs->rip);
}

static inline long __get_reg_value(unsigned long reg, int size)
{
    switch(size) {
    case BYTE_64:
        return (char)(reg & 0xFF);
    case WORD:
        return (short)(reg & 0xFFFF);
    case LONG:
        return (int)(reg & 0xFFFFFFFF);
    case QUAD:
        return (long)(reg);
    default:
        printf("Error: (__get_reg_value) Invalid reg size\n");
        domain_crash_synchronous();
    }
}

long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
    if (size == BYTE) {
        switch (index) {
        case 0: /* %al */
            return (char)(regs->rax & 0xFF);
        case 1: /* %cl */
            return (char)(regs->rcx & 0xFF);
        case 2: /* %dl */
            return (char)(regs->rdx & 0xFF);
        case 3: /* %bl */
            return (char)(regs->rbx & 0xFF);
        case 4: /* %ah */
            return (char)((regs->rax & 0xFF00) >> 8);
        case 5: /* %ch */
            return (char)((regs->rcx & 0xFF00) >> 8);
        case 6: /* %dh */
            return (char)((regs->rdx & 0xFF00) >> 8);
        case 7: /* %bh */
            return (char)((regs->rbx & 0xFF00) >> 8);
        default:
            printf("Error: (get_reg_value) Invalid index value\n");
            domain_crash_synchronous();
        }
    }

    switch (index) {
    case 0: return __get_reg_value(regs->rax, size);
    case 1: return __get_reg_value(regs->rcx, size);
    case 2: return __get_reg_value(regs->rdx, size);
    case 3: return __get_reg_value(regs->rbx, size);
    case 4: return __get_reg_value(regs->rsp, size);
    case 5: return __get_reg_value(regs->rbp, size);
    case 6: return __get_reg_value(regs->rsi, size);
    case 7: return __get_reg_value(regs->rdi, size);
    case 8: return __get_reg_value(regs->r8, size);
    case 9: return __get_reg_value(regs->r9, size);
    case 10: return __get_reg_value(regs->r10, size);
    case 11: return __get_reg_value(regs->r11, size);
    case 12: return __get_reg_value(regs->r12, size);
    case 13: return __get_reg_value(regs->r13, size);
    case 14: return __get_reg_value(regs->r14, size);
    case 15: return __get_reg_value(regs->r15, size);
    default:
        printf("Error: (get_reg_value) Invalid index value\n");
        domain_crash_synchronous();
    }
}
#elif defined (__i386__)
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
    __vmread(GUEST_SS_SELECTOR, &regs->ss);
    __vmread(GUEST_RSP, &regs->esp);
    __vmread(GUEST_RFLAGS, &regs->eflags);
    __vmread(GUEST_CS_SELECTOR, &regs->cs);
    __vmread(GUEST_DS_SELECTOR, &regs->ds);
    __vmread(GUEST_ES_SELECTOR, &regs->es);
    __vmread(GUEST_RIP, &regs->eip);
}

static inline long __get_reg_value(unsigned long reg, int size)
{
    switch(size) {
    case WORD:
        return (short)(reg & 0xFFFF);
    case LONG:
        return (int)(reg & 0xFFFFFFFF);
    default:
        printf("Error: (__get_reg_value) Invalid reg size\n");
        domain_crash_synchronous();
    }
}

long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
    if (size == BYTE) {
        switch (index) {
        case 0: /* %al */
            return (char)(regs->eax & 0xFF);
        case 1: /* %cl */
            return (char)(regs->ecx & 0xFF);
        case 2: /* %dl */
            return (char)(regs->edx & 0xFF);
        case 3: /* %bl */
            return (char)(regs->ebx & 0xFF);
        case 4: /* %ah */
            return (char)((regs->eax & 0xFF00) >> 8);
        case 5: /* %ch */
            return (char)((regs->ecx & 0xFF00) >> 8);
        case 6: /* %dh */
            return (char)((regs->edx & 0xFF00) >> 8);
        case 7: /* %bh */
            return (char)((regs->ebx & 0xFF00) >> 8);
        default:
            printf("Error: (get_reg_value) Invalid index value\n");
            domain_crash_synchronous();
        }
    }

    switch (index) {
    case 0: return __get_reg_value(regs->eax, size);
    case 1: return __get_reg_value(regs->ecx, size);
    case 2: return __get_reg_value(regs->edx, size);
    case 3: return __get_reg_value(regs->ebx, size);
    case 4: return __get_reg_value(regs->esp, size);
    case 5: return __get_reg_value(regs->ebp, size);
    case 6: return __get_reg_value(regs->esi, size);
    case 7: return __get_reg_value(regs->edi, size);
    default:
        printf("Error: (get_reg_value) Invalid index value\n");
        domain_crash_synchronous();
    }
}
#endif

static inline unsigned char *check_prefix(unsigned char *inst,
                struct instruction *thread_inst, unsigned char *rex_p)
{
    while (1) {
        switch (*inst) {
        /* REX prefixes (0x40-0x4f) for em64t instructions */
        case 0x40 ... 0x4f:
            *rex_p = *inst;
            break;
        case 0xf3: /* REPZ */
            thread_inst->flags = REPZ;
            break;
        case 0xf2: /* REPNZ */
            thread_inst->flags = REPNZ;
            break;
        case 0xf0: /* LOCK */
            break;
        case 0x2e: /* CS */
        case 0x36: /* SS */
        case 0x3e: /* DS */
        case 0x26: /* ES */
        case 0x64: /* FS */
        case 0x65: /* GS */
            thread_inst->seg_sel = *inst;
            break;
        case 0x66: /* 32bit->16bit operand-size override */
            thread_inst->op_size = WORD;
            break;
        case 0x67:
            printf("Error: Not handling 0x67 (yet)\n");
            domain_crash_synchronous();
            break;
        default:
            return inst;
        }
        inst++;
    }
}
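
/*
 * Illustrative example (not from the original source): for the byte
 * sequence 66 89 08 ("mov %cx,(%eax)"), check_prefix() consumes the
 * 0x66 operand-size override, setting op_size = WORD, and returns a
 * pointer to the 0x89 opcode byte for the main decoder.
 */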

static inline unsigned long get_immediate(int op16, const unsigned char *inst, int op_size)
{
    int mod, reg, rm;
    unsigned long val = 0;
    int i;

    mod = (*inst >> 6) & 3;
    reg = (*inst >> 3) & 7;
    rm = *inst & 7;

    inst++; // skip ModR/M byte
    if (mod != 3 && rm == 4) {
        inst++; // skip SIB byte
    }

    switch(mod) {
    case 0:
        if (rm == 5 || rm == 4) {
            if (op16)
                inst = inst + 2; // disp16, skip 2 bytes
            else
                inst = inst + 4; // disp32, skip 4 bytes
        }
        break;
    case 1:
        inst++; // disp8, skip 1 byte
        break;
    case 2:
        if (op16)
            inst = inst + 2; // disp16, skip 2 bytes
        else
            inst = inst + 4; // disp32, skip 4 bytes
        break;
    }

    if (op_size == QUAD)
        op_size = LONG;

    for (i = 0; i < op_size; i++) {
        val |= (*inst++ & 0xff) << (8 * i);
    }

    return val;
}
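
/*
 * Illustrative walk-through (not from the original source): for
 * "c7 00 78 56 34 12" ("movl $0x12345678,(%eax)"), get_immediate() is
 * handed the bytes starting at the ModR/M byte 0x00 (mod=0, rm=0, so no
 * SIB byte and no displacement) and assembles the little-endian
 * immediate 0x12345678 from the four bytes that follow.
 */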

static inline int get_index(const unsigned char *inst, unsigned char rex)
{
    int mod, reg, rm;
    int rex_r, rex_b;

    mod = (*inst >> 6) & 3;
    reg = (*inst >> 3) & 7;
    rm = *inst & 7;

    rex_r = (rex >> 2) & 1;
    rex_b = rex & 1;

    // Only one operand in the instruction is a register
    if (mod == 3) {
        return (rm + (rex_b << 3));
    } else {
        return (reg + (rex_r << 3));
    }
}
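
/*
 * Illustrative example (not from the original source): for ModR/M byte
 * 0xCB (mod=3, reg=1, rm=3) with no REX prefix, get_index() returns
 * 3 (%ebx); with REX.B set it returns 11 (%r11). For a memory form such
 * as ModR/M 0x08 (mod=0), the reg field names the register operand, so
 * it returns 1.
 */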

static void init_instruction(struct instruction *mmio_inst)
{
    mmio_inst->instr = 0;
    mmio_inst->op_size = 0;
    mmio_inst->immediate = 0;
    mmio_inst->seg_sel = 0;

    mmio_inst->operand[0] = 0;
    mmio_inst->operand[1] = 0;

    mmio_inst->flags = 0;
}

#define GET_OP_SIZE_FOR_BYTE(op_size)       \
    do {                                    \
        if (rex)                            \
            op_size = BYTE_64;              \
        else                                \
            op_size = BYTE;                 \
    } while(0)

#define GET_OP_SIZE_FOR_NONEBYTE(op_size)   \
    do {                                    \
        if (rex & 0x8)                      \
            op_size = QUAD;                 \
        else if (op_size != WORD)           \
            op_size = LONG;                 \
    } while(0)

/*
 * Decode mem,accumulator operands (as in <opcode> m8/m16/m32, al,ax,eax)
 */
static int mem_acc(unsigned char size, struct instruction *instr)
{
    instr->operand[0] = mk_operand(size, 0, 0, MEMORY);
    instr->operand[1] = mk_operand(size, 0, 0, REGISTER);
    return DECODE_success;
}

/*
 * Decode accumulator,mem operands (as in <opcode> al,ax,eax, m8/m16/m32)
 */
static int acc_mem(unsigned char size, struct instruction *instr)
{
    instr->operand[0] = mk_operand(size, 0, 0, REGISTER);
    instr->operand[1] = mk_operand(size, 0, 0, MEMORY);
    return DECODE_success;
}
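
/*
 * Illustrative example (not from the original source): opcode 0xA1
 * ("mov <addr>,%eax") is decoded with mem_acc() -- memory source,
 * accumulator destination -- while 0xA3 ("mov %eax,<addr>") uses
 * acc_mem(); register index 0 is the accumulator in both cases.
 */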

/*
 * Decode mem,reg operands (as in <opcode> r32/16, m32/16)
 */
static int mem_reg(unsigned char size, unsigned char *opcode,
                   struct instruction *instr, unsigned char rex)
{
    int index = get_index(opcode + 1, rex);

    instr->operand[0] = mk_operand(size, 0, 0, MEMORY);
    instr->operand[1] = mk_operand(size, index, 0, REGISTER);
    return DECODE_success;
}

/*
 * Decode reg,mem operands (as in <opcode> m32/16, r32/16)
 */
static int reg_mem(unsigned char size, unsigned char *opcode,
                   struct instruction *instr, unsigned char rex)
{
    int index = get_index(opcode + 1, rex);

    instr->operand[0] = mk_operand(size, index, 0, REGISTER);
    instr->operand[1] = mk_operand(size, 0, 0, MEMORY);
    return DECODE_success;
}

static int vmx_decode(unsigned char *opcode, struct instruction *instr)
{
    unsigned long eflags;
    int index, vm86 = 0;
    unsigned char rex = 0;
    unsigned char tmp_size = 0;

    init_instruction(instr);

    opcode = check_prefix(opcode, instr, &rex);

    __vmread(GUEST_RFLAGS, &eflags);
    if (eflags & X86_EFLAGS_VM)
        vm86 = 1;

    if (vm86) { /* meaning is reversed */
        if (instr->op_size == WORD)
            instr->op_size = LONG;
        else if (instr->op_size == LONG)
            instr->op_size = WORD;
        else if (instr->op_size == 0)
            instr->op_size = WORD;
    }

    switch (*opcode) {
    case 0x0B: /* or m32/16, r32/16 */
        instr->instr = INSTR_OR;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_reg(instr->op_size, opcode, instr, rex);

    case 0x20: /* and r8, m8 */
        instr->instr = INSTR_AND;
        GET_OP_SIZE_FOR_BYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x21: /* and r32/16, m32/16 */
        instr->instr = INSTR_AND;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x23: /* and m32/16, r32/16 */
        instr->instr = INSTR_AND;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_reg(instr->op_size, opcode, instr, rex);

    case 0x30: /* xor r8, m8 */
        instr->instr = INSTR_XOR;
        GET_OP_SIZE_FOR_BYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x31: /* xor r32/16, m32/16 */
        instr->instr = INSTR_XOR;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x39: /* cmp r32/16, m32/16 */
        instr->instr = INSTR_CMP;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x80:
    case 0x81:
        if (((opcode[1] >> 3) & 7) == 7) { /* cmp $imm, m32/16 */
            instr->instr = INSTR_CMP;

            if (opcode[0] == 0x80)
                GET_OP_SIZE_FOR_BYTE(instr->op_size);
            else
                GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);

            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
            /* 0x80 carries an imm8; 0x81 an imm16/32 matching op_size */
            instr->immediate = get_immediate(vm86, opcode + 1,
                                 opcode[0] == 0x80 ? BYTE : instr->op_size);
            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;
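
    /*
     * Illustrative example (not from the original source): the opcode
     * 0x80 case added by this patch covers byte compares such as
     * "80 38 01" ("cmpb $0x1,(%rax)"): ModR/M 0x38 has reg=7, selecting
     * CMP from opcode group 1, and the trailing 0x01 is the imm8 operand.
     */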

    case 0x84: /* test m8, r8 */
        instr->instr = INSTR_TEST;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return mem_reg(tmp_size, opcode, instr, rex);

    case 0x88: /* mov r8, m8 */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return reg_mem(tmp_size, opcode, instr, rex);

    case 0x89: /* mov r32/16, m32/16 */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x8A: /* mov m8, r8 */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return mem_reg(tmp_size, opcode, instr, rex);

    case 0x8B: /* mov m32/16, r32/16 */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_reg(instr->op_size, opcode, instr, rex);

    case 0xA0: /* mov <addr>, al */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return mem_acc(tmp_size, instr);

    case 0xA1: /* mov <addr>, ax/eax */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_acc(instr->op_size, instr);

    case 0xA2: /* mov al, <addr> */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return acc_mem(tmp_size, instr);

    case 0xA3: /* mov ax/eax, <addr> */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return acc_mem(instr->op_size, instr);

    case 0xA4: /* movsb */
        instr->instr = INSTR_MOVS;
        instr->op_size = BYTE;
        return DECODE_success;

    case 0xA5: /* movsw/movsl */
        instr->instr = INSTR_MOVS;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return DECODE_success;

    case 0xAA: /* stosb */
        instr->instr = INSTR_STOS;
        instr->op_size = BYTE;
        return DECODE_success;

    case 0xAB: /* stosw/stosl */
        instr->instr = INSTR_STOS;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return DECODE_success;

    case 0xC6:
        if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm8, m8 */
            instr->instr = INSTR_MOV;
            instr->op_size = BYTE;

            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
            instr->immediate = get_immediate(vm86, opcode + 1, instr->op_size);
            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0xC7:
        if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm16/32, m16/32 */
            instr->instr = INSTR_MOV;
            GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);

            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
            instr->immediate = get_immediate(vm86, opcode + 1, instr->op_size);
            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0xF6:
        if (((opcode[1] >> 3) & 7) == 0) { /* testb $imm8, m8 */
            instr->instr = INSTR_TEST;
            instr->op_size = BYTE;

            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
            instr->immediate = get_immediate(vm86, opcode + 1, instr->op_size);
            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0x0F:
        break;

    default:
        printf("%x, This opcode isn't handled yet!\n", *opcode);
        return DECODE_failure;
    }

    switch (*++opcode) {
    case 0xB6: /* movz m8, r16/r32 */
        instr->instr = INSTR_MOVZ;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        index = get_index(opcode + 1, rex);
        instr->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
        instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xB7: /* movz m16, r32 */
        instr->instr = INSTR_MOVZ;
        index = get_index(opcode + 1, rex);
        if (rex & 0x8) {
            instr->op_size = LONG;
            instr->operand[1] = mk_operand(QUAD, index, 0, REGISTER);
        } else {
            instr->op_size = WORD;
            instr->operand[1] = mk_operand(LONG, index, 0, REGISTER);
        }
        instr->operand[0] = mk_operand(instr->op_size, 0, 0, MEMORY);
        return DECODE_success;

    default:
        printf("0f %x, This opcode isn't handled yet\n", *opcode);
        return DECODE_failure;
    }
}

int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
{
    if (inst_len > MAX_INST_LEN || inst_len <= 0)
        return 0;
    if (!vmx_copy(buf, guest_eip, inst_len, VMX_COPY_IN))
        return 0;
    return inst_len;
}
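
/*
 * Summary comment (added for clarity; it restates the code below):
 * send_mmio_req() marshals one MMIO access into the vcpu's shared ioreq
 * page, marks the vcpu as waiting for I/O, and either satisfies the
 * request via an internal intercept or signals the device model over
 * the event channel and blocks until the response arrives.
 */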
void send_mmio_req(unsigned char type, unsigned long gpa,
                   unsigned long count, int size, long value, int dir, int pvalid)
{
    struct vcpu *d = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;
    int vm86;
    struct cpu_user_regs *regs;
    extern long evtchn_send(int lport);

    regs = current->domain->arch.vmx_platform.mpci.inst_decoder_regs;

    vio = get_vio(d->domain, d->vcpu_id);
    if (vio == NULL) {
        printf("bad shared page\n");
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;

    vm86 = regs->eflags & X86_EFLAGS_VM;

    if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) {
        printf("VMX I/O has not yet completed\n");
        domain_crash_synchronous();
    }

    set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);
    p->dir = dir;
    p->pdata_valid = pvalid;

    p->type = type;
    p->size = size;
    p->addr = gpa;
    p->count = count;
    p->df = regs->eflags & EF_DF ? 1 : 0;

    if (pvalid) {
        if (vmx_paging_enabled(current))
            p->u.pdata = (void *) gva_to_gpa(value);
        else
            p->u.pdata = (void *) value; /* guest VA == guest PA */
    } else
        p->u.data = value;

    p->state = STATE_IOREQ_READY;

    if (vmx_mmio_intercept(p)) {
        p->state = STATE_IORESP_READY;
        vmx_io_assist(d);
        return;
    }

    evtchn_send(iopacket_port(d->domain));
    vmx_wait_io();
}

static void mmio_operands(int type, unsigned long gpa, struct instruction *inst,
                          struct mi_per_cpu_info *mpcip, struct cpu_user_regs *regs)
{
    unsigned long value = 0;
    int index, size;

    size = operand_size(inst->operand[0]);

    mpcip->flags = inst->flags;
    mpcip->instr = inst->instr;
    mpcip->operand[0] = inst->operand[0]; /* source */
    mpcip->operand[1] = inst->operand[1]; /* destination */

    if (inst->operand[0] & REGISTER) { /* dest is memory */
        index = operand_index(inst->operand[0]);
        value = get_reg_value(size, index, 0, regs);
        send_mmio_req(type, gpa, 1, size, value, IOREQ_WRITE, 0);
    } else if (inst->operand[0] & IMMEDIATE) { /* dest is memory */
        value = inst->immediate;
        send_mmio_req(type, gpa, 1, size, value, IOREQ_WRITE, 0);
    } else if (inst->operand[0] & MEMORY) { /* dest is register */
        /* send the request and wait for the value */
        send_mmio_req(type, gpa, 1, size, 0, IOREQ_READ, 0);
    } else {
        printf("mmio_operands: invalid operand\n");
        domain_crash_synchronous();
    }
}

#define GET_REPEAT_COUNT() \
     (mmio_inst.flags & REPZ ? (vm86 ? regs->ecx & 0xFFFF : regs->ecx) : 1)

void handle_mmio(unsigned long va, unsigned long gpa)
{
    unsigned long eip, eflags, cs;
    unsigned long inst_len, inst_addr;
    struct mi_per_cpu_info *mpcip;
    struct cpu_user_regs *regs;
    struct instruction mmio_inst;
    unsigned char inst[MAX_INST_LEN];
    int i, vm86, ret;

    mpcip = &current->domain->arch.vmx_platform.mpci;
    regs = mpcip->inst_decoder_regs;

    __vmread(GUEST_RIP, &eip);
    __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
    __vmread(GUEST_RFLAGS, &eflags);
    vm86 = eflags & X86_EFLAGS_VM;

    if (vm86) {
        __vmread(GUEST_CS_SELECTOR, &cs);
        inst_addr = (cs << 4) + eip;
    } else
        inst_addr = eip;

    memset(inst, 0, MAX_INST_LEN);
    ret = inst_copy_from_guest(inst, inst_addr, inst_len);
    if (ret != inst_len) {
        printf("handle_mmio - EXIT: get guest instruction fault\n");
        domain_crash_synchronous();
    }

    init_instruction(&mmio_inst);

    if (vmx_decode(inst, &mmio_inst) == DECODE_failure) {
        printf("mmio opcode: va 0x%lx, gpa 0x%lx, len %ld:",
               va, gpa, inst_len);
        for (i = 0; i < inst_len; i++)
            printf(" %02x", inst[i] & 0xFF);
        printf("\n");
        domain_crash_synchronous();
    }
734 switch (mmio_inst.instr) {
735 case INSTR_MOV:
736 mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
737 break;
739 case INSTR_MOVS:
740 {
741 unsigned long count = GET_REPEAT_COUNT();
742 unsigned long size = mmio_inst.op_size;
743 int sign = regs->eflags & EF_DF ? -1 : 1;
744 unsigned long addr = 0;
745 int dir;
747 /* determine non-MMIO address */
748 if (vm86) {
749 unsigned long seg;
751 __vmread(GUEST_ES_SELECTOR, &seg);
752 if (((seg << 4) + (regs->edi & 0xFFFF)) == va) {
753 dir = IOREQ_WRITE;
754 __vmread(GUEST_DS_SELECTOR, &seg);
755 addr = (seg << 4) + (regs->esi & 0xFFFF);
756 } else {
757 dir = IOREQ_READ;
758 addr = (seg << 4) + (regs->edi & 0xFFFF);
759 }
760 } else {
761 if (va == regs->edi) {
762 dir = IOREQ_WRITE;
763 addr = regs->esi;
764 } else {
765 dir = IOREQ_READ;
766 addr = regs->edi;
767 }
768 }
770 mpcip->flags = mmio_inst.flags;
771 mpcip->instr = mmio_inst.instr;

        /*
         * In case of a movs spanning multiple pages, we break the accesses
         * up into multiple pages (the device model works with non-contiguous
         * physical guest pages). To copy just one page, we adjust %ecx and
         * do not advance %eip so that the next "rep movs" copies the next page.
         * Unaligned accesses, for example movsl starting at PGSZ-2, are
         * turned into a single copy where we handle the overlapping memory
         * copy ourselves. After this copy succeeds, "rep movs" is executed
         * again.
         */
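        /*
         * Illustrative numbers (not from the original source): a movsl
         * (size 4) at page offset 0xFFE overlaps the page boundary and
         * takes the OVERLAP path below, while a "rep movsl" starting at
         * offset 0xFF0 with DF clear gets its count clamped to
         * (PAGE_SIZE - 0xFF0) / 4 = 4 iterations for this round.
         */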
        if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
            unsigned long value = 0;

            mpcip->flags |= OVERLAP;

            regs->eip -= inst_len; /* do not advance %eip */

            if (dir == IOREQ_WRITE)
                vmx_copy(&value, addr, size, VMX_COPY_IN);
            send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, 0);
        } else {
            if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
                regs->eip -= inst_len; /* do not advance %eip */

                if (sign > 0)
                    count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
                else
                    count = (addr & ~PAGE_MASK) / size;
            }

            send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, 1);
        }
        break;
    }

    case INSTR_MOVZ:
        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_STOS:
        /*
         * Since the destination is always in (contiguous) mmio space we don't
         * need to break it up into pages.
         */
        mpcip->flags = mmio_inst.flags;
        mpcip->instr = mmio_inst.instr;
        send_mmio_req(IOREQ_TYPE_COPY, gpa,
                      GET_REPEAT_COUNT(), mmio_inst.op_size, regs->eax, IOREQ_WRITE, 0);
        break;

    case INSTR_OR:
        mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_AND:
        mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_XOR:
        mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_CMP:
        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_TEST:
        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
        break;

    default:
        printf("Unhandled MMIO instruction\n");
        domain_crash_synchronous();
    }
}

#endif /* CONFIG_VMX */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */