ia64/xen-unstable

xen/arch/x86/vmx_platform.c @ 6747:21cbdb20ff4c

An FC4/i386 install inside VMX on an x86_64 system fails because byte
size is not handled by __set_reg_value. This patch adds that support.

This patch also reindents Chengyuan Li's cmpb patch so that it aligns
with the rest of the code in that block.

Signed-off-by: Leendert van Doorn <leendert@watson.ibm.com>
author kaf24@firebug.cl.cam.ac.uk
date Sun Sep 11 09:28:21 2005 +0000 (2005-09-11)
parents a8f01a0a9559
children 4508c22dc458
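
Note: __set_reg_value is the store-side counterpart of the __get_reg_value
helper visible in this file. As a minimal sketch of what byte-size handling
in such a helper amounts to (the helper name below is an illustrative
assumption, not the actual patch hunk):

    /*
     * Sketch only: a BYTE-sized store must replace just the low 8 bits
     * of the destination register, preserving the upper bits.
     */
    static inline void set_reg_low_byte(unsigned long *reg, long value)
    {
        *reg = (*reg & ~0xFFUL) | ((unsigned long)value & 0xFF);
    }
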
/*
 * vmx_platform.c: handling x86 platform related MMIO instructions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <asm/shadow.h>
#include <xen/domain_page.h>
#include <asm/page.h>
#include <xen/event.h>
#include <xen/trace.h>
#include <asm/vmx.h>
#include <asm/vmx_platform.h>
#include <public/io/ioreq.h>

#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>
#if CONFIG_PAGING_LEVELS >= 3
#include <asm/shadow_64.h>
#endif
#ifdef CONFIG_VMX

#define DECODE_success 1
#define DECODE_failure 0

#if defined (__x86_64__)
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
    __vmread(GUEST_SS_SELECTOR, &regs->ss);
    __vmread(GUEST_RSP, &regs->rsp);
    __vmread(GUEST_RFLAGS, &regs->rflags);
    __vmread(GUEST_CS_SELECTOR, &regs->cs);
    __vmread(GUEST_DS_SELECTOR, &regs->ds);
    __vmread(GUEST_ES_SELECTOR, &regs->es);
    __vmread(GUEST_RIP, &regs->rip);
}

static inline long __get_reg_value(unsigned long reg, int size)
{
    switch(size) {
    case BYTE:
    case BYTE_64:
        return (char)(reg & 0xFF);
    case WORD:
        return (short)(reg & 0xFFFF);
    case LONG:
        return (int)(reg & 0xFFFFFFFF);
    case QUAD:
        return (long)(reg);
    default:
        printf("Error: (__get_reg_value) Invalid reg size\n");
        domain_crash_synchronous();
    }
}

long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
    if (size == BYTE) {
        switch (index) {
        case 0: /* %al */
            return (char)(regs->rax & 0xFF);
        case 1: /* %cl */
            return (char)(regs->rcx & 0xFF);
        case 2: /* %dl */
            return (char)(regs->rdx & 0xFF);
        case 3: /* %bl */
            return (char)(regs->rbx & 0xFF);
        case 4: /* %ah */
            return (char)((regs->rax & 0xFF00) >> 8);
        case 5: /* %ch */
            return (char)((regs->rcx & 0xFF00) >> 8);
        case 6: /* %dh */
            return (char)((regs->rdx & 0xFF00) >> 8);
        case 7: /* %bh */
            return (char)((regs->rbx & 0xFF00) >> 8);
        default:
            printf("Error: (get_reg_value) Invalid index value\n");
            domain_crash_synchronous();
        }
    }

    switch (index) {
    case 0: return __get_reg_value(regs->rax, size);
    case 1: return __get_reg_value(regs->rcx, size);
    case 2: return __get_reg_value(regs->rdx, size);
    case 3: return __get_reg_value(regs->rbx, size);
    case 4: return __get_reg_value(regs->rsp, size);
    case 5: return __get_reg_value(regs->rbp, size);
    case 6: return __get_reg_value(regs->rsi, size);
    case 7: return __get_reg_value(regs->rdi, size);
    case 8: return __get_reg_value(regs->r8, size);
    case 9: return __get_reg_value(regs->r9, size);
    case 10: return __get_reg_value(regs->r10, size);
    case 11: return __get_reg_value(regs->r11, size);
    case 12: return __get_reg_value(regs->r12, size);
    case 13: return __get_reg_value(regs->r13, size);
    case 14: return __get_reg_value(regs->r14, size);
    case 15: return __get_reg_value(regs->r15, size);
    default:
        printf("Error: (get_reg_value) Invalid index value\n");
        domain_crash_synchronous();
    }
}
#elif defined (__i386__)
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
    __vmread(GUEST_SS_SELECTOR, &regs->ss);
    __vmread(GUEST_RSP, &regs->esp);
    __vmread(GUEST_RFLAGS, &regs->eflags);
    __vmread(GUEST_CS_SELECTOR, &regs->cs);
    __vmread(GUEST_DS_SELECTOR, &regs->ds);
    __vmread(GUEST_ES_SELECTOR, &regs->es);
    __vmread(GUEST_RIP, &regs->eip);
}

static inline long __get_reg_value(unsigned long reg, int size)
{
    switch(size) {
    case WORD:
        return (short)(reg & 0xFFFF);
    case LONG:
        return (int)(reg & 0xFFFFFFFF);
    default:
        printf("Error: (__get_reg_value) Invalid reg size\n");
        domain_crash_synchronous();
    }
}

long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
    if (size == BYTE) {
        switch (index) {
        case 0: /* %al */
            return (char)(regs->eax & 0xFF);
        case 1: /* %cl */
            return (char)(regs->ecx & 0xFF);
        case 2: /* %dl */
            return (char)(regs->edx & 0xFF);
        case 3: /* %bl */
            return (char)(regs->ebx & 0xFF);
        case 4: /* %ah */
            return (char)((regs->eax & 0xFF00) >> 8);
        case 5: /* %ch */
            return (char)((regs->ecx & 0xFF00) >> 8);
        case 6: /* %dh */
            return (char)((regs->edx & 0xFF00) >> 8);
        case 7: /* %bh */
            return (char)((regs->ebx & 0xFF00) >> 8);
        default:
            printf("Error: (get_reg_value) Invalid index value\n");
            domain_crash_synchronous();
        }
    }

    switch (index) {
    case 0: return __get_reg_value(regs->eax, size);
    case 1: return __get_reg_value(regs->ecx, size);
    case 2: return __get_reg_value(regs->edx, size);
    case 3: return __get_reg_value(regs->ebx, size);
    case 4: return __get_reg_value(regs->esp, size);
    case 5: return __get_reg_value(regs->ebp, size);
    case 6: return __get_reg_value(regs->esi, size);
    case 7: return __get_reg_value(regs->edi, size);
    default:
        printf("Error: (get_reg_value) Invalid index value\n");
        domain_crash_synchronous();
    }
}
#endif
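
/*
 * Consume any instruction prefixes, recording REP/segment-override/
 * operand-size state in thread_inst and a REX prefix (if present) in
 * *rex_p. Returns a pointer to the opcode byte itself.
 */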
static inline unsigned char *check_prefix(unsigned char *inst,
        struct instruction *thread_inst, unsigned char *rex_p)
{
    while (1) {
        switch (*inst) {
        /* rex prefix (0x40-0x4f) for em64t instructions */
        case 0x40 ... 0x4f:
            *rex_p = *inst;
            break;
        case 0xf3: /* REPZ */
            thread_inst->flags = REPZ;
            break;
        case 0xf2: /* REPNZ */
            thread_inst->flags = REPNZ;
            break;
        case 0xf0: /* LOCK */
            break;
        case 0x2e: /* CS */
        case 0x36: /* SS */
        case 0x3e: /* DS */
        case 0x26: /* ES */
        case 0x64: /* FS */
        case 0x65: /* GS */
            thread_inst->seg_sel = *inst;
            break;
        case 0x66: /* 32bit->16bit */
            thread_inst->op_size = WORD;
            break;
        case 0x67:
            printf("Error: Not handling 0x67 (yet)\n");
            domain_crash_synchronous();
            break;
        default:
            return inst;
        }
        inst++;
    }
}
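
/*
 * Skip the ModR/M byte, and any SIB byte and displacement that follow
 * the opcode, then read the trailing immediate: op_size bytes,
 * little-endian (a QUAD operand still carries only a 32-bit immediate).
 */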
static inline unsigned long get_immediate(int op16, const unsigned char *inst, int op_size)
{
    int mod, reg, rm;
    unsigned long val = 0;
    int i;

    mod = (*inst >> 6) & 3;
    reg = (*inst >> 3) & 7;
    rm = *inst & 7;

    inst++; //skip ModR/M byte
    if (mod != 3 && rm == 4) {
        inst++; //skip SIB byte
    }

    switch(mod) {
    case 0:
        if (rm == 5 || rm == 4) {
            if (op16)
                inst = inst + 2; //disp16, skip 2 bytes
            else
                inst = inst + 4; //disp32, skip 4 bytes
        }
        break;
    case 1:
        inst++; //disp8, skip 1 byte
        break;
    case 2:
        if (op16)
            inst = inst + 2; //disp16, skip 2 bytes
        else
            inst = inst + 4; //disp32, skip 4 bytes
        break;
    }

    if (op_size == QUAD)
        op_size = LONG;

    for (i = 0; i < op_size; i++) {
        val |= (*inst++ & 0xff) << (8 * i);
    }

    return val;
}
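
/*
 * Return the number of the register operand encoded in the ModR/M byte,
 * widened with the matching REX bit (REX.B for rm, REX.R for reg).
 */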
static inline int get_index(const unsigned char *inst, unsigned char rex)
{
    int mod, reg, rm;
    int rex_r, rex_b;

    mod = (*inst >> 6) & 3;
    reg = (*inst >> 3) & 7;
    rm = *inst & 7;

    rex_r = (rex >> 2) & 1;
    rex_b = rex & 1;

    //Only one operand in the instruction is register
    if (mod == 3) {
        return (rm + (rex_b << 3));
    } else {
        return (reg + (rex_r << 3));
    }
}

static void init_instruction(struct instruction *mmio_inst)
{
    mmio_inst->instr = 0;
    mmio_inst->op_size = 0;
    mmio_inst->immediate = 0;
    mmio_inst->seg_sel = 0;

    mmio_inst->operand[0] = 0;
    mmio_inst->operand[1] = 0;

    mmio_inst->flags = 0;
}

#define GET_OP_SIZE_FOR_BYTE(op_size)   \
    do {                                \
        if (rex)                        \
            op_size = BYTE_64;          \
        else                            \
            op_size = BYTE;             \
    } while(0)

#define GET_OP_SIZE_FOR_NONEBYTE(op_size)   \
    do {                                    \
        if (rex & 0x8)                      \
            op_size = QUAD;                 \
        else if (op_size != WORD)           \
            op_size = LONG;                 \
    } while(0)

/*
 * Decode mem,accumulator operands (as in <opcode> m8/m16/m32, al,ax,eax)
 */
static int mem_acc(unsigned char size, struct instruction *instr)
{
    instr->operand[0] = mk_operand(size, 0, 0, MEMORY);
    instr->operand[1] = mk_operand(size, 0, 0, REGISTER);
    return DECODE_success;
}

/*
 * Decode accumulator,mem operands (as in <opcode> al,ax,eax, m8/m16/m32)
 */
static int acc_mem(unsigned char size, struct instruction *instr)
{
    instr->operand[0] = mk_operand(size, 0, 0, REGISTER);
    instr->operand[1] = mk_operand(size, 0, 0, MEMORY);
    return DECODE_success;
}

/*
 * Decode mem,reg operands (as in <opcode> r32/16, m32/16)
 */
static int mem_reg(unsigned char size, unsigned char *opcode,
                   struct instruction *instr, unsigned char rex)
{
    int index = get_index(opcode + 1, rex);

    instr->operand[0] = mk_operand(size, 0, 0, MEMORY);
    instr->operand[1] = mk_operand(size, index, 0, REGISTER);
    return DECODE_success;
}

/*
 * Decode reg,mem operands (as in <opcode> m32/16, r32/16)
 */
static int reg_mem(unsigned char size, unsigned char *opcode,
                   struct instruction *instr, unsigned char rex)
{
    int index = get_index(opcode + 1, rex);

    instr->operand[0] = mk_operand(size, index, 0, REGISTER);
    instr->operand[1] = mk_operand(size, 0, 0, MEMORY);
    return DECODE_success;
}

static int vmx_decode(unsigned char *opcode, struct instruction *instr)
{
    unsigned long eflags;
    int index, vm86 = 0;
    unsigned char rex = 0;
    unsigned char tmp_size = 0;

    init_instruction(instr);

    opcode = check_prefix(opcode, instr, &rex);

    __vmread(GUEST_RFLAGS, &eflags);
    if (eflags & X86_EFLAGS_VM)
        vm86 = 1;

    if (vm86) { /* meaning is reversed */
        if (instr->op_size == WORD)
            instr->op_size = LONG;
        else if (instr->op_size == LONG)
            instr->op_size = WORD;
        else if (instr->op_size == 0)
            instr->op_size = WORD;
    }

    switch (*opcode) {
    case 0x0B: /* or m32/16, r32/16 */
        instr->instr = INSTR_OR;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_reg(instr->op_size, opcode, instr, rex);

    case 0x20: /* and r8, m8 */
        instr->instr = INSTR_AND;
        GET_OP_SIZE_FOR_BYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x21: /* and r32/16, m32/16 */
        instr->instr = INSTR_AND;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x23: /* and m32/16, r32/16 */
        instr->instr = INSTR_AND;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_reg(instr->op_size, opcode, instr, rex);

    case 0x30: /* xor r8, m8 */
        instr->instr = INSTR_XOR;
        GET_OP_SIZE_FOR_BYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x31: /* xor r32/16, m32/16 */
        instr->instr = INSTR_XOR;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x39: /* cmp r32/16, m32/16 */
        instr->instr = INSTR_CMP;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x80:
    case 0x81:
        if (((opcode[1] >> 3) & 7) == 7) { /* cmp $imm, m32/16 */
            instr->instr = INSTR_CMP;

            if (opcode[0] == 0x80)
                GET_OP_SIZE_FOR_BYTE(instr->op_size);
            else
                GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);

            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
            /* 0x80 carries an imm8; 0x81 carries an imm16/32 */
            instr->immediate = get_immediate(vm86, opcode + 1,
                                 (opcode[0] == 0x80) ? BYTE : instr->op_size);
            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0x84: /* test m8, r8 */
        instr->instr = INSTR_TEST;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return mem_reg(tmp_size, opcode, instr, rex);

    case 0x88: /* mov r8, m8 */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return reg_mem(tmp_size, opcode, instr, rex);

    case 0x89: /* mov r32/16, m32/16 */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x8A: /* mov m8, r8 */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return mem_reg(tmp_size, opcode, instr, rex);

    case 0x8B: /* mov m32/16, r32/16 */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_reg(instr->op_size, opcode, instr, rex);

    case 0xA0: /* mov <addr>, al */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return mem_acc(tmp_size, instr);

    case 0xA1: /* mov <addr>, ax/eax */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_acc(instr->op_size, instr);

    case 0xA2: /* mov al, <addr> */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return acc_mem(tmp_size, instr);

    case 0xA3: /* mov ax/eax, <addr> */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return acc_mem(instr->op_size, instr);

    case 0xA4: /* movsb */
        instr->instr = INSTR_MOVS;
        instr->op_size = BYTE;
        return DECODE_success;

    case 0xA5: /* movsw/movsl */
        instr->instr = INSTR_MOVS;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return DECODE_success;

    case 0xAA: /* stosb */
        instr->instr = INSTR_STOS;
        instr->op_size = BYTE;
        return DECODE_success;

    case 0xAB: /* stosw/stosl */
        instr->instr = INSTR_STOS;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return DECODE_success;

    case 0xC6:
        if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm8, m8 */
            instr->instr = INSTR_MOV;
            instr->op_size = BYTE;

            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
            instr->immediate = get_immediate(vm86, opcode + 1, instr->op_size);
            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0xC7:
        if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm16/32, m16/32 */
            instr->instr = INSTR_MOV;
            GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);

            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
            instr->immediate = get_immediate(vm86, opcode + 1, instr->op_size);
            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0xF6:
        if (((opcode[1] >> 3) & 7) == 0) { /* testb $imm8, m8 */
            instr->instr = INSTR_TEST;
            instr->op_size = BYTE;

            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
            instr->immediate = get_immediate(vm86, opcode + 1, instr->op_size);
            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0x0F:
        break;

    default:
        printf("%x, This opcode isn't handled yet!\n", *opcode);
        return DECODE_failure;
    }

    switch (*++opcode) {
    case 0xB6: /* movz m8, r16/r32 */
        instr->instr = INSTR_MOVZ;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        index = get_index(opcode + 1, rex);
        instr->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
        instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xB7: /* movz m16, r32 */
        instr->instr = INSTR_MOVZ;
        index = get_index(opcode + 1, rex);
        if (rex & 0x8) {
            instr->op_size = LONG;
            instr->operand[1] = mk_operand(QUAD, index, 0, REGISTER);
        } else {
            instr->op_size = WORD;
            instr->operand[1] = mk_operand(LONG, index, 0, REGISTER);
        }
        instr->operand[0] = mk_operand(instr->op_size, 0, 0, MEMORY);
        return DECODE_success;

    default:
        printf("0f %x, This opcode isn't handled yet\n", *opcode);
        return DECODE_failure;
    }
}
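
/*
 * Copy up to MAX_INST_LEN instruction bytes from the guest at guest_eip.
 * Returns the number of bytes copied, or 0 on failure.
 */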
int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
{
    if (inst_len > MAX_INST_LEN || inst_len <= 0)
        return 0;
    if (!vmx_copy(buf, guest_eip, inst_len, VMX_COPY_IN))
        return 0;
    return inst_len;
}
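
/*
 * Fill in the ioreq in the shared page for this MMIO access and hand it
 * over: either through the MMIO intercept fast path, or by notifying the
 * device model's event channel and waiting for the result.
 */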
void send_mmio_req(unsigned char type, unsigned long gpa,
           unsigned long count, int size, long value, int dir, int pvalid)
{
    struct vcpu *d = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;
    int vm86;
    struct cpu_user_regs *regs;
    extern long evtchn_send(int lport);

    regs = current->domain->arch.vmx_platform.mpci.inst_decoder_regs;

    vio = get_vio(d->domain, d->vcpu_id);
    if (vio == NULL) {
        printf("bad shared page\n");
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;

    vm86 = regs->eflags & X86_EFLAGS_VM;

    if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) {
        printf("VMX I/O has not yet completed\n");
        domain_crash_synchronous();
    }

    set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);
    p->dir = dir;
    p->pdata_valid = pvalid;

    p->type = type;
    p->size = size;
    p->addr = gpa;
    p->count = count;
    p->df = regs->eflags & EF_DF ? 1 : 0;

    if (pvalid) {
        if (vmx_paging_enabled(current))
            p->u.pdata = (void *) gva_to_gpa(value);
        else
            p->u.pdata = (void *) value; /* guest VA == guest PA */
    } else
        p->u.data = value;

    p->state = STATE_IOREQ_READY;

    if (vmx_mmio_intercept(p)) {
        p->state = STATE_IORESP_READY;
        vmx_io_assist(d);
        return;
    }

    evtchn_send(iopacket_port(d->domain));
    vmx_wait_io();
}
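
/*
 * Issue the MMIO request for a decoded two-operand instruction: a register
 * or immediate source operand means a write to MMIO, a memory source
 * operand means a read from MMIO.
 */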
static void mmio_operands(int type, unsigned long gpa, struct instruction *inst,
              struct mi_per_cpu_info *mpcip, struct cpu_user_regs *regs)
{
    unsigned long value = 0;
    int index, size;

    size = operand_size(inst->operand[0]);

    mpcip->flags = inst->flags;
    mpcip->instr = inst->instr;
    mpcip->operand[0] = inst->operand[0]; /* source */
    mpcip->operand[1] = inst->operand[1]; /* destination */

    if (inst->operand[0] & REGISTER) { /* dest is memory */
        index = operand_index(inst->operand[0]);
        value = get_reg_value(size, index, 0, regs);
        send_mmio_req(type, gpa, 1, size, value, IOREQ_WRITE, 0);
    } else if (inst->operand[0] & IMMEDIATE) { /* dest is memory */
        value = inst->immediate;
        send_mmio_req(type, gpa, 1, size, value, IOREQ_WRITE, 0);
    } else if (inst->operand[0] & MEMORY) { /* dest is register */
        /* send the request and wait for the value */
        send_mmio_req(type, gpa, 1, size, 0, IOREQ_READ, 0);
    } else {
        printf("mmio_operands: invalid operand\n");
        domain_crash_synchronous();
    }
}

#define GET_REPEAT_COUNT() \
    (mmio_inst.flags & REPZ ? (vm86 ? regs->ecx & 0xFFFF : regs->ecx) : 1)

void handle_mmio(unsigned long va, unsigned long gpa)
{
    unsigned long eip, eflags, cs;
    unsigned long inst_len, inst_addr;
    struct mi_per_cpu_info *mpcip;
    struct cpu_user_regs *regs;
    struct instruction mmio_inst;
    unsigned char inst[MAX_INST_LEN];
    int i, vm86, ret;

    mpcip = &current->domain->arch.vmx_platform.mpci;
    regs = mpcip->inst_decoder_regs;

    __vmread(GUEST_RIP, &eip);
    __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
    __vmread(GUEST_RFLAGS, &eflags);
    vm86 = eflags & X86_EFLAGS_VM;

    if (vm86) {
        __vmread(GUEST_CS_SELECTOR, &cs);
        inst_addr = (cs << 4) + eip;
    } else
        inst_addr = eip;

    memset(inst, 0, MAX_INST_LEN);
    ret = inst_copy_from_guest(inst, inst_addr, inst_len);
    if (ret != inst_len) {
        printf("handle_mmio - EXIT: get guest instruction fault\n");
        domain_crash_synchronous();
    }

    init_instruction(&mmio_inst);

    if (vmx_decode(inst, &mmio_inst) == DECODE_failure) {
        printf("mmio opcode: va 0x%lx, gpa 0x%lx, len %ld:",
               va, gpa, inst_len);
        for (i = 0; i < inst_len; i++)
            printf(" %02x", inst[i] & 0xFF);
        printf("\n");
        domain_crash_synchronous();
    }

    store_cpu_user_regs(regs);
    regs->eip += inst_len; /* advance %eip */

    switch (mmio_inst.instr) {
    case INSTR_MOV:
        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_MOVS:
    {
        unsigned long count = GET_REPEAT_COUNT();
        unsigned long size = mmio_inst.op_size;
        int sign = regs->eflags & EF_DF ? -1 : 1;
        unsigned long addr = 0;
        int dir;

        /* determine non-MMIO address */
        if (vm86) {
            unsigned long seg;

            __vmread(GUEST_ES_SELECTOR, &seg);
            if (((seg << 4) + (regs->edi & 0xFFFF)) == va) {
                dir = IOREQ_WRITE;
                __vmread(GUEST_DS_SELECTOR, &seg);
                addr = (seg << 4) + (regs->esi & 0xFFFF);
            } else {
                dir = IOREQ_READ;
                addr = (seg << 4) + (regs->edi & 0xFFFF);
            }
        } else {
            if (va == regs->edi) {
                dir = IOREQ_WRITE;
                addr = regs->esi;
            } else {
                dir = IOREQ_READ;
                addr = regs->edi;
            }
        }

        mpcip->flags = mmio_inst.flags;
        mpcip->instr = mmio_inst.instr;

        /*
         * In case of a movs spanning multiple pages, we break the accesses
         * up into multiple pages (the device model works with non-contiguous
         * physical guest pages). To copy just one page, we adjust %ecx and
         * do not advance %eip so that the next "rep movs" copies the next page.
         * Unaligned accesses, for example movsl starting at PGSZ-2, are
         * turned into a single copy where we handle the overlapping memory
         * copy ourselves. After this copy succeeds, "rep movs" is executed
         * again.
         */
        if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
            unsigned long value = 0;

            mpcip->flags |= OVERLAP;

            regs->eip -= inst_len; /* do not advance %eip */

            if (dir == IOREQ_WRITE)
                vmx_copy(&value, addr, size, VMX_COPY_IN);
            send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, 0);
        } else {
            if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
                regs->eip -= inst_len; /* do not advance %eip */

                if (sign > 0)
                    count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
                else
                    count = (addr & ~PAGE_MASK) / size;
            }

            send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, 1);
        }
        break;
    }

    case INSTR_MOVZ:
        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_STOS:
        /*
         * Since the destination is always in (contiguous) mmio space we don't
         * need to break it up into pages.
         */
        mpcip->flags = mmio_inst.flags;
        mpcip->instr = mmio_inst.instr;
        send_mmio_req(IOREQ_TYPE_COPY, gpa,
            GET_REPEAT_COUNT(), mmio_inst.op_size, regs->eax, IOREQ_WRITE, 0);
        break;

    case INSTR_OR:
        mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_AND:
        mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_XOR:
        mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_CMP:
        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_TEST:
        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
        break;

    default:
        printf("Unhandled MMIO instruction\n");
        domain_crash_synchronous();
    }
}

#endif /* CONFIG_VMX */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */