ia64/xen-unstable

view xen/arch/x86/vmx_platform.c @ 5719:afe05231fe25

cmpl workaround for mmio regions

Although we don't normally expect a guest to use cmpl against an MMIO region,
this may happen due to bugs. This workaround is needed for 64 bit linux-2.6.

Signed-off-by: Chengyuan Li <chengyuan.li@intel.com>
Signed-off-by: Arun Sharma <arun.sharma@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Jul 11 09:00:12 2005 +0000 (2005-07-11)
parents 3de8cebfb231
children dd798dd2abce
line source
1 /*
2 * vmx_platform.c: handling x86 platform related MMIO instructions
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 */
20 #include <xen/config.h>
21 #include <xen/types.h>
22 #include <xen/mm.h>
23 #include <asm/shadow.h>
24 #include <xen/domain_page.h>
25 #include <asm/page.h>
26 #include <xen/event.h>
27 #include <xen/trace.h>
28 #include <asm/vmx.h>
29 #include <asm/vmx_platform.h>
30 #include <public/io/ioreq.h>
32 #include <xen/lib.h>
33 #include <xen/sched.h>
34 #include <asm/current.h>
#ifdef CONFIG_VMX

/* Return codes for vmx_decode(). */
#define DECODE_success 1
#define DECODE_failure 0
41 #if defined (__x86_64__)
42 void store_cpu_user_regs(struct cpu_user_regs *regs)
43 {
44 __vmread(GUEST_SS_SELECTOR, &regs->ss);
45 __vmread(GUEST_RSP, &regs->rsp);
46 __vmread(GUEST_RFLAGS, &regs->rflags);
47 __vmread(GUEST_CS_SELECTOR, &regs->cs);
48 __vmread(GUEST_DS_SELECTOR, &regs->ds);
49 __vmread(GUEST_ES_SELECTOR, &regs->es);
50 __vmread(GUEST_RIP, &regs->rip);
51 }
53 static inline long __get_reg_value(unsigned long reg, int size)
54 {
55 switch(size) {
56 case BYTE_64:
57 return (char)(reg & 0xFF);
58 case WORD:
59 return (short)(reg & 0xFFFF);
60 case LONG:
61 return (int)(reg & 0xFFFFFFFF);
62 case QUAD:
63 return (long)(reg);
64 default:
65 printk("Error: <__get_reg_value>Invalid reg size\n");
66 domain_crash_synchronous();
67 }
68 }
70 static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
71 {
72 if (size == BYTE) {
73 switch (index) {
74 case 0: //%al
75 return (char)(regs->rax & 0xFF);
76 case 1: //%cl
77 return (char)(regs->rcx & 0xFF);
78 case 2: //%dl
79 return (char)(regs->rdx & 0xFF);
80 case 3: //%bl
81 return (char)(regs->rbx & 0xFF);
82 case 4: //%ah
83 return (char)((regs->rax & 0xFF00) >> 8);
84 case 5: //%ch
85 return (char)((regs->rcx & 0xFF00) >> 8);
86 case 6: //%dh
87 return (char)((regs->rdx & 0xFF00) >> 8);
88 case 7: //%bh
89 return (char)((regs->rbx & 0xFF00) >> 8);
90 default:
91 printk("Error: (get_reg_value)Invalid index value\n");
92 domain_crash_synchronous();
93 }
95 }
96 switch (index) {
97 case 0: return __get_reg_value(regs->rax, size);
98 case 1: return __get_reg_value(regs->rcx, size);
99 case 2: return __get_reg_value(regs->rdx, size);
100 case 3: return __get_reg_value(regs->rbx, size);
101 case 4: return __get_reg_value(regs->rsp, size);
102 case 5: return __get_reg_value(regs->rbp, size);
103 case 6: return __get_reg_value(regs->rsi, size);
104 case 7: return __get_reg_value(regs->rdi, size);
105 case 8: return __get_reg_value(regs->r8, size);
106 case 9: return __get_reg_value(regs->r9, size);
107 case 10: return __get_reg_value(regs->r10, size);
108 case 11: return __get_reg_value(regs->r11, size);
109 case 12: return __get_reg_value(regs->r12, size);
110 case 13: return __get_reg_value(regs->r13, size);
111 case 14: return __get_reg_value(regs->r14, size);
112 case 15: return __get_reg_value(regs->r15, size);
113 default:
114 printk("Error: (get_reg_value)Invalid index value\n");
115 domain_crash_synchronous();
116 }
117 }
118 #elif defined (__i386__)
119 void store_cpu_user_regs(struct cpu_user_regs *regs)
120 {
121 __vmread(GUEST_SS_SELECTOR, &regs->ss);
122 __vmread(GUEST_RSP, &regs->esp);
123 __vmread(GUEST_RFLAGS, &regs->eflags);
124 __vmread(GUEST_CS_SELECTOR, &regs->cs);
125 __vmread(GUEST_DS_SELECTOR, &regs->ds);
126 __vmread(GUEST_ES_SELECTOR, &regs->es);
127 __vmread(GUEST_RIP, &regs->eip);
128 }
130 static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
131 {
132 /*
133 * Reference the db_reg[] table
134 */
135 switch (size) {
136 case BYTE:
137 switch (index) {
138 case 0: //%al
139 return (char)(regs->eax & 0xFF);
140 case 1: //%cl
141 return (char)(regs->ecx & 0xFF);
142 case 2: //%dl
143 return (char)(regs->edx & 0xFF);
144 case 3: //%bl
145 return (char)(regs->ebx & 0xFF);
146 case 4: //%ah
147 return (char)((regs->eax & 0xFF00) >> 8);
148 case 5: //%ch
149 return (char)((regs->ecx & 0xFF00) >> 8);
150 case 6: //%dh
151 return (char)((regs->edx & 0xFF00) >> 8);
152 case 7: //%bh
153 return (char)((regs->ebx & 0xFF00) >> 8);
154 default:
155 printk("Error: (get_reg_value)size case 0 error\n");
156 domain_crash_synchronous();
157 }
158 case WORD:
159 switch (index) {
160 case 0: //%ax
161 return (short)(regs->eax & 0xFFFF);
162 case 1: //%cx
163 return (short)(regs->ecx & 0xFFFF);
164 case 2: //%dx
165 return (short)(regs->edx & 0xFFFF);
166 case 3: //%bx
167 return (short)(regs->ebx & 0xFFFF);
168 case 4: //%sp
169 return (short)(regs->esp & 0xFFFF);
170 break;
171 case 5: //%bp
172 return (short)(regs->ebp & 0xFFFF);
173 case 6: //%si
174 return (short)(regs->esi & 0xFFFF);
175 case 7: //%di
176 return (short)(regs->edi & 0xFFFF);
177 default:
178 printk("Error: (get_reg_value)size case 1 error\n");
179 domain_crash_synchronous();
180 }
181 case LONG:
182 switch (index) {
183 case 0: //%eax
184 return regs->eax;
185 case 1: //%ecx
186 return regs->ecx;
187 case 2: //%edx
188 return regs->edx;
190 case 3: //%ebx
191 return regs->ebx;
192 case 4: //%esp
193 return regs->esp;
194 case 5: //%ebp
195 return regs->ebp;
196 case 6: //%esi
197 return regs->esi;
198 case 7: //%edi
199 return regs->edi;
200 default:
201 printk("Error: (get_reg_value)size case 2 error\n");
202 domain_crash_synchronous();
203 }
204 default:
205 printk("Error: (get_reg_value)size case error\n");
206 domain_crash_synchronous();
207 }
208 }
209 #endif
211 static inline const unsigned char *check_prefix(const unsigned char *inst, struct instruction *thread_inst, unsigned char *rex_p)
212 {
213 while (1) {
214 switch (*inst) {
215 /* rex prefix for em64t instructions*/
216 case 0x40 ... 0x4e:
217 *rex_p = *inst;
218 break;
220 case 0xf3: //REPZ
221 thread_inst->flags = REPZ;
222 break;
223 case 0xf2: //REPNZ
224 thread_inst->flags = REPNZ;
225 break;
226 case 0xf0: //LOCK
227 break;
228 case 0x2e: //CS
229 case 0x36: //SS
230 case 0x3e: //DS
231 case 0x26: //ES
232 case 0x64: //FS
233 case 0x65: //GS
234 thread_inst->seg_sel = *inst;
235 break;
236 case 0x66: //32bit->16bit
237 thread_inst->op_size = WORD;
238 break;
239 case 0x67:
240 printf("Error: Not handling 0x67 (yet)\n");
241 domain_crash_synchronous();
242 break;
243 default:
244 return inst;
245 }
246 inst++;
247 }
248 }
250 static inline unsigned long get_immediate(int op16, const unsigned char *inst, int op_size)
251 {
252 int mod, reg, rm;
253 unsigned long val = 0;
254 int i;
256 mod = (*inst >> 6) & 3;
257 reg = (*inst >> 3) & 7;
258 rm = *inst & 7;
260 inst++; //skip ModR/M byte
261 if (mod != 3 && rm == 4) {
262 inst++; //skip SIB byte
263 }
265 switch(mod) {
266 case 0:
267 if (rm == 5 || rm == 4) {
268 if (op16)
269 inst = inst + 2; //disp16, skip 2 bytes
270 else
271 inst = inst + 4; //disp32, skip 4 bytes
272 }
273 break;
274 case 1:
275 inst++; //disp8, skip 1 byte
276 break;
277 case 2:
278 if (op16)
279 inst = inst + 2; //disp16, skip 2 bytes
280 else
281 inst = inst + 4; //disp32, skip 4 bytes
282 break;
283 }
285 if (op_size == QUAD)
286 op_size = LONG;
288 for (i = 0; i < op_size; i++) {
289 val |= (*inst++ & 0xff) << (8 * i);
290 }
292 return val;
293 }
/*
 * Return the decoder register index (0-15) of the register operand in
 * the ModR/M byte at @inst, folding in the relevant REX extension bit:
 * when mod == 3 the r/m field names the register (extended by REX.B),
 * otherwise the reg field does (extended by REX.R).
 */
static inline int get_index(const unsigned char *inst, unsigned char rex)
{
    int mod, reg, rm;
    int rex_r, rex_b;

    mod = (*inst >> 6) & 3;
    reg = (*inst >> 3) & 7;
    rm = *inst & 7;

    rex_r = (rex >> 2) & 1;
    rex_b = rex & 1;

    //Only one operand in the instruction is register
    if (mod == 3)
        return (rm + (rex_b << 3));

    return (reg + (rex_r << 3));
    /* BUGFIX: removed unreachable trailing `return 0;' */
}
316 static void init_instruction(struct instruction *mmio_inst)
317 {
318 memset(mmio_inst->i_name, '0', I_NAME_LEN);
319 mmio_inst->op_size = 0;
320 mmio_inst->offset = 0;
321 mmio_inst->immediate = 0;
322 mmio_inst->seg_sel = 0;
323 mmio_inst->op_num = 0;
325 mmio_inst->operand[0] = 0;
326 mmio_inst->operand[1] = 0;
327 mmio_inst->operand[2] = 0;
329 mmio_inst->flags = 0;
330 }
/*
 * Operand-size helpers for vmx_decode().  Both expect a local variable
 * `rex' (the raw REX byte, 0 if none) in the caller's scope.  For a
 * byte access a REX prefix selects BYTE_64 (no AH/CH/DH/BH there);
 * for a non-byte access REX.W (bit 3) selects QUAD, otherwise LONG
 * unless a 0x66 prefix already forced WORD.
 */
#define GET_OP_SIZE_FOR_BYTE(op_size) \
    do {if (rex) op_size = BYTE_64;else op_size = BYTE;} while(0)

#define GET_OP_SIZE_FOR_NONEBYTE(op_size) \
    do {if (rex & 0x8) op_size = QUAD; else if (op_size != WORD) op_size = LONG;} while(0)
/*
 * Decode the MMIO-faulting instruction at @inst into @thread_inst.
 * Only instruction forms expected against MMIO are recognised: the
 * mov family, movs/stos string ops, movzx, and a workaround entry for
 * cmpl.  Returns DECODE_success or DECODE_failure.
 */
static int vmx_decode(const unsigned char *inst, struct instruction *thread_inst)
{
    unsigned long eflags;
    int index, vm86 = 0;
    unsigned char rex = 0;
    unsigned char tmp_size = 0;

    init_instruction(thread_inst);

    /* Strip prefixes; records op-size/segment/REP state and REX byte. */
    inst = check_prefix(inst, thread_inst, &rex);

    __vmread(GUEST_RFLAGS, &eflags);
    if (eflags & X86_EFLAGS_VM)
        vm86 = 1;

    if (vm86) { /* meaning is reversed: default is 16-bit, 0x66 -> 32-bit */
        if (thread_inst->op_size == WORD)
            thread_inst->op_size = LONG;
        else if (thread_inst->op_size == LONG)
            thread_inst->op_size = WORD;
        else if (thread_inst->op_size == 0)
            thread_inst->op_size = WORD;
    }

    switch(*inst) {
    case 0x81:
        /* This is only a workaround for cmpl instruction: no operands
         * are decoded; handle_mmio() just fakes the flags result. */
        strcpy((char *)thread_inst->i_name, "cmp");
        return DECODE_success;

    case 0x88:
        /* mov r8 to m8 */
        thread_inst->op_size = BYTE;
        index = get_index((inst + 1), rex);
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        thread_inst->operand[0] = mk_operand(tmp_size, index, 0, REGISTER);
        break;
    case 0x89:
        /* mov r32/16 to m32/16 */
        index = get_index((inst + 1), rex);
        GET_OP_SIZE_FOR_NONEBYTE(thread_inst->op_size);
        thread_inst->operand[0] = mk_operand(thread_inst->op_size, index, 0, REGISTER);
        break;
    case 0x8a:
        /* mov m8 to r8 */
        thread_inst->op_size = BYTE;
        index = get_index((inst + 1), rex);
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        thread_inst->operand[1] = mk_operand(tmp_size, index, 0, REGISTER);
        break;
    case 0x8b:
        /* mov m32/16 to r32/16 (memory is the source here) */
        index = get_index((inst + 1), rex);
        GET_OP_SIZE_FOR_NONEBYTE(thread_inst->op_size);
        thread_inst->operand[1] = mk_operand(thread_inst->op_size, index, 0, REGISTER);
        break;
    case 0x8c:
    case 0x8e:
        /* mov to/from segment register -- not handled yet. */
        printk("%x, This opcode hasn't been handled yet!", *inst);
        return DECODE_failure;
    case 0xa0:
        /* mov byte to al */
        thread_inst->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        thread_inst->operand[1] = mk_operand(tmp_size, 0, 0, REGISTER);
        break;
    case 0xa1:
        /* mov word/doubleword to ax/eax */
        GET_OP_SIZE_FOR_NONEBYTE(thread_inst->op_size);
        thread_inst->operand[1] = mk_operand(thread_inst->op_size, 0, 0, REGISTER);
        break;
    case 0xa2:
        /* mov al to (seg:offset) */
        thread_inst->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        thread_inst->operand[0] = mk_operand(tmp_size, 0, 0, REGISTER);
        break;
    case 0xa3:
        /* mov ax/eax to (seg:offset) */
        GET_OP_SIZE_FOR_NONEBYTE(thread_inst->op_size);
        thread_inst->operand[0] = mk_operand(thread_inst->op_size, 0, 0, REGISTER);
        break;
    case 0xa4:
        /* movsb */
        thread_inst->op_size = BYTE;
        strcpy((char *)thread_inst->i_name, "movs");
        return DECODE_success;
    case 0xa5:
        /* movsw/movsl */
        GET_OP_SIZE_FOR_NONEBYTE(thread_inst->op_size);
        strcpy((char *)thread_inst->i_name, "movs");
        return DECODE_success;
    case 0xaa:
        /* stosb */
        thread_inst->op_size = BYTE;
        strcpy((char *)thread_inst->i_name, "stosb");
        return DECODE_success;
    case 0xab:
        /* stosw/stosl */
        if (thread_inst->op_size == WORD) {
            strcpy((char *)thread_inst->i_name, "stosw");
        } else {
            thread_inst->op_size = LONG;
            strcpy((char *)thread_inst->i_name, "stosl");
        }
        return DECODE_success;
    case 0xc6:
        /* mov imm8 to m8 */
        thread_inst->op_size = BYTE;
        thread_inst->operand[0] = mk_operand(BYTE, 0, 0, IMMEDIATE);
        thread_inst->immediate = get_immediate(vm86,
                                        (inst+1), thread_inst->op_size);
        break;
    case 0xc7:
        /* mov imm16/32 to m16/32 */
        GET_OP_SIZE_FOR_NONEBYTE(thread_inst->op_size);
        thread_inst->operand[0] = mk_operand(thread_inst->op_size, 0, 0, IMMEDIATE);
        thread_inst->immediate = get_immediate(vm86, (inst+1), thread_inst->op_size);
        break;
    case 0x0f:
        /* two-byte opcode, handled below */
        break;
    default:
        printk("%x, This opcode hasn't been handled yet!", *inst);
        return DECODE_failure;
    }

    /* All single-byte cases that fell through here are some mov form. */
    strcpy((char *)thread_inst->i_name, "mov");
    if (*inst != 0x0f) {
        return DECODE_success;
    }

    inst++;
    switch (*inst) {

    /* movz */
    case 0xb6:
        /* movzx r32/16, m8 */
        index = get_index((inst + 1), rex);
        GET_OP_SIZE_FOR_NONEBYTE(thread_inst->op_size);
        thread_inst->operand[1] = mk_operand(thread_inst->op_size, index, 0, REGISTER);
        thread_inst->op_size = BYTE;
        strcpy((char *)thread_inst->i_name, "movzb");
        return DECODE_success;
    case 0xb7:
        /* movzx r32/64, m16 */
        index = get_index((inst + 1), rex);
        if (rex & 0x8) {
            /* NOTE(review): op_size here is LONG, not WORD, although
             * movzwq reads 16 bits -- confirm this is intentional. */
            thread_inst->op_size = LONG;
            thread_inst->operand[1] = mk_operand(QUAD, index, 0, REGISTER);
        } else {
            thread_inst->op_size = WORD;
            thread_inst->operand[1] = mk_operand(LONG, index, 0, REGISTER);
        }

        strcpy((char *)thread_inst->i_name, "movzw");

        return DECODE_success;
    default:
        printk("0f %x, This opcode hasn't been handled yet!", *inst);
        return DECODE_failure;
    }

    /* will never reach here */
    return DECODE_failure;
}
509 int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
510 {
511 unsigned long gpa;
512 unsigned long mfn;
513 unsigned char *inst_start;
514 int remaining = 0;
516 if ( (inst_len > MAX_INST_LEN) || (inst_len <= 0) )
517 return 0;
519 if ( vmx_paging_enabled(current) )
520 {
521 gpa = gva_to_gpa(guest_eip);
522 mfn = phys_to_machine_mapping(gpa >> PAGE_SHIFT);
524 /* Does this cross a page boundary ? */
525 if ( (guest_eip & PAGE_MASK) != ((guest_eip + inst_len) & PAGE_MASK) )
526 {
527 remaining = (guest_eip + inst_len) & ~PAGE_MASK;
528 inst_len -= remaining;
529 }
530 }
531 else
532 {
533 mfn = phys_to_machine_mapping(guest_eip >> PAGE_SHIFT);
534 }
536 inst_start = map_domain_page(mfn);
537 memcpy((char *)buf, inst_start + (guest_eip & ~PAGE_MASK), inst_len);
538 unmap_domain_page(inst_start);
540 if ( remaining )
541 {
542 gpa = gva_to_gpa(guest_eip+inst_len+remaining);
543 mfn = phys_to_machine_mapping(gpa >> PAGE_SHIFT);
545 inst_start = map_domain_page(mfn);
546 memcpy((char *)buf+inst_len, inst_start, remaining);
547 unmap_domain_page(inst_start);
548 }
550 return inst_len+remaining;
551 }
553 static int read_from_mmio(struct instruction *inst_p)
554 {
555 // Only for mov instruction now!!!
556 if (inst_p->operand[1] & REGISTER)
557 return 1;
559 return 0;
560 }
/*
 * Build an ioreq describing the MMIO access and dispatch it.
 *   dir:    1 read from mmio, 0 write to mmio
 *   pvalid: u.data holds a guest address (string ops) to be translated,
 *           rather than an immediate/register value
 * If an in-hypervisor intercept claims the request it is completed
 * synchronously; otherwise the device model is notified and the vcpu
 * blocks in vmx_wait_io() until the response arrives.
 */
static void send_mmio_req(unsigned long gpa,
       struct instruction *inst_p, long value, int dir, int pvalid)
{
    struct vcpu *d = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;
    int vm86;
    struct mi_per_cpu_info *mpci_p;
    struct cpu_user_regs *inst_decoder_regs;
    extern long evtchn_send(int lport);

    mpci_p = &current->domain->arch.vmx_platform.mpci;
    inst_decoder_regs = mpci_p->inst_decoder_regs;

    /* Shared page used to talk to the device model. */
    vio = get_vio(d->domain, d->vcpu_id);

    if (vio == NULL) {
        printk("bad shared page\n");
        domain_crash_synchronous();
    }
    p = &vio->vp_ioreq;

    vm86 = inst_decoder_regs->eflags & X86_EFLAGS_VM;

    /* Only one outstanding I/O request per vcpu is permitted. */
    if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) {
        printf("VMX I/O has not yet completed\n");
        domain_crash_synchronous();
    }

    set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);
    p->dir = dir;
    p->pdata_valid = pvalid;

    p->port_mm = 1;              /* memory-mapped, not port I/O */
    p->size = inst_p->op_size;
    p->addr = gpa;
    p->u.data = value;

    p->state = STATE_IOREQ_READY;

    /* REP prefix: repeat count from (e)cx, direction from EFLAGS.DF. */
    if (inst_p->flags & REPZ) {
        if (vm86)
            p->count = inst_decoder_regs->ecx & 0xFFFF;
        else
            p->count = inst_decoder_regs->ecx;
        p->df = (inst_decoder_regs->eflags & EF_DF) ? 1 : 0;
    } else
        p->count = 1;

    /* For string ops under paging, translate the guest VA operand. */
    if ((pvalid) && vmx_paging_enabled(current))
        p->u.pdata = (void *) gva_to_gpa(p->u.data);

    /* In-hypervisor intercepts get first claim on the request. */
    if (vmx_mmio_intercept(p)){
        p->state = STATE_IORESP_READY;
        vmx_io_assist(d);
        return;
    }

    /* Otherwise hand it to the device model and wait for completion. */
    evtchn_send(iopacket_port(d->domain));
    vmx_wait_io();
}
/*
 * Handle a guest access that faulted on an MMIO region: fetch and
 * decode the instruction at the guest RIP, then emulate it by sending
 * an ioreq to the device model (or faking flags for the cmp
 * workaround).
 *   va:  guest virtual address that faulted
 *   gpa: corresponding guest physical (MMIO) address
 * Crashes the domain on any instruction that cannot be handled.
 */
void handle_mmio(unsigned long va, unsigned long gpa)
{
    unsigned long eip, eflags, cs;
    unsigned long inst_len, inst_addr;
    struct mi_per_cpu_info *mpci_p;
    struct cpu_user_regs *inst_decoder_regs;
    struct instruction mmio_inst;
    unsigned char inst[MAX_INST_LEN];
    int vm86, ret;

    mpci_p = &current->domain->arch.vmx_platform.mpci;
    inst_decoder_regs = mpci_p->inst_decoder_regs;

    __vmread(GUEST_RIP, &eip);
    __vmread(INSTRUCTION_LEN, &inst_len);   /* length reported by VMX */
    __vmread(GUEST_RFLAGS, &eflags);
    vm86 = eflags & X86_EFLAGS_VM;

    if (vm86) {
        /* Real-mode style linear address: CS<<4 + IP. */
        __vmread(GUEST_CS_SELECTOR, &cs);
        inst_addr = (cs << 4) + eip;
    } else
        inst_addr = eip; /* XXX should really look at GDT[cs].base too */

    /* NOTE(review): fills with ASCII '0', not '\0'; harmless here as
     * the copy below overwrites the bytes that get decoded. */
    memset(inst, '0', MAX_INST_LEN);
    ret = inst_copy_from_guest(inst, inst_addr, inst_len);
    if (ret != inst_len) {
        printk("handle_mmio - EXIT: get guest instruction fault\n");
        domain_crash_synchronous();
    }

    init_instruction(&mmio_inst);

    if (vmx_decode(inst, &mmio_inst) == DECODE_failure) {
        printk("vmx decode failure: eip=%lx, va=%lx\n %x %x %x %x\n", eip, va,
               inst[0], inst[1], inst[2], inst[3]);
        domain_crash_synchronous();
    }

    /* Advance past the emulated instruction before possibly blocking. */
    __vmwrite(GUEST_RIP, eip + inst_len);
    store_cpu_user_regs(inst_decoder_regs);

    // Only handle "mov" and "movs" instructions!
    if (!strncmp((char *)mmio_inst.i_name, "movz", 4)) {
        if (read_from_mmio(&mmio_inst)) {
            // Send the request and waiting for return value.
            /* WZEROEXTEND tells io-assist to zero- rather than
             * sign-extend the returned value into the register. */
            mpci_p->mmio_target = mmio_inst.operand[1] | WZEROEXTEND;
            send_mmio_req(gpa, &mmio_inst, 0, IOREQ_READ, 0);
            return ;
        } else {
            /* movzx always has a register destination. */
            printk("handle_mmio - EXIT: movz error!\n");
            domain_crash_synchronous();
        }
    }

    if (!strncmp((char *)mmio_inst.i_name, "movs", 4)) {
        unsigned long addr = 0;
        int dir;

        /* Direction depends on which side of the movs hit MMIO. */
        if (vm86) {
            unsigned long seg;

            __vmread(GUEST_ES_SELECTOR, &seg);
            if (((seg << 4) + (inst_decoder_regs->edi & 0xFFFF)) == va) {
                /* Destination ES:DI is MMIO: write; source is DS:SI. */
                dir = IOREQ_WRITE;
                __vmread(GUEST_DS_SELECTOR, &seg);
                addr = (seg << 4) + (inst_decoder_regs->esi & 0xFFFF);
            } else {
                /* Source is MMIO: read; memory destination is ES:DI. */
                dir = IOREQ_READ;
                addr = (seg << 4) + (inst_decoder_regs->edi & 0xFFFF);
            }
        } else { /* XXX should really look at GDT[ds/es].base too */
            if (va == inst_decoder_regs->edi) {
                dir = IOREQ_WRITE;
                addr = inst_decoder_regs->esi;
            } else {
                dir = IOREQ_READ;
                addr = inst_decoder_regs->edi;
            }
        }

        /* addr is the non-MMIO memory operand; pvalid=1 has
         * send_mmio_req() translate it if paging is enabled. */
        send_mmio_req(gpa, &mmio_inst, addr, dir, 1);
        return;
    }

    if (!strncmp((char *)mmio_inst.i_name, "mov", 3)) {
        long value = 0;
        int size, index;

        if (read_from_mmio(&mmio_inst)) {
            // Send the request and waiting for return value.
            mpci_p->mmio_target = mmio_inst.operand[1];
            send_mmio_req(gpa, &mmio_inst, value, IOREQ_READ, 0);
            return;
        } else {
            // Write to MMIO
            if (mmio_inst.operand[0] & IMMEDIATE) {
                value = mmio_inst.immediate;
            } else if (mmio_inst.operand[0] & REGISTER) {
                size = operand_size(mmio_inst.operand[0]);
                index = operand_index(mmio_inst.operand[0]);
                value = get_reg_value(size, index, 0, inst_decoder_regs);
            } else {
                domain_crash_synchronous();
            }
            send_mmio_req(gpa, &mmio_inst, value, IOREQ_WRITE, 0);
            return;
        }
    }

    if (!strncmp((char *)mmio_inst.i_name, "stos", 4)) {
        send_mmio_req(gpa, &mmio_inst,
                      inst_decoder_regs->eax, IOREQ_WRITE, 0);
        return;
    }
    /* Workaround for cmp instruction: skip emulating the MMIO read and
     * simply clear ZF ("not equal"), then resume the guest. */
    if (!strncmp((char *)mmio_inst.i_name, "cmp", 3)) {
        inst_decoder_regs->eflags &= ~X86_EFLAGS_ZF;
        __vmwrite(GUEST_RFLAGS, inst_decoder_regs->eflags);
        return;
    }

    domain_crash_synchronous();
}
752 #endif /* CONFIG_VMX */
754 /*
755 * Local variables:
756 * mode: C
757 * c-set-style: "BSD"
758 * c-basic-offset: 4
759 * tab-width: 4
760 * indent-tabs-mode: nil
761 * End:
762 */