xen/arch/x86/vmx_platform.c @ 5716:3de8cebfb231 (ia64/xen-unstable)

More verbose log when vmx_decode() fails.
Signed-off-by: Arun Sharma <arun.sharma@intel.com>

author      kaf24@firebug.cl.cam.ac.uk
date        Mon Jul 11 08:59:22 2005 +0000
parents     ff5d7ccd8d69
children    afe05231fe25
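With this changeset, a vmx_decode() failure in handle_mmio() reports the guest eip, the faulting MMIO virtual address and the first four instruction bytes before the domain is crashed. Using the printk format from handle_mmio() below, the output would look roughly like this (the addresses and opcode bytes are illustrative only):

    vmx decode failure: eip=c01023a0, va=febc1000
     8c c8 0 0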
/*
 * vmx_platform.c: handling x86 platform related MMIO instructions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <asm/shadow.h>
#include <xen/domain_page.h>
#include <asm/page.h>
#include <xen/event.h>
#include <xen/trace.h>
#include <asm/vmx.h>
#include <asm/vmx_platform.h>
#include <public/io/ioreq.h>

#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>

#ifdef CONFIG_VMX

#define DECODE_success  1
#define DECODE_failure  0

#if defined (__x86_64__)
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
    __vmread(GUEST_SS_SELECTOR, &regs->ss);
    __vmread(GUEST_RSP, &regs->rsp);
    __vmread(GUEST_RFLAGS, &regs->rflags);
    __vmread(GUEST_CS_SELECTOR, &regs->cs);
    __vmread(GUEST_DS_SELECTOR, &regs->ds);
    __vmread(GUEST_ES_SELECTOR, &regs->es);
    __vmread(GUEST_RIP, &regs->rip);
}

static inline long __get_reg_value(unsigned long reg, int size)
{
    switch(size) {
    case BYTE_64:
        return (char)(reg & 0xFF);
    case WORD:
        return (short)(reg & 0xFFFF);
    case LONG:
        return (int)(reg & 0xFFFFFFFF);
    case QUAD:
        return (long)(reg);
    default:
        printk("Error: <__get_reg_value>Invalid reg size\n");
        domain_crash_synchronous();
    }
}

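/*
 * Read the guest register selected by the decoded operand index, sign
 * extended to a long from the requested operand size.  Separate
 * implementations exist for x86_64 and i386 below.
 */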
static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
    if (size == BYTE) {
        switch (index) {
        case 0: //%al
            return (char)(regs->rax & 0xFF);
        case 1: //%cl
            return (char)(regs->rcx & 0xFF);
        case 2: //%dl
            return (char)(regs->rdx & 0xFF);
        case 3: //%bl
            return (char)(regs->rbx & 0xFF);
        case 4: //%ah
            return (char)((regs->rax & 0xFF00) >> 8);
        case 5: //%ch
            return (char)((regs->rcx & 0xFF00) >> 8);
        case 6: //%dh
            return (char)((regs->rdx & 0xFF00) >> 8);
        case 7: //%bh
            return (char)((regs->rbx & 0xFF00) >> 8);
        default:
            printk("Error: (get_reg_value)Invalid index value\n");
            domain_crash_synchronous();
        }
    }

    switch (index) {
    case 0: return __get_reg_value(regs->rax, size);
    case 1: return __get_reg_value(regs->rcx, size);
    case 2: return __get_reg_value(regs->rdx, size);
    case 3: return __get_reg_value(regs->rbx, size);
    case 4: return __get_reg_value(regs->rsp, size);
    case 5: return __get_reg_value(regs->rbp, size);
    case 6: return __get_reg_value(regs->rsi, size);
    case 7: return __get_reg_value(regs->rdi, size);
    case 8: return __get_reg_value(regs->r8, size);
    case 9: return __get_reg_value(regs->r9, size);
    case 10: return __get_reg_value(regs->r10, size);
    case 11: return __get_reg_value(regs->r11, size);
    case 12: return __get_reg_value(regs->r12, size);
    case 13: return __get_reg_value(regs->r13, size);
    case 14: return __get_reg_value(regs->r14, size);
    case 15: return __get_reg_value(regs->r15, size);
    default:
        printk("Error: (get_reg_value)Invalid index value\n");
        domain_crash_synchronous();
    }
}

#elif defined (__i386__)
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
    __vmread(GUEST_SS_SELECTOR, &regs->ss);
    __vmread(GUEST_RSP, &regs->esp);
    __vmread(GUEST_RFLAGS, &regs->eflags);
    __vmread(GUEST_CS_SELECTOR, &regs->cs);
    __vmread(GUEST_DS_SELECTOR, &regs->ds);
    __vmread(GUEST_ES_SELECTOR, &regs->es);
    __vmread(GUEST_RIP, &regs->eip);
}

static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
    /*
     * Reference the db_reg[] table
     */
    switch (size) {
    case BYTE:
        switch (index) {
        case 0: //%al
            return (char)(regs->eax & 0xFF);
        case 1: //%cl
            return (char)(regs->ecx & 0xFF);
        case 2: //%dl
            return (char)(regs->edx & 0xFF);
        case 3: //%bl
            return (char)(regs->ebx & 0xFF);
        case 4: //%ah
            return (char)((regs->eax & 0xFF00) >> 8);
        case 5: //%ch
            return (char)((regs->ecx & 0xFF00) >> 8);
        case 6: //%dh
            return (char)((regs->edx & 0xFF00) >> 8);
        case 7: //%bh
            return (char)((regs->ebx & 0xFF00) >> 8);
        default:
            printk("Error: (get_reg_value)size case 0 error\n");
            domain_crash_synchronous();
        }
    case WORD:
        switch (index) {
        case 0: //%ax
            return (short)(regs->eax & 0xFFFF);
        case 1: //%cx
            return (short)(regs->ecx & 0xFFFF);
        case 2: //%dx
            return (short)(regs->edx & 0xFFFF);
        case 3: //%bx
            return (short)(regs->ebx & 0xFFFF);
        case 4: //%sp
            return (short)(regs->esp & 0xFFFF);
        case 5: //%bp
            return (short)(regs->ebp & 0xFFFF);
        case 6: //%si
            return (short)(regs->esi & 0xFFFF);
        case 7: //%di
            return (short)(regs->edi & 0xFFFF);
        default:
            printk("Error: (get_reg_value)size case 1 error\n");
            domain_crash_synchronous();
        }
    case LONG:
        switch (index) {
        case 0: //%eax
            return regs->eax;
        case 1: //%ecx
            return regs->ecx;
        case 2: //%edx
            return regs->edx;
        case 3: //%ebx
            return regs->ebx;
        case 4: //%esp
            return regs->esp;
        case 5: //%ebp
            return regs->ebp;
        case 6: //%esi
            return regs->esi;
        case 7: //%edi
            return regs->edi;
        default:
            printk("Error: (get_reg_value)size case 2 error\n");
            domain_crash_synchronous();
        }
    default:
        printk("Error: (get_reg_value)size case error\n");
        domain_crash_synchronous();
    }
}

#endif

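/*
 * Skip any legacy and REX prefixes in front of the opcode, recording the
 * REP flags, segment override and operand-size override in *thread_inst
 * and the REX byte (if any) in *rex_p.  Returns a pointer to the opcode.
 */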
static inline const unsigned char *check_prefix(const unsigned char *inst, struct instruction *thread_inst, unsigned char *rex_p)
{
    while (1) {
        switch (*inst) {
        /* rex prefix for em64t instructions */
        case 0x40 ... 0x4f:
            *rex_p = *inst;
            break;
        case 0xf3: //REPZ
            thread_inst->flags = REPZ;
            break;
        case 0xf2: //REPNZ
            thread_inst->flags = REPNZ;
            break;
        case 0xf0: //LOCK
            break;
        case 0x2e: //CS
        case 0x36: //SS
        case 0x3e: //DS
        case 0x26: //ES
        case 0x64: //FS
        case 0x65: //GS
            thread_inst->seg_sel = *inst;
            break;
        case 0x66: //32bit->16bit
            thread_inst->op_size = WORD;
            break;
        case 0x67:
            printf("Error: Not handling 0x67 (yet)\n");
            domain_crash_synchronous();
            break;
        default:
            return inst;
        }
        inst++;
    }
}

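/*
 * Walk the ModR/M, SIB and displacement bytes that follow the opcode and
 * return the immediate operand found after them.  op16 selects 16-bit
 * displacement sizes; op_size is the width of the immediate (a QUAD
 * immediate is encoded as 32 bits).
 */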
static inline unsigned long get_immediate(int op16, const unsigned char *inst, int op_size)
{
    int mod, reg, rm;
    unsigned long val = 0;
    int i;

    mod = (*inst >> 6) & 3;
    reg = (*inst >> 3) & 7;
    rm = *inst & 7;

    inst++; //skip ModR/M byte
    if (mod != 3 && rm == 4) {
        inst++; //skip SIB byte
    }

    switch(mod) {
    case 0:
        if (rm == 5) {
            if (op16)
                inst = inst + 2; //disp16, skip 2 bytes
            else
                inst = inst + 4; //disp32, skip 4 bytes
        }
        break;
    case 1:
        inst++; //disp8, skip 1 byte
        break;
    case 2:
        if (op16)
            inst = inst + 2; //disp16, skip 2 bytes
        else
            inst = inst + 4; //disp32, skip 4 bytes
        break;
    }

    if (op_size == QUAD)
        op_size = LONG;

    for (i = 0; i < op_size; i++) {
        val |= (*inst++ & 0xff) << (8 * i);
    }

    return val;
}

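/*
 * Extract the register operand index from the ModR/M byte, folding in the
 * REX.R/REX.B bits so that r8-r15 can be addressed on x86_64.  When the
 * ModR/M byte encodes a register operand (mod == 3) the r/m field is used,
 * otherwise the reg field is.
 */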
static inline int get_index(const unsigned char *inst, unsigned char rex)
{
    int mod, reg, rm;
    int rex_r, rex_b;

    mod = (*inst >> 6) & 3;
    reg = (*inst >> 3) & 7;
    rm = *inst & 7;

    rex_r = (rex >> 2) & 1;
    rex_b = rex & 1;

    //Only one operand in the instruction is register
    if (mod == 3) {
        return (rm + (rex_b << 3));
    } else {
        return (reg + (rex_r << 3));
    }

    return 0;
}

static void init_instruction(struct instruction *mmio_inst)
{
    memset(mmio_inst->i_name, 0, I_NAME_LEN);
    mmio_inst->op_size = 0;
    mmio_inst->offset = 0;
    mmio_inst->immediate = 0;
    mmio_inst->seg_sel = 0;
    mmio_inst->op_num = 0;

    mmio_inst->operand[0] = 0;
    mmio_inst->operand[1] = 0;
    mmio_inst->operand[2] = 0;

    mmio_inst->flags = 0;
}

#define GET_OP_SIZE_FOR_BYTE(op_size)       \
    do { if (rex) op_size = BYTE_64; else op_size = BYTE; } while(0)

#define GET_OP_SIZE_FOR_NONEBYTE(op_size)   \
    do { if (rex & 0x8) op_size = QUAD; else if (op_size != WORD) op_size = LONG; } while(0)

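/*
 * Decode the faulting MMIO instruction at inst into thread_inst.  Only the
 * mov/movs/movz/stos opcodes needed for device emulation are recognised;
 * anything else returns DECODE_failure.
 */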
static int vmx_decode(const unsigned char *inst, struct instruction *thread_inst)
{
    unsigned long eflags;
    int index, vm86 = 0;
    unsigned char rex = 0;
    unsigned char tmp_size = 0;

    init_instruction(thread_inst);

    inst = check_prefix(inst, thread_inst, &rex);

    __vmread(GUEST_RFLAGS, &eflags);
    if (eflags & X86_EFLAGS_VM)
        vm86 = 1;

    if (vm86) { /* meaning is reversed */
        if (thread_inst->op_size == WORD)
            thread_inst->op_size = LONG;
        else if (thread_inst->op_size == LONG)
            thread_inst->op_size = WORD;
        else if (thread_inst->op_size == 0)
            thread_inst->op_size = WORD;
    }

    switch(*inst) {
    case 0x88:
        /* mov r8 to m8 */
        thread_inst->op_size = BYTE;
        index = get_index((inst + 1), rex);
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        thread_inst->operand[0] = mk_operand(tmp_size, index, 0, REGISTER);

        break;
    case 0x89:
        /* mov r32/16 to m32/16 */
        index = get_index((inst + 1), rex);
        GET_OP_SIZE_FOR_NONEBYTE(thread_inst->op_size);
        thread_inst->operand[0] = mk_operand(thread_inst->op_size, index, 0, REGISTER);

        break;
    case 0x8a:
        /* mov m8 to r8 */
        thread_inst->op_size = BYTE;
        index = get_index((inst + 1), rex);
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        thread_inst->operand[1] = mk_operand(tmp_size, index, 0, REGISTER);
        break;
    case 0x8b:
        /* mov m32/16 to r32/16 */
        index = get_index((inst + 1), rex);
        GET_OP_SIZE_FOR_NONEBYTE(thread_inst->op_size);
        thread_inst->operand[1] = mk_operand(thread_inst->op_size, index, 0, REGISTER);
        break;
    case 0x8c:
    case 0x8e:
        /* Not handled yet. */
        printk("%x, This opcode hasn't been handled yet!\n", *inst);
        return DECODE_failure;
    case 0xa0:
        /* mov byte to al */
        thread_inst->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        thread_inst->operand[1] = mk_operand(tmp_size, 0, 0, REGISTER);
        break;
    case 0xa1:
        /* mov word/doubleword to ax/eax */
        GET_OP_SIZE_FOR_NONEBYTE(thread_inst->op_size);
        thread_inst->operand[1] = mk_operand(thread_inst->op_size, 0, 0, REGISTER);

        break;
    case 0xa2:
        /* mov al to (seg:offset) */
        thread_inst->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        thread_inst->operand[0] = mk_operand(tmp_size, 0, 0, REGISTER);
        break;
    case 0xa3:
        /* mov ax/eax to (seg:offset) */
        GET_OP_SIZE_FOR_NONEBYTE(thread_inst->op_size);
        thread_inst->operand[0] = mk_operand(thread_inst->op_size, 0, 0, REGISTER);
        break;
    case 0xa4:
        /* movsb */
        thread_inst->op_size = BYTE;
        strcpy((char *)thread_inst->i_name, "movs");
        return DECODE_success;
    case 0xa5:
        /* movsw/movsl */
        GET_OP_SIZE_FOR_NONEBYTE(thread_inst->op_size);
        strcpy((char *)thread_inst->i_name, "movs");
        return DECODE_success;
    case 0xaa:
        /* stosb */
        thread_inst->op_size = BYTE;
        strcpy((char *)thread_inst->i_name, "stosb");
        return DECODE_success;
    case 0xab:
        /* stosw/stosl */
        if (thread_inst->op_size == WORD) {
            strcpy((char *)thread_inst->i_name, "stosw");
        } else {
            thread_inst->op_size = LONG;
            strcpy((char *)thread_inst->i_name, "stosl");
        }
        return DECODE_success;
    case 0xc6:
        /* mov imm8 to m8 */
        thread_inst->op_size = BYTE;
        thread_inst->operand[0] = mk_operand(BYTE, 0, 0, IMMEDIATE);
        thread_inst->immediate = get_immediate(vm86,
                                               (inst+1), thread_inst->op_size);
        break;
    case 0xc7:
        /* mov imm16/32 to m16/32 */
        GET_OP_SIZE_FOR_NONEBYTE(thread_inst->op_size);
        thread_inst->operand[0] = mk_operand(thread_inst->op_size, 0, 0, IMMEDIATE);
        thread_inst->immediate = get_immediate(vm86, (inst+1), thread_inst->op_size);

        break;
    case 0x0f:
        break;
    default:
        printk("%x, This opcode hasn't been handled yet!\n", *inst);
        return DECODE_failure;
    }

    strcpy((char *)thread_inst->i_name, "mov");
    if (*inst != 0x0f) {
        return DECODE_success;
    }

    inst++;
    switch (*inst) {

    /* movz */
    case 0xb6:
        index = get_index((inst + 1), rex);
        GET_OP_SIZE_FOR_NONEBYTE(thread_inst->op_size);
        thread_inst->operand[1] = mk_operand(thread_inst->op_size, index, 0, REGISTER);
        thread_inst->op_size = BYTE;
        strcpy((char *)thread_inst->i_name, "movzb");

        return DECODE_success;
    case 0xb7:
        index = get_index((inst + 1), rex);
        if (rex & 0x8) {
            thread_inst->op_size = LONG;
            thread_inst->operand[1] = mk_operand(QUAD, index, 0, REGISTER);
        } else {
            thread_inst->op_size = WORD;
            thread_inst->operand[1] = mk_operand(LONG, index, 0, REGISTER);
        }

        strcpy((char *)thread_inst->i_name, "movzw");

        return DECODE_success;
    default:
        printk("0f %x, This opcode hasn't been handled yet!\n", *inst);
        return DECODE_failure;
    }

    /* will never reach here */
    return DECODE_failure;
}

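/*
 * Copy inst_len bytes of the current instruction from the guest virtual
 * address guest_eip into buf, following the guest page tables when paging
 * is enabled and handling instructions that straddle a page boundary.
 * Returns the number of bytes copied, or 0 if inst_len is out of range.
 */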
int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
{
    unsigned long gpa;
    unsigned long mfn;
    unsigned char *inst_start;
    int remaining = 0;

    if ( (inst_len > MAX_INST_LEN) || (inst_len <= 0) )
        return 0;

    if ( vmx_paging_enabled(current) )
    {
        gpa = gva_to_gpa(guest_eip);
        mfn = phys_to_machine_mapping(gpa >> PAGE_SHIFT);

        /* Does this cross a page boundary ? */
        if ( (guest_eip & PAGE_MASK) != ((guest_eip + inst_len) & PAGE_MASK) )
        {
            remaining = (guest_eip + inst_len) & ~PAGE_MASK;
            inst_len -= remaining;
        }
    }
    else
    {
        mfn = phys_to_machine_mapping(guest_eip >> PAGE_SHIFT);
    }

    inst_start = map_domain_page(mfn);
    memcpy((char *)buf, inst_start + (guest_eip & ~PAGE_MASK), inst_len);
    unmap_domain_page(inst_start);

    if ( remaining )
    {
        gpa = gva_to_gpa(guest_eip+inst_len+remaining);
        mfn = phys_to_machine_mapping(gpa >> PAGE_SHIFT);

        inst_start = map_domain_page(mfn);
        memcpy((char *)buf+inst_len, inst_start, remaining);
        unmap_domain_page(inst_start);
    }

    return inst_len+remaining;
}

static int read_from_mmio(struct instruction *inst_p)
{
    // Only for mov instruction now!!!
    if (inst_p->operand[1] & REGISTER)
        return 1;

    return 0;
}

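/*
 * Build an ioreq describing the MMIO access and hand it to the device
 * model through the shared I/O page, then block the VCPU until the
 * emulation completes (unless an internal MMIO handler intercepts it).
 */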
// dir:  1 read from mmio
//       0 write to mmio
static void send_mmio_req(unsigned long gpa,
                   struct instruction *inst_p, long value, int dir, int pvalid)
{
    struct vcpu *d = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;
    int vm86;
    struct mi_per_cpu_info *mpci_p;
    struct cpu_user_regs *inst_decoder_regs;
    extern long evtchn_send(int lport);

    mpci_p = &current->domain->arch.vmx_platform.mpci;
    inst_decoder_regs = mpci_p->inst_decoder_regs;

    vio = get_vio(d->domain, d->vcpu_id);

    if (vio == NULL) {
        printk("bad shared page\n");
        domain_crash_synchronous();
    }
    p = &vio->vp_ioreq;

    vm86 = inst_decoder_regs->eflags & X86_EFLAGS_VM;

    if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) {
        printf("VMX I/O has not yet completed\n");
        domain_crash_synchronous();
    }

    set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);
    p->dir = dir;
    p->pdata_valid = pvalid;

    p->port_mm = 1;
    p->size = inst_p->op_size;
    p->addr = gpa;
    p->u.data = value;

    p->state = STATE_IOREQ_READY;

    if (inst_p->flags & REPZ) {
        if (vm86)
            p->count = inst_decoder_regs->ecx & 0xFFFF;
        else
            p->count = inst_decoder_regs->ecx;
        p->df = (inst_decoder_regs->eflags & EF_DF) ? 1 : 0;
    } else
        p->count = 1;

    if ((pvalid) && vmx_paging_enabled(current))
        p->u.pdata = (void *) gva_to_gpa(p->u.data);

    if (vmx_mmio_intercept(p)){
        p->state = STATE_IORESP_READY;
        vmx_io_assist(d);
        return;
    }

    evtchn_send(iopacket_port(d->domain));
    vmx_wait_io();
}

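/*
 * Top-level MMIO fault handler: fetch and decode the faulting instruction,
 * advance the guest RIP past it, and forward the access to the device
 * model as an ioreq.  For reads, mpci_p->mmio_target records which
 * register to load when the response arrives.
 */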
void handle_mmio(unsigned long va, unsigned long gpa)
{
    unsigned long eip, eflags, cs;
    unsigned long inst_len, inst_addr;
    struct mi_per_cpu_info *mpci_p;
    struct cpu_user_regs *inst_decoder_regs;
    struct instruction mmio_inst;
    unsigned char inst[MAX_INST_LEN];
    int vm86, ret;

    mpci_p = &current->domain->arch.vmx_platform.mpci;
    inst_decoder_regs = mpci_p->inst_decoder_regs;

    __vmread(GUEST_RIP, &eip);
    __vmread(INSTRUCTION_LEN, &inst_len);

    __vmread(GUEST_RFLAGS, &eflags);
    vm86 = eflags & X86_EFLAGS_VM;

    if (vm86) {
        __vmread(GUEST_CS_SELECTOR, &cs);
        inst_addr = (cs << 4) + eip;
    } else
        inst_addr = eip; /* XXX should really look at GDT[cs].base too */

    memset(inst, 0, MAX_INST_LEN);
    ret = inst_copy_from_guest(inst, inst_addr, inst_len);
    if (ret != inst_len) {
        printk("handle_mmio - EXIT: get guest instruction fault\n");
        domain_crash_synchronous();
    }

    init_instruction(&mmio_inst);

    if (vmx_decode(inst, &mmio_inst) == DECODE_failure) {
        printk("vmx decode failure: eip=%lx, va=%lx\n %x %x %x %x\n", eip, va,
               inst[0], inst[1], inst[2], inst[3]);
        domain_crash_synchronous();
    }

    __vmwrite(GUEST_RIP, eip + inst_len);
    store_cpu_user_regs(inst_decoder_regs);

    // Only the mov/movz/movs/stos instructions decoded above are handled.
    if (!strncmp((char *)mmio_inst.i_name, "movz", 4)) {
        if (read_from_mmio(&mmio_inst)) {
            // Send the request and wait for the return value.
            mpci_p->mmio_target = mmio_inst.operand[1] | WZEROEXTEND;
            send_mmio_req(gpa, &mmio_inst, 0, IOREQ_READ, 0);
            return;
        } else {
            printk("handle_mmio - EXIT: movz error!\n");
            domain_crash_synchronous();
        }
    }

    if (!strncmp((char *)mmio_inst.i_name, "movs", 4)) {
        unsigned long addr = 0;
        int dir;

        if (vm86) {
            unsigned long seg;

            __vmread(GUEST_ES_SELECTOR, &seg);
            if (((seg << 4) + (inst_decoder_regs->edi & 0xFFFF)) == va) {
                dir = IOREQ_WRITE;
                __vmread(GUEST_DS_SELECTOR, &seg);
                addr = (seg << 4) + (inst_decoder_regs->esi & 0xFFFF);
            } else {
                dir = IOREQ_READ;
                addr = (seg << 4) + (inst_decoder_regs->edi & 0xFFFF);
            }
        } else { /* XXX should really look at GDT[ds/es].base too */
            if (va == inst_decoder_regs->edi) {
                dir = IOREQ_WRITE;
                addr = inst_decoder_regs->esi;
            } else {
                dir = IOREQ_READ;
                addr = inst_decoder_regs->edi;
            }
        }

        send_mmio_req(gpa, &mmio_inst, addr, dir, 1);
        return;
    }

    if (!strncmp((char *)mmio_inst.i_name, "mov", 3)) {
        long value = 0;
        int size, index;

        if (read_from_mmio(&mmio_inst)) {
            // Send the request and wait for the return value.
            mpci_p->mmio_target = mmio_inst.operand[1];
            send_mmio_req(gpa, &mmio_inst, value, IOREQ_READ, 0);
            return;
        } else {
            // Write to MMIO
            if (mmio_inst.operand[0] & IMMEDIATE) {
                value = mmio_inst.immediate;
            } else if (mmio_inst.operand[0] & REGISTER) {
                size = operand_size(mmio_inst.operand[0]);
                index = operand_index(mmio_inst.operand[0]);
                value = get_reg_value(size, index, 0, inst_decoder_regs);
            } else {
                domain_crash_synchronous();
            }
            send_mmio_req(gpa, &mmio_inst, value, IOREQ_WRITE, 0);
            return;
        }
    }

    if (!strncmp((char *)mmio_inst.i_name, "stos", 4)) {
        send_mmio_req(gpa, &mmio_inst,
                      inst_decoder_regs->eax, IOREQ_WRITE, 0);
        return;
    }

    domain_crash_synchronous();
}

#endif /* CONFIG_VMX */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */