ia64/xen-unstable

view tools/firmware/vmxassist/vm86.c @ 11698:96a6649fa691

[HVM] Add RDMSR/WRMSR instruction emulation to VMXAssist decoder
AP of PAE SMP windows will use it to set NX bit in EFER.

Signed-off-by: Xin Li <xin.b.li@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Sep 30 11:11:54 2006 +0100 (2006-09-30)
parents e5cdebf9d8ef
children a855c7d3a536
line source
1 /*
2 * vm86.c: A vm86 emulator. The main purpose of this emulator is to do as
3 * little work as possible.
4 *
5 * Leendert van Doorn, leendert@watson.ibm.com
6 * Copyright (c) 2005-2006, International Business Machines Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place - Suite 330, Boston, MA 02111-1307 USA.
20 */
21 #include "vm86.h"
22 #include "util.h"
23 #include "machine.h"
/* Top of conventional memory: segments resolved in real mode must lie below 1MB. */
25 #define HIGHMEM (1 << 20) /* 1MB */
/* Truncate a value to its low 16 bits (real-mode register/offset width). */
26 #define MASK16(v) ((v) & 0xFFFF)
/* Decoder prefix-state flags: operand-size, address-size, and segment overrides. */
28 #define DATA32 0x0001
29 #define ADDR32 0x0002
30 #define SEG_CS 0x0004
31 #define SEG_DS 0x0008
32 #define SEG_ES 0x0010
33 #define SEG_SS 0x0020
34 #define SEG_FS 0x0040
35 #define SEG_GS 0x0080
/* EIP of the previously emulated instruction (single-step bookkeeping). */
37 static unsigned prev_eip = 0;
/* Current emulator mode; 0 corresponds to VM86_REAL per states[] below — TODO confirm enumerator order. */
38 enum vm86_mode mode = 0;
/* Real-mode segment values stashed while running with protected-mode selectors. */
40 static struct regs saved_rm_regs;
42 #ifdef DEBUG
/* Bitmask of modes for which trace() output is enabled. */
43 int traceset = 0;
/* Human-readable names for enum vm86_mode, indexed by mode. */
45 char *states[] = {
46 "<VM86_REAL>",
47 "<VM86_REAL_TO_PROTECTED>",
48 "<VM86_PROTECTED_TO_REAL>",
49 "<VM86_PROTECTED>"
50 };
/* Register names indexed by ModRM register encoding, for trace output. */
52 static char *rnames[] = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di" };
53 #endif /* DEBUG */
/* Page-directory-entry "page size" bit and generic present bit. */
55 #define PDE_PS (1 << 7)
56 #define PT_ENTRY_PRESENT 0x1
58 /* We only support access to <=4G physical memory due to 1:1 mapping */
/*
 * Walk the guest's page tables by hand to translate a guest linear
 * address into a physical (== vmxassist-virtual, 1:1 mapped) address.
 * Handles the 2-level non-PAE layout (with optional 4MB PSE pages) and
 * the 3-level PAE layout (2MB large pages).  Panics on a non-present
 * entry since the emulator cannot deliver a page fault here.
 */
59 static unsigned
60 guest_linear_to_real(uint32_t base)
61 {
62 uint32_t gcr3 = oldctx.cr3;
63 uint64_t l2_mfn;
64 uint64_t l1_mfn;
65 uint64_t l0_mfn;
/* Paging disabled: linear == physical. */
67 if (!(oldctx.cr0 & CR0_PG))
68 return base;
70 if (!(oldctx.cr4 & CR4_PAE)) {
/* Non-PAE: 10-bit directory index at bits 31:22. */
71 l1_mfn = ((uint32_t *)(long)gcr3)[(base >> 22) & 0x3ff];
72 if (!(l1_mfn & PT_ENTRY_PRESENT))
73 panic("l2 entry not present\n");
/* 4MB page: directory entry maps the frame directly. */
75 if ((oldctx.cr4 & CR4_PSE) && (l1_mfn & PDE_PS)) {
76 l0_mfn = l1_mfn & 0xffc00000;
77 return l0_mfn + (base & 0x3fffff);
78 }
80 l1_mfn &= 0xfffff000;
/* Second level: 10-bit table index at bits 21:12. */
82 l0_mfn = ((uint32_t *)(long)l1_mfn)[(base >> 12) & 0x3ff];
83 if (!(l0_mfn & PT_ENTRY_PRESENT))
84 panic("l1 entry not present\n");
85 l0_mfn &= 0xfffff000;
87 return l0_mfn + (base & 0xfff);
88 } else {
/* PAE: 2-bit PDPT index at bits 31:30. */
89 l2_mfn = ((uint64_t *)(long)gcr3)[(base >> 30) & 0x3];
90 if (!(l2_mfn & PT_ENTRY_PRESENT))
91 panic("l3 entry not present\n");
92 l2_mfn &= 0x3fffff000ULL;
/* 9-bit directory index at bits 29:21. */
94 l1_mfn = ((uint64_t *)(long)l2_mfn)[(base >> 21) & 0x1ff];
95 if (!(l1_mfn & PT_ENTRY_PRESENT))
96 panic("l2 entry not present\n");
98 if (l1_mfn & PDE_PS) { /* CR4.PSE is ignored in PAE mode */
99 l0_mfn = l1_mfn & 0x3ffe00000ULL;
100 return l0_mfn + (base & 0x1fffff);
101 }
103 l1_mfn &= 0x3fffff000ULL;
/* 9-bit table index at bits 20:12. */
105 l0_mfn = ((uint64_t *)(long)l1_mfn)[(base >> 12) & 0x1ff];
106 if (!(l0_mfn & PT_ENTRY_PRESENT))
107 panic("l1 entry not present\n");
108 l0_mfn &= 0x3fffff000ULL;
110 return l0_mfn + (base & 0xfff);
111 }
112 }
/*
 * Resolve seg:off into a flat address.  In real mode (or while still
 * executing with the old %cs during a real-to-protected transition)
 * this is the classic (seg << 4) + off; otherwise seg is treated as a
 * GDT selector and the descriptor's base/limit are used.  Panics on a
 * non-present descriptor or a limit violation.
 */
114 static unsigned
115 address(struct regs *regs, unsigned seg, unsigned off)
116 {
117 unsigned long long entry;
118 unsigned seg_base, seg_limit;
119 unsigned entry_low, entry_high;
/* A null segment is only legal while still effectively in real mode. */
121 if (seg == 0) {
122 if (mode == VM86_REAL || mode == VM86_REAL_TO_PROTECTED)
123 return off;
124 else
125 panic("segment is zero, but not in real mode!\n");
126 }
128 if (mode == VM86_REAL || seg > oldctx.gdtr_limit ||
129 (mode == VM86_REAL_TO_PROTECTED && regs->cs == seg))
130 return ((seg & 0xFFFF) << 4) + off;
/* Fetch the 8-byte descriptor; gdtr_base is a guest linear address. */
132 entry = ((unsigned long long *)
133 guest_linear_to_real(oldctx.gdtr_base))[seg >> 3];
134 entry_high = entry >> 32;
135 entry_low = entry & 0xFFFFFFFF;
/* Reassemble the scattered base (31:24, 23:16, 15:0) and limit fields. */
137 seg_base = (entry_high & 0xFF000000) | ((entry >> 16) & 0xFFFFFF);
138 seg_limit = (entry_high & 0xF0000) | (entry_low & 0xFFFF);
/* P bit set, and off within the limit (page-granular when G is set). */
140 if (entry_high & 0x8000 &&
141 ((entry_high & 0x800000 && off >> 12 <= seg_limit) ||
142 (!(entry_high & 0x800000) && off <= seg_limit)))
143 return seg_base + off;
145 panic("should never reach here in function address():\n\t"
146 "entry=0x%08x%08x, mode=%d, seg=0x%08x, offset=0x%08x\n",
147 entry_high, entry_low, mode, seg, off);
149 return 0;
150 }
#ifdef DEBUG
/*
 * Emit one trace line for the instruction that began at eip - adjust,
 * but only when tracing is enabled for the current mode (traceset).
 * Offsets print 16-bit wide for real-mode-ish modes and 32-bit wide
 * for protected-mode-ish modes.
 */
void
trace(struct regs *regs, int adjust, char *fmt, ...)
{
	unsigned off = regs->eip - adjust;
	va_list ap;

	if (!(traceset & (1 << mode)))
		return;

	if (mode == VM86_REAL_TO_PROTECTED || mode == VM86_REAL) {
		/* 16-bit, seg:off addressing */
		unsigned addr = address(regs, regs->cs, off);
		printf("0x%08x: 0x%x:0x%04x ", addr, regs->cs, off);
		printf("(%d) ", mode);
		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
		printf("\n");
	}
	if (mode == VM86_PROTECTED_TO_REAL || mode == VM86_PROTECTED) {
		/* 16-bit, gdt addressing */
		unsigned addr = address(regs, regs->cs, off);
		printf("0x%08x: 0x%x:0x%08x ", addr, regs->cs, off);
		printf("(%d) ", mode);
		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
		printf("\n");
	}
}
#endif /* DEBUG */
/*
 * Raw flat-memory accessors.  addr is a flat 32-bit address produced by
 * address(); the cast relies on vmxassist's 1:1 physical mapping.
 */
184 static inline unsigned
185 read32(unsigned addr)
186 {
187 return *(unsigned long *) addr;
188 }
190 static inline unsigned
191 read16(unsigned addr)
192 {
193 return *(unsigned short *) addr;
194 }
196 static inline unsigned
197 read8(unsigned addr)
198 {
199 return *(unsigned char *) addr;
200 }
202 static inline void
203 write32(unsigned addr, unsigned value)
204 {
205 *(unsigned long *) addr = value;
206 }
208 static inline void
209 write16(unsigned addr, unsigned value)
210 {
211 *(unsigned short *) addr = value;
212 }
214 static inline void
215 write8(unsigned addr, unsigned value)
216 {
217 *(unsigned char *) addr = value;
218 }
/*
 * Emulated guest-stack operations on ss:sp.  Only the low 16 bits of
 * uesp are used for addressing (real-mode stack semantics), although
 * the full uesp is decremented/incremented.
 */
220 static inline void
221 push32(struct regs *regs, unsigned value)
222 {
223 regs->uesp -= 4;
224 write32(address(regs, regs->uss, MASK16(regs->uesp)), value);
225 }
227 static inline void
228 push16(struct regs *regs, unsigned value)
229 {
230 regs->uesp -= 2;
231 write16(address(regs, regs->uss, MASK16(regs->uesp)), value);
232 }
234 static inline unsigned
235 pop32(struct regs *regs)
236 {
237 unsigned value = read32(address(regs, regs->uss, MASK16(regs->uesp)));
238 regs->uesp += 4;
239 return value;
240 }
242 static inline unsigned
243 pop16(struct regs *regs)
244 {
245 unsigned value = read16(address(regs, regs->uss, MASK16(regs->uesp)));
246 regs->uesp += 2;
247 return value;
248 }
/*
 * Instruction-stream fetchers: read from cs:ip and advance eip.
 * Note only the low 16 bits of eip are used to form the address.
 */
250 static inline unsigned
251 fetch32(struct regs *regs)
252 {
253 unsigned addr = address(regs, regs->cs, MASK16(regs->eip));
255 regs->eip += 4;
256 return read32(addr);
257 }
259 static inline unsigned
260 fetch16(struct regs *regs)
261 {
262 unsigned addr = address(regs, regs->cs, MASK16(regs->eip));
264 regs->eip += 2;
265 return read16(addr);
266 }
268 static inline unsigned
269 fetch8(struct regs *regs)
270 {
271 unsigned addr = address(regs, regs->cs, MASK16(regs->eip));
273 regs->eip++;
274 return read8(addr);
275 }
277 static unsigned
278 getreg32(struct regs *regs, int r)
279 {
280 switch (r & 7) {
281 case 0: return regs->eax;
282 case 1: return regs->ecx;
283 case 2: return regs->edx;
284 case 3: return regs->ebx;
285 case 4: return regs->esp;
286 case 5: return regs->ebp;
287 case 6: return regs->esi;
288 case 7: return regs->edi;
289 }
290 return ~0;
291 }
/* Return the low 16 bits of the register selected by encoding r. */
static unsigned
getreg16(struct regs *regs, int r)
{
	unsigned full = getreg32(regs, r);
	return full & 0xFFFF;
}
299 static unsigned
300 getreg8(struct regs *regs, int r)
301 {
302 switch (r & 7) {
303 case 0: return regs->eax & 0xFF; /* al */
304 case 1: return regs->ecx & 0xFF; /* cl */
305 case 2: return regs->edx & 0xFF; /* dl */
306 case 3: return regs->ebx & 0xFF; /* bl */
307 case 4: return (regs->esp >> 8) & 0xFF; /* ah */
308 case 5: return (regs->ebp >> 8) & 0xFF; /* ch */
309 case 6: return (regs->esi >> 8) & 0xFF; /* dh */
310 case 7: return (regs->edi >> 8) & 0xFF; /* bh */
311 }
312 return ~0;
313 }
315 static void
316 setreg32(struct regs *regs, int r, unsigned v)
317 {
318 switch (r & 7) {
319 case 0: regs->eax = v; break;
320 case 1: regs->ecx = v; break;
321 case 2: regs->edx = v; break;
322 case 3: regs->ebx = v; break;
323 case 4: regs->esp = v; break;
324 case 5: regs->ebp = v; break;
325 case 6: regs->esi = v; break;
326 case 7: regs->edi = v; break;
327 }
328 }
/* Replace only the low 16 bits of the selected register with v. */
static void
setreg16(struct regs *regs, int r, unsigned v)
{
	unsigned old = getreg32(regs, r);
	setreg32(regs, r, (old & ~0xFFFF) | (v & 0xFFFF));
}
336 static void
337 setreg8(struct regs *regs, int r, unsigned v)
338 {
339 v &= 0xFF;
340 switch (r & 7) {
341 case 0: regs->eax = (regs->eax & ~0xFF) | v; break;
342 case 1: regs->ecx = (regs->ecx & ~0xFF) | v; break;
343 case 2: regs->edx = (regs->edx & ~0xFF) | v; break;
344 case 3: regs->ebx = (regs->ebx & ~0xFF) | v; break;
345 case 4: regs->esp = (regs->esp & ~0xFF00) | (v << 8); break;
346 case 5: regs->ebp = (regs->ebp & ~0xFF00) | (v << 8); break;
347 case 6: regs->esi = (regs->esi & ~0xFF00) | (v << 8); break;
348 case 7: regs->edi = (regs->edi & ~0xFF00) | (v << 8); break;
349 }
350 }
352 static unsigned
353 segment(unsigned prefix, struct regs *regs, unsigned seg)
354 {
355 if (prefix & SEG_ES)
356 seg = regs->ves;
357 if (prefix & SEG_DS)
358 seg = regs->vds;
359 if (prefix & SEG_CS)
360 seg = regs->cs;
361 if (prefix & SEG_SS)
362 seg = regs->uss;
363 if (prefix & SEG_FS)
364 seg = regs->fs;
365 if (prefix & SEG_GS)
366 seg = regs->gs;
367 return seg;
368 }
/*
 * Decode a SIB byte (scale/index/base) for 32-bit addressing and return
 * the effective address.  Fetches displacement bytes from the
 * instruction stream as a side effect, so call order matters.
 */
370 static unsigned
371 sib(struct regs *regs, int mod, unsigned byte)
372 {
373 unsigned scale = (byte >> 6) & 3;
374 int index = (byte >> 3) & 7;
375 int base = byte & 7;
376 unsigned addr = 0;
378 switch (mod) {
379 case 0:
/* base == 5 with mod 0 means disp32 instead of a base register. */
380 if (base == 5)
381 addr = fetch32(regs);
382 else
383 addr = getreg32(regs, base);
384 break;
385 case 1:
/* disp8 is sign-extended via the (char) cast. */
386 addr = getreg32(regs, base) + (char) fetch8(regs);
387 break;
388 case 2:
389 addr = getreg32(regs, base) + fetch32(regs);
390 break;
391 }
/* index == 4 encodes "no index register". */
393 if (index != 4)
394 addr += getreg32(regs, index) << scale;
396 return addr;
397 }
399 /*
400 * Operand (modrm) decode
401 */
/*
 * Decode the ModRM byte and return either the flat effective address of
 * a memory operand, or (for mod == 3) the register VALUE itself.
 * Displacement/SIB bytes are consumed from the instruction stream as a
 * side effect; callers must not re-decode.  Default segment is %ds,
 * subject to override prefixes via segment().
 */
402 static unsigned
403 operand(unsigned prefix, struct regs *regs, unsigned modrm)
404 {
405 int mod, disp = 0, seg;
407 seg = segment(prefix, regs, regs->vds);
409 if (prefix & ADDR32) { /* 32-bit addressing */
410 switch ((mod = (modrm >> 6) & 3)) {
411 case 0:
412 switch (modrm & 7) {
413 case 0: return address(regs, seg, regs->eax);
414 case 1: return address(regs, seg, regs->ecx);
415 case 2: return address(regs, seg, regs->edx);
416 case 3: return address(regs, seg, regs->ebx);
417 case 4: return address(regs, seg,
418 sib(regs, mod, fetch8(regs)));
419 case 5: return address(regs, seg, fetch32(regs));
420 case 6: return address(regs, seg, regs->esi);
421 case 7: return address(regs, seg, regs->edi);
422 }
423 break;
424 case 1:
425 case 2:
/* rm == 4 defers displacement handling to sib(). */
426 if ((modrm & 7) != 4) {
427 if (mod == 1)
428 disp = (char) fetch8(regs);
429 else
430 disp = (int) fetch32(regs);
431 }
432 switch (modrm & 7) {
433 case 0: return address(regs, seg, regs->eax + disp);
434 case 1: return address(regs, seg, regs->ecx + disp);
435 case 2: return address(regs, seg, regs->edx + disp);
436 case 3: return address(regs, seg, regs->ebx + disp);
437 case 4: return address(regs, seg,
438 sib(regs, mod, fetch8(regs)));
439 case 5: return address(regs, seg, regs->ebp + disp);
440 case 6: return address(regs, seg, regs->esi + disp);
441 case 7: return address(regs, seg, regs->edi + disp);
442 }
443 break;
444 case 3:
445 return getreg32(regs, modrm);
446 }
447 } else { /* 16-bit addressing */
448 switch ((mod = (modrm >> 6) & 3)) {
449 case 0:
450 switch (modrm & 7) {
451 case 0: return address(regs, seg, MASK16(regs->ebx) +
452 MASK16(regs->esi));
453 case 1: return address(regs, seg, MASK16(regs->ebx) +
454 MASK16(regs->edi));
455 case 2: return address(regs, seg, MASK16(regs->ebp) +
456 MASK16(regs->esi));
457 case 3: return address(regs, seg, MASK16(regs->ebp) +
458 MASK16(regs->edi));
459 case 4: return address(regs, seg, MASK16(regs->esi));
460 case 5: return address(regs, seg, MASK16(regs->edi));
461 case 6: return address(regs, seg, fetch16(regs));
462 case 7: return address(regs, seg, MASK16(regs->ebx));
463 }
464 break;
465 case 1:
466 case 2:
467 if (mod == 1)
468 disp = (char) fetch8(regs);
469 else
470 disp = (int) fetch16(regs);
471 switch (modrm & 7) {
472 case 0: return address(regs, seg, MASK16(regs->ebx) +
473 MASK16(regs->esi) + disp);
474 case 1: return address(regs, seg, MASK16(regs->ebx) +
475 MASK16(regs->edi) + disp);
476 case 2: return address(regs, seg, MASK16(regs->ebp) +
477 MASK16(regs->esi) + disp);
478 case 3: return address(regs, seg, MASK16(regs->ebp) +
479 MASK16(regs->edi) + disp);
480 case 4: return address(regs, seg,
481 MASK16(regs->esi) + disp);
482 case 5: return address(regs, seg,
483 MASK16(regs->edi) + disp);
484 case 6: return address(regs, seg,
485 MASK16(regs->ebp) + disp);
486 case 7: return address(regs, seg,
487 MASK16(regs->ebx) + disp);
488 }
489 break;
490 case 3:
491 return getreg16(regs, modrm);
492 }
493 }
495 return 0;
496 }
498 /*
499 * Load new IDT
500 */
/*
 * Emulate LIDT: read the 6/10-byte pseudo-descriptor at the memory
 * operand into the saved context.  Without an operand-size prefix only
 * 24 bits of the base are used.  Returns 1 (always handled).
 */
501 static int
502 lidt(struct regs *regs, unsigned prefix, unsigned modrm)
503 {
504 unsigned eip = regs->eip - 3;
505 unsigned addr = operand(prefix, regs, modrm);
507 oldctx.idtr_limit = ((struct dtr *) addr)->size;
508 if ((prefix & DATA32) == 0)
509 oldctx.idtr_base = ((struct dtr *) addr)->base & 0xFFFFFF;
510 else
511 oldctx.idtr_base = ((struct dtr *) addr)->base;
512 TRACE((regs, regs->eip - eip, "lidt 0x%x <%d, 0x%x>",
513 addr, oldctx.idtr_limit, oldctx.idtr_base));
515 return 1;
516 }
518 /*
519 * Load new GDT
520 */
/*
 * Emulate LGDT: mirror of lidt() for the GDTR.  Returns 1 (always
 * handled).
 */
521 static int
522 lgdt(struct regs *regs, unsigned prefix, unsigned modrm)
523 {
524 unsigned eip = regs->eip - 3;
525 unsigned addr = operand(prefix, regs, modrm);
527 oldctx.gdtr_limit = ((struct dtr *) addr)->size;
528 if ((prefix & DATA32) == 0)
529 oldctx.gdtr_base = ((struct dtr *) addr)->base & 0xFFFFFF;
530 else
531 oldctx.gdtr_base = ((struct dtr *) addr)->base;
532 TRACE((regs, regs->eip - eip, "lgdt 0x%x <%d, 0x%x>",
533 addr, oldctx.gdtr_limit, oldctx.gdtr_base));
535 return 1;
536 }
538 /*
539 * Modify CR0 via the lmsw instruction.
540 */
/*
 * Emulate LMSW: merge the low 4 bits of the operand into CR0, forcing
 * PE/NE on, and kick off the real-to-protected transition if PE is set.
 * NOTE(review): for a memory operand, operand() returns the ADDRESS and
 * the & 0xF below masks the address rather than the fetched word; only
 * the register form (mod == 3) looks correct — verify against callers.
 */
541 static int
542 lmsw(struct regs *regs, unsigned prefix, unsigned modrm)
543 {
544 unsigned eip = regs->eip - 3;
545 unsigned ax = operand(prefix, regs, modrm) & 0xF;
546 unsigned cr0 = (oldctx.cr0 & 0xFFFFFFF0) | ax;
548 TRACE((regs, regs->eip - eip, "lmsw 0x%x", ax));
549 #ifndef TEST
550 oldctx.cr0 = cr0 | CR0_PE | CR0_NE;
551 #else
552 oldctx.cr0 = cr0 | CR0_PE | CR0_NE | CR0_PG;
553 #endif
554 if (cr0 & CR0_PE)
555 set_mode(regs, VM86_REAL_TO_PROTECTED);
557 return 1;
558 }
560 /*
561 * We need to handle moves that address memory beyond the 64KB segment
562 * limit that VM8086 mode enforces.
563 */
/*
 * Emulate MOV between a register and a memory operand (opcodes 0x88,
 * 0x8A, 0x89, 0x8B, 0xC6).  Returns 1 when emulated, 0 when the
 * instruction should be handled elsewhere (register-to-register forms).
 */
564 static int
565 movr(struct regs *regs, unsigned prefix, unsigned opc)
566 {
567 unsigned eip = regs->eip - 1;
568 unsigned modrm = fetch8(regs);
569 unsigned addr = operand(prefix, regs, modrm);
570 unsigned val, r = (modrm >> 3) & 7;
572 if ((modrm & 0xC0) == 0xC0) /* no registers */
573 return 0;
575 switch (opc) {
576 case 0x88: /* addr32 mov r8, r/m8 */
577 val = getreg8(regs, r);
578 TRACE((regs, regs->eip - eip,
579 "movb %%e%s, *0x%x", rnames[r], addr));
580 write8(addr, val);
581 break;
583 case 0x8A: /* addr32 mov r/m8, r8 */
584 TRACE((regs, regs->eip - eip,
585 "movb *0x%x, %%%s", addr, rnames[r]));
586 setreg8(regs, r, read8(addr));
587 break;
/* 0x89/0x8B are 16-bit by default; DATA32 widens them to 32-bit. */
589 case 0x89: /* addr32 mov r16, r/m16 */
590 val = getreg32(regs, r);
591 if (prefix & DATA32) {
592 TRACE((regs, regs->eip - eip,
593 "movl %%e%s, *0x%x", rnames[r], addr));
594 write32(addr, val);
595 } else {
596 TRACE((regs, regs->eip - eip,
597 "movw %%%s, *0x%x", rnames[r], addr));
598 write16(addr, MASK16(val));
599 }
600 break;
602 case 0x8B: /* addr32 mov r/m16, r16 */
603 if (prefix & DATA32) {
604 TRACE((regs, regs->eip - eip,
605 "movl *0x%x, %%e%s", addr, rnames[r]));
606 setreg32(regs, r, read32(addr));
607 } else {
608 TRACE((regs, regs->eip - eip,
609 "movw *0x%x, %%%s", addr, rnames[r]));
610 setreg16(regs, r, read16(addr));
611 }
612 break;
614 case 0xC6: /* addr32 movb $imm, r/m8 */
/* Only /0 is MOV; other reg-field values are different instructions. */
615 if ((modrm >> 3) & 7)
616 return 0;
617 val = fetch8(regs);
618 write8(addr, val);
619 TRACE((regs, regs->eip - eip, "movb $0x%x, *0x%x",
620 val, addr));
621 break;
622 }
623 return 1;
624 }
626 /*
627 * Move to and from a control register.
628 */
/*
 * Emulate MOV to/from CR0/CR2/CR3/CR4 (0F 20 / 0F 22).  Writes to CR0
 * force PE/NE on in the saved context and trigger the appropriate mode
 * transition.  Returns 1 when emulated, 0 for memory forms.
 */
629 static int
630 movcr(struct regs *regs, unsigned prefix, unsigned opc)
631 {
632 unsigned eip = regs->eip - 2;
633 unsigned modrm = fetch8(regs);
634 unsigned cr = (modrm >> 3) & 7;
636 if ((modrm & 0xC0) != 0xC0) /* only registers */
637 return 0;
639 switch (opc) {
640 case 0x20: /* mov Rd, Cd */
641 TRACE((regs, regs->eip - eip, "movl %%cr%d, %%eax", cr));
642 switch (cr) {
643 case 0:
/* Hide the PE/NE (and, under TEST, PG) bits the monitor forces on. */
644 #ifndef TEST
645 setreg32(regs, modrm,
646 oldctx.cr0 & ~(CR0_PE | CR0_NE));
647 #else
648 setreg32(regs, modrm,
649 oldctx.cr0 & ~(CR0_PE | CR0_NE | CR0_PG));
650 #endif
651 break;
652 case 2:
653 setreg32(regs, modrm, get_cr2());
654 break;
655 case 3:
656 setreg32(regs, modrm, oldctx.cr3);
657 break;
658 case 4:
659 setreg32(regs, modrm, oldctx.cr4);
660 break;
661 }
662 break;
663 case 0x22: /* mov Cd, Rd */
664 TRACE((regs, regs->eip - eip, "movl %%eax, %%cr%d", cr));
665 switch (cr) {
666 case 0:
667 oldctx.cr0 = getreg32(regs, modrm) | (CR0_PE | CR0_NE);
668 #ifdef TEST
669 oldctx.cr0 |= CR0_PG;
670 #endif
671 if (getreg32(regs, modrm) & CR0_PE)
672 set_mode(regs, VM86_REAL_TO_PROTECTED);
673 else
674 set_mode(regs, VM86_REAL);
675 break;
676 case 3:
677 oldctx.cr3 = getreg32(regs, modrm);
678 break;
679 case 4:
680 oldctx.cr4 = getreg32(regs, modrm);
681 break;
682 }
683 break;
684 }
686 return 1;
687 }
689 static inline void set_eflags_ZF(unsigned mask, unsigned v1, struct regs *regs)
690 {
691 if ((v1 & mask) == 0)
692 regs->eflags |= EFLAGS_ZF;
693 else
694 regs->eflags &= ~EFLAGS_ZF;
695 }
697 /*
698 * We need to handle cmp opcodes that address memory beyond the 64KB
699 * segment limit that VM8086 mode enforces.
700 */
/*
 * Emulate CMP r16/32, r/m16/32 (opcode 0x39).  Only ZF is updated —
 * callers here only rely on equality tests.  Returns 1 when emulated,
 * 0 for register-to-register forms.
 */
701 static int
702 cmp(struct regs *regs, unsigned prefix, unsigned opc)
703 {
704 unsigned eip = regs->eip - 1;
705 unsigned modrm = fetch8(regs);
706 unsigned addr = operand(prefix, regs, modrm);
707 unsigned diff, val, r = (modrm >> 3) & 7;
709 if ((modrm & 0xC0) == 0xC0) /* no registers */
710 return 0;
712 switch (opc) {
713 case 0x39: /* addr32 cmp r16, r/m16 */
714 val = getreg32(regs, r);
715 if (prefix & DATA32) {
716 diff = read32(addr) - val;
717 set_eflags_ZF(~0, diff, regs);
719 TRACE((regs, regs->eip - eip,
720 "cmp %%e%s, *0x%x (0x%x)",
721 rnames[r], addr, diff));
722 } else {
723 diff = read16(addr) - val;
724 set_eflags_ZF(0xFFFF, diff, regs);
726 TRACE((regs, regs->eip - eip,
727 "cmp %%%s, *0x%x (0x%x)",
728 rnames[r], addr, diff));
729 }
730 break;
732 /* other cmp opcodes ... */
733 }
734 return 1;
735 }
737 /*
738 * We need to handle test opcodes that address memory beyond the 64KB
739 * segment limit that VM8086 mode enforces.
740 */
/*
 * Emulate TESTB $imm, r/m8 (opcode 0xF6 /0): AND without writeback,
 * updating only ZF.  Returns 1 when emulated, 0 otherwise.
 */
741 static int
742 test(struct regs *regs, unsigned prefix, unsigned opc)
743 {
744 unsigned eip = regs->eip - 1;
745 unsigned modrm = fetch8(regs);
746 unsigned addr = operand(prefix, regs, modrm);
747 unsigned val, diff;
749 if ((modrm & 0xC0) == 0xC0) /* no registers */
750 return 0;
752 switch (opc) {
753 case 0xF6: /* testb $imm, r/m8 */
/* Only /0 is TEST; other reg-field values are NOT/NEG/MUL/etc. */
754 if ((modrm >> 3) & 7)
755 return 0;
756 val = fetch8(regs);
757 diff = read8(addr) & val;
758 set_eflags_ZF(0xFF, diff, regs);
760 TRACE((regs, regs->eip - eip, "testb $0x%x, *0x%x (0x%x)",
761 val, addr, diff));
762 break;
764 /* other test opcodes ... */
765 }
767 return 1;
768 }
770 /*
771 * We need to handle pop opcodes that address memory beyond the 64KB
772 * segment limit that VM8086 mode enforces.
773 */
/*
 * Emulate POP r/m16/32 (opcode 0x8F /0): pop from the emulated stack
 * and store at the decoded memory operand.  Returns 1 when emulated.
 */
774 static int
775 pop(struct regs *regs, unsigned prefix, unsigned opc)
776 {
777 unsigned eip = regs->eip - 1;
778 unsigned modrm = fetch8(regs);
779 unsigned addr = operand(prefix, regs, modrm);
781 if ((modrm & 0xC0) == 0xC0) /* no registers */
782 return 0;
784 switch (opc) {
785 case 0x8F: /* pop r/m16 */
786 if ((modrm >> 3) & 7)
787 return 0;
788 if (prefix & DATA32)
789 write32(addr, pop32(regs));
790 else
791 write16(addr, pop16(regs));
792 TRACE((regs, regs->eip - eip, "pop *0x%x", addr));
793 break;
795 /* other pop opcodes ... */
796 }
798 return 1;
799 }
801 /*
802 * Emulate a segment load in protected mode
803 */
/*
 * Look up selector sel in the guest GDT and fill in base, limit and the
 * VMCS access-rights bytes.  Returns 1 on success (or for a null
 * selector, which just sets the null bit), 0 when the selector is out
 * of range or the descriptor is not present.
 */
804 static int
805 load_seg(unsigned long sel, uint32_t *base, uint32_t *limit, union vmcs_arbytes *arbytes)
806 {
807 unsigned long long entry;
809 /* protected mode: use seg as index into gdt */
810 if (sel > oldctx.gdtr_limit)
811 return 0;
813 if (sel == 0) {
814 arbytes->fields.null_bit = 1;
815 return 1;
816 }
818 entry = ((unsigned long long *)
819 guest_linear_to_real(oldctx.gdtr_base))[sel >> 3];
821 /* Check the P bit first */
/* NOTE(review): "sel != 0" here is redundant — sel == 0 returned above. */
822 if (!((entry >> (15+32)) & 0x1) && sel != 0)
823 return 0;
/* Reassemble base (bits 63:56, 39:32, 31:16) and limit (51:48, 15:0). */
825 *base = (((entry >> (56-24)) & 0xFF000000) |
826 ((entry >> (32-16)) & 0x00FF0000) |
827 ((entry >> ( 16)) & 0x0000FFFF));
828 *limit = (((entry >> (48-16)) & 0x000F0000) |
829 ((entry ) & 0x0000FFFF));
831 arbytes->bytes = 0;
832 arbytes->fields.seg_type = (entry >> (8+32)) & 0xF; /* TYPE */
833 arbytes->fields.s = (entry >> (12+32)) & 0x1; /* S */
834 if (arbytes->fields.s)
835 arbytes->fields.seg_type |= 1; /* accessed */
836 arbytes->fields.dpl = (entry >> (13+32)) & 0x3; /* DPL */
837 arbytes->fields.p = (entry >> (15+32)) & 0x1; /* P */
838 arbytes->fields.avl = (entry >> (20+32)) & 0x1; /* AVL */
839 arbytes->fields.default_ops_size = (entry >> (22+32)) & 0x1; /* D */
841 if (entry & (1ULL << (23+32))) { /* G */
842 arbytes->fields.g = 1;
/* Page-granular: scale the limit to bytes. */
843 *limit = (*limit << 12) | 0xFFF;
844 }
846 return 1;
847 }
849 /*
850 * Transition to protected mode
851 */
/*
 * Commit the transition to protected mode: snapshot eip/esp/eflags into
 * the saved context, reload every segment from the guest GDT (falling
 * back to a null segment and remembering the real-mode value in
 * saved_rm_regs when the selector is invalid), then redirect execution
 * to the 32-bit trampoline.
 */
852 static void
853 protected_mode(struct regs *regs)
854 {
855 regs->eflags &= ~(EFLAGS_TF|EFLAGS_VM);
857 oldctx.eip = regs->eip;
858 oldctx.esp = regs->uesp;
859 oldctx.eflags = regs->eflags;
861 memset(&saved_rm_regs, 0, sizeof(struct regs));
863 /* reload all segment registers */
864 if (!load_seg(regs->cs, &oldctx.cs_base,
865 &oldctx.cs_limit, &oldctx.cs_arbytes))
866 panic("Invalid %%cs=0x%x for protected mode\n", regs->cs);
867 oldctx.cs_sel = regs->cs;
869 if (load_seg(regs->ves, &oldctx.es_base,
870 &oldctx.es_limit, &oldctx.es_arbytes))
871 oldctx.es_sel = regs->ves;
872 else {
873 load_seg(0, &oldctx.es_base,
874 &oldctx.es_limit, &oldctx.es_arbytes);
875 oldctx.es_sel = 0;
876 saved_rm_regs.ves = regs->ves;
877 }
879 if (load_seg(regs->uss, &oldctx.ss_base,
880 &oldctx.ss_limit, &oldctx.ss_arbytes))
881 oldctx.ss_sel = regs->uss;
882 else {
883 load_seg(0, &oldctx.ss_base,
884 &oldctx.ss_limit, &oldctx.ss_arbytes);
885 oldctx.ss_sel = 0;
886 saved_rm_regs.uss = regs->uss;
887 }
889 if (load_seg(regs->vds, &oldctx.ds_base,
890 &oldctx.ds_limit, &oldctx.ds_arbytes))
891 oldctx.ds_sel = regs->vds;
892 else {
893 load_seg(0, &oldctx.ds_base,
894 &oldctx.ds_limit, &oldctx.ds_arbytes);
895 oldctx.ds_sel = 0;
896 saved_rm_regs.vds = regs->vds;
897 }
899 if (load_seg(regs->vfs, &oldctx.fs_base,
900 &oldctx.fs_limit, &oldctx.fs_arbytes))
901 oldctx.fs_sel = regs->vfs;
902 else {
903 load_seg(0, &oldctx.fs_base,
904 &oldctx.fs_limit, &oldctx.fs_arbytes);
905 oldctx.fs_sel = 0;
906 saved_rm_regs.vfs = regs->vfs;
907 }
909 if (load_seg(regs->vgs, &oldctx.gs_base,
910 &oldctx.gs_limit, &oldctx.gs_arbytes))
911 oldctx.gs_sel = regs->vgs;
912 else {
913 load_seg(0, &oldctx.gs_base,
914 &oldctx.gs_limit, &oldctx.gs_arbytes);
915 oldctx.gs_sel = 0;
916 saved_rm_regs.vgs = regs->vgs;
917 }
919 /* initialize jump environment to warp back to protected mode */
920 regs->cs = CODE_SELECTOR;
921 regs->ds = DATA_SELECTOR;
922 regs->es = DATA_SELECTOR;
923 regs->fs = DATA_SELECTOR;
924 regs->gs = DATA_SELECTOR;
925 regs->eip = (unsigned) &switch_to_protected_mode;
927 /* this should get us into 32-bit mode */
928 }
930 /*
931 * Start real-mode emulation
932 */
/*
 * Commit the transition back to (emulated) real mode: set the VM flag,
 * point the monitor's own segments at the flat data selector, and
 * convert the stale protected-mode %ss/%ds/%es into real-mode segment
 * values (descriptor base >> 4), or restore the saved real-mode values
 * when a null selector was in use.
 */
933 static void
934 real_mode(struct regs *regs)
935 {
/* 0x02 is the always-set reserved bit of EFLAGS. */
936 regs->eflags |= EFLAGS_VM | 0x02;
937 regs->ds = DATA_SELECTOR;
938 regs->es = DATA_SELECTOR;
939 regs->fs = DATA_SELECTOR;
940 regs->gs = DATA_SELECTOR;
942 /*
943 * When we transition from protected to real-mode and we
944 * have not reloaded the segment descriptors yet, they are
945 * interpreted as if they were in protected mode.
946 * We emulate this behavior by assuming that these memory
947 * references are below 1MB and set %ss, %ds, %es accordingly.
948 */
949 if (regs->uss != 0) {
950 if (regs->uss >= HIGHMEM)
951 panic("%%ss 0x%lx higher than 1MB", regs->uss);
952 regs->uss = address(regs, regs->uss, 0) >> 4;
953 } else {
954 regs->uss = saved_rm_regs.uss;
955 }
956 if (regs->vds != 0) {
957 if (regs->vds >= HIGHMEM)
958 panic("%%ds 0x%lx higher than 1MB", regs->vds);
959 regs->vds = address(regs, regs->vds, 0) >> 4;
960 } else {
961 regs->vds = saved_rm_regs.vds;
962 }
963 if (regs->ves != 0) {
964 if (regs->ves >= HIGHMEM)
965 panic("%%es 0x%lx higher than 1MB", regs->ves);
966 regs->ves = address(regs, regs->ves, 0) >> 4;
967 } else {
968 regs->ves = saved_rm_regs.ves;
969 }
971 /* this should get us into 16-bit mode */
972 }
974 /*
975 * This is the smarts of the emulator and handles the mode transitions. The
976 * emulator handles 4 different modes. 1) VM86_REAL: emulated real-mode,
977 * Just handle those instructions that are not supported under VM8086.
978 * 2) VM86_REAL_TO_PROTECTED: going from real-mode to protected mode. In
979 * this we single step through the instructions until we reload the
980 * new %cs (some OSes do a lot of computations before reloading %cs). 2)
981 * VM86_PROTECTED_TO_REAL when we are going from protected to real mode. In
982 * this case we emulate the instructions by hand. Finally, 4) VM86_PROTECTED
983 * when we transitioned to protected mode and we should abandon the
984 * emulator. No instructions are emulated when in VM86_PROTECTED mode.
985 */
/*
 * Validate and perform a mode transition; panics on an impossible
 * transition.  NOTE(review): closing braces for the switch and the
 * function appear to be missing below — likely lost in extraction of
 * this listing; verify against the upstream file.
 */
986 void
987 set_mode(struct regs *regs, enum vm86_mode newmode)
988 {
989 switch (newmode) {
990 case VM86_REAL:
991 if ((mode == VM86_PROTECTED_TO_REAL) ||
992 (mode == VM86_REAL_TO_PROTECTED)) {
993 regs->eflags &= ~EFLAGS_TF;
994 real_mode(regs);
995 break;
996 } else if (mode == VM86_REAL) {
997 break;
998 } else
999 panic("unexpected real mode transition");
1000 break;
1002 case VM86_REAL_TO_PROTECTED:
/* Enable single-stepping so we can watch for the %cs reload. */
1003 if (mode == VM86_REAL) {
1004 regs->eflags |= EFLAGS_TF;
1005 break;
1006 } else if (mode == VM86_REAL_TO_PROTECTED) {
1007 break;
1008 } else
1009 panic("unexpected real-to-protected mode transition");
1010 break;
1012 case VM86_PROTECTED_TO_REAL:
1013 if (mode == VM86_PROTECTED) {
1014 break;
1015 } else
1016 panic("unexpected protected-to-real mode transition");
1017 break;
1019 case VM86_PROTECTED:
1020 if (mode == VM86_REAL_TO_PROTECTED) {
1021 protected_mode(regs);
1022 // printf("<VM86_PROTECTED>\n");
1023 mode = newmode;
1024 return;
1025 } else
1026 panic("unexpected protected mode transition");
1027 break;
1030 mode = newmode;
1031 TRACE((regs, 0, states[mode]));
/*
 * Emulate a far jump with immediate target (JMP ptr16:16/ptr16:32),
 * completing a pending mode transition.  Panics if no transition is in
 * progress.  NOTE(review): the function's opening/closing braces are
 * absent in this listing — likely lost in extraction; verify upstream.
 */
1034 static void
1035 jmpl(struct regs *regs, int prefix)
1037 unsigned n = regs->eip;
1038 unsigned cs, eip;
1040 if (mode == VM86_REAL_TO_PROTECTED) { /* jump to protected mode */
1041 eip = (prefix & DATA32) ? fetch32(regs) : fetch16(regs);
1042 cs = fetch16(regs);
1044 TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
1046 regs->cs = cs;
1047 regs->eip = eip;
1048 set_mode(regs, VM86_PROTECTED);
1049 } else if (mode == VM86_PROTECTED_TO_REAL) { /* jump to real mode */
1050 eip = (prefix & DATA32) ? fetch32(regs) : fetch16(regs);
1051 cs = fetch16(regs);
1053 TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
1055 regs->cs = cs;
1056 regs->eip = eip;
1057 set_mode(regs, VM86_REAL);
1058 } else
1059 panic("jmpl");
/*
 * Emulate a far jump through a memory operand (JMP m16:16/m16:32);
 * same transition semantics as jmpl().  NOTE(review): braces appear
 * truncated in this listing — verify against upstream.
 */
1062 static void
1063 jmpl_indirect(struct regs *regs, int prefix, unsigned modrm)
1065 unsigned n = regs->eip;
1066 unsigned cs, eip;
1067 unsigned addr;
1069 addr = operand(prefix, regs, modrm);
1071 if (mode == VM86_REAL_TO_PROTECTED) { /* jump to protected mode */
1072 eip = (prefix & DATA32) ? read32(addr) : read16(addr);
1073 addr += (prefix & DATA32) ? 4 : 2;
1074 cs = read16(addr);
1076 TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
1078 regs->cs = cs;
1079 regs->eip = eip;
1080 set_mode(regs, VM86_PROTECTED);
1081 } else if (mode == VM86_PROTECTED_TO_REAL) { /* jump to real mode */
1082 eip = (prefix & DATA32) ? read32(addr) : read16(addr);
1083 addr += (prefix & DATA32) ? 4 : 2;
1084 cs = read16(addr);
1086 TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
1088 regs->cs = cs;
1089 regs->eip = eip;
1090 set_mode(regs, VM86_REAL);
1091 } else
1092 panic("jmpl");
/*
 * Emulate a far return (RETF): pop cs:eip from the emulated stack and
 * complete the pending mode transition.  NOTE(review): braces appear
 * truncated in this listing — verify against upstream.
 */
1095 static void
1096 retl(struct regs *regs, int prefix)
1098 unsigned cs, eip;
1100 if (prefix & DATA32) {
1101 eip = pop32(regs);
1102 cs = MASK16(pop32(regs));
1103 } else {
1104 eip = pop16(regs);
1105 cs = pop16(regs);
1108 TRACE((regs, 1, "retl (to 0x%x:0x%x)", cs, eip));
1110 if (mode == VM86_REAL_TO_PROTECTED) { /* jump to protected mode */
1111 regs->cs = cs;
1112 regs->eip = eip;
1113 set_mode(regs, VM86_PROTECTED);
1114 } else if (mode == VM86_PROTECTED_TO_REAL) { /* jump to real mode */
1115 regs->cs = cs;
1116 regs->eip = eip;
1117 set_mode(regs, VM86_REAL);
1118 } else
1119 panic("retl");
/*
 * Deliver interrupt n using real-mode semantics: push flags/cs/ip,
 * clear IF, and vector through the IVT at linear address n*4.
 */
1122 static void
1123 interrupt(struct regs *regs, int n)
1125 TRACE((regs, 0, "external interrupt %d", n));
1126 push16(regs, regs->eflags);
1127 push16(regs, regs->cs);
1128 push16(regs, regs->eip);
1129 regs->eflags &= ~EFLAGS_IF;
1130 regs->eip = read16(address(regs, 0, n * 4));
1131 regs->cs = read16(address(regs, 0, n * 4 + 2));
1134 /*
1135 * Most port I/O operations are passed unmodified. We do have to be
1136 * careful and make sure the emulated program isn't remapping the
1137 * interrupt vectors. The following simple state machine catches
1138 * these attempts and rewrites them.
1139 */
/*
 * Emulate OUTB (0xE6 imm8 / 0xEE dx): intercept 8259 PIC ICW2 writes
 * and substitute the monitor's own vector bases; everything else is
 * passed straight to the hardware.  Returns 1 when handled, 0 for
 * other opcodes.  NOTE(review): some brace-only lines appear missing
 * in this listing — verify against upstream.
 */
1140 static int
1141 outbyte(struct regs *regs, unsigned prefix, unsigned opc)
/* icw2[0/1]: master/slave PIC expects its next IMR-port write as ICW2. */
1143 static char icw2[2] = { 0 };
1144 int al, port;
1146 switch (opc) {
1147 case 0xE6: /* outb port, al */
1148 port = fetch8(regs);
1149 break;
1150 case 0xEE: /* outb (%dx), al */
1151 port = MASK16(regs->edx);
1152 break;
1153 default:
1154 return 0;
1157 al = regs->eax & 0xFF;
1159 switch (port) {
1160 case PIC_MASTER + PIC_CMD:
1161 if (al & (1 << 4)) /* A0=0,D4=1 -> ICW1 */
1162 icw2[0] = 1;
1163 break;
1164 case PIC_MASTER + PIC_IMR:
1165 if (icw2[0]) {
1166 icw2[0] = 0;
1167 printf("Remapping master: ICW2 0x%x -> 0x%x\n",
1168 al, NR_EXCEPTION_HANDLER);
1169 al = NR_EXCEPTION_HANDLER;
1171 break;
1173 case PIC_SLAVE + PIC_CMD:
1174 if (al & (1 << 4)) /* A0=0,D4=1 -> ICW1 */
1175 icw2[1] = 1;
1176 break;
1177 case PIC_SLAVE + PIC_IMR:
1178 if (icw2[1]) {
1179 icw2[1] = 0;
1180 printf("Remapping slave: ICW2 0x%x -> 0x%x\n",
1181 al, NR_EXCEPTION_HANDLER+8);
1182 al = NR_EXCEPTION_HANDLER+8;
1184 break;
1187 outb(port, al);
1188 return 1;
/*
 * Emulate INB (0xE4 imm8 / 0xEC dx): read the port and merge the byte
 * into AL.  Returns 1 when handled, 0 for other opcodes.
 */
1191 static int
1192 inbyte(struct regs *regs, unsigned prefix, unsigned opc)
1194 int port;
1196 switch (opc) {
1197 case 0xE4: /* inb al, port */
1198 port = fetch8(regs);
1199 break;
1200 case 0xEC: /* inb al, (%dx) */
1201 port = MASK16(regs->edx);
1202 break;
1203 default:
1204 return 0;
1207 regs->eax = (regs->eax & ~0xFF) | inb(port);
1208 return 1;
/*
 * Emulate PUSH r/m16/32 (0xFF /6): read the memory operand and push it
 * on the emulated stack, 16 or 32 bits wide depending on DATA32.
 */
1211 static void
1212 pushrm(struct regs *regs, int prefix, unsigned modrm)
1214 unsigned n = regs->eip;
1215 unsigned addr;
1216 unsigned data;
1218 addr = operand(prefix, regs, modrm);
1220 if (prefix & DATA32) {
1221 data = read32(addr);
1222 push32(regs, data);
1223 } else {
1224 data = read16(addr);
1225 push16(regs, data);
1228 TRACE((regs, (regs->eip - n) + 1, "push *0x%x", addr));
/* Result of opcode(): instruction fully emulated, or not one we handle. */
enum { OPC_INVALID, OPC_EMULATED };

/* Read MSR 'msr' into val1 (low 32 bits, %eax) and val2 (high 32 bits, %edx). */
#define rdmsr(msr,val1,val2) \
	__asm__ __volatile__( \
		"rdmsr" \
		: "=a" (val1), "=d" (val2) \
		: "c" (msr))

/* Write val1 (low 32 bits) / val2 (high 32 bits) to MSR 'msr'. */
#define wrmsr(msr,val1,val2) \
	__asm__ __volatile__( \
		"wrmsr" \
		: /* no outputs */ \
		: "c" (msr), "a" (val1), "d" (val2))
1245 /*
1246 * Emulate a single instruction, including all its prefixes. We only implement
1247 * a small subset of the opcodes, and not all opcodes are implemented for each
1248 * of the four modes we can operate in.
1249 */
1250 static int
1251 opcode(struct regs *regs)
1253 unsigned eip = regs->eip;
1254 unsigned opc, modrm, disp;
1255 unsigned prefix = 0;
1257 for (;;) {
1258 switch ((opc = fetch8(regs))) {
1259 case 0x07:
1260 if (prefix & DATA32)
1261 regs->ves = pop32(regs);
1262 else
1263 regs->ves = pop16(regs);
1264 TRACE((regs, regs->eip - eip, "pop %%es"));
1265 return OPC_EMULATED;
1267 case 0x0F: /* two byte opcode */
1268 if (mode == VM86_PROTECTED)
1269 goto invalid;
1270 switch ((opc = fetch8(regs))) {
1271 case 0x01:
1272 switch (((modrm = fetch8(regs)) >> 3) & 7) {
1273 case 0: /* sgdt */
1274 case 1: /* sidt */
1275 goto invalid;
1276 case 2: /* lgdt */
1277 if (!lgdt(regs, prefix, modrm))
1278 goto invalid;
1279 return OPC_EMULATED;
1280 case 3: /* lidt */
1281 if (!lidt(regs, prefix, modrm))
1282 goto invalid;
1283 return OPC_EMULATED;
1284 case 4: /* smsw */
1285 goto invalid;
1286 case 5:
1287 goto invalid;
1288 case 6: /* lmsw */
1289 if (!lmsw(regs, prefix, modrm))
1290 goto invalid;
1291 return OPC_EMULATED;
1292 case 7: /* invlpg */
1293 goto invalid;
1295 break;
1296 case 0x09: /* wbinvd */
1297 return OPC_EMULATED;
1298 case 0x20: /* mov Rd, Cd (1h) */
1299 case 0x22:
1300 if (!movcr(regs, prefix, opc))
1301 goto invalid;
1302 return OPC_EMULATED;
1303 case 0x30: /* WRMSR */
1304 wrmsr(regs->ecx, regs->eax, regs->edx);
1305 return OPC_EMULATED;
1306 case 0x32: /* RDMSR */
1307 rdmsr(regs->ecx, regs->eax, regs->edx);
1308 return OPC_EMULATED;
1309 default:
1310 goto invalid;
1312 goto invalid;
1314 case 0x26:
1315 TRACE((regs, regs->eip - eip, "%%es:"));
1316 prefix |= SEG_ES;
1317 continue;
1319 case 0x2E:
1320 TRACE((regs, regs->eip - eip, "%%cs:"));
1321 prefix |= SEG_CS;
1322 continue;
1324 case 0x36:
1325 TRACE((regs, regs->eip - eip, "%%ss:"));
1326 prefix |= SEG_SS;
1327 continue;
1329 case 0x39: /* addr32 cmp r16, r/m16 */
1330 case 0x3B: /* addr32 cmp r/m16, r16 */
1331 if (mode != VM86_REAL && mode != VM86_REAL_TO_PROTECTED)
1332 goto invalid;
1333 if ((prefix & ADDR32) == 0)
1334 goto invalid;
1335 if (!cmp(regs, prefix, opc))
1336 goto invalid;
1337 return OPC_EMULATED;
1339 case 0x3E:
1340 TRACE((regs, regs->eip - eip, "%%ds:"));
1341 prefix |= SEG_DS;
1342 continue;
1344 case 0x64:
1345 TRACE((regs, regs->eip - eip, "%%fs:"));
1346 prefix |= SEG_FS;
1347 continue;
1349 case 0x65:
1350 TRACE((regs, regs->eip - eip, "%%gs:"));
1351 prefix |= SEG_GS;
1352 continue;
1354 case 0x66:
1355 TRACE((regs, regs->eip - eip, "data32"));
1356 prefix |= DATA32;
1357 continue;
1359 case 0x67:
1360 TRACE((regs, regs->eip - eip, "addr32"));
1361 prefix |= ADDR32;
1362 continue;
1364 case 0x88: /* addr32 mov r8, r/m8 */
1365 case 0x8A: /* addr32 mov r/m8, r8 */
1366 if (mode != VM86_REAL && mode != VM86_REAL_TO_PROTECTED)
1367 goto invalid;
1368 if ((prefix & ADDR32) == 0)
1369 goto invalid;
1370 if (!movr(regs, prefix, opc))
1371 goto invalid;
1372 return OPC_EMULATED;
1374 case 0x89: /* addr32 mov r16, r/m16 */
1375 if (mode == VM86_PROTECTED_TO_REAL) {
1376 unsigned modrm = fetch8(regs);
1377 unsigned addr = operand(prefix, regs, modrm);
1378 unsigned val, r = (modrm >> 3) & 7;
1380 if (prefix & DATA32) {
1381 val = getreg16(regs, r);
1382 write32(addr, val);
1383 } else {
1384 val = getreg32(regs, r);
1385 write16(addr, MASK16(val));
1387 TRACE((regs, regs->eip - eip,
1388 "mov %%%s, *0x%x", rnames[r], addr));
1389 return OPC_EMULATED;
1391 case 0x8B: /* addr32 mov r/m16, r16 */
1392 if (mode != VM86_REAL && mode != VM86_REAL_TO_PROTECTED)
1393 goto invalid;
1394 if ((prefix & ADDR32) == 0)
1395 goto invalid;
1396 if (!movr(regs, prefix, opc))
1397 goto invalid;
1398 return OPC_EMULATED;
1400 case 0x8F: /* addr32 pop r/m16 */
1401 if ((prefix & ADDR32) == 0)
1402 goto invalid;
1403 if (!pop(regs, prefix, opc))
1404 goto invalid;
1405 return OPC_EMULATED;
1407 case 0x90: /* nop */
1408 TRACE((regs, regs->eip - eip, "nop"));
1409 return OPC_EMULATED;
1411 case 0x9C: /* pushf */
1412 TRACE((regs, regs->eip - eip, "pushf"));
1413 if (prefix & DATA32)
1414 push32(regs, regs->eflags & ~EFLAGS_VM);
1415 else
1416 push16(regs, regs->eflags & ~EFLAGS_VM);
1417 return OPC_EMULATED;
1419 case 0x9D: /* popf */
1420 TRACE((regs, regs->eip - eip, "popf"));
1421 if (prefix & DATA32)
1422 regs->eflags = pop32(regs);
1423 else
1424 regs->eflags = (regs->eflags & 0xFFFF0000L) |
1425 pop16(regs);
1426 regs->eflags |= EFLAGS_VM;
1427 return OPC_EMULATED;
1429 case 0xA1: /* mov ax, r/m16 */
1431 int addr, data;
1432 int seg = segment(prefix, regs, regs->vds);
1433 int offset = prefix & ADDR32? fetch32(regs) : fetch16(regs);
1435 if (prefix & DATA32) {
1436 addr = address(regs, seg, offset);
1437 data = read32(addr);
1438 setreg32(regs, 0, data);
1439 } else {
1440 addr = address(regs, seg, offset);
1441 data = read16(addr);
1442 setreg16(regs, 0, data);
1444 TRACE((regs, regs->eip - eip, "mov *0x%x, %%ax", addr));
1446 return OPC_EMULATED;
1448 case 0xBB: /* mov bx, imm16 */
1450 int data;
1451 if (prefix & DATA32) {
1452 data = fetch32(regs);
1453 setreg32(regs, 3, data);
1454 } else {
1455 data = fetch16(regs);
1456 setreg16(regs, 3, data);
1458 TRACE((regs, regs->eip - eip, "mov $0x%x, %%bx", data));
1460 return OPC_EMULATED;
1462 case 0xC6: /* addr32 movb $imm, r/m8 */
1463 if ((prefix & ADDR32) == 0)
1464 goto invalid;
1465 if (!movr(regs, prefix, opc))
1466 goto invalid;
1467 return OPC_EMULATED;
1469 case 0xCB: /* retl */
1470 if ((mode == VM86_REAL_TO_PROTECTED) ||
1471 (mode == VM86_PROTECTED_TO_REAL)) {
1472 retl(regs, prefix);
1473 return OPC_INVALID;
1475 goto invalid;
1477 case 0xCD: /* int $n */
1478 TRACE((regs, regs->eip - eip, "int"));
1479 interrupt(regs, fetch8(regs));
1480 return OPC_EMULATED;
1482 case 0xCF: /* iret */
1483 if (prefix & DATA32) {
1484 TRACE((regs, regs->eip - eip, "data32 iretd"));
1485 regs->eip = pop32(regs);
1486 regs->cs = pop32(regs);
1487 regs->eflags = pop32(regs);
1488 } else {
1489 TRACE((regs, regs->eip - eip, "iret"));
1490 regs->eip = pop16(regs);
1491 regs->cs = pop16(regs);
1492 regs->eflags = (regs->eflags & 0xFFFF0000L) |
1493 pop16(regs);
1495 return OPC_EMULATED;
1497 case 0xE4: /* inb al, port */
1498 if (!inbyte(regs, prefix, opc))
1499 goto invalid;
1500 return OPC_EMULATED;
1502 case 0xE6: /* outb port, al */
1503 if (!outbyte(regs, prefix, opc))
1504 goto invalid;
1505 return OPC_EMULATED;
1507 case 0xEA: /* jmpl */
1508 if ((mode == VM86_REAL_TO_PROTECTED) ||
1509 (mode == VM86_PROTECTED_TO_REAL)) {
1510 jmpl(regs, prefix);
1511 return OPC_INVALID;
1513 goto invalid;
1515 case 0xFF: /* jmpl (indirect) */
1517 unsigned modrm = fetch8(regs);
1518 switch((modrm >> 3) & 7) {
1519 case 5: /* jmpl (indirect) */
1520 if ((mode == VM86_REAL_TO_PROTECTED) ||
1521 (mode == VM86_PROTECTED_TO_REAL)) {
1522 jmpl_indirect(regs, prefix, modrm);
1523 return OPC_INVALID;
1525 goto invalid;
1527 case 6: /* push r/m16 */
1528 pushrm(regs, prefix, modrm);
1529 return OPC_EMULATED;
1531 default:
1532 goto invalid;
1536 case 0xEB: /* short jump */
1537 if ((mode == VM86_REAL_TO_PROTECTED) ||
1538 (mode == VM86_PROTECTED_TO_REAL)) {
1539 disp = (char) fetch8(regs);
1540 TRACE((regs, 2, "jmp 0x%x", regs->eip + disp));
1541 regs->eip += disp;
1542 return OPC_EMULATED;
1544 goto invalid;
1546 case 0xEC: /* inb al, (%dx) */
1547 if (!inbyte(regs, prefix, opc))
1548 goto invalid;
1549 return OPC_EMULATED;
1551 case 0xEE: /* outb (%dx), al */
1552 if (!outbyte(regs, prefix, opc))
1553 goto invalid;
1554 return OPC_EMULATED;
1556 case 0xF0: /* lock */
1557 TRACE((regs, regs->eip - eip, "lock"));
1558 continue;
1560 case 0xF6: /* addr32 testb $imm, r/m8 */
1561 if ((prefix & ADDR32) == 0)
1562 goto invalid;
1563 if (!test(regs, prefix, opc))
1564 goto invalid;
1565 return OPC_EMULATED;
1567 case 0xFA: /* cli */
1568 TRACE((regs, regs->eip - eip, "cli"));
1569 regs->eflags &= ~EFLAGS_IF;
1570 return OPC_EMULATED;
1572 case 0xFB: /* sti */
1573 TRACE((regs, regs->eip - eip, "sti"));
1574 regs->eflags |= EFLAGS_IF;
1575 return OPC_EMULATED;
1577 default:
1578 goto invalid;
1582 invalid:
1583 regs->eip = eip;
1584 TRACE((regs, regs->eip - eip, "opc 0x%x", opc));
1585 return OPC_INVALID;
1588 void
1589 emulate(struct regs *regs)
1591 unsigned flteip;
1592 int nemul = 0;
1594 /* emulate as many instructions as possible */
1595 while (opcode(regs) != OPC_INVALID)
1596 nemul++;
1598 /* detect the case where we are not making progress */
1599 if (nemul == 0 && prev_eip == regs->eip) {
1600 flteip = address(regs, MASK16(regs->cs), regs->eip);
1601 panic("Unknown opcode at %04x:%04x=0x%x",
1602 MASK16(regs->cs), regs->eip, flteip);
1603 } else
1604 prev_eip = regs->eip;
1607 void
1608 trap(int trapno, int errno, struct regs *regs)
1610 /* emulate device interrupts */
1611 if (trapno >= NR_EXCEPTION_HANDLER) {
1612 int irq = trapno - NR_EXCEPTION_HANDLER;
1613 if (irq < 8)
1614 interrupt(regs, irq + 8);
1615 else
1616 interrupt(regs, 0x70 + (irq - 8));
1617 return;
1620 switch (trapno) {
1621 case 1: /* Debug */
1622 if (regs->eflags & EFLAGS_VM) {
1623 /* emulate any 8086 instructions */
1624 if (mode != VM86_REAL_TO_PROTECTED)
1625 panic("not in real-to-protected mode");
1626 emulate(regs);
1627 return;
1629 goto invalid;
1631 case 13: /* GPF */
1632 if (regs->eflags & EFLAGS_VM) {
1633 /* emulate any 8086 instructions */
1634 if (mode == VM86_PROTECTED)
1635 panic("unexpected protected mode");
1636 emulate(regs);
1637 return;
1639 goto invalid;
1641 default:
1642 invalid:
1643 printf("Trap (0x%x) while in %s mode\n",
1644 trapno, regs->eflags & EFLAGS_VM ? "real" : "protected");
1645 if (trapno == 14)
1646 printf("Page fault address 0x%x\n", get_cr2());
1647 dump_regs(regs);
1648 halt();