ia64/xen-unstable

view tools/firmware/vmxassist/vm86.c @ 12773:275a8f9a0710

Remove useless segments push/pop in VMXAssist.
According to the Intel spec, segment registers are cleared when exiting
virtual-8086 mode through a trap or interrupt gate, so there is no need to
save their values on the stack.
Signed-off-by: Xin Li <xin.b.li@intel.com>
author kfraser@localhost.localdomain
date Mon Dec 04 09:20:12 2006 +0000 (2006-12-04)
parents fa4b1c287afa
children 44319e9dc0c5
line source
1 /*
2 * vm86.c: A vm86 emulator. The main purpose of this emulator is to do as
3 * little work as possible.
4 *
5 * Leendert van Doorn, leendert@watson.ibm.com
6 * Copyright (c) 2005-2006, International Business Machines Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place - Suite 330, Boston, MA 02111-1307 USA.
20 */
21 #include "vm86.h"
22 #include "util.h"
23 #include "machine.h"
25 #define HIGHMEM (1 << 20) /* 1MB */
26 #define MASK16(v) ((v) & 0xFFFF)
28 #define DATA32 0x0001
29 #define ADDR32 0x0002
30 #define SEG_CS 0x0004
31 #define SEG_DS 0x0008
32 #define SEG_ES 0x0010
33 #define SEG_SS 0x0020
34 #define SEG_FS 0x0040
35 #define SEG_GS 0x0080
37 static unsigned prev_eip = 0;
38 enum vm86_mode mode = 0;
40 static struct regs saved_rm_regs;
42 #ifdef DEBUG
43 int traceset = 0;
45 char *states[] = {
46 "<VM86_REAL>",
47 "<VM86_REAL_TO_PROTECTED>",
48 "<VM86_PROTECTED_TO_REAL>",
49 "<VM86_PROTECTED>"
50 };
52 static char *rnames[] = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di" };
53 #endif /* DEBUG */
55 #define PDE_PS (1 << 7)
56 #define PT_ENTRY_PRESENT 0x1
58 /* We only support access to <=4G physical memory due to 1:1 mapping */
59 static uint64_t
60 guest_linear_to_phys(uint32_t base)
61 {
62 uint32_t gcr3 = oldctx.cr3;
63 uint64_t l2_mfn;
64 uint64_t l1_mfn;
65 uint64_t l0_mfn;
67 if (!(oldctx.cr0 & CR0_PG))
68 return base;
70 if (!(oldctx.cr4 & CR4_PAE)) {
71 l1_mfn = ((uint32_t *)(long)gcr3)[(base >> 22) & 0x3ff];
72 if (!(l1_mfn & PT_ENTRY_PRESENT))
73 panic("l2 entry not present\n");
75 if ((oldctx.cr4 & CR4_PSE) && (l1_mfn & PDE_PS)) {
76 l0_mfn = l1_mfn & 0xffc00000;
77 return l0_mfn + (base & 0x3fffff);
78 }
80 l1_mfn &= 0xfffff000;
82 l0_mfn = ((uint32_t *)(long)l1_mfn)[(base >> 12) & 0x3ff];
83 if (!(l0_mfn & PT_ENTRY_PRESENT))
84 panic("l1 entry not present\n");
85 l0_mfn &= 0xfffff000;
87 return l0_mfn + (base & 0xfff);
88 } else {
89 l2_mfn = ((uint64_t *)(long)gcr3)[(base >> 30) & 0x3];
90 if (!(l2_mfn & PT_ENTRY_PRESENT))
91 panic("l3 entry not present\n");
92 l2_mfn &= 0xffffff000ULL;
94 if (l2_mfn & 0xf00000000ULL) {
95 printf("l2 page above 4G\n");
96 cpuid_addr_value(l2_mfn + 8 * ((base >> 21) & 0x1ff), &l1_mfn);
97 } else
98 l1_mfn = ((uint64_t *)(long)l2_mfn)[(base >> 21) & 0x1ff];
99 if (!(l1_mfn & PT_ENTRY_PRESENT))
100 panic("l2 entry not present\n");
102 if (l1_mfn & PDE_PS) { /* CR4.PSE is ignored in PAE mode */
103 l0_mfn = l1_mfn & 0xfffe00000ULL;
104 return l0_mfn + (base & 0x1fffff);
105 }
107 l1_mfn &= 0xffffff000ULL;
109 if (l1_mfn & 0xf00000000ULL) {
110 printf("l1 page above 4G\n");
111 cpuid_addr_value(l1_mfn + 8 * ((base >> 12) & 0x1ff), &l0_mfn);
112 } else
113 l0_mfn = ((uint64_t *)(long)l1_mfn)[(base >> 12) & 0x1ff];
114 if (!(l0_mfn & PT_ENTRY_PRESENT))
115 panic("l1 entry not present\n");
117 l0_mfn &= 0xffffff000ULL;
119 return l0_mfn + (base & 0xfff);
120 }
121 }
123 static unsigned
124 address(struct regs *regs, unsigned seg, unsigned off)
125 {
126 uint64_t gdt_phys_base;
127 unsigned long long entry;
128 unsigned seg_base, seg_limit;
129 unsigned entry_low, entry_high;
131 if (seg == 0) {
132 if (mode == VM86_REAL || mode == VM86_REAL_TO_PROTECTED)
133 return off;
134 else
135 panic("segment is zero, but not in real mode!\n");
136 }
138 if (mode == VM86_REAL || seg > oldctx.gdtr_limit ||
139 (mode == VM86_REAL_TO_PROTECTED && regs->cs == seg))
140 return ((seg & 0xFFFF) << 4) + off;
142 gdt_phys_base = guest_linear_to_phys(oldctx.gdtr_base);
143 if (gdt_phys_base != (uint32_t)gdt_phys_base) {
144 printf("gdt base address above 4G\n");
145 cpuid_addr_value(gdt_phys_base + 8 * (seg >> 3), &entry);
146 } else
147 entry = ((unsigned long long *)(long)gdt_phys_base)[seg >> 3];
149 entry_high = entry >> 32;
150 entry_low = entry & 0xFFFFFFFF;
152 seg_base = (entry_high & 0xFF000000) | ((entry >> 16) & 0xFFFFFF);
153 seg_limit = (entry_high & 0xF0000) | (entry_low & 0xFFFF);
155 if (entry_high & 0x8000 &&
156 ((entry_high & 0x800000 && off >> 12 <= seg_limit) ||
157 (!(entry_high & 0x800000) && off <= seg_limit)))
158 return seg_base + off;
160 panic("should never reach here in function address():\n\t"
161 "entry=0x%08x%08x, mode=%d, seg=0x%08x, offset=0x%08x\n",
162 entry_high, entry_low, mode, seg, off);
164 return 0;
165 }
167 #ifdef DEBUG
168 void
169 trace(struct regs *regs, int adjust, char *fmt, ...)
170 {
171 unsigned off = regs->eip - adjust;
172 va_list ap;
174 if ((traceset & (1 << mode)) &&
175 (mode == VM86_REAL_TO_PROTECTED || mode == VM86_REAL)) {
176 /* 16-bit, seg:off addressing */
177 unsigned addr = address(regs, regs->cs, off);
178 printf("0x%08x: 0x%x:0x%04x ", addr, regs->cs, off);
179 printf("(%d) ", mode);
180 va_start(ap, fmt);
181 vprintf(fmt, ap);
182 va_end(ap);
183 printf("\n");
184 }
185 if ((traceset & (1 << mode)) &&
186 (mode == VM86_PROTECTED_TO_REAL || mode == VM86_PROTECTED)) {
187 /* 16-bit, gdt addressing */
188 unsigned addr = address(regs, regs->cs, off);
189 printf("0x%08x: 0x%x:0x%08x ", addr, regs->cs, off);
190 printf("(%d) ", mode);
191 va_start(ap, fmt);
192 vprintf(fmt, ap);
193 va_end(ap);
194 printf("\n");
195 }
196 }
197 #endif /* DEBUG */
/* Read a 32-bit value from a flat address. */
static inline unsigned
read32(unsigned addr)
{
    return *(unsigned long *) addr;
}
/* Read a 16-bit value from a flat address. */
static inline unsigned
read16(unsigned addr)
{
    return *(unsigned short *) addr;
}
/* Read an 8-bit value from a flat address. */
static inline unsigned
read8(unsigned addr)
{
    return *(unsigned char *) addr;
}
/* Write a 32-bit value to a flat address. */
static inline void
write32(unsigned addr, unsigned value)
{
    *(unsigned long *) addr = value;
}
/* Write a 16-bit value to a flat address. */
static inline void
write16(unsigned addr, unsigned value)
{
    *(unsigned short *) addr = value;
}
/* Write an 8-bit value to a flat address. */
static inline void
write8(unsigned addr, unsigned value)
{
    *(unsigned char *) addr = value;
}
235 static inline void
236 push32(struct regs *regs, unsigned value)
237 {
238 regs->uesp -= 4;
239 write32(address(regs, regs->uss, MASK16(regs->uesp)), value);
240 }
242 static inline void
243 push16(struct regs *regs, unsigned value)
244 {
245 regs->uesp -= 2;
246 write16(address(regs, regs->uss, MASK16(regs->uesp)), value);
247 }
249 static inline unsigned
250 pop32(struct regs *regs)
251 {
252 unsigned value = read32(address(regs, regs->uss, MASK16(regs->uesp)));
253 regs->uesp += 4;
254 return value;
255 }
257 static inline unsigned
258 pop16(struct regs *regs)
259 {
260 unsigned value = read16(address(regs, regs->uss, MASK16(regs->uesp)));
261 regs->uesp += 2;
262 return value;
263 }
265 static inline unsigned
266 fetch32(struct regs *regs)
267 {
268 unsigned addr = address(regs, regs->cs, MASK16(regs->eip));
270 regs->eip += 4;
271 return read32(addr);
272 }
274 static inline unsigned
275 fetch16(struct regs *regs)
276 {
277 unsigned addr = address(regs, regs->cs, MASK16(regs->eip));
279 regs->eip += 2;
280 return read16(addr);
281 }
283 static inline unsigned
284 fetch8(struct regs *regs)
285 {
286 unsigned addr = address(regs, regs->cs, MASK16(regs->eip));
288 regs->eip++;
289 return read8(addr);
290 }
292 static unsigned
293 getreg32(struct regs *regs, int r)
294 {
295 switch (r & 7) {
296 case 0: return regs->eax;
297 case 1: return regs->ecx;
298 case 2: return regs->edx;
299 case 3: return regs->ebx;
300 case 4: return regs->esp;
301 case 5: return regs->ebp;
302 case 6: return regs->esi;
303 case 7: return regs->edi;
304 }
305 return ~0;
306 }
/* Return the low 16 bits of the GPR selected by ModRM register r. */
static unsigned
getreg16(struct regs *regs, int r)
{
    return MASK16(getreg32(regs, r));
}
314 static unsigned
315 getreg8(struct regs *regs, int r)
316 {
317 switch (r & 7) {
318 case 0: return regs->eax & 0xFF; /* al */
319 case 1: return regs->ecx & 0xFF; /* cl */
320 case 2: return regs->edx & 0xFF; /* dl */
321 case 3: return regs->ebx & 0xFF; /* bl */
322 case 4: return (regs->esp >> 8) & 0xFF; /* ah */
323 case 5: return (regs->ebp >> 8) & 0xFF; /* ch */
324 case 6: return (regs->esi >> 8) & 0xFF; /* dh */
325 case 7: return (regs->edi >> 8) & 0xFF; /* bh */
326 }
327 return ~0;
328 }
330 static void
331 setreg32(struct regs *regs, int r, unsigned v)
332 {
333 switch (r & 7) {
334 case 0: regs->eax = v; break;
335 case 1: regs->ecx = v; break;
336 case 2: regs->edx = v; break;
337 case 3: regs->ebx = v; break;
338 case 4: regs->esp = v; break;
339 case 5: regs->ebp = v; break;
340 case 6: regs->esi = v; break;
341 case 7: regs->edi = v; break;
342 }
343 }
/* Store v into the low 16 bits of the selected GPR, preserving the top. */
static void
setreg16(struct regs *regs, int r, unsigned v)
{
    setreg32(regs, r, (getreg32(regs, r) & ~0xFFFF) | MASK16(v));
}
351 static void
352 setreg8(struct regs *regs, int r, unsigned v)
353 {
354 v &= 0xFF;
355 switch (r & 7) {
356 case 0: regs->eax = (regs->eax & ~0xFF) | v; break;
357 case 1: regs->ecx = (regs->ecx & ~0xFF) | v; break;
358 case 2: regs->edx = (regs->edx & ~0xFF) | v; break;
359 case 3: regs->ebx = (regs->ebx & ~0xFF) | v; break;
360 case 4: regs->esp = (regs->esp & ~0xFF00) | (v << 8); break;
361 case 5: regs->ebp = (regs->ebp & ~0xFF00) | (v << 8); break;
362 case 6: regs->esi = (regs->esi & ~0xFF00) | (v << 8); break;
363 case 7: regs->edi = (regs->edi & ~0xFF00) | (v << 8); break;
364 }
365 }
367 static unsigned
368 segment(unsigned prefix, struct regs *regs, unsigned seg)
369 {
370 if (prefix & SEG_ES)
371 seg = regs->ves;
372 if (prefix & SEG_DS)
373 seg = regs->vds;
374 if (prefix & SEG_CS)
375 seg = regs->cs;
376 if (prefix & SEG_SS)
377 seg = regs->uss;
378 if (prefix & SEG_FS)
379 seg = regs->vfs;
380 if (prefix & SEG_GS)
381 seg = regs->vgs;
382 return seg;
383 }
/*
 * Decode an SIB byte (scale/index/base) for 32-bit addressing and
 * return the effective offset.  mod is the ModRM mod field; mod==0 with
 * base==5 means a 32-bit displacement instead of a base register, and
 * index 4 means "no index".  Consumes displacement bytes from the
 * instruction stream as needed.
 */
static unsigned
sib(struct regs *regs, int mod, unsigned byte)
{
    unsigned scale = (byte >> 6) & 3;
    int index = (byte >> 3) & 7;
    int base = byte & 7;
    unsigned addr = 0;

    switch (mod) {
    case 0:
        if (base == 5)
            addr = fetch32(regs); /* disp32, no base */
        else
            addr = getreg32(regs, base);
        break;
    case 1:
        addr = getreg32(regs, base) + (char) fetch8(regs); /* disp8 */
        break;
    case 2:
        addr = getreg32(regs, base) + fetch32(regs); /* disp32 */
        break;
    }

    if (index != 4)
        addr += getreg32(regs, index) << scale;

    return addr;
}
414 /*
415 * Operand (modrm) decode
416 */
417 static unsigned
418 operand(unsigned prefix, struct regs *regs, unsigned modrm)
419 {
420 int mod, disp = 0, seg;
422 seg = segment(prefix, regs, regs->vds);
424 if (prefix & ADDR32) { /* 32-bit addressing */
425 switch ((mod = (modrm >> 6) & 3)) {
426 case 0:
427 switch (modrm & 7) {
428 case 0: return address(regs, seg, regs->eax);
429 case 1: return address(regs, seg, regs->ecx);
430 case 2: return address(regs, seg, regs->edx);
431 case 3: return address(regs, seg, regs->ebx);
432 case 4: return address(regs, seg,
433 sib(regs, mod, fetch8(regs)));
434 case 5: return address(regs, seg, fetch32(regs));
435 case 6: return address(regs, seg, regs->esi);
436 case 7: return address(regs, seg, regs->edi);
437 }
438 break;
439 case 1:
440 case 2:
441 if ((modrm & 7) != 4) {
442 if (mod == 1)
443 disp = (char) fetch8(regs);
444 else
445 disp = (int) fetch32(regs);
446 }
447 switch (modrm & 7) {
448 case 0: return address(regs, seg, regs->eax + disp);
449 case 1: return address(regs, seg, regs->ecx + disp);
450 case 2: return address(regs, seg, regs->edx + disp);
451 case 3: return address(regs, seg, regs->ebx + disp);
452 case 4: return address(regs, seg,
453 sib(regs, mod, fetch8(regs)));
454 case 5: return address(regs, seg, regs->ebp + disp);
455 case 6: return address(regs, seg, regs->esi + disp);
456 case 7: return address(regs, seg, regs->edi + disp);
457 }
458 break;
459 case 3:
460 return getreg32(regs, modrm);
461 }
462 } else { /* 16-bit addressing */
463 switch ((mod = (modrm >> 6) & 3)) {
464 case 0:
465 switch (modrm & 7) {
466 case 0: return address(regs, seg, MASK16(regs->ebx) +
467 MASK16(regs->esi));
468 case 1: return address(regs, seg, MASK16(regs->ebx) +
469 MASK16(regs->edi));
470 case 2: return address(regs, seg, MASK16(regs->ebp) +
471 MASK16(regs->esi));
472 case 3: return address(regs, seg, MASK16(regs->ebp) +
473 MASK16(regs->edi));
474 case 4: return address(regs, seg, MASK16(regs->esi));
475 case 5: return address(regs, seg, MASK16(regs->edi));
476 case 6: return address(regs, seg, fetch16(regs));
477 case 7: return address(regs, seg, MASK16(regs->ebx));
478 }
479 break;
480 case 1:
481 case 2:
482 if (mod == 1)
483 disp = (char) fetch8(regs);
484 else
485 disp = (int) fetch16(regs);
486 switch (modrm & 7) {
487 case 0: return address(regs, seg, MASK16(regs->ebx) +
488 MASK16(regs->esi) + disp);
489 case 1: return address(regs, seg, MASK16(regs->ebx) +
490 MASK16(regs->edi) + disp);
491 case 2: return address(regs, seg, MASK16(regs->ebp) +
492 MASK16(regs->esi) + disp);
493 case 3: return address(regs, seg, MASK16(regs->ebp) +
494 MASK16(regs->edi) + disp);
495 case 4: return address(regs, seg,
496 MASK16(regs->esi) + disp);
497 case 5: return address(regs, seg,
498 MASK16(regs->edi) + disp);
499 case 6: return address(regs, seg,
500 MASK16(regs->ebp) + disp);
501 case 7: return address(regs, seg,
502 MASK16(regs->ebx) + disp);
503 }
504 break;
505 case 3:
506 return getreg16(regs, modrm);
507 }
508 }
510 return 0;
511 }
513 /*
514 * Load new IDT
515 */
516 static int
517 lidt(struct regs *regs, unsigned prefix, unsigned modrm)
518 {
519 unsigned eip = regs->eip - 3;
520 unsigned addr = operand(prefix, regs, modrm);
522 oldctx.idtr_limit = ((struct dtr *) addr)->size;
523 if ((prefix & DATA32) == 0)
524 oldctx.idtr_base = ((struct dtr *) addr)->base & 0xFFFFFF;
525 else
526 oldctx.idtr_base = ((struct dtr *) addr)->base;
527 TRACE((regs, regs->eip - eip, "lidt 0x%x <%d, 0x%x>",
528 addr, oldctx.idtr_limit, oldctx.idtr_base));
530 return 1;
531 }
533 /*
534 * Load new GDT
535 */
536 static int
537 lgdt(struct regs *regs, unsigned prefix, unsigned modrm)
538 {
539 unsigned eip = regs->eip - 3;
540 unsigned addr = operand(prefix, regs, modrm);
542 oldctx.gdtr_limit = ((struct dtr *) addr)->size;
543 if ((prefix & DATA32) == 0)
544 oldctx.gdtr_base = ((struct dtr *) addr)->base & 0xFFFFFF;
545 else
546 oldctx.gdtr_base = ((struct dtr *) addr)->base;
547 TRACE((regs, regs->eip - eip, "lgdt 0x%x <%d, 0x%x>",
548 addr, oldctx.gdtr_limit, oldctx.gdtr_base));
550 return 1;
551 }
553 /*
554 * Modify CR0 either through an lmsw instruction.
555 */
556 static int
557 lmsw(struct regs *regs, unsigned prefix, unsigned modrm)
558 {
559 unsigned eip = regs->eip - 3;
560 unsigned ax = operand(prefix, regs, modrm) & 0xF;
561 unsigned cr0 = (oldctx.cr0 & 0xFFFFFFF0) | ax;
563 TRACE((regs, regs->eip - eip, "lmsw 0x%x", ax));
564 #ifndef TEST
565 oldctx.cr0 = cr0 | CR0_PE | CR0_NE;
566 #else
567 oldctx.cr0 = cr0 | CR0_PE | CR0_NE | CR0_PG;
568 #endif
569 if (cr0 & CR0_PE)
570 set_mode(regs, VM86_REAL_TO_PROTECTED);
572 return 1;
573 }
575 /*
576 * We need to handle moves that address memory beyond the 64KB segment
577 * limit that VM8086 mode enforces.
578 */
579 static int
580 movr(struct regs *regs, unsigned prefix, unsigned opc)
581 {
582 unsigned eip = regs->eip - 1;
583 unsigned modrm = fetch8(regs);
584 unsigned addr = operand(prefix, regs, modrm);
585 unsigned val, r = (modrm >> 3) & 7;
587 if ((modrm & 0xC0) == 0xC0) /* no registers */
588 return 0;
590 switch (opc) {
591 case 0x88: /* addr32 mov r8, r/m8 */
592 val = getreg8(regs, r);
593 TRACE((regs, regs->eip - eip,
594 "movb %%e%s, *0x%x", rnames[r], addr));
595 write8(addr, val);
596 break;
598 case 0x8A: /* addr32 mov r/m8, r8 */
599 TRACE((regs, regs->eip - eip,
600 "movb *0x%x, %%%s", addr, rnames[r]));
601 setreg8(regs, r, read8(addr));
602 break;
604 case 0x89: /* addr32 mov r16, r/m16 */
605 val = getreg32(regs, r);
606 if (prefix & DATA32) {
607 TRACE((regs, regs->eip - eip,
608 "movl %%e%s, *0x%x", rnames[r], addr));
609 write32(addr, val);
610 } else {
611 TRACE((regs, regs->eip - eip,
612 "movw %%%s, *0x%x", rnames[r], addr));
613 write16(addr, MASK16(val));
614 }
615 break;
617 case 0x8B: /* addr32 mov r/m16, r16 */
618 if (prefix & DATA32) {
619 TRACE((regs, regs->eip - eip,
620 "movl *0x%x, %%e%s", addr, rnames[r]));
621 setreg32(regs, r, read32(addr));
622 } else {
623 TRACE((regs, regs->eip - eip,
624 "movw *0x%x, %%%s", addr, rnames[r]));
625 setreg16(regs, r, read16(addr));
626 }
627 break;
629 case 0xC6: /* addr32 movb $imm, r/m8 */
630 if ((modrm >> 3) & 7)
631 return 0;
632 val = fetch8(regs);
633 write8(addr, val);
634 TRACE((regs, regs->eip - eip, "movb $0x%x, *0x%x",
635 val, addr));
636 break;
637 }
638 return 1;
639 }
641 /*
642 * Move to and from a control register.
643 */
644 static int
645 movcr(struct regs *regs, unsigned prefix, unsigned opc)
646 {
647 unsigned eip = regs->eip - 2;
648 unsigned modrm = fetch8(regs);
649 unsigned cr = (modrm >> 3) & 7;
651 if ((modrm & 0xC0) != 0xC0) /* only registers */
652 return 0;
654 switch (opc) {
655 case 0x20: /* mov Rd, Cd */
656 TRACE((regs, regs->eip - eip, "movl %%cr%d, %%eax", cr));
657 switch (cr) {
658 case 0:
659 #ifndef TEST
660 setreg32(regs, modrm,
661 oldctx.cr0 & ~(CR0_PE | CR0_NE));
662 #else
663 setreg32(regs, modrm,
664 oldctx.cr0 & ~(CR0_PE | CR0_NE | CR0_PG));
665 #endif
666 break;
667 case 2:
668 setreg32(regs, modrm, get_cr2());
669 break;
670 case 3:
671 setreg32(regs, modrm, oldctx.cr3);
672 break;
673 case 4:
674 setreg32(regs, modrm, oldctx.cr4);
675 break;
676 }
677 break;
678 case 0x22: /* mov Cd, Rd */
679 TRACE((regs, regs->eip - eip, "movl %%eax, %%cr%d", cr));
680 switch (cr) {
681 case 0:
682 oldctx.cr0 = getreg32(regs, modrm) | (CR0_PE | CR0_NE);
683 #ifdef TEST
684 oldctx.cr0 |= CR0_PG;
685 #endif
686 if (getreg32(regs, modrm) & CR0_PE)
687 set_mode(regs, VM86_REAL_TO_PROTECTED);
688 else
689 set_mode(regs, VM86_REAL);
690 break;
691 case 3:
692 oldctx.cr3 = getreg32(regs, modrm);
693 break;
694 case 4:
695 oldctx.cr4 = getreg32(regs, modrm);
696 break;
697 }
698 break;
699 }
701 return 1;
702 }
704 static inline void set_eflags_ZF(unsigned mask, unsigned v1, struct regs *regs)
705 {
706 if ((v1 & mask) == 0)
707 regs->eflags |= EFLAGS_ZF;
708 else
709 regs->eflags &= ~EFLAGS_ZF;
710 }
712 /*
713 * We need to handle cmp opcodes that address memory beyond the 64KB
714 * segment limit that VM8086 mode enforces.
715 */
716 static int
717 cmp(struct regs *regs, unsigned prefix, unsigned opc)
718 {
719 unsigned eip = regs->eip - 1;
720 unsigned modrm = fetch8(regs);
721 unsigned addr = operand(prefix, regs, modrm);
722 unsigned diff, val, r = (modrm >> 3) & 7;
724 if ((modrm & 0xC0) == 0xC0) /* no registers */
725 return 0;
727 switch (opc) {
728 case 0x39: /* addr32 cmp r16, r/m16 */
729 val = getreg32(regs, r);
730 if (prefix & DATA32) {
731 diff = read32(addr) - val;
732 set_eflags_ZF(~0, diff, regs);
734 TRACE((regs, regs->eip - eip,
735 "cmp %%e%s, *0x%x (0x%x)",
736 rnames[r], addr, diff));
737 } else {
738 diff = read16(addr) - val;
739 set_eflags_ZF(0xFFFF, diff, regs);
741 TRACE((regs, regs->eip - eip,
742 "cmp %%%s, *0x%x (0x%x)",
743 rnames[r], addr, diff));
744 }
745 break;
747 /* other cmp opcodes ... */
748 }
749 return 1;
750 }
752 /*
753 * We need to handle test opcodes that address memory beyond the 64KB
754 * segment limit that VM8086 mode enforces.
755 */
756 static int
757 test(struct regs *regs, unsigned prefix, unsigned opc)
758 {
759 unsigned eip = regs->eip - 1;
760 unsigned modrm = fetch8(regs);
761 unsigned addr = operand(prefix, regs, modrm);
762 unsigned val, diff;
764 if ((modrm & 0xC0) == 0xC0) /* no registers */
765 return 0;
767 switch (opc) {
768 case 0xF6: /* testb $imm, r/m8 */
769 if ((modrm >> 3) & 7)
770 return 0;
771 val = fetch8(regs);
772 diff = read8(addr) & val;
773 set_eflags_ZF(0xFF, diff, regs);
775 TRACE((regs, regs->eip - eip, "testb $0x%x, *0x%x (0x%x)",
776 val, addr, diff));
777 break;
779 /* other test opcodes ... */
780 }
782 return 1;
783 }
785 /*
786 * We need to handle pop opcodes that address memory beyond the 64KB
787 * segment limit that VM8086 mode enforces.
788 */
789 static int
790 pop(struct regs *regs, unsigned prefix, unsigned opc)
791 {
792 unsigned eip = regs->eip - 1;
793 unsigned modrm = fetch8(regs);
794 unsigned addr = operand(prefix, regs, modrm);
796 if ((modrm & 0xC0) == 0xC0) /* no registers */
797 return 0;
799 switch (opc) {
800 case 0x8F: /* pop r/m16 */
801 if ((modrm >> 3) & 7)
802 return 0;
803 if (prefix & DATA32)
804 write32(addr, pop32(regs));
805 else
806 write16(addr, pop16(regs));
807 TRACE((regs, regs->eip - eip, "pop *0x%x", addr));
808 break;
810 /* other pop opcodes ... */
811 }
813 return 1;
814 }
816 static int
817 mov_to_seg(struct regs *regs, unsigned prefix, unsigned opc)
818 {
819 unsigned modrm = fetch8(regs);
821 /* Only need to emulate segment loads in real->protected mode. */
822 if (mode != VM86_REAL_TO_PROTECTED)
823 return 0;
825 /* Register source only. */
826 if ((modrm & 0xC0) != 0xC0)
827 goto fail;
829 switch ((modrm & 0x38) >> 3) {
830 case 0: /* es */
831 regs->ves = getreg16(regs, modrm);
832 saved_rm_regs.ves = 0;
833 oldctx.es_sel = regs->ves;
834 return 1;
836 /* case 1: cs */
838 case 2: /* ss */
839 regs->uss = getreg16(regs, modrm);
840 saved_rm_regs.uss = 0;
841 oldctx.ss_sel = regs->uss;
842 return 1;
843 case 3: /* ds */
844 regs->vds = getreg16(regs, modrm);
845 saved_rm_regs.vds = 0;
846 oldctx.ds_sel = regs->vds;
847 return 1;
848 case 4: /* fs */
849 regs->vfs = getreg16(regs, modrm);
850 saved_rm_regs.vfs = 0;
851 oldctx.fs_sel = regs->vfs;
852 return 1;
853 case 5: /* gs */
854 regs->vgs = getreg16(regs, modrm);
855 saved_rm_regs.vgs = 0;
856 oldctx.gs_sel = regs->vgs;
857 return 1;
858 }
860 fail:
861 printf("%s:%d: missed opcode %02x %02x\n",
862 __FUNCTION__, __LINE__, opc, modrm);
863 return 0;
864 }
866 /*
867 * Emulate a segment load in protected mode
868 */
869 static int
870 load_seg(unsigned long sel, uint32_t *base, uint32_t *limit, union vmcs_arbytes *arbytes)
871 {
872 uint64_t gdt_phys_base;
873 unsigned long long entry;
875 /* protected mode: use seg as index into gdt */
876 if (sel > oldctx.gdtr_limit)
877 return 0;
879 if (sel == 0) {
880 arbytes->fields.null_bit = 1;
881 return 1;
882 }
884 gdt_phys_base = guest_linear_to_phys(oldctx.gdtr_base);
885 if (gdt_phys_base != (uint32_t)gdt_phys_base) {
886 printf("gdt base address above 4G\n");
887 cpuid_addr_value(gdt_phys_base + 8 * (sel >> 3), &entry);
888 } else
889 entry = ((unsigned long long *)(long)gdt_phys_base)[sel >> 3];
891 /* Check the P bit first */
892 if (!((entry >> (15+32)) & 0x1) && sel != 0)
893 return 0;
895 *base = (((entry >> (56-24)) & 0xFF000000) |
896 ((entry >> (32-16)) & 0x00FF0000) |
897 ((entry >> ( 16)) & 0x0000FFFF));
898 *limit = (((entry >> (48-16)) & 0x000F0000) |
899 ((entry ) & 0x0000FFFF));
901 arbytes->bytes = 0;
902 arbytes->fields.seg_type = (entry >> (8+32)) & 0xF; /* TYPE */
903 arbytes->fields.s = (entry >> (12+32)) & 0x1; /* S */
904 if (arbytes->fields.s)
905 arbytes->fields.seg_type |= 1; /* accessed */
906 arbytes->fields.dpl = (entry >> (13+32)) & 0x3; /* DPL */
907 arbytes->fields.p = (entry >> (15+32)) & 0x1; /* P */
908 arbytes->fields.avl = (entry >> (20+32)) & 0x1; /* AVL */
909 arbytes->fields.default_ops_size = (entry >> (22+32)) & 0x1; /* D */
911 if (entry & (1ULL << (23+32))) { /* G */
912 arbytes->fields.g = 1;
913 *limit = (*limit << 12) | 0xFFF;
914 }
916 return 1;
917 }
/*
 * Emulate a protected mode segment load, falling back to clearing it
 * (loading the null selector) if the descriptor was invalid.
 */
static void
load_or_clear_seg(unsigned long sel, uint32_t *base, uint32_t *limit, union vmcs_arbytes *arbytes)
{
    if (!load_seg(sel, base, limit, arbytes))
        load_seg(0, base, limit, arbytes);
}
931 /*
932 * Transition to protected mode
933 */
934 static void
935 protected_mode(struct regs *regs)
936 {
937 extern char stack_top[];
939 regs->eflags &= ~(EFLAGS_TF|EFLAGS_VM);
941 oldctx.eip = regs->eip;
942 oldctx.esp = regs->uesp;
943 oldctx.eflags = regs->eflags;
945 /* reload all segment registers */
946 if (!load_seg(regs->cs, &oldctx.cs_base,
947 &oldctx.cs_limit, &oldctx.cs_arbytes))
948 panic("Invalid %%cs=0x%x for protected mode\n", regs->cs);
949 oldctx.cs_sel = regs->cs;
951 load_or_clear_seg(oldctx.es_sel, &oldctx.es_base,
952 &oldctx.es_limit, &oldctx.es_arbytes);
953 load_or_clear_seg(oldctx.ss_sel, &oldctx.ss_base,
954 &oldctx.ss_limit, &oldctx.ss_arbytes);
955 load_or_clear_seg(oldctx.ds_sel, &oldctx.ds_base,
956 &oldctx.ds_limit, &oldctx.ds_arbytes);
957 load_or_clear_seg(oldctx.fs_sel, &oldctx.fs_base,
958 &oldctx.fs_limit, &oldctx.fs_arbytes);
959 load_or_clear_seg(oldctx.gs_sel, &oldctx.gs_base,
960 &oldctx.gs_limit, &oldctx.gs_arbytes);
962 /* initialize jump environment to warp back to protected mode */
963 regs->uss = DATA_SELECTOR;
964 regs->uesp = stack_top;
965 regs->cs = CODE_SELECTOR;
966 regs->eip = (unsigned) switch_to_protected_mode;
968 /* this should get us into 32-bit mode */
969 }
971 /*
972 * Start real-mode emulation
973 */
974 static void
975 real_mode(struct regs *regs)
976 {
977 regs->eflags |= EFLAGS_VM | 0x02;
979 /*
980 * When we transition from protected to real-mode and we
981 * have not reloaded the segment descriptors yet, they are
982 * interpreted as if they were in protect mode.
983 * We emulate this behavior by assuming that these memory
984 * reference are below 1MB and set %ss, %ds, %es accordingly.
985 */
986 if (regs->uss != 0) {
987 if (regs->uss >= HIGHMEM)
988 panic("%%ss 0x%lx higher than 1MB", regs->uss);
989 regs->uss = address(regs, regs->uss, 0) >> 4;
990 } else {
991 regs->uss = saved_rm_regs.uss;
992 }
993 if (regs->vds != 0) {
994 if (regs->vds >= HIGHMEM)
995 panic("%%ds 0x%lx higher than 1MB", regs->vds);
996 regs->vds = address(regs, regs->vds, 0) >> 4;
997 } else {
998 regs->vds = saved_rm_regs.vds;
999 }
1000 if (regs->ves != 0) {
1001 if (regs->ves >= HIGHMEM)
1002 panic("%%es 0x%lx higher than 1MB", regs->ves);
1003 regs->ves = address(regs, regs->ves, 0) >> 4;
1004 } else {
1005 regs->ves = saved_rm_regs.ves;
1008 /* this should get us into 16-bit mode */
1011 /*
1012 * This is the smarts of the emulator and handles the mode transitions. The
1013 * emulator handles 4 different modes. 1) VM86_REAL: emulated real-mode,
1014 * Just handle those instructions that are not supported under VM8086.
1015 * 2) VM86_REAL_TO_PROTECTED: going from real-mode to protected mode. In
1016 * this we single step through the instructions until we reload the
1017 * new %cs (some OSes do a lot of computations before reloading %cs). 2)
1018 * VM86_PROTECTED_TO_REAL when we are going from protected to real mode. In
1019 * this case we emulate the instructions by hand. Finally, 4) VM86_PROTECTED
1020 * when we transitioned to protected mode and we should abandon the
1021 * emulator. No instructions are emulated when in VM86_PROTECTED mode.
1022 */
1023 void
1024 set_mode(struct regs *regs, enum vm86_mode newmode)
1026 switch (newmode) {
1027 case VM86_REAL:
1028 if ((mode == VM86_PROTECTED_TO_REAL) ||
1029 (mode == VM86_REAL_TO_PROTECTED)) {
1030 regs->eflags &= ~EFLAGS_TF;
1031 real_mode(regs);
1032 break;
1033 } else if (mode == VM86_REAL) {
1034 break;
1035 } else
1036 panic("unexpected real mode transition");
1037 break;
1039 case VM86_REAL_TO_PROTECTED:
1040 if (mode == VM86_REAL) {
1041 regs->eflags |= EFLAGS_TF;
1042 saved_rm_regs.vds = regs->vds;
1043 saved_rm_regs.ves = regs->ves;
1044 saved_rm_regs.vfs = regs->vfs;
1045 saved_rm_regs.vgs = regs->vgs;
1046 saved_rm_regs.uss = regs->uss;
1047 oldctx.ds_sel = 0;
1048 oldctx.es_sel = 0;
1049 oldctx.fs_sel = 0;
1050 oldctx.gs_sel = 0;
1051 oldctx.ss_sel = 0;
1052 break;
1053 } else if (mode == VM86_REAL_TO_PROTECTED) {
1054 break;
1055 } else
1056 panic("unexpected real-to-protected mode transition");
1057 break;
1059 case VM86_PROTECTED_TO_REAL:
1060 if (mode == VM86_PROTECTED) {
1061 break;
1062 } else
1063 panic("unexpected protected-to-real mode transition");
1064 break;
1066 case VM86_PROTECTED:
1067 if (mode == VM86_REAL_TO_PROTECTED) {
1068 protected_mode(regs);
1069 } else
1070 panic("unexpected protected mode transition");
1071 break;
1074 mode = newmode;
1075 TRACE((regs, 0, states[mode]));
1078 static void
1079 jmpl(struct regs *regs, int prefix)
1081 unsigned n = regs->eip;
1082 unsigned cs, eip;
1084 if (mode == VM86_REAL_TO_PROTECTED) { /* jump to protected mode */
1085 eip = (prefix & DATA32) ? fetch32(regs) : fetch16(regs);
1086 cs = fetch16(regs);
1088 TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
1090 regs->cs = cs;
1091 regs->eip = eip;
1092 set_mode(regs, VM86_PROTECTED);
1093 } else if (mode == VM86_PROTECTED_TO_REAL) { /* jump to real mode */
1094 eip = (prefix & DATA32) ? fetch32(regs) : fetch16(regs);
1095 cs = fetch16(regs);
1097 TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
1099 regs->cs = cs;
1100 regs->eip = eip;
1101 set_mode(regs, VM86_REAL);
1102 } else
1103 panic("jmpl");
1106 static void
1107 jmpl_indirect(struct regs *regs, int prefix, unsigned modrm)
1109 unsigned n = regs->eip;
1110 unsigned cs, eip;
1111 unsigned addr;
1113 addr = operand(prefix, regs, modrm);
1115 if (mode == VM86_REAL_TO_PROTECTED) { /* jump to protected mode */
1116 eip = (prefix & DATA32) ? read32(addr) : read16(addr);
1117 addr += (prefix & DATA32) ? 4 : 2;
1118 cs = read16(addr);
1120 TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
1122 regs->cs = cs;
1123 regs->eip = eip;
1124 set_mode(regs, VM86_PROTECTED);
1125 } else if (mode == VM86_PROTECTED_TO_REAL) { /* jump to real mode */
1126 eip = (prefix & DATA32) ? read32(addr) : read16(addr);
1127 addr += (prefix & DATA32) ? 4 : 2;
1128 cs = read16(addr);
1130 TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
1132 regs->cs = cs;
1133 regs->eip = eip;
1134 set_mode(regs, VM86_REAL);
1135 } else
1136 panic("jmpl");
1139 static void
1140 retl(struct regs *regs, int prefix)
1142 unsigned cs, eip;
1144 if (prefix & DATA32) {
1145 eip = pop32(regs);
1146 cs = MASK16(pop32(regs));
1147 } else {
1148 eip = pop16(regs);
1149 cs = pop16(regs);
1152 TRACE((regs, 1, "retl (to 0x%x:0x%x)", cs, eip));
1154 if (mode == VM86_REAL_TO_PROTECTED) { /* jump to protected mode */
1155 regs->cs = cs;
1156 regs->eip = eip;
1157 set_mode(regs, VM86_PROTECTED);
1158 } else if (mode == VM86_PROTECTED_TO_REAL) { /* jump to real mode */
1159 regs->cs = cs;
1160 regs->eip = eip;
1161 set_mode(regs, VM86_REAL);
1162 } else
1163 panic("retl");
1166 static void
1167 interrupt(struct regs *regs, int n)
1169 TRACE((regs, 0, "external interrupt %d", n));
1170 push16(regs, regs->eflags);
1171 push16(regs, regs->cs);
1172 push16(regs, regs->eip);
1173 regs->eflags &= ~EFLAGS_IF;
1174 regs->eip = read16(address(regs, 0, n * 4));
1175 regs->cs = read16(address(regs, 0, n * 4 + 2));
1178 /*
1179 * Most port I/O operations are passed unmodified. We do have to be
1180 * careful and make sure the emulated program isn't remapping the
1181 * interrupt vectors. The following simple state machine catches
1182 * these attempts and rewrites them.
1183 */
1184 static int
1185 outbyte(struct regs *regs, unsigned prefix, unsigned opc)
1187 static char icw2[2] = { 0 };
1188 int al, port;
1190 switch (opc) {
1191 case 0xE6: /* outb port, al */
1192 port = fetch8(regs);
1193 break;
1194 case 0xEE: /* outb (%dx), al */
1195 port = MASK16(regs->edx);
1196 break;
1197 default:
1198 return 0;
1201 al = regs->eax & 0xFF;
1203 switch (port) {
1204 case PIC_MASTER + PIC_CMD:
1205 if (al & (1 << 4)) /* A0=0,D4=1 -> ICW1 */
1206 icw2[0] = 1;
1207 break;
1208 case PIC_MASTER + PIC_IMR:
1209 if (icw2[0]) {
1210 icw2[0] = 0;
1211 printf("Remapping master: ICW2 0x%x -> 0x%x\n",
1212 al, NR_EXCEPTION_HANDLER);
1213 al = NR_EXCEPTION_HANDLER;
1215 break;
1217 case PIC_SLAVE + PIC_CMD:
1218 if (al & (1 << 4)) /* A0=0,D4=1 -> ICW1 */
1219 icw2[1] = 1;
1220 break;
1221 case PIC_SLAVE + PIC_IMR:
1222 if (icw2[1]) {
1223 icw2[1] = 0;
1224 printf("Remapping slave: ICW2 0x%x -> 0x%x\n",
1225 al, NR_EXCEPTION_HANDLER+8);
1226 al = NR_EXCEPTION_HANDLER+8;
1228 break;
1231 outb(port, al);
1232 return 1;
1235 static int
1236 inbyte(struct regs *regs, unsigned prefix, unsigned opc)
1238 int port;
1240 switch (opc) {
1241 case 0xE4: /* inb al, port */
1242 port = fetch8(regs);
1243 break;
1244 case 0xEC: /* inb al, (%dx) */
1245 port = MASK16(regs->edx);
1246 break;
1247 default:
1248 return 0;
1251 regs->eax = (regs->eax & ~0xFF) | inb(port);
1252 return 1;
1255 static void
1256 pushrm(struct regs *regs, int prefix, unsigned modrm)
1258 unsigned n = regs->eip;
1259 unsigned addr;
1260 unsigned data;
1262 addr = operand(prefix, regs, modrm);
1264 if (prefix & DATA32) {
1265 data = read32(addr);
1266 push32(regs, data);
1267 } else {
1268 data = read16(addr);
1269 push16(regs, data);
1272 TRACE((regs, (regs->eip - n) + 1, "push *0x%x", addr));
/* Result of opcode(): whether the instruction was emulated here. */
enum { OPC_INVALID, OPC_EMULATED };

/* Read MSR 'msr': val1 receives the low 32 bits (eax), val2 the high (edx). */
#define rdmsr(msr,val1,val2) \
	__asm__ __volatile__( \
		"rdmsr" \
		: "=a" (val1), "=d" (val2) \
		: "c" (msr))

/* Write MSR 'msr' with val1 as the low 32 bits (eax), val2 as the high (edx). */
#define wrmsr(msr,val1,val2) \
	__asm__ __volatile__( \
		"wrmsr" \
		: /* no outputs */ \
		: "c" (msr), "a" (val1), "d" (val2))
1289 /*
1290 * Emulate a single instruction, including all its prefixes. We only implement
1291 * a small subset of the opcodes, and not all opcodes are implemented for each
1292 * of the four modes we can operate in.
1293 */
1294 static int
1295 opcode(struct regs *regs)
1297 unsigned eip = regs->eip;
1298 unsigned opc, modrm, disp;
1299 unsigned prefix = 0;
1301 for (;;) {
1302 switch ((opc = fetch8(regs))) {
1303 case 0x07: /* pop %es */
1304 regs->ves = (prefix & DATA32) ?
1305 pop32(regs) : pop16(regs);
1306 TRACE((regs, regs->eip - eip, "pop %%es"));
1307 if (mode == VM86_REAL_TO_PROTECTED) {
1308 saved_rm_regs.ves = 0;
1309 oldctx.es_sel = regs->ves;
1311 return OPC_EMULATED;
1313 case 0x0F: /* two byte opcode */
1314 if (mode == VM86_PROTECTED)
1315 goto invalid;
1316 switch ((opc = fetch8(regs))) {
1317 case 0x01:
1318 switch (((modrm = fetch8(regs)) >> 3) & 7) {
1319 case 0: /* sgdt */
1320 case 1: /* sidt */
1321 goto invalid;
1322 case 2: /* lgdt */
1323 if (!lgdt(regs, prefix, modrm))
1324 goto invalid;
1325 return OPC_EMULATED;
1326 case 3: /* lidt */
1327 if (!lidt(regs, prefix, modrm))
1328 goto invalid;
1329 return OPC_EMULATED;
1330 case 4: /* smsw */
1331 goto invalid;
1332 case 5:
1333 goto invalid;
1334 case 6: /* lmsw */
1335 if (!lmsw(regs, prefix, modrm))
1336 goto invalid;
1337 return OPC_EMULATED;
1338 case 7: /* invlpg */
1339 goto invalid;
1341 break;
1342 case 0x09: /* wbinvd */
1343 return OPC_EMULATED;
1344 case 0x20: /* mov Rd, Cd (1h) */
1345 case 0x22:
1346 if (!movcr(regs, prefix, opc))
1347 goto invalid;
1348 return OPC_EMULATED;
1349 case 0x30: /* WRMSR */
1350 wrmsr(regs->ecx, regs->eax, regs->edx);
1351 return OPC_EMULATED;
1352 case 0x32: /* RDMSR */
1353 rdmsr(regs->ecx, regs->eax, regs->edx);
1354 return OPC_EMULATED;
1355 default:
1356 goto invalid;
1358 goto invalid;
1360 case 0x1F: /* pop %ds */
1361 regs->vds = (prefix & DATA32) ?
1362 pop32(regs) : pop16(regs);
1363 TRACE((regs, regs->eip - eip, "pop %%ds"));
1364 if (mode == VM86_REAL_TO_PROTECTED) {
1365 saved_rm_regs.vds = 0;
1366 oldctx.ds_sel = regs->vds;
1368 return OPC_EMULATED;
1370 case 0x26:
1371 TRACE((regs, regs->eip - eip, "%%es:"));
1372 prefix |= SEG_ES;
1373 continue;
1375 case 0x2E:
1376 TRACE((regs, regs->eip - eip, "%%cs:"));
1377 prefix |= SEG_CS;
1378 continue;
1380 case 0x36:
1381 TRACE((regs, regs->eip - eip, "%%ss:"));
1382 prefix |= SEG_SS;
1383 continue;
1385 case 0x39: /* addr32 cmp r16, r/m16 */
1386 case 0x3B: /* addr32 cmp r/m16, r16 */
1387 if (mode != VM86_REAL && mode != VM86_REAL_TO_PROTECTED)
1388 goto invalid;
1389 if ((prefix & ADDR32) == 0)
1390 goto invalid;
1391 if (!cmp(regs, prefix, opc))
1392 goto invalid;
1393 return OPC_EMULATED;
1395 case 0x3E:
1396 TRACE((regs, regs->eip - eip, "%%ds:"));
1397 prefix |= SEG_DS;
1398 continue;
1400 case 0x64:
1401 TRACE((regs, regs->eip - eip, "%%fs:"));
1402 prefix |= SEG_FS;
1403 continue;
1405 case 0x65:
1406 TRACE((regs, regs->eip - eip, "%%gs:"));
1407 prefix |= SEG_GS;
1408 continue;
1410 case 0x66:
1411 TRACE((regs, regs->eip - eip, "data32"));
1412 prefix |= DATA32;
1413 continue;
1415 case 0x67:
1416 TRACE((regs, regs->eip - eip, "addr32"));
1417 prefix |= ADDR32;
1418 continue;
1420 case 0x88: /* addr32 mov r8, r/m8 */
1421 case 0x8A: /* addr32 mov r/m8, r8 */
1422 if (mode != VM86_REAL && mode != VM86_REAL_TO_PROTECTED)
1423 goto invalid;
1424 if ((prefix & ADDR32) == 0)
1425 goto invalid;
1426 if (!movr(regs, prefix, opc))
1427 goto invalid;
1428 return OPC_EMULATED;
1430 case 0x89: /* addr32 mov r16, r/m16 */
1431 if (mode == VM86_PROTECTED_TO_REAL) {
1432 unsigned modrm = fetch8(regs);
1433 unsigned addr = operand(prefix, regs, modrm);
1434 unsigned val, r = (modrm >> 3) & 7;
1436 if (prefix & DATA32) {
1437 val = getreg16(regs, r);
1438 write32(addr, val);
1439 } else {
1440 val = getreg32(regs, r);
1441 write16(addr, MASK16(val));
1443 TRACE((regs, regs->eip - eip,
1444 "mov %%%s, *0x%x", rnames[r], addr));
1445 return OPC_EMULATED;
1447 case 0x8B: /* addr32 mov r/m16, r16 */
1448 if (mode != VM86_REAL && mode != VM86_REAL_TO_PROTECTED)
1449 goto invalid;
1450 if ((prefix & ADDR32) == 0)
1451 goto invalid;
1452 if (!movr(regs, prefix, opc))
1453 goto invalid;
1454 return OPC_EMULATED;
1456 case 0x8E: /* mov r16, sreg */
1457 if (!mov_to_seg(regs, prefix, opc))
1458 goto invalid;
1459 return OPC_EMULATED;
1461 case 0x8F: /* addr32 pop r/m16 */
1462 if ((prefix & ADDR32) == 0)
1463 goto invalid;
1464 if (!pop(regs, prefix, opc))
1465 goto invalid;
1466 return OPC_EMULATED;
1468 case 0x90: /* nop */
1469 TRACE((regs, regs->eip - eip, "nop"));
1470 return OPC_EMULATED;
1472 case 0x9C: /* pushf */
1473 TRACE((regs, regs->eip - eip, "pushf"));
1474 if (prefix & DATA32)
1475 push32(regs, regs->eflags & ~EFLAGS_VM);
1476 else
1477 push16(regs, regs->eflags & ~EFLAGS_VM);
1478 return OPC_EMULATED;
1480 case 0x9D: /* popf */
1481 TRACE((regs, regs->eip - eip, "popf"));
1482 if (prefix & DATA32)
1483 regs->eflags = pop32(regs);
1484 else
1485 regs->eflags = (regs->eflags & 0xFFFF0000L) |
1486 pop16(regs);
1487 regs->eflags |= EFLAGS_VM;
1488 return OPC_EMULATED;
1490 case 0xA1: /* mov ax, r/m16 */
1492 int addr, data;
1493 int seg = segment(prefix, regs, regs->vds);
1494 int offset = prefix & ADDR32? fetch32(regs) : fetch16(regs);
1496 if (prefix & DATA32) {
1497 addr = address(regs, seg, offset);
1498 data = read32(addr);
1499 setreg32(regs, 0, data);
1500 } else {
1501 addr = address(regs, seg, offset);
1502 data = read16(addr);
1503 setreg16(regs, 0, data);
1505 TRACE((regs, regs->eip - eip, "mov *0x%x, %%ax", addr));
1507 return OPC_EMULATED;
1509 case 0xBB: /* mov bx, imm16 */
1511 int data;
1512 if (prefix & DATA32) {
1513 data = fetch32(regs);
1514 setreg32(regs, 3, data);
1515 } else {
1516 data = fetch16(regs);
1517 setreg16(regs, 3, data);
1519 TRACE((regs, regs->eip - eip, "mov $0x%x, %%bx", data));
1521 return OPC_EMULATED;
1523 case 0xC6: /* addr32 movb $imm, r/m8 */
1524 if ((prefix & ADDR32) == 0)
1525 goto invalid;
1526 if (!movr(regs, prefix, opc))
1527 goto invalid;
1528 return OPC_EMULATED;
1530 case 0xCB: /* retl */
1531 if ((mode == VM86_REAL_TO_PROTECTED) ||
1532 (mode == VM86_PROTECTED_TO_REAL)) {
1533 retl(regs, prefix);
1534 return OPC_INVALID;
1536 goto invalid;
1538 case 0xCD: /* int $n */
1539 TRACE((regs, regs->eip - eip, "int"));
1540 interrupt(regs, fetch8(regs));
1541 return OPC_EMULATED;
1543 case 0xCF: /* iret */
1544 if (prefix & DATA32) {
1545 TRACE((regs, regs->eip - eip, "data32 iretd"));
1546 regs->eip = pop32(regs);
1547 regs->cs = pop32(regs);
1548 regs->eflags = pop32(regs);
1549 } else {
1550 TRACE((regs, regs->eip - eip, "iret"));
1551 regs->eip = pop16(regs);
1552 regs->cs = pop16(regs);
1553 regs->eflags = (regs->eflags & 0xFFFF0000L) |
1554 pop16(regs);
1556 return OPC_EMULATED;
1558 case 0xE4: /* inb al, port */
1559 if (!inbyte(regs, prefix, opc))
1560 goto invalid;
1561 return OPC_EMULATED;
1563 case 0xE6: /* outb port, al */
1564 if (!outbyte(regs, prefix, opc))
1565 goto invalid;
1566 return OPC_EMULATED;
1568 case 0xEA: /* jmpl */
1569 if ((mode == VM86_REAL_TO_PROTECTED) ||
1570 (mode == VM86_PROTECTED_TO_REAL)) {
1571 jmpl(regs, prefix);
1572 return OPC_INVALID;
1574 goto invalid;
1576 case 0xFF: /* jmpl (indirect) */
1578 unsigned modrm = fetch8(regs);
1579 switch((modrm >> 3) & 7) {
1580 case 5: /* jmpl (indirect) */
1581 if ((mode == VM86_REAL_TO_PROTECTED) ||
1582 (mode == VM86_PROTECTED_TO_REAL)) {
1583 jmpl_indirect(regs, prefix, modrm);
1584 return OPC_INVALID;
1586 goto invalid;
1588 case 6: /* push r/m16 */
1589 pushrm(regs, prefix, modrm);
1590 return OPC_EMULATED;
1592 default:
1593 goto invalid;
1597 case 0xEB: /* short jump */
1598 if ((mode == VM86_REAL_TO_PROTECTED) ||
1599 (mode == VM86_PROTECTED_TO_REAL)) {
1600 disp = (char) fetch8(regs);
1601 TRACE((regs, 2, "jmp 0x%x", regs->eip + disp));
1602 regs->eip += disp;
1603 return OPC_EMULATED;
1605 goto invalid;
1607 case 0xEC: /* inb al, (%dx) */
1608 if (!inbyte(regs, prefix, opc))
1609 goto invalid;
1610 return OPC_EMULATED;
1612 case 0xEE: /* outb (%dx), al */
1613 if (!outbyte(regs, prefix, opc))
1614 goto invalid;
1615 return OPC_EMULATED;
1617 case 0xF0: /* lock */
1618 TRACE((regs, regs->eip - eip, "lock"));
1619 continue;
1621 case 0xF6: /* addr32 testb $imm, r/m8 */
1622 if ((prefix & ADDR32) == 0)
1623 goto invalid;
1624 if (!test(regs, prefix, opc))
1625 goto invalid;
1626 return OPC_EMULATED;
1628 case 0xFA: /* cli */
1629 TRACE((regs, regs->eip - eip, "cli"));
1630 regs->eflags &= ~EFLAGS_IF;
1631 return OPC_EMULATED;
1633 case 0xFB: /* sti */
1634 TRACE((regs, regs->eip - eip, "sti"));
1635 regs->eflags |= EFLAGS_IF;
1636 return OPC_EMULATED;
1638 default:
1639 goto invalid;
1643 invalid:
1644 regs->eip = eip;
1645 TRACE((regs, regs->eip - eip, "opc 0x%x", opc));
1646 return OPC_INVALID;
1649 void
1650 emulate(struct regs *regs)
1652 unsigned flteip;
1653 int nemul = 0;
1655 /* emulate as many instructions as possible */
1656 while (opcode(regs) != OPC_INVALID)
1657 nemul++;
1659 /* detect the case where we are not making progress */
1660 if (nemul == 0 && prev_eip == regs->eip) {
1661 flteip = address(regs, MASK16(regs->cs), regs->eip);
1662 panic("Unknown opcode at %04x:%04x=0x%x",
1663 MASK16(regs->cs), regs->eip, flteip);
1664 } else
1665 prev_eip = regs->eip;
1668 void
1669 trap(int trapno, int errno, struct regs *regs)
1671 /* emulate device interrupts */
1672 if (trapno >= NR_EXCEPTION_HANDLER) {
1673 int irq = trapno - NR_EXCEPTION_HANDLER;
1674 if (irq < 8)
1675 interrupt(regs, irq + 8);
1676 else
1677 interrupt(regs, 0x70 + (irq - 8));
1678 return;
1681 switch (trapno) {
1682 case 1: /* Debug */
1683 if (regs->eflags & EFLAGS_VM) {
1684 /* emulate any 8086 instructions */
1685 if (mode != VM86_REAL_TO_PROTECTED)
1686 panic("not in real-to-protected mode");
1687 emulate(regs);
1688 return;
1690 goto invalid;
1692 case 13: /* GPF */
1693 if (regs->eflags & EFLAGS_VM) {
1694 /* emulate any 8086 instructions */
1695 if (mode == VM86_PROTECTED)
1696 panic("unexpected protected mode");
1697 emulate(regs);
1698 return;
1700 goto invalid;
1702 default:
1703 invalid:
1704 printf("Trap (0x%x) while in %s mode\n",
1705 trapno, regs->eflags & EFLAGS_VM ? "real" : "protected");
1706 if (trapno == 14)
1707 printf("Page fault address 0x%x\n", get_cr2());
1708 dump_regs(regs);
1709 halt();