direct-io.hg

view tools/firmware/vmxassist/vm86.c @ 7477:5a7baecb1c70

Fix an issue with passing arguments from the control panel to the device model:
for some arguments, such as 'localtime' and 'isa', the device model needs the
bare flag "-localtime" rather than "-localtime 1".
Signed-off-by: Xiaofeng Ling <xiaofeng.ling@intel.com>
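For example, flag-style options must appear bare on the device model command
line, while valued options (here -m, shown only for illustration) keep their
argument:

    qemu-dm -m 128 -localtime -isa ...        correct: bare flags
    qemu-dm -m 128 -localtime 1 -isa 1 ...    wrong: stray "1" arguments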
author kaf24@firebug.cl.cam.ac.uk
date Sun Oct 23 16:51:47 2005 +0100 (2005-10-23)
parents e398a9797c4c
children f1b361b05bf3
line source
1 /*
2 * vm86.c: A vm86 emulator. The main purpose of this emulator is to do as
3 * little work as possible.
4 *
5 * Leendert van Doorn, leendert@watson.ibm.com
6 * Copyright (c) 2005, International Business Machines Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place - Suite 330, Boston, MA 02111-1307 USA.
20 */
21 #include "vm86.h"
22 #include "util.h"
23 #include "machine.h"
25 #define HIGHMEM (1 << 20) /* 1MB */
26 #define MASK16(v) ((v) & 0xFFFF)
28 #define DATA32 0x0001
29 #define ADDR32 0x0002
30 #define SEG_CS 0x0004
31 #define SEG_DS 0x0008
32 #define SEG_ES 0x0010
33 #define SEG_SS 0x0020
34 #define SEG_FS 0x0040
35 #define SEG_GS 0x0080
37 unsigned prev_eip = 0;
38 enum vm86_mode mode;
40 #ifdef DEBUG
41 int traceset = 0;
43 char *states[] = {
44 "<VM86_REAL>",
45 "<VM86_REAL_TO_PROTECTED>",
46 "<VM86_PROTECTED_TO_REAL>",
47 "<VM86_PROTECTED>"
48 };
49 #endif /* DEBUG */
52 unsigned
53 address(struct regs *regs, unsigned seg, unsigned off)
54 {
55 unsigned long long entry;
56 unsigned addr;
58 /* real mode: segment is part of the address */
59 if (mode == VM86_REAL || mode == VM86_REAL_TO_PROTECTED)
60 return ((seg & 0xFFFF) << 4) + off;
62 /* protected mode: use seg as index into gdt */
63 if (seg > oldctx.gdtr_limit) {
64 printf("address: Invalid segment descriptor (0x%x)\n", seg);
65 return 0;
66 }
68 entry = ((unsigned long long *) oldctx.gdtr_base)[seg >> 3];
69 addr = (((entry >> (56-24)) & 0xFF000000) |
70 ((entry >> (32-16)) & 0x00FF0000) |
71 ((entry >> ( 16)) & 0x0000FFFF)) + off;
72 return addr;
73 }
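/*
 * Illustrative worked example of the decode above: the three shift/mask
 * terms reassemble the base address scattered across a GDT descriptor
 * (base[31:24] in bits 63:56, base[23:16] in bits 39:32, base[15:0] in
 * bits 31:16).  For entry = 0x12CF9A345678FFFF:
 *
 *   (entry >> 32) & 0xFF000000  = 0x12000000
 *   (entry >> 16) & 0x00FF0000  = 0x00340000
 *   (entry >> 16) & 0x0000FFFF  = 0x00005678
 *   base                        = 0x12345678
 */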
75 #ifdef DEBUG
76 void
77 trace(struct regs *regs, int adjust, char *fmt, ...)
78 {
79 unsigned off = regs->eip - adjust;
80 va_list ap;
82 if ((traceset & (1 << mode)) &&
83 (mode == VM86_REAL_TO_PROTECTED || mode == VM86_REAL)) {
84 /* 16-bit, seg:off addressing */
85 unsigned addr = address(regs, regs->cs, off);
86 printf("0x%08x: 0x%x:0x%04x ", addr, regs->cs, off);
87 printf("(%d) ", mode);
88 va_start(ap, fmt);
89 vprintf(fmt, ap);
90 va_end(ap);
91 printf("\n");
92 }
93 if ((traceset & (1 << mode)) &&
94 (mode == VM86_PROTECTED_TO_REAL || mode == VM86_PROTECTED)) {
95 /* 16-bit, gdt addressing */
96 unsigned addr = address(regs, regs->cs, off);
97 printf("0x%08x: 0x%x:0x%08x ", addr, regs->cs, off);
98 printf("(%d) ", mode);
99 va_start(ap, fmt);
100 vprintf(fmt, ap);
101 va_end(ap);
102 printf("\n");
103 }
104 }
105 #endif /* DEBUG */
107 static inline unsigned
108 read32(unsigned addr)
109 {
110 return *(unsigned long *) addr;
111 }
113 static inline unsigned
114 read16(unsigned addr)
115 {
116 return *(unsigned short *) addr;
117 }
119 static inline unsigned
120 read8(unsigned addr)
121 {
122 return *(unsigned char *) addr;
123 }
125 static inline void
126 write32(unsigned addr, unsigned value)
127 {
128 *(unsigned long *) addr = value;
129 }
131 static inline void
132 write16(unsigned addr, unsigned value)
133 {
134 *(unsigned short *) addr = value;
135 }
137 static inline void
138 write8(unsigned addr, unsigned value)
139 {
140 *(unsigned char *) addr = value;
141 }
143 static inline void
144 push32(struct regs *regs, unsigned value)
145 {
146 regs->uesp -= 4;
147 write32(address(regs, regs->uss, MASK16(regs->uesp)), value);
148 }
150 static inline void
151 push16(struct regs *regs, unsigned value)
152 {
153 regs->uesp -= 2;
154 write16(address(regs, regs->uss, MASK16(regs->uesp)), value);
155 }
157 static inline unsigned
158 pop32(struct regs *regs)
159 {
160 unsigned value = read32(address(regs, regs->uss, MASK16(regs->uesp)));
161 regs->uesp += 4;
162 return value;
163 }
165 static inline unsigned
166 pop16(struct regs *regs)
167 {
168 unsigned value = read16(address(regs, regs->uss, MASK16(regs->uesp)));
169 regs->uesp += 2;
170 return value;
171 }
173 static inline unsigned
174 fetch32(struct regs *regs)
175 {
176 unsigned addr = address(regs, regs->cs, MASK16(regs->eip));
178 regs->eip += 4;
179 return read32(addr);
180 }
182 static inline unsigned
183 fetch16(struct regs *regs)
184 {
185 unsigned addr = address(regs, regs->cs, MASK16(regs->eip));
187 regs->eip += 2;
188 return read16(addr);
189 }
191 static inline unsigned
192 fetch8(struct regs *regs)
193 {
194 unsigned addr = address(regs, regs->cs, MASK16(regs->eip));
196 regs->eip++;
197 return read8(addr);
198 }
200 unsigned
201 getreg(struct regs *regs, int r)
202 {
203 switch (r & 7) {
204 case 0: return regs->eax;
205 case 1: return regs->ecx;
206 case 2: return regs->edx;
207 case 3: return regs->ebx;
208 case 4: return regs->esp;
209 case 5: return regs->ebp;
210 case 6: return regs->esi;
211 case 7: return regs->edi;
212 }
213 return ~0;
214 }
216 void
217 setreg(struct regs *regs, int r, unsigned v)
218 {
219 switch (r & 7) {
220 case 0: regs->eax = v; break;
221 case 1: regs->ecx = v; break;
222 case 2: regs->edx = v; break;
223 case 3: regs->ebx = v; break;
224 case 4: regs->esp = v; break;
225 case 5: regs->ebp = v; break;
226 case 6: regs->esi = v; break;
227 case 7: regs->edi = v; break;
228 }
229 }
231 /*
232 * Operand (modrm) decode
233 */
234 unsigned
235 operand(unsigned prefix, struct regs *regs, unsigned modrm)
236 {
237 int mod, disp = 0, seg;
239 seg = regs->vds;
240 if (prefix & SEG_ES)
241 seg = regs->ves;
242 if (prefix & SEG_DS)
243 seg = regs->vds;
244 if (prefix & SEG_CS)
245 seg = regs->cs;
246 if (prefix & SEG_SS)
247 seg = regs->uss;
248 if (prefix & SEG_FS)
249 seg = regs->fs;
250 if (prefix & SEG_GS)
251 seg = regs->gs;
253 if (prefix & ADDR32) { /* 32-bit addressing */
254 switch ((mod = (modrm >> 6) & 3)) {
255 case 0:
256 switch (modrm & 7) {
257 case 0: return address(regs, seg, regs->eax);
258 case 1: return address(regs, seg, regs->ecx);
259 case 2: return address(regs, seg, regs->edx);
260 case 3: return address(regs, seg, regs->ebx);
261 case 4: panic("No SIB decode (yet)");
262 case 5: return address(regs, seg, fetch32(regs));
263 case 6: return address(regs, seg, regs->esi);
264 case 7: return address(regs, seg, regs->edi);
265 }
266 break;
267 case 1:
268 case 2:
269 if ((modrm & 7) != 4) {
270 if (mod == 1)
271 disp = (char) fetch8(regs);
272 else
273 disp = (int) fetch32(regs);
274 }
275 switch (modrm & 7) {
276 case 0: return address(regs, seg, regs->eax + disp);
277 case 1: return address(regs, seg, regs->ecx + disp);
278 case 2: return address(regs, seg, regs->edx + disp);
279 case 3: return address(regs, seg, regs->ebx + disp);
280 case 4: panic("No SIB decode (yet)");
281 case 5: return address(regs, seg, regs->ebp + disp);
282 case 6: return address(regs, seg, regs->esi + disp);
283 case 7: return address(regs, seg, regs->edi + disp);
284 }
285 break;
286 case 3:
287 return getreg(regs, modrm);
288 }
289 } else { /* 16-bit addressing */
290 switch ((mod = (modrm >> 6) & 3)) {
291 case 0:
292 switch (modrm & 7) {
293 case 0: return address(regs, seg, MASK16(regs->ebx) +
294 MASK16(regs->esi));
295 case 1: return address(regs, seg, MASK16(regs->ebx) +
296 MASK16(regs->edi));
297 case 2: return address(regs, seg, MASK16(regs->ebp) +
298 MASK16(regs->esi));
299 case 3: return address(regs, seg, MASK16(regs->ebp) +
300 MASK16(regs->edi));
301 case 4: return address(regs, seg, MASK16(regs->esi));
302 case 5: return address(regs, seg, MASK16(regs->edi));
303 case 6: return address(regs, seg, fetch16(regs));
304 case 7: return address(regs, seg, MASK16(regs->ebx));
305 }
306 break;
307 case 1:
308 case 2:
309 if (mod == 1)
310 disp = (char) fetch8(regs);
311 else
312 disp = (int) fetch16(regs);
313 switch (modrm & 7) {
314 case 0: return address(regs, seg, MASK16(regs->ebx) +
315 MASK16(regs->esi) + disp);
316 case 1: return address(regs, seg, MASK16(regs->ebx) +
317 MASK16(regs->edi) + disp);
318 case 2: return address(regs, seg, MASK16(regs->ebp) +
319 MASK16(regs->esi) + disp);
320 case 3: return address(regs, seg, MASK16(regs->ebp) +
321 MASK16(regs->edi) + disp);
322 case 4: return address(regs, seg,
323 MASK16(regs->esi) + disp);
324 case 5: return address(regs, seg,
325 MASK16(regs->edi) + disp);
326 case 6: return address(regs, seg,
327 MASK16(regs->ebp) + disp);
328 case 7: return address(regs, seg,
329 MASK16(regs->ebx) + disp);
330 }
331 break;
332 case 3:
333 return MASK16(getreg(regs, modrm));
334 }
335 }
337 return 0;
338 }
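/*
 * Illustrative 16-bit decode examples (assuming no ADDR32 prefix):
 *   modrm = 0x47 -> mod=1, rm=7: a disp8 follows, effective address is
 *                   seg:(bx + disp8)
 *   modrm = 0x06 -> mod=0, rm=6: a 16-bit displacement follows and is
 *                   used directly, i.e. seg:disp16
 *   modrm = 0xC3 -> mod=3: no memory operand; the value of %bx, masked
 *                   to 16 bits, is returned instead
 */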
340 /*
341 * Load new IDT
342 */
343 int
344 lidt(struct regs *regs, unsigned prefix, unsigned modrm)
345 {
346 unsigned eip = regs->eip - 3;
347 unsigned addr = operand(prefix, regs, modrm);
349 oldctx.idtr_limit = ((struct dtr *) addr)->size;
350 if ((prefix & DATA32) == 0)
351 oldctx.idtr_base = ((struct dtr *) addr)->base & 0xFFFFFF;
352 else
353 oldctx.idtr_base = ((struct dtr *) addr)->base;
354 TRACE((regs, regs->eip - eip, "lidt 0x%x <%d, 0x%x>",
355 addr, oldctx.idtr_limit, oldctx.idtr_base));
357 return 1;
358 }
360 /*
361 * Load new GDT
362 */
363 int
364 lgdt(struct regs *regs, unsigned prefix, unsigned modrm)
365 {
366 unsigned eip = regs->eip - 3;
367 unsigned addr = operand(prefix, regs, modrm);
369 oldctx.gdtr_limit = ((struct dtr *) addr)->size;
370 if ((prefix & DATA32) == 0)
371 oldctx.gdtr_base = ((struct dtr *) addr)->base & 0xFFFFFF;
372 else
373 oldctx.gdtr_base = ((struct dtr *) addr)->base;
374 TRACE((regs, regs->eip - eip, "lgdt 0x%x <%d, 0x%x>",
375 addr, oldctx.gdtr_limit, oldctx.gdtr_base));
377 return 1;
378 }
380 /*
381 * Modify CR0 through an lmsw instruction.
382 */
383 int
384 lmsw(struct regs *regs, unsigned prefix, unsigned modrm)
385 {
386 unsigned eip = regs->eip - 3;
387 unsigned ax = operand(prefix, regs, modrm) & 0xF;
388 unsigned cr0 = (oldctx.cr0 & 0xFFFFFFF0) | ax;
390 TRACE((regs, regs->eip - eip, "lmsw 0x%x", ax));
391 #ifndef TEST
392 oldctx.cr0 = cr0 | CR0_PE | CR0_NE;
393 #else
394 oldctx.cr0 = cr0 | CR0_PE | CR0_NE | CR0_PG;
395 #endif
396 if (cr0 & CR0_PE)
397 set_mode(regs, VM86_REAL_TO_PROTECTED);
399 return 1;
400 }
402 /*
403 * Move to and from a control register.
404 */
405 int
406 movcr(struct regs *regs, unsigned prefix, unsigned opc)
407 {
408 unsigned eip = regs->eip - 2;
409 unsigned modrm = fetch8(regs);
410 unsigned cr = (modrm >> 3) & 7;
412 if ((modrm & 0xC0) != 0xC0) /* only registers */
413 return 0;
415 switch (opc) {
416 case 0x20: /* mov Rd, Cd */
417 TRACE((regs, regs->eip - eip, "movl %%cr%d, %%eax", cr));
418 switch (cr) {
419 case 0:
420 #ifndef TEST
421 setreg(regs, modrm,
422 oldctx.cr0 & ~(CR0_PE | CR0_NE));
423 #else
424 setreg(regs, modrm,
425 oldctx.cr0 & ~(CR0_PE | CR0_NE | CR0_PG));
426 #endif
427 break;
428 case 2:
429 setreg(regs, modrm, get_cr2());
430 break;
431 case 3:
432 setreg(regs, modrm, oldctx.cr3);
433 break;
434 case 4:
435 setreg(regs, modrm, oldctx.cr4);
436 break;
437 }
438 break;
439 case 0x22: /* mov Cd, Rd */
440 TRACE((regs, regs->eip - eip, "movl %%eax, %%cr%d", cr));
441 switch (cr) {
442 case 0:
443 oldctx.cr0 = getreg(regs, modrm) | (CR0_PE | CR0_NE);
444 #ifdef TEST
445 oldctx.cr0 |= CR0_PG;
446 #endif
447 if (getreg(regs, modrm) & CR0_PE)
448 set_mode(regs, VM86_REAL_TO_PROTECTED);
450 break;
451 case 3:
452 oldctx.cr3 = getreg(regs, modrm);
453 break;
454 case 4:
455 oldctx.cr4 = getreg(regs, modrm);
456 break;
457 }
458 break;
459 }
461 return 1;
462 }
464 /*
465 * Emulate a segment load in protected mode
466 */
467 int
468 load_seg(unsigned long sel, uint32_t *base, uint32_t *limit, union vmcs_arbytes *arbytes)
469 {
470 unsigned long long entry;
472 /* protected mode: use seg as index into gdt */
473 if (sel > oldctx.gdtr_limit)
474 return 0;
476 if (sel == 0) {
477 arbytes->fields.null_bit = 1;
478 return 1;
479 }
481 entry = ((unsigned long long *) oldctx.gdtr_base)[sel >> 3];
483 /* Check the P bit first */
484 if (!((entry >> (15+32)) & 0x1) && sel != 0) {
485 return 0;
486 }
488 *base = (((entry >> (56-24)) & 0xFF000000) |
489 ((entry >> (32-16)) & 0x00FF0000) |
490 ((entry >> ( 16)) & 0x0000FFFF));
491 *limit = (((entry >> (48-16)) & 0x000F0000) |
492 ((entry ) & 0x0000FFFF));
494 arbytes->bytes = 0;
495 arbytes->fields.seg_type = (entry >> (8+32)) & 0xF; /* TYPE */
496 arbytes->fields.s = (entry >> (12+32)) & 0x1; /* S */
497 if (arbytes->fields.s)
498 arbytes->fields.seg_type |= 1; /* accessed */
499 arbytes->fields.dpl = (entry >> (13+32)) & 0x3; /* DPL */
500 arbytes->fields.p = (entry >> (15+32)) & 0x1; /* P */
501 arbytes->fields.avl = (entry >> (20+32)) & 0x1; /* AVL */
502 arbytes->fields.default_ops_size = (entry >> (22+32)) & 0x1; /* D */
504 if (entry & (1ULL << (23+32))) { /* G */
505 arbytes->fields.g = 1;
506 *limit = (*limit << 12) | 0xFFF;
507 }
509 return 1;
510 }
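/*
 * Illustrative example of the granularity handling above: for a raw
 * 20-bit limit of 0x0F423 with the G bit set, the limit becomes
 * (0x0F423 << 12) | 0xFFF = 0x0F423FFF, i.e. the highest byte offset
 * addressable within the segment.
 */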
512 /*
513 * Transition to protected mode
514 */
515 void
516 protected_mode(struct regs *regs)
517 {
518 regs->eflags &= ~(EFLAGS_TF|EFLAGS_VM);
520 oldctx.eip = regs->eip;
521 oldctx.esp = regs->uesp;
522 oldctx.eflags = regs->eflags;
524 /* reload all segment registers */
525 if (!load_seg(regs->cs, &oldctx.cs_base,
526 &oldctx.cs_limit, &oldctx.cs_arbytes))
527 panic("Invalid %%cs=0x%x for protected mode\n", regs->cs);
528 oldctx.cs_sel = regs->cs;
530 if (load_seg(regs->ves, &oldctx.es_base,
531 &oldctx.es_limit, &oldctx.es_arbytes))
532 oldctx.es_sel = regs->ves;
533 else {
534 load_seg(0, &oldctx.es_base,&oldctx.es_limit, &oldctx.es_arbytes);
535 oldctx.es_sel = 0;
536 }
538 if (load_seg(regs->uss, &oldctx.ss_base,
539 &oldctx.ss_limit, &oldctx.ss_arbytes))
540 oldctx.ss_sel = regs->uss;
541 else {
542 load_seg(0, &oldctx.ss_base, &oldctx.ss_limit, &oldctx.ss_arbytes);
543 oldctx.ss_sel = 0;
544 }
546 if (load_seg(regs->vds, &oldctx.ds_base,
547 &oldctx.ds_limit, &oldctx.ds_arbytes))
548 oldctx.ds_sel = regs->vds;
549 else {
550 load_seg(0, &oldctx.ds_base, &oldctx.ds_limit, &oldctx.ds_arbytes);
551 oldctx.ds_sel = 0;
552 }
554 if (load_seg(regs->vfs, &oldctx.fs_base,
555 &oldctx.fs_limit, &oldctx.fs_arbytes))
556 oldctx.fs_sel = regs->vfs;
557 else {
558 load_seg(0, &oldctx.fs_base, &oldctx.fs_limit, &oldctx.fs_arbytes);
559 oldctx.fs_sel = 0;
560 }
562 if (load_seg(regs->vgs, &oldctx.gs_base,
563 &oldctx.gs_limit, &oldctx.gs_arbytes))
564 oldctx.gs_sel = regs->vgs;
565 else {
566 load_seg(0, &oldctx.gs_base, &oldctx.gs_limit, &oldctx.gs_arbytes);
567 oldctx.gs_sel = 0;
568 }
570 /* initialize jump environment to warp back to protected mode */
571 regs->cs = CODE_SELECTOR;
572 regs->ds = DATA_SELECTOR;
573 regs->es = DATA_SELECTOR;
574 regs->fs = DATA_SELECTOR;
575 regs->gs = DATA_SELECTOR;
576 regs->eip = (unsigned) &switch_to_protected_mode;
578 /* this should get us into 32-bit mode */
579 }
581 /*
582 * Start real-mode emulation
583 */
584 void
585 real_mode(struct regs *regs)
586 {
587 regs->eflags |= EFLAGS_VM | 0x02;
588 regs->ds = DATA_SELECTOR;
589 regs->es = DATA_SELECTOR;
590 regs->fs = DATA_SELECTOR;
591 regs->gs = DATA_SELECTOR;
593 /*
594 * When we transition from protected to real-mode and we
595 * have not reloaded the segment descriptors yet, they are
596 * interpreted as if they were in protected mode.
597 * We emulate this behavior by assuming that these memory
598 * reference are below 1MB and set %ss, %ds, %es accordingly.
599 */
600 if (regs->uss != 0) {
601 if (regs->uss >= HIGHMEM)
602 panic("%%ss 0x%lx higher than 1MB", regs->uss);
603 regs->uss = address(regs, regs->uss, 0) >> 4;
604 }
605 if (regs->vds != 0) {
606 if (regs->vds >= HIGHMEM)
607 panic("%%ds 0x%lx higher than 1MB", regs->vds);
608 regs->vds = address(regs, regs->vds, 0) >> 4;
609 }
610 if (regs->ves != 0) {
611 if (regs->ves >= HIGHMEM)
612 panic("%%es 0x%lx higher than 1MB", regs->ves);
613 regs->ves = address(regs, regs->ves, 0) >> 4;
614 }
616 /* this should get us into 16-bit mode */
617 }
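/*
 * Illustrative example: at this point %ss still holds a protected-mode
 * selector, so address(regs, regs->uss, 0) returns that descriptor's
 * base (say 0x00008000); shifting it right by 4 yields 0x0800, the
 * real-mode segment value that addresses the same memory.
 */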
619 /*
620 * This is the smarts of the emulator and handles the mode transitions. The
621 * emulator handles 4 different modes. 1) VM86_REAL: emulated real mode; just
622 * handle those instructions that are not supported under VM8086.
623 * 2) VM86_REAL_TO_PROTECTED: going from real mode to protected mode. In this
624 * mode we single step through the instructions until we reload the new %cs
625 * (some OSes do a lot of computations before reloading %cs).
626 * 3) VM86_PROTECTED_TO_REAL: going from protected mode back to real mode. In
627 * this case we emulate the instructions by hand. Finally, 4) VM86_PROTECTED:
628 * we have transitioned to protected mode and should abandon the emulator. No
629 * instructions are emulated when in VM86_PROTECTED mode.
630 */
631 void
632 set_mode(struct regs *regs, enum vm86_mode newmode)
633 {
634 switch (newmode) {
635 case VM86_REAL:
636 if (mode == VM86_PROTECTED_TO_REAL) {
637 real_mode(regs);
638 break;
639 } else if (mode == VM86_REAL) {
640 break;
641 } else
642 panic("unexpected real mode transition");
643 break;
645 case VM86_REAL_TO_PROTECTED:
646 if (mode == VM86_REAL) {
647 regs->eflags |= EFLAGS_TF;
648 break;
649 } else if (mode == VM86_REAL_TO_PROTECTED) {
650 break;
651 } else
652 panic("unexpected real-to-protected mode transition");
653 break;
655 case VM86_PROTECTED_TO_REAL:
656 if (mode == VM86_PROTECTED)
657 break;
658 else
659 panic("unexpected protected-to-real mode transition");
661 case VM86_PROTECTED:
662 if (mode == VM86_REAL_TO_PROTECTED) {
663 protected_mode(regs);
664 break;
665 } else
666 panic("unexpected protected mode transition");
667 break;
668 }
670 mode = newmode;
671 TRACE((regs, 0, states[mode]));
672 }
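/*
 * Typical transition sequences driven by the switch above (a sketch; the
 * initial switch to VM86_PROTECTED_TO_REAL is triggered outside this file):
 *
 *   VM86_REAL --(lmsw/mov %cr0 sets PE)--> VM86_REAL_TO_PROTECTED
 *             --(far jmp/ret reloads %cs)--> VM86_PROTECTED
 *
 *   VM86_PROTECTED_TO_REAL --(far jmp/ret reloads %cs)--> VM86_REAL
 */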
674 void
675 jmpl(struct regs *regs, int prefix)
676 {
677 unsigned n = regs->eip;
678 unsigned cs, eip;
680 if (mode == VM86_REAL_TO_PROTECTED) { /* jump to protected mode */
681 eip = (prefix & DATA32) ? fetch32(regs) : fetch16(regs);
682 cs = fetch16(regs);
684 TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
686 regs->cs = cs;
687 regs->eip = eip;
688 set_mode(regs, VM86_PROTECTED);
689 } else if (mode == VM86_PROTECTED_TO_REAL) { /* jump to real mode */
690 eip = (prefix & DATA32) ? fetch32(regs) : fetch16(regs);
691 cs = fetch16(regs);
693 TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));
695 regs->cs = cs;
696 regs->eip = eip;
697 set_mode(regs, VM86_REAL);
698 } else
699 panic("jmpl");
700 }
702 void
703 retl(struct regs *regs, int prefix)
704 {
705 unsigned cs, eip;
707 if (prefix & DATA32) {
708 eip = pop32(regs);
709 cs = MASK16(pop32(regs));
710 } else {
711 eip = pop16(regs);
712 cs = pop16(regs);
713 }
715 TRACE((regs, 1, "retl (to 0x%x:0x%x)", cs, eip));
717 if (mode == VM86_REAL_TO_PROTECTED) { /* jump to protected mode */
718 regs->cs = cs;
719 regs->eip = eip;
720 set_mode(regs, VM86_PROTECTED);
721 } else if (mode == VM86_PROTECTED_TO_REAL) { /* jump to real mode */
722 regs->cs = cs;
723 regs->eip = eip;
724 set_mode(regs, VM86_REAL);
725 } else
726 panic("retl");
727 }
729 void
730 interrupt(struct regs *regs, int n)
731 {
732 TRACE((regs, 0, "external interrupt %d", n));
733 push16(regs, regs->eflags);
734 push16(regs, regs->cs);
735 push16(regs, regs->eip);
736 regs->eflags &= ~EFLAGS_IF;
737 regs->eip = read16(address(regs, 0, n * 4));
738 regs->cs = read16(address(regs, 0, n * 4 + 2));
739 }
741 enum { OPC_INVALID, OPC_EMULATED };
743 /*
744 * Emulate a single instruction, including all its prefixes. We only implement
745 * a small subset of the opcodes, and not all opcodes are implemented for each
746 * of the four modes we can operate in.
747 */
748 int
749 opcode(struct regs *regs)
750 {
751 unsigned eip = regs->eip;
752 unsigned opc, modrm, disp;
753 unsigned prefix = 0;
755 for (;;) {
756 switch ((opc = fetch8(regs))) {
757 case 0x0F: /* two byte opcode */
758 if (mode == VM86_PROTECTED)
759 goto invalid;
760 switch ((opc = fetch8(regs))) {
761 case 0x01:
762 switch (((modrm = fetch8(regs)) >> 3) & 7) {
763 case 0: /* sgdt */
764 case 1: /* sidt */
765 goto invalid;
766 case 2: /* lgdt */
767 if (!lgdt(regs, prefix, modrm))
768 goto invalid;
769 return OPC_EMULATED;
770 case 3: /* lidt */
771 if (!lidt(regs, prefix, modrm))
772 goto invalid;
773 return OPC_EMULATED;
774 case 4: /* smsw */
775 goto invalid;
776 case 5:
777 goto invalid;
778 case 6: /* lmsw */
779 if (!lmsw(regs, prefix, modrm))
780 goto invalid;
781 return OPC_EMULATED;
782 case 7: /* invlpg */
783 goto invalid;
784 }
785 break;
786 case 0x09: /* wbinvd */
787 return OPC_EMULATED;
788 case 0x20: /* mov Rd, Cd (1h) */
789 case 0x22:
790 if (!movcr(regs, prefix, opc))
791 goto invalid;
792 return OPC_EMULATED;
793 default:
794 goto invalid;
795 }
796 goto invalid;
798 case 0x26:
799 TRACE((regs, regs->eip - eip, "%%es:"));
800 prefix |= SEG_ES;
801 continue;
803 case 0x2E:
804 TRACE((regs, regs->eip - eip, "%%cs:"));
805 prefix |= SEG_CS;
806 continue;
808 case 0x36:
809 TRACE((regs, regs->eip - eip, "%%ss:"));
810 prefix |= SEG_SS;
811 continue;
813 case 0x3E:
814 TRACE((regs, regs->eip - eip, "%%ds:"));
815 prefix |= SEG_DS;
816 continue;
818 case 0x64:
819 TRACE((regs, regs->eip - eip, "%%fs:"));
820 prefix |= SEG_FS;
821 continue;
823 case 0x65:
824 TRACE((regs, regs->eip - eip, "%%gs:"));
825 prefix |= SEG_GS;
826 continue;
828 case 0x66:
829 TRACE((regs, regs->eip - eip, "data32"));
830 prefix |= DATA32;
831 continue;
833 case 0x67:
834 TRACE((regs, regs->eip - eip, "addr32"));
835 prefix |= ADDR32;
836 continue;
838 case 0x90: /* nop */
839 TRACE((regs, regs->eip - eip, "nop"));
840 return OPC_EMULATED;
842 case 0x9C: /* pushf */
843 TRACE((regs, regs->eip - eip, "pushf"));
844 if (prefix & DATA32)
845 push32(regs, regs->eflags & ~EFLAGS_VM);
846 else
847 push16(regs, regs->eflags & ~EFLAGS_VM);
848 return OPC_EMULATED;
850 case 0x9D: /* popf */
851 TRACE((regs, regs->eip - eip, "popf"));
852 if (prefix & DATA32)
853 regs->eflags = pop32(regs);
854 else
855 regs->eflags = (regs->eflags & 0xFFFF0000L) |
856 pop16(regs);
857 regs->eflags |= EFLAGS_VM;
858 return OPC_EMULATED;
860 case 0xCB: /* retl */
861 if ((mode == VM86_REAL_TO_PROTECTED) ||
862 (mode == VM86_PROTECTED_TO_REAL)) {
863 retl(regs, prefix);
864 return OPC_EMULATED;
865 }
866 goto invalid;
868 case 0xCD: /* int $n */
869 TRACE((regs, regs->eip - eip, "int"));
870 interrupt(regs, fetch8(regs));
871 return OPC_EMULATED;
873 case 0xCF: /* iret */
874 if (prefix & DATA32) {
875 TRACE((regs, regs->eip - eip, "data32 iretd"));
876 regs->eip = pop32(regs);
877 regs->cs = pop32(regs);
878 regs->eflags = pop32(regs);
879 } else {
880 TRACE((regs, regs->eip - eip, "iret"));
881 regs->eip = pop16(regs);
882 regs->cs = pop16(regs);
883 regs->eflags = (regs->eflags & 0xFFFF0000L) |
884 pop16(regs);
885 }
886 return OPC_EMULATED;
888 case 0xEA: /* jmpl */
889 if ((mode == VM86_REAL_TO_PROTECTED) ||
890 (mode == VM86_PROTECTED_TO_REAL)) {
891 jmpl(regs, prefix);
892 return OPC_EMULATED;
893 }
894 goto invalid;
896 case 0xEB: /* short jump */
897 if ((mode == VM86_REAL_TO_PROTECTED) ||
898 (mode == VM86_PROTECTED_TO_REAL)) {
899 disp = (char) fetch8(regs);
900 TRACE((regs, 2, "jmp 0x%x", regs->eip + disp));
901 regs->eip += disp;
902 return OPC_EMULATED;
903 }
904 goto invalid;
906 case 0xF0: /* lock */
907 TRACE((regs, regs->eip - eip, "lock"));
908 continue;
910 case 0xFA: /* cli */
911 TRACE((regs, regs->eip - eip, "cli"));
912 regs->eflags &= ~EFLAGS_IF;
913 return OPC_EMULATED;
915 case 0xFB: /* sti */
916 TRACE((regs, regs->eip - eip, "sti"));
917 regs->eflags |= EFLAGS_IF;
918 return OPC_EMULATED;
920 default:
921 goto invalid;
922 }
923 }
925 invalid:
926 regs->eip = eip;
927 return OPC_INVALID;
928 }
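/*
 * Illustrative decode of a prefixed instruction handled above (while a
 * mode transition is in progress): the byte sequence 66 EA 78 56 34 12
 * 08 00 first accumulates the DATA32 prefix (0x66), then dispatches 0xEA
 * to jmpl(), which fetches the 32-bit offset 0x12345678 and the 16-bit
 * selector 0x0008.
 */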
930 void
931 emulate(struct regs *regs)
932 {
933 unsigned flteip;
934 int nemul = 0;
936 /* emulate as many instructions as possible */
937 while (opcode(regs) != OPC_INVALID)
938 nemul++;
940 /* detect the case where we are not making progress */
941 if (nemul == 0 && prev_eip == regs->eip) {
942 flteip = address(regs, MASK16(regs->cs), regs->eip);
943 panic("Unknown opcode at %04x:%04x=0x%x",
944 MASK16(regs->cs), regs->eip, flteip);
945 } else
946 prev_eip = regs->eip;
947 }
949 void
950 trap(int trapno, int errno, struct regs *regs)
951 {
952 /* emulate device interrupts */
953 if (trapno >= NR_EXCEPTION_HANDLER) {
954 int irq = trapno - NR_EXCEPTION_HANDLER;
955 if (irq < 8)
956 interrupt(regs, irq + 8);
957 else
958 interrupt(regs, 0x70 + (irq - 8));
959 return;
960 }
962 switch (trapno) {
963 case 1: /* Debug */
964 if (regs->eflags & EFLAGS_VM) {
965 /* emulate any 8086 instructions */
966 if (mode != VM86_REAL_TO_PROTECTED)
967 panic("not in real-to-protected mode");
968 emulate(regs);
969 return;
970 }
971 goto invalid;
973 case 13: /* GPF */
974 if (regs->eflags & EFLAGS_VM) {
975 /* emulate any 8086 instructions */
976 if (mode == VM86_PROTECTED)
977 panic("unexpected protected mode");
978 emulate(regs);
979 return;
980 }
981 goto invalid;
983 default:
984 invalid:
985 printf("Trap (0x%x) while in %s mode\n",
986 trapno, regs->eflags & EFLAGS_VM ? "real" : "protected");
987 if (trapno == 14)
988 printf("Page fault address 0x%x\n", get_cr2());
989 dump_regs(regs);
990 halt();
991 }
992 }
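/*
 * Illustrative IRQ mapping performed above: trapno values at or above
 * NR_EXCEPTION_HANDLER are hardware interrupts, remapped to the
 * conventional PC vectors -- IRQ 0..7 to vectors 8..0x0F and IRQ 8..15
 * to vectors 0x70..0x77 -- before being reflected into the guest's
 * real-mode IVT by interrupt().
 */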