ia64/xen-unstable

view xen/arch/x86/hvm/vmx/realmode.c @ 16989:92734271810a

vmx realmode: Emulate protected-mode transition while CS and SS have
bad selector values (bottom two bits non-zero).

Allows the openSUSE 10.3 install CD to boot. Unfortunately the SUSE Linux
10.1 install CD still fails to work...

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Feb 05 15:45:10 2008 +0000 (2008-02-05)
parents af5d189df051
children 199f81c4b882
line source
/******************************************************************************
 * arch/x86/hvm/vmx/realmode.c
 *
 * Real-mode emulation for VMX.
 *
 * Copyright (c) 2007 Citrix Systems, Inc.
 *
 * Authors:
 *    Keir Fraser <keir.fraser@citrix.com>
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/event.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/hvm/vmx/cpu.h>
#include <asm/x86_emulate.h>

struct realmode_emulate_ctxt {
    struct x86_emulate_ctxt ctxt;

    /* Cache of 16 bytes of instruction. */
    uint8_t insn_buf[16];
    unsigned long insn_buf_eip;

    struct segment_register seg_reg[10];

    union {
        struct {
            unsigned int hlt:1;
            unsigned int mov_ss:1;
            unsigned int sti:1;
        } flags;
        unsigned int flag_word;
    };

    uint8_t exn_vector;
    uint8_t exn_insn_len;

    uint32_t intr_shadow;
};

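/*
 * Deliver an interrupt or exception as real mode would: fetch CS:IP from
 * the IVT slot for 'vector', push FLAGS, CS and the return IP on the guest
 * stack, and clear TF/IF/RF. If the vector lies beyond the IDT limit, the
 * event is escalated to #GP, then #DF, and finally a triple fault.
 */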
static void realmode_deliver_exception(
    unsigned int vector,
    unsigned int insn_len,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    struct segment_register *idtr = &rm_ctxt->seg_reg[x86_seg_idtr];
    struct segment_register *csr = &rm_ctxt->seg_reg[x86_seg_cs];
    struct cpu_user_regs *regs = rm_ctxt->ctxt.regs;
    uint32_t cs_eip, pstk;
    uint16_t frame[3];
    unsigned int last_byte;

 again:
    last_byte = (vector * 4) + 3;
    if ( idtr->limit < last_byte )
    {
        /* Software interrupt? */
        if ( insn_len != 0 )
        {
            insn_len = 0;
            vector = TRAP_gp_fault;
            goto again;
        }

        /* Exception or hardware interrupt. */
        switch ( vector )
        {
        case TRAP_double_fault:
            hvm_triple_fault();
            return;
        case TRAP_gp_fault:
            vector = TRAP_double_fault;
            goto again;
        default:
            vector = TRAP_gp_fault;
            goto again;
        }
    }

    (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);

    frame[0] = regs->eip + insn_len;
    frame[1] = csr->sel;
    frame[2] = regs->eflags & ~X86_EFLAGS_RF;

    if ( rm_ctxt->ctxt.addr_size == 32 )
    {
        regs->esp -= 6;
        pstk = regs->esp;
    }
    else
    {
        pstk = (uint16_t)(regs->esp - 6);
        regs->esp &= ~0xffff;
        regs->esp |= pstk;
    }

    pstk += rm_ctxt->seg_reg[x86_seg_ss].base;
    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));

    csr->sel  = cs_eip >> 16;
    csr->base = (uint32_t)csr->sel << 4;
    regs->eip = (uint16_t)cs_eip;
    regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);

    /* Exception delivery clears STI and MOV-SS blocking. */
    if ( rm_ctxt->intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
    {
        rm_ctxt->intr_shadow &= ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, rm_ctxt->intr_shadow);
    }
}

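/* Real-mode address translation: linear address = segment base + offset. */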
static uint32_t virtual_to_linear(
    enum x86_segment seg,
    uint32_t offset,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    uint32_t addr = offset;
    if ( seg == x86_seg_none )
        return addr;
    ASSERT(is_x86_user_segment(seg));
    return addr + rm_ctxt->seg_reg[seg].base;
}

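/*
 * Memory read for the emulator. A direct copy from guest RAM is tried
 * first; if that fails the access is treated as MMIO and forwarded to the
 * device model, returning X86EMUL_RETRY until the response with the read
 * data arrives via vmx_realmode_io_complete().
 */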
static int
realmode_read(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    enum hvm_access_type access_type,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    uint32_t addr = virtual_to_linear(seg, offset, rm_ctxt);

    *val = 0;

    if ( hvm_copy_from_guest_virt_nofault(val, addr, bytes) )
    {
        struct vcpu *curr = current;

        if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
            return X86EMUL_UNHANDLEABLE;

        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            return X86EMUL_UNHANDLEABLE;

        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        {
            curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
            send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes,
                          0, IOREQ_READ, 0, 0);
        }

        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
            return X86EMUL_RETRY;

        *val = curr->arch.hvm_vmx.real_mode_io_data;
        curr->arch.hvm_vmx.real_mode_io_completed = 0;
    }

    return X86EMUL_OKAY;
}

static int
realmode_emulate_read(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    return realmode_read(
        seg, offset, val, bytes, hvm_access_read,
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt));
}

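/*
 * Instruction fetches are normally satisfied from the 16-byte prefetch
 * cache filled by realmode_emulate_one(); only fetches falling outside the
 * cached window go through the generic read path.
 */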
static int
realmode_emulate_insn_fetch(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    unsigned int insn_off = offset - rm_ctxt->insn_buf_eip;

    /* Fall back if requested bytes are not in the prefetch cache. */
    if ( unlikely((insn_off + bytes) > sizeof(rm_ctxt->insn_buf)) )
        return realmode_read(
            seg, offset, val, bytes,
            hvm_access_insn_fetch, rm_ctxt);

    /* Hit the cache. Simple memcpy. */
    *val = 0;
    memcpy(val, &rm_ctxt->insn_buf[insn_off], bytes);
    return X86EMUL_OKAY;
}

static int
realmode_emulate_write(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    uint32_t addr = virtual_to_linear(seg, offset, rm_ctxt);

    if ( hvm_copy_to_guest_virt_nofault(addr, &val, bytes) )
    {
        struct vcpu *curr = current;

        if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
            return X86EMUL_UNHANDLEABLE;

        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            return X86EMUL_UNHANDLEABLE;

        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes,
                      val, IOREQ_WRITE, 0, 0);
    }

    return X86EMUL_OKAY;
}

static int
realmode_emulate_cmpxchg(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long old,
    unsigned long new,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    /* Fix this in case the guest is really relying on r-m-w atomicity. */
    return realmode_emulate_write(seg, offset, new, bytes, ctxt);
}

static int
realmode_rep_ins(
    uint16_t src_port,
    enum x86_segment dst_seg,
    unsigned long dst_offset,
    unsigned int bytes_per_rep,
    unsigned long *reps,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    struct vcpu *curr = current;
    uint32_t paddr = virtual_to_linear(dst_seg, dst_offset, rm_ctxt);

    if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
        return X86EMUL_UNHANDLEABLE;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
    {
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_pio_req(src_port, *reps, bytes_per_rep,
                     paddr, IOREQ_READ,
                     !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);
    }

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        return X86EMUL_RETRY;

    curr->arch.hvm_vmx.real_mode_io_completed = 0;

    return X86EMUL_OKAY;
}

static int
realmode_rep_outs(
    enum x86_segment src_seg,
    unsigned long src_offset,
    uint16_t dst_port,
    unsigned int bytes_per_rep,
    unsigned long *reps,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    struct vcpu *curr = current;
    uint32_t paddr = virtual_to_linear(src_seg, src_offset, rm_ctxt);

    if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
        return X86EMUL_UNHANDLEABLE;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
    send_pio_req(dst_port, *reps, bytes_per_rep,
                 paddr, IOREQ_WRITE,
                 !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);

    return X86EMUL_OKAY;
}

static int
realmode_read_segment(
    enum x86_segment seg,
    struct segment_register *reg,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    memcpy(reg, &rm_ctxt->seg_reg[seg], sizeof(struct segment_register));
    return X86EMUL_OKAY;
}

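/*
 * Segment loads are tracked so we know when the vcpu can run natively
 * again. Loading CS or SS with a selector whose RPL (bottom two bits) is
 * non-zero leaves a state that would fail the VMX guest-state checks, so
 * the VMXEMUL_BAD_CS/SS flags keep instruction emulation going until the
 * selectors are reloaded with clean values.
 */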
static int
realmode_write_segment(
    enum x86_segment seg,
    struct segment_register *reg,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    struct vcpu *curr = current;

    if ( seg == x86_seg_cs )
    {
        if ( reg->attr.fields.dpl != 0 )
            return X86EMUL_UNHANDLEABLE;
        curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_CS;
        if ( reg->sel & 3 )
            curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_CS;
    }

    if ( seg == x86_seg_ss )
    {
        if ( reg->attr.fields.dpl != 0 )
            return X86EMUL_UNHANDLEABLE;
        curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_SS;
        if ( reg->sel & 3 )
            curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_SS;
        rm_ctxt->flags.mov_ss = 1;
    }

    memcpy(&rm_ctxt->seg_reg[seg], reg, sizeof(struct segment_register));

    return X86EMUL_OKAY;
}

static int
realmode_read_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
    {
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_pio_req(port, 1, bytes, 0, IOREQ_READ, 0, 0);
    }

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        return X86EMUL_RETRY;

    *val = curr->arch.hvm_vmx.real_mode_io_data;
    curr->arch.hvm_vmx.real_mode_io_completed = 0;

    return X86EMUL_OKAY;
}

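/*
 * Writes to port 0xE9 are logged via hvm_print_line() rather than being
 * forwarded to the device model; all other ports go out as PIO requests.
 */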
static int realmode_write_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;

    if ( port == 0xe9 )
    {
        hvm_print_line(curr, val);
        return X86EMUL_OKAY;
    }

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
    send_pio_req(port, 1, bytes, val, IOREQ_WRITE, 0, 0);

    return X86EMUL_OKAY;
}

static int
realmode_read_cr(
    unsigned int reg,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt)
{
    switch ( reg )
    {
    case 0:
    case 2:
    case 3:
    case 4:
        *val = current->arch.hvm_vcpu.guest_cr[reg];
        break;
    default:
        return X86EMUL_UNHANDLEABLE;
    }

    return X86EMUL_OKAY;
}

static int
realmode_write_cr(
    unsigned int reg,
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    switch ( reg )
    {
    case 0:
        if ( !hvm_set_cr0(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    case 2:
        current->arch.hvm_vcpu.guest_cr[2] = val;
        break;
    case 3:
        if ( !hvm_set_cr3(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    case 4:
        if ( !hvm_set_cr4(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    default:
        return X86EMUL_UNHANDLEABLE;
    }

    return X86EMUL_OKAY;
}

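/*
 * MSR accesses are forwarded to the normal VMX intercept handlers. If the
 * handler injects a fault, the pending event is pulled back out of
 * VM_ENTRY_INTR_INFO and reported as X86EMUL_EXCEPTION so that it is
 * delivered through the real-mode path instead.
 */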
static int
realmode_read_msr(
    unsigned long reg,
    uint64_t *val,
    struct x86_emulate_ctxt *ctxt)
{
    struct cpu_user_regs _regs;

    _regs.ecx = (uint32_t)reg;

    if ( !vmx_msr_read_intercept(&_regs) )
    {
        struct realmode_emulate_ctxt *rm_ctxt =
            container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
        rm_ctxt->exn_vector = (uint8_t)__vmread(VM_ENTRY_INTR_INFO);
        rm_ctxt->exn_insn_len = 0;
        __vmwrite(VM_ENTRY_INTR_INFO, 0);
        return X86EMUL_EXCEPTION;
    }

    /* Combine EDX:EAX with bitwise OR (logical '||' would yield 0 or 1). */
    *val = ((uint64_t)(uint32_t)_regs.edx << 32) | (uint32_t)_regs.eax;
    return X86EMUL_OKAY;
}

static int
realmode_write_msr(
    unsigned long reg,
    uint64_t val,
    struct x86_emulate_ctxt *ctxt)
{
    struct cpu_user_regs _regs;

    _regs.edx = (uint32_t)(val >> 32);
    _regs.eax = (uint32_t)val;
    _regs.ecx = (uint32_t)reg;

    if ( !vmx_msr_write_intercept(&_regs) )
    {
        struct realmode_emulate_ctxt *rm_ctxt =
            container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
        rm_ctxt->exn_vector = (uint8_t)__vmread(VM_ENTRY_INTR_INFO);
        rm_ctxt->exn_insn_len = 0;
        __vmwrite(VM_ENTRY_INTR_INFO, 0);
        return X86EMUL_EXCEPTION;
    }

    return X86EMUL_OKAY;
}

static int realmode_write_rflags(
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    if ( (val & X86_EFLAGS_IF) && !(ctxt->regs->eflags & X86_EFLAGS_IF) )
        rm_ctxt->flags.sti = 1;
    return X86EMUL_OKAY;
}

static int realmode_wbinvd(
    struct x86_emulate_ctxt *ctxt)
{
    vmx_wbinvd_intercept();
    return X86EMUL_OKAY;
}

static int realmode_cpuid(
    unsigned int *eax,
    unsigned int *ebx,
    unsigned int *ecx,
    unsigned int *edx,
    struct x86_emulate_ctxt *ctxt)
{
    vmx_cpuid_intercept(eax, ebx, ecx, edx);
    return X86EMUL_OKAY;
}

static int realmode_hlt(
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    rm_ctxt->flags.hlt = 1;
    return X86EMUL_OKAY;
}

static int realmode_inject_hw_exception(
    uint8_t vector,
    uint16_t error_code,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);

    /* We don't emulate protected-mode exception delivery. */
    if ( current->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
        return X86EMUL_UNHANDLEABLE;

    if ( error_code != 0 )
        return X86EMUL_UNHANDLEABLE;

    rm_ctxt->exn_vector = vector;
    rm_ctxt->exn_insn_len = 0;

    return X86EMUL_OKAY;
}

static int realmode_inject_sw_interrupt(
    uint8_t vector,
    uint8_t insn_len,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);

    /* We don't emulate protected-mode exception delivery. */
    if ( current->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
        return X86EMUL_UNHANDLEABLE;

    rm_ctxt->exn_vector = vector;
    rm_ctxt->exn_insn_len = insn_len;

    return X86EMUL_OKAY;
}

static void realmode_load_fpu_ctxt(
    struct x86_emulate_ctxt *ctxt)
{
    if ( !current->fpu_dirtied )
        vmx_do_no_device_fault();
}

static struct x86_emulate_ops realmode_emulator_ops = {
    .read          = realmode_emulate_read,
    .insn_fetch    = realmode_emulate_insn_fetch,
    .write         = realmode_emulate_write,
    .cmpxchg       = realmode_emulate_cmpxchg,
    .rep_ins       = realmode_rep_ins,
    .rep_outs      = realmode_rep_outs,
    .read_segment  = realmode_read_segment,
    .write_segment = realmode_write_segment,
    .read_io       = realmode_read_io,
    .write_io      = realmode_write_io,
    .read_cr       = realmode_read_cr,
    .write_cr      = realmode_write_cr,
    .read_msr      = realmode_read_msr,
    .write_msr     = realmode_write_msr,
    .write_rflags  = realmode_write_rflags,
    .wbinvd        = realmode_wbinvd,
    .cpuid         = realmode_cpuid,
    .hlt           = realmode_hlt,
    .inject_hw_exception = realmode_inject_hw_exception,
    .inject_sw_interrupt = realmode_inject_sw_interrupt,
    .load_fpu_ctxt = realmode_load_fpu_ctxt
};

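/*
 * Emulate a single instruction: prefetch up to 16 bytes at CS:EIP, run
 * x86_emulate() over them, then fold the result back into the interrupt
 * shadow, pending-exception and HLT state. X86EMUL_RETRY means an I/O
 * request is outstanding and the same instruction will be retried once the
 * device model has responded.
 */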
static void realmode_emulate_one(struct realmode_emulate_ctxt *rm_ctxt)
{
    struct cpu_user_regs *regs = rm_ctxt->ctxt.regs;
    struct vcpu *curr = current;
    u32 new_intr_shadow;
    int rc, io_completed;
    unsigned long addr;

    rm_ctxt->ctxt.addr_size =
        rm_ctxt->seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
    rm_ctxt->ctxt.sp_size =
        rm_ctxt->seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;

    rm_ctxt->insn_buf_eip = (uint32_t)regs->eip;
    addr = virtual_to_linear(x86_seg_cs, regs->eip, rm_ctxt);
    if ( hvm_fetch_from_guest_virt_nofault(rm_ctxt->insn_buf, addr,
                                           sizeof(rm_ctxt->insn_buf))
         != HVMCOPY_okay )
    {
        gdprintk(XENLOG_ERR, "Failed to pre-fetch instruction bytes.\n");
        goto fail;
    }

    rm_ctxt->flag_word = 0;

    io_completed = curr->arch.hvm_vmx.real_mode_io_completed;
    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
    {
        gdprintk(XENLOG_ERR, "I/O in progress before insn is emulated.\n");
        goto fail;
    }

    rc = x86_emulate(&rm_ctxt->ctxt, &realmode_emulator_ops);

    if ( curr->arch.hvm_vmx.real_mode_io_completed )
    {
        gdprintk(XENLOG_ERR, "I/O completion after insn is emulated.\n");
        goto fail;
    }

    if ( rc == X86EMUL_UNHANDLEABLE )
    {
        gdprintk(XENLOG_ERR, "Failed to emulate insn.\n");
        goto fail;
    }

    if ( rc == X86EMUL_RETRY )
    {
        BUG_ON(!curr->arch.hvm_vmx.real_mode_io_in_progress);
        if ( !io_completed )
            return;
        gdprintk(XENLOG_ERR, "Multiple I/O reads in a single insn.\n");
        goto fail;
    }

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress &&
         (get_ioreq(curr)->vp_ioreq.dir == IOREQ_READ) )
    {
        gdprintk(XENLOG_ERR, "I/O read in progress but insn is retired.\n");
        goto fail;
    }

    new_intr_shadow = rm_ctxt->intr_shadow;

    /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
    if ( rm_ctxt->flags.mov_ss )
        new_intr_shadow ^= VMX_INTR_SHADOW_MOV_SS;
    else
        new_intr_shadow &= ~VMX_INTR_SHADOW_MOV_SS;

    /* STI instruction toggles STI shadow, else we just clear it. */
    if ( rm_ctxt->flags.sti )
        new_intr_shadow ^= VMX_INTR_SHADOW_STI;
    else
        new_intr_shadow &= ~VMX_INTR_SHADOW_STI;

    /* Update interrupt shadow information in VMCS only if it changes. */
    if ( rm_ctxt->intr_shadow != new_intr_shadow )
    {
        rm_ctxt->intr_shadow = new_intr_shadow;
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, rm_ctxt->intr_shadow);
    }

    if ( rc == X86EMUL_EXCEPTION )
    {
        realmode_deliver_exception(
            rm_ctxt->exn_vector, rm_ctxt->exn_insn_len, rm_ctxt);
    }
    else if ( rm_ctxt->flags.hlt && !hvm_local_events_need_delivery(curr) )
    {
        hvm_hlt(regs->eflags);
    }

    return;

 fail:
    gdprintk(XENLOG_ERR,
             "Real-mode emulation failed @ %04x:%08lx: "
             "%02x %02x %02x %02x %02x %02x\n",
             rm_ctxt->seg_reg[x86_seg_cs].sel, rm_ctxt->insn_buf_eip,
             rm_ctxt->insn_buf[0], rm_ctxt->insn_buf[1],
             rm_ctxt->insn_buf[2], rm_ctxt->insn_buf[3],
             rm_ctxt->insn_buf[4], rm_ctxt->insn_buf[5]);
    domain_crash_synchronous();
}

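/*
 * Main entry point, called on VM exit while the vcpu requires emulation
 * (real mode, or a protected-mode transition with bad CS/SS selectors).
 * Instructions are emulated one at a time until the guest can execute
 * natively again, a softirq is pending, or an I/O request must first be
 * serviced by the device model.
 */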
void vmx_realmode(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct realmode_emulate_ctxt rm_ctxt;
    unsigned long intr_info = __vmread(VM_ENTRY_INTR_INFO);
    int i;

    rm_ctxt.ctxt.regs = regs;

    for ( i = 0; i < 10; i++ )
        hvm_get_segment_register(curr, i, &rm_ctxt.seg_reg[i]);

    rm_ctxt.intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress ||
         curr->arch.hvm_vmx.real_mode_io_completed )
        realmode_emulate_one(&rm_ctxt);

    /* Only deliver interrupts into emulated real mode. */
    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
         (intr_info & INTR_INFO_VALID_MASK) )
    {
        realmode_deliver_exception((uint8_t)intr_info, 0, &rm_ctxt);
        __vmwrite(VM_ENTRY_INTR_INFO, 0);
    }

    while ( curr->arch.hvm_vmx.vmxemul &&
            !softirq_pending(smp_processor_id()) &&
            !curr->arch.hvm_vmx.real_mode_io_in_progress &&
            /* Check for pending interrupts only in proper real mode. */
            ((curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
             !hvm_local_events_need_delivery(curr)) )
        realmode_emulate_one(&rm_ctxt);

    if ( !curr->arch.hvm_vmx.vmxemul )
    {
        /*
         * Cannot enter protected mode with bogus selector RPLs and DPLs.
         * At this point CS.RPL == SS.RPL == CS.DPL == SS.DPL == 0. For
         * DS, ES, FS and GS the most uninvasive trick is to set DPL == RPL.
         */
        rm_ctxt.seg_reg[x86_seg_ds].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_ds].sel & 3;
        rm_ctxt.seg_reg[x86_seg_es].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_es].sel & 3;
        rm_ctxt.seg_reg[x86_seg_fs].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_fs].sel & 3;
        rm_ctxt.seg_reg[x86_seg_gs].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_gs].sel & 3;
    }

    for ( i = 0; i < 10; i++ )
        hvm_set_segment_register(curr, i, &rm_ctxt.seg_reg[i]);
}

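/*
 * Called when the device model completes an I/O request issued by the
 * emulator above. Returns 1 if the response was consumed here (for reads
 * the data is stashed for the pending instruction retry), or 0 if no
 * real-mode I/O was outstanding.
 */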
int vmx_realmode_io_complete(void)
{
    struct vcpu *curr = current;
    ioreq_t *p = &get_ioreq(curr)->vp_ioreq;

    if ( !curr->arch.hvm_vmx.real_mode_io_in_progress )
        return 0;

    curr->arch.hvm_vmx.real_mode_io_in_progress = 0;
    if ( p->dir == IOREQ_READ )
    {
        curr->arch.hvm_vmx.real_mode_io_completed = 1;
        curr->arch.hvm_vmx.real_mode_io_data = p->data;
    }

    return 1;
}