
xen/arch/x86/hvm/vmx/realmode.c @ 18788:07d0be88571f

hvm: fix single stepping on debugger

The debuggee domain would die with an unexpected trap when
single-stepping over an emulated instruction.

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Nov 11 11:47:03 2008 +0000 (2008-11-11)
parents 92d0e13b0ea3
children 6595393a3d28
/******************************************************************************
 * arch/x86/hvm/vmx/realmode.c
 *
 * Real-mode emulation for VMX.
 *
 * Copyright (c) 2007-2008 Citrix Systems, Inc.
 *
 * Authors:
 *    Keir Fraser <keir.fraser@citrix.com>
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/paging.h>
#include <asm/event.h>
#include <asm/hvm/emulate.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>

static void realmode_deliver_exception(
    unsigned int vector,
    unsigned int insn_len,
    struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    struct segment_register *idtr, *csr;
    struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
    uint32_t cs_eip, pstk;
    uint16_t frame[3];
    unsigned int last_byte;

    idtr = hvmemul_get_seg_reg(x86_seg_idtr, hvmemul_ctxt);
    csr = hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
    __set_bit(x86_seg_cs, &hvmemul_ctxt->seg_reg_dirty);
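
    /*
     * Escalate when the vector's entry lies beyond the IVT limit: a software
     * INT becomes #GP, a faulting #GP becomes #DF, and a faulting #DF brings
     * the machine down with a triple fault, as on real hardware.
     */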
 again:
    last_byte = (vector * 4) + 3;
    if ( idtr->limit < last_byte )
    {
        /* Software interrupt? */
        if ( insn_len != 0 )
        {
            insn_len = 0;
            vector = TRAP_gp_fault;
            goto again;
        }

        /* Exception or hardware interrupt. */
        switch ( vector )
        {
        case TRAP_double_fault:
            hvm_triple_fault();
            return;
        case TRAP_gp_fault:
            vector = TRAP_double_fault;
            goto again;
        default:
            vector = TRAP_gp_fault;
            goto again;
        }
    }
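
    /* Fetch the handler's CS:IP from the real-mode IVT (4 bytes per vector). */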
    (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);

    frame[0] = regs->eip + insn_len;
    frame[1] = csr->sel;
    frame[2] = regs->eflags & ~X86_EFLAGS_RF;

    /* We can't test hvmemul_ctxt->ctxt.sp_size: it may not be initialised. */
    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db )
    {
        regs->esp -= 6;
        pstk = regs->esp;
    }
    else
    {
        pstk = (uint16_t)(regs->esp - 6);
        regs->esp &= ~0xffff;
        regs->esp |= pstk;
    }

    pstk += hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->base;
    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));
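
    /* Load the handler's CS:IP; a real-mode segment base is simply sel << 4. */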
    csr->sel = cs_eip >> 16;
    csr->base = (uint32_t)csr->sel << 4;
    regs->eip = (uint16_t)cs_eip;
    regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);

    /* Exception delivery clears STI and MOV-SS blocking. */
    if ( hvmemul_ctxt->intr_shadow &
         (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
    {
        hvmemul_ctxt->intr_shadow &=
            ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, hvmemul_ctxt->intr_shadow);
    }
}

static void realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    struct vcpu *curr = current;
    unsigned long seg_reg_dirty;
    uint32_t intr_info;
    int rc;

    seg_reg_dirty = hvmemul_ctxt->seg_reg_dirty;
    hvmemul_ctxt->seg_reg_dirty = 0;

    rc = hvm_emulate_one(hvmemul_ctxt);
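
    /*
     * Track whether the instruction left CS or SS with a non-zero RPL. Such
     * selectors presumably cannot satisfy the VMX guest-entry checks, so the
     * BAD_CS/BAD_SS flags keep the emulation loop running until they are
     * reloaded with sane values.
     */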
    if ( test_bit(x86_seg_cs, &hvmemul_ctxt->seg_reg_dirty) )
    {
        curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_CS;
        if ( hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt)->sel & 3 )
            curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_CS;
    }

    if ( test_bit(x86_seg_ss, &hvmemul_ctxt->seg_reg_dirty) )
    {
        curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_SS;
        if ( hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->sel & 3 )
            curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_SS;
    }

    hvmemul_ctxt->seg_reg_dirty |= seg_reg_dirty;

    if ( rc == X86EMUL_UNHANDLEABLE )
    {
        gdprintk(XENLOG_ERR, "Failed to emulate insn.\n");
        goto fail;
    }

    if ( rc == X86EMUL_EXCEPTION )
    {
        if ( !hvmemul_ctxt->exn_pending )
        {
            intr_info = __vmread(VM_ENTRY_INTR_INFO);
            __vmwrite(VM_ENTRY_INTR_INFO, 0);
            if ( !(intr_info & INTR_INFO_VALID_MASK) )
            {
                gdprintk(XENLOG_ERR, "Exception pending but no info.\n");
                goto fail;
            }
            hvmemul_ctxt->exn_vector = (uint8_t)intr_info;
            hvmemul_ctxt->exn_insn_len = 0;
        }
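
        /*
         * This changeset's fix: when a debugger is attached, a #DB or #BP
         * raised by the emulated instruction is handed to the debugger
         * instead of being injected into the guest, which would otherwise
         * die on the unexpected trap while being single-stepped.
         */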
        if ( unlikely(curr->domain->debugger_attached) &&
             ((hvmemul_ctxt->exn_vector == TRAP_debug) ||
              (hvmemul_ctxt->exn_vector == TRAP_int3)) )
        {
            domain_pause_for_debugger();
        }
        else if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
        {
            gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
                     hvmemul_ctxt->exn_vector);
            goto fail;
        }
        else
        {
            realmode_deliver_exception(
                hvmemul_ctxt->exn_vector,
                hvmemul_ctxt->exn_insn_len,
                hvmemul_ctxt);
        }
    }

    return;

 fail:
    gdprintk(XENLOG_ERR,
             "Real-mode emulation failed @ %04x:%08lx: "
             "%02x %02x %02x %02x %02x %02x\n",
             hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt)->sel,
             hvmemul_ctxt->insn_buf_eip,
             hvmemul_ctxt->insn_buf[0], hvmemul_ctxt->insn_buf[1],
             hvmemul_ctxt->insn_buf[2], hvmemul_ctxt->insn_buf[3],
             hvmemul_ctxt->insn_buf[4], hvmemul_ctxt->insn_buf[5]);
    domain_crash(curr->domain);
}

void vmx_realmode(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct hvm_emulate_ctxt hvmemul_ctxt;
    struct segment_register *sreg;
    unsigned long intr_info;
    unsigned int emulations = 0;

    /* Get-and-clear VM_ENTRY_INTR_INFO. */
    intr_info = __vmread(VM_ENTRY_INTR_INFO);
    if ( intr_info & INTR_INFO_VALID_MASK )
        __vmwrite(VM_ENTRY_INTR_INFO, 0);

    hvm_emulate_prepare(&hvmemul_ctxt, regs);
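
    /*
     * If an I/O request issued during an earlier emulation pass has now
     * completed, re-run the emulator so the instruction that was waiting on
     * it can consume the result.
     */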
    if ( curr->arch.hvm_vcpu.io_state == HVMIO_completed )
        realmode_emulate_one(&hvmemul_ctxt);

    /* Only deliver interrupts into emulated real mode. */
    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
         (intr_info & INTR_INFO_VALID_MASK) )
    {
        realmode_deliver_exception((uint8_t)intr_info, 0, &hvmemul_ctxt);
        intr_info = 0;
    }
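
    /*
     * Emulate until the hardware can run the guest directly again, stopping
     * early if a softirq needs servicing or an instruction starts an I/O
     * request that cannot complete immediately.
     */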
    while ( curr->arch.hvm_vmx.vmxemul &&
            !softirq_pending(smp_processor_id()) &&
            (curr->arch.hvm_vcpu.io_state == HVMIO_none) )
    {
        /*
         * Check for pending interrupts only every 16 instructions, because
         * hvm_local_events_need_delivery() is moderately expensive, and only
         * in real mode, because we don't emulate protected-mode IDT vectoring.
         */
        if ( unlikely(!(++emulations & 15)) &&
             !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
             hvm_local_events_need_delivery(curr) )
            break;
        realmode_emulate_one(&hvmemul_ctxt);
    }

    if ( !curr->arch.hvm_vmx.vmxemul )
    {
        /*
         * Cannot enter protected mode with bogus selector RPLs and DPLs.
         * At this point CS.RPL == SS.RPL == CS.DPL == SS.DPL == 0. For
         * DS, ES, FS and GS the most uninvasive trick is to set DPL == RPL.
         */
        sreg = hvmemul_get_seg_reg(x86_seg_ds, &hvmemul_ctxt);
        sreg->attr.fields.dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_es, &hvmemul_ctxt);
        sreg->attr.fields.dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_fs, &hvmemul_ctxt);
        sreg->attr.fields.dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_gs, &hvmemul_ctxt);
        sreg->attr.fields.dpl = sreg->sel & 3;
        hvmemul_ctxt.seg_reg_dirty |=
            (1ul << x86_seg_ds) | (1ul << x86_seg_es) |
            (1ul << x86_seg_fs) | (1ul << x86_seg_gs);
    }

    hvm_emulate_writeback(&hvmemul_ctxt);

    /* Re-instate VM_ENTRY_INTR_INFO if we did not discharge it. */
    if ( intr_info & INTR_INFO_VALID_MASK )
        __vmwrite(VM_ENTRY_INTR_INFO, intr_info);
}