xen-unstable: xen/arch/x86/hvm/vmx/realmode.c @ 16482:4d6f92fa1014

vmx realmode: Emulate writes to control registers.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Nov 26 17:55:23 2007 +0000 (2007-11-26)
parents 4deb65519d9b
children f9a43c6b5be1
/******************************************************************************
 * arch/x86/hvm/vmx/realmode.c
 *
 * Real-mode emulation for VMX.
 *
 * Copyright (c) 2007 Citrix Systems, Inc.
 *
 * Authors:
 *    Keir Fraser <keir.fraser@citrix.com>
 */
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/event.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/hvm/vmx/cpu.h>
#include <asm/x86_emulate.h>
struct realmode_emulate_ctxt {
    struct x86_emulate_ctxt ctxt;

    /* Cache of 16 bytes of instruction. */
    uint8_t insn_buf[16];
    unsigned long insn_buf_eip;

    struct segment_register seg_reg[10];

    union {
        struct {
            unsigned int hlt:1;
            unsigned int mov_ss:1;
            unsigned int sti:1;
            unsigned int exn_raised:1;
        } flags;
        unsigned int flag_word;
    };
};
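/*
 * Note: flag_word aliases the flags bitfield so the emulation loop can
 * clear every flag in one store (rm_ctxt.flag_word = 0) before each
 * x86_emulate() call.
 */

/*
 * Deliver an interrupt or exception through the real-mode IVT: fetch the
 * vector's CS:IP pair from idtr->base, push FLAGS/CS/IP on the guest
 * stack, and load the new CS:IP. A software interrupt (insn_len != 0)
 * whose vector lies beyond the IDT limit is converted to #GP; exceptions
 * and hardware interrupts escalate along the architectural path
 * #GP -> #DF -> triple fault.
 */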
static void realmode_deliver_exception(
    unsigned int vector,
    unsigned int insn_len,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    struct segment_register *idtr = &rm_ctxt->seg_reg[x86_seg_idtr];
    struct segment_register *csr = &rm_ctxt->seg_reg[x86_seg_cs];
    struct cpu_user_regs *regs = rm_ctxt->ctxt.regs;
    uint32_t cs_eip, pstk;
    uint16_t frame[3];
    unsigned int last_byte;

 again:
    last_byte = (vector * 4) + 3;
    if ( idtr->limit < last_byte )
    {
        /* Software interrupt? */
        if ( insn_len != 0 )
        {
            insn_len = 0;
            vector = TRAP_gp_fault;
            goto again;
        }

        /* Exception or hardware interrupt. */
        switch ( vector )
        {
        case TRAP_double_fault:
            hvm_triple_fault();
            return;
        case TRAP_gp_fault:
            vector = TRAP_double_fault;
            goto again;
        default:
            vector = TRAP_gp_fault;
            goto again;
        }
    }

    (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);

    frame[0] = regs->eip + insn_len;
    frame[1] = csr->sel;
    frame[2] = regs->eflags & ~X86_EFLAGS_RF;

    if ( rm_ctxt->ctxt.addr_size == 32 )
    {
        regs->esp -= 6;
        pstk = regs->esp;
    }
    else
    {
        pstk = (uint16_t)(regs->esp - 6);
        regs->esp &= ~0xffff;
        regs->esp |= pstk;
    }

    pstk += rm_ctxt->seg_reg[x86_seg_ss].base;
    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));

    csr->sel = cs_eip >> 16;
    csr->base = (uint32_t)csr->sel << 4;
    regs->eip = (uint16_t)cs_eip;
    regs->eflags &= ~(X86_EFLAGS_AC | X86_EFLAGS_TF |
                      X86_EFLAGS_IF | X86_EFLAGS_RF);
}
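/*
 * Read through a real-mode segment. Fast path: the linear address maps to
 * RAM and hvm_copy_from_guest_phys() satisfies the whole access. Slow
 * path: the address is MMIO, so an ioreq is forwarded to the device model
 * and the instruction is retried once vmx_realmode_io_complete() has
 * latched the returned data in real_mode_io_data.
 */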
static int
realmode_read(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    enum hvm_access_type access_type,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;
    int todo;

    *val = 0;
    todo = hvm_copy_from_guest_phys(val, addr, bytes);

    if ( todo )
    {
        struct vcpu *curr = current;

        if ( todo != bytes )
        {
            gdprintk(XENLOG_WARNING, "RM: Partial read at %08x (%d/%d)\n",
                     addr, todo, bytes);
            return X86EMUL_UNHANDLEABLE;
        }

        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            return X86EMUL_UNHANDLEABLE;

        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        {
            curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
            send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes,
                          0, IOREQ_READ, 0, 0);
        }

        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
            return X86EMUL_UNHANDLEABLE;

        *val = curr->arch.hvm_vmx.real_mode_io_data;
        curr->arch.hvm_vmx.real_mode_io_completed = 0;
    }

    return X86EMUL_OKAY;
}

static int
realmode_emulate_read(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    return realmode_read(
        seg, offset, val, bytes, hvm_access_read,
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt));
}
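/*
 * Instruction fetches are normally satisfied from the 16-byte prefetch
 * buffer that vmx_realmode() fills at insn_buf_eip; only fetches that run
 * off the end of the buffer fall back to a full realmode_read().
 */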
static int
realmode_emulate_insn_fetch(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    unsigned int insn_off = offset - rm_ctxt->insn_buf_eip;

    /* Fall back if requested bytes are not in the prefetch cache. */
    if ( unlikely((insn_off + bytes) > sizeof(rm_ctxt->insn_buf)) )
        return realmode_read(
            seg, offset, val, bytes,
            hvm_access_insn_fetch, rm_ctxt);

    /* Hit the cache. Simple memcpy. */
    *val = 0;
    memcpy(val, &rm_ctxt->insn_buf[insn_off], bytes);
    return X86EMUL_OKAY;
}
static int
realmode_emulate_write(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;
    int todo;

    todo = hvm_copy_to_guest_phys(addr, &val, bytes);

    if ( todo )
    {
        struct vcpu *curr = current;

        if ( todo != bytes )
        {
            gdprintk(XENLOG_WARNING, "RM: Partial write at %08x (%d/%d)\n",
                     addr, todo, bytes);
            return X86EMUL_UNHANDLEABLE;
        }

        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            return X86EMUL_UNHANDLEABLE;

        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes,
                      val, IOREQ_WRITE, 0, 0);
    }

    return X86EMUL_OKAY;
}

static int
realmode_emulate_cmpxchg(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long old,
    unsigned long new,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    /* Fix this in case the guest is really relying on r-m-w atomicity. */
    return realmode_emulate_write(seg, offset, new, bytes, ctxt);
}

static int
realmode_read_segment(
    enum x86_segment seg,
    struct segment_register *reg,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    memcpy(reg, &rm_ctxt->seg_reg[seg], sizeof(struct segment_register));
    return X86EMUL_OKAY;
}

static int
realmode_write_segment(
    enum x86_segment seg,
    struct segment_register *reg,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    memcpy(&rm_ctxt->seg_reg[seg], reg, sizeof(struct segment_register));
    if ( seg == x86_seg_ss )
        rm_ctxt->flags.mov_ss = 1;
    return X86EMUL_OKAY;
}

static int
realmode_read_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
    {
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_pio_req(port, 1, bytes, 0, IOREQ_READ, 0, 0);
    }

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        return X86EMUL_UNHANDLEABLE;

    *val = curr->arch.hvm_vmx.real_mode_io_data;
    curr->arch.hvm_vmx.real_mode_io_completed = 0;

    return X86EMUL_OKAY;
}
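/*
 * Port 0xe9 is the Bochs-style debug console: bytes written there are
 * handed to hvm_print_line() and logged by Xen rather than forwarded to
 * the device model.
 */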
static int realmode_write_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;

    if ( port == 0xe9 )
    {
        hvm_print_line(curr, val);
        return X86EMUL_OKAY;
    }

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
    send_pio_req(port, 1, bytes, val, IOREQ_WRITE, 0, 0);

    return X86EMUL_OKAY;
}
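/*
 * Control-register accesses (the subject of this changeset): reads come
 * straight from the cached guest_cr[] values; writes to CR0/CR3/CR4 go
 * through the generic hvm_set_cr*() handlers so that mode transitions
 * (e.g. the guest setting CR0.PE to leave real mode) take full effect.
 */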
static int
realmode_read_cr(
    unsigned int reg,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt)
{
    switch ( reg )
    {
    case 0:
    case 2:
    case 3:
    case 4:
        *val = current->arch.hvm_vcpu.guest_cr[reg];
        break;
    default:
        return X86EMUL_UNHANDLEABLE;
    }

    return X86EMUL_OKAY;
}

static int
realmode_write_cr(
    unsigned int reg,
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    switch ( reg )
    {
    case 0:
        if ( !hvm_set_cr0(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    case 2:
        current->arch.hvm_vcpu.guest_cr[2] = val;
        break;
    case 3:
        if ( !hvm_set_cr3(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    case 4:
        if ( !hvm_set_cr4(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    default:
        return X86EMUL_UNHANDLEABLE;
    }

    return X86EMUL_OKAY;
}
static int realmode_write_rflags(
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    if ( (val & X86_EFLAGS_IF) && !(ctxt->regs->eflags & X86_EFLAGS_IF) )
        rm_ctxt->flags.sti = 1;
    return X86EMUL_OKAY;
}

static int realmode_wbinvd(
    struct x86_emulate_ctxt *ctxt)
{
    vmx_wbinvd_intercept();
    return X86EMUL_OKAY;
}

static int realmode_cpuid(
    unsigned int *eax,
    unsigned int *ebx,
    unsigned int *ecx,
    unsigned int *edx,
    struct x86_emulate_ctxt *ctxt)
{
    vmx_cpuid_intercept(eax, ebx, ecx, edx);
    return X86EMUL_OKAY;
}

static int realmode_hlt(
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    rm_ctxt->flags.hlt = 1;
    return X86EMUL_OKAY;
}

static int realmode_inject_hw_exception(
    uint8_t vector,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);

    rm_ctxt->flags.exn_raised = 1;
    realmode_deliver_exception(vector, 0, rm_ctxt);

    return X86EMUL_OKAY;
}

static int realmode_inject_sw_interrupt(
    uint8_t vector,
    uint8_t insn_len,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);

    realmode_deliver_exception(vector, insn_len, rm_ctxt);

    return X86EMUL_OKAY;
}

static struct x86_emulate_ops realmode_emulator_ops = {
    .read = realmode_emulate_read,
    .insn_fetch = realmode_emulate_insn_fetch,
    .write = realmode_emulate_write,
    .cmpxchg = realmode_emulate_cmpxchg,
    .read_segment = realmode_read_segment,
    .write_segment = realmode_write_segment,
    .read_io = realmode_read_io,
    .write_io = realmode_write_io,
    .read_cr = realmode_read_cr,
    .write_cr = realmode_write_cr,
    .write_rflags = realmode_write_rflags,
    .wbinvd = realmode_wbinvd,
    .cpuid = realmode_cpuid,
    .hlt = realmode_hlt,
    .inject_hw_exception = realmode_inject_hw_exception,
    .inject_sw_interrupt = realmode_inject_sw_interrupt
};
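/*
 * Main emulation loop, entered from the VMX exit handler while the guest
 * is in real mode (VT-x of this era cannot execute real mode natively).
 * Instructions are emulated one at a time until the guest sets CR0.PE, a
 * softirq is pending, or an interrupt/exception needs delivery. The loop
 * also maintains the STI/MOV-SS interrupt-shadow state that hardware
 * would otherwise track, and breaks out whenever an ioreq to the device
 * model is left in flight.
 */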
void vmx_realmode(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct realmode_emulate_ctxt rm_ctxt;
    unsigned long intr_info;
    int i, rc;
    u32 intr_shadow, new_intr_shadow;

    rm_ctxt.ctxt.regs = regs;

    for ( i = 0; i < 10; i++ )
        hvm_get_segment_register(curr, i, &rm_ctxt.seg_reg[i]);

    rm_ctxt.ctxt.addr_size =
        rm_ctxt.seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
    rm_ctxt.ctxt.sp_size =
        rm_ctxt.seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;

    intr_info = __vmread(VM_ENTRY_INTR_INFO);
    if ( intr_info & INTR_INFO_VALID_MASK )
    {
        __vmwrite(VM_ENTRY_INTR_INFO, 0);
        realmode_deliver_exception((uint8_t)intr_info, 0, &rm_ctxt);
    }

    intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
    new_intr_shadow = intr_shadow;

    while ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
            !softirq_pending(smp_processor_id()) &&
            !hvm_local_events_need_delivery(curr) )
    {
        rm_ctxt.insn_buf_eip = regs->eip;
        (void)hvm_copy_from_guest_phys(
            rm_ctxt.insn_buf,
            (uint32_t)(rm_ctxt.seg_reg[x86_seg_cs].base + regs->eip),
            sizeof(rm_ctxt.insn_buf));

        rm_ctxt.flag_word = 0;

        rc = x86_emulate(&rm_ctxt.ctxt, &realmode_emulator_ops);

        /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
        if ( rm_ctxt.flags.mov_ss )
            new_intr_shadow ^= VMX_INTR_SHADOW_MOV_SS;
        else
            new_intr_shadow &= ~VMX_INTR_SHADOW_MOV_SS;

        /* STI instruction toggles STI shadow, else we just clear it. */
        if ( rm_ctxt.flags.sti )
            new_intr_shadow ^= VMX_INTR_SHADOW_STI;
        else
            new_intr_shadow &= ~VMX_INTR_SHADOW_STI;

        /* Update interrupt shadow information in VMCS only if it changes. */
        if ( intr_shadow != new_intr_shadow )
        {
            intr_shadow = new_intr_shadow;
            __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
        }

        /* HLT happens after instruction retire, if no interrupt/exception. */
        if ( unlikely(rm_ctxt.flags.hlt) &&
             !rm_ctxt.flags.exn_raised &&
             !hvm_local_events_need_delivery(curr) )
            hvm_hlt(regs->eflags);

        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            break;

        if ( rc == X86EMUL_UNHANDLEABLE )
        {
            gdprintk(XENLOG_DEBUG,
                     "RM %04x:%08lx: %02x %02x %02x %02x %02x %02x\n",
                     rm_ctxt.seg_reg[x86_seg_cs].sel, rm_ctxt.insn_buf_eip,
                     rm_ctxt.insn_buf[0], rm_ctxt.insn_buf[1],
                     rm_ctxt.insn_buf[2], rm_ctxt.insn_buf[3],
                     rm_ctxt.insn_buf[4], rm_ctxt.insn_buf[5]);
            gdprintk(XENLOG_ERR, "Emulation failed\n");
            domain_crash_synchronous();
        }
    }

    for ( i = 0; i < 10; i++ )
        hvm_set_segment_register(curr, i, &rm_ctxt.seg_reg[i]);
}
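/*
 * Completion handler for device-model I/O started by the real-mode
 * emulator: clears the in-progress flag and, for reads, latches the
 * returned data so that re-executing the faulting instruction can
 * consume it from real_mode_io_data. Returns 0 if no real-mode I/O was
 * outstanding.
 */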
int vmx_realmode_io_complete(void)
{
    struct vcpu *curr = current;
    ioreq_t *p = &get_ioreq(curr)->vp_ioreq;

    if ( !curr->arch.hvm_vmx.real_mode_io_in_progress )
        return 0;

#if 0
    gdprintk(XENLOG_DEBUG, "RM I/O %d %c bytes=%d addr=%lx data=%lx\n",
             p->type, p->dir ? 'R' : 'W',
             (int)p->size, (long)p->addr, (long)p->data);
#endif

    curr->arch.hvm_vmx.real_mode_io_in_progress = 0;
    if ( p->dir == IOREQ_READ )
    {
        curr->arch.hvm_vmx.real_mode_io_completed = 1;
        curr->arch.hvm_vmx.real_mode_io_data = p->data;
    }

    return 1;
}