ia64/xen-unstable

annotate xen/arch/x86/hvm/vmx/realmode.c @ 16479:11bfa26dd125

vmx realmode: Fix emulation of exception delivery (stack pointer must
be adjusted for FLAGS push), and fix up vmx_realmode() exit protocol
now that it is called from asm stub context.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Nov 26 16:47:10 2007 +0000 (2007-11-26)
parents 9f61a0add5b6
children 4deb65519d9b
rev   line source
keir@16455 1 /******************************************************************************
keir@16455 2 * arch/x86/hvm/vmx/realmode.c
keir@16455 3 *
keir@16455 4 * Real-mode emulation for VMX.
keir@16455 5 *
keir@16455 6 * Copyright (c) 2007 Citrix Systems, Inc.
keir@16455 7 *
keir@16455 8 * Authors:
keir@16455 9 * Keir Fraser <keir.fraser@citrix.com>
keir@16455 10 */
keir@16455 11
keir@16455 12 #include <xen/config.h>
keir@16455 13 #include <xen/init.h>
keir@16455 14 #include <xen/lib.h>
keir@16455 15 #include <xen/sched.h>
keir@16462 16 #include <asm/event.h>
keir@16455 17 #include <asm/hvm/hvm.h>
keir@16455 18 #include <asm/hvm/support.h>
keir@16455 19 #include <asm/hvm/vmx/vmx.h>
keir@16455 20 #include <asm/hvm/vmx/vmcs.h>
keir@16455 21 #include <asm/hvm/vmx/cpu.h>
keir@16455 22 #include <asm/x86_emulate.h>
keir@16455 23
/*
 * Per-vCPU emulation context wrapped around the generic x86 emulator
 * context.  Filled in afresh by vmx_realmode() for each emulation run.
 */
struct realmode_emulate_ctxt {
    struct x86_emulate_ctxt ctxt;

    /* Cache of 16 bytes of instruction. */
    uint8_t insn_buf[16];
    unsigned long insn_buf_eip;   /* guest EIP at which insn_buf was fetched */

    /* Local copies of all 10 segment registers, written back on exit. */
    struct segment_register seg_reg[10];

    /* Per-instruction emulation state; flag_word allows clearing in one go. */
    union {
        struct {
            unsigned int hlt:1;        /* HLT was emulated */
            unsigned int mov_ss:1;     /* SS was written (MOV/POP SS shadow) */
            unsigned int sti:1;        /* IF transitioned 0 -> 1 (STI shadow) */
            unsigned int exn_raised:1; /* an exception was injected */
        } flags;
        unsigned int flag_word;
    };
};
keir@16455 43
keir@16462 44 static void realmode_deliver_exception(
keir@16462 45 unsigned int vector,
keir@16462 46 unsigned int insn_len,
keir@16462 47 struct realmode_emulate_ctxt *rm_ctxt)
keir@16455 48 {
keir@16462 49 struct segment_register *idtr = &rm_ctxt->seg_reg[x86_seg_idtr];
keir@16462 50 struct segment_register *csr = &rm_ctxt->seg_reg[x86_seg_cs];
keir@16462 51 struct cpu_user_regs *regs = rm_ctxt->ctxt.regs;
keir@16462 52 uint32_t cs_eip, pstk;
keir@16462 53 uint16_t frame[3];
keir@16462 54 unsigned int last_byte;
keir@16455 55
keir@16462 56 again:
keir@16462 57 last_byte = (vector * 4) + 3;
keir@16462 58 if ( idtr->limit < last_byte )
keir@16462 59 {
keir@16462 60 /* Software interrupt? */
keir@16462 61 if ( insn_len != 0 )
keir@16462 62 {
keir@16462 63 insn_len = 0;
keir@16462 64 vector = TRAP_gp_fault;
keir@16462 65 goto again;
keir@16462 66 }
keir@16455 67
keir@16462 68 /* Exception or hardware interrupt. */
keir@16462 69 switch ( vector )
keir@16462 70 {
keir@16462 71 case TRAP_double_fault:
keir@16462 72 hvm_triple_fault();
keir@16462 73 return;
keir@16462 74 case TRAP_gp_fault:
keir@16462 75 vector = TRAP_double_fault;
keir@16462 76 goto again;
keir@16462 77 default:
keir@16462 78 vector = TRAP_gp_fault;
keir@16462 79 goto again;
keir@16462 80 }
keir@16455 81 }
keir@16455 82
keir@16462 83 (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);
keir@16462 84
keir@16462 85 frame[0] = regs->eip + insn_len;
keir@16462 86 frame[1] = csr->sel;
keir@16462 87 frame[2] = regs->eflags & ~X86_EFLAGS_RF;
keir@16462 88
keir@16462 89 if ( rm_ctxt->ctxt.addr_size == 32 )
keir@16462 90 {
keir@16479 91 regs->esp -= 6;
keir@16462 92 pstk = regs->esp;
keir@16462 93 }
keir@16462 94 else
keir@16462 95 {
keir@16479 96 pstk = (uint16_t)(regs->esp - 6);
keir@16462 97 regs->esp &= ~0xffff;
keir@16462 98 regs->esp |= pstk;
keir@16462 99 }
keir@16462 100
keir@16462 101 pstk += rm_ctxt->seg_reg[x86_seg_ss].base;
keir@16462 102 (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));
keir@16462 103
keir@16462 104 csr->sel = cs_eip >> 16;
keir@16462 105 csr->base = (uint32_t)csr->sel << 4;
keir@16462 106 regs->eip = (uint16_t)cs_eip;
keir@16462 107 regs->eflags &= ~(X86_EFLAGS_AC | X86_EFLAGS_TF |
keir@16462 108 X86_EFLAGS_AC | X86_EFLAGS_RF);
keir@16455 109 }
keir@16455 110
/*
 * Common read path for data reads and instruction fetches.  Translates
 * seg:offset to a physical address (real mode: segment base + offset),
 * attempts a direct copy from guest memory, and falls back to an MMIO
 * read request to the device model when the whole copy faults.
 */
static int
realmode_read(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    enum hvm_access_type access_type,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;
    int todo;  /* bytes NOT copied; non-zero means the phys copy faulted */

    *val = 0;
    todo = hvm_copy_from_guest_phys(val, addr, bytes);

    if ( todo )
    {
        struct vcpu *curr = current;

        /* Partial copy (access straddling RAM and MMIO) is unhandled. */
        if ( todo != bytes )
        {
            gdprintk(XENLOG_WARNING, "RM: Partial read at %08x (%d/%d)\n",
                     addr, todo, bytes);
            return X86EMUL_UNHANDLEABLE;
        }

        /* Only one real-mode I/O request may be in flight at a time. */
        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            return X86EMUL_UNHANDLEABLE;

        /* Post the MMIO read unless a prior issue has already completed. */
        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        {
            curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
            send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes,
                          0, IOREQ_READ, 0, 0);
        }

        /*
         * No data yet: fail this emulation attempt.  The instruction is
         * re-emulated after vmx_realmode_io_complete() latches the result.
         */
        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
            return X86EMUL_UNHANDLEABLE;

        /* Consume the latched device-model response. */
        *val = curr->arch.hvm_vmx.real_mode_io_data;
        curr->arch.hvm_vmx.real_mode_io_completed = 0;
    }

    return X86EMUL_OKAY;
}
keir@16455 156
keir@16455 157 static int
keir@16455 158 realmode_emulate_read(
keir@16455 159 enum x86_segment seg,
keir@16455 160 unsigned long offset,
keir@16455 161 unsigned long *val,
keir@16455 162 unsigned int bytes,
keir@16455 163 struct x86_emulate_ctxt *ctxt)
keir@16455 164 {
keir@16455 165 return realmode_read(
keir@16455 166 seg, offset, val, bytes, hvm_access_read,
keir@16455 167 container_of(ctxt, struct realmode_emulate_ctxt, ctxt));
keir@16455 168 }
keir@16455 169
keir@16455 170 static int
keir@16455 171 realmode_emulate_insn_fetch(
keir@16455 172 enum x86_segment seg,
keir@16455 173 unsigned long offset,
keir@16455 174 unsigned long *val,
keir@16455 175 unsigned int bytes,
keir@16455 176 struct x86_emulate_ctxt *ctxt)
keir@16455 177 {
keir@16455 178 struct realmode_emulate_ctxt *rm_ctxt =
keir@16455 179 container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
keir@16455 180 unsigned int insn_off = offset - rm_ctxt->insn_buf_eip;
keir@16455 181
keir@16455 182 /* Fall back if requested bytes are not in the prefetch cache. */
keir@16462 183 if ( unlikely((insn_off + bytes) > sizeof(rm_ctxt->insn_buf)) )
keir@16455 184 return realmode_read(
keir@16455 185 seg, offset, val, bytes,
keir@16455 186 hvm_access_insn_fetch, rm_ctxt);
keir@16455 187
keir@16455 188 /* Hit the cache. Simple memcpy. */
keir@16455 189 *val = 0;
keir@16455 190 memcpy(val, &rm_ctxt->insn_buf[insn_off], bytes);
keir@16455 191 return X86EMUL_OKAY;
keir@16455 192 }
keir@16455 193
/*
 * Emulator write callback: store into guest memory at seg:offset, with
 * fall-through to an MMIO write request when the physical copy faults.
 */
static int
realmode_emulate_write(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;
    int todo;  /* bytes NOT copied; non-zero means the phys copy faulted */

    todo = hvm_copy_to_guest_phys(addr, &val, bytes);

    if ( todo )
    {
        struct vcpu *curr = current;

        /* Partial copy (access straddling RAM and MMIO) is unhandled. */
        if ( todo != bytes )
        {
            gdprintk(XENLOG_WARNING, "RM: Partial write at %08x (%d/%d)\n",
                     addr, todo, bytes);
            return X86EMUL_UNHANDLEABLE;
        }

        /* Only one real-mode I/O request may be in flight at a time. */
        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            return X86EMUL_UNHANDLEABLE;

        /* Post the write; no completion data is needed, so report OKAY. */
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes,
                      val, IOREQ_WRITE, 0, 0);
    }

    return X86EMUL_OKAY;
}
keir@16455 230
/* CMPXCHG is not supported by the real-mode emulator: always fail. */
static int
realmode_emulate_cmpxchg(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long old,
    unsigned long new,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    return X86EMUL_UNHANDLEABLE;
}
keir@16455 242
keir@16455 243 static int
keir@16455 244 realmode_read_segment(
keir@16455 245 enum x86_segment seg,
keir@16455 246 struct segment_register *reg,
keir@16455 247 struct x86_emulate_ctxt *ctxt)
keir@16455 248 {
keir@16455 249 struct realmode_emulate_ctxt *rm_ctxt =
keir@16455 250 container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
keir@16455 251 memcpy(reg, &rm_ctxt->seg_reg[seg], sizeof(struct segment_register));
keir@16455 252 return X86EMUL_OKAY;
keir@16455 253 }
keir@16455 254
keir@16455 255 static int
keir@16455 256 realmode_write_segment(
keir@16455 257 enum x86_segment seg,
keir@16455 258 struct segment_register *reg,
keir@16455 259 struct x86_emulate_ctxt *ctxt)
keir@16455 260 {
keir@16455 261 struct realmode_emulate_ctxt *rm_ctxt =
keir@16455 262 container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
keir@16455 263 memcpy(&rm_ctxt->seg_reg[seg], reg, sizeof(struct segment_register));
keir@16460 264 if ( seg == x86_seg_ss )
keir@16468 265 rm_ctxt->flags.mov_ss = 1;
keir@16455 266 return X86EMUL_OKAY;
keir@16455 267 }
keir@16455 268
/*
 * Emulator port-read callback.  Operates as a split transaction: the
 * first attempt posts a PIO request to the device model and fails the
 * emulation; after vmx_realmode_io_complete() latches the response, the
 * re-emulated instruction finds real_mode_io_completed set and consumes
 * the data.
 */
static int
realmode_read_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;

    /* Only one real-mode I/O request may be in flight at a time. */
    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    /* Post the PIO read unless a prior issue has already completed. */
    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
    {
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_pio_req(port, 1, bytes, 0, IOREQ_READ, 0, 0);
    }

    /* No data yet: fail this attempt; the instruction will be retried. */
    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        return X86EMUL_UNHANDLEABLE;

    /* Consume the latched device-model response. */
    *val = curr->arch.hvm_vmx.real_mode_io_data;
    curr->arch.hvm_vmx.real_mode_io_completed = 0;

    return X86EMUL_OKAY;
}
keir@16455 295
/*
 * Emulator port-write callback.  Port 0xe9 is the debug port: bytes are
 * logged via hvm_print_line() without involving the device model.  All
 * other ports are posted as PIO requests to the device model.
 */
static int realmode_write_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;

    if ( port == 0xe9 )
    {
        hvm_print_line(curr, val);
        return X86EMUL_OKAY;
    }

    /* Only one real-mode I/O request may be in flight at a time. */
    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    /* Post the write; no completion data is needed, so report OKAY. */
    curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
    send_pio_req(port, 1, bytes, val, IOREQ_WRITE, 0, 0);

    return X86EMUL_OKAY;
}
keir@16455 318
keir@16455 319 static int
keir@16455 320 realmode_read_cr(
keir@16455 321 unsigned int reg,
keir@16455 322 unsigned long *val,
keir@16455 323 struct x86_emulate_ctxt *ctxt)
keir@16455 324 {
keir@16455 325 switch ( reg )
keir@16455 326 {
keir@16455 327 case 0:
keir@16455 328 case 2:
keir@16455 329 case 3:
keir@16455 330 case 4:
keir@16455 331 *val = current->arch.hvm_vcpu.guest_cr[reg];
keir@16455 332 break;
keir@16455 333 default:
keir@16455 334 return X86EMUL_UNHANDLEABLE;
keir@16455 335 }
keir@16455 336
keir@16455 337 return X86EMUL_OKAY;
keir@16455 338 }
keir@16455 339
keir@16460 340 static int realmode_write_rflags(
keir@16460 341 unsigned long val,
keir@16460 342 struct x86_emulate_ctxt *ctxt)
keir@16460 343 {
keir@16468 344 struct realmode_emulate_ctxt *rm_ctxt =
keir@16468 345 container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
keir@16460 346 if ( (val & X86_EFLAGS_IF) && !(ctxt->regs->eflags & X86_EFLAGS_IF) )
keir@16468 347 rm_ctxt->flags.sti = 1;
keir@16468 348 return X86EMUL_OKAY;
keir@16468 349 }
keir@16460 350
keir@16468 351 static int realmode_wbinvd(
keir@16468 352 struct x86_emulate_ctxt *ctxt)
keir@16468 353 {
keir@16468 354 vmx_wbinvd_intercept();
keir@16468 355 return X86EMUL_OKAY;
keir@16468 356 }
keir@16468 357
keir@16468 358 static int realmode_cpuid(
keir@16468 359 unsigned int *eax,
keir@16468 360 unsigned int *ebx,
keir@16468 361 unsigned int *ecx,
keir@16468 362 unsigned int *edx,
keir@16468 363 struct x86_emulate_ctxt *ctxt)
keir@16468 364 {
keir@16468 365 vmx_cpuid_intercept(eax, ebx, ecx, edx);
keir@16468 366 return X86EMUL_OKAY;
keir@16468 367 }
keir@16468 368
keir@16468 369 static int realmode_hlt(
keir@16468 370 struct x86_emulate_ctxt *ctxt)
keir@16468 371 {
keir@16468 372 struct realmode_emulate_ctxt *rm_ctxt =
keir@16468 373 container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
keir@16468 374 rm_ctxt->flags.hlt = 1;
keir@16460 375 return X86EMUL_OKAY;
keir@16460 376 }
keir@16460 377
keir@16462 378 static int realmode_inject_hw_exception(
keir@16462 379 uint8_t vector,
keir@16462 380 struct x86_emulate_ctxt *ctxt)
keir@16462 381 {
keir@16462 382 struct realmode_emulate_ctxt *rm_ctxt =
keir@16462 383 container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
keir@16462 384
keir@16468 385 rm_ctxt->flags.exn_raised = 1;
keir@16462 386 realmode_deliver_exception(vector, 0, rm_ctxt);
keir@16462 387
keir@16462 388 return X86EMUL_OKAY;
keir@16462 389 }
keir@16462 390
keir@16462 391 static int realmode_inject_sw_interrupt(
keir@16462 392 uint8_t vector,
keir@16462 393 uint8_t insn_len,
keir@16462 394 struct x86_emulate_ctxt *ctxt)
keir@16462 395 {
keir@16462 396 struct realmode_emulate_ctxt *rm_ctxt =
keir@16462 397 container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
keir@16462 398
keir@16462 399 realmode_deliver_exception(vector, insn_len, rm_ctxt);
keir@16462 400
keir@16462 401 return X86EMUL_OKAY;
keir@16462 402 }
keir@16462 403
/* Callback table handed to x86_emulate() for real-mode emulation. */
static struct x86_emulate_ops realmode_emulator_ops = {
    .read = realmode_emulate_read,
    .insn_fetch = realmode_emulate_insn_fetch,
    .write = realmode_emulate_write,
    .cmpxchg = realmode_emulate_cmpxchg,
    .read_segment = realmode_read_segment,
    .write_segment = realmode_write_segment,
    .read_io = realmode_read_io,
    .write_io = realmode_write_io,
    .read_cr = realmode_read_cr,
    .write_rflags = realmode_write_rflags,
    .wbinvd = realmode_wbinvd,
    .cpuid = realmode_cpuid,
    .hlt = realmode_hlt,
    .inject_hw_exception = realmode_inject_hw_exception,
    .inject_sw_interrupt = realmode_inject_sw_interrupt
};
keir@16455 421
/*
 * Emulate guest execution while the vCPU is in real mode.  Called from the
 * asm stub context with the guest register frame.  Emulates instructions
 * until the guest leaves real mode (CR0.PE set), a softirq is pending, a
 * local event must be delivered, an I/O request to the device model is
 * outstanding, or emulation fails (which crashes the domain).
 */
void vmx_realmode(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct realmode_emulate_ctxt rm_ctxt;
    unsigned long intr_info;
    int i, rc;
    u32 intr_shadow, new_intr_shadow;

    rm_ctxt.ctxt.regs = regs;

    /* Snapshot all 10 segment registers for the emulator's use. */
    for ( i = 0; i < 10; i++ )
        hvm_get_segment_register(curr, i, &rm_ctxt.seg_reg[i]);

    /* Address and stack sizes follow the CS.D and SS.B attribute bits. */
    rm_ctxt.ctxt.addr_size =
        rm_ctxt.seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
    rm_ctxt.ctxt.sp_size =
        rm_ctxt.seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;

    /*
     * An event pending in VM_ENTRY_INTR_INFO cannot be injected by the
     * hardware while we emulate: deliver it by hand and clear the field.
     */
    intr_info = __vmread(VM_ENTRY_INTR_INFO);
    if ( intr_info & INTR_INFO_VALID_MASK )
    {
        __vmwrite(VM_ENTRY_INTR_INFO, 0);
        realmode_deliver_exception((uint8_t)intr_info, 0, &rm_ctxt);
    }

    intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
    new_intr_shadow = intr_shadow;

    while ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
            !softirq_pending(smp_processor_id()) &&
            !hvm_local_events_need_delivery(curr) )
    {
        /* Prefetch up to 16 instruction bytes at CS:EIP. */
        rm_ctxt.insn_buf_eip = regs->eip;
        (void)hvm_copy_from_guest_phys(
            rm_ctxt.insn_buf,
            (uint32_t)(rm_ctxt.seg_reg[x86_seg_cs].base + regs->eip),
            sizeof(rm_ctxt.insn_buf));

        /* Clear all per-instruction flags in one store. */
        rm_ctxt.flag_word = 0;

        rc = x86_emulate(&rm_ctxt.ctxt, &realmode_emulator_ops);

        /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
        if ( rm_ctxt.flags.mov_ss )
            new_intr_shadow ^= VMX_INTR_SHADOW_MOV_SS;
        else
            new_intr_shadow &= ~VMX_INTR_SHADOW_MOV_SS;

        /* STI instruction toggles STI shadow, else we just clear it. */
        if ( rm_ctxt.flags.sti )
            new_intr_shadow ^= VMX_INTR_SHADOW_STI;
        else
            new_intr_shadow &= ~VMX_INTR_SHADOW_STI;

        /* Update interrupt shadow information in VMCS only if it changes. */
        if ( intr_shadow != new_intr_shadow )
        {
            intr_shadow = new_intr_shadow;
            __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
        }

        /* HLT happens after instruction retire, if no interrupt/exception. */
        if ( unlikely(rm_ctxt.flags.hlt) &&
             !rm_ctxt.flags.exn_raised &&
             !hvm_local_events_need_delivery(curr) )
            hvm_hlt(regs->eflags);

        /* I/O posted to the device model: exit and resume on completion. */
        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            break;

        if ( rc == X86EMUL_UNHANDLEABLE )
        {
            /* Dump CS:EIP and leading opcode bytes before crashing. */
            gdprintk(XENLOG_DEBUG,
                     "RM %04x:%08lx: %02x %02x %02x %02x %02x %02x\n",
                     rm_ctxt.seg_reg[x86_seg_cs].sel, rm_ctxt.insn_buf_eip,
                     rm_ctxt.insn_buf[0], rm_ctxt.insn_buf[1],
                     rm_ctxt.insn_buf[2], rm_ctxt.insn_buf[3],
                     rm_ctxt.insn_buf[4], rm_ctxt.insn_buf[5]);
            gdprintk(XENLOG_ERR, "Emulation failed\n");
            domain_crash_synchronous();
        }
    }

    /* Write back (possibly modified) segment state. */
    for ( i = 0; i < 10; i++ )
        hvm_set_segment_register(curr, i, &rm_ctxt.seg_reg[i]);
}
keir@16459 508
keir@16459 509 int vmx_realmode_io_complete(void)
keir@16459 510 {
keir@16459 511 struct vcpu *curr = current;
keir@16459 512 ioreq_t *p = &get_ioreq(curr)->vp_ioreq;
keir@16459 513
keir@16459 514 if ( !curr->arch.hvm_vmx.real_mode_io_in_progress )
keir@16459 515 return 0;
keir@16459 516
keir@16460 517 #if 0
keir@16460 518 gdprintk(XENLOG_DEBUG, "RM I/O %d %c bytes=%d addr=%lx data=%lx\n",
keir@16460 519 p->type, p->dir ? 'R' : 'W',
keir@16460 520 (int)p->size, (long)p->addr, (long)p->data);
keir@16460 521 #endif
keir@16460 522
keir@16459 523 curr->arch.hvm_vmx.real_mode_io_in_progress = 0;
keir@16459 524 if ( p->dir == IOREQ_READ )
keir@16459 525 {
keir@16459 526 curr->arch.hvm_vmx.real_mode_io_completed = 1;
keir@16459 527 curr->arch.hvm_vmx.real_mode_io_data = p->data;
keir@16459 528 }
keir@16459 529
keir@16459 530 return 1;
keir@16459 531 }