
changeset 17108:200787660b5f

hvm: More emulation changes: push some of the realmode- or HVM-emulate-specific
stuff into core x86_emulate().
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Feb 22 18:32:41 2008 +0000 (2008-02-22)
parents 757cd7bb5e35
children 5adc98ae6861
files
	xen/arch/x86/hvm/emulate.c
	xen/arch/x86/hvm/hvm.c
	xen/arch/x86/hvm/io.c
	xen/arch/x86/hvm/svm/svm.c
	xen/arch/x86/hvm/vmx/intr.c
	xen/arch/x86/hvm/vmx/realmode.c
	xen/arch/x86/hvm/vmx/vmx.c
	xen/arch/x86/hvm/vmx/x86_32/exits.S
	xen/arch/x86/hvm/vmx/x86_64/exits.S
	xen/arch/x86/x86_emulate.c
	xen/include/asm-x86/hvm/emulate.h
	xen/include/asm-x86/hvm/hvm.h
	xen/include/asm-x86/x86_emulate.h
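
The key change is replacing the ad-hoc flag word in struct hvm_emulate_ctxt with a "retire" union in the generic x86_emulate_ctxt, so that x86_emulate() itself records instruction-retirement side effects (HLT, MOV-SS shadow, STI shadow) for every caller. The declaration lands in xen/include/asm-x86/x86_emulate.h, whose hunk is not shown in this listing; below is a minimal sketch, with the field layout inferred from the usage sites in this diff (ctxt->retire.byte, ctxt->retire.flags.{hlt,mov_ss,sti}):

    /* Sketch only -- layout inferred from usage in this changeset. */
    union {
        struct {
            uint8_t hlt:1;    /* instruction retired as HLT */
            uint8_t mov_ss:1; /* instruction sets MOV-SS interrupt shadow */
            uint8_t sti:1;    /* instruction sets STI interrupt shadow */
        } flags;
        uint8_t byte;
    } retire;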
     1.1 --- a/xen/arch/x86/hvm/emulate.c	Fri Feb 22 16:49:56 2008 +0000
     1.2 +++ b/xen/arch/x86/hvm/emulate.c	Fri Feb 22 18:32:41 2008 +0000
     1.3 @@ -124,8 +124,9 @@ static int hvmemul_virtual_to_linear(
     1.4  
     1.5      if ( !okay )
     1.6      {
     1.7 -        hvmemul_ctxt->flags.exn_pending = 1;
     1.8 +        hvmemul_ctxt->exn_pending = 1;
     1.9          hvmemul_ctxt->exn_vector = TRAP_gp_fault;
    1.10 +        hvmemul_ctxt->exn_error_code = 0;
    1.11          hvmemul_ctxt->exn_insn_len = 0;
    1.12          return X86EMUL_EXCEPTION;
    1.13      }
    1.14 @@ -439,9 +440,6 @@ static int hvmemul_write_segment(
    1.15          container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
    1.16      struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
    1.17  
    1.18 -    if ( seg == x86_seg_ss )
    1.19 -        hvmemul_ctxt->flags.mov_ss = 1;
    1.20 -
    1.21      memcpy(sreg, reg, sizeof(struct segment_register));
    1.22      __set_bit(seg, &hvmemul_ctxt->seg_reg_dirty);
    1.23  
    1.24 @@ -571,17 +569,6 @@ static int hvmemul_write_msr(
    1.25      return hvm_funcs.msr_write_intercept(&_regs);
    1.26  }
    1.27  
    1.28 -static int hvmemul_write_rflags(
    1.29 -    unsigned long val,
    1.30 -    struct x86_emulate_ctxt *ctxt)
    1.31 -{
    1.32 -    struct hvm_emulate_ctxt *hvmemul_ctxt =
    1.33 -        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
    1.34 -    if ( (val & X86_EFLAGS_IF) && !(ctxt->regs->eflags & X86_EFLAGS_IF) )
    1.35 -        hvmemul_ctxt->flags.sti = 1;
    1.36 -    return X86EMUL_OKAY;
    1.37 -}
    1.38 -
    1.39  static int hvmemul_wbinvd(
    1.40      struct x86_emulate_ctxt *ctxt)
    1.41  {
    1.42 @@ -600,28 +587,17 @@ static int hvmemul_cpuid(
    1.43      return X86EMUL_OKAY;
    1.44  }
    1.45  
    1.46 -static int hvmemul_hlt(
    1.47 -    struct x86_emulate_ctxt *ctxt)
    1.48 -{
    1.49 -    struct hvm_emulate_ctxt *hvmemul_ctxt =
    1.50 -        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
    1.51 -    hvmemul_ctxt->flags.hlt = 1;
    1.52 -    return X86EMUL_OKAY;
    1.53 -}
    1.54 -
    1.55  static int hvmemul_inject_hw_exception(
    1.56      uint8_t vector,
    1.57 -    uint16_t error_code,
    1.58 +    int32_t error_code,
    1.59      struct x86_emulate_ctxt *ctxt)
    1.60  {
    1.61      struct hvm_emulate_ctxt *hvmemul_ctxt =
    1.62          container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
    1.63  
    1.64 -    if ( error_code != 0 )
    1.65 -        return X86EMUL_UNHANDLEABLE;
    1.66 -
    1.67 -    hvmemul_ctxt->flags.exn_pending = 1;
    1.68 +    hvmemul_ctxt->exn_pending = 1;
    1.69      hvmemul_ctxt->exn_vector = vector;
    1.70 +    hvmemul_ctxt->exn_error_code = error_code;
    1.71      hvmemul_ctxt->exn_insn_len = 0;
    1.72  
    1.73      return X86EMUL_OKAY;
    1.74 @@ -635,8 +611,9 @@ static int hvmemul_inject_sw_interrupt(
    1.75      struct hvm_emulate_ctxt *hvmemul_ctxt =
    1.76          container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
    1.77  
    1.78 -    hvmemul_ctxt->flags.exn_pending = 1;
    1.79 +    hvmemul_ctxt->exn_pending = 1;
    1.80      hvmemul_ctxt->exn_vector = vector;
    1.81 +    hvmemul_ctxt->exn_error_code = -1;
    1.82      hvmemul_ctxt->exn_insn_len = insn_len;
    1.83  
    1.84      return X86EMUL_OKAY;
    1.85 @@ -684,10 +661,8 @@ static struct x86_emulate_ops hvm_emulat
    1.86      .write_cr      = hvmemul_write_cr,
    1.87      .read_msr      = hvmemul_read_msr,
    1.88      .write_msr     = hvmemul_write_msr,
    1.89 -    .write_rflags  = hvmemul_write_rflags,
    1.90      .wbinvd        = hvmemul_wbinvd,
    1.91      .cpuid         = hvmemul_cpuid,
    1.92 -    .hlt           = hvmemul_hlt,
    1.93      .inject_hw_exception = hvmemul_inject_hw_exception,
    1.94      .inject_sw_interrupt = hvmemul_inject_sw_interrupt,
    1.95      .load_fpu_ctxt = hvmemul_load_fpu_ctxt,
    1.96 @@ -698,7 +673,9 @@ int hvm_emulate_one(
    1.97      struct hvm_emulate_ctxt *hvmemul_ctxt)
    1.98  {
    1.99      struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
   1.100 +    uint32_t new_intr_shadow;
   1.101      unsigned long addr;
   1.102 +    int rc;
   1.103  
   1.104      hvmemul_ctxt->ctxt.addr_size =
   1.105          hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
   1.106 @@ -715,15 +692,46 @@ int hvm_emulate_one(
   1.107               hvmemul_ctxt->insn_buf, addr, sizeof(hvmemul_ctxt->insn_buf)))
   1.108          ? sizeof(hvmemul_ctxt->insn_buf) : 0;
   1.109  
   1.110 -    hvmemul_ctxt->flag_word = 0;
   1.111 +    hvmemul_ctxt->exn_pending = 0;
   1.112  
   1.113 -    return x86_emulate(&hvmemul_ctxt->ctxt, &hvm_emulate_ops);
   1.114 +    rc = x86_emulate(&hvmemul_ctxt->ctxt, &hvm_emulate_ops);
   1.115 +    if ( rc != X86EMUL_OKAY )
   1.116 +        return rc;
   1.117 +
   1.118 +    new_intr_shadow = hvmemul_ctxt->intr_shadow;
   1.119 +
   1.120 +    /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
   1.121 +    if ( hvmemul_ctxt->ctxt.retire.flags.mov_ss )
   1.122 +        new_intr_shadow ^= HVM_INTR_SHADOW_MOV_SS;
   1.123 +    else
   1.124 +        new_intr_shadow &= ~HVM_INTR_SHADOW_MOV_SS;
   1.125 +
   1.126 +    /* STI instruction toggles STI shadow, else we just clear it. */
   1.127 +    if ( hvmemul_ctxt->ctxt.retire.flags.sti )
   1.128 +        new_intr_shadow ^= HVM_INTR_SHADOW_STI;
   1.129 +    else
   1.130 +        new_intr_shadow &= ~HVM_INTR_SHADOW_STI;
   1.131 +
   1.132 +    if ( hvmemul_ctxt->intr_shadow != new_intr_shadow )
   1.133 +    {
   1.134 +        hvmemul_ctxt->intr_shadow = new_intr_shadow;
   1.135 +        hvm_funcs.set_interrupt_shadow(current, new_intr_shadow);
   1.136 +    }
   1.137 +
   1.138 +    if ( hvmemul_ctxt->ctxt.retire.flags.hlt &&
   1.139 +         !hvm_local_events_need_delivery(current) )
   1.140 +    {
   1.141 +        hvm_hlt(regs->eflags);
   1.142 +    }
   1.143 +
   1.144 +    return X86EMUL_OKAY;
   1.145  }
   1.146  
   1.147  void hvm_emulate_prepare(
   1.148      struct hvm_emulate_ctxt *hvmemul_ctxt,
   1.149      struct cpu_user_regs *regs)
   1.150  {
   1.151 +    hvmemul_ctxt->intr_shadow = hvm_funcs.get_interrupt_shadow(current);
   1.152      hvmemul_ctxt->ctxt.regs = regs;
   1.153      hvmemul_ctxt->ctxt.force_writeback = 1;
   1.154      hvmemul_ctxt->seg_reg_accessed = 0;
     2.1 --- a/xen/arch/x86/hvm/hvm.c	Fri Feb 22 16:49:56 2008 +0000
     2.2 +++ b/xen/arch/x86/hvm/hvm.c	Fri Feb 22 18:32:41 2008 +0000
     2.3 @@ -1640,12 +1640,22 @@ void hvm_cpuid(unsigned int input, unsig
     2.4  
     2.5  enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
     2.6  {
     2.7 -    enum hvm_intblk r;
     2.8 +    unsigned long intr_shadow;
     2.9 +
    2.10      ASSERT(v == current);
    2.11  
    2.12 -    r = hvm_funcs.interrupt_blocked(v, intack);
    2.13 -    if ( r != hvm_intblk_none )
    2.14 -        return r;
    2.15 +    if ( (intack.source != hvm_intsrc_nmi) &&
    2.16 +         !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
    2.17 +        return hvm_intblk_rflags_ie;
    2.18 +
    2.19 +    intr_shadow = hvm_funcs.get_interrupt_shadow(v);
    2.20 +
    2.21 +    if ( intr_shadow & (HVM_INTR_SHADOW_STI|HVM_INTR_SHADOW_MOV_SS) )
    2.22 +        return hvm_intblk_shadow;
    2.23 +
    2.24 +    if ( intack.source == hvm_intsrc_nmi )
    2.25 +        return ((intr_shadow & HVM_INTR_SHADOW_NMI) ?
    2.26 +                hvm_intblk_nmi_iret : hvm_intblk_none);
    2.27  
    2.28      if ( intack.source == hvm_intsrc_lapic )
    2.29      {
    2.30 @@ -1654,7 +1664,7 @@ enum hvm_intblk hvm_interrupt_blocked(st
    2.31              return hvm_intblk_tpr;
    2.32      }
    2.33  
    2.34 -    return r;
    2.35 +    return hvm_intblk_none;
    2.36  }
    2.37  
    2.38  static long hvm_grant_table_op(
     3.1 --- a/xen/arch/x86/hvm/io.c	Fri Feb 22 16:49:56 2008 +0000
     3.2 +++ b/xen/arch/x86/hvm/io.c	Fri Feb 22 18:32:41 2008 +0000
     3.3 @@ -262,8 +262,8 @@ int handle_mmio(void)
     3.4                   ctxt.insn_buf[4], ctxt.insn_buf[5]);
     3.5          return 0;
     3.6      case X86EMUL_EXCEPTION:
     3.7 -        if ( ctxt.flags.exn_pending )
     3.8 -            hvm_inject_exception(ctxt.exn_vector, 0, 0);
     3.9 +        if ( ctxt.exn_pending )
    3.10 +            hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
    3.11          break;
    3.12      default:
    3.13          break;
     4.1 --- a/xen/arch/x86/hvm/svm/svm.c	Fri Feb 22 16:49:56 2008 +0000
     4.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Fri Feb 22 18:32:41 2008 +0000
     4.3 @@ -366,24 +366,17 @@ static void svm_fpu_leave(struct vcpu *v
     4.4      }
     4.5  }
     4.6  
     4.7 -static enum hvm_intblk svm_interrupt_blocked(
     4.8 -    struct vcpu *v, struct hvm_intack intack)
     4.9 +static unsigned int svm_get_interrupt_shadow(struct vcpu *v)
    4.10  {
    4.11      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    4.12 -
    4.13 -    if ( vmcb->interrupt_shadow )
    4.14 -        return hvm_intblk_shadow;
    4.15 -
    4.16 -    if ( intack.source == hvm_intsrc_nmi )
    4.17 -        return hvm_intblk_none;
    4.18 +    return (vmcb->interrupt_shadow ? HVM_INTR_SHADOW_MOV_SS : 0);
    4.19 +}
    4.20  
    4.21 -    ASSERT((intack.source == hvm_intsrc_pic) ||
    4.22 -           (intack.source == hvm_intsrc_lapic));
    4.23 -
    4.24 -    if ( !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
    4.25 -        return hvm_intblk_rflags_ie;
    4.26 -
    4.27 -    return hvm_intblk_none;
    4.28 +static void svm_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
    4.29 +{
    4.30 +    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     4.31 +    vmcb->interrupt_shadow = !!(intr_shadow &
     4.32 +                                (HVM_INTR_SHADOW_MOV_SS|HVM_INTR_SHADOW_STI));
    4.33  }
    4.34  
    4.35  static int svm_guest_x86_mode(struct vcpu *v)
    4.36 @@ -779,7 +772,8 @@ static struct hvm_function_table svm_fun
    4.37      .vcpu_destroy         = svm_vcpu_destroy,
    4.38      .save_cpu_ctxt        = svm_save_vmcb_ctxt,
    4.39      .load_cpu_ctxt        = svm_load_vmcb_ctxt,
    4.40 -    .interrupt_blocked    = svm_interrupt_blocked,
    4.41 +    .get_interrupt_shadow = svm_get_interrupt_shadow,
    4.42 +    .set_interrupt_shadow = svm_set_interrupt_shadow,
    4.43      .guest_x86_mode       = svm_guest_x86_mode,
    4.44      .get_segment_register = svm_get_segment_register,
    4.45      .set_segment_register = svm_set_segment_register,
    4.46 @@ -1176,7 +1170,7 @@ static void svm_vmexit_do_hlt(struct vmc
    4.47      /* Check for pending exception or new interrupt. */
    4.48      if ( vmcb->eventinj.fields.v ||
    4.49           ((intack.source != hvm_intsrc_none) &&
    4.50 -          !svm_interrupt_blocked(current, intack)) )
    4.51 +          !hvm_interrupt_blocked(current, intack)) )
    4.52      {
    4.53          HVMTRACE_1D(HLT, curr, /*int pending=*/ 1);
    4.54          return;
     5.1 --- a/xen/arch/x86/hvm/vmx/intr.c	Fri Feb 22 16:49:56 2008 +0000
     5.2 +++ b/xen/arch/x86/hvm/vmx/intr.c	Fri Feb 22 18:32:41 2008 +0000
     5.3 @@ -65,10 +65,6 @@
     5.4   * Injecting a virtual NMI sets the NMI-blocking interruptibility flag only
     5.5   * if the 'virtual NMIs' control is set. Injecting *any* kind of event clears
     5.6   * the STI- and MOV-SS-blocking interruptibility-state flags.
     5.7 - * 
     5.8 - * If MOV/POP SS is executed while MOV-SS-blocking is in effect, the effect
     5.9 - * is cleared. If STI is executed while MOV-SS- or STI-blocking is in effect,
    5.10 - * the effect is cleared. (i.e., MOV-SS-blocking 'dominates' STI-blocking).
    5.11   */
    5.12  
    5.13  static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
     6.1 --- a/xen/arch/x86/hvm/vmx/realmode.c	Fri Feb 22 16:49:56 2008 +0000
     6.2 +++ b/xen/arch/x86/hvm/vmx/realmode.c	Fri Feb 22 18:32:41 2008 +0000
     6.3 @@ -21,25 +21,20 @@
     6.4  #include <asm/hvm/vmx/vmx.h>
     6.5  #include <asm/hvm/vmx/vmcs.h>
     6.6  
     6.7 -struct realmode_emulate_ctxt {
     6.8 -    struct hvm_emulate_ctxt hvm;
     6.9 -    uint32_t intr_shadow;
    6.10 -};
    6.11 -
    6.12  static void realmode_deliver_exception(
    6.13      unsigned int vector,
    6.14      unsigned int insn_len,
    6.15 -    struct realmode_emulate_ctxt *rm_ctxt)
    6.16 +    struct hvm_emulate_ctxt *hvmemul_ctxt)
    6.17  {
    6.18      struct segment_register *idtr, *csr;
    6.19 -    struct cpu_user_regs *regs = rm_ctxt->hvm.ctxt.regs;
    6.20 +    struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
    6.21      uint32_t cs_eip, pstk;
    6.22      uint16_t frame[3];
    6.23      unsigned int last_byte;
    6.24  
    6.25 -    idtr = hvmemul_get_seg_reg(x86_seg_idtr, &rm_ctxt->hvm);
    6.26 -    csr  = hvmemul_get_seg_reg(x86_seg_cs,   &rm_ctxt->hvm);
    6.27 -    __set_bit(x86_seg_cs, &rm_ctxt->hvm.seg_reg_dirty);
    6.28 +    idtr = hvmemul_get_seg_reg(x86_seg_idtr, hvmemul_ctxt);
    6.29 +    csr  = hvmemul_get_seg_reg(x86_seg_cs,   hvmemul_ctxt);
    6.30 +    __set_bit(x86_seg_cs, &hvmemul_ctxt->seg_reg_dirty);
    6.31  
    6.32   again:
    6.33      last_byte = (vector * 4) + 3;
    6.34 @@ -74,7 +69,7 @@ static void realmode_deliver_exception(
    6.35      frame[1] = csr->sel;
    6.36      frame[2] = regs->eflags & ~X86_EFLAGS_RF;
    6.37  
    6.38 -    if ( rm_ctxt->hvm.ctxt.addr_size == 32 )
    6.39 +    if ( hvmemul_ctxt->ctxt.addr_size == 32 )
    6.40      {
    6.41          regs->esp -= 6;
    6.42          pstk = regs->esp;
    6.43 @@ -86,7 +81,7 @@ static void realmode_deliver_exception(
    6.44          regs->esp |= pstk;
    6.45      }
    6.46  
    6.47 -    pstk += hvmemul_get_seg_reg(x86_seg_ss, &rm_ctxt->hvm)->base;
    6.48 +    pstk += hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->base;
    6.49      (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));
    6.50  
    6.51      csr->sel  = cs_eip >> 16;
    6.52 @@ -95,41 +90,42 @@ static void realmode_deliver_exception(
    6.53      regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);
    6.54  
    6.55      /* Exception delivery clears STI and MOV-SS blocking. */
    6.56 -    if ( rm_ctxt->intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
    6.57 +    if ( hvmemul_ctxt->intr_shadow &
    6.58 +         (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
    6.59      {
    6.60 -        rm_ctxt->intr_shadow &= ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
    6.61 -        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, rm_ctxt->intr_shadow);
    6.62 +        hvmemul_ctxt->intr_shadow &=
    6.63 +            ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
    6.64 +        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, hvmemul_ctxt->intr_shadow);
    6.65      }
    6.66  }
    6.67  
    6.68 -static void realmode_emulate_one(struct realmode_emulate_ctxt *rm_ctxt)
    6.69 +static void realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
    6.70  {
    6.71 -    struct cpu_user_regs *regs = rm_ctxt->hvm.ctxt.regs;
    6.72      struct vcpu *curr = current;
    6.73      unsigned long seg_reg_dirty;
    6.74 -    uint32_t new_intr_shadow, intr_info;
    6.75 +    uint32_t intr_info;
    6.76      int rc;
    6.77  
    6.78 -    seg_reg_dirty = rm_ctxt->hvm.seg_reg_dirty;
    6.79 -    rm_ctxt->hvm.seg_reg_dirty = 0;
    6.80 +    seg_reg_dirty = hvmemul_ctxt->seg_reg_dirty;
    6.81 +    hvmemul_ctxt->seg_reg_dirty = 0;
    6.82  
    6.83 -    rc = hvm_emulate_one(&rm_ctxt->hvm);
    6.84 +    rc = hvm_emulate_one(hvmemul_ctxt);
    6.85  
    6.86 -    if ( test_bit(x86_seg_cs, &rm_ctxt->hvm.seg_reg_dirty) )
    6.87 +    if ( test_bit(x86_seg_cs, &hvmemul_ctxt->seg_reg_dirty) )
    6.88      {
    6.89          curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_CS;
    6.90 -        if ( hvmemul_get_seg_reg(x86_seg_cs, &rm_ctxt->hvm)->sel & 3 )
    6.91 +        if ( hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt)->sel & 3 )
    6.92              curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_CS;
    6.93      }
    6.94  
    6.95 -    if ( test_bit(x86_seg_ss, &rm_ctxt->hvm.seg_reg_dirty) )
    6.96 +    if ( test_bit(x86_seg_ss, &hvmemul_ctxt->seg_reg_dirty) )
    6.97      {
    6.98          curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_SS;
    6.99 -        if ( hvmemul_get_seg_reg(x86_seg_ss, &rm_ctxt->hvm)->sel & 3 )
   6.100 +        if ( hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->sel & 3 )
   6.101              curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_SS;
   6.102      }
   6.103  
   6.104 -    rm_ctxt->hvm.seg_reg_dirty |= seg_reg_dirty;
   6.105 +    hvmemul_ctxt->seg_reg_dirty |= seg_reg_dirty;
   6.106  
   6.107      if ( rc == X86EMUL_UNHANDLEABLE )
   6.108      {
   6.109 @@ -137,33 +133,9 @@ static void realmode_emulate_one(struct 
   6.110          goto fail;
   6.111      }
   6.112  
   6.113 -    if ( rc == X86EMUL_RETRY )
   6.114 -        return;
   6.115 -
   6.116 -    new_intr_shadow = rm_ctxt->intr_shadow;
   6.117 -
   6.118 -    /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
   6.119 -    if ( rm_ctxt->hvm.flags.mov_ss )
   6.120 -        new_intr_shadow ^= VMX_INTR_SHADOW_MOV_SS;
   6.121 -    else
   6.122 -        new_intr_shadow &= ~VMX_INTR_SHADOW_MOV_SS;
   6.123 -
   6.124 -    /* STI instruction toggles STI shadow, else we just clear it. */
   6.125 -    if ( rm_ctxt->hvm.flags.sti )
   6.126 -        new_intr_shadow ^= VMX_INTR_SHADOW_STI;
   6.127 -    else
   6.128 -        new_intr_shadow &= ~VMX_INTR_SHADOW_STI;
   6.129 -
   6.130 -    /* Update interrupt shadow information in VMCS only if it changes. */
   6.131 -    if ( rm_ctxt->intr_shadow != new_intr_shadow )
   6.132 -    {
   6.133 -        rm_ctxt->intr_shadow = new_intr_shadow;
   6.134 -        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, rm_ctxt->intr_shadow);
   6.135 -    }
   6.136 -
   6.137      if ( rc == X86EMUL_EXCEPTION )
   6.138      {
   6.139 -        if ( !rm_ctxt->hvm.flags.exn_pending )
   6.140 +        if ( !hvmemul_ctxt->exn_pending )
   6.141          {
   6.142              intr_info = __vmread(VM_ENTRY_INTR_INFO);
   6.143              __vmwrite(VM_ENTRY_INTR_INFO, 0);
   6.144 @@ -172,23 +144,21 @@ static void realmode_emulate_one(struct 
   6.145                  gdprintk(XENLOG_ERR, "Exception pending but no info.\n");
   6.146                  goto fail;
   6.147              }
   6.148 -            rm_ctxt->hvm.exn_vector = (uint8_t)intr_info;
   6.149 -            rm_ctxt->hvm.exn_insn_len = 0;
   6.150 +            hvmemul_ctxt->exn_vector = (uint8_t)intr_info;
   6.151 +            hvmemul_ctxt->exn_insn_len = 0;
   6.152          }
   6.153  
   6.154          if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
   6.155          {
   6.156              gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
   6.157 -                     rm_ctxt->hvm.exn_vector);
   6.158 +                     hvmemul_ctxt->exn_vector);
   6.159              goto fail;
   6.160          }
   6.161  
   6.162          realmode_deliver_exception(
   6.163 -            rm_ctxt->hvm.exn_vector, rm_ctxt->hvm.exn_insn_len, rm_ctxt);
   6.164 -    }
   6.165 -    else if ( rm_ctxt->hvm.flags.hlt && !hvm_local_events_need_delivery(curr) )
   6.166 -    {
   6.167 -        hvm_hlt(regs->eflags);
   6.168 +            hvmemul_ctxt->exn_vector,
   6.169 +            hvmemul_ctxt->exn_insn_len,
   6.170 +            hvmemul_ctxt);
   6.171      }
   6.172  
   6.173      return;
   6.174 @@ -197,18 +167,18 @@ static void realmode_emulate_one(struct 
   6.175      gdprintk(XENLOG_ERR,
   6.176               "Real-mode emulation failed @ %04x:%08lx: "
   6.177               "%02x %02x %02x %02x %02x %02x\n",
   6.178 -             hvmemul_get_seg_reg(x86_seg_cs, &rm_ctxt->hvm)->sel,
   6.179 -             rm_ctxt->hvm.insn_buf_eip,
   6.180 -             rm_ctxt->hvm.insn_buf[0], rm_ctxt->hvm.insn_buf[1],
   6.181 -             rm_ctxt->hvm.insn_buf[2], rm_ctxt->hvm.insn_buf[3],
   6.182 -             rm_ctxt->hvm.insn_buf[4], rm_ctxt->hvm.insn_buf[5]);
   6.183 +             hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt)->sel,
   6.184 +             hvmemul_ctxt->insn_buf_eip,
   6.185 +             hvmemul_ctxt->insn_buf[0], hvmemul_ctxt->insn_buf[1],
   6.186 +             hvmemul_ctxt->insn_buf[2], hvmemul_ctxt->insn_buf[3],
   6.187 +             hvmemul_ctxt->insn_buf[4], hvmemul_ctxt->insn_buf[5]);
   6.188      domain_crash_synchronous();
   6.189  }
   6.190  
   6.191  void vmx_realmode(struct cpu_user_regs *regs)
   6.192  {
   6.193      struct vcpu *curr = current;
   6.194 -    struct realmode_emulate_ctxt rm_ctxt;
   6.195 +    struct hvm_emulate_ctxt hvmemul_ctxt;
   6.196      struct segment_register *sreg;
   6.197      unsigned long intr_info;
   6.198      unsigned int emulations = 0;
   6.199 @@ -218,17 +188,16 @@ void vmx_realmode(struct cpu_user_regs *
   6.200      if ( intr_info & INTR_INFO_VALID_MASK )
   6.201          __vmwrite(VM_ENTRY_INTR_INFO, 0);
   6.202  
   6.203 -    hvm_emulate_prepare(&rm_ctxt.hvm, regs);
   6.204 -    rm_ctxt.intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
   6.205 +    hvm_emulate_prepare(&hvmemul_ctxt, regs);
   6.206  
   6.207      if ( curr->arch.hvm_vcpu.io_completed )
   6.208 -        realmode_emulate_one(&rm_ctxt);
   6.209 +        realmode_emulate_one(&hvmemul_ctxt);
   6.210  
   6.211      /* Only deliver interrupts into emulated real mode. */
   6.212      if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
   6.213           (intr_info & INTR_INFO_VALID_MASK) )
   6.214      {
   6.215 -        realmode_deliver_exception((uint8_t)intr_info, 0, &rm_ctxt);
   6.216 +        realmode_deliver_exception((uint8_t)intr_info, 0, &hvmemul_ctxt);
   6.217          intr_info = 0;
   6.218      }
   6.219  
   6.220 @@ -245,7 +214,7 @@ void vmx_realmode(struct cpu_user_regs *
   6.221               !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
   6.222               hvm_local_events_need_delivery(curr) )
   6.223              break;
   6.224 -        realmode_emulate_one(&rm_ctxt);
   6.225 +        realmode_emulate_one(&hvmemul_ctxt);
   6.226      }
   6.227  
   6.228      if ( !curr->arch.hvm_vmx.vmxemul )
   6.229 @@ -255,20 +224,20 @@ void vmx_realmode(struct cpu_user_regs *
   6.230           * At this point CS.RPL == SS.RPL == CS.DPL == SS.DPL == 0. For
    6.231           * DS, ES, FS and GS the least invasive trick is to set DPL == RPL.
   6.232           */
   6.233 -        sreg = hvmemul_get_seg_reg(x86_seg_ds, &rm_ctxt.hvm);
   6.234 -        sreg->attr.fields.dpl = sreg->sel & 3;
   6.235 -        sreg = hvmemul_get_seg_reg(x86_seg_es, &rm_ctxt.hvm);
   6.236 +        sreg = hvmemul_get_seg_reg(x86_seg_ds, &hvmemul_ctxt);
   6.237          sreg->attr.fields.dpl = sreg->sel & 3;
   6.238 -        sreg = hvmemul_get_seg_reg(x86_seg_fs, &rm_ctxt.hvm);
   6.239 +        sreg = hvmemul_get_seg_reg(x86_seg_es, &hvmemul_ctxt);
   6.240          sreg->attr.fields.dpl = sreg->sel & 3;
   6.241 -        sreg = hvmemul_get_seg_reg(x86_seg_gs, &rm_ctxt.hvm);
   6.242 +        sreg = hvmemul_get_seg_reg(x86_seg_fs, &hvmemul_ctxt);
   6.243          sreg->attr.fields.dpl = sreg->sel & 3;
   6.244 -        rm_ctxt.hvm.seg_reg_dirty |=
   6.245 +        sreg = hvmemul_get_seg_reg(x86_seg_gs, &hvmemul_ctxt);
   6.246 +        sreg->attr.fields.dpl = sreg->sel & 3;
   6.247 +        hvmemul_ctxt.seg_reg_dirty |=
   6.248              (1ul << x86_seg_ds) | (1ul << x86_seg_es) |
   6.249              (1ul << x86_seg_fs) | (1ul << x86_seg_gs);
   6.250      }
   6.251  
   6.252 -    hvm_emulate_writeback(&rm_ctxt.hvm);
   6.253 +    hvm_emulate_writeback(&hvmemul_ctxt);
   6.254  
   6.255      /* Re-instate VM_ENTRY_INTR_INFO if we did not discharge it. */
   6.256      if ( intr_info & INTR_INFO_VALID_MASK )
     7.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Feb 22 16:49:56 2008 +0000
     7.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Feb 22 18:32:41 2008 +0000
     7.3 @@ -890,32 +890,14 @@ static void vmx_init_hypercall_page(stru
     7.4      *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
     7.5  }
     7.6  
     7.7 -static enum hvm_intblk vmx_interrupt_blocked(
     7.8 -    struct vcpu *v, struct hvm_intack intack)
     7.9 +static unsigned int vmx_get_interrupt_shadow(struct vcpu *v)
    7.10  {
    7.11 -    unsigned long intr_shadow;
    7.12 -
    7.13 -    /*
    7.14 -     * Test EFLAGS.IF first. It is often the most likely reason for interrupt
    7.15 -     * blockage, and is the cheapest to test (because no VMREAD is required).
    7.16 -     */
    7.17 -    if ( (intack.source != hvm_intsrc_nmi) &&
    7.18 -         !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
    7.19 -        return hvm_intblk_rflags_ie;
    7.20 +    return __vmread(GUEST_INTERRUPTIBILITY_INFO);
    7.21 +}
    7.22  
    7.23 -    intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
    7.24 -
    7.25 -    if ( intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
    7.26 -        return hvm_intblk_shadow;
    7.27 -
    7.28 -    if ( intack.source == hvm_intsrc_nmi )
    7.29 -        return ((intr_shadow & VMX_INTR_SHADOW_NMI) ?
    7.30 -                hvm_intblk_nmi_iret : hvm_intblk_none);
    7.31 -
    7.32 -    ASSERT((intack.source == hvm_intsrc_pic) ||
    7.33 -           (intack.source == hvm_intsrc_lapic));
    7.34 -
    7.35 -    return hvm_intblk_none;
    7.36 +static void vmx_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
    7.37 +{
    7.38 +    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
    7.39  }
    7.40  
    7.41  static void vmx_update_host_cr3(struct vcpu *v)
    7.42 @@ -1038,7 +1020,8 @@ static struct hvm_function_table vmx_fun
    7.43      .vcpu_destroy         = vmx_vcpu_destroy,
    7.44      .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
    7.45      .load_cpu_ctxt        = vmx_load_vmcs_ctxt,
    7.46 -    .interrupt_blocked    = vmx_interrupt_blocked,
    7.47 +    .get_interrupt_shadow = vmx_get_interrupt_shadow,
    7.48 +    .set_interrupt_shadow = vmx_set_interrupt_shadow,
    7.49      .guest_x86_mode       = vmx_guest_x86_mode,
    7.50      .get_segment_register = vmx_get_segment_register,
    7.51      .set_segment_register = vmx_set_segment_register,
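
SVM and VMX now export the same pair of hooks in hvm_function_table, replacing the per-vendor interrupt_blocked hook; the blocking policy itself moves into the generic hvm_interrupt_blocked() in hvm.c above. The hook signatures, as implemented in this changeset:

    unsigned int (*get_interrupt_shadow)(struct vcpu *v);
    void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);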
     8.1 --- a/xen/arch/x86/hvm/vmx/x86_32/exits.S	Fri Feb 22 16:49:56 2008 +0000
     8.2 +++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S	Fri Feb 22 18:32:41 2008 +0000
     8.3 @@ -89,7 +89,7 @@ ENTRY(vmx_asm_vmexit_handler)
     8.4  
     8.5          ALIGN
     8.6  vmx_process_softirqs:
     8.7 -        sti       
     8.8 +        sti
     8.9          call do_softirq
    8.10          jmp vmx_asm_do_vmentry
    8.11  
    8.12 @@ -104,6 +104,10 @@ ENTRY(vmx_asm_do_vmentry)
    8.13          jnz  vmx_process_softirqs
    8.14  
    8.15          call vmx_intr_assist
    8.16 +
    8.17 +        testb $0xff,VCPU_vmx_emul(%ebx)
    8.18 +        jnz  vmx_goto_realmode
    8.19 +
    8.20          movl VCPU_hvm_guest_cr2(%ebx),%eax
    8.21          movl %eax,%cr2
    8.22          call vmx_trace_vmentry
    8.23 @@ -115,9 +119,6 @@ ENTRY(vmx_asm_do_vmentry)
    8.24          movl $GUEST_RFLAGS,%eax
    8.25          VMWRITE(UREGS_eflags)
    8.26  
    8.27 -        testb $0xff,VCPU_vmx_emul(%ebx)
    8.28 -        jnz  vmx_goto_realmode
    8.29 -
    8.30          cmpb $0,VCPU_vmx_launched(%ebx)
    8.31          je   vmx_launch
    8.32  
     9.1 --- a/xen/arch/x86/hvm/vmx/x86_64/exits.S	Fri Feb 22 16:49:56 2008 +0000
     9.2 +++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S	Fri Feb 22 18:32:41 2008 +0000
     9.3 @@ -105,7 +105,7 @@ ENTRY(vmx_asm_vmexit_handler)
     9.4  
     9.5          ALIGN
     9.6  vmx_process_softirqs:
     9.7 -        sti       
     9.8 +        sti
     9.9          call do_softirq
    9.10          jmp vmx_asm_do_vmentry
    9.11  
    9.12 @@ -121,6 +121,10 @@ ENTRY(vmx_asm_do_vmentry)
    9.13          jnz   vmx_process_softirqs
    9.14  
    9.15          call vmx_intr_assist
    9.16 +
    9.17 +        testb $0xff,VCPU_vmx_emul(%rbx)
    9.18 +        jnz  vmx_goto_realmode
    9.19 +
    9.20          movq VCPU_hvm_guest_cr2(%rbx),%rax
    9.21          movq %rax,%cr2
    9.22          call vmx_trace_vmentry
    9.23 @@ -134,9 +138,6 @@ ENTRY(vmx_asm_do_vmentry)
    9.24          movl $GUEST_RFLAGS,%eax
    9.25          VMWRITE(UREGS_eflags)
    9.26  
    9.27 -        testb $0xff,VCPU_vmx_emul(%rbx)
    9.28 -        jnz  vmx_goto_realmode
    9.29 -
    9.30          cmpb $0,VCPU_vmx_launched(%rbx)
    9.31          je   vmx_launch
    9.32  
    10.1 --- a/xen/arch/x86/x86_emulate.c	Fri Feb 22 16:49:56 2008 +0000
    10.2 +++ b/xen/arch/x86/x86_emulate.c	Fri Feb 22 18:32:41 2008 +0000
    10.3 @@ -482,7 +482,7 @@ do{ asm volatile (                      
    10.4     if ( !mode_64bit() ) _eip = (uint32_t)_eip; /* ignore upper dword */ \
    10.5     _regs.eip += (_size); /* real hardware doesn't truncate */           \
    10.6     generate_exception_if((uint8_t)(_regs.eip - ctxt->regs->eip) > 15,   \
    10.7 -                         EXC_GP);                                       \
    10.8 +                         EXC_GP, 0);                                    \
    10.9     rc = ops->insn_fetch(x86_seg_cs, _eip, &_x, (_size), ctxt);          \
   10.10     if ( rc ) goto done;                                                 \
   10.11     _x;                                                                  \
   10.12 @@ -505,12 +505,12 @@ do {                                    
   10.13      if ( rc ) goto done;                                \
   10.14  } while (0)
   10.15  
   10.16 -#define generate_exception_if(p, e)                                      \
   10.17 -({  if ( (p) ) {                                                         \
   10.18 -        fail_if(ops->inject_hw_exception == NULL);                       \
   10.19 -        rc = ops->inject_hw_exception(e, 0, ctxt) ? : X86EMUL_EXCEPTION; \
   10.20 -        goto done;                                                       \
   10.21 -    }                                                                    \
   10.22 +#define generate_exception_if(p, e, ec)                                   \
   10.23 +({  if ( (p) ) {                                                          \
   10.24 +        fail_if(ops->inject_hw_exception == NULL);                        \
   10.25 +        rc = ops->inject_hw_exception(e, ec, ctxt) ? : X86EMUL_EXCEPTION; \
   10.26 +        goto done;                                                        \
   10.27 +    }                                                                     \
   10.28  })
   10.29  
   10.30  /*
   10.31 @@ -1023,6 +1023,8 @@ x86_emulate(
   10.32      ea.mem.seg = x86_seg_ds;
   10.33      ea.mem.off = 0;
   10.34  
   10.35 +    ctxt->retire.byte = 0;
   10.36 +
   10.37      op_bytes = def_op_bytes = ad_bytes = def_ad_bytes = ctxt->addr_size/8;
   10.38      if ( op_bytes == 8 )
   10.39      {
   10.40 @@ -1105,7 +1107,7 @@ x86_emulate(
   10.41      }
   10.42  
   10.43      /* Lock prefix is allowed only on RMW instructions. */
   10.44 -    generate_exception_if((d & Mov) && lock_prefix, EXC_GP);
   10.45 +    generate_exception_if((d & Mov) && lock_prefix, EXC_GP, 0);
   10.46  
   10.47      /* ModRM and SIB bytes. */
   10.48      if ( d & ModRM )
   10.49 @@ -1393,7 +1395,7 @@ x86_emulate(
   10.50      }
   10.51  
   10.52      /* LOCK prefix allowed only on instructions with memory destination. */
   10.53 -    generate_exception_if(lock_prefix && (dst.type != OP_MEM), EXC_GP);
   10.54 +    generate_exception_if(lock_prefix && (dst.type != OP_MEM), EXC_GP, 0);
   10.55  
   10.56      if ( twobyte )
   10.57          goto twobyte_insn;
   10.58 @@ -1459,14 +1461,15 @@ x86_emulate(
   10.59      case 0x62: /* bound */ {
   10.60          unsigned long src_val2;
   10.61          int lb, ub, idx;
   10.62 -        generate_exception_if(mode_64bit() || (src.type != OP_MEM), EXC_UD);
   10.63 +        generate_exception_if(mode_64bit() || (src.type != OP_MEM),
   10.64 +                              EXC_UD, -1);
   10.65          if ( (rc = ops->read(src.mem.seg, src.mem.off + op_bytes,
   10.66                               &src_val2, op_bytes, ctxt)) )
   10.67              goto done;
   10.68          ub  = (op_bytes == 2) ? (int16_t)src_val2 : (int32_t)src_val2;
   10.69          lb  = (op_bytes == 2) ? (int16_t)src.val  : (int32_t)src.val;
   10.70          idx = (op_bytes == 2) ? (int16_t)dst.val  : (int32_t)dst.val;
   10.71 -        generate_exception_if((idx < lb) || (idx > ub), EXC_BR);
   10.72 +        generate_exception_if((idx < lb) || (idx > ub), EXC_BR, -1);
   10.73          dst.type = OP_NONE;
   10.74          break;
   10.75      }
   10.76 @@ -1493,7 +1496,7 @@ x86_emulate(
   10.77                  dst.val  = (dst.val & ~3) | (src_val & 3);
   10.78              else
   10.79                  dst.type = OP_NONE;
   10.80 -            generate_exception_if(in_realmode(ctxt, ops), EXC_UD);
   10.81 +            generate_exception_if(in_realmode(ctxt, ops), EXC_UD, -1);
   10.82          }
   10.83          break;
   10.84  
   10.85 @@ -1534,7 +1537,7 @@ x86_emulate(
   10.86      }
   10.87  
   10.88      case 0x82: /* Grp1 (x86/32 only) */
   10.89 -        generate_exception_if(mode_64bit(), EXC_UD);
   10.90 +        generate_exception_if(mode_64bit(), EXC_UD, -1);
   10.91      case 0x80: case 0x81: case 0x83: /* Grp1 */
   10.92          switch ( modrm_reg & 7 )
   10.93          {
   10.94 @@ -1571,7 +1574,7 @@ x86_emulate(
   10.95          break;
   10.96  
   10.97      case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
   10.98 -        generate_exception_if((modrm_reg & 7) != 0, EXC_UD);
   10.99 +        generate_exception_if((modrm_reg & 7) != 0, EXC_UD, -1);
  10.100      case 0x88 ... 0x8b: /* mov */
  10.101          dst.val = src.val;
  10.102          break;
  10.103 @@ -1579,7 +1582,7 @@ x86_emulate(
  10.104      case 0x8c: /* mov Sreg,r/m */ {
  10.105          struct segment_register reg;
  10.106          enum x86_segment seg = decode_segment(modrm_reg);
  10.107 -        generate_exception_if(seg == decode_segment_failed, EXC_UD);
  10.108 +        generate_exception_if(seg == decode_segment_failed, EXC_UD, -1);
  10.109          fail_if(ops->read_segment == NULL);
  10.110          if ( (rc = ops->read_segment(seg, &reg, ctxt)) != 0 )
  10.111              goto done;
  10.112 @@ -1591,9 +1594,11 @@ x86_emulate(
  10.113  
  10.114      case 0x8e: /* mov r/m,Sreg */ {
  10.115          enum x86_segment seg = decode_segment(modrm_reg);
  10.116 -        generate_exception_if(seg == decode_segment_failed, EXC_UD);
  10.117 +        generate_exception_if(seg == decode_segment_failed, EXC_UD, -1);
  10.118          if ( (rc = load_seg(seg, (uint16_t)src.val, ctxt, ops)) != 0 )
  10.119              goto done;
  10.120 +        if ( seg == x86_seg_ss )
  10.121 +            ctxt->retire.flags.mov_ss = 1;
  10.122          dst.type = OP_NONE;
  10.123          break;
  10.124      }
  10.125 @@ -1603,7 +1608,7 @@ x86_emulate(
  10.126          break;
  10.127  
  10.128      case 0x8f: /* pop (sole member of Grp1a) */
  10.129 -        generate_exception_if((modrm_reg & 7) != 0, EXC_UD);
  10.130 +        generate_exception_if((modrm_reg & 7) != 0, EXC_UD, -1);
  10.131          /* 64-bit mode: POP defaults to a 64-bit operand. */
  10.132          if ( mode_64bit() && (dst.bytes == 4) )
  10.133              dst.bytes = 8;
  10.134 @@ -1659,7 +1664,7 @@ x86_emulate(
  10.135          unsigned long sel;
  10.136          dst.val = x86_seg_es;
  10.137      les: /* dst.val identifies the segment */
  10.138 -        generate_exception_if(src.type != OP_MEM, EXC_UD);
  10.139 +        generate_exception_if(src.type != OP_MEM, EXC_UD, -1);
  10.140          if ( (rc = ops->read(src.mem.seg, src.mem.off + src.bytes,
  10.141                               &sel, 2, ctxt)) != 0 )
  10.142              goto done;
  10.143 @@ -1797,7 +1802,7 @@ x86_emulate(
  10.144                  v    = (uint8_t)src.val;
  10.145                  generate_exception_if(
  10.146                      div_dbl(u, v) || ((uint8_t)u[0] != (uint16_t)u[0]),
  10.147 -                    EXC_DE);
  10.148 +                    EXC_DE, -1);
  10.149                  dst.val = (uint8_t)u[0];
  10.150                  ((uint8_t *)&_regs.eax)[1] = u[1];
  10.151                  break;
  10.152 @@ -1807,7 +1812,7 @@ x86_emulate(
  10.153                  v    = (uint16_t)src.val;
  10.154                  generate_exception_if(
  10.155                      div_dbl(u, v) || ((uint16_t)u[0] != (uint32_t)u[0]),
  10.156 -                    EXC_DE);
  10.157 +                    EXC_DE, -1);
  10.158                  dst.val = (uint16_t)u[0];
  10.159                  *(uint16_t *)&_regs.edx = u[1];
  10.160                  break;
  10.161 @@ -1818,7 +1823,7 @@ x86_emulate(
  10.162                  v    = (uint32_t)src.val;
  10.163                  generate_exception_if(
  10.164                      div_dbl(u, v) || ((uint32_t)u[0] != u[0]),
  10.165 -                    EXC_DE);
  10.166 +                    EXC_DE, -1);
  10.167                  dst.val   = (uint32_t)u[0];
  10.168                  _regs.edx = (uint32_t)u[1];
  10.169                  break;
  10.170 @@ -1827,7 +1832,7 @@ x86_emulate(
  10.171                  u[0] = _regs.eax;
  10.172                  u[1] = _regs.edx;
  10.173                  v    = src.val;
  10.174 -                generate_exception_if(div_dbl(u, v), EXC_DE);
  10.175 +                generate_exception_if(div_dbl(u, v), EXC_DE, -1);
  10.176                  dst.val   = u[0];
  10.177                  _regs.edx = u[1];
  10.178                  break;
  10.179 @@ -1847,7 +1852,7 @@ x86_emulate(
  10.180                  v    = (int8_t)src.val;
  10.181                  generate_exception_if(
  10.182                      idiv_dbl(u, v) || ((int8_t)u[0] != (int16_t)u[0]),
  10.183 -                    EXC_DE);
  10.184 +                    EXC_DE, -1);
  10.185                  dst.val = (int8_t)u[0];
  10.186                  ((int8_t *)&_regs.eax)[1] = u[1];
  10.187                  break;
  10.188 @@ -1857,7 +1862,7 @@ x86_emulate(
  10.189                  v    = (int16_t)src.val;
  10.190                  generate_exception_if(
  10.191                      idiv_dbl(u, v) || ((int16_t)u[0] != (int32_t)u[0]),
  10.192 -                    EXC_DE);
  10.193 +                    EXC_DE, -1);
  10.194                  dst.val = (int16_t)u[0];
  10.195                  *(int16_t *)&_regs.edx = u[1];
  10.196                  break;
  10.197 @@ -1868,7 +1873,7 @@ x86_emulate(
  10.198                  v    = (int32_t)src.val;
  10.199                  generate_exception_if(
  10.200                      idiv_dbl(u, v) || ((int32_t)u[0] != u[0]),
  10.201 -                    EXC_DE);
  10.202 +                    EXC_DE, -1);
  10.203                  dst.val   = (int32_t)u[0];
  10.204                  _regs.edx = (uint32_t)u[1];
  10.205                  break;
  10.206 @@ -1877,7 +1882,7 @@ x86_emulate(
  10.207                  u[0] = _regs.eax;
  10.208                  u[1] = _regs.edx;
  10.209                  v    = src.val;
  10.210 -                generate_exception_if(idiv_dbl(u, v), EXC_DE);
  10.211 +                generate_exception_if(idiv_dbl(u, v), EXC_DE, -1);
  10.212                  dst.val   = u[0];
  10.213                  _regs.edx = u[1];
  10.214                  break;
  10.215 @@ -1890,7 +1895,7 @@ x86_emulate(
  10.216          break;
  10.217  
  10.218      case 0xfe: /* Grp4 */
  10.219 -        generate_exception_if((modrm_reg & 7) >= 2, EXC_UD);
  10.220 +        generate_exception_if((modrm_reg & 7) >= 2, EXC_UD, -1);
  10.221      case 0xff: /* Grp5 */
  10.222          switch ( modrm_reg & 7 )
  10.223          {
  10.224 @@ -1921,7 +1926,7 @@ x86_emulate(
  10.225          case 5: /* jmp (far, absolute indirect) */ {
  10.226              unsigned long sel;
  10.227  
  10.228 -            generate_exception_if(dst.type != OP_MEM, EXC_UD);
  10.229 +            generate_exception_if(dst.type != OP_MEM, EXC_UD, -1);
  10.230  
  10.231              if ( (rc = ops->read(dst.mem.seg, dst.mem.off+dst.bytes,
  10.232                                   &sel, 2, ctxt)) )
  10.233 @@ -1963,7 +1968,7 @@ x86_emulate(
  10.234              dst.type = OP_NONE;
  10.235              break;
  10.236          case 7:
  10.237 -            generate_exception_if(1, EXC_UD);
  10.238 +            generate_exception_if(1, EXC_UD, -1);
  10.239          default:
  10.240              goto cannot_emulate;
  10.241          }
  10.242 @@ -2003,11 +2008,9 @@ x86_emulate(
  10.243      /* Commit shadow register state. */
  10.244      _regs.eflags &= ~EFLG_RF;
  10.245      *ctxt->regs = _regs;
  10.246 -
  10.247 -    if ( (_regs.eflags & EFLG_TF) &&
  10.248 -         (rc == X86EMUL_OKAY) &&
  10.249 +    if ( (_regs.eflags & EFLG_TF) && (rc == X86EMUL_OKAY) &&
  10.250           (ops->inject_hw_exception != NULL) )
  10.251 -        rc = ops->inject_hw_exception(EXC_DB, 0, ctxt) ? : X86EMUL_EXCEPTION;
  10.252 +        rc = ops->inject_hw_exception(EXC_DB, -1, ctxt) ? : X86EMUL_EXCEPTION;
  10.253  
  10.254   done:
  10.255      return rc;
  10.256 @@ -2022,7 +2025,7 @@ x86_emulate(
  10.257      generate_exception_if(lock_prefix &&
  10.258                            ((b < 0x20) || (b > 0x23)) && /* MOV CRn/DRn */
  10.259                            (b != 0xc7),                  /* CMPXCHG{8,16}B */
  10.260 -                          EXC_GP);
  10.261 +                          EXC_GP, 0);
  10.262  
  10.263      if ( twobyte )
  10.264          goto twobyte_special_insn;
  10.265 @@ -2069,6 +2072,7 @@ x86_emulate(
  10.266  
  10.267      case 0x17: /* pop %%ss */
  10.268          src.val = x86_seg_ss;
  10.269 +        ctxt->retire.flags.mov_ss = 1;
  10.270          goto pop_seg;
  10.271  
  10.272      case 0x1e: /* push %%ds */
  10.273 @@ -2082,7 +2086,7 @@ x86_emulate(
  10.274      case 0x27: /* daa */ {
  10.275          uint8_t al = _regs.eax;
  10.276          unsigned long eflags = _regs.eflags;
  10.277 -        generate_exception_if(mode_64bit(), EXC_UD);
  10.278 +        generate_exception_if(mode_64bit(), EXC_UD, -1);
  10.279          _regs.eflags &= ~(EFLG_CF|EFLG_AF);
  10.280          if ( ((al & 0x0f) > 9) || (eflags & EFLG_AF) )
  10.281          {
  10.282 @@ -2104,7 +2108,7 @@ x86_emulate(
  10.283      case 0x2f: /* das */ {
  10.284          uint8_t al = _regs.eax;
  10.285          unsigned long eflags = _regs.eflags;
  10.286 -        generate_exception_if(mode_64bit(), EXC_UD);
  10.287 +        generate_exception_if(mode_64bit(), EXC_UD, -1);
  10.288          _regs.eflags &= ~(EFLG_CF|EFLG_AF);
  10.289          if ( ((al & 0x0f) > 9) || (eflags & EFLG_AF) )
  10.290          {
  10.291 @@ -2127,7 +2131,7 @@ x86_emulate(
  10.292  
  10.293      case 0x37: /* aaa */
  10.294      case 0x3f: /* aas */
  10.295 -        generate_exception_if(mode_64bit(), EXC_UD);
  10.296 +        generate_exception_if(mode_64bit(), EXC_UD, -1);
  10.297          _regs.eflags &= ~EFLG_CF;
  10.298          if ( ((uint8_t)_regs.eax > 9) || (_regs.eflags & EFLG_AF) )
  10.299          {
  10.300 @@ -2171,7 +2175,7 @@ x86_emulate(
  10.301          unsigned long regs[] = {
  10.302              _regs.eax, _regs.ecx, _regs.edx, _regs.ebx,
  10.303              _regs.esp, _regs.ebp, _regs.esi, _regs.edi };
  10.304 -        generate_exception_if(mode_64bit(), EXC_UD);
  10.305 +        generate_exception_if(mode_64bit(), EXC_UD, -1);
  10.306          for ( i = 0; i < 8; i++ )
  10.307              if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
  10.308                                    regs[i], op_bytes, ctxt)) != 0 )
  10.309 @@ -2186,7 +2190,7 @@ x86_emulate(
  10.310              (unsigned long *)&_regs.ebp, (unsigned long *)&dummy_esp,
  10.311              (unsigned long *)&_regs.ebx, (unsigned long *)&_regs.edx,
  10.312              (unsigned long *)&_regs.ecx, (unsigned long *)&_regs.eax };
  10.313 -        generate_exception_if(mode_64bit(), EXC_UD);
  10.314 +        generate_exception_if(mode_64bit(), EXC_UD, -1);
  10.315          for ( i = 0; i < 8; i++ )
  10.316          {
  10.317              if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
  10.318 @@ -2224,7 +2228,7 @@ x86_emulate(
  10.319  
  10.320      case 0x6c ... 0x6d: /* ins %dx,%es:%edi */ {
  10.321          unsigned long nr_reps = get_rep_prefix();
  10.322 -        generate_exception_if(!mode_iopl(), EXC_GP);
  10.323 +        generate_exception_if(!mode_iopl(), EXC_GP, 0);
  10.324          dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
  10.325          dst.mem.seg = x86_seg_es;
  10.326          dst.mem.off = truncate_ea(_regs.edi);
  10.327 @@ -2254,7 +2258,7 @@ x86_emulate(
  10.328  
  10.329      case 0x6e ... 0x6f: /* outs %esi,%dx */ {
  10.330          unsigned long nr_reps = get_rep_prefix();
  10.331 -        generate_exception_if(!mode_iopl(), EXC_GP);
  10.332 +        generate_exception_if(!mode_iopl(), EXC_GP, 0);
  10.333          dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
  10.334          if ( (nr_reps > 1) && (ops->rep_outs != NULL) &&
  10.335               ((rc = ops->rep_outs(ea.mem.seg, truncate_ea(_regs.esi),
  10.336 @@ -2333,7 +2337,7 @@ x86_emulate(
  10.337          uint32_t eip;
  10.338  
  10.339          fail_if(ops->read_segment == NULL);
  10.340 -        generate_exception_if(mode_64bit(), EXC_UD);
  10.341 +        generate_exception_if(mode_64bit(), EXC_UD, -1);
  10.342  
  10.343          eip = insn_fetch_bytes(op_bytes);
  10.344          sel = insn_fetch_type(uint16_t);
  10.345 @@ -2359,7 +2363,6 @@ x86_emulate(
  10.346          uint32_t mask = EFLG_VIP | EFLG_VIF | EFLG_VM;
  10.347          if ( !mode_iopl() )
  10.348              mask |= EFLG_IOPL;
  10.349 -        fail_if(ops->write_rflags == NULL);
  10.350          /* 64-bit mode: POP defaults to a 64-bit operand. */
  10.351          if ( mode_64bit() && (op_bytes == 4) )
  10.352              op_bytes = 8;
  10.353 @@ -2371,8 +2374,6 @@ x86_emulate(
  10.354          dst.val &= 0x257fd5;
  10.355          _regs.eflags &= mask;
  10.356          _regs.eflags |= (uint32_t)(dst.val & ~mask) | 0x02;
  10.357 -        if ( (rc = ops->write_rflags(_regs.eflags, ctxt)) != 0 )
  10.358 -            goto done;
  10.359          break;
  10.360      }
  10.361  
  10.362 @@ -2597,7 +2598,7 @@ x86_emulate(
  10.363          goto done;
  10.364  
  10.365      case 0xce: /* into */
  10.366 -        generate_exception_if(mode_64bit(), EXC_UD);
  10.367 +        generate_exception_if(mode_64bit(), EXC_UD, -1);
  10.368          if ( !(_regs.eflags & EFLG_OF) )
  10.369              break;
  10.370          src.val = EXC_OF;
  10.371 @@ -2609,7 +2610,6 @@ x86_emulate(
  10.372          if ( !mode_iopl() )
  10.373              mask |= EFLG_IOPL;
  10.374          fail_if(!in_realmode(ctxt, ops));
  10.375 -        fail_if(ops->write_rflags == NULL);
  10.376          if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
  10.377                               &eip, op_bytes, ctxt)) ||
  10.378               (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
  10.379 @@ -2622,8 +2622,6 @@ x86_emulate(
  10.380          eflags &= 0x257fd5;
  10.381          _regs.eflags &= mask;
  10.382          _regs.eflags |= (uint32_t)(eflags & ~mask) | 0x02;
  10.383 -        if ( (rc = ops->write_rflags(_regs.eflags, ctxt)) != 0 )
  10.384 -            goto done;
  10.385          _regs.eip = eip;
  10.386          if ( (rc = load_seg(x86_seg_cs, (uint16_t)cs, ctxt, ops)) != 0 )
  10.387              goto done;
  10.388 @@ -2633,8 +2631,8 @@ x86_emulate(
  10.389      case 0xd4: /* aam */ {
  10.390          unsigned int base = insn_fetch_type(uint8_t);
  10.391          uint8_t al = _regs.eax;
  10.392 -        generate_exception_if(mode_64bit(), EXC_UD);
  10.393 -        generate_exception_if(base == 0, EXC_DE);
  10.394 +        generate_exception_if(mode_64bit(), EXC_UD, -1);
  10.395 +        generate_exception_if(base == 0, EXC_DE, -1);
  10.396          *(uint16_t *)&_regs.eax = ((al / base) << 8) | (al % base);
  10.397          _regs.eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF);
  10.398          _regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0;
  10.399 @@ -2646,7 +2644,7 @@ x86_emulate(
  10.400      case 0xd5: /* aad */ {
  10.401          unsigned int base = insn_fetch_type(uint8_t);
  10.402          uint16_t ax = _regs.eax;
  10.403 -        generate_exception_if(mode_64bit(), EXC_UD);
  10.404 +        generate_exception_if(mode_64bit(), EXC_UD, -1);
  10.405          *(uint16_t *)&_regs.eax = (uint8_t)(ax + ((ax >> 8) * base));
  10.406          _regs.eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF);
  10.407          _regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0;
  10.408 @@ -2656,7 +2654,7 @@ x86_emulate(
  10.409      }
  10.410  
  10.411      case 0xd6: /* salc */
  10.412 -        generate_exception_if(mode_64bit(), EXC_UD);
  10.413 +        generate_exception_if(mode_64bit(), EXC_UD, -1);
  10.414          *(uint8_t *)&_regs.eax = (_regs.eflags & EFLG_CF) ? 0xff : 0x00;
  10.415          break;
  10.416  
  10.417 @@ -2743,7 +2741,7 @@ x86_emulate(
  10.418          unsigned int port = ((b < 0xe8)
  10.419                               ? insn_fetch_type(uint8_t)
  10.420                               : (uint16_t)_regs.edx);
  10.421 -        generate_exception_if(!mode_iopl(), EXC_GP);
  10.422 +        generate_exception_if(!mode_iopl(), EXC_GP, 0);
  10.423          op_bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
  10.424          if ( b & 2 )
  10.425          {
  10.426 @@ -2787,7 +2785,7 @@ x86_emulate(
  10.427      case 0xea: /* jmp (far, absolute) */ {
  10.428          uint16_t sel;
  10.429          uint32_t eip;
  10.430 -        generate_exception_if(mode_64bit(), EXC_UD);
  10.431 +        generate_exception_if(mode_64bit(), EXC_UD, -1);
  10.432          eip = insn_fetch_bytes(op_bytes);
  10.433          sel = insn_fetch_type(uint16_t);
  10.434          if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
  10.435 @@ -2807,9 +2805,7 @@ x86_emulate(
  10.436          goto swint;
  10.437  
  10.438      case 0xf4: /* hlt */
  10.439 -        fail_if(ops->hlt == NULL);
  10.440 -        if ( (rc = ops->hlt(ctxt)) != 0 )
  10.441 -            goto done;
  10.442 +        ctxt->retire.flags.hlt = 1;
  10.443          break;
  10.444  
  10.445      case 0xf5: /* cmc */
  10.446 @@ -2825,14 +2821,17 @@ x86_emulate(
  10.447          break;
  10.448  
  10.449      case 0xfa: /* cli */
  10.450 -    case 0xfb: /* sti */
  10.451 -        generate_exception_if(!mode_iopl(), EXC_GP);
  10.452 -        fail_if(ops->write_rflags == NULL);
  10.453 +        generate_exception_if(!mode_iopl(), EXC_GP, 0);
  10.454          _regs.eflags &= ~EFLG_IF;
  10.455 -        if ( b == 0xfb ) /* sti */
  10.456 +        break;
  10.457 +
  10.458 +    case 0xfb: /* sti */
  10.459 +        generate_exception_if(!mode_iopl(), EXC_GP, 0);
  10.460 +        if ( !(_regs.eflags & EFLG_IF) )
  10.461 +        {
  10.462              _regs.eflags |= EFLG_IF;
  10.463 -        if ( (rc = ops->write_rflags(_regs.eflags, ctxt)) != 0 )
  10.464 -            goto done;
  10.465 +            ctxt->retire.flags.sti = 1;
  10.466 +        }
  10.467          break;
  10.468  
  10.469      case 0xfc: /* cld */
  10.470 @@ -3001,7 +3000,7 @@ x86_emulate(
  10.471          case 5: goto bts;
  10.472          case 6: goto btr;
  10.473          case 7: goto btc;
  10.474 -        default: generate_exception_if(1, EXC_UD);
  10.475 +        default: generate_exception_if(1, EXC_UD, -1);
  10.476          }
  10.477          break;
  10.478  
  10.479 @@ -3038,8 +3037,8 @@ x86_emulate(
  10.480  
  10.481          if ( modrm == 0xdf ) /* invlpga */
  10.482          {
  10.483 -            generate_exception_if(in_realmode(ctxt, ops), EXC_UD);
  10.484 -            generate_exception_if(!mode_ring0(), EXC_GP);
  10.485 +            generate_exception_if(in_realmode(ctxt, ops), EXC_UD, -1);
  10.486 +            generate_exception_if(!mode_ring0(), EXC_GP, 0);
  10.487              fail_if(ops->invlpg == NULL);
  10.488              if ( (rc = ops->invlpg(x86_seg_none, truncate_ea(_regs.eax),
  10.489                                     ctxt)) )
  10.490 @@ -3051,7 +3050,7 @@ x86_emulate(
  10.491          {
  10.492          case 0: /* sgdt */
  10.493          case 1: /* sidt */
  10.494 -            generate_exception_if(ea.type != OP_MEM, EXC_UD);
  10.495 +            generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
  10.496              fail_if(ops->read_segment == NULL);
  10.497              if ( (rc = ops->read_segment((modrm_reg & 1) ?
  10.498                                           x86_seg_idtr : x86_seg_gdtr,
  10.499 @@ -3067,7 +3066,7 @@ x86_emulate(
  10.500              break;
  10.501          case 2: /* lgdt */
  10.502          case 3: /* lidt */
  10.503 -            generate_exception_if(ea.type != OP_MEM, EXC_UD);
  10.504 +            generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
  10.505              fail_if(ops->write_segment == NULL);
  10.506              memset(&reg, 0, sizeof(reg));
  10.507              if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0,
  10.508 @@ -3108,8 +3107,8 @@ x86_emulate(
  10.509                  goto done;
  10.510              break;
  10.511          case 7: /* invlpg */
  10.512 -            generate_exception_if(!mode_ring0(), EXC_GP);
  10.513 -            generate_exception_if(ea.type != OP_MEM, EXC_UD);
  10.514 +            generate_exception_if(!mode_ring0(), EXC_GP, 0);
  10.515 +            generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
  10.516              fail_if(ops->invlpg == NULL);
  10.517              if ( (rc = ops->invlpg(ea.mem.seg, ea.mem.off, ctxt)) )
  10.518                  goto done;
  10.519 @@ -3121,7 +3120,7 @@ x86_emulate(
  10.520      }
  10.521  
  10.522      case 0x06: /* clts */
  10.523 -        generate_exception_if(!mode_ring0(), EXC_GP);
  10.524 +        generate_exception_if(!mode_ring0(), EXC_GP, 0);
  10.525          fail_if((ops->read_cr == NULL) || (ops->write_cr == NULL));
  10.526          if ( (rc = ops->read_cr(0, &dst.val, ctxt)) ||
  10.527               (rc = ops->write_cr(0, dst.val&~8, ctxt)) )
  10.528 @@ -3130,7 +3129,7 @@ x86_emulate(
  10.529  
  10.530      case 0x08: /* invd */
  10.531      case 0x09: /* wbinvd */
  10.532 -        generate_exception_if(!mode_ring0(), EXC_GP);
  10.533 +        generate_exception_if(!mode_ring0(), EXC_GP, 0);
  10.534          fail_if(ops->wbinvd == NULL);
  10.535          if ( (rc = ops->wbinvd(ctxt)) != 0 )
  10.536              goto done;
  10.537 @@ -3145,7 +3144,7 @@ x86_emulate(
  10.538      case 0x21: /* mov dr,reg */
  10.539      case 0x22: /* mov reg,cr */
  10.540      case 0x23: /* mov reg,dr */
  10.541 -        generate_exception_if(!mode_ring0(), EXC_GP);
  10.542 +        generate_exception_if(!mode_ring0(), EXC_GP, 0);
  10.543          modrm_rm  |= (rex_prefix & 1) << 3;
  10.544          modrm_reg |= lock_prefix << 3;
  10.545          if ( b & 2 )
  10.546 @@ -3182,7 +3181,7 @@ x86_emulate(
  10.547  
  10.548      case 0x30: /* wrmsr */ {
  10.549          uint64_t val = ((uint64_t)_regs.edx << 32) | (uint32_t)_regs.eax;
  10.550 -        generate_exception_if(!mode_ring0(), EXC_GP);
  10.551 +        generate_exception_if(!mode_ring0(), EXC_GP, 0);
  10.552          fail_if(ops->write_msr == NULL);
  10.553          if ( (rc = ops->write_msr((uint32_t)_regs.ecx, val, ctxt)) != 0 )
  10.554              goto done;
  10.555 @@ -3195,7 +3194,7 @@ x86_emulate(
  10.556          fail_if(ops->read_cr == NULL);
  10.557          if ( (rc = ops->read_cr(4, &cr4, ctxt)) )
  10.558              goto done;
  10.559 -        generate_exception_if((cr4 & CR4_TSD) && !mode_ring0(), EXC_GP);
  10.560 +        generate_exception_if((cr4 & CR4_TSD) && !mode_ring0(), EXC_GP, 0);
  10.561          fail_if(ops->read_msr == NULL);
  10.562          if ( (rc = ops->read_msr(MSR_TSC, &val, ctxt)) != 0 )
  10.563              goto done;
  10.564 @@ -3206,7 +3205,7 @@ x86_emulate(
  10.565  
  10.566      case 0x32: /* rdmsr */ {
  10.567          uint64_t val;
  10.568 -        generate_exception_if(!mode_ring0(), EXC_GP);
  10.569 +        generate_exception_if(!mode_ring0(), EXC_GP, 0);
  10.570          fail_if(ops->read_msr == NULL);
  10.571          if ( (rc = ops->read_msr((uint32_t)_regs.ecx, &val, ctxt)) != 0 )
  10.572              goto done;
  10.573 @@ -3255,8 +3254,8 @@ x86_emulate(
  10.574  #if defined(__i386__)
  10.575      {
  10.576          unsigned long old_lo, old_hi;
  10.577 -        generate_exception_if((modrm_reg & 7) != 1, EXC_UD);
  10.578 -        generate_exception_if(ea.type != OP_MEM, EXC_UD);
  10.579 +        generate_exception_if((modrm_reg & 7) != 1, EXC_UD, -1);
  10.580 +        generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
  10.581          if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0, &old_lo, 4, ctxt)) ||
  10.582               (rc = ops->read(ea.mem.seg, ea.mem.off+4, &old_hi, 4, ctxt)) )
  10.583              goto done;
  10.584 @@ -3283,8 +3282,8 @@ x86_emulate(
  10.585  #elif defined(__x86_64__)
  10.586      {
  10.587          unsigned long old, new;
  10.588 -        generate_exception_if((modrm_reg & 7) != 1, EXC_UD);
  10.589 -        generate_exception_if(ea.type != OP_MEM, EXC_UD);
  10.590 +        generate_exception_if((modrm_reg & 7) != 1, EXC_UD, -1);
  10.591 +        generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
  10.592          if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &old, 8, ctxt)) != 0 )
  10.593              goto done;
  10.594          if ( ((uint32_t)(old>>0) != (uint32_t)_regs.eax) ||
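
Note on the x86_emulate.c hunks above: generate_exception_if() now takes a third argument, the error code to inject, with -1 meaning the exception pushes no error code (as for #UD) and an explicit 0 for the #GP(0) faults raised here. A minimal sketch of the reworked macro, assuming the pre-existing fail_if()/goto-done conventions of x86_emulate.c (the macro definition itself sits outside these hunks):

    #define generate_exception_if(p, e, ec)                      \
    ({  if ( (p) ) {                                             \
            fail_if(ops->inject_hw_exception == NULL);           \
            rc = ops->inject_hw_exception(e, ec, ctxt)           \
                 ? : X86EMUL_EXCEPTION;                          \
            goto done;                                           \
        }                                                        \
    })
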
    11.1 --- a/xen/include/asm-x86/hvm/emulate.h	Fri Feb 22 16:49:56 2008 +0000
    11.2 +++ b/xen/include/asm-x86/hvm/emulate.h	Fri Feb 22 18:32:41 2008 +0000
    11.3 @@ -27,18 +27,12 @@ struct hvm_emulate_ctxt {
    11.4      unsigned long seg_reg_accessed;
    11.5      unsigned long seg_reg_dirty;
    11.6  
    11.7 -    union {
    11.8 -        struct {
    11.9 -            unsigned int hlt:1;
   11.10 -            unsigned int mov_ss:1;
   11.11 -            unsigned int sti:1;
   11.12 -            unsigned int exn_pending:1;
   11.13 -        } flags;
   11.14 -        unsigned int flag_word;
   11.15 -    };
   11.16 -
   11.17 +    bool_t exn_pending;
   11.18      uint8_t exn_vector;
   11.19      uint8_t exn_insn_len;
   11.20 +    int32_t exn_error_code;
   11.21 +
   11.22 +    uint32_t intr_shadow;
   11.23  };
   11.24  
   11.25  int hvm_emulate_one(
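
With the flags union gone, hvm_emulate_ctxt carries only the pending-exception record (now including a 32-bit error code, -1 for "none") plus a cached copy of the guest's interrupt shadow. An illustrative consumer, assuming the existing hvm_inject_exception() helper; only the field names come from this changeset:

    rc = hvm_emulate_one(&ctxt);
    if ( (rc == X86EMUL_EXCEPTION) && ctxt.exn_pending )
        hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
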
    12.1 --- a/xen/include/asm-x86/hvm/hvm.h	Fri Feb 22 16:49:56 2008 +0000
    12.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Fri Feb 22 18:32:41 2008 +0000
    12.3 @@ -49,6 +49,12 @@ enum hvm_intblk {
    12.4      hvm_intblk_nmi_iret   /* NMI blocked until IRET */
    12.5  };
    12.6  
    12.7 +/* These happen to be the same as the VMX interrupt shadow definitions. */
    12.8 +#define HVM_INTR_SHADOW_STI    0x00000001
    12.9 +#define HVM_INTR_SHADOW_MOV_SS 0x00000002
   12.10 +#define HVM_INTR_SHADOW_SMI    0x00000004
   12.11 +#define HVM_INTR_SHADOW_NMI    0x00000008
   12.12 +
   12.13  /*
   12.14   * The hardware virtual machine (HVM) interface abstracts away from the
   12.15   * x86/x86_64 CPU virtualization assist specifics. Currently this interface
   12.16 @@ -72,14 +78,9 @@ struct hvm_function_table {
   12.17      void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
   12.18      int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
   12.19  
   12.20 -    /*
   12.21 -     * Examine specifics of the guest state:
   12.22 -     * 1) determine whether interrupts are enabled or not
   12.23 -     * 2) determine the mode the guest is running in
   12.24 -     * 3) return the current guest segment descriptor base
   12.25 -     * 4) return the current guest segment descriptor
   12.26 -     */
   12.27 -    enum hvm_intblk (*interrupt_blocked)(struct vcpu *v, struct hvm_intack);
   12.28 +    /* Examine specifics of the guest state. */
   12.29 +    unsigned int (*get_interrupt_shadow)(struct vcpu *v);
   12.30 +    void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);
   12.31      int (*guest_x86_mode)(struct vcpu *v);
   12.32      void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
   12.33                                   struct segment_register *reg);
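
The coarse interrupt_blocked() hook gives way to a raw get/set pair over the interrupt-shadow word, whose bit layout deliberately matches the VMX guest-interruptibility field. A sketch of how an emulation path might fold the emulator's retirement flags (see the x86_emulate.h hunk below) into that word; the toggle-versus-clear logic here is illustrative only:

    unsigned int shadow = hvmemul_ctxt->intr_shadow;

    /* MOV SS toggles its own shadow; any other instruction clears it. */
    if ( ctxt->retire.flags.mov_ss )
        shadow ^= HVM_INTR_SHADOW_MOV_SS;
    else
        shadow &= ~HVM_INTR_SHADOW_MOV_SS;

    /* Likewise for STI. */
    if ( ctxt->retire.flags.sti )
        shadow ^= HVM_INTR_SHADOW_STI;
    else
        shadow &= ~HVM_INTR_SHADOW_STI;

    if ( shadow != hvmemul_ctxt->intr_shadow )
        hvm_funcs.set_interrupt_shadow(v, shadow);
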
    13.1 --- a/xen/include/asm-x86/x86_emulate.h	Fri Feb 22 16:49:56 2008 +0000
    13.2 +++ b/xen/include/asm-x86/x86_emulate.h	Fri Feb 22 18:32:41 2008 +0000
    13.3 @@ -318,11 +318,6 @@ struct x86_emulate_ops
    13.4          uint64_t val,
    13.5          struct x86_emulate_ctxt *ctxt);
    13.6  
    13.7 -    /* write_rflags: Modify privileged bits in RFLAGS. */
    13.8 -    int (*write_rflags)(
    13.9 -        unsigned long val,
   13.10 -        struct x86_emulate_ctxt *ctxt);
   13.11 -
   13.12      /* wbinvd: Write-back and invalidate cache contents. */
   13.13      int (*wbinvd)(
   13.14          struct x86_emulate_ctxt *ctxt);
   13.15 @@ -335,14 +330,10 @@ struct x86_emulate_ops
   13.16          unsigned int *edx,
   13.17          struct x86_emulate_ctxt *ctxt);
   13.18  
   13.19 -    /* hlt: Emulate HLT. */
   13.20 -    int (*hlt)(
   13.21 -        struct x86_emulate_ctxt *ctxt);
   13.22 -
   13.23      /* inject_hw_exception */
   13.24      int (*inject_hw_exception)(
   13.25          uint8_t vector,
   13.26 -        uint16_t error_code,
   13.27 +        int32_t error_code,
   13.28          struct x86_emulate_ctxt *ctxt);
   13.29  
   13.30      /* inject_sw_interrupt */
   13.31 @@ -376,7 +367,17 @@ struct x86_emulate_ctxt
   13.32      unsigned int sp_size;
   13.33  
   13.34      /* Set this if writes may have side effects. */
   13.35 -    int force_writeback;
   13.36 +    uint8_t force_writeback;
   13.37 +
   13.38 +    /* Retirement state, set by the emulator (valid only on X86EMUL_OKAY). */
   13.39 +    union {
   13.40 +        struct {
   13.41 +            uint8_t hlt:1;          /* Instruction HLTed. */
   13.42 +            uint8_t mov_ss:1;       /* Instruction sets MOV-SS irq shadow. */
   13.43 +            uint8_t sti:1;          /* Instruction sets STI irq shadow. */
   13.44 +        } flags;
   13.45 +        uint8_t byte;
   13.46 +    } retire;
   13.47  };
   13.48  
   13.49  /*
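
The hlt() and write_rflags() callbacks are thus replaced by passive retirement state: x86_emulate() records HLT/MOV-SS/STI in ctxt->retire and leaves the reaction to the caller, which inspects the flags only on X86EMUL_OKAY. An illustrative consumer, assuming the existing hvm_hlt() and hvm_local_events_need_delivery() helpers:

    if ( (rc == X86EMUL_OKAY) && ctxt.retire.flags.hlt &&
         !hvm_local_events_need_delivery(current) )
        hvm_hlt(regs->eflags);
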