ia64/xen-unstable

changeset 16427:fd3f6d814f6d

x86: single step after instruction emulation

Inject a single-step trap (#DB) after emulating an instruction if the
guest's EFLAGS.TF is set.
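
The pattern applied across all the emulation paths touched below is the
same: after committing the emulated instruction (advancing EIP and
clearing EFLAGS.RF), check the guest's EFLAGS.TF and, if set, deliver a
#DB with the single-step (BS) bit set in DR6. A minimal, self-contained
sketch of that logic follows; the struct and helper names here are
illustrative stand-ins, not the Xen functions used in the diff.

    #include <stdint.h>
    #include <stdio.h>

    #define X86_EFLAGS_TF (1u << 8)    /* trap flag */
    #define X86_EFLAGS_RF (1u << 16)   /* resume flag */
    #define DR6_BS        (1u << 14)   /* single-step bit, as in dr6 |= 0x4000 */

    /* Illustrative stand-in for the guest register/debug state. */
    struct guest_state {
        uint32_t eip, eflags, dr6;
    };

    /* Hypothetical helper: what the emulator does once an instruction
     * has been emulated successfully. */
    static void emulation_done(struct guest_state *g, unsigned int inst_len)
    {
        g->eip    += inst_len;          /* advance past the emulated insn   */
        g->eflags &= ~X86_EFLAGS_RF;    /* RF is cleared on normal progress */

        if (g->eflags & X86_EFLAGS_TF) {
            g->dr6 |= DR6_BS;           /* report a single-step trap        */
            printf("inject #DB at eip=%#x\n", g->eip);
        }
    }

    int main(void)
    {
        struct guest_state g = { .eip = 0x1000, .eflags = X86_EFLAGS_TF };
        emulation_done(&g, 3);          /* pretend a 3-byte insn was emulated */
        return 0;
    }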

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir.fraser@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Nov 22 18:28:47 2007 +0000 (2007-11-22)
parents ae087a0fa2c9
children 69b56d3289f5
files xen/arch/x86/hvm/io.c xen/arch/x86/hvm/platform.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/traps.c xen/arch/x86/x86_emulate.c xen/include/asm-x86/hvm/support.h xen/include/asm-x86/hvm/vmx/vmx.h
line diff
     1.1 --- a/xen/arch/x86/hvm/io.c	Thu Nov 22 17:44:51 2007 +0000
     1.2 +++ b/xen/arch/x86/hvm/io.c	Thu Nov 22 18:28:47 2007 +0000
     1.3 @@ -863,6 +863,8 @@ void hvm_io_assist(void)
     1.4      /* Copy register changes back into current guest state. */
     1.5      regs->eflags &= ~X86_EFLAGS_RF;
     1.6      memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
     1.7 +    if ( regs->eflags & X86_EFLAGS_TF )
     1.8 +        hvm_inject_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE, 0);
     1.9  
    1.10   out:
    1.11      vcpu_end_shutdown_deferral(v);
     2.1 --- a/xen/arch/x86/hvm/platform.c	Thu Nov 22 17:44:51 2007 +0000
     2.2 +++ b/xen/arch/x86/hvm/platform.c	Thu Nov 22 18:28:47 2007 +0000
     2.3 @@ -1061,7 +1061,6 @@ void handle_mmio(unsigned long gpa)
     2.4      }
     2.5  
     2.6      regs->eip += inst_len; /* advance %eip */
     2.7 -    regs->eflags &= ~X86_EFLAGS_RF;
     2.8  
     2.9      switch ( mmio_op->instr ) {
    2.10      case INSTR_MOV:
    2.11 @@ -1121,7 +1120,6 @@ void handle_mmio(unsigned long gpa)
    2.12              /* The guest does not have the non-mmio address mapped. 
    2.13               * Need to send in a page fault */
    2.14              regs->eip -= inst_len; /* do not advance %eip */
    2.15 -            regs->eflags |= X86_EFLAGS_RF; /* RF was set by original #PF */
    2.16              hvm_inject_exception(TRAP_page_fault, pfec, addr);
    2.17              return;
    2.18          }
    2.19 @@ -1150,7 +1148,6 @@ void handle_mmio(unsigned long gpa)
    2.20                          /* Failed on the page-spanning copy.  Inject PF into
    2.21                           * the guest for the address where we failed */
    2.22                          regs->eip -= inst_len; /* do not advance %eip */
    2.23 -                        regs->eflags |= X86_EFLAGS_RF; /* RF was set by #PF */
    2.24                          /* Must set CR2 at the failing address */ 
    2.25                          addr += size - rv;
    2.26                          gdprintk(XENLOG_DEBUG, "Pagefault on non-io side of a "
     3.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu Nov 22 17:44:51 2007 +0000
     3.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Nov 22 18:28:47 2007 +0000
     3.3 @@ -64,6 +64,9 @@ asmlinkage void do_IRQ(struct cpu_user_r
     3.4  static int svm_reset_to_realmode(
     3.5      struct vcpu *v, struct cpu_user_regs *regs);
     3.6  static void svm_update_guest_cr(struct vcpu *v, unsigned int cr);
     3.7 +static void svm_update_guest_efer(struct vcpu *v);
     3.8 +static void svm_inject_exception(
     3.9 +    unsigned int trapnr, int errcode, unsigned long cr2);
    3.10  
    3.11  /* va of hardware host save area     */
    3.12  static void *hsa[NR_CPUS] __read_mostly;
    3.13 @@ -71,15 +74,15 @@ static void *hsa[NR_CPUS] __read_mostly;
    3.14  /* vmcb used for extended host state */
    3.15  static void *root_vmcb[NR_CPUS] __read_mostly;
    3.16  
    3.17 -static void svm_update_guest_efer(struct vcpu *v);
    3.18 -
    3.19  static void inline __update_guest_eip(
    3.20      struct cpu_user_regs *regs, unsigned int inst_len)
    3.21  {
    3.22 +    struct vcpu *curr = current;
    3.23 +
    3.24      if ( unlikely((inst_len == 0) || (inst_len > 15)) )
    3.25      {
    3.26          gdprintk(XENLOG_ERR, "Bad instruction length %u\n", inst_len);
    3.27 -        domain_crash(current->domain);
    3.28 +        domain_crash(curr->domain);
    3.29          return;
    3.30      }
    3.31  
    3.32 @@ -88,28 +91,10 @@ static void inline __update_guest_eip(
    3.33      regs->eip += inst_len;
    3.34      regs->eflags &= ~X86_EFLAGS_RF;
    3.35  
    3.36 -    current->arch.hvm_svm.vmcb->interrupt_shadow = 0;
    3.37 -}
    3.38 -
    3.39 -static void svm_inject_exception(
    3.40 -    struct vcpu *v, int trap, int ev, int error_code)
    3.41 -{
    3.42 -    eventinj_t event;
    3.43 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    3.44 +    curr->arch.hvm_svm.vmcb->interrupt_shadow = 0;
    3.45  
    3.46 -    if ( trap == TRAP_page_fault )
    3.47 -        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
    3.48 -    else
    3.49 -        HVMTRACE_2D(INJ_EXC, v, trap, error_code);
    3.50 -
    3.51 -    event.bytes = 0;            
    3.52 -    event.fields.v = 1;
    3.53 -    event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
    3.54 -    event.fields.vector = trap;
    3.55 -    event.fields.ev = ev;
    3.56 -    event.fields.errorcode = error_code;
    3.57 -
    3.58 -    vmcb->eventinj = event;
    3.59 +    if ( regs->eflags & X86_EFLAGS_TF )
    3.60 +        svm_inject_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE, 0);
    3.61  }
    3.62  
    3.63  static void svm_cpu_down(void)
    3.64 @@ -171,7 +156,9 @@ static void __restore_debug_registers(st
    3.65  {
    3.66      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    3.67  
    3.68 -    ASSERT(!v->arch.hvm_vcpu.flag_dr_dirty);
    3.69 +    if ( v->arch.hvm_vcpu.flag_dr_dirty )
    3.70 +        return;
    3.71 +
    3.72      v->arch.hvm_vcpu.flag_dr_dirty = 1;
    3.73      vmcb->dr_intercepts = 0;
    3.74  
    3.75 @@ -868,13 +855,38 @@ static void svm_vcpu_destroy(struct vcpu
    3.76      svm_destroy_vmcb(v);
    3.77  }
    3.78  
    3.79 -static void svm_hvm_inject_exception(
    3.80 +static void svm_inject_exception(
    3.81      unsigned int trapnr, int errcode, unsigned long cr2)
    3.82  {
    3.83 -    struct vcpu *v = current;
    3.84 +    struct vcpu *curr = current;
    3.85 +    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
    3.86 +    eventinj_t event;
    3.87 +
    3.88 +    event.bytes = 0;
    3.89 +    event.fields.v = 1;
    3.90 +    event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
    3.91 +    event.fields.vector = trapnr;
    3.92 +    event.fields.ev = (errcode != HVM_DELIVER_NO_ERROR_CODE);
    3.93 +    event.fields.errorcode = errcode;
    3.94 +
    3.95 +    vmcb->eventinj = event;
    3.96 +
    3.97      if ( trapnr == TRAP_page_fault )
    3.98 -        v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2] = cr2;
    3.99 -    svm_inject_exception(v, trapnr, (errcode != -1), errcode);
   3.100 +    {
   3.101 +        vmcb->cr2 = curr->arch.hvm_vcpu.guest_cr[2] = cr2;
   3.102 +        HVMTRACE_2D(PF_INJECT, curr, curr->arch.hvm_vcpu.guest_cr[2], errcode);
   3.103 +    }
   3.104 +    else
   3.105 +    {
   3.106 +        HVMTRACE_2D(INJ_EXC, curr, trapnr, errcode);
   3.107 +    }
   3.108 +
   3.109 +    if ( (trapnr == TRAP_debug) &&
   3.110 +         (guest_cpu_user_regs()->eflags & X86_EFLAGS_TF) )
   3.111 +    {
   3.112 +        __restore_debug_registers(curr);
   3.113 +        vmcb->dr6 |= 0x4000;
   3.114 +    }
   3.115  }
   3.116  
   3.117  static int svm_event_pending(struct vcpu *v)
   3.118 @@ -904,7 +916,7 @@ static struct hvm_function_table svm_fun
   3.119      .update_vtpr          = svm_update_vtpr,
   3.120      .stts                 = svm_stts,
   3.121      .set_tsc_offset       = svm_set_tsc_offset,
   3.122 -    .inject_exception     = svm_hvm_inject_exception,
   3.123 +    .inject_exception     = svm_inject_exception,
   3.124      .init_ap_context      = svm_init_ap_context,
   3.125      .init_hypercall_page  = svm_init_hypercall_page,
   3.126      .event_pending        = svm_event_pending
   3.127 @@ -1274,7 +1286,7 @@ static int svm_get_io_address(
   3.128          if (!seg)               /* If no prefix, used DS. */
   3.129              seg = &vmcb->ds;
   3.130          if (!long_mode && (seg->attr.fields.type & 0xa) == 0x8) {
   3.131 -            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   3.132 +            svm_inject_exception(TRAP_gp_fault, 0, 0);
   3.133              return 0;
   3.134          }
   3.135      }
   3.136 @@ -1283,7 +1295,7 @@ static int svm_get_io_address(
   3.137          reg = regs->edi;
   3.138          seg = &vmcb->es;        /* Note: This is ALWAYS ES. */
   3.139          if (!long_mode && (seg->attr.fields.type & 0xa) != 0x2) {
   3.140 -            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   3.141 +            svm_inject_exception(TRAP_gp_fault, 0, 0);
   3.142              return 0;
   3.143          }
   3.144      }
   3.145 @@ -1291,7 +1303,7 @@ static int svm_get_io_address(
   3.146      /* If the segment isn't present, give GP fault! */
   3.147      if (!long_mode && !seg->attr.fields.p) 
   3.148      {
   3.149 -        svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   3.150 +        svm_inject_exception(TRAP_gp_fault, 0, 0);
   3.151          return 0;
   3.152      }
   3.153  
   3.154 @@ -1316,7 +1328,7 @@ static int svm_get_io_address(
   3.155              *addr + size - 1 > seg->limit :
   3.156              *addr <= seg->limit)
   3.157          {
   3.158 -            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   3.159 +            svm_inject_exception(TRAP_gp_fault, 0, 0);
   3.160              return 0;
   3.161          }
   3.162  
   3.163 @@ -1371,7 +1383,7 @@ static int svm_get_io_address(
   3.164          if (!is_canonical_address(*addr) ||
   3.165              !is_canonical_address(*addr + size - 1))
   3.166          {
   3.167 -            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   3.168 +            svm_inject_exception(TRAP_gp_fault, 0, 0);
   3.169              return 0;
   3.170          }
   3.171          if (*count > (1UL << 48) / size)
   3.172 @@ -1472,7 +1484,7 @@ static void svm_io_instruction(struct vc
   3.173          {
   3.174              /* The guest does not have the RAM address mapped. 
   3.175               * Need to send in a page fault */
   3.176 -            svm_hvm_inject_exception(TRAP_page_fault, pfec, addr);
   3.177 +            svm_inject_exception(TRAP_page_fault, pfec, addr);
   3.178              return;
   3.179          }
   3.180          paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
   3.181 @@ -1500,7 +1512,7 @@ static void svm_io_instruction(struct vc
   3.182                          addr += size - rv;
   3.183                          gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
   3.184                                   "of a page-spanning PIO: va=%#lx\n", addr);
   3.185 -                        svm_hvm_inject_exception(TRAP_page_fault, 0, addr);
   3.186 +                        svm_inject_exception(TRAP_page_fault, 0, addr);
   3.187                          return;
   3.188                      }
   3.189                  }
   3.190 @@ -1796,7 +1808,7 @@ static void svm_do_msr_access(
   3.191              break;
   3.192  
   3.193          case MSR_K8_VM_HSAVE_PA:
   3.194 -            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   3.195 +            svm_inject_exception(TRAP_gp_fault, 0, 0);
   3.196              break;
   3.197  
   3.198          case MSR_IA32_MCG_CAP:
   3.199 @@ -1839,7 +1851,7 @@ static void svm_do_msr_access(
   3.200                  regs->edx = edx;
   3.201                  goto done;
   3.202              }
   3.203 -            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   3.204 +            svm_inject_exception(TRAP_gp_fault, 0, 0);
   3.205              return;
   3.206          }
   3.207          regs->eax = msr_content & 0xFFFFFFFF;
   3.208 @@ -1870,7 +1882,7 @@ static void svm_do_msr_access(
   3.209              break;
   3.210  
   3.211          case MSR_K8_VM_HSAVE_PA:
   3.212 -            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   3.213 +            svm_inject_exception(TRAP_gp_fault, 0, 0);
   3.214              break;
   3.215  
   3.216          case MSR_IA32_DEBUGCTLMSR:
   3.217 @@ -1931,7 +1943,7 @@ static void svm_vmexit_do_hlt(struct vmc
   3.218      inst_len = __get_instruction_length(curr, INSTR_HLT, NULL);
   3.219      __update_guest_eip(regs, inst_len);
   3.220  
   3.221 -    /* Check for interrupt not handled or new interrupt. */
   3.222 +    /* Check for pending exception or new interrupt. */
   3.223      if ( vmcb->eventinj.fields.v ||
   3.224           ((intack.source != hvm_intsrc_none) &&
   3.225            !svm_interrupt_blocked(current, intack)) )
   3.226 @@ -2197,8 +2209,7 @@ asmlinkage void svm_vmexit_handler(struc
   3.227              break;
   3.228          }
   3.229  
   3.230 -        v->arch.hvm_vcpu.guest_cr[2] = vmcb->cr2 = va;
   3.231 -        svm_inject_exception(v, TRAP_page_fault, 1, regs->error_code);
   3.232 +        svm_inject_exception(TRAP_page_fault, regs->error_code, va);
   3.233          break;
   3.234      }
   3.235  
   3.236 @@ -2296,7 +2307,7 @@ asmlinkage void svm_vmexit_handler(struc
   3.237      case VMEXIT_STGI:
   3.238      case VMEXIT_CLGI:
   3.239      case VMEXIT_SKINIT:
   3.240 -        svm_inject_exception(v, TRAP_invalid_op, 0, 0);
   3.241 +        svm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
   3.242          break;
   3.243  
   3.244      case VMEXIT_NPF:
     4.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Nov 22 17:44:51 2007 +0000
     4.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Nov 22 18:28:47 2007 +0000
     4.3 @@ -14,7 +14,6 @@
     4.4   * You should have received a copy of the GNU General Public License along with
     4.5   * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
     4.6   * Place - Suite 330, Boston, MA 02111-1307 USA.
     4.7 - *
     4.8   */
     4.9  
    4.10  #include <xen/config.h>
    4.11 @@ -417,7 +416,9 @@ static void vmx_save_dr(struct vcpu *v)
    4.12  
    4.13  static void __restore_debug_registers(struct vcpu *v)
    4.14  {
    4.15 -    ASSERT(!v->arch.hvm_vcpu.flag_dr_dirty);
    4.16 +    if ( v->arch.hvm_vcpu.flag_dr_dirty )
    4.17 +        return;
    4.18 +
    4.19      v->arch.hvm_vcpu.flag_dr_dirty = 1;
    4.20  
    4.21      write_debugreg(0, v->arch.guest_context.debugreg[0]);
    4.22 @@ -1102,10 +1103,19 @@ static void vmx_flush_guest_tlbs(void)
    4.23  static void vmx_inject_exception(
    4.24      unsigned int trapnr, int errcode, unsigned long cr2)
    4.25  {
    4.26 -    struct vcpu *v = current;
    4.27 -    vmx_inject_hw_exception(v, trapnr, errcode);
    4.28 +    struct vcpu *curr = current;
    4.29 +
    4.30 +    vmx_inject_hw_exception(curr, trapnr, errcode);
    4.31 +
    4.32      if ( trapnr == TRAP_page_fault )
    4.33 -        v->arch.hvm_vcpu.guest_cr[2] = cr2;
    4.34 +        curr->arch.hvm_vcpu.guest_cr[2] = cr2;
    4.35 +
    4.36 +    if ( (trapnr == TRAP_debug) &&
    4.37 +         (guest_cpu_user_regs()->eflags & X86_EFLAGS_TF) )
    4.38 +    {
    4.39 +        __restore_debug_registers(curr);
    4.40 +        write_debugreg(6, read_debugreg(6) | 0x4000);
    4.41 +    }
    4.42  }
    4.43  
    4.44  static void vmx_update_vtpr(struct vcpu *v, unsigned long value)
    4.45 @@ -1211,6 +1221,9 @@ static void __update_guest_eip(unsigned 
    4.46          x &= ~(VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS);
    4.47          __vmwrite(GUEST_INTERRUPTIBILITY_INFO, x);
    4.48      }
    4.49 +
    4.50 +    if ( regs->eflags & X86_EFLAGS_TF )
    4.51 +        vmx_inject_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE, 0);
    4.52  }
    4.53  
    4.54  static void vmx_do_no_device_fault(void)
    4.55 @@ -2589,7 +2602,17 @@ gp_fault:
    4.56  
    4.57  static void vmx_do_hlt(struct cpu_user_regs *regs)
    4.58  {
    4.59 -    HVMTRACE_0D(HLT, current);
    4.60 +    unsigned long intr_info = __vmread(VM_ENTRY_INTR_INFO);
    4.61 +    struct vcpu *curr = current;
    4.62 +
    4.63 +    /* Check for pending exception. */
    4.64 +    if ( intr_info & INTR_INFO_VALID_MASK )
    4.65 +    {
    4.66 +        HVMTRACE_1D(HLT, curr, /*int pending=*/ 1);
    4.67 +        return;
    4.68 +    }
    4.69 +
    4.70 +    HVMTRACE_1D(HLT, curr, /*int pending=*/ 0);
    4.71      hvm_hlt(regs->eflags);
    4.72  }
    4.73  
    4.74 @@ -2904,7 +2927,7 @@ asmlinkage void vmx_vmexit_handler(struc
    4.75      case EXIT_REASON_VMWRITE:
    4.76      case EXIT_REASON_VMXOFF:
    4.77      case EXIT_REASON_VMXON:
    4.78 -        vmx_inject_hw_exception(v, TRAP_invalid_op, VMX_DELIVER_NO_ERROR_CODE);
    4.79 +        vmx_inject_hw_exception(v, TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
    4.80          break;
    4.81  
    4.82      case EXIT_REASON_TPR_BELOW_THRESHOLD:
     5.1 --- a/xen/arch/x86/traps.c	Thu Nov 22 17:44:51 2007 +0000
     5.2 +++ b/xen/arch/x86/traps.c	Thu Nov 22 18:28:47 2007 +0000
     5.3 @@ -414,6 +414,17 @@ static int do_guest_trap(
     5.4      return 0;
     5.5  }
     5.6  
     5.7 +static void instruction_done(struct cpu_user_regs *regs, unsigned long eip)
     5.8 +{
     5.9 +    regs->eip = eip;
    5.10 +    regs->eflags &= ~X86_EFLAGS_RF;
    5.11 +    if ( regs->eflags & X86_EFLAGS_TF )
    5.12 +    {
    5.13 +        current->arch.guest_context.debugreg[6] |= 0xffff4ff0;
    5.14 +        do_guest_trap(TRAP_debug, regs, 0);
    5.15 +    }
    5.16 +}
    5.17 +
    5.18  /*
    5.19   * Called from asm to set up the NMI trapbounce info.
    5.20   * Returns 0 if no callback is set up, else 1.
    5.21 @@ -657,8 +668,8 @@ static int emulate_forced_invalid_op(str
    5.22      regs->ebx = b;
    5.23      regs->ecx = c;
    5.24      regs->edx = d;
    5.25 -    regs->eip = eip;
    5.26 -    regs->eflags &= ~X86_EFLAGS_RF;
    5.27 +
    5.28 +    instruction_done(regs, eip);
    5.29  
    5.30      trace_trap_one_addr(TRC_PV_FORCED_INVALID_OP, regs->eip);
    5.31  
    5.32 @@ -1953,8 +1964,7 @@ static int emulate_privileged_op(struct 
    5.33  #undef rd_ad
    5.34  
    5.35   done:
    5.36 -    regs->eip = eip;
    5.37 -    regs->eflags &= ~X86_EFLAGS_RF;
    5.38 +    instruction_done(regs, eip);
    5.39      return EXCRET_fault_fixed;
    5.40  
    5.41   fail:
    5.42 @@ -2284,8 +2294,8 @@ static int emulate_gate_op(struct cpu_us
    5.43      else
    5.44          sel |= (regs->cs & 3);
    5.45  
    5.46 -    regs->eip = off;
    5.47      regs->cs = sel;
    5.48 +    instruction_done(regs, off);
    5.49  #endif
    5.50  
    5.51      return 0;
     6.1 --- a/xen/arch/x86/x86_emulate.c	Thu Nov 22 17:44:51 2007 +0000
     6.2 +++ b/xen/arch/x86/x86_emulate.c	Thu Nov 22 18:28:47 2007 +0000
     6.3 @@ -1635,6 +1635,7 @@ x86_emulate(
     6.4      /* Commit shadow register state. */
     6.5      _regs.eflags &= ~EFLG_RF;
     6.6      *ctxt->regs = _regs;
     6.7 +    /* FIXME generate_exception_if(_regs.eflags & EFLG_TF, EXC_DB); */
     6.8  
     6.9   done:
    6.10      return rc;
     7.1 --- a/xen/include/asm-x86/hvm/support.h	Thu Nov 22 17:44:51 2007 +0000
     7.2 +++ b/xen/include/asm-x86/hvm/support.h	Thu Nov 22 18:28:47 2007 +0000
     7.3 @@ -50,7 +50,7 @@ static inline vcpu_iodata_t *get_ioreq(s
     7.4  #define TYPE_CLTS               (2 << 4)
     7.5  #define TYPE_LMSW               (3 << 4)
     7.6  
     7.7 -#define VMX_DELIVER_NO_ERROR_CODE  -1
     7.8 +#define HVM_DELIVER_NO_ERROR_CODE  -1
     7.9  
    7.10  #if HVM_DEBUG
    7.11  #define DBG_LEVEL_0                 (1 << 0)
     8.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Thu Nov 22 17:44:51 2007 +0000
     8.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Thu Nov 22 18:28:47 2007 +0000
     8.3 @@ -269,7 +269,7 @@ static inline void __vmx_inject_exceptio
     8.4       */
     8.5  
     8.6      intr_fields = (INTR_INFO_VALID_MASK | (type<<8) | trap);
     8.7 -    if ( error_code != VMX_DELIVER_NO_ERROR_CODE ) {
     8.8 +    if ( error_code != HVM_DELIVER_NO_ERROR_CODE ) {
     8.9          __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
    8.10          intr_fields |= INTR_INFO_DELIVER_CODE_MASK;
    8.11      }
    8.12 @@ -291,13 +291,13 @@ static inline void vmx_inject_hw_excepti
    8.13  static inline void vmx_inject_extint(struct vcpu *v, int trap)
    8.14  {
    8.15      __vmx_inject_exception(v, trap, X86_EVENTTYPE_EXT_INTR,
    8.16 -                           VMX_DELIVER_NO_ERROR_CODE);
    8.17 +                           HVM_DELIVER_NO_ERROR_CODE);
    8.18  }
    8.19  
    8.20  static inline void vmx_inject_nmi(struct vcpu *v)
    8.21  {
    8.22      __vmx_inject_exception(v, 2, X86_EVENTTYPE_NMI,
    8.23 -                           VMX_DELIVER_NO_ERROR_CODE);
    8.24 +                           HVM_DELIVER_NO_ERROR_CODE);
    8.25  }
    8.26  
    8.27  #endif /* __ASM_X86_HVM_VMX_VMX_H__ */