ia64/xen-unstable

changeset 14146:d2a91b73899a

[xentrace][HVM] introduce HVM tracing to unify SVM and VMX tracing

* new tracing class TRC_HVM replacing TRC_VMX

* 20 new handler trace events (plus VMENTRY/VMEXIT events) to distinguish the information they carry
** added corresponding trace points in the SVM and VMX code
** updated the formats file with descriptive formatting of the trace event data
** these completely replace the previous simple tracing in VMX

* individual events can be toggled on and off at compile time in
include/asm-x86/hvm/trace.h (see the sketch below)
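
The new include/asm-x86/hvm/trace.h itself (and the updated formats file) is referenced above but is not among the hunks below. As a rough illustration only -- not the actual header added by this changeset -- the per-event compile-time switches and the HVMTRACE_nD() wrappers used throughout the patch could be built on the generic TRACE_nD() macros from xen/include/xen/trace.h and the TRC_HVM_* codes added to public/trace.h; the DO_TRC_HVM_* switch names and the HVMTRACE_VCPU_ID() helper below are illustrative assumptions:

    /* Illustrative sketch only -- not the header added by this changeset. */
    #ifndef __ASM_X86_HVM_TRACE_H__
    #define __ASM_X86_HVM_TRACE_H__

    #include <xen/trace.h>      /* generic TRACE_nD() tracing macros */
    #include <public/trace.h>   /* TRC_HVM_* event codes             */

    /* Per-event compile-time switches: setting one to 0 compiles that
     * trace point out entirely (assumed naming scheme). */
    #define DO_TRC_HVM_VMENTRY  1
    #define DO_TRC_HVM_VMEXIT   1
    #define DO_TRC_HVM_IO_READ  1
    #define DO_TRC_HVM_IO_WRITE 1
    /* ... one DO_TRC_HVM_* switch per TRC_HVM_* event ... */

    /* Pack domain and vcpu id into the first data word so one trace
     * class can cover every HVM guest (assumed encoding). */
    #define HVMTRACE_VCPU_ID(v) \
        (((v)->domain->domain_id << 16) | (v)->vcpu_id)

    #define HVMTRACE_3D(evt, v, d1, d2, d3)                                 \
        do {                                                                \
            if ( DO_TRC_HVM_ ## evt )                                       \
                TRACE_4D(TRC_HVM_ ## evt, HVMTRACE_VCPU_ID(v), d1, d2, d3); \
        } while ( 0 )

    #define HVMTRACE_2D(evt, v, d1, d2)                                     \
        do {                                                                \
            if ( DO_TRC_HVM_ ## evt )                                       \
                TRACE_3D(TRC_HVM_ ## evt, HVMTRACE_VCPU_ID(v), d1, d2);     \
        } while ( 0 )

    #define HVMTRACE_1D(evt, v, d1)  HVMTRACE_2D(evt, v, d1, 0)
    #define HVMTRACE_0D(evt, v)      HVMTRACE_2D(evt, v, 0, 0)

    #endif /* __ASM_X86_HVM_TRACE_H__ */

At run time the whole class can still be selected with the usual xentrace event mask (TRC_HVM is 0x0008f000 in public/trace.h below); the DO_TRC_HVM_* switches only decide at build time which trace points exist at all.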

Signed-off-by: Thomas Friebel <thomas.friebel@amd.com>
author kfraser@localhost.localdomain
date Mon Feb 26 17:04:39 2007 +0000 (2007-02-26)
parents 0070b18869f7
children 6efc22cb9c84
files xen/arch/x86/hvm/io.c xen/arch/x86/hvm/svm/intr.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/intr.c xen/arch/x86/hvm/vmx/vmx.c xen/common/trace.c xen/include/asm-x86/hvm/support.h xen/include/asm-x86/hvm/vcpu.h xen/include/asm-x86/hvm/vmx/vmx.h xen/include/public/trace.h
line diff
     1.1 --- a/xen/arch/x86/hvm/io.c	Mon Feb 26 16:27:35 2007 +0000
     1.2 +++ b/xen/arch/x86/hvm/io.c	Mon Feb 26 17:04:39 2007 +0000
     1.3 @@ -416,7 +416,6 @@ static void hvm_pio_assist(struct cpu_us
     1.4              printk("Error: %s unknown port size\n", __FUNCTION__);
     1.5              domain_crash_synchronous();
     1.6          }
     1.7 -        TRACE_VMEXIT(3, regs->eax);
     1.8      }
     1.9  }
    1.10  
     2.1 --- a/xen/arch/x86/hvm/svm/intr.c	Mon Feb 26 16:27:35 2007 +0000
     2.2 +++ b/xen/arch/x86/hvm/svm/intr.c	Mon Feb 26 17:04:39 2007 +0000
     2.3 @@ -37,6 +37,7 @@
     2.4  #include <xen/kernel.h>
     2.5  #include <public/hvm/ioreq.h>
     2.6  #include <xen/domain_page.h>
     2.7 +#include <asm/hvm/trace.h>
     2.8  
     2.9  /*
    2.10   * Most of this code is copied from vmx_io.c and modified 
    2.11 @@ -108,6 +109,7 @@ asmlinkage void svm_intr_assist(void)
    2.12              if ( irq_masked(vmcb->rflags) || vmcb->interrupt_shadow )  
    2.13              {
    2.14                  vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
    2.15 +                HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
    2.16                  svm_inject_extint(v, 0x0); /* actual vector doesn't really matter */
    2.17                  return;
    2.18              }
    2.19 @@ -128,7 +130,10 @@ asmlinkage void svm_intr_assist(void)
    2.20          if ( re_injecting && (pt = is_pt_irq(v, intr_vector, intr_type)) )
    2.21              ++pt->pending_intr_nr;
    2.22          /* let's inject this interrupt */
    2.23 -        TRACE_3D(TRC_VMX_INTR, v->domain->domain_id, intr_vector, 0);
    2.24 +        if (re_injecting)
    2.25 +            HVMTRACE_1D(REINJ_VIRQ, v, intr_vector);
    2.26 +        else
    2.27 +            HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
    2.28          svm_inject_extint(v, intr_vector);
    2.29          break;
    2.30      case APIC_DM_SMI:
     3.1 --- a/xen/arch/x86/hvm/svm/svm.c	Mon Feb 26 16:27:35 2007 +0000
     3.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Mon Feb 26 17:04:39 2007 +0000
     3.3 @@ -48,6 +48,7 @@
     3.4  #include <asm/x86_emulate.h>
     3.5  #include <public/sched.h>
     3.6  #include <asm/hvm/vpt.h>
     3.7 +#include <asm/hvm/trace.h>
     3.8  
     3.9  #define SVM_EXTRA_DEBUG
    3.10  
    3.11 @@ -81,6 +82,11 @@ static inline void svm_inject_exception(
    3.12      eventinj_t event;
    3.13      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    3.14  
    3.15 +    if ( trap == TRAP_page_fault )
    3.16 +        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_svm.cpu_cr2, error_code);
    3.17 +    else
    3.18 +        HVMTRACE_2D(INJ_EXC, v, trap, error_code);
    3.19 +
    3.20      event.bytes = 0;            
    3.21      event.fields.v = 1;
    3.22      event.fields.type = EVENTTYPE_EXCEPTION;
    3.23 @@ -977,9 +983,9 @@ static void svm_hvm_inject_exception(
    3.24      unsigned int trapnr, int errcode, unsigned long cr2)
    3.25  {
    3.26      struct vcpu *v = current;
    3.27 -    svm_inject_exception(v, trapnr, (errcode != -1), errcode);
    3.28      if ( trapnr == TRAP_page_fault )
    3.29          v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_svm.cpu_cr2 = cr2;
    3.30 +    svm_inject_exception(v, trapnr, (errcode != -1), errcode);
    3.31  }
    3.32  
    3.33  static int svm_event_injection_faulted(struct vcpu *v)
    3.34 @@ -1209,13 +1215,17 @@ static void svm_vmexit_do_cpuid(struct v
    3.35      regs->ecx = (unsigned long)ecx;
    3.36      regs->edx = (unsigned long)edx;
    3.37  
    3.38 +    HVMTRACE_3D(CPUID, v, input,
    3.39 +                ((uint64_t)eax << 32) | ebx, ((uint64_t)ecx << 32) | edx);
    3.40 +
    3.41      inst_len = __get_instruction_length(vmcb, INSTR_CPUID, NULL);
    3.42      ASSERT(inst_len > 0);
    3.43      __update_guest_eip(vmcb, inst_len);
    3.44  }
    3.45  
    3.46 -static inline unsigned long *get_reg_p(unsigned int gpreg, 
    3.47 -                                       struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
    3.48 +static inline unsigned long *get_reg_p(
    3.49 +    unsigned int gpreg, 
    3.50 +    struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
    3.51  {
    3.52      unsigned long *reg_p = NULL;
    3.53      switch (gpreg)
    3.54 @@ -1300,6 +1310,8 @@ static void svm_dr_access(struct vcpu *v
    3.55  {
    3.56      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    3.57  
    3.58 +    HVMTRACE_0D(DR_WRITE, v);
    3.59 +
    3.60      v->arch.hvm_vcpu.flag_dr_dirty = 1;
    3.61  
    3.62      __restore_debug_registers(v);
    3.63 @@ -1579,6 +1591,11 @@ static void svm_io_instruction(struct vc
    3.64      else 
    3.65          size = 1;
    3.66  
    3.67 +    if (dir==IOREQ_READ)
    3.68 +        HVMTRACE_2D(IO_READ,  v, port, size);
    3.69 +    else
    3.70 +        HVMTRACE_2D(IO_WRITE, v, port, size);
    3.71 +
    3.72      HVM_DBG_LOG(DBG_LEVEL_IO, 
    3.73                  "svm_io_instruction: port 0x%x eip=%x:%"PRIx64", "
    3.74                  "exit_qualification = %"PRIx64,
    3.75 @@ -1835,6 +1852,8 @@ static void mov_from_cr(int cr, int gp, 
    3.76          return;
    3.77      }
    3.78  
    3.79 +    HVMTRACE_2D(CR_READ, v, cr, value);
    3.80 +
    3.81      set_reg(gp, value, regs, vmcb);
    3.82  
    3.83      HVM_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
    3.84 @@ -1859,6 +1878,8 @@ static int mov_to_cr(int gpreg, int cr, 
    3.85  
    3.86      value = get_reg(gpreg, regs, vmcb);
    3.87  
    3.88 +    HVMTRACE_2D(CR_WRITE, v, cr, value);
    3.89 +
    3.90      HVM_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
    3.91      HVM_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current);
    3.92  
    3.93 @@ -2152,6 +2173,7 @@ static inline void svm_do_msr_access(
    3.94          regs->edx = msr_content >> 32;
    3.95  
    3.96   done:
    3.97 +        HVMTRACE_2D(MSR_READ, v, ecx, msr_content);
    3.98          HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
    3.99                      ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
   3.100  
   3.101 @@ -2161,6 +2183,8 @@ static inline void svm_do_msr_access(
   3.102      {
   3.103          msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
   3.104  
   3.105 +        HVMTRACE_2D(MSR_WRITE, v, ecx, msr_content);
   3.106 +
   3.107          switch (ecx)
   3.108          {
   3.109          case MSR_IA32_TIME_STAMP_COUNTER:
   3.110 @@ -2198,9 +2222,12 @@ static inline void svm_vmexit_do_hlt(str
   3.111  
   3.112      /* Check for interrupt not handled or new interrupt. */
   3.113      if ( (vmcb->rflags & X86_EFLAGS_IF) &&
   3.114 -         (vmcb->vintr.fields.irq || cpu_has_pending_irq(current)) )
   3.115 +         (vmcb->vintr.fields.irq || cpu_has_pending_irq(current)) ) {
   3.116 +        HVMTRACE_1D(HLT, current, /*int pending=*/ 1);
   3.117          return;
   3.118 -
   3.119 +    }
   3.120 +
   3.121 +    HVMTRACE_1D(HLT, current, /*int pending=*/ 0);
   3.122      hvm_hlt(vmcb->rflags);
   3.123  }
   3.124  
   3.125 @@ -2312,6 +2339,8 @@ void svm_handle_invlpg(const short invlp
   3.126          __update_guest_eip (vmcb, inst_len);
   3.127      }
   3.128  
   3.129 +    HVMTRACE_3D(INVLPG, v, (invlpga?1:0), g_vaddr, (invlpga?regs->ecx:0));
   3.130 +
   3.131      paging_invlpg(v, g_vaddr);
   3.132  }
   3.133  
   3.134 @@ -2428,6 +2457,8 @@ static int svm_do_vmmcall(struct vcpu *v
   3.135      inst_len = __get_instruction_length(vmcb, INSTR_VMCALL, NULL);
   3.136      ASSERT(inst_len > 0);
   3.137  
   3.138 +    HVMTRACE_1D(VMMCALL, v, regs->eax);
   3.139 +
   3.140      if ( regs->eax & 0x80000000 )
   3.141      {
   3.142          /* VMMCALL sanity check */
   3.143 @@ -2723,7 +2754,6 @@ asmlinkage void svm_vmexit_handler(struc
   3.144      unsigned int exit_reason;
   3.145      unsigned long eip;
   3.146      struct vcpu *v = current;
   3.147 -    int error;
   3.148      int do_debug = 0;
   3.149      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   3.150  
   3.151 @@ -2732,6 +2762,8 @@ asmlinkage void svm_vmexit_handler(struc
   3.152      exit_reason = vmcb->exitcode;
   3.153      save_svm_cpu_user_regs(v, regs);
   3.154  
   3.155 +    HVMTRACE_2D(VMEXIT, v, vmcb->rip, exit_reason);
   3.156 +
   3.157      if (exit_reason == VMEXIT_INVALID)
   3.158      {
   3.159          svm_dump_vmcb(__func__, vmcb);
   3.160 @@ -2854,8 +2886,6 @@ asmlinkage void svm_vmexit_handler(struc
   3.161      }
   3.162  #endif /* SVM_EXTRA_DEBUG */
   3.163  
   3.164 -    TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
   3.165 -
   3.166      switch (exit_reason) 
   3.167      {
   3.168      case VMEXIT_EXCEPTION_DB:
   3.169 @@ -2872,9 +2902,16 @@ asmlinkage void svm_vmexit_handler(struc
   3.170      break;
   3.171  
   3.172      case VMEXIT_INTR:
   3.173 +        /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
   3.174 +        HVMTRACE_0D(INTR, v);
   3.175 +        break;
   3.176      case VMEXIT_NMI:
   3.177 +        /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
   3.178 +        HVMTRACE_0D(NMI, v);
   3.179 +        break;
   3.180      case VMEXIT_SMI:
   3.181 -        /* Asynchronous events, handled when we STGI'd after the VMEXIT. */
   3.182 +        /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
   3.183 +        HVMTRACE_0D(SMI, v);
   3.184          break;
   3.185  
   3.186      case VMEXIT_INIT:
   3.187 @@ -2914,16 +2951,14 @@ asmlinkage void svm_vmexit_handler(struc
   3.188                      (unsigned long)regs->ecx, (unsigned long)regs->edx,
   3.189                      (unsigned long)regs->esi, (unsigned long)regs->edi);
   3.190  
   3.191 -        if (!(error = svm_do_page_fault(va, regs))) 
   3.192 +        if ( svm_do_page_fault(va, regs) )
   3.193          {
   3.194 -            /* Inject #PG using Interruption-Information Fields */
   3.195 -            svm_inject_exception(v, TRAP_page_fault, 1, regs->error_code);
   3.196 -
   3.197 -            v->arch.hvm_svm.cpu_cr2 = va;
   3.198 -            vmcb->cr2 = va;
   3.199 -            TRACE_3D(TRC_VMX_INTR, v->domain->domain_id,
   3.200 -                     VMEXIT_EXCEPTION_PF, va);
   3.201 +            HVMTRACE_2D(PF_XEN, v, va, regs->error_code);
   3.202 +            break;
   3.203          }
   3.204 +
   3.205 +        v->arch.hvm_svm.cpu_cr2 = vmcb->cr2 = va;
   3.206 +        svm_inject_exception(v, TRAP_page_fault, 1, regs->error_code);
   3.207          break;
   3.208      }
   3.209  
   3.210 @@ -3061,6 +3096,9 @@ asmlinkage void svm_load_cr2(void)
   3.211  {
   3.212      struct vcpu *v = current;
   3.213  
   3.214 +    // this is the last C code before the VMRUN instruction
   3.215 +    HVMTRACE_0D(VMENTRY, v);
   3.216 +
   3.217      local_irq_disable();
   3.218      asm volatile("mov %0,%%cr2": :"r" (v->arch.hvm_svm.cpu_cr2));
   3.219  }
     4.1 --- a/xen/arch/x86/hvm/vmx/intr.c	Mon Feb 26 16:27:35 2007 +0000
     4.2 +++ b/xen/arch/x86/hvm/vmx/intr.c	Mon Feb 26 17:04:39 2007 +0000
     4.3 @@ -37,6 +37,7 @@
     4.4  #include <asm/hvm/vpic.h>
     4.5  #include <asm/hvm/vlapic.h>
     4.6  #include <public/hvm/ioreq.h>
     4.7 +#include <asm/hvm/trace.h>
     4.8  
     4.9  
    4.10  static inline void
    4.11 @@ -162,8 +163,8 @@ asmlinkage void vmx_intr_assist(void)
    4.12      case APIC_DM_EXTINT:
    4.13      case APIC_DM_FIXED:
    4.14      case APIC_DM_LOWEST:
    4.15 +        HVMTRACE_2D(INJ_VIRQ, v, highest_vector, /*fake=*/ 0);
    4.16          vmx_inject_extint(v, highest_vector, VMX_DELIVER_NO_ERROR_CODE);
    4.17 -        TRACE_3D(TRC_VMX_INTR, v->domain->domain_id, highest_vector, 0);
    4.18          break;
    4.19  
    4.20      case APIC_DM_SMI:
     5.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Feb 26 16:27:35 2007 +0000
     5.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Feb 26 17:04:39 2007 +0000
     5.3 @@ -49,6 +49,7 @@
     5.4  #include <asm/x86_emulate.h>
     5.5  #include <asm/hvm/vpt.h>
     5.6  #include <public/hvm/save.h>
     5.7 +#include <asm/hvm/trace.h>
     5.8  
     5.9  static void vmx_ctxt_switch_from(struct vcpu *v);
    5.10  static void vmx_ctxt_switch_to(struct vcpu *v);
    5.11 @@ -1127,7 +1128,6 @@ static int vmx_do_page_fault(unsigned lo
    5.12  
    5.13      result = paging_fault(va, regs);
    5.14  
    5.15 -    TRACE_VMEXIT(2, result);
    5.16  #if 0
    5.17      if ( !result )
    5.18      {
    5.19 @@ -1223,6 +1223,9 @@ static void vmx_do_cpuid(struct cpu_user
    5.20      regs->ebx = (unsigned long)ebx;
    5.21      regs->ecx = (unsigned long)ecx;
    5.22      regs->edx = (unsigned long)edx;
    5.23 +
    5.24 +    HVMTRACE_3D(CPUID, current, input,
    5.25 +                ((uint64_t)eax << 32) | ebx, ((uint64_t)ecx << 32) | edx);
    5.26  }
    5.27  
    5.28  #define CASE_GET_REG_P(REG, reg)    \
    5.29 @@ -1247,6 +1250,8 @@ static void vmx_dr_access(unsigned long 
    5.30  {
    5.31      struct vcpu *v = current;
    5.32  
    5.33 +    HVMTRACE_0D(DR_WRITE, v);
    5.34 +
    5.35      v->arch.hvm_vcpu.flag_dr_dirty = 1;
    5.36  
    5.37      /* We could probably be smarter about this */
    5.38 @@ -1267,6 +1272,8 @@ static void vmx_do_invlpg(unsigned long 
    5.39      unsigned long eip;
    5.40      struct vcpu *v = current;
    5.41  
    5.42 +    HVMTRACE_2D(INVLPG, v, /*invlpga=*/ 0, va);
    5.43 +
    5.44      eip = __vmread(GUEST_RIP);
    5.45  
    5.46      HVM_DBG_LOG(DBG_LEVEL_VMMU, "eip=%lx, va=%lx",
    5.47 @@ -1416,11 +1423,14 @@ static void vmx_io_instruction(unsigned 
    5.48      else
    5.49          port = regs->edx & 0xffff;
    5.50  
    5.51 -    TRACE_VMEXIT(1, port);
    5.52 -
    5.53      size = (exit_qualification & 7) + 1;
    5.54      dir = test_bit(3, &exit_qualification); /* direction */
    5.55  
    5.56 +    if (dir==IOREQ_READ)
    5.57 +        HVMTRACE_2D(IO_READ,  current, port, size);
    5.58 +    else
    5.59 +        HVMTRACE_2D(IO_WRITE, current, port, size);
    5.60 +
    5.61      if ( test_bit(4, &exit_qualification) ) { /* string instruction */
    5.62          unsigned long addr, count = 1, base;
    5.63          paddr_t paddr;
    5.64 @@ -1614,9 +1624,6 @@ static void vmx_io_instruction(unsigned 
    5.65          if ( port == 0xe9 && dir == IOREQ_WRITE && size == 1 )
    5.66              hvm_print_line(current, regs->eax); /* guest debug output */
    5.67  
    5.68 -        if ( dir == IOREQ_WRITE )
    5.69 -            TRACE_VMEXIT(2, regs->eax);
    5.70 -
    5.71          regs->eip += inst_len;
    5.72          send_pio_req(port, 1, size, regs->eax, dir, df, 0);
    5.73      }
    5.74 @@ -2062,9 +2069,7 @@ static int mov_to_cr(int gp, int cr, str
    5.75          goto exit_and_crash;
    5.76      }
    5.77  
    5.78 -    TRACE_VMEXIT(1, TYPE_MOV_TO_CR);
    5.79 -    TRACE_VMEXIT(2, cr);
    5.80 -    TRACE_VMEXIT(3, value);
    5.81 +    HVMTRACE_2D(CR_WRITE, v, cr, value);
    5.82  
    5.83      HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
    5.84  
    5.85 @@ -2231,9 +2236,7 @@ static void mov_from_cr(int cr, int gp, 
    5.86          break;
    5.87      }
    5.88  
    5.89 -    TRACE_VMEXIT(1, TYPE_MOV_FROM_CR);
    5.90 -    TRACE_VMEXIT(2, cr);
    5.91 -    TRACE_VMEXIT(3, value);
    5.92 +    HVMTRACE_2D(CR_READ, v, cr, value);
    5.93  
    5.94      HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
    5.95  }
    5.96 @@ -2256,7 +2259,7 @@ static int vmx_cr_access(unsigned long e
    5.97          mov_from_cr(cr, gp, regs);
    5.98          break;
    5.99      case TYPE_CLTS:
   5.100 -        TRACE_VMEXIT(1, TYPE_CLTS);
   5.101 +//        TRACE_VMEXIT(1, TYPE_CLTS);
   5.102  
   5.103          /* We initialise the FPU now, to avoid needing another vmexit. */
   5.104          setup_fpu(v);
   5.105 @@ -2272,8 +2275,8 @@ static int vmx_cr_access(unsigned long e
   5.106          value = v->arch.hvm_vmx.cpu_shadow_cr0;
   5.107          value = (value & ~0xF) |
   5.108              (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
   5.109 -        TRACE_VMEXIT(1, TYPE_LMSW);
   5.110 -        TRACE_VMEXIT(2, value);
   5.111 +//        TRACE_VMEXIT(1, TYPE_LMSW);
   5.112 +//        TRACE_VMEXIT(2, value);
   5.113          return vmx_set_cr0(value);
   5.114          break;
   5.115      default:
   5.116 @@ -2327,6 +2330,7 @@ static inline int vmx_do_msr_read(struct
   5.117      regs->edx = msr_content >> 32;
   5.118  
   5.119  done:
   5.120 +    HVMTRACE_2D(MSR_READ, v, ecx, msr_content);
   5.121      HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
   5.122                  ecx, (unsigned long)regs->eax,
   5.123                  (unsigned long)regs->edx);
   5.124 @@ -2343,6 +2347,7 @@ static inline int vmx_do_msr_write(struc
   5.125                  ecx, (u32)regs->eax, (u32)regs->edx);
   5.126  
   5.127      msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
   5.128 +    HVMTRACE_2D(MSR_WRITE, v, ecx, msr_content);
   5.129  
   5.130      switch (ecx) {
   5.131      case MSR_IA32_TIME_STAMP_COUNTER:
   5.132 @@ -2373,6 +2378,7 @@ static inline int vmx_do_msr_write(struc
   5.133  static void vmx_do_hlt(void)
   5.134  {
   5.135      unsigned long rflags;
   5.136 +    HVMTRACE_0D(HLT, current);
   5.137      rflags = __vmread(GUEST_RFLAGS);
   5.138      hvm_hlt(rflags);
   5.139  }
   5.140 @@ -2396,7 +2402,7 @@ static inline void vmx_do_extint(struct 
   5.141      BUG_ON(!(vector & INTR_INFO_VALID_MASK));
   5.142  
   5.143      vector &= INTR_INFO_VECTOR_MASK;
   5.144 -    TRACE_VMEXIT(1, vector);
   5.145 +    HVMTRACE_1D(INTR, current, vector);
   5.146  
   5.147      switch(vector) {
   5.148      case LOCAL_TIMER_VECTOR:
   5.149 @@ -2560,12 +2566,11 @@ asmlinkage void vmx_vmexit_handler(struc
   5.150      unsigned long exit_qualification, inst_len = 0;
   5.151      struct vcpu *v = current;
   5.152  
   5.153 -    TRACE_3D(TRC_VMX_VMEXIT + v->vcpu_id, 0, 0, 0);
   5.154 -
   5.155      exit_reason = __vmread(VM_EXIT_REASON);
   5.156  
   5.157 +    HVMTRACE_2D(VMEXIT, v, __vmread(GUEST_RIP), exit_reason);
   5.158 +
   5.159      perfc_incra(vmexits, exit_reason);
   5.160 -    TRACE_VMEXIT(0, exit_reason);
   5.161  
   5.162      if ( exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT )
   5.163          local_irq_enable();
   5.164 @@ -2589,7 +2594,6 @@ asmlinkage void vmx_vmexit_handler(struc
   5.165  
   5.166          vector = intr_info & INTR_INFO_VECTOR_MASK;
   5.167  
   5.168 -        TRACE_VMEXIT(1, vector);
   5.169          perfc_incra(cause_vector, vector);
   5.170  
   5.171          switch ( vector )
   5.172 @@ -2647,26 +2651,24 @@ asmlinkage void vmx_vmexit_handler(struc
   5.173              exit_qualification = __vmread(EXIT_QUALIFICATION);
   5.174              regs->error_code = __vmread(VM_EXIT_INTR_ERROR_CODE);
   5.175  
   5.176 -            TRACE_VMEXIT(3, regs->error_code);
   5.177 -            TRACE_VMEXIT(4, exit_qualification);
   5.178 -
   5.179              HVM_DBG_LOG(DBG_LEVEL_VMMU,
   5.180                          "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
   5.181                          (unsigned long)regs->eax, (unsigned long)regs->ebx,
   5.182                          (unsigned long)regs->ecx, (unsigned long)regs->edx,
   5.183                          (unsigned long)regs->esi, (unsigned long)regs->edi);
   5.184  
   5.185 -            if ( !vmx_do_page_fault(exit_qualification, regs) )
   5.186 +            if ( vmx_do_page_fault(exit_qualification, regs) )
   5.187              {
   5.188 -                /* Inject #PG using Interruption-Information Fields. */
   5.189 -                vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
   5.190 -                v->arch.hvm_vmx.cpu_cr2 = exit_qualification;
   5.191 -                TRACE_3D(TRC_VMX_INTR, v->domain->domain_id,
   5.192 -                         TRAP_page_fault, exit_qualification);
   5.193 +                HVMTRACE_2D(PF_XEN, v, exit_qualification, regs->error_code);
   5.194 +                break;
   5.195              }
   5.196 +
   5.197 +            v->arch.hvm_vmx.cpu_cr2 = exit_qualification;
   5.198 +            vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
   5.199              break;
   5.200          }
   5.201          case TRAP_nmi:
   5.202 +            HVMTRACE_0D(NMI, v);
   5.203              if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI )
   5.204                  do_nmi(regs); /* Real NMI, vector 2: normal processing. */
   5.205              else
   5.206 @@ -2708,11 +2710,11 @@ asmlinkage void vmx_vmexit_handler(struc
   5.207          __update_guest_eip(inst_len);
   5.208          exit_qualification = __vmread(EXIT_QUALIFICATION);
   5.209          vmx_do_invlpg(exit_qualification);
   5.210 -        TRACE_VMEXIT(4, exit_qualification);
   5.211          break;
   5.212      }
   5.213      case EXIT_REASON_VMCALL:
   5.214      {
   5.215 +        HVMTRACE_1D(VMMCALL, v, regs->eax);
   5.216          inst_len = __get_instruction_length(); /* Safe: VMCALL */
   5.217          __update_guest_eip(inst_len);
   5.218          hvm_do_hypercall(regs);
   5.219 @@ -2724,7 +2726,6 @@ asmlinkage void vmx_vmexit_handler(struc
   5.220          inst_len = __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
   5.221          if ( vmx_cr_access(exit_qualification, regs) )
   5.222              __update_guest_eip(inst_len);
   5.223 -        TRACE_VMEXIT(4, exit_qualification);
   5.224          break;
   5.225      }
   5.226      case EXIT_REASON_DR_ACCESS:
   5.227 @@ -2735,23 +2736,16 @@ asmlinkage void vmx_vmexit_handler(struc
   5.228          exit_qualification = __vmread(EXIT_QUALIFICATION);
   5.229          inst_len = __get_instruction_length(); /* Safe: IN, INS, OUT, OUTS */
   5.230          vmx_io_instruction(exit_qualification, inst_len);
   5.231 -        TRACE_VMEXIT(4, exit_qualification);
   5.232          break;
   5.233      case EXIT_REASON_MSR_READ:
   5.234          inst_len = __get_instruction_length(); /* Safe: RDMSR */
   5.235          if ( vmx_do_msr_read(regs) )
   5.236              __update_guest_eip(inst_len);
   5.237 -        TRACE_VMEXIT(1, regs->ecx);
   5.238 -        TRACE_VMEXIT(2, regs->eax);
   5.239 -        TRACE_VMEXIT(3, regs->edx);
   5.240          break;
   5.241      case EXIT_REASON_MSR_WRITE:
   5.242          inst_len = __get_instruction_length(); /* Safe: WRMSR */
   5.243          if ( vmx_do_msr_write(regs) )
   5.244              __update_guest_eip(inst_len);
   5.245 -        TRACE_VMEXIT(1, regs->ecx);
   5.246 -        TRACE_VMEXIT(2, regs->eax);
   5.247 -        TRACE_VMEXIT(3, regs->edx);
   5.248          break;
   5.249      case EXIT_REASON_MWAIT_INSTRUCTION:
   5.250      case EXIT_REASON_MONITOR_INSTRUCTION:
   5.251 @@ -2785,18 +2779,7 @@ asmlinkage void vmx_vmexit_handler(struc
   5.252  asmlinkage void vmx_trace_vmentry(void)
   5.253  {
   5.254      struct vcpu *v = current;
   5.255 -    TRACE_5D(TRC_VMX_VMENTRY + current->vcpu_id,
   5.256 -             v->arch.hvm_vcpu.hvm_trace_values[0],
   5.257 -             v->arch.hvm_vcpu.hvm_trace_values[1],
   5.258 -             v->arch.hvm_vcpu.hvm_trace_values[2],
   5.259 -             v->arch.hvm_vcpu.hvm_trace_values[3],
   5.260 -             v->arch.hvm_vcpu.hvm_trace_values[4]);
   5.261 -
   5.262 -    TRACE_VMEXIT(0, 0);
   5.263 -    TRACE_VMEXIT(1, 0);
   5.264 -    TRACE_VMEXIT(2, 0);
   5.265 -    TRACE_VMEXIT(3, 0);
   5.266 -    TRACE_VMEXIT(4, 0);
   5.267 +    HVMTRACE_0D(VMENTRY, v);
   5.268  }
   5.269  
   5.270  /*
     6.1 --- a/xen/common/trace.c	Mon Feb 26 16:27:35 2007 +0000
     6.2 +++ b/xen/common/trace.c	Mon Feb 26 17:04:39 2007 +0000
     6.3 @@ -66,7 +66,7 @@ static DEFINE_PER_CPU(unsigned long, los
     6.4  
     6.5  /* a flag recording whether initialization has been done */
     6.6  /* or more properly, if the tbuf subsystem is enabled right now */
     6.7 -int tb_init_done;
     6.8 +int tb_init_done __read_mostly;
     6.9  
    6.10  /* which CPUs tracing is enabled on */
    6.11  static cpumask_t tb_cpu_mask = CPU_MASK_ALL;
     7.1 --- a/xen/include/asm-x86/hvm/support.h	Mon Feb 26 16:27:35 2007 +0000
     7.2 +++ b/xen/include/asm-x86/hvm/support.h	Mon Feb 26 17:04:39 2007 +0000
     7.3 @@ -116,9 +116,6 @@ extern unsigned int opt_hvm_debug_level;
     7.4  #define HVM_DBG_LOG(level, _f, _a...)
     7.5  #endif
     7.6  
     7.7 -#define TRACE_VMEXIT(index, value)                              \
     7.8 -    current->arch.hvm_vcpu.hvm_trace_values[index] = (value)
     7.9 -
    7.10  /*
    7.11   * Save/restore support 
    7.12   */
     8.1 --- a/xen/include/asm-x86/hvm/vcpu.h	Mon Feb 26 16:27:35 2007 +0000
     8.2 +++ b/xen/include/asm-x86/hvm/vcpu.h	Mon Feb 26 17:04:39 2007 +0000
     8.3 @@ -45,8 +45,6 @@ struct hvm_vcpu {
     8.4      /* Flags */
     8.5      int                 flag_dr_dirty;
     8.6  
     8.7 -    unsigned long       hvm_trace_values[5];
     8.8 -
     8.9      union {
    8.10          struct arch_vmx_struct vmx;
    8.11          struct arch_svm_struct svm;
     9.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Mon Feb 26 16:27:35 2007 +0000
     9.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Mon Feb 26 17:04:39 2007 +0000
     9.3 @@ -25,6 +25,7 @@
     9.4  #include <asm/processor.h>
     9.5  #include <asm/hvm/vmx/vmcs.h>
     9.6  #include <asm/i387.h>
     9.7 +#include <asm/hvm/trace.h>
     9.8  
     9.9  extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
    9.10  extern void vmx_asm_do_vmentry(void);
    9.11 @@ -294,6 +295,11 @@ static inline void __vmx_inject_exceptio
    9.12        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, ilen);
    9.13  
    9.14      __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
    9.15 +
    9.16 +    if (trap == TRAP_page_fault)
    9.17 +        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vmx.cpu_cr2, error_code);
    9.18 +    else
    9.19 +        HVMTRACE_2D(INJ_EXC, v, trap, error_code);
    9.20  }
    9.21  
    9.22  static inline void vmx_inject_hw_exception(
    10.1 --- a/xen/include/public/trace.h	Mon Feb 26 16:27:35 2007 +0000
    10.2 +++ b/xen/include/public/trace.h	Mon Feb 26 17:04:39 2007 +0000
    10.3 @@ -31,17 +31,16 @@
    10.4  #define TRC_GEN     0x0001f000    /* General trace            */
    10.5  #define TRC_SCHED   0x0002f000    /* Xen Scheduler trace      */
    10.6  #define TRC_DOM0OP  0x0004f000    /* Xen DOM0 operation trace */
    10.7 -#define TRC_VMX     0x0008f000    /* Xen VMX trace            */
    10.8 +#define TRC_HVM     0x0008f000    /* Xen HVM trace            */
    10.9  #define TRC_MEM     0x0010f000    /* Xen memory trace         */
   10.10  #define TRC_ALL     0xfffff000
   10.11  
   10.12  /* Trace subclasses */
   10.13  #define TRC_SUBCLS_SHIFT 12
   10.14  
   10.15 -/* trace subclasses for VMX */
   10.16 -#define TRC_VMXEXIT  0x00081000   /* VMX exit trace            */
   10.17 -#define TRC_VMXENTRY 0x00082000   /* VMX exit trace            */
   10.18 -#define TRC_VMXINTR  0x00084000   /* VMX interrupt trace       */
    10.19 +/* trace subclasses for HVM */
   10.20 +#define TRC_HVM_ENTRYEXIT 0x00081000   /* VMENTRY and #VMEXIT       */
   10.21 +#define TRC_HVM_HANDLER   0x00082000   /* various HVM handlers      */
   10.22  
   10.23  /* Trace events per class */
   10.24  #define TRC_LOST_RECORDS        (TRC_GEN + 1)
   10.25 @@ -67,10 +66,28 @@
   10.26  #define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
   10.27  
   10.28  /* trace events per subclass */
   10.29 -#define TRC_VMX_VMEXIT          (TRC_VMXEXIT + 1)
   10.30 -#define TRC_VMX_VMENTRY         (TRC_VMXENTRY + 1)
   10.31 -#define TRC_VMX_INTR            (TRC_VMXINTR + 1)
   10.32 -
   10.33 +#define TRC_HVM_VMENTRY         (TRC_HVM_ENTRYEXIT + 0x01)
   10.34 +#define TRC_HVM_VMEXIT          (TRC_HVM_ENTRYEXIT + 0x02)
   10.35 +#define TRC_HVM_PF_XEN          (TRC_HVM_HANDLER + 0x01)
   10.36 +#define TRC_HVM_PF_INJECT       (TRC_HVM_HANDLER + 0x02)
   10.37 +#define TRC_HVM_INJ_EXC         (TRC_HVM_HANDLER + 0x03)
   10.38 +#define TRC_HVM_INJ_VIRQ        (TRC_HVM_HANDLER + 0x04)
   10.39 +#define TRC_HVM_REINJ_VIRQ      (TRC_HVM_HANDLER + 0x05)
   10.40 +#define TRC_HVM_IO_READ         (TRC_HVM_HANDLER + 0x06)
   10.41 +#define TRC_HVM_IO_WRITE        (TRC_HVM_HANDLER + 0x07)
   10.42 +#define TRC_HVM_CR_READ         (TRC_HVM_HANDLER + 0x08)
   10.43 +#define TRC_HVM_CR_WRITE        (TRC_HVM_HANDLER + 0x09)
   10.44 +#define TRC_HVM_DR_READ         (TRC_HVM_HANDLER + 0x0A)
   10.45 +#define TRC_HVM_DR_WRITE        (TRC_HVM_HANDLER + 0x0B)
   10.46 +#define TRC_HVM_MSR_READ        (TRC_HVM_HANDLER + 0x0C)
   10.47 +#define TRC_HVM_MSR_WRITE       (TRC_HVM_HANDLER + 0x0D)
   10.48 +#define TRC_HVM_CPUID           (TRC_HVM_HANDLER + 0x0E)
   10.49 +#define TRC_HVM_INTR            (TRC_HVM_HANDLER + 0x0F)
   10.50 +#define TRC_HVM_NMI             (TRC_HVM_HANDLER + 0x10)
   10.51 +#define TRC_HVM_SMI             (TRC_HVM_HANDLER + 0x11)
   10.52 +#define TRC_HVM_VMMCALL         (TRC_HVM_HANDLER + 0x12)
   10.53 +#define TRC_HVM_HLT             (TRC_HVM_HANDLER + 0x13)
   10.54 +#define TRC_HVM_INVLPG          (TRC_HVM_HANDLER + 0x14)
   10.55  
   10.56  /* This structure represents a single trace buffer record. */
   10.57  struct t_rec {