 0x00081001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  VMENTRY
 0x00081002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  VMEXIT      [ exitcode = 0x%(1)08x, rIP  = 0x%(2)08x ]
 0x00081102  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  VMEXIT      [ exitcode = 0x%(1)08x, rIP  = 0x%(3)08x%(2)08x ]
+0x00081401  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  nVMENTRY
+0x00081402  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  nVMEXIT     [ exitcode = 0x%(1)08x, rIP  = 0x%(2)08x ]
+0x00081502  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  nVMEXIT     [ exitcode = 0x%(1)08x, rIP  = 0x%(3)08x%(2)08x ]
 0x00082001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  PF_XEN      [ errorcode = 0x%(2)02x, virt = 0x%(1)08x ]
 0x00082101  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  PF_XEN      [ errorcode = 0x%(3)02x, virt = 0x%(2)08x%(1)08x ]
 0x00082002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  PF_INJECT   [ errorcode = 0x%(1)02x, virt = 0x%(2)08x ]
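The three new IDs in this formats hunk (tools/xentrace/formats) follow directly from the existing ones: each is the corresponding VMENTRY/VMEXIT event code with the nested-guest flag bit OR'd in. A sketch of the arithmetic, with the constants as I read them from xen/include/public/trace.h:

    /* HVM trace events live under main class TRC_HVM (0x0008xxxx). */
    #define TRC_HVM_ENTRYEXIT   0x00081000   /* VMENTRY and #VMEXIT         */
    #define TRC_64_FLAG         0x100        /* 64-bit record variant       */
    #define TRC_HVM_NESTEDFLAG  0x400        /* event raised by an L2 guest */

    #define TRC_HVM_VMENTRY   (TRC_HVM_ENTRYEXIT + 0x01)               /* 0x00081001 */
    #define TRC_HVM_VMEXIT    (TRC_HVM_ENTRYEXIT + 0x02)               /* 0x00081002 */
    #define TRC_HVM_VMEXIT64  (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02) /* 0x00081102 */

    /* The nested variants need no #defines of their own: the flag is OR'd
     * in at trace time, e.g. TRC_HVM_VMEXIT | TRC_HVM_NESTEDFLAG == 0x00081402. */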
     exit_reason = vmcb->exitcode;
 
+    if ( hvm_long_mode_enabled(v) )
+        HVMTRACE_ND(VMEXIT64, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
+                    1/*cycles*/, 3, exit_reason,
+                    (uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
+                    0, 0, 0);
+    else
+        HVMTRACE_ND(VMEXIT, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
+                    1/*cycles*/, 2, exit_reason,
+                    (uint32_t)regs->eip,
+                    0, 0, 0, 0);
+
     if ( vcpu_guestmode ) {
         enum nestedhvm_vmexits nsret;
         struct nestedvcpu *nv = &vcpu_nestedhvm(v);
[...]
         }
     }
 
-    if ( hvm_long_mode_enabled(v) )
-        HVMTRACE_ND(VMEXIT64, 0, 1/*cycles*/, 3, exit_reason,
-                    (uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
-                    0, 0, 0);
-    else
-        HVMTRACE_ND(VMEXIT, 0, 1/*cycles*/, 2, exit_reason,
-                    (uint32_t)regs->eip,
-                    0, 0, 0, 0);
-
     if ( unlikely(exit_reason == VMEXIT_INVALID) )
     {
         svm_vmcb_dump(__func__, vmcb);
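The svm_vmexit_handler() hunk does two things: it passes a nested-flag modifier to HVMTRACE_ND, and it hoists the trace above the nested-intercept handling, so an exit taken while the vcpu is in guest mode is logged before it may be reflected to the L1 guest. A simplified sketch of HVMTRACE_ND (the real macro in xen/include/asm-x86/hvm/trace.h adds per-event compile-time switches and a struct-wrapped payload) shows how the modifier reaches the event word:

    /* Simplified sketch only: the modifier is OR'd into the event ID, so
     * vcpu_guestmode turns VMEXIT (0x00081002) into nVMEXIT (0x00081402). */
    #define HVMTRACE_ND(evt, modifier, cycles, count, d1, d2, d3, d4, d5, d6) \
        do {                                                                  \
            if ( unlikely(tb_init_done) )                                     \
            {                                                                 \
                uint32_t _d[6] = { d1, d2, d3, d4, d5, d6 };                  \
                __trace_var(TRC_HVM_ ## evt | (modifier), cycles,             \
                            sizeof(uint32_t) * (count), _d);                  \
            }                                                                 \
        } while ( 0 )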
 asmlinkage void svm_trace_vmentry(void)
 {
-    HVMTRACE_ND(VMENTRY, 0, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
+    struct vcpu *curr = current;
+    HVMTRACE_ND(VMENTRY,
+                nestedhvm_vcpu_in_guestmode(curr) ? TRC_HVM_NESTEDFLAG : 0,
+                1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
 }
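The entry path has no cached vcpu_guestmode flag, so svm_trace_vmentry() queries nestedhvm_vcpu_in_guestmode() on the current vcpu directly. On the consumer side, nested records can be told apart from ordinary ones by masking the flag bit; a hypothetical decoder-side helper (illustrative only, not from the Xen tree):

    /* An HVM entry/exit event is a nested (L2) one iff TRC_HVM_NESTEDFLAG
     * (0x400) is set in its event ID; the mask strips minor and flag bits. */
    static inline int is_nested_hvm_entryexit(uint32_t event)
    {
        return (event & 0xfffff000) == 0x00081000 /* TRC_HVM_ENTRYEXIT  */
            && (event & 0x400);                   /* TRC_HVM_NESTEDFLAG */
    }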
/*