(Almost) no functional change.
* In irq_move_cleanup_interrupt(), use the 'me' local variable rather than
calling smp_processor_id() again. This manifests as a minor code
improvement.
* In vlapic_update_timer() and lapic_rearm(), introduce a new 'timer_period'
local variable to simplify the expressions used for both the trace and
create_periodic_time() calls.
All other differences in the compiled binary are to do with line numbers
changing.
Some conversion notes:
* HVMTRACE_LONG_[234]D() and TRACE_2_LONG_[234]D() were latently buggy. They
blindly discard extra parameters, but luckily no users are impacted. They
are also obfuscated wrappers, depending on exactly one or two parameters
being TRC_PAR_LONG() to compile successfully.
* HVMTRACE_LONG_1D() behaves unlike its named companions, and takes exactly
one 64bit parameter which it splits manually. Its one user,
vmx_cr_access()'s LMSW path, gets adjusted.
* TRACE_?D() and TRACE_2_LONG_*() change to TRACE_TIME() as cycles is always
enabled.
* HVMTRACE_ND() is opencoded for VMENTRY/VMEXIT records to include cycles.
These are converted to TRACE_TIME(), with the old modifier parameter
expressed as an OR at the callsite. One callsite, svm_vmenter_helper(), had
a nested tb_init_done check, which is dropped. (The optimiser also spotted
this, which is why it doesn't manifest as a binary difference.)
* All uses of *LONG() are either opencoded or swapped to using a struct, to
avoid MISRA issues.
* All HVMTRACE_?D() change to TRACE() as cycles is explicitly skipped.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: George Dunlap <george.dunlap@cloud.com>
/* Get start time (ticks) */
t1 = alternative_call(cpuidle_get_tick);
/* Trace cpu idle entry */
- TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);
+ TRACE_TIME(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);
update_last_cx_stat(power, cx, t1);
t2 = alternative_call(cpuidle_get_tick);
trace_exit_reason(irq_traced);
/* Trace cpu idle exit */
- TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2,
- irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);
+ TRACE_TIME(TRC_PM_IDLE_EXIT, cx->idx, t2,
+ irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);
/* Update statistics */
update_idle_stats(power, cx, t1, t2);
/* Re-enable interrupts */
/* Get start time (ticks) */
t1 = alternative_call(cpuidle_get_tick);
/* Trace cpu idle entry */
- TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);
+ TRACE_TIME(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);
update_last_cx_stat(power, cx, t1);
cstate_restore_tsc();
trace_exit_reason(irq_traced);
/* Trace cpu idle exit */
- TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2,
- irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);
+ TRACE_TIME(TRC_PM_IDLE_EXIT, cx->idx, t2,
+ irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);
/* Update statistics */
update_idle_stats(power, cx, t1, t2);
return do_sched_op(cmd, guest_handle_from_ptr(NULL, void));
case SCHEDOP_shutdown:
- TRACE_3D(TRC_SCHED_SHUTDOWN,
- current->domain->domain_id, current->vcpu_id, arg);
+ TRACE_TIME(TRC_SCHED_SHUTDOWN,
+ current->domain->domain_id, current->vcpu_id, arg);
domain_shutdown(current->domain, (u8)arg);
break;
lapic_timer_off();
before = alternative_call(cpuidle_get_tick);
- TRACE_4D(TRC_PM_IDLE_ENTRY, cx->type, before, exp, pred);
+ TRACE_TIME(TRC_PM_IDLE_ENTRY, cx->type, before, exp, pred);
update_last_cx_stat(power, cx, before);
local_irq_enable();
- TRACE_6D(TRC_PM_IDLE_EXIT, cx->type, after,
- irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);
+ TRACE_TIME(TRC_PM_IDLE_EXIT, cx->type, after,
+ irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);
if (!(lapic_timer_reliable_states & (1 << cx->type)))
lapic_timer_on();
{
uint64_t *count_load_time = priv;
- TRACE_0D(TRC_HVM_EMUL_PIT_TIMER_CB);
+ TRACE_TIME(TRC_HVM_EMUL_PIT_TIMER_CB);
*count_load_time = get_guest_time(v);
}
case 2:
case 3:
/* Periodic timer. */
- TRACE_2D(TRC_HVM_EMUL_PIT_START_TIMER, period, period);
+ TRACE_TIME(TRC_HVM_EMUL_PIT_START_TIMER, period, period);
create_periodic_time(v, &pit->pt0, period, period, 0, pit_time_fired,
&pit->count_load_time[channel], false);
break;
case 1:
case 4:
/* One-shot timer. */
- TRACE_2D(TRC_HVM_EMUL_PIT_START_TIMER, period, 0);
+ TRACE_TIME(TRC_HVM_EMUL_PIT_START_TIMER, period, 0);
create_periodic_time(v, &pit->pt0, period, 0, 0, pit_time_fired,
&pit->count_load_time[channel], false);
break;
default:
- TRACE_0D(TRC_HVM_EMUL_PIT_STOP_TIMER);
+ TRACE_TIME(TRC_HVM_EMUL_PIT_STOP_TIMER);
destroy_periodic_time(&pit->pt0);
break;
}
if ( !has_vpit(current->domain) )
return;
- TRACE_0D(TRC_HVM_EMUL_PIT_STOP_TIMER);
+ TRACE_TIME(TRC_HVM_EMUL_PIT_STOP_TIMER);
spin_lock(&pit->lock);
destroy_periodic_time(&pit->pt0);
spin_unlock(&pit->lock);
if ( is_hvm_domain(d) )
{
- TRACE_0D(TRC_HVM_EMUL_PIT_STOP_TIMER);
+ TRACE_TIME(TRC_HVM_EMUL_PIT_STOP_TIMER);
destroy_periodic_time(&pit->pt0);
pit->pt0.source = PTSRC_isa;
}
if ( is_hvm_domain(d) )
{
- TRACE_0D(TRC_HVM_EMUL_PIT_STOP_TIMER);
+ TRACE_TIME(TRC_HVM_EMUL_PIT_STOP_TIMER);
destroy_periodic_time(&pit->pt0);
}
}
case 3:
case 4:
*val = current->arch.hvm.guest_cr[reg];
- HVMTRACE_LONG_2D(CR_READ, reg, TRC_PAR_LONG(*val));
+ TRACE(TRC_HVM_CR_READ64, reg, *val, *val >> 32);
return X86EMUL_OKAY;
default:
break;
{
int rc;
- HVMTRACE_LONG_2D(CR_WRITE, reg, TRC_PAR_LONG(val));
+ TRACE(TRC_HVM_CR_WRITE64, reg, val, val >> 32);
switch ( reg )
{
case 0:
int rc = x86emul_read_xcr(reg, val, ctxt);
if ( rc == X86EMUL_OKAY )
- HVMTRACE_LONG_2D(XCR_READ, reg, TRC_PAR_LONG(*val));
+ TRACE(TRC_HVM_XCR_READ64, reg, *val, *val >> 32);
return rc;
}
uint64_t val,
struct x86_emulate_ctxt *ctxt)
{
- HVMTRACE_LONG_2D(XCR_WRITE, reg, TRC_PAR_LONG(val));
+ TRACE(TRC_HVM_XCR_WRITE64, reg, val, val >> 32);
return x86emul_write_xcr(reg, val, ctxt);
}
{
ASSERT(tn < HPET_TIMER_NUM);
ASSERT(rw_is_write_locked(&h->lock));
- TRACE_1D(TRC_HVM_EMUL_HPET_STOP_TIMER, tn);
+ TRACE_TIME(TRC_HVM_EMUL_HPET_STOP_TIMER, tn);
destroy_periodic_time(&h->pt[tn]);
/* read the comparator to get it updated so a read while stopped will
* return the expected value. */
if ( !oneshot )
period_ns = hpet_tick_to_ns(h, h->hpet.period[tn]);
- TRACE_2_LONG_4D(TRC_HVM_EMUL_HPET_START_TIMER, tn, irq,
- TRC_PAR_LONG(diff_ns), TRC_PAR_LONG(period_ns));
+ TRACE_TIME(TRC_HVM_EMUL_HPET_START_TIMER, tn, irq,
+ diff_ns, diff_ns >> 32, period_ns, period_ns >> 32);
create_periodic_time(vhpet_vcpu(h), &h->pt[tn], diff_ns, period_ns,
irq, timer_level(h, tn) ? hpet_timer_fired : NULL,
do_sched_op(SCHEDOP_block, guest_handle_from_ptr(NULL, void));
- HVMTRACE_1D(HLT, /* pending = */ vcpu_runnable(curr));
+ TRACE(TRC_HVM_HLT, /* pending = */ vcpu_runnable(curr));
}
void hvm_triple_fault(void)
unsigned long val = *decode_gpr(guest_cpu_user_regs(), gpr);
int rc;
- HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(val));
+ TRACE(TRC_HVM_CR_WRITE64, cr, val, val >> 32);
HVM_DBG_LOG(DBG_LEVEL_1, "CR%u, value = %lx", cr, val);
switch ( cr )
}
*reg = val;
- HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(val));
+ TRACE(TRC_HVM_CR_READ64, cr, val, val >> 32);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%u, value = %lx", cr, val);
return X86EMUL_OKAY;
}
guest_cpuid(curr, leaf, subleaf, &res);
- HVMTRACE_6D(CPUID, leaf, subleaf, res.a, res.b, res.c, res.d);
+ TRACE(TRC_HVM_CPUID, leaf, subleaf, res.a, res.b, res.c, res.d);
regs->rax = res.a;
regs->rbx = res.b;
{
msr_split(regs, hvm_get_guest_tsc(current));
- HVMTRACE_2D(RDTSC, regs->eax, regs->edx);
+ TRACE(TRC_HVM_RDTSC, regs->eax, regs->edx);
}
int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
}
out:
- HVMTRACE_3D(MSR_READ, msr,
- (uint32_t)*msr_content, (uint32_t)(*msr_content >> 32));
+ TRACE(TRC_HVM_MSR_READ, msr, *msr_content, *msr_content >> 32);
return ret;
gp_fault:
struct domain *d = v->domain;
int ret;
- HVMTRACE_3D(MSR_WRITE, msr,
- (uint32_t)msr_content, (uint32_t)(msr_content >> 32));
+ TRACE(TRC_HVM_MSR_WRITE, msr, msr_content, msr_content >> 32);
if ( may_defer && unlikely(monitored_msr(v->domain, msr)) )
{
&& ++(s->pt_dead_ticks) >= 10 )
{
/* VM is ignoring its RTC; no point in running the timer */
- TRACE_0D(TRC_HVM_EMUL_RTC_STOP_TIMER);
+ TRACE_TIME(TRC_HVM_EMUL_RTC_STOP_TIMER);
destroy_periodic_time(&s->pt);
s->period = 0;
}
delta = period - ((now - s->start_time) % period);
if ( s->hw.cmos_data[RTC_REG_B] & RTC_PIE )
{
- TRACE_2D(TRC_HVM_EMUL_RTC_START_TIMER, delta, period);
+ TRACE_TIME(TRC_HVM_EMUL_RTC_START_TIMER, delta, period);
create_periodic_time(v, &s->pt, delta, period,
RTC_IRQ, rtc_pf_callback, s, false);
}
}
/* fall through */
default:
- TRACE_0D(TRC_HVM_EMUL_RTC_STOP_TIMER);
+ TRACE_TIME(TRC_HVM_EMUL_RTC_STOP_TIMER);
destroy_periodic_time(&s->pt);
s->period = 0;
break;
rtc_update_irq(s);
if ( (data ^ orig) & RTC_PIE )
{
- TRACE_0D(TRC_HVM_EMUL_RTC_STOP_TIMER);
+ TRACE_TIME(TRC_HVM_EMUL_RTC_STOP_TIMER);
destroy_periodic_time(&s->pt);
s->period = 0;
rtc_timer_update(s);
if ( !has_vrtc(d) )
return;
- TRACE_0D(TRC_HVM_EMUL_RTC_STOP_TIMER);
+ TRACE_TIME(TRC_HVM_EMUL_RTC_STOP_TIMER);
destroy_periodic_time(&s->pt);
s->period = 0;
s->pt.source = PTSRC_isa;
spin_barrier(&s->lock);
- TRACE_0D(TRC_HVM_EMUL_RTC_STOP_TIMER);
+ TRACE_TIME(TRC_HVM_EMUL_RTC_STOP_TIMER);
destroy_periodic_time(&s->pt);
kill_timer(&s->update_timer);
kill_timer(&s->update_timer2);
}
}
- HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
- vmcb->event_inj.v ? vmcb->event_inj.vector : -1);
+ TRACE(TRC_HVM_INTR_WINDOW, intack.vector, intack.source,
+ vmcb->event_inj.v ? vmcb->event_inj.vector : -1);
/*
* Create a dummy virtual interrupt to intercept as soon as the
}
else
{
- HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
+ TRACE(TRC_HVM_INJ_VIRQ, intack.vector, /*fake=*/ 0);
svm_inject_extint(v, intack.vector);
pt_intr_post(v, intack);
}
svm_asid_handle_vmrun();
- if ( unlikely(tb_init_done) )
- HVMTRACE_ND(VMENTRY,
- nestedhvm_vcpu_in_guestmode(curr) ? TRC_HVM_NESTEDFLAG : 0,
- 1/*cycles*/);
+ TRACE_TIME(TRC_HVM_VMENTRY |
+ (nestedhvm_vcpu_in_guestmode(curr) ? TRC_HVM_NESTEDFLAG : 0));
svm_sync_vmcb(curr, vmcb_needs_vmsave);
if ( _event.vector == X86_EXC_PF &&
_event.type == X86_EVENTTYPE_HW_EXCEPTION )
- HVMTRACE_LONG_2D(PF_INJECT, _event.error_code,
- TRC_PAR_LONG(_event.cr2));
+ TRACE(TRC_HVM_PF_INJECT64, _event.error_code,
+ _event.cr2, _event.cr2 >> 32);
else
- HVMTRACE_2D(INJ_EXC, _event.vector, _event.error_code);
+ TRACE(TRC_HVM_INJ_EXC, _event.vector, _event.error_code);
}
static bool cf_check svm_event_pending(const struct vcpu *v)
{
struct vmcb_struct *vmcb = vcpu_nestedhvm(v).nv_n1vmcx;
- HVMTRACE_0D(DR_WRITE);
+ TRACE(TRC_HVM_DR_WRITE);
__restore_debug_registers(vmcb, v);
}
static void svm_invlpg_intercept(unsigned long linear)
{
- HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(linear));
+ TRACE(TRC_HVM_INVLPG64, 0, linear, linear >> 32);
paging_invlpg(current, linear);
}
exit_reason = vmcb->exitcode;
if ( hvm_long_mode_active(v) )
- HVMTRACE_ND(VMEXIT64, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
- 1/*cycles*/, exit_reason, TRC_PAR_LONG(regs->rip));
+ TRACE_TIME(TRC_HVM_VMEXIT64 | (vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0),
+ exit_reason, regs->rip, regs->rip >> 32);
else
- HVMTRACE_ND(VMEXIT, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
- 1/*cycles*/, exit_reason, regs->eip);
+ TRACE_TIME(TRC_HVM_VMEXIT | (vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0),
+ exit_reason, regs->eip);
if ( vcpu_guestmode )
{
{
case VMEXIT_INTR:
/* Asynchronous event, handled when we STGI'd after the VMEXIT. */
- HVMTRACE_0D(INTR);
+ TRACE(TRC_HVM_INTR);
break;
case VMEXIT_NMI:
/* Asynchronous event, handled when we STGI'd after the VMEXIT. */
- HVMTRACE_0D(NMI);
+ TRACE(TRC_HVM_NMI);
break;
case VMEXIT_SMI:
/* Asynchronous event, handled when we STGI'd after the VMEXIT. */
- HVMTRACE_0D(SMI);
+ TRACE(TRC_HVM_SMI);
break;
case VMEXIT_ICEBP:
if ( trace_will_trace_event(TRC_SHADOW) )
break;
if ( hvm_long_mode_active(v) )
- HVMTRACE_LONG_2D(PF_XEN, regs->error_code, TRC_PAR_LONG(va));
+ TRACE(TRC_HVM_PF_XEN64, regs->error_code, va, va >> 32);
else
- HVMTRACE_2D(PF_XEN, regs->error_code, va);
+ TRACE(TRC_HVM_PF_XEN, regs->error_code, va);
break;
}
}
case VMEXIT_EXCEPTION_AC:
- HVMTRACE_1D(TRAP, X86_EXC_AC);
+ TRACE(TRC_HVM_TRAP, X86_EXC_AC);
hvm_inject_hw_exception(X86_EXC_AC, vmcb->ei.exc.ec);
break;
/* Asynchronous event, handled when we STGI'd after the VMEXIT. */
case VMEXIT_EXCEPTION_MC:
- HVMTRACE_0D(MCE);
+ TRACE(TRC_HVM_MCE);
svm_vmexit_mce_intercept(v, regs);
break;
if ( (insn_len = svm_get_insn_len(v, INSTR_VMCALL)) == 0 )
break;
BUG_ON(vcpu_guestmode);
- HVMTRACE_1D(VMMCALL, regs->eax);
+ TRACE(TRC_HVM_VMMCALL, regs->eax);
if ( hvm_hypercall(regs) == HVM_HCALL_completed )
__update_guest_eip(regs, insn_len);
static void cf_check vlapic_pt_cb(struct vcpu *v, void *data)
{
- TRACE_0D(TRC_HVM_EMUL_LAPIC_TIMER_CB);
+ TRACE_TIME(TRC_HVM_EMUL_LAPIC_TIMER_CB);
*(s_time_t *)data = hvm_get_guest_time(v);
}
if ( delta && (is_oneshot || is_periodic) )
{
+ uint64_t timer_period = 0;
+
if ( vlapic->hw.timer_divisor != old_divisor )
{
period = (uint64_t)vlapic_get_reg(vlapic, APIC_TMICT)
delta = delta * vlapic->hw.timer_divisor / old_divisor;
}
- TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(delta),
- TRC_PAR_LONG(is_periodic ? period : 0),
- vlapic->pt.irq);
+ if ( is_periodic )
+ timer_period = period;
+
+ TRACE_TIME(TRC_HVM_EMUL_LAPIC_START_TIMER, delta, delta >> 32,
+ timer_period, timer_period >> 32, vlapic->pt.irq);
create_periodic_time(current, &vlapic->pt, delta,
- is_periodic ? period : 0, vlapic->pt.irq,
+ timer_period, vlapic->pt.irq,
is_periodic ? vlapic_pt_cb : NULL,
&vlapic->timer_last_update, false);
}
else
{
- TRACE_0D(TRC_HVM_EMUL_LAPIC_STOP_TIMER);
+ TRACE_TIME(TRC_HVM_EMUL_LAPIC_STOP_TIMER);
destroy_periodic_time(&vlapic->pt);
/*
* From now, TMCCT should return 0 until TMICT is set again.
vlapic->hw.tdt_msr = value;
/* .... reprogram tdt timer */
- TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(delta),
- TRC_PAR_LONG(0LL), vlapic->pt.irq);
+ TRACE_TIME(TRC_HVM_EMUL_LAPIC_START_TIMER, delta, delta >> 32,
+ 0, 0, vlapic->pt.irq);
create_periodic_time(v, &vlapic->pt, delta, 0,
vlapic->pt.irq, vlapic_tdt_pt_cb,
&vlapic->timer_last_update, false);
/* trigger a timer event if needed */
if ( value > 0 )
{
- TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(0LL),
- TRC_PAR_LONG(0LL), vlapic->pt.irq);
+ TRACE_TIME(TRC_HVM_EMUL_LAPIC_START_TIMER, 0, 0,
+ 0, 0, vlapic->pt.irq);
create_periodic_time(v, &vlapic->pt, 0, 0,
vlapic->pt.irq, vlapic_tdt_pt_cb,
&vlapic->timer_last_update, false);
else
{
/* .... stop tdt timer */
- TRACE_0D(TRC_HVM_EMUL_LAPIC_STOP_TIMER);
+ TRACE_TIME(TRC_HVM_EMUL_LAPIC_STOP_TIMER);
destroy_periodic_time(&vlapic->pt);
}
if ( target )
accept = __vlapic_accept_pic_intr(v);
- TRACE_2D(TRC_HVM_EMUL_LAPIC_PIC_INTR, target, accept);
+ TRACE_TIME(TRC_HVM_EMUL_LAPIC_PIC_INTR, target, accept);
return target && accept;
}
vlapic_set_reg(vlapic, APIC_SPIV, 0xff);
vlapic->hw.disabled |= VLAPIC_SW_DISABLED;
- TRACE_0D(TRC_HVM_EMUL_LAPIC_STOP_TIMER);
+ TRACE_TIME(TRC_HVM_EMUL_LAPIC_STOP_TIMER);
destroy_periodic_time(&vlapic->pt);
}
static void lapic_rearm(struct vlapic *s)
{
unsigned long tmict;
- uint64_t period, tdt_msr;
+ uint64_t period, timer_period = 0, tdt_msr;
bool is_periodic;
s->pt.irq = vlapic_get_reg(s, APIC_LVTT) & APIC_VECTOR_MASK;
(uint32_t)tmict * s->hw.timer_divisor);
is_periodic = vlapic_lvtt_period(s);
- TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(period),
- TRC_PAR_LONG(is_periodic ? period : 0LL), s->pt.irq);
+ if ( is_periodic )
+ timer_period = period;
+
+ TRACE_TIME(TRC_HVM_EMUL_LAPIC_START_TIMER, period, period >> 32,
+ timer_period, timer_period >> 32, s->pt.irq);
create_periodic_time(vlapic_vcpu(s), &s->pt, period,
- is_periodic ? period : 0,
+ timer_period,
s->pt.irq,
is_periodic ? vlapic_pt_cb : NULL,
&s->timer_last_update, false);
return;
tasklet_kill(&vlapic->init_sipi.tasklet);
- TRACE_0D(TRC_HVM_EMUL_LAPIC_STOP_TIMER);
+ TRACE_TIME(TRC_HVM_EMUL_LAPIC_STOP_TIMER);
destroy_periodic_time(&vlapic->pt);
unmap_domain_page_global(vlapic->regs);
free_domheap_page(vlapic->regs_page);
unsigned long intr;
__vmread(VM_ENTRY_INTR_INFO, &intr);
- HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
- (intr & INTR_INFO_VALID_MASK) ? intr & 0xff : -1);
+ TRACE(TRC_HVM_INTR_WINDOW, intack.vector, intack.source,
+ (intr & INTR_INFO_VALID_MASK) ? intr & 0xff : -1);
}
if ( (intack.source == hvm_intsrc_nmi) && cpu_has_vmx_vnmi )
}
else
{
- HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
+ TRACE(TRC_HVM_INJ_VIRQ, intack.vector, /*fake=*/ 0);
vmx_inject_extint(intack.vector, intack.source);
pt_intr_post(v, intack);
}
if ( (_event.vector == X86_EXC_PF) &&
(_event.type == X86_EVENTTYPE_HW_EXCEPTION) )
- HVMTRACE_LONG_2D(PF_INJECT, _event.error_code,
- TRC_PAR_LONG(curr->arch.hvm.guest_cr[2]));
+ TRACE(TRC_HVM_PF_INJECT64, _event.error_code,
+ curr->arch.hvm.guest_cr[2], curr->arch.hvm.guest_cr[2] >> 32);
else
- HVMTRACE_2D(INJ_EXC, _event.vector, _event.error_code);
+ TRACE(TRC_HVM_INJ_EXC, _event.vector, _event.error_code);
}
static bool cf_check vmx_event_pending(const struct vcpu *v)
{
struct vcpu *v = current;
- HVMTRACE_0D(DR_WRITE);
+ TRACE(TRC_HVM_DR_WRITE);
if ( !v->arch.hvm.flag_dr_dirty )
__restore_debug_registers(v);
static void vmx_invlpg_intercept(unsigned long linear)
{
- HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(linear));
+ TRACE(TRC_HVM_INVLPG64, /*invlpga=*/ 0, linear, linear >> 32);
paging_invlpg(current, linear);
}
hvm_monitor_crX(CR0, value, old);
curr->arch.hvm.guest_cr[0] = value;
vmx_update_guest_cr(curr, 0, 0);
- HVMTRACE_0D(CLTS);
+ TRACE(TRC_HVM_CLTS);
break;
}
value = (value & ~(X86_CR0_MP|X86_CR0_EM|X86_CR0_TS)) |
(qual.lmsw_data &
(X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS));
- HVMTRACE_LONG_1D(LMSW, value);
+ TRACE(TRC_HVM_LMSW64, value, value >> 32);
if ( (rc = hvm_set_cr0(value, true)) == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(X86_EXC_GP, 0);
BUG_ON(!(vector & INTR_INFO_VALID_MASK));
vector &= INTR_INFO_VECTOR_MASK;
- HVMTRACE_1D(INTR, vector);
+ TRACE(TRC_HVM_INTR, vector);
regs->entry_vector = vector;
do_IRQ(regs);
case EXIT_REASON_MCE_DURING_VMENTRY:
printk("MCE\n");
- HVMTRACE_0D(MCE);
+ TRACE(TRC_HVM_MCE);
/* Already handled. */
break;
{
update_guest_eip(); /* Safe: APIC data write */
vlapic_EOI_set(vcpu_vlapic(current));
- HVMTRACE_0D(VLAPIC);
+ TRACE(TRC_HVM_VLAPIC);
return 1;
}
__vmread(VM_EXIT_REASON, &exit_reason);
if ( hvm_long_mode_active(v) )
- HVMTRACE_ND(VMEXIT64, 0, 1/*cycles*/, exit_reason,
- TRC_PAR_LONG(regs->rip));
+ TRACE_TIME(TRC_HVM_VMEXIT64, exit_reason, regs->rip, regs->rip >> 32);
else
- HVMTRACE_ND(VMEXIT, 0, 1/*cycles*/, exit_reason, regs->eip);
+ TRACE_TIME(TRC_HVM_VMEXIT, exit_reason, regs->eip);
perfc_incra(vmexits, (uint16_t)exit_reason);
default:
perfc_incr(realmode_exits);
v->arch.hvm.vmx.vmx_emulate = 1;
- HVMTRACE_0D(REALMODE_EMULATE);
+ TRACE(TRC_HVM_REALMODE_EMULATE);
return;
}
case EXIT_REASON_EXTERNAL_INTERRUPT:
* Table 23-1, "Exit Qualification for Debug Exceptions").
*/
__vmread(EXIT_QUALIFICATION, &exit_qualification);
- HVMTRACE_1D(TRAP_DEBUG, exit_qualification);
+ TRACE(TRC_HVM_TRAP_DEBUG, exit_qualification);
__restore_debug_registers(v);
write_debugreg(6, exit_qualification | DR_STATUS_RESERVED_ONE);
domain_pause_for_debugger();
break;
case X86_EXC_BP:
- HVMTRACE_1D(TRAP, vector);
+ TRACE(TRC_HVM_TRAP, vector);
if ( !v->domain->debugger_attached )
{
unsigned long insn_len;
}
break;
case X86_EXC_NM:
- HVMTRACE_1D(TRAP, vector);
+ TRACE(TRC_HVM_TRAP, vector);
vmx_fpu_dirty_intercept();
break;
case X86_EXC_PF:
if ( trace_will_trace_event(TRC_SHADOW) )
break;
if ( hvm_long_mode_active(v) )
- HVMTRACE_LONG_2D(PF_XEN, regs->error_code,
- TRC_PAR_LONG(exit_qualification) );
+ TRACE(TRC_HVM_PF_XEN64, regs->error_code,
+ exit_qualification, exit_qualification >> 32);
else
- HVMTRACE_2D(PF_XEN,
- regs->error_code, exit_qualification );
+ TRACE(TRC_HVM_PF_XEN, regs->error_code, exit_qualification);
break;
}
hvm_inject_page_fault(regs->error_code, exit_qualification);
break;
case X86_EXC_AC:
- HVMTRACE_1D(TRAP, vector);
+ TRACE(TRC_HVM_TRAP, vector);
vmx_propagate_intr(intr_info);
break;
case X86_EXC_NMI:
if ( MASK_EXTR(intr_info, INTR_INFO_INTR_TYPE_MASK) !=
X86_EVENTTYPE_NMI )
goto exit_and_crash;
- HVMTRACE_0D(NMI);
+ TRACE(TRC_HVM_NMI);
/* Already handled above. */
break;
case X86_EXC_MC:
- HVMTRACE_0D(MCE);
+ TRACE(TRC_HVM_MCE);
/* Already handled above. */
break;
case X86_EXC_UD:
- HVMTRACE_1D(TRAP, vector);
+ TRACE(TRC_HVM_TRAP, vector);
hvm_ud_intercept(regs);
break;
default:
- HVMTRACE_1D(TRAP, vector);
+ TRACE(TRC_HVM_TRAP, vector);
goto exit_and_crash;
}
break;
break;
case EXIT_REASON_VMCALL:
- HVMTRACE_1D(VMMCALL, regs->eax);
+ TRACE(TRC_HVM_VMMCALL, regs->eax);
if ( hvm_hypercall(regs) == HVM_HCALL_completed )
update_guest_eip(); /* Safe: VMCALL */
if ( unlikely(curr->arch.hvm.vmx.lbr_flags & LBR_FIXUP_MASK) )
lbr_fixup();
- HVMTRACE_ND(VMENTRY, 0, 1/*cycles*/);
+ TRACE_TIME(TRC_HVM_VMENTRY);
__vmwrite(GUEST_RIP, regs->rip);
__vmwrite(GUEST_RSP, regs->rsp);
ASSERT(vpic_is_locked(vpic));
irq = vpic_get_highest_priority_irq(vpic);
- TRACE_3D(TRC_HVM_EMUL_PIC_INT_OUTPUT, vpic->int_output, vpic->is_master,
- irq);
+ TRACE_TIME(TRC_HVM_EMUL_PIC_INT_OUTPUT, vpic->int_output, vpic->is_master, irq);
if ( vpic->int_output == (!vpic->init_state && irq >= 0) )
return;
if ( v != NULL )
{
- TRACE_1D(TRC_HVM_EMUL_PIC_KICK, irq);
+ TRACE_TIME(TRC_HVM_EMUL_PIC_KICK, irq);
vcpu_kick(v);
}
}
ASSERT(vpic_is_locked(vpic));
- TRACE_2D(TRC_HVM_EMUL_PIC_INTACK, vpic->is_master, irq);
+ TRACE_TIME(TRC_HVM_EMUL_PIC_INTACK, vpic->is_master, irq);
/* Edge-triggered: clear the IRR (forget the edge). */
if ( !(vpic->elcr & mask) )
vpic->irr &= ~mask;
ASSERT(irq <= 15);
ASSERT(vpic_is_locked(vpic));
- TRACE_1D(TRC_HVM_EMUL_PIC_POSEDGE, irq);
+ TRACE_TIME(TRC_HVM_EMUL_PIC_POSEDGE, irq);
if ( irq == 2 )
return;
ASSERT(irq <= 15);
ASSERT(vpic_is_locked(vpic));
- TRACE_1D(TRC_HVM_EMUL_PIC_NEGEDGE, irq);
+ TRACE_TIME(TRC_HVM_EMUL_PIC_NEGEDGE, irq);
if ( irq == 2 )
return;
accept = vlapic_accept_pic_intr(v);
- TRACE_2D(TRC_HVM_EMUL_PIC_PEND_IRQ_CALL, accept, vpic->int_output);
+ TRACE_TIME(TRC_HVM_EMUL_PIC_PEND_IRQ_CALL, accept, vpic->int_output);
if ( !accept || !vpic->int_output )
return -1;
for_each_cpu(cpu, tmp_mask)
{
ASSERT(per_cpu(vector_irq, cpu)[old_vector] == irq);
- TRACE_3D(TRC_HW_IRQ_MOVE_FINISH, irq, old_vector, cpu);
+ TRACE_TIME(TRC_HW_IRQ_MOVE_FINISH, irq, old_vector, cpu);
per_cpu(vector_irq, cpu)[old_vector] = ~irq;
}
goto unlock;
}
send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
- TRACE_3D(TRC_HW_IRQ_MOVE_CLEANUP_DELAY,
- irq, vector, smp_processor_id());
+ TRACE_TIME(TRC_HW_IRQ_MOVE_CLEANUP_DELAY, irq, vector, me);
goto unlock;
}
- TRACE_3D(TRC_HW_IRQ_MOVE_CLEANUP,
- irq, vector, smp_processor_id());
+ TRACE_TIME(TRC_HW_IRQ_MOVE_CLEANUP, irq, vector, me);
per_cpu(vector_irq, me)[vector] = ~irq;
desc->arch.move_cleanup_count--;
spin_unlock(&desc->lock);
}
}
- TRACE_1D(TRC_HW_IRQ_UNMAPPED_VECTOR, vector);
+ TRACE_TIME(TRC_HW_IRQ_UNMAPPED_VECTOR, vector);
}
goto out_no_unlock;
}
tsc_in = tb_init_done ? get_cycles() : 0;
do_IRQ_guest(desc, vector);
- TRACE_3D(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles());
+ TRACE_TIME(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles());
goto out_no_end;
}
tsc_in = tb_init_done ? get_cycles() : 0;
action->handler(irq, action->dev_id);
- TRACE_3D(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles());
+ TRACE_TIME(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles());
spin_lock_irq(&desc->lock);
}
curr->arch.pv.ctrlreg[0] &= ~X86_CR0_TS;
}
else
- TRACE_0D(TRC_PV_MATH_STATE_RESTORE);
+ TRACE_TIME(TRC_PV_MATH_STATE_RESTORE);
#else
ASSERT_UNREACHABLE();
#endif
hardware_domain = d;
}
- TRACE_1D(TRC_DOM0_DOM_ADD, d->domain_id);
+ TRACE_TIME(TRC_DOM0_DOM_ADD, d->domain_id);
lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, d, domid);
if ( atomic_cmpxchg(&d->refcnt, 0, DOMAIN_DESTROYED) != 0 )
return;
- TRACE_1D(TRC_DOM0_DOM_REM, d->domain_id);
+ TRACE_TIME(TRC_DOM0_DOM_REM, d->domain_id);
/* Delete from task list and task hashtable. */
spin_lock(&domlist_update_lock);
goto undo_out;
}
- TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);
+ TRACE_TIME(TRC_MEM_PAGE_GRANT_MAP, op->dom);
/*
* All maptrack entry users check mt->flags first before using the
return;
}
- TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);
+ TRACE_TIME(TRC_MEM_PAGE_GRANT_UNMAP, dom);
rgt = rd->grant_table;
put_gfn(d, gop.mfn);
#endif
- TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);
+ TRACE_TIME(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);
/* Tell the guest about its new page frame. */
grant_read_lock(e->grant_table);
retval = alternative_call(cpufreq_driver.target,
policy, target_freq, relation);
if ( retval == 0 )
- TRACE_2D(TRC_PM_FREQ_CHANGE, prev_freq/1000, policy->cur/1000);
+ TRACE_TIME(TRC_PM_FREQ_CHANGE, prev_freq / 1000, policy->cur / 1000);
}
return retval;