static inline void trace_runstate_change(const struct vcpu *v, int new_state)
{
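+ /*
+  * Two uint16_t fields give the same 32-bit trace payload as the old
+  * bitfields, without relying on implementation-defined bitfield layout.
+  */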
- struct { uint32_t vcpu:16, domain:16; } d;
+ struct { uint16_t vcpu, domain; } d;
uint32_t event;
if ( likely(!tb_init_done) )
return;
d.vcpu = v->vcpu_id;
d.domain = v->domain->domain_id;
event = TRC_SCHED_RUNSTATE_CHANGE;
event |= ( v->runstate.state & 0x3 ) << 8;
event |= ( new_state & 0x3 ) << 4;
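+ /*
+  * The old and new runstates are encoded in the event number itself
+  * (bits 8-9 and 4-5 respectively), so the record body only carries
+  * the vcpu/domain pair.
+  */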
- __trace_var(event, 1/*tsc*/, sizeof(d), &d);
+ trace_time(event, sizeof(d), &d);
}
static inline void trace_continue_running(const struct vcpu *v)
{
- struct { uint32_t vcpu:16, domain:16; } d;
+ struct { uint16_t vcpu, domain; } d;
if ( likely(!tb_init_done) )
return;
d.vcpu = v->vcpu_id;
d.domain = v->domain->domain_id;
- __trace_var(TRC_SCHED_CONTINUE_RUNNING, 1/*tsc*/, sizeof(d), &d);
+ trace_time(TRC_SCHED_CONTINUE_RUNNING, sizeof(d), &d);
}
static inline void vcpu_urgent_count_update(struct vcpu *v)
return ret;
SCHED_STAT_CRANK(dom_init);
- TRACE_1D(TRC_SCHED_DOM_ADD, d->domain_id);
+ TRACE_TIME(TRC_SCHED_DOM_ADD, d->domain_id);
rcu_read_lock(&sched_res_rculock);
if ( d->cpupool )
{
SCHED_STAT_CRANK(dom_destroy);
- TRACE_1D(TRC_SCHED_DOM_REM, d->domain_id);
+ TRACE_TIME(TRC_SCHED_DOM_REM, d->domain_id);
rcu_read_lock(&sched_res_rculock);
unsigned long flags;
spinlock_t *lock;
- TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
+ TRACE_TIME(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
rcu_read_lock(&sched_res_rculock);
spinlock_t *lock;
struct sched_unit *unit = v->sched_unit;
- TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
+ TRACE_TIME(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
rcu_read_lock(&sched_res_rculock);
}
else
{
- TRACE_2D(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
+ TRACE_TIME(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
raise_softirq(SCHEDULE_SOFTIRQ);
}
}
if ( sched_poll->timeout != 0 )
set_timer(&v->poll_timer, sched_poll->timeout);
- TRACE_2D(TRC_SCHED_BLOCK, d->domain_id, v->vcpu_id);
+ TRACE_TIME(TRC_SCHED_BLOCK, d->domain_id, v->vcpu_id);
raise_softirq(SCHEDULE_SOFTIRQ);
return 0;
SCHED_STAT_CRANK(vcpu_yield);
- TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
+ TRACE_TIME(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
raise_softirq(SCHEDULE_SOFTIRQ);
return 0;
}
if ( copy_from_guest(&sched_shutdown, arg, 1) )
break;
- TRACE_3D(TRC_SCHED_SHUTDOWN,
- current->domain->domain_id, current->vcpu_id,
- sched_shutdown.reason);
+ TRACE_TIME(TRC_SCHED_SHUTDOWN, current->domain->domain_id,
+ current->vcpu_id, sched_shutdown.reason);
ret = domain_shutdown(current->domain, (u8)sched_shutdown.reason);
break;
if ( copy_from_guest(&sched_shutdown, arg, 1) )
break;
- TRACE_3D(TRC_SCHED_SHUTDOWN_CODE,
- d->domain_id, current->vcpu_id, sched_shutdown.reason);
+ TRACE_TIME(TRC_SCHED_SHUTDOWN_CODE, d->domain_id, current->vcpu_id,
+ sched_shutdown.reason);
spin_lock(&d->shutdown_lock);
if ( d->shutdown_code == SHUTDOWN_CODE_INVALID )
rcu_read_lock(&sched_res_rculock);
if ( (ret = sched_adjust_dom(dom_scheduler(d), d, op)) == 0 )
- TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);
+ TRACE_TIME(TRC_SCHED_ADJDOM, d->domain_id);
rcu_read_unlock(&sched_res_rculock);
sr->curr = next;
sr->prev = prev;
- TRACE_3D(TRC_SCHED_SWITCH_INFPREV, prev->domain->domain_id,
- prev->unit_id, now - prev->state_entry_time);
- TRACE_4D(TRC_SCHED_SWITCH_INFNEXT, next->domain->domain_id,
- next->unit_id,
- (next->vcpu_list->runstate.state == RUNSTATE_runnable) ?
- (now - next->state_entry_time) : 0, prev->next_time);
- TRACE_4D(TRC_SCHED_SWITCH, prev->domain->domain_id, prev->unit_id,
- next->domain->domain_id, next->unit_id);
+ TRACE_TIME(TRC_SCHED_SWITCH_INFPREV, prev->domain->domain_id,
+ prev->unit_id, now - prev->state_entry_time);
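+ /*
+  * For the incoming unit, the third field is the time it spent waiting
+  * in the runnable state, or 0 if it wasn't runnable.
+  */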
+ TRACE_TIME(TRC_SCHED_SWITCH_INFNEXT, next->domain->domain_id, next->unit_id,
+ (next->vcpu_list->runstate.state == RUNSTATE_runnable) ?
+ (now - next->state_entry_time) : 0, prev->next_time);
+ TRACE_TIME(TRC_SCHED_SWITCH, prev->domain->domain_id, prev->unit_id,
+ next->domain->domain_id, next->unit_id);
ASSERT(!unit_running(next));
{
if ( unlikely(vprev == vnext) )
{
- TRACE_4D(TRC_SCHED_SWITCH_INFCONT,
- vnext->domain->domain_id, vnext->sched_unit->unit_id,
- now - vprev->runstate.state_entry_time,
- vprev->sched_unit->next_time);
+ TRACE_TIME(TRC_SCHED_SWITCH_INFCONT,
+ vnext->domain->domain_id, vnext->sched_unit->unit_id,
+ now - vprev->runstate.state_entry_time,
+ vprev->sched_unit->next_time);
sched_context_switched(vprev, vnext);
/*
{
/* Avoid TRACE_*: saves checking !tb_init_done each step */
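+ /* trace_time() bundles the tsc flag that __trace_var() took explicitly. */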
for_each_cpu(cpu, &mask)
- __trace_var(TRC_CSCHED_TICKLE, 1, sizeof(cpu), &cpu);
+ trace_time(TRC_CSCHED_TICKLE, sizeof(cpu), &cpu);
}
/*
if ( commit && spc )
spc->idle_bias = cpu;
- TRACE_3D(TRC_CSCHED_PICKED_CPU, unit->domain->domain_id, unit->unit_id,
- cpu);
+ TRACE_TIME(TRC_CSCHED_PICKED_CPU, unit->domain->domain_id, unit->unit_id, cpu);
return cpu;
}
}
}
- TRACE_3D(TRC_CSCHED_ACCOUNT_START, sdom->dom->domain_id,
- svc->unit->unit_id, sdom->active_unit_count);
+ TRACE_TIME(TRC_CSCHED_ACCOUNT_START, sdom->dom->domain_id,
+ svc->unit->unit_id, sdom->active_unit_count);
spin_unlock_irqrestore(&prv->lock, flags);
}
list_del_init(&sdom->active_sdom_elem);
}
- TRACE_3D(TRC_CSCHED_ACCOUNT_STOP, sdom->dom->domain_id,
- svc->unit->unit_id, sdom->active_unit_count);
+ TRACE_TIME(TRC_CSCHED_ACCOUNT_STOP, sdom->dom->domain_id,
+ svc->unit->unit_id, sdom->active_unit_count);
}
static void
if ( svc->pri == CSCHED_PRI_TS_BOOST )
{
svc->pri = CSCHED_PRI_TS_UNDER;
- TRACE_2D(TRC_CSCHED_BOOST_END, svc->sdom->dom->domain_id,
- svc->unit->unit_id);
+ TRACE_TIME(TRC_CSCHED_BOOST_END, svc->sdom->dom->domain_id,
+ svc->unit->unit_id);
}
/*
if ( !migrating && svc->pri == CSCHED_PRI_TS_UNDER &&
!test_bit(CSCHED_FLAG_UNIT_PARKED, &svc->flags) )
{
- TRACE_2D(TRC_CSCHED_BOOST_START, unit->domain->domain_id,
- unit->unit_id);
+ TRACE_TIME(TRC_CSCHED_BOOST_START, unit->domain->domain_id, unit->unit_id);
SCHED_STAT_CRANK(unit_boost);
svc->pri = CSCHED_PRI_TS_BOOST;
}
if ( __csched_unit_is_migrateable(prv, unit, cpu, cpumask_scratch) )
{
/* We got a candidate. Grab it! */
- TRACE_3D(TRC_CSCHED_STOLEN_UNIT, peer_cpu,
- unit->domain->domain_id, unit->unit_id);
+ TRACE_TIME(TRC_CSCHED_STOLEN_UNIT, peer_cpu,
+ unit->domain->domain_id, unit->unit_id);
SCHED_UNIT_STAT_CRANK(speer, migrate_q);
SCHED_STAT_CRANK(migrate_queued);
runq_remove(speer);
*/
if ( CSCHED_PCPU(peer_cpu)->nr_runnable <= 1 )
{
- TRACE_2D(TRC_CSCHED_STEAL_CHECK, peer_cpu, /* skipp'n */ 0);
+ TRACE_TIME(TRC_CSCHED_STEAL_CHECK, peer_cpu, /* skipp'n */ 0);
goto next_cpu;
}
if ( !lock )
{
SCHED_STAT_CRANK(steal_trylock_failed);
- TRACE_2D(TRC_CSCHED_STEAL_CHECK, peer_cpu, /* skip */ 0);
+ TRACE_TIME(TRC_CSCHED_STEAL_CHECK, peer_cpu, /* skip */ 0);
goto next_cpu;
}
- TRACE_2D(TRC_CSCHED_STEAL_CHECK, peer_cpu, /* checked */ 1);
+ TRACE_TIME(TRC_CSCHED_STEAL_CHECK, peer_cpu, /* checked */ 1);
/* Any work over there to steal? */
speer = cpumask_test_cpu(peer_cpu, online) ?
SCHED_STAT_CRANK(schedule);
CSCHED_UNIT_CHECK(unit);
- /*
- * Here in Credit1 code, we usually just call TRACE_nD() helpers, and
- * don't care about packing. But scheduling happens very often, so it
- * actually is important that the record is as small as possible.
- */
if ( unlikely(tb_init_done) )
{
struct {
- unsigned cpu:16, tasklet:8, idle:8;
- } d;
- d.cpu = cur_cpu;
- d.tasklet = tasklet_work_scheduled;
- d.idle = is_idle_unit(unit);
- __trace_var(TRC_CSCHED_SCHEDULE, 1, sizeof(d),
- (unsigned char *)&d);
+ uint16_t cpu;
+ uint8_t tasklet, idle;
+ } d = {
+ .cpu = cur_cpu,
+ .tasklet = tasklet_work_scheduled,
+ .idle = is_idle_unit(unit),
+ };
+
+ trace_time(TRC_CSCHED_SCHEDULE, sizeof(d), &d);
}
runtime = now - unit->state_entry_time;
if ( unlikely(tb_init_done) )
{
struct {
- unsigned unit:16, dom:16;
- unsigned runtime;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.runtime = runtime;
- __trace_var(TRC_CSCHED_RATELIMIT, 1, sizeof(d),
- (unsigned char *)&d);
+ uint16_t unit, dom;
+ uint32_t runtime;
+ } d = {
+ .dom = unit->domain->domain_id,
+ .unit = unit->unit_id,
+ .runtime = runtime,
+ };
+
+ trace_time(TRC_CSCHED_RATELIMIT, sizeof(d), &d);
}
goto out;
/* Tasklet work (which runs in idle UNIT context) overrides all else. */
if ( tasklet_work_scheduled )
{
- TRACE_0D(TRC_CSCHED_SCHED_TASKLET);
+ TRACE_TIME(TRC_CSCHED_SCHED_TASKLET);
snext = CSCHED_UNIT(sched_idle_unit(sched_cpu));
snext->pri = CSCHED_PRI_TS_BOOST;
}
struct {
uint16_t unit, dom;
uint32_t new_cpu;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.new_cpu = new_cpu;
- __trace_var(TRC_SNULL_PICKED_CPU, 1, sizeof(d), &d);
+ } d = {
+ .unit = unit->unit_id,
+ .dom = unit->domain->domain_id,
+ .new_cpu = new_cpu,
+ };
+
+ trace_time(TRC_SNULL_PICKED_CPU, sizeof(d), &d);
}
return get_sched_res(new_cpu);
struct {
uint16_t unit, dom;
uint32_t cpu;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.cpu = cpu;
- __trace_var(TRC_SNULL_UNIT_ASSIGN, 1, sizeof(d), &d);
+ } d = {
+ .unit = unit->unit_id,
+ .dom = unit->domain->domain_id,
+ .cpu = cpu,
+ };
+
+ trace_time(TRC_SNULL_UNIT_ASSIGN, sizeof(d), &d);
}
}
struct {
uint16_t unit, dom;
uint32_t cpu;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.cpu = cpu;
- __trace_var(TRC_SNULL_UNIT_DEASSIGN, 1, sizeof(d), &d);
+ } d = {
+ .unit = unit->unit_id,
+ .dom = unit->domain->domain_id,
+ .cpu = cpu,
+ };
+
+ trace_time(TRC_SNULL_UNIT_DEASSIGN, sizeof(d), &d);
}
spin_lock(&prv->waitq_lock);
struct {
uint16_t unit, dom;
uint16_t cpu, new_cpu;
- } d;
- d.dom = unit->domain->domain_id;
- d.unit = unit->unit_id;
- d.cpu = sched_unit_master(unit);
- d.new_cpu = new_cpu;
- __trace_var(TRC_SNULL_MIGRATE, 1, sizeof(d), &d);
+ } d = {
+ .unit = unit->unit_id,
+ .dom = unit->domain->domain_id,
+ .cpu = sched_unit_master(unit),
+ .new_cpu = new_cpu,
+ };
+
+ trace_time(TRC_SNULL_MIGRATE, sizeof(d), &d);
}
/*
struct {
uint16_t tasklet, cpu;
int16_t unit, dom;
- } d;
- d.cpu = cur_cpu;
- d.tasklet = tasklet_work_scheduled;
+ } d = {
+ .tasklet = tasklet_work_scheduled,
+ .cpu = cur_cpu,
+ };
+
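+ /* unit/dom are -1 when this pCPU has no unit assigned to it. */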
if ( npc->unit == NULL )
{
d.unit = d.dom = -1;
}
else
{
d.unit = npc->unit->unit_id;
d.dom = npc->unit->domain->domain_id;
}
- __trace_var(TRC_SNULL_SCHEDULE, 1, sizeof(d), &d);
+
+ trace_time(TRC_SNULL_SCHEDULE, sizeof(d), &d);
}
if ( tasklet_work_scheduled )
{
- trace_var(TRC_SNULL_TASKLET, 1, 0, NULL);
+ TRACE_TIME(TRC_SNULL_TASKLET);
prev->next_task = sched_idle_unit(sched_cpu);
}
else