BUG_ON( !is_idle_vcpu(vc) );
}
- SCHED_STAT_CRANK(vcpu_check);
+ SCHED_STAT_CRANK(unit_check);
}
#define CSCHED_VCPU_CHECK(_vc) (__csched_vcpu_check(_vc))
#else
(NOW() - svc->last_sched_time) < prv->vcpu_migr_delay;
if ( hot )
- SCHED_STAT_CRANK(vcpu_hot);
+ SCHED_STAT_CRANK(unit_hot);
return hot;
}
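For context, the unit_hot counter above is cranked by credit1's migration-delay heuristic: a unit counts as cache-hot, and is therefore skipped by the load balancer, if it last ran on its current pCPU less than vcpu_migr_delay nanoseconds ago. A minimal sketch of that helper, reconstructed from the hunk above (the wrapper signature is an assumption):

static inline bool
__csched_vcpu_is_cache_hot(const struct csched_private *prv,
                           const struct csched_unit *svc)
{
    /* Ran here recently enough that its cache footprint is likely warm. */
    bool hot = prv->vcpu_migr_delay &&
               (NOW() - svc->last_sched_time) < prv->vcpu_migr_delay;

    if ( hot )
        SCHED_STAT_CRANK(unit_hot);

    return hot;
}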
if ( list_empty(&svc->active_vcpu_elem) )
{
SCHED_VCPU_STAT_CRANK(svc, state_active);
- SCHED_STAT_CRANK(acct_vcpu_active);
+ SCHED_STAT_CRANK(acct_unit_active);
sdom->active_vcpu_count++;
list_add(&svc->active_vcpu_elem, &sdom->active_vcpu);
BUG_ON( list_empty(&svc->active_vcpu_elem) );
SCHED_VCPU_STAT_CRANK(svc, state_idle);
- SCHED_STAT_CRANK(acct_vcpu_idle);
+ SCHED_STAT_CRANK(acct_unit_idle);
BUG_ON( prv->weight < sdom->weight );
sdom->active_vcpu_count--;
svc->pri = is_idle_domain(vc->domain) ?
CSCHED_PRI_IDLE : CSCHED_PRI_TS_UNDER;
SCHED_VCPU_STATS_RESET(svc);
- SCHED_STAT_CRANK(vcpu_alloc);
+ SCHED_STAT_CRANK(unit_alloc);
return svc;
}
unit_schedule_unlock_irq(lock, unit);
- SCHED_STAT_CRANK(vcpu_insert);
+ SCHED_STAT_CRANK(unit_insert);
}
static void
struct csched_unit * const svc = CSCHED_UNIT(unit);
struct csched_dom * const sdom = svc->sdom;
- SCHED_STAT_CRANK(vcpu_remove);
+ SCHED_STAT_CRANK(unit_remove);
ASSERT(!__vcpu_on_runq(svc));
if ( test_and_clear_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
{
- SCHED_STAT_CRANK(vcpu_unpark);
+ SCHED_STAT_CRANK(unit_unpark);
vcpu_unpause(svc->vcpu);
}
struct csched_unit * const svc = CSCHED_UNIT(unit);
unsigned int cpu = vc->processor;
- SCHED_STAT_CRANK(vcpu_sleep);
+ SCHED_STAT_CRANK(unit_sleep);
BUG_ON( is_idle_vcpu(vc) );
if ( unlikely(curr_on_cpu(vc->processor) == unit) )
{
- SCHED_STAT_CRANK(vcpu_wake_running);
+ SCHED_STAT_CRANK(unit_wake_running);
return;
}
if ( unlikely(__vcpu_on_runq(svc)) )
{
- SCHED_STAT_CRANK(vcpu_wake_onrunq);
+ SCHED_STAT_CRANK(unit_wake_onrunq);
return;
}
if ( likely(vcpu_runnable(vc)) )
- SCHED_STAT_CRANK(vcpu_wake_runnable);
+ SCHED_STAT_CRANK(unit_wake_runnable);
else
- SCHED_STAT_CRANK(vcpu_wake_not_runnable);
+ SCHED_STAT_CRANK(unit_wake_not_runnable);
/*
 * We temporarily boost the priority of waking VCPUs!
!test_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
{
TRACE_2D(TRC_CSCHED_BOOST_START, vc->domain->domain_id, vc->vcpu_id);
- SCHED_STAT_CRANK(vcpu_boost);
+ SCHED_STAT_CRANK(unit_boost);
svc->pri = CSCHED_PRI_TS_BOOST;
}
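The boost works because credit1 picks work in strict priority order, and BOOST sorts ahead of both time-share priorities, so a freshly woken unit preempts UNDER/OVER units already queued. A sketch of the priority ladder as defined in sched_credit.c (exact numeric values quoted from memory of the source; treat them as an assumption):

/* Priorities, highest first: a woken unit placed at BOOST runs before
 * any UNDER/OVER time-share unit on the runqueue. */
#define CSCHED_PRI_TS_BOOST      0    /* time-share, just woken up */
#define CSCHED_PRI_TS_UNDER     -1    /* time-share, under credit */
#define CSCHED_PRI_TS_OVER      -2    /* time-share, over credit */
#define CSCHED_PRI_IDLE        -64    /* idle unit */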
credit < -credit_cap &&
!test_and_set_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
{
- SCHED_STAT_CRANK(vcpu_park);
+ SCHED_STAT_CRANK(unit_park);
vcpu_pause_nosync(svc->vcpu);
}
* call to make sure the VCPU's priority is not boosted
* if it is woken up here.
*/
- SCHED_STAT_CRANK(vcpu_unpark);
+ SCHED_STAT_CRANK(unit_unpark);
vcpu_unpause(svc->vcpu);
clear_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags);
}
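Park and unpark pair a pause with an atomic flag test so each side fires at most once per cycle: test_and_set_bit ensures a capped unit is paused only on the first over-cap accounting pass that notices it, and test_and_clear_bit (or the clear_bit variant above) ensures the matching unpause is issued exactly once. The two halves of the pattern, condensed from the hunks above:

/* Over cap: pause once, however many accounting passes see it. */
if ( credit < -credit_cap &&
     !test_and_set_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
{
    SCHED_STAT_CRANK(unit_park);
    vcpu_pause_nosync(svc->vcpu);
}

/* Back under cap: the unpause must balance the earlier pause exactly. */
if ( test_and_clear_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
{
    SCHED_STAT_CRANK(unit_unpark);
    vcpu_unpause(svc->vcpu);
}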
{
BUG_ON( !is_idle_vcpu(vc) );
}
- SCHED_STAT_CRANK(vcpu_check);
+ SCHED_STAT_CRANK(unit_check);
}
#define CSCHED2_VCPU_CHECK(_vc) (csched2_vcpu_check(_vc))
#else
svc->budget_quota = 0;
INIT_LIST_HEAD(&svc->parked_elem);
- SCHED_STAT_CRANK(vcpu_alloc);
+ SCHED_STAT_CRANK(unit_alloc);
return svc;
}
struct csched2_unit * const svc = csched2_unit(unit);
ASSERT(!is_idle_vcpu(vc));
- SCHED_STAT_CRANK(vcpu_sleep);
+ SCHED_STAT_CRANK(unit_sleep);
if ( curr_on_cpu(vc->processor) == unit )
{
if ( unlikely(curr_on_cpu(cpu) == unit) )
{
- SCHED_STAT_CRANK(vcpu_wake_running);
+ SCHED_STAT_CRANK(unit_wake_running);
goto out;
}
if ( unlikely(vcpu_on_runq(svc)) )
{
- SCHED_STAT_CRANK(vcpu_wake_onrunq);
+ SCHED_STAT_CRANK(unit_wake_onrunq);
goto out;
}
if ( likely(vcpu_runnable(vc)) )
- SCHED_STAT_CRANK(vcpu_wake_runnable);
+ SCHED_STAT_CRANK(unit_wake_runnable);
else
- SCHED_STAT_CRANK(vcpu_wake_not_runnable);
+ SCHED_STAT_CRANK(unit_wake_not_runnable);
/* If the context hasn't been saved for this vcpu yet, we can't put it on
* another runqueue. Instead, we set a flag so that it will be put on the runqueue
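The flag that comment refers to defers the runqueue insertion until the context-saved path: a waking unit whose register state is still live on its old pCPU must not be enqueued anywhere until the context switch away from it completes. A sketch of the two halves, assuming the CSFLAG_delayed_runq_add flag used by sched_credit2.c (treat the exact flag and helper names as assumptions):

/* In csched2_unit_wake(): context not saved yet, so only record intent. */
if ( unlikely(svc->flags & CSFLAG_scheduled) )
{
    __set_bit(__CSFLAG_delayed_runq_add, &svc->flags);
    goto out;
}

/* In the context_saved hook: now it is safe to queue the unit. */
if ( __test_and_clear_bit(__CSFLAG_delayed_runq_add, &svc->flags) &&
     likely(vcpu_runnable(vc)) )
    runq_insert(ops, svc);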
sdom->nr_vcpus++;
- SCHED_STAT_CRANK(vcpu_insert);
+ SCHED_STAT_CRANK(unit_insert);
CSCHED2_VCPU_CHECK(vc);
}
ASSERT(!is_idle_vcpu(vc));
ASSERT(list_empty(&svc->runq_elem));
- SCHED_STAT_CRANK(vcpu_remove);
+ SCHED_STAT_CRANK(unit_remove);
/* Remove from runqueue */
lock = unit_schedule_lock_irq(unit);
INIT_LIST_HEAD(&nvc->waitq_elem);
nvc->vcpu = v;
- SCHED_STAT_CRANK(vcpu_alloc);
+ SCHED_STAT_CRANK(unit_alloc);
return nvc;
}
}
spin_unlock_irq(lock);
- SCHED_STAT_CRANK(vcpu_insert);
+ SCHED_STAT_CRANK(unit_insert);
}
static void null_unit_remove(const struct scheduler *ops,
out:
unit_schedule_unlock_irq(lock, unit);
- SCHED_STAT_CRANK(vcpu_remove);
+ SCHED_STAT_CRANK(unit_remove);
}
static void null_unit_wake(const struct scheduler *ops,
if ( unlikely(curr_on_cpu(cpu) == unit) )
{
- SCHED_STAT_CRANK(vcpu_wake_running);
+ SCHED_STAT_CRANK(unit_wake_running);
return;
}
if ( unlikely(!list_empty(&nvc->waitq_elem)) )
{
/* Not exactly "on runq", but close enough for reusing the counter */
- SCHED_STAT_CRANK(vcpu_wake_onrunq);
+ SCHED_STAT_CRANK(unit_wake_onrunq);
return;
}
if ( likely(vcpu_runnable(v)) )
- SCHED_STAT_CRANK(vcpu_wake_runnable);
+ SCHED_STAT_CRANK(unit_wake_runnable);
else
- SCHED_STAT_CRANK(vcpu_wake_not_runnable);
+ SCHED_STAT_CRANK(unit_wake_not_runnable);
/*
* If a vcpu is neither on a pCPU nor in the waitqueue, it means it was
if ( likely(!tickled && curr_on_cpu(cpu) == unit) )
cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
- SCHED_STAT_CRANK(vcpu_sleep);
+ SCHED_STAT_CRANK(unit_sleep);
}
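The two lines before unit_sleep show the sleep-path idiom shared by credit, null and rtds alike: if the unit being put to sleep is the one the pCPU is currently running, the scheduler cannot simply dequeue it, it has to poke that pCPU into rescheduling, and raising SCHEDULE_SOFTIRQ is how the poke is delivered:

/* Shared sleep-path idiom (cpu/unit names as in the hunks above). */
if ( curr_on_cpu(cpu) == unit )
    /* The sleeping unit is in context on this pCPU: force a reschedule. */
    cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);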
static struct sched_resource *
else
BUG_ON(!is_idle_vcpu(v));
- SCHED_STAT_CRANK(vcpu_check);
+ SCHED_STAT_CRANK(unit_check);
}
#define NULL_VCPU_CHECK(v) (null_vcpu_check(v))
#else
if ( !is_idle_vcpu(vc) )
svc->budget = RTDS_DEFAULT_BUDGET;
- SCHED_STAT_CRANK(vcpu_alloc);
+ SCHED_STAT_CRANK(unit_alloc);
return svc;
}
}
unit_schedule_unlock_irq(lock, unit);
- SCHED_STAT_CRANK(vcpu_insert);
+ SCHED_STAT_CRANK(unit_insert);
}
/*
struct rt_dom * const sdom = svc->sdom;
spinlock_t *lock;
- SCHED_STAT_CRANK(vcpu_remove);
+ SCHED_STAT_CRANK(unit_remove);
BUG_ON( sdom == NULL );
struct rt_unit * const svc = rt_unit(unit);
BUG_ON( is_idle_vcpu(vc) );
- SCHED_STAT_CRANK(vcpu_sleep);
+ SCHED_STAT_CRANK(unit_sleep);
if ( curr_on_cpu(vc->processor) == unit )
cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
if ( unlikely(curr_on_cpu(vc->processor) == unit) )
{
- SCHED_STAT_CRANK(vcpu_wake_running);
+ SCHED_STAT_CRANK(unit_wake_running);
return;
}
/* On RunQ/DepletedQ, just updating the info is enough */
if ( unlikely(vcpu_on_q(svc)) )
{
- SCHED_STAT_CRANK(vcpu_wake_onrunq);
+ SCHED_STAT_CRANK(unit_wake_onrunq);
return;
}
if ( likely(vcpu_runnable(vc)) )
- SCHED_STAT_CRANK(vcpu_wake_runnable);
+ SCHED_STAT_CRANK(unit_wake_runnable);
else
- SCHED_STAT_CRANK(vcpu_wake_not_runnable);
+ SCHED_STAT_CRANK(unit_wake_not_runnable);
/*
* If a deadline passed while svc was asleep/blocked, we need new
PERFCOUNTER(schedule, "sched: specific scheduler")
PERFCOUNTER(dom_init, "sched: dom_init")
PERFCOUNTER(dom_destroy, "sched: dom_destroy")
-PERFCOUNTER(vcpu_alloc, "sched: vcpu_alloc")
-PERFCOUNTER(vcpu_insert, "sched: vcpu_insert")
-PERFCOUNTER(vcpu_remove, "sched: vcpu_remove")
-PERFCOUNTER(vcpu_sleep, "sched: vcpu_sleep")
PERFCOUNTER(vcpu_yield, "sched: vcpu_yield")
-PERFCOUNTER(vcpu_wake_running, "sched: vcpu_wake_running")
-PERFCOUNTER(vcpu_wake_onrunq, "sched: vcpu_wake_onrunq")
-PERFCOUNTER(vcpu_wake_runnable, "sched: vcpu_wake_runnable")
-PERFCOUNTER(vcpu_wake_not_runnable, "sched: vcpu_wake_not_runnable")
+PERFCOUNTER(unit_alloc, "sched: unit_alloc")
+PERFCOUNTER(unit_insert, "sched: unit_insert")
+PERFCOUNTER(unit_remove, "sched: unit_remove")
+PERFCOUNTER(unit_sleep, "sched: unit_sleep")
+PERFCOUNTER(unit_wake_running, "sched: unit_wake_running")
+PERFCOUNTER(unit_wake_onrunq, "sched: unit_wake_onrunq")
+PERFCOUNTER(unit_wake_runnable, "sched: unit_wake_runnable")
+PERFCOUNTER(unit_wake_not_runnable, "sched: unit_wake_not_runnable")
PERFCOUNTER(tickled_no_cpu, "sched: tickled_no_cpu")
PERFCOUNTER(tickled_idle_cpu, "sched: tickled_idle_cpu")
PERFCOUNTER(tickled_idle_cpu_excl, "sched: tickled_idle_cpu_exclusive")
PERFCOUNTER(tickled_busy_cpu, "sched: tickled_busy_cpu")
-PERFCOUNTER(vcpu_check, "sched: vcpu_check")
+PERFCOUNTER(unit_check, "sched: unit_check")
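For reference, these PERFCOUNTER() entries are what the SCHED_STAT_CRANK() calls in the hunks above ultimately resolve to: each entry in perfc_defn.h becomes a per-CPU counter indexed by a generated enum value, and SCHED_STAT_CRANK(x) is a thin wrapper over perfc_incr(x), which is why every rename here has to touch both the definition and every crank site. A condensed sketch of the plumbing, simplified from xen/include/xen/perfc.h and the scheduler's private header (treat the exact macro bodies as assumptions):

/* perfc_defn.h entries expand into enum values PERFC_<name>... */
#define PERFCOUNTER(var, name)  PERFC_##var,

/* ...which index a per-CPU counter array bumped by perfc_incr(). */
#ifdef CONFIG_PERF_COUNTERS
#define perfc_incr(x)       (++this_cpu(perfcounters)[PERFC_##x])
#else
#define perfc_incr(x)       ((void)0)
#endif

/* The scheduler-facing wrapper used throughout this patch. */
#define SCHED_STAT_CRANK(x) (perfc_incr(x))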
/* credit specific counters */
PERFCOUNTER(delay_ms, "csched: delay")
PERFCOUNTER(acct_balance, "csched: acct_balance")
PERFCOUNTER(acct_reorder, "csched: acct_reorder")
PERFCOUNTER(acct_min_credit, "csched: acct_min_credit")
-PERFCOUNTER(acct_vcpu_active, "csched: acct_vcpu_active")
-PERFCOUNTER(acct_vcpu_idle, "csched: acct_vcpu_idle")
-PERFCOUNTER(vcpu_boost, "csched: vcpu_boost")
-PERFCOUNTER(vcpu_park, "csched: vcpu_park")
-PERFCOUNTER(vcpu_unpark, "csched: vcpu_unpark")
+PERFCOUNTER(acct_unit_active, "csched: acct_unit_active")
+PERFCOUNTER(acct_unit_idle, "csched: acct_unit_idle")
+PERFCOUNTER(unit_boost, "csched: unit_boost")
+PERFCOUNTER(unit_park, "csched: unit_park")
+PERFCOUNTER(unit_unpark, "csched: unit_unpark")
PERFCOUNTER(load_balance_idle, "csched: load_balance_idle")
PERFCOUNTER(load_balance_over, "csched: load_balance_over")
PERFCOUNTER(load_balance_other, "csched: load_balance_other")
PERFCOUNTER(migrate_queued, "csched: migrate_queued")
PERFCOUNTER(migrate_running, "csched: migrate_running")
PERFCOUNTER(migrate_kicked_away, "csched: migrate_kicked_away")
-PERFCOUNTER(vcpu_hot, "csched: vcpu_hot")
+PERFCOUNTER(unit_hot, "csched: unit_hot")
/* credit2 specific counters */
PERFCOUNTER(burn_credits_t2c, "csched2: burn_credits_t2c")