atomic_t credit;
unsigned int residual;
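+ /* Last time this vCPU went through the scheduler (see csched_schedule()); used for the cache-hot check. */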
+ s_time_t last_sched_time;
+
#ifdef CSCHED_STATS
struct {
int credit_last;
integer_param("vcpu_migration_delay", vcpu_migration_delay_us);
static inline bool
-__csched_vcpu_is_cache_hot(const struct csched_private *prv, struct vcpu *v)
+__csched_vcpu_is_cache_hot(const struct csched_private *prv,
+ const struct csched_vcpu *svc)
{
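+ /* A vCPU is cache-hot if it was last scheduled less than vcpu_migr_delay ago. */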
bool hot = prv->vcpu_migr_delay &&
- (NOW() - v->last_run_time) < prv->vcpu_migr_delay;
+ (NOW() - svc->last_sched_time) < prv->vcpu_migr_delay;
if ( hot )
SCHED_STAT_CRANK(vcpu_hot);
__csched_vcpu_is_migrateable(const struct csched_private *prv, struct vcpu *vc,
int dest_cpu, cpumask_t *mask)
{
+ const struct csched_vcpu *svc = CSCHED_VCPU(vc);
/*
* Don't pick up work that's hot on peer PCPU, or that can't (or
* would prefer not to) run on cpu.
*/
ASSERT(!vc->is_running);
- return !__csched_vcpu_is_cache_hot(prv, vc) &&
+ return !__csched_vcpu_is_cache_hot(prv, svc) &&
cpumask_test_cpu(dest_cpu, mask);
}
/* Update credits of a non-idle VCPU. */
burn_credits(scurr, now);
scurr->start_time -= now;
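+ /* Record when this vCPU last passed through the scheduler (takes over from vcpu->last_run_time). */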
+ scurr->last_sched_time = now;
}
else
{
((prev->pause_flags & VPF_blocked) ? RUNSTATE_blocked :
(vcpu_runnable(prev) ? RUNSTATE_runnable : RUNSTATE_offline)),
now);
- prev->last_run_time = now;
ASSERT(next->runstate.state != RUNSTATE_running);
vcpu_runstate_change(next, RUNSTATE_running, now);
} runstate_guest; /* guest address */
#endif
- /* last time when vCPU is scheduled out */
- uint64_t last_run_time;
-
/* Has the FPU been initialised? */
bool fpu_initialised;
/* Has the FPU been used since it was last saved? */