#define CSCHED_FLAG_VCPU_PARKED 0x0 /* VCPU over capped credits */
#define CSCHED_FLAG_VCPU_YIELD 0x1 /* VCPU yielding */
#define CSCHED_FLAG_VCPU_MIGRATING 0x2 /* VCPU may have moved to a new pcpu */
+#define CSCHED_FLAG_VCPU_PINNED 0x4 /* VCPU can run only on 1 pcpu */
/*
cpumask_and(&idle_mask, prv->idlers, online);
idlers_empty = cpumask_empty(&idle_mask);
+ /*
+ * Exclusive pinning is when a vcpu has hard-affinity with only one
+ * cpu, and there is no other vcpu that has hard-affinity with that
+ * same cpu. This is infrequent, but if it happens, it is for achieving
+ * the most possible determinism and the least possible overhead for
+ * the vcpus in question.
+ *
+ * Try to identify the vast majority of these situations, and deal
+ * with them quickly.
+ */
+ if ( unlikely(test_bit(CSCHED_FLAG_VCPU_PINNED, &new->flags) &&
+ cpumask_test_cpu(cpu, &idle_mask)) )
+ {
+ ASSERT(cpumask_cycle(cpu, new->vcpu->cpu_hard_affinity) == cpu);
+ SCHED_STAT_CRANK(tickled_idle_cpu_excl);
+ __cpumask_set_cpu(cpu, &mask);
+ goto tickle;
+ }
+
/*
* If the pcpu is idle, or there are no idlers and the new
* vcpu is a higher priority than the old vcpu, run it here.
}
}
+ tickle:
if ( !cpumask_empty(&mask) )
{
if ( unlikely(tb_init_done) )
return rc;
}
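
For illustration only (not part of the patch): the fast path added to the tickling code above boils down to "if the waking vcpu is exclusively pinned and its single allowed pcpu is idle, tickle that pcpu directly and skip the usual affinity balancing steps". A minimal standalone sketch of that decision, using a plain unsigned long in place of cpumask_t (all names here are made up for the example; this is not Xen code):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for cpumask_t: bit n set means pcpu n is in the mask. */
typedef unsigned long toy_mask_t;

/* Mirrors the shape of the new fast path: an exclusively pinned vcpu,
 * waking while its one allowed pcpu is idle, gets that pcpu tickled
 * directly. */
static bool pinned_fast_path(toy_mask_t hard_affinity, toy_mask_t idlers,
                             unsigned int cpu)
{
    /* "Exclusively pinned": the hard affinity mask has exactly one bit set. */
    bool pinned = hard_affinity != 0 &&
                  (hard_affinity & (hard_affinity - 1)) == 0;

    if ( pinned && (idlers & (1UL << cpu)) )
    {
        printf("tickle pcpu %u directly\n", cpu);
        return true;    /* done: no need to scan for other idlers */
    }
    return false;       /* fall back to the normal tickling logic */
}

int main(void)
{
    /* vcpu pinned to pcpu 3; pcpus 1 and 3 are idle; it wakes on pcpu 3. */
    pinned_fast_path(1UL << 3, (1UL << 1) | (1UL << 3), 3);
    return 0;
}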
+static void
+csched_aff_cntl(const struct scheduler *ops, struct vcpu *v,
+ const cpumask_t *hard, const cpumask_t *soft)
+{
+ struct csched_vcpu *svc = CSCHED_VCPU(v);
+
+ if ( !hard )
+ return;
+
+ /* Are we becoming exclusively pinned? */
+ if ( cpumask_weight(hard) == 1 )
+ set_bit(CSCHED_FLAG_VCPU_PINNED, &svc->flags);
+ else
+ clear_bit(CSCHED_FLAG_VCPU_PINNED, &svc->flags);
+}
+
static inline void
__csched_set_tslice(struct csched_private *prv, unsigned int timeslice_ms)
{
.yield = csched_vcpu_yield,
.adjust = csched_dom_cntl,
+ .adjust_affinity= csched_aff_cntl,
.adjust_global = csched_sys_cntl,
.pick_cpu = csched_cpu_pick,
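
For context (not part of the patch): the adjust_affinity hook wired up above is meant to be invoked by the common scheduler code whenever a vcpu's affinity changes, with NULL passed for whichever mask (hard or soft) is not being modified; that is why csched_aff_cntl() returns early when hard is NULL. A rough sketch of what such a caller could look like (the helper name and exact call site are assumptions for illustration, not necessarily the real schedule.c code):

/* Hypothetical caller-side sketch: forward the new masks to the
 * per-scheduler hook, if the scheduler registered one.  A NULL mask
 * means "this kind of affinity is not being changed". */
static inline void sketch_adjust_affinity(const struct scheduler *ops,
                                          struct vcpu *v,
                                          const cpumask_t *hard,
                                          const cpumask_t *soft)
{
    if ( ops->adjust_affinity )
        ops->adjust_affinity(ops, v, hard, soft);
}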
*/
#define __CSFLAG_vcpu_yield 4
#define CSFLAG_vcpu_yield (1U<<__CSFLAG_vcpu_yield)
+/*
+ * CSFLAG_pinned: this vcpu is currently 'pinned', i.e., has its hard
+ * affinity set to one and only one cpu (and, hence, can only run there).
+ */
+#define __CSFLAG_pinned 5
+#define CSFLAG_pinned (1U<<__CSFLAG_pinned)
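
As an aside (not part of the patch): Credit2 keeps both the bit index (__CSFLAG_pinned, used with __set_bit()/__clear_bit()) and the derived mask (CSFLAG_pinned, used for a cheap test with a plain AND in the tickling code below). A tiny standalone illustration of that pattern in plain C, with toy names (not Xen code):

#include <stdio.h>

#define __TOY_pinned 5                      /* bit index, for set/clear */
#define TOY_pinned   (1U << __TOY_pinned)   /* mask, for testing */

int main(void)
{
    unsigned int flags = 0;

    flags |= TOY_pinned;                               /* ~ __set_bit(__CSFLAG_pinned, &flags) */
    printf("pinned? %d\n", !!(flags & TOY_pinned));    /* ~ (flags & CSFLAG_pinned) */
    flags &= ~TOY_pinned;                              /* ~ __clear_bit(__CSFLAG_pinned, &flags) */
    printf("pinned? %d\n", !!(flags & TOY_pinned));
    return 0;
}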
static unsigned int __read_mostly opt_migrate_resist = 500;
integer_param("sched_credit2_migrate_resist", opt_migrate_resist);
(unsigned char *)&d);
}
+ /*
+ * Exclusive pinning is when a vcpu has hard-affinity with only one
+ * cpu, and there is no other vcpu that has hard-affinity with that
+ * same cpu. This is infrequent, but if it happens, it is for achieving
+ * the most possible determinism and the least possible overhead for
+ * the vcpus in question.
+ *
+ * Try to identify the vast majority of these situations, and deal
+ * with them quickly.
+ */
+ if ( unlikely((new->flags & CSFLAG_pinned) &&
+ cpumask_test_cpu(cpu, &rqd->idle) &&
+ !cpumask_test_cpu(cpu, &rqd->tickled)) )
+ {
+ ASSERT(cpumask_cycle(cpu, new->vcpu->cpu_hard_affinity) == cpu);
+ SCHED_STAT_CRANK(tickled_idle_cpu_excl);
+ ipid = cpu;
+ goto tickle;
+ }
+
for_each_affinity_balance_step( bs )
{
/* Just skip first step, if we don't have a soft affinity */
return rc;
}
+static void
+csched2_aff_cntl(const struct scheduler *ops, struct vcpu *v,
+ const cpumask_t *hard, const cpumask_t *soft)
+{
+ struct csched2_vcpu *svc = csched2_vcpu(v);
+
+ if ( !hard )
+ return;
+
+ /* Are we becoming exclusively pinned? */
+ if ( cpumask_weight(hard) == 1 )
+ __set_bit(__CSFLAG_pinned, &svc->flags);
+ else
+ __clear_bit(__CSFLAG_pinned, &svc->flags);
+}
+
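
In practice (this note is not part of the patch), exclusive pinning is typically established from the toolstack, e.g. with something like "xl vcpu-pin <domain> <vcpu> <pcpu>", which ends up handing csched2_aff_cntl() a hard affinity mask containing a single cpu, so cpumask_weight(hard) == 1 and the flag is set. A standalone sketch of that weight check, with __builtin_popcountl standing in for cpumask_weight() (toy code, not Xen):

#include <stdio.h>

/* Toy stand-in for cpumask_weight(): number of cpus in the mask. */
static unsigned int toy_mask_weight(unsigned long mask)
{
    return (unsigned int)__builtin_popcountl(mask);
}

int main(void)
{
    unsigned long pinned_to_cpu2 = 1UL << 2;   /* weight == 1: exclusive  */
    unsigned long cpus_0_to_3    = 0xfUL;      /* weight == 4: not pinned */

    printf("exclusively pinned? %d\n", toy_mask_weight(pinned_to_cpu2) == 1);
    printf("exclusively pinned? %d\n", toy_mask_weight(cpus_0_to_3) == 1);
    return 0;
}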
static int csched2_sys_cntl(const struct scheduler *ops,
struct xen_sysctl_scheduler_op *sc)
{
.yield = csched2_vcpu_yield,
.adjust = csched2_dom_cntl,
+ .adjust_affinity= csched2_aff_cntl,
.adjust_global = csched2_sys_cntl,
.pick_cpu = csched2_cpu_pick,
PERFCOUNTER(vcpu_wake_not_runnable, "sched: vcpu_wake_not_runnable")
PERFCOUNTER(tickled_no_cpu, "sched: tickled_no_cpu")
PERFCOUNTER(tickled_idle_cpu, "sched: tickled_idle_cpu")
+PERFCOUNTER(tickled_idle_cpu_excl, "sched: tickled_idle_cpu_exclusive")
PERFCOUNTER(tickled_busy_cpu, "sched: tickled_busy_cpu")
PERFCOUNTER(vcpu_check, "sched: vcpu_check")