* @param pdata scheduler specific PCPU data (we don't have any)
* @param vdata scheduler specific VCPU data of the idle vcpu
*/
-static void
+static spinlock_t *
a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
void *pdata, void *vdata)
{
idle_vcpu[cpu]->sched_priv = vdata;
- per_cpu(scheduler, cpu) = new_ops;
- per_cpu(schedule_data, cpu).sched_priv = NULL; /* no pdata */
-
- /*
- * (Re?)route the lock to its default location. We actually do not use
- * it, but if we leave it pointing to where it does now (i.e., the
- * runqueue lock for this PCPU in the default scheduler), we'd be
- * causing unnecessary contention on that lock (in cases where it is
- * shared among multiple PCPUs, like in Credit2 and RTDS).
- */
- sd->schedule_lock = &sd->_lock;
+ return &sd->_lock;
}
/**
}
/* Change the scheduler of cpu to us (Credit). */
-static void
+static spinlock_t *
csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
void *pdata, void *vdata)
{
init_pdata(prv, pdata, cpu);
spin_unlock(&prv->lock);
- per_cpu(scheduler, cpu) = new_ops;
- per_cpu(schedule_data, cpu).sched_priv = pdata;
-
- /*
- * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
- * if it is free (and it can be) we want that anyone that manages
- * taking it, finds all the initializations we've done above in place.
- */
- smp_mb();
- sd->schedule_lock = &sd->_lock;
+ return &sd->_lock;
}
#ifndef NDEBUG
}
/* Change the scheduler of cpu to us (Credit2). */
-static void
+static spinlock_t *
csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
void *pdata, void *vdata)
{
*/
ASSERT(per_cpu(schedule_data, cpu).schedule_lock != &prv->rqd[rqi].lock);
- per_cpu(scheduler, cpu) = new_ops;
- per_cpu(schedule_data, cpu).sched_priv = pdata;
-
- /*
- * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
- * if it is free (and it can be) we want that anyone that manages
- * taking it, find all the initializations we've done above in place.
- */
- smp_mb();
- per_cpu(schedule_data, cpu).schedule_lock = &prv->rqd[rqi].lock;
-
write_unlock(&prv->lock);
+
+ return &prv->rqd[rqi].lock;
}
static void
}
/* Change the scheduler of cpu to us (null). */
-static void null_switch_sched(struct scheduler *new_ops, unsigned int cpu,
- void *pdata, void *vdata)
+static spinlock_t *null_switch_sched(struct scheduler *new_ops,
+ unsigned int cpu,
+ void *pdata, void *vdata)
{
struct schedule_data *sd = &per_cpu(schedule_data, cpu);
struct null_private *prv = null_priv(new_ops);
init_pdata(prv, cpu);
- per_cpu(scheduler, cpu) = new_ops;
- per_cpu(schedule_data, cpu).sched_priv = pdata;
-
- /*
- * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
- * if it is free (and it can be) we want that anyone that manages
- * taking it, finds all the initializations we've done above in place.
- */
- smp_mb();
- sd->schedule_lock = &sd->_lock;
+ return &sd->_lock;
}
static void null_vcpu_insert(const struct scheduler *ops, struct vcpu *v)
}
/* Change the scheduler of cpu to us (RTDS). */
-static void
+static spinlock_t *
rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
void *pdata, void *vdata)
{
}
idle_vcpu[cpu]->sched_priv = vdata;
- per_cpu(scheduler, cpu) = new_ops;
- per_cpu(schedule_data, cpu).sched_priv = NULL; /* no pdata */
- /*
- * (Re?)route the lock to the per pCPU lock as /last/ thing. In fact,
- * if it is free (and it can be) we want that anyone that manages
- * taking it, find all the initializations we've done above in place.
- */
- smp_mb();
- per_cpu(schedule_data, cpu).schedule_lock = &prv->lock;
+ return &prv->lock;
}
static void
struct scheduler *old_ops = per_cpu(scheduler, cpu);
struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
struct cpupool *old_pool = per_cpu(cpupool, cpu);
- spinlock_t * old_lock;
+ struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+ spinlock_t *old_lock, *new_lock;
/*
* pCPUs only move from a valid cpupool to free (i.e., out of any pool),
old_lock = pcpu_schedule_lock_irq(cpu);
vpriv_old = idle->sched_priv;
- ppriv_old = per_cpu(schedule_data, cpu).sched_priv;
- sched_switch_sched(new_ops, cpu, ppriv, vpriv);
+ ppriv_old = sd->sched_priv;
+ new_lock = sched_switch_sched(new_ops, cpu, ppriv, vpriv);
+
+ per_cpu(scheduler, cpu) = new_ops;
+ sd->sched_priv = ppriv;
+
+ /*
+ * The data above is protected under new_lock, which may be unlocked.
+ * Another CPU can take new_lock as soon as sd->schedule_lock is visible,
+ * and must observe all prior initialisation.
+ */
+ smp_wmb();
+ sd->schedule_lock = new_lock;
/* _Not_ pcpu_schedule_unlock(): schedule_lock may have changed! */
spin_unlock_irq(old_lock);
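
The ordering contract above is easiest to see in isolation. The following standalone sketch (plain C11 with pthreads, not Xen code; the names switch_data() and data_lock() are made up for illustration) mirrors the pattern: the writer installs the payload and then publishes the lock pointer with release semantics (the role played by smp_wmb() plus the plain store above), while a reader takes whatever lock the pointer currently names and re-checks the pointer afterwards, just as pcpu_schedule_lock() does, since the lock may have been switched while the reader was waiting.

#include <pthread.h>
#include <stdatomic.h>

/* Stand-in for struct schedule_data: a payload plus a movable lock pointer. */
struct sched_data {
    void *sched_priv;                  /* payload protected by *lock */
    _Atomic(pthread_mutex_t *) lock;   /* may be re-pointed at runtime */
};

/*
 * Writer side, analogous to schedule_cpu_switch(): install the new payload
 * first, then publish the lock returned by the switch_sched hook.  The
 * release store stands in for smp_wmb() plus the plain pointer write.
 */
static void switch_data(struct sched_data *sd, void *new_priv,
                        pthread_mutex_t *new_lock)
{
    sd->sched_priv = new_priv;
    atomic_store_explicit(&sd->lock, new_lock, memory_order_release);
}

/*
 * Reader side, analogous to pcpu_schedule_lock(): take whatever lock the
 * pointer currently names, then re-check the pointer, because it may have
 * been switched while we were waiting for the old lock.
 */
static pthread_mutex_t *data_lock(struct sched_data *sd)
{
    for ( ; ; )
    {
        pthread_mutex_t *l = atomic_load_explicit(&sd->lock,
                                                  memory_order_acquire);

        pthread_mutex_lock(l);
        if ( l == atomic_load_explicit(&sd->lock, memory_order_acquire) )
            return l;                /* still the published lock */
        pthread_mutex_unlock(l);     /* it moved underneath us: retry */
    }
}

Keeping the pointer switch and the barrier in the common code, after the hook has returned, is what lets each scheduler drop its private copy of the smp_mb()/schedule_lock assignment in the hunks above.
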
/* Idempotent. */
void (*free_domdata) (const struct scheduler *, void *);
- void (*switch_sched) (struct scheduler *, unsigned int,
+ spinlock_t * (*switch_sched) (struct scheduler *, unsigned int,
void *, void *);
/* Activate / deactivate vcpus in a cpu pool */
s->deinit(s);
}
-static inline void sched_switch_sched(struct scheduler *s, unsigned int cpu,
- void *pdata, void *vdata)
+static inline spinlock_t *sched_switch_sched(struct scheduler *s,
+ unsigned int cpu,
+ void *pdata, void *vdata)
{
- s->switch_sched(s, cpu, pdata, vdata);
+ return s->switch_sched(s, cpu, pdata, vdata);
}
static inline void sched_dump_settings(const struct scheduler *s)