struct csched_pcpu * const spc = CSCHED_PCPU(cpu);
struct list_head *runq, *elem, *next, *last_under;
struct csched_vcpu *svc_elem;
+ spinlock_t *lock;
unsigned long flags;
int sort_epoch;
spc->runq_sort_last = sort_epoch;
- pcpu_schedule_lock_irqsave(cpu, flags);
+ lock = pcpu_schedule_lock_irqsave(cpu, &flags);
runq = &spc->runq;
elem = runq->next;
elem = next;
}
- pcpu_schedule_unlock_irqrestore(cpu, flags);
+ pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
}
static void
* could cause a deadlock if the peer CPU is also load
* balancing and trying to lock this CPU.
*/
- if ( !pcpu_schedule_trylock(peer_cpu) )
+ spinlock_t *lock = pcpu_schedule_trylock(peer_cpu);
+
+ if ( !lock )
{
SCHED_STAT_CRANK(steal_trylock_failed);
peer_cpu = cpumask_cycle(peer_cpu, &workers);
/* Any work over there to steal? */
speer = cpumask_test_cpu(peer_cpu, online) ?
csched_runq_steal(peer_cpu, cpu, snext->pri, bstep) : NULL;
- pcpu_schedule_unlock(peer_cpu);
+ pcpu_schedule_unlock(lock, peer_cpu);
/* As soon as one vcpu is found, balancing ends */
if ( speer != NULL )
*/
if ( ! is_idle_vcpu(vc) )
{
+ spinlock_t *lock;
+
/* FIXME: Do we need the private lock here? */
list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
/* Add vcpu to runqueue of initial processor */
- vcpu_schedule_lock_irq(vc);
+ lock = vcpu_schedule_lock_irq(vc);
runq_assign(ops, vc);
- vcpu_schedule_unlock_irq(vc);
+ vcpu_schedule_unlock_irq(lock, vc);
sdom->nr_vcpus++;
}
if ( ! is_idle_vcpu(vc) )
{
+ spinlock_t *lock;
+
SCHED_STAT_CRANK(vcpu_destroy);
/* Remove from runqueue */
- vcpu_schedule_lock_irq(vc);
+ lock = vcpu_schedule_lock_irq(vc);
runq_deassign(ops, vc);
- vcpu_schedule_unlock_irq(vc);
+ vcpu_schedule_unlock_irq(lock, vc);
/* Remove from sdom list. Don't need a lock for this, as it's called
 * synchronously when nothing else can happen. */
{
struct csched_vcpu * const svc = CSCHED_VCPU(vc);
s_time_t now = NOW();
-
- vcpu_schedule_lock_irq(vc);
+ spinlock_t *lock = vcpu_schedule_lock_irq(vc);
BUG_ON( !is_idle_vcpu(vc) && svc->rqd != RQD(ops, vc->processor));
else if ( !is_idle_vcpu(vc) )
update_load(ops, svc->rqd, svc, -1, now);
- vcpu_schedule_unlock_irq(vc);
+ vcpu_schedule_unlock_irq(lock, vc);
}
#define MAX_LOAD (1ULL<<60) /* no trailing ';' -- object-like macros must not end in a semicolon (CERT PRE11-C); a ';' here is pasted into every use site and breaks expression contexts */
* must never lock csched_priv.lock if we're holding a runqueue lock.
* Also, calling vcpu_schedule_lock() is enough, since IRQs have already
* been disabled. */
- vcpu_schedule_lock(svc->vcpu);
+ spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
BUG_ON(svc->rqd != RQD(ops, svc->vcpu->processor));
svc->weight = sdom->weight;
update_max_weight(svc->rqd, svc->weight, old_weight);
- vcpu_schedule_unlock(svc->vcpu);
+ vcpu_schedule_unlock(lock, svc->vcpu);
}
}
}
cpumask_set_cpu(cpu, &rqd->idle);
cpumask_set_cpu(cpu, &rqd->active);
+ /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
spin_unlock(old_lock);
cpumask_set_cpu(cpu, &prv->initialized);
if ( EDOM_INFO(p)->weight )
{
/* Interrupts already off */
- vcpu_schedule_lock(p);
+ spinlock_t *lock = vcpu_schedule_lock(p);
+
EDOM_INFO(p)->period_orig =
EDOM_INFO(p)->period = WEIGHT_PERIOD;
EDOM_INFO(p)->slice_orig =
EDOM_INFO(p)->slice =
(EDOM_INFO(p)->weight *
(WEIGHT_PERIOD - WEIGHT_SAFETY - sumt[cpu])) / sumw[cpu];
- vcpu_schedule_unlock(p);
+
+ vcpu_schedule_unlock(lock, p);
}
}
}
{
/* (Here and everywhere in the following) IRQs are already off,
 * hence vcpu_schedule_lock() is the one. */
- vcpu_schedule_lock(v);
+ spinlock_t *lock = vcpu_schedule_lock(v);
+
EDOM_INFO(v)->extraweight = op->u.sedf.weight;
EDOM_INFO(v)->weight = 0;
EDOM_INFO(v)->slice = 0;
EDOM_INFO(v)->period = WEIGHT_PERIOD;
- vcpu_schedule_unlock(v);
+ vcpu_schedule_unlock(lock, v);
}
}
else
{
/* Weight-driven domains with real-time execution */
- for_each_vcpu ( p, v ) {
- vcpu_schedule_lock(v);
+ for_each_vcpu ( p, v )
+ {
+ spinlock_t *lock = vcpu_schedule_lock(v);
+
EDOM_INFO(v)->weight = op->u.sedf.weight;
- vcpu_schedule_unlock(v);
+ vcpu_schedule_unlock(lock, v);
}
}
}
/* Time-driven domains */
for_each_vcpu ( p, v )
{
- vcpu_schedule_lock(v);
+ spinlock_t *lock = vcpu_schedule_lock(v);
+
EDOM_INFO(v)->weight = 0;
EDOM_INFO(v)->extraweight = 0;
EDOM_INFO(v)->period_orig =
EDOM_INFO(v)->period = op->u.sedf.period;
EDOM_INFO(v)->slice_orig =
EDOM_INFO(v)->slice = op->u.sedf.slice;
- vcpu_schedule_unlock(v);
+ vcpu_schedule_unlock(lock, v);
}
}
for_each_vcpu ( p, v )
{
- vcpu_schedule_lock(v);
+ spinlock_t *lock = vcpu_schedule_lock(v);
+
EDOM_INFO(v)->status =
(EDOM_INFO(v)->status &
~EXTRA_AWARE) | (op->u.sedf.extratime & EXTRA_AWARE);
EDOM_INFO(v)->latency = op->u.sedf.latency;
extraq_check(v);
- vcpu_schedule_unlock(v);
+ vcpu_schedule_unlock(lock, v);
}
}
else if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
{
+ spinlock_t *lock = likely(v == current) ? NULL : vcpu_schedule_lock_irq(v);
s_time_t delta;
- if ( unlikely(v != current) )
- vcpu_schedule_lock_irq(v);
-
memcpy(runstate, &v->runstate, sizeof(*runstate));
delta = NOW() - runstate->state_entry_time;
if ( delta > 0 )
runstate->time[runstate->state] += delta;
- if ( unlikely(v != current) )
- vcpu_schedule_unlock_irq(v);
+ if ( unlikely(lock != NULL) )
+ vcpu_schedule_unlock_irq(lock, v);
}
uint64_t get_cpu_idle_time(unsigned int cpu)
void vcpu_sleep_nosync(struct vcpu *v)
{
unsigned long flags;
-
- vcpu_schedule_lock_irqsave(v, flags);
+ spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
if ( likely(!vcpu_runnable(v)) )
{
SCHED_OP(VCPU2OP(v), sleep, v);
}
- vcpu_schedule_unlock_irqrestore(v, flags);
+ vcpu_schedule_unlock_irqrestore(lock, flags, v);
TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
}
void vcpu_wake(struct vcpu *v)
{
unsigned long flags;
-
- vcpu_schedule_lock_irqsave(v, flags);
+ spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
if ( likely(vcpu_runnable(v)) )
{
vcpu_runstate_change(v, RUNSTATE_offline, NOW());
}
- vcpu_schedule_unlock_irqrestore(v, flags);
+ vcpu_schedule_unlock_irqrestore(lock, flags, v);
TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
}
*/
void vcpu_force_reschedule(struct vcpu *v)
{
- vcpu_schedule_lock_irq(v);
+ spinlock_t *lock = vcpu_schedule_lock_irq(v);
+
if ( v->is_running )
set_bit(_VPF_migrating, &v->pause_flags);
- vcpu_schedule_unlock_irq(v);
+ vcpu_schedule_unlock_irq(lock, v);
if ( test_bit(_VPF_migrating, &v->pause_flags) )
{
for_each_vcpu ( d, v )
{
- vcpu_schedule_lock_irq(v);
+ spinlock_t *lock = vcpu_schedule_lock_irq(v);
if ( v->affinity_broken )
{
if ( v->processor == smp_processor_id() )
{
set_bit(_VPF_migrating, &v->pause_flags);
- vcpu_schedule_unlock_irq(v);
+ vcpu_schedule_unlock_irq(lock, v);
vcpu_sleep_nosync(v);
vcpu_migrate(v);
}
else
{
- vcpu_schedule_unlock_irq(v);
+ vcpu_schedule_unlock_irq(lock, v);
}
}
{
for_each_vcpu ( d, v )
{
- vcpu_schedule_lock_irq(v);
+ spinlock_t *lock = vcpu_schedule_lock_irq(v);
cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
if ( cpumask_empty(&online_affinity) &&
if ( v->processor == cpu )
{
set_bit(_VPF_migrating, &v->pause_flags);
- vcpu_schedule_unlock_irq(v);
+ vcpu_schedule_unlock_irq(lock, v);
vcpu_sleep_nosync(v);
vcpu_migrate(v);
}
else
{
- vcpu_schedule_unlock_irq(v);
+ vcpu_schedule_unlock_irq(lock, v);
}
/*
{
cpumask_t online_affinity;
cpumask_t *online;
+ spinlock_t *lock;
if ( v->domain->is_pinned )
return -EINVAL;
if ( cpumask_empty(&online_affinity) )
return -EINVAL;
- vcpu_schedule_lock_irq(v);
+ lock = vcpu_schedule_lock_irq(v);
cpumask_copy(v->cpu_affinity, affinity);
* when changing the affinity */
set_bit(_VPF_migrating, &v->pause_flags);
- vcpu_schedule_unlock_irq(v);
+ vcpu_schedule_unlock_irq(lock, v);
domain_update_node_affinity(v->domain);
static long do_yield(void)
{
struct vcpu * v=current;
+ spinlock_t *lock = vcpu_schedule_lock_irq(v);
- vcpu_schedule_lock_irq(v);
SCHED_OP(VCPU2OP(v), yield, v);
- vcpu_schedule_unlock_irq(v);
+ vcpu_schedule_unlock_irq(lock, v);
TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
raise_softirq(SCHEDULE_SOFTIRQ);
unsigned long *tasklet_work = &this_cpu(tasklet_work_to_do);
bool_t tasklet_work_scheduled = 0;
struct schedule_data *sd;
+ spinlock_t *lock;
struct task_slice next_slice;
int cpu = smp_processor_id();
BUG();
}
- pcpu_schedule_lock_irq(cpu);
+ lock = pcpu_schedule_lock_irq(cpu);
stop_timer(&sd->s_timer);
if ( unlikely(prev == next) )
{
- pcpu_schedule_unlock_irq(cpu);
+ pcpu_schedule_unlock_irq(lock, cpu);
trace_continue_running(next);
return continue_running(prev);
}
ASSERT(!next->is_running);
next->is_running = 1;
- pcpu_schedule_unlock_irq(cpu);
+ pcpu_schedule_unlock_irq(lock, cpu);
SCHED_STAT_CRANK(sched_ctx);
{
unsigned long flags;
struct vcpu *idle;
+ spinlock_t *lock;
void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
struct scheduler *old_ops = per_cpu(scheduler, cpu);
struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
return -ENOMEM;
}
- pcpu_schedule_lock_irqsave(cpu, flags);
+ lock = pcpu_schedule_lock_irqsave(cpu, &flags);
SCHED_OP(old_ops, tick_suspend, cpu);
vpriv_old = idle->sched_priv;
SCHED_OP(new_ops, tick_resume, cpu);
SCHED_OP(new_ops, insert_vcpu, idle);
- pcpu_schedule_unlock_irqrestore(cpu, flags);
+ pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
SCHED_OP(old_ops, free_vdata, vpriv_old);
SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
for_each_cpu (i, cpus)
{
- pcpu_schedule_lock(i);
+ spinlock_t *lock = pcpu_schedule_lock(i);
+
printk("CPU[%02d] ", i);
SCHED_OP(sched, dump_cpu_state, i);
- pcpu_schedule_unlock(i);
+ pcpu_schedule_unlock(lock, i);
}
}
DECLARE_PER_CPU(struct scheduler *, scheduler);
DECLARE_PER_CPU(struct cpupool *, cpupool);
-static inline spinlock_t * pcpu_schedule_lock(int cpu)
-{
- spinlock_t * lock=NULL;
-
- for ( ; ; )
- {
- /* The per_cpu(v->processor) may also change, if changing
- * cpu pool also changes the scheduler lock. Retry
- * until they match.
- */
- lock=per_cpu(schedule_data, cpu).schedule_lock;
-
- spin_lock(lock);
- if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) )
- break;
- spin_unlock(lock);
- }
- return lock;
+#define sched_lock(kind, param, cpu, irq, arg...) \
+static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
+{ \
+ for ( ; ; ) \
+ { \
+ spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
+ /* \
+ * v->processor may change when grabbing the lock; but \
+ * per_cpu(v->processor) may also change, if changing cpu pool \
+ * also changes the scheduler lock. Retry until they match. \
+ * \
+ * It may also be the case that v->processor may change but the \
+ * lock may be the same; this will succeed in that case. \
+ */ \
+ spin_lock##irq(lock, ## arg); \
+ if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
+ return lock; \
+ spin_unlock##irq(lock, ## arg); \
+ } \
}
-static inline int pcpu_schedule_trylock(int cpu)
-{
- spinlock_t * lock=NULL;
-
- lock=per_cpu(schedule_data, cpu).schedule_lock;
- if ( ! spin_trylock(lock) )
- return 0;
- if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
- return 1;
- else
- {
- spin_unlock(lock);
- return 0;
- }
-}
-
-#define pcpu_schedule_lock_irq(p) \
- do { local_irq_disable(); pcpu_schedule_lock(p); } while ( 0 )
-#define pcpu_schedule_lock_irqsave(p, flags) \
- do { local_irq_save(flags); pcpu_schedule_lock(p); } while ( 0 )
-
-static inline void pcpu_schedule_unlock(int cpu)
-{
- spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
+#define sched_unlock(kind, param, cpu, irq, arg...) \
+static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
+ EXTRA_TYPE(arg), param) \
+{ \
+ ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
+ spin_unlock##irq(lock, ## arg); \
}
-#define pcpu_schedule_unlock_irq(p) \
- do { pcpu_schedule_unlock(p); local_irq_enable(); } while ( 0 )
-#define pcpu_schedule_unlock_irqrestore(p, flags) \
- do { pcpu_schedule_unlock(p); local_irq_restore(flags); } while ( 0 )
-
-static inline void vcpu_schedule_lock(struct vcpu *v)
+#define EXTRA_TYPE(arg)
+sched_lock(pcpu, unsigned int cpu, cpu, )
+sched_lock(vcpu, const struct vcpu *v, v->processor, )
+sched_lock(pcpu, unsigned int cpu, cpu, _irq)
+sched_lock(vcpu, const struct vcpu *v, v->processor, _irq)
+sched_unlock(pcpu, unsigned int cpu, cpu, )
+sched_unlock(vcpu, const struct vcpu *v, v->processor, )
+sched_unlock(pcpu, unsigned int cpu, cpu, _irq)
+sched_unlock(vcpu, const struct vcpu *v, v->processor, _irq)
+#undef EXTRA_TYPE
+
+#define EXTRA_TYPE(arg) , unsigned long arg
+#define spin_unlock_irqsave spin_unlock_irqrestore
+sched_lock(pcpu, unsigned int cpu, cpu, _irqsave, *flags)
+sched_lock(vcpu, const struct vcpu *v, v->processor, _irqsave, *flags)
+#undef spin_unlock_irqsave
+sched_unlock(pcpu, unsigned int cpu, cpu, _irqrestore, flags)
+sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
+#undef EXTRA_TYPE
+
+#undef sched_unlock
+#undef sched_lock
+
+static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
{
- spinlock_t * lock;
-
- for ( ; ; )
- {
- /* v->processor may change when grabbing the lock; but
- * per_cpu(v->processor) may also change, if changing
- * cpu pool also changes the scheduler lock. Retry
- * until they match.
- *
- * It may also be the case that v->processor may change
- * but the lock may be the same; this will succeed
- * in that case.
- */
- lock=per_cpu(schedule_data, v->processor).schedule_lock;
-
- spin_lock(lock);
- if ( likely(lock == per_cpu(schedule_data, v->processor).schedule_lock) )
- break;
- spin_unlock(lock);
- }
-}
-
-#define vcpu_schedule_lock_irq(v) \
- do { local_irq_disable(); vcpu_schedule_lock(v); } while ( 0 )
-#define vcpu_schedule_lock_irqsave(v, flags) \
- do { local_irq_save(flags); vcpu_schedule_lock(v); } while ( 0 )
+ spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;
-static inline void vcpu_schedule_unlock(struct vcpu *v)
-{
- spin_unlock(per_cpu(schedule_data, v->processor).schedule_lock);
+ if ( !spin_trylock(lock) )
+ return NULL;
+ if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+ return lock;
+ spin_unlock(lock);
+ return NULL;
}
-#define vcpu_schedule_unlock_irq(v) \
- do { vcpu_schedule_unlock(v); local_irq_enable(); } while ( 0 )
-#define vcpu_schedule_unlock_irqrestore(v, flags) \
- do { vcpu_schedule_unlock(v); local_irq_restore(flags); } while ( 0 )
-
struct task_slice {
struct vcpu *task;
s_time_t time;