if (cpu != current->processor)
return;
local_irq_save(flags);
- if (!spin_trylock(per_cpu(schedule_data, cpu).schedule_lock))
+ if (!pcpu_schedule_trylock(cpu))
goto bail2;
if (v->processor != cpu)
goto bail1;
ia64_dv_serialize_data();
args->vcpu = NULL;
bail1:
- spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
+ pcpu_schedule_unlock(cpu);
bail2:
local_irq_restore(flags);
}
spc->runq_sort_last = sort_epoch;
- spin_lock_irqsave(per_cpu(schedule_data, cpu).schedule_lock, flags);
+ pcpu_schedule_lock_irqsave(cpu, flags);
runq = &spc->runq;
elem = runq->next;
elem = next;
}
- spin_unlock_irqrestore(per_cpu(schedule_data, cpu).schedule_lock, flags);
+ pcpu_schedule_unlock_irqrestore(cpu, flags);
}
static void
* cause a deadlock if the peer CPU is also load balancing and trying
* to lock this CPU.
*/
- if ( !spin_trylock(per_cpu(schedule_data, peer_cpu).schedule_lock) )
+ if ( !pcpu_schedule_trylock(peer_cpu) )
{
CSCHED_STAT_CRANK(steal_trylock_failed);
continue;
* Any work over there to steal?
*/
speer = csched_runq_steal(peer_cpu, cpu, snext->pri);
- spin_unlock(per_cpu(schedule_data, peer_cpu).schedule_lock);
+ pcpu_schedule_unlock(peer_cpu);
if ( speer != NULL )
{
*stolen = 1;
atomic_dec(&per_cpu(schedule_data, old_cpu).urgent_count);
}
- /* Switch to new CPU, then unlock old CPU. */
+ /* Switch to new CPU, then unlock old CPU. This is safe because
+ * the lock pointer can't change while the current lock is held. */
v->processor = new_cpu;
spin_unlock_irqrestore(
per_cpu(schedule_data, old_cpu).schedule_lock, flags);
ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
vpriv = SCHED_OP(new_ops, alloc_vdata, idle, idle->domain->sched_priv);
- spin_lock_irqsave(per_cpu(schedule_data, cpu).schedule_lock, flags);
+ pcpu_schedule_lock_irqsave(cpu, flags);
SCHED_OP(old_ops, tick_suspend, cpu);
vpriv_old = idle->sched_priv;
SCHED_OP(new_ops, tick_resume, cpu);
SCHED_OP(new_ops, insert_vcpu, idle);
- spin_unlock_irqrestore(per_cpu(schedule_data, cpu).schedule_lock, flags);
+ pcpu_schedule_unlock_irqrestore(cpu, flags);
SCHED_OP(old_ops, free_vdata, vpriv_old);
SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
for_each_cpu_mask (i, *cpus)
{
- spin_lock(per_cpu(schedule_data, i).schedule_lock);
+ pcpu_schedule_lock(i);
printk("CPU[%02d] ", i);
SCHED_OP(sched, dump_cpu_state, i);
- spin_unlock(per_cpu(schedule_data, i).schedule_lock);
+ pcpu_schedule_unlock(i);
}
}
DECLARE_PER_CPU(struct scheduler *, scheduler);
DECLARE_PER_CPU(struct cpupool *, cpupool);
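+
+/*
+ * Acquire the given cpu's scheduler lock and return the lock actually
+ * taken.  The per-cpu lock pointer may change while we are waiting for
+ * it, so it is re-checked after acquisition (see the retry loop below).
+ */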
+static inline spinlock_t *pcpu_schedule_lock(int cpu)
+{
+    spinlock_t *lock = NULL;
+
+    for ( ; ; )
+    {
+        /* The schedule_lock pointer for this cpu may change while we
+         * are waiting for it, e.g. if the cpu is moved to a different
+         * cpupool (which changes its scheduler and hence its lock).
+         * Retry until the lock we acquired matches the current pointer.
+         */
+        lock = per_cpu(schedule_data, cpu).schedule_lock;
+
+        spin_lock(lock);
+        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) )
+            break;
+        spin_unlock(lock);
+    }
+    return lock;
+}
+
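+/*
+ * Try once to acquire the given cpu's scheduler lock.  Returns 1 with
+ * the lock held on success, 0 on failure (including when the lock
+ * pointer changed between reading it and acquiring it).
+ */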
+static inline int pcpu_schedule_trylock(int cpu)
+{
+    spinlock_t *lock = NULL;
+
+    lock = per_cpu(schedule_data, cpu).schedule_lock;
+    if ( !spin_trylock(lock) )
+        return 0;
+    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+        return 1;
+    else
+    {
+        spin_unlock(lock);
+        return 0;
+    }
+}
+
+#define pcpu_schedule_lock_irq(p) \
+    do { local_irq_disable(); pcpu_schedule_lock(p); } while ( 0 )
+#define pcpu_schedule_lock_irqsave(p, flags) \
+    do { local_irq_save(flags); pcpu_schedule_lock(p); } while ( 0 )
+
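+/*
+ * Release the given cpu's scheduler lock.  Re-reading the lock pointer
+ * here is safe: it cannot change while the lock it points to is held.
+ */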
+static inline void pcpu_schedule_unlock(int cpu)
+{
+    spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
+}
+
+#define pcpu_schedule_unlock_irq(p) \
+    do { pcpu_schedule_unlock(p); local_irq_enable(); } while ( 0 )
+#define pcpu_schedule_unlock_irqrestore(p, flags) \
+    do { pcpu_schedule_unlock(p); local_irq_restore(flags); } while ( 0 )
+
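+/*
+ * Illustrative use of the helpers above, mirroring the call sites
+ * converted elsewhere in this patch:
+ *
+ *     pcpu_schedule_lock_irqsave(cpu, flags);
+ *     ... access per_cpu(schedule_data, cpu) / the cpu's run queue ...
+ *     pcpu_schedule_unlock_irqrestore(cpu, flags);
+ */
+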
static inline void vcpu_schedule_lock(struct vcpu *v)
{
spinlock_t * lock;