sched_move_irqs(v);
}
-static void vcpu_migrate(struct vcpu *v)
+/*
+ * Initiating migration
+ *
+ * In order to migrate, we need the vcpu in question to have stopped
+ * running and had SCHED_OP(sleep) called (to take it off any
+ * runqueues, for instance); and if it is currently running, it needs
+ * to be scheduled out. Finally, we need to hold the scheduling locks
+ * for both the processor we're migrating from, and the processor
+ * we're migrating to.
+ *
+ * In order to avoid deadlock while satisfying the final requirement,
+ * we must release any scheduling lock we hold, then try to grab both
+ * locks we want, then double-check to make sure that what we started
+ * to do hasn't been changed in the meantime.
+ *
+ * These steps are encapsulated in the following two functions; they
+ * should be called like this:
+ *
+ * lock = vcpu_schedule_lock_irq(v);
+ * vcpu_migrate_start(v);
+ * vcpu_schedule_unlock_irq(lock, v);
+ * vcpu_migrate_finish(v);
+ *
+ * vcpu_migrate_finish() will do the work now if it can, or simply
+ * return if it can't (because v is still running); in that case
+ * vcpu_migrate_finish() will be called by context_saved().
+ */
+static void vcpu_migrate_start(struct vcpu *v)
+{
+ set_bit(_VPF_migrating, &v->pause_flags);
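+ /*
+ * The caller already holds the scheduling lock, so use the _locked
+ * variant of sleep to take v off any runqueue it may be on.
+ */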
+ vcpu_sleep_nosync_locked(v);
+}
+
+static void vcpu_migrate_finish(struct vcpu *v)
{
unsigned long flags;
unsigned int old_cpu, new_cpu;
spinlock_t *old_lock, *new_lock;
bool_t pick_called = 0;
+ /*
+ * If the vcpu is currently running, this will be handled by
+ * context_saved(); and in any case, if the bit is cleared, then
+ * someone else has already done the work so we don't need to.
+ */
+ if ( v->is_running || !test_bit(_VPF_migrating, &v->pause_flags) )
+ return;
+
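+ /*
+ * As described above: take the scheduling locks for both the old
+ * and the candidate new processor, and double-check that nothing
+ * has changed before actually moving the vcpu.
+ */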
old_cpu = new_cpu = v->processor;
for ( ; ; )
{
spinlock_t *lock = vcpu_schedule_lock_irq(v);
if ( v->is_running )
- set_bit(_VPF_migrating, &v->pause_flags);
+ vcpu_migrate_start(v);
+
vcpu_schedule_unlock_irq(lock, v);
- if ( v->pause_flags & VPF_migrating )
- {
- vcpu_sleep_nosync(v);
- vcpu_migrate(v);
- }
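+ /*
+ * Do the actual migration now if we can; if v is still running,
+ * context_saved() will call vcpu_migrate_finish() for us.
+ */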
+ vcpu_migrate_finish(v);
}
void restore_vcpu_affinity(struct domain *d)
* * the scheduler will always find a suitable solution, or
* things would have failed before getting in here.
*/
- set_bit(_VPF_migrating, &v->pause_flags);
+ vcpu_migrate_start(v);
vcpu_schedule_unlock_irqrestore(lock, flags, v);
- vcpu_sleep_nosync(v);
- vcpu_migrate(v);
+
+ vcpu_migrate_finish(v);
/*
* The only caveat, in this case, is that if a vcpu active in
ASSERT(which == v->cpu_soft_affinity);
sched_set_affinity(v, NULL, affinity);
}
- set_bit(_VPF_migrating, &v->pause_flags);
+ vcpu_migrate_start(v);
}
vcpu_schedule_unlock_irq(lock, v);
domain_update_node_affinity(v->domain);
- if ( v->pause_flags & VPF_migrating )
- {
- vcpu_sleep_nosync(v);
- vcpu_migrate(v);
- }
+ vcpu_migrate_finish(v);
return ret;
}
{
sched_set_affinity(v, v->cpu_hard_affinity_saved, NULL);
v->affinity_broken = 0;
- set_bit(_VPF_migrating, &v->pause_flags);
ret = 0;
}
}
cpumask_copy(v->cpu_hard_affinity_saved, v->cpu_hard_affinity);
v->affinity_broken = 1;
sched_set_affinity(v, cpumask_of(cpu), NULL);
- set_bit(_VPF_migrating, &v->pause_flags);
ret = 0;
}
}
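+ /*
+ * If the affinity was successfully changed above, flag v for
+ * migration while we still hold the scheduling lock.
+ */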
+ if ( ret == 0 )
+ vcpu_migrate_start(v);
+
vcpu_schedule_unlock_irq(lock, v);
domain_update_node_affinity(v->domain);
- if ( v->pause_flags & VPF_migrating )
- {
- vcpu_sleep_nosync(v);
- vcpu_migrate(v);
- }
+ vcpu_migrate_finish(v);
return ret;
}
SCHED_OP(vcpu_scheduler(prev), context_saved, prev);
- if ( unlikely(prev->pause_flags & VPF_migrating) )
- vcpu_migrate(prev);
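+ /*
+ * If a migration was started while prev was still running, finish
+ * it now that prev has been scheduled out.
+ */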
+ vcpu_migrate_finish(prev);
}
/* The scheduler timer: force a run through the scheduler */