#include <xen/init.h>
#include <xen/sched.h>
#include <xen/spinlock.h>
-#include <xen/softirq.h>
+#include <xen/tasklet.h>
#include <xen/stop_machine.h>
#include <xen/errno.h>
#include <xen/smp.h>
void *fn_data;
};
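+/* Per-CPU tasklet, scheduled on every other CPU by stop_machine_run(). */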
+static DEFINE_PER_CPU(struct tasklet, stopmachine_tasklet);
static struct stopmachine_data stopmachine_data;
static DEFINE_SPINLOCK(stopmachine_lock);
return (*fn)(data);
}
- /* Note: We shouldn't spin on lock when it's held by others since others
- * is expecting this cpus to enter softirq context. Or else deadlock
- * is caused.
- */
+    /* Must not spin: the lock holder expects this CPU to deschedule and
+     * run its stopmachine tasklet, so spinning here would deadlock. */
if ( !spin_trylock(&stopmachine_lock) )
return -EBUSY;
smp_wmb();
for_each_cpu_mask ( i, allbutself )
- cpu_raise_softirq(i, STOPMACHINE_SOFTIRQ);
+ tasklet_schedule_on_cpu(&per_cpu(stopmachine_tasklet, i), i);
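+    /* Flush lazily-held execution state on this CPU before continuing. */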
+ sync_local_execstate();
stopmachine_set_state(STOPMACHINE_PREPARE);
local_irq_disable();
return ret;
}
-static void stopmachine_softirq(void)
+static void stopmachine_action(unsigned long unused)
{
enum stopmachine_state state = STOPMACHINE_START;
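+    /* Sync any lazy execution state still held on this CPU. */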
+ sync_local_execstate();
smp_mb();
while ( state != STOPMACHINE_EXIT )
static int __init cpu_stopmachine_init(void)
{
- open_softirq(STOPMACHINE_SOFTIRQ, stopmachine_softirq);
+ unsigned int cpu;
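+
+    /* Point each CPU's tasklet at stopmachine_action(). */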
+ for_each_possible_cpu ( cpu )
+ tasklet_init(&per_cpu(stopmachine_tasklet, cpu),
+ stopmachine_action, 0);
return 0;
}
__initcall(cpu_stopmachine_init);
*/
void sync_vcpu_execstate(struct vcpu *v);
+/* As above, for any lazy execution state held on the local CPU. */
+void sync_local_execstate(void);
+
/*
* Called by the scheduler to switch to another VCPU. This function must
* call context_saved(@prev) when the local CPU is no longer running in