static struct scheduler __read_mostly ops;
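
+/*
+ * When false the scheduler is disabled: the scheduling softirqs are
+ * diverted to a dummy handler and the rendezvous wait loop bails out.
+ */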
+static bool scheduler_active;
+
static void sched_set_affinity(
    struct sched_unit *unit, const cpumask_t *hard, const cpumask_t *soft);
        cpu_relax();

        *lock = pcpu_schedule_lock_irq(cpu);
+
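+        /*
+         * The scheduler was disabled while this CPU was waiting for the
+         * rendezvous: the other CPUs will never join it, so reset both
+         * counters to let the idle unit leave the wait loop.
+         */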
+        if ( unlikely(!scheduler_active) )
+        {
+            ASSERT(is_idle_unit(prev));
+            atomic_set(&prev->next_task->rendezvous_out_cnt, 0);
+            prev->rendezvous_in_cnt = 0;
+        }
    }

    return prev->next_task;
    return mask;
}
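+/*
+ * Stand-in handler for the scheduling softirqs while the scheduler is
+ * disabled: only check for pending tasklet work, do no real scheduling.
+ */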
+static void schedule_dummy(void)
+{
+    sched_tasklet_check_cpu(smp_processor_id());
+}
+
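+/*
+ * Stop real scheduling: clear scheduler_active and divert both scheduling
+ * softirqs to the dummy handler.
+ */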
+void scheduler_disable(void)
+{
+    scheduler_active = false;
+    open_softirq(SCHEDULE_SOFTIRQ, schedule_dummy);
+    open_softirq(SCHED_SLAVE_SOFTIRQ, schedule_dummy);
+}
+
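+/*
+ * Resume real scheduling: restore the regular softirq handlers before
+ * marking the scheduler active again.
+ */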
+void scheduler_enable(void)
+{
+    open_softirq(SCHEDULE_SOFTIRQ, schedule);
+    open_softirq(SCHED_SLAVE_SOFTIRQ, sched_slave);
+    scheduler_active = true;
+}
+
/* Initialise the data structures. */
void __init scheduler_init(void)
{
    struct domain *idle_domain;
    int i;

-    open_softirq(SCHEDULE_SOFTIRQ, schedule);
-    open_softirq(SCHED_SLAVE_SOFTIRQ, sched_slave);
+    scheduler_enable();
    for ( i = 0; i < NUM_SCHEDULERS; i++)
    {
void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
uint64_t get_cpu_idle_time(unsigned int cpu);
void sched_guest_idle(void (*idle) (void), unsigned int cpu);
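+/* Switch between real scheduling and the dummy softirq handlers. */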
+void scheduler_enable(void);
+void scheduler_disable(void);

/*
* Used by idle loop to decide whether there is work to do: