void idle_loop(void)
{
+ unsigned int cpu = smp_processor_id();
+
for ( ; ; )
{
- if ( cpu_is_offline(smp_processor_id()) )
+ if ( cpu_is_offline(cpu) )
stop_cpu();
- local_irq_disable();
- if ( cpu_is_haltable(smp_processor_id()) )
+ /* Are we here for running vcpu context tasklets, or for idling? */
+ if ( unlikely(tasklet_work_to_do(cpu)) )
+ do_tasklet();
+ else
{
- dsb(sy);
- wfi();
+ local_irq_disable();
+ if ( cpu_is_haltable(cpu) )
+ {
+ dsb(sy);
+ wfi();
+ }
+ local_irq_enable();
}
- local_irq_enable();
- do_tasklet();
do_softirq();
/*
* We MUST be last (or before dsb, wfi). Otherwise after we get the
static void idle_loop(void)
{
+ unsigned int cpu = smp_processor_id();
+
for ( ; ; )
{
- if ( cpu_is_offline(smp_processor_id()) )
+ if ( cpu_is_offline(cpu) )
play_dead();
- (*pm_idle)();
- do_tasklet();
+
+ /* Are we here for running vcpu context tasklets, or for idling? */
+ if ( unlikely(tasklet_work_to_do(cpu)) )
+ do_tasklet();
+ else
+ pm_idle();
do_softirq();
/*
* We MUST be last (or before pm_idle). Otherwise after we get the
struct list_head *list = &per_cpu(tasklet_list, cpu);
/*
- * Work must be enqueued *and* scheduled. Otherwise there is no work to
- * do, and/or scheduler needs to run to update idle vcpu priority.
+ * We want to be sure any caller has checked that a tasklet is both
+ * enqueued and scheduled, before calling this. And, if the caller has
+ * actually checked, it's not an issue that we are outside of the
+ * critical region, in fact:
+ * - TASKLET_enqueued is cleared only here,
+ * - TASKLET_scheduled is only cleared when schedule() finds it set,
+ * without TASKLET_enqueued being set as well.
*/
- if ( likely(*work_to_do != (TASKLET_enqueued|TASKLET_scheduled)) )
- return;
+ ASSERT(tasklet_work_to_do(cpu));
spin_lock_irq(&tasklet_lock);
/*
* Used by idle loop to decide whether there is work to do:
* (1) Run softirqs; or (2) Play dead; or (3) Run tasklets.
+ *
+ * About (3), if a tasklet is enqueued, it will be scheduled
+ * really really soon, and hence it's pointless to try to
+ * sleep between these two events (that's why we don't call
+ * the tasklet_work_to_do() helper).
*/
#define cpu_is_haltable(cpu) \
(!softirq_pending(cpu) && \
#define TASKLET_enqueued (1ul << _TASKLET_enqueued)
#define TASKLET_scheduled (1ul << _TASKLET_scheduled)
+static inline bool tasklet_work_to_do(unsigned int cpu)
+{
+ /*
+ * Work must be enqueued *and* scheduled. Otherwise there is no work to
+ * do, and/or scheduler needs to run to update idle vcpu priority.
+ */
+ return per_cpu(tasklet_work_to_do, cpu) == (TASKLET_enqueued|
+ TASKLET_scheduled);
+}
+
void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu);
void tasklet_schedule(struct tasklet *t);
void do_tasklet(void);