static void default_idle(void)
{
local_irq_disable();
- if ( !softirq_pending(smp_processor_id()) ) {
+ if ( cpu_is_haltable(smp_processor_id()) ) {
if (can_do_pal_halt)
safe_halt();
else
#else
irq_stat[cpu].idle_timestamp = jiffies;
#endif
- while ( !softirq_pending(cpu) )
+ while ( cpu_is_haltable(cpu) )
default_idle();
raise_softirq(SCHEDULE_SOFTIRQ);
+ do_tasklet();
do_softirq();
if (!cpu_online(cpu))
play_dead();
*/
local_irq_disable();
- if ( softirq_pending(smp_processor_id()) ||
- cpu_is_offline(smp_processor_id()) )
+ if ( !cpu_is_haltable(smp_processor_id()) )
{
local_irq_enable();
sched_tick_resume();
static void default_idle(void)
{
local_irq_disable();
- if ( !softirq_pending(smp_processor_id()) &&
- cpu_online(smp_processor_id()) )
+ if ( cpu_is_haltable(smp_processor_id()) )
safe_halt();
else
local_irq_enable();
if ( cpu_is_offline(smp_processor_id()) )
play_dead();
(*pm_idle)();
+ do_tasklet();
do_softirq();
}
}
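For reference, a minimal sketch of how the x86 idle path reads once the hunks above are applied; the enclosing for-loop and the idle_loop name are not part of the hunks shown and are assumed here purely for illustration:

    static void default_idle(void)
    {
        local_irq_disable();
        if ( cpu_is_haltable(smp_processor_id()) ) /* no softirqs/tasklets, CPU online */
            safe_halt();
        else
            local_irq_enable();
    }

    void idle_loop(void)
    {
        for ( ; ; )
        {
            if ( cpu_is_offline(smp_processor_id()) )
                play_dead();
            (*pm_idle)();   /* e.g. default_idle() */
            do_tasklet();   /* run pending tasklet work in this idle VCPU's context */
            do_softirq();
        }
    }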
CSCHED_STAT_CRANK(schedule);
CSCHED_VCPU_CHECK(current);
- /* Update credits */
if ( !is_idle_vcpu(scurr->vcpu) )
{
+ /* Update credits of a non-idle VCPU. */
burn_credits(scurr, now);
scurr->start_time -= now;
}
+ else
+ {
+ /* Re-instate a boosted idle VCPU as normal-idle. */
+ scurr->pri = CSCHED_PRI_IDLE;
+ }
/*
* Select next runnable local VCPU (ie top of local runq)
snext = __runq_elem(runq->next);
+ /* Tasklet work (which runs in idle VCPU context) overrides all else. */
+ if ( !tasklet_queue_empty(cpu) )
+ {
+ snext = CSCHED_VCPU(idle_vcpu[cpu]);
+ snext->pri = CSCHED_PRI_TS_BOOST;
+ }
+
/*
* SMP Load balance:
*
struct csched_private {
spinlock_t lock;
uint32_t ncpus;
- struct domain *idle_domain;
struct list_head sdom; /* Used mostly for dump keyhandler. */
BUG_ON( sdom != NULL );
svc->credit = CSCHED_IDLE_CREDIT;
svc->weight = 0;
- if ( csched_priv.idle_domain == NULL )
- csched_priv.idle_domain = dom;
}
CSCHED_VCPU_CHECK(vc);
/* Update credits */
burn_credits(rqd, scurr, now);
+ /* Tasklet work (which runs in idle VCPU context) overrides all else. */
+ if ( !tasklet_queue_empty(cpu) )
+ {
+ snext = CSCHED_VCPU(idle_vcpu[cpu]);
+ goto out;
+ }
+
/*
* Select next runnable local VCPU (ie top of local runq).
*
* vcpu for this processor.
*/
if ( list_empty(runq) )
- snext = CSCHED_VCPU(csched_priv.idle_domain->vcpu[cpu]);
+ snext = CSCHED_VCPU(idle_vcpu[cpu]);
else
snext = __runq_elem(runq->next);
snext->start_time = now;
snext->vcpu->processor = cpu; /* Safe because lock for old processor is held */
}
+
+ out:
/*
* Return task to run next...
*/
/*now simply pick the first domain from the runqueue, which has the
earliest deadline, because the list is sorted*/
- if ( !list_empty(runq) )
+ /* Tasklet work (which runs in idle VCPU context) overrides all else. */
+ if ( !tasklet_queue_empty(cpu) || (list_empty(runq) && list_empty(waitq)) )
+ {
+ ret.task = IDLETASK(cpu);
+ ret.time = SECONDS(1);
+ }
+ else if ( !list_empty(runq) )
{
runinf = list_entry(runq->next,struct sedf_vcpu_info,list);
ret.task = runinf->vcpu;
{
ret.time = runinf->slice - runinf->cputime;
}
- CHECK(ret.time > 0);
- goto sched_done;
}
-
- if ( !list_empty(waitq) )
+ else
{
waitinf = list_entry(waitq->next,struct sedf_vcpu_info, list);
/*we could not find any suitable domain
=> look for domains that are aware of extratime*/
ret = sedf_do_extra_schedule(now, PERIOD_BEGIN(waitinf),
extraq, cpu);
- CHECK(ret.time > 0);
- }
- else
- {
- /*this could probably never happen, but one never knows...*/
- /*it can... imagine a second CPU, which is pure scifi ATM,
- but one never knows ;)*/
- ret.task = IDLETASK(cpu);
- ret.time = SECONDS(1);
}
- sched_done:
/*TODO: Do something USEFUL when this happens and find out, why it
still can happen!!!*/
if ( ret.time < 0)
/******************************************************************************
* tasklet.c
*
- * Dynamically-allocatable tasks run in softirq context on at most one CPU at
- * a time.
+ * Tasklets are dynamically-allocatable tasks run in VCPU context
+ * (specifically, the idle VCPU's context) on at most one CPU at a time.
*
* Copyright (c) 2010, Citrix Systems, Inc.
* Copyright (c) 1992, Linus Torvalds
#include <xen/softirq.h>
#include <xen/tasklet.h>
+/* Some subsystems call into us before we are initialised. We ignore them. */
static bool_t tasklets_initialised;
+
+/*
+ * NB. Any modification to a tasklet_list requires the scheduler to run
+ * on the related CPU so that its idle VCPU's priority is re-evaluated;
+ * hence every list update below also raises SCHEDULE_SOFTIRQ on that CPU.
+ */
static DEFINE_PER_CPU(struct list_head, tasklet_list);
+
+/* Protects all lists and tasklet structures. */
static DEFINE_SPINLOCK(tasklet_lock);
void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
{
list_del(&t->list);
list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
- cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
}
}
tasklet_schedule_on_cpu(t, smp_processor_id());
}
-static void tasklet_action(void)
+void do_tasklet(void)
{
unsigned int cpu = smp_processor_id();
struct list_head *list = &per_cpu(tasklet_list, cpu);
BUG_ON(t->is_dead || !list_empty(&t->list));
list_add_tail(&t->list, &per_cpu(tasklet_list, t->scheduled_on));
if ( t->scheduled_on != cpu )
- cpu_raise_softirq(t->scheduled_on, TASKLET_SOFTIRQ);
+ cpu_raise_softirq(t->scheduled_on, SCHEDULE_SOFTIRQ);
}
- /*
- * If there is more work to do then reschedule. We don't grab more work
- * immediately as we want to allow other softirq work to happen first.
- */
- if ( !list_empty(list) )
- raise_softirq(TASKLET_SOFTIRQ);
+ raise_softirq(SCHEDULE_SOFTIRQ);
spin_unlock_irq(&tasklet_lock);
}
+bool_t tasklet_queue_empty(unsigned int cpu)
+{
+ return list_empty(&per_cpu(tasklet_list, cpu));
+}
+
void tasklet_kill(struct tasklet *t)
{
unsigned long flags;
{
BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0));
list_del_init(&t->list);
+ cpu_raise_softirq(t->scheduled_on, SCHEDULE_SOFTIRQ);
}
+
t->scheduled_on = -1;
t->is_dead = 1;
list_add_tail(&t->list, &this_cpu(tasklet_list));
}
- raise_softirq(TASKLET_SOFTIRQ);
+ raise_softirq(SCHEDULE_SOFTIRQ);
spin_unlock_irqrestore(&tasklet_lock, flags);
}
for_each_possible_cpu ( cpu )
INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
- open_softirq(TASKLET_SOFTIRQ, tasklet_action);
-
tasklets_initialised = 1;
}
void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
uint64_t get_cpu_idle_time(unsigned int cpu);
+/*
+ * Used by the idle loop to decide whether there is outstanding work, i.e.
+ * whether it must (1) run softirqs, (2) play dead, or (3) run tasklets.
+ */
+#define cpu_is_haltable(cpu) \
+ (!softirq_pending(cpu) && cpu_online(cpu) && tasklet_queue_empty(cpu))
+
#define IS_PRIV(_d) ((_d)->is_privileged)
#define IS_PRIV_FOR(_d, _t) (IS_PRIV(_d) || ((_d)->target && (_d)->target == (_t)))
PAGE_SCRUB_SOFTIRQ,
RCU_SOFTIRQ,
STOPMACHINE_SOFTIRQ,
- TASKLET_SOFTIRQ,
NR_COMMON_SOFTIRQS
};
/******************************************************************************
* tasklet.h
*
- * Dynamically-allocatable tasks run in softirq context on at most one CPU at
- * a time.
+ * Tasklets are dynamically-allocatable tasks run in VCPU context
+ * (specifically, the idle VCPU's context) on at most one CPU at a time.
*/
#ifndef __XEN_TASKLET_H__
void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu);
void tasklet_schedule(struct tasklet *t);
+void do_tasklet(void);
+bool_t tasklet_queue_empty(unsigned int cpu);
void tasklet_kill(struct tasklet *t);
void migrate_tasklets_from_cpu(unsigned int cpu);
void tasklet_init(
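As a usage illustration only (the subsystem, callback and data value below are hypothetical, and the tasklet_init prototype is inferred from the truncated declaration above as taking a function pointer and an unsigned long cookie), callers keep using the same API; what changes is that the callback now runs from the idle VCPU, woken via SCHEDULE_SOFTIRQ, rather than from TASKLET_SOFTIRQ:

    static struct tasklet my_work;

    static void my_work_fn(unsigned long data)
    {
        /* Runs in the idle VCPU's context, on at most one CPU at a time. */
    }

    static void my_subsystem_init(void)
    {
        tasklet_init(&my_work, my_work_fn, 0 /* data cookie */);
    }

    static void my_subsystem_poke(void)
    {
        /* Queues the tasklet and raises SCHEDULE_SOFTIRQ on the target CPU,
         * prompting the scheduler there to switch to its idle VCPU. */
        tasklet_schedule(&my_work);
    }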