xsm_init(&initrdidx, mbi, initial_images_start);
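+ /* timer_init() must now run before init_idle_domain(): scheduler
+  * bring-up arms its tickers with set_timer() directly (see the
+  * set_timer() additions below) instead of from a late initcall. */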
+ timer_init();
+
init_idle_domain();
trap_init();
rcu_init();
- timer_init();
-
early_time_init();
arch_init_memory();
static struct cpupool *cpupool_list; /* linked list, sorted by poolid */
-static int cpupool0_max_cpus;
-integer_param("pool0_max_cpus", cpupool0_max_cpus);
-
static int cpupool_moving_cpu = -1;
static struct cpupool *cpupool_cpu_moving = NULL;
static cpumask_t cpupool_locked_cpus = CPU_MASK_NONE;
}
*q = c;
c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
- if ( schedule_init_global(sched, &(c->sched)) )
+ if ( (c->sched = scheduler_alloc(sched)) == NULL )
{
spin_unlock(&cpupool_lock);
cpupool_destroy(c);
return NULL;
}
spin_unlock(&cpupool_lock);
printk("Created cpupool %d with scheduler %s (%s)\n", c->cpupool_id,
- c->sched.name, c->sched.opt_name);
+ c->sched->name, c->sched->opt_name);
return c;
}
*q = c->next;
spin_unlock(&cpupool_lock);
printk(XENLOG_DEBUG "cpupool_destroy(pool=%d)\n", c->cpupool_id);
- schedule_deinit_global(&(c->sched));
+ scheduler_free(c->sched);
free_cpupool_struct(c);
return 0;
}
return 0;
}
-/*
- * assign free physical cpus to a cpupool
- * cpus assigned are unused cpus with lowest possible ids
- * returns the number of cpus assigned
- */
-int cpupool_assign_ncpu(struct cpupool *c, int ncpu)
-{
- int i, n = 0;
-
- spin_lock(&cpupool_lock);
- for_each_cpu_mask(i, cpupool_free_cpus)
- {
- if ( cpupool_assign_cpu_locked(c, i) == 0 )
- n++;
- if ( n == ncpu )
- break;
- }
- spin_unlock(&cpupool_lock);
- printk(XENLOG_DEBUG "cpupool_assign_ncpu(pool=%d,ncpu=%d) rc %d\n",
- c->cpupool_id, ncpu, n);
- return n;
-}
-
static long cpupool_unassign_cpu_helper(void *info)
{
struct cpupool *c = (struct cpupool *)info;
spin_lock(&cpupool_lock);
cpu_clear(cpu, cpupool_locked_cpus);
cpu_set(cpu, cpupool_free_cpus);
- if ( cpupool0 != NULL )
- cpupool_assign_cpu_locked(cpupool0, cpu);
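+ /* cpupool0 is created during presmp init now, so the NULL guard is
+  * no longer needed here. */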
+ cpupool_assign_cpu_locked(cpupool0, cpu);
spin_unlock(&cpupool_lock);
}
if ( c == NULL )
break;
op->cpupool_id = c->cpupool_id;
- op->sched_id = c->sched.sched_id;
+ op->sched_id = c->sched->sched_id;
op->n_dom = c->n_dom;
ret = cpumask_to_xenctl_cpumap(&(op->cpumap), &(c->cpu_valid));
}
static int __init cpupool_presmp_init(void)
{
void *cpu = (void *)(long)smp_processor_id();
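+ /* Create pool 0 before invoking/registering the CPU notifier, so the
+  * CPU_ONLINE callback can assign cpus to cpupool0 unconditionally. */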
+ cpupool0 = cpupool_create(0, NULL);
+ BUG_ON(cpupool0 == NULL);
cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
register_cpu_notifier(&cpu_nfb);
return 0;
}
presmp_initcall(cpupool_presmp_init);
-static int __init cpupool_init(void)
-{
- cpupool0 = cpupool_create(0, NULL);
- BUG_ON(cpupool0 == NULL);
-
- if ( (cpupool0_max_cpus == 0) || (cpupool0_max_cpus > num_online_cpus()) )
- cpupool0_max_cpus = num_online_cpus();
-
- if ( !cpupool_assign_ncpu(cpupool0, cpupool0_max_cpus) )
- BUG();
-
- return 0;
-}
-__initcall(cpupool_init);
-
/*
* Local variables:
* mode: C
uint32_t credit;
int credit_balance;
uint32_t runq_sort;
- int ticker_active;
};
-
-/*
- * Global variables
- */
-static struct csched_private *csched_priv0 = NULL;
-
static void csched_tick(void *_cpu);
static void csched_acct(void *dummy);
prv->credit += CSCHED_CREDITS_PER_ACCT;
prv->ncpus++;
cpu_set(cpu, prv->cpus);
- if ( (prv->ncpus == 1) && (prv != csched_priv0) )
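+ /* The pool's first cpu becomes the accounting master; with the
+  * boot-time special case gone, the master ticker is armed here
+  * right away instead of from csched_start_tickers(). */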
+ if ( prv->ncpus == 1 )
{
prv->master = cpu;
- init_timer( &prv->master_ticker, csched_acct, prv, cpu);
- prv->ticker_active = 2;
+ init_timer(&prv->master_ticker, csched_acct, prv, cpu);
+ set_timer(&prv->master_ticker, NOW() +
+ MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT);
}
init_timer(&spc->ticker, csched_tick, (void *)(unsigned long)cpu, cpu);
-
- if ( prv == csched_priv0 )
- prv->master = first_cpu(prv->cpus);
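+ /* Arm the per-cpu ticker immediately as well. */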
+ set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
INIT_LIST_HEAD(&spc->runq);
spc->runq_sort_last = prv->runq_sort;
}
static int
-csched_init(struct scheduler *ops, int pool0)
+csched_init(struct scheduler *ops)
{
struct csched_private *prv;
prv = xmalloc(struct csched_private);
if ( prv == NULL )
- return 1;
+ return -ENOMEM;
+
memset(prv, 0, sizeof(*prv));
- if ( pool0 )
- csched_priv0 = prv;
ops->sched_data = prv;
spin_lock_init(&prv->lock);
INIT_LIST_HEAD(&prv->active_sdom);
- prv->ncpus = 0;
prv->master = UINT_MAX;
- cpus_clear(prv->idlers);
- prv->weight = 0U;
- prv->credit = 0U;
- prv->credit_balance = 0;
- prv->runq_sort = 0U;
- prv->ticker_active = (csched_priv0 == prv) ? 0 : 1;
-
- return 0;
-}
-
-/* Tickers cannot be kicked until SMP subsystem is alive. */
-static __init int csched_start_tickers(void)
-{
- struct csched_pcpu *spc;
- unsigned int cpu;
-
- /* Is the credit scheduler initialised? */
- if ( (csched_priv0 == NULL) || (csched_priv0->ncpus == 0) )
- return 0;
-
- csched_priv0->ticker_active = 1;
-
- for_each_online_cpu ( cpu )
- {
- spc = CSCHED_PCPU(cpu);
- set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
- }
-
- init_timer( &csched_priv0->master_ticker, csched_acct, csched_priv0,
- csched_priv0->master);
-
- set_timer( &csched_priv0->master_ticker, NOW() +
- MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT );
return 0;
}
-__initcall(csched_start_tickers);
static void
csched_deinit(const struct scheduler *ops)
{
struct csched_pcpu *spc;
uint64_t now = NOW();
- struct csched_private *prv;
-
- prv = CSCHED_PRIV(ops);
- if ( !prv->ticker_active )
- return;
-
spc = CSCHED_PCPU(cpu);
set_timer(&spc->ticker, now + MILLISECS(CSCHED_MSECS_PER_TICK)
          - now % MILLISECS(CSCHED_MSECS_PER_TICK));
-
- if ( (prv->ticker_active == 2) && (prv->master == cpu) )
- {
- set_timer( &prv->master_ticker, now +
- MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT -
- now % MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT);
- prv->ticker_active = 1;
- }
}
static struct csched_private _csched_priv;
}
static int
-csched_init(struct scheduler *ops, int pool0)
+csched_init(struct scheduler *ops)
{
int i;
struct csched_private *prv;
prv = xmalloc(struct csched_private);
if ( prv == NULL )
- return 1;
+ return -ENOMEM;
memset(prv, 0, sizeof(*prv));
ops->sched_data = prv;
(( (opsptr)->fn != NULL ) ? (opsptr)->fn(opsptr, ##__VA_ARGS__ ) \
: (typeof((opsptr)->fn(opsptr, ##__VA_ARGS__)))0 )
-#define DOM2OP(_d) (((_d)->cpupool == NULL) ? &ops : &((_d)->cpupool->sched))
+#define DOM2OP(_d) (((_d)->cpupool == NULL) ? &ops : ((_d)->cpupool->sched))
#define VCPU2OP(_v) (DOM2OP((_v)->domain))
#define VCPU2ONLINE(_v) \
(((_v)->domain->cpupool == NULL) ? &cpu_online_map \
void **vcpu_priv;
void *domdata;
- domdata = SCHED_OP(&(c->sched), alloc_domdata, d);
+ domdata = SCHED_OP(c->sched, alloc_domdata, d);
if ( domdata == NULL )
return -ENOMEM;
vcpu_priv = xmalloc_array(void *, d->max_vcpus);
if ( vcpu_priv == NULL )
{
- SCHED_OP(&(c->sched), free_domdata, domdata);
+ SCHED_OP(c->sched, free_domdata, domdata);
return -ENOMEM;
}
memset(vcpu_priv, 0, d->max_vcpus * sizeof(void *));
for_each_vcpu ( d, v )
{
- vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v, domdata);
+ vcpu_priv[v->vcpu_id] = SCHED_OP(c->sched, alloc_vdata, v, domdata);
if ( vcpu_priv[v->vcpu_id] == NULL )
{
for_each_vcpu ( d, v )
xfree(vcpu_priv[v->vcpu_id]);
}
xfree(vcpu_priv);
- SCHED_OP(&(c->sched), free_domdata, domdata);
+ SCHED_OP(c->sched, free_domdata, domdata);
return -ENOMEM;
}
}
if ( strcmp(ops.opt_name, opt_sched) == 0 )
break;
}
if ( schedulers[i] == NULL )
{
printk("Could not find scheduler: %s\n", opt_sched);
register_cpu_notifier(&cpu_nfb);
printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
- if ( SCHED_OP(&ops, init, 1) )
+ if ( SCHED_OP(&ops, init) )
panic("scheduler returned error on init\n");
}
-/* switch scheduler on cpu */
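+/* Move cpu to the scheduler of cpupool c (c == NULL selects the default
+ * scheduler). */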
void schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
{
unsigned long flags;
struct vcpu *v;
- void *vpriv = NULL;
- void *ppriv;
- void *ppriv_old;
- struct scheduler *old_ops;
- struct scheduler *new_ops;
-
- old_ops = per_cpu(scheduler, cpu);
- new_ops = (c == NULL) ? &ops : &(c->sched);
+ void *ppriv, *ppriv_old, *vpriv = NULL;
+ struct scheduler *old_ops = per_cpu(scheduler, cpu);
+ struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
+
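+ /* Nothing to do if the cpu already runs under the target scheduler. */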
+ if ( old_ops == new_ops )
+ return;
+
v = per_cpu(schedule_data, cpu).idle;
ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
if ( c != NULL )
SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
}
-/* init scheduler global data */
-int schedule_init_global(char *name, struct scheduler *sched)
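+/* Allocate and initialize a scheduler instance. A NULL name yields the
+ * static default scheduler (&ops). */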
+struct scheduler *scheduler_alloc(char *name)
{
int i;
const struct scheduler *data;
+ struct scheduler *sched;
+
+ if ( name == NULL )
+ return &ops;
data = &ops;
for ( i = 0; (schedulers[i] != NULL) && (name != NULL) ; i++ )
break;
}
}
+
+ if ( (sched = xmalloc(struct scheduler)) == NULL )
+ return NULL;
memcpy(sched, data, sizeof(*sched));
- return SCHED_OP(sched, init, 0);
+ if ( SCHED_OP(sched, init) != 0 )
+ {
+ xfree(sched);
+ sched = NULL;
+ }
+
+ return sched;
}
-/* deinitialize scheduler global data */
-void schedule_deinit_global(struct scheduler *sched)
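+/* Deinitialize and free a scheduler instance obtained from
+ * scheduler_alloc(); the static default scheduler must never be freed. */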
+void scheduler_free(struct scheduler *sched)
{
+ BUG_ON(sched == &ops);
SCHED_OP(sched, deinit);
+ xfree(sched);
}
void schedule_dump(struct cpupool *c)
struct scheduler *sched;
cpumask_t *cpus;
- sched = (c == NULL) ? &ops : &(c->sched);
+ sched = (c == NULL) ? &ops : c->sched;
cpus = (c == NULL) ? &cpupool_free_cpus : &c->cpu_valid;
printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
SCHED_OP(sched, dump_settings);
unsigned int sched_id; /* ID for this scheduler */
void *sched_data; /* global data pointer */
- int (*init) (struct scheduler *, int);
+ int (*init) (struct scheduler *);
void (*deinit) (const struct scheduler *);
void (*free_vdata) (const struct scheduler *, void *);
cpumask_t cpu_valid; /* all cpus assigned to pool */
struct cpupool *next;
unsigned int n_dom;
- struct scheduler sched;
+ struct scheduler *sched;
};
const struct scheduler *scheduler_get_by_id(unsigned int id);
struct scheduler;
-int schedule_init_global(char *name, struct scheduler *sched);
-void schedule_deinit_global(struct scheduler *sched);
+struct scheduler *scheduler_alloc(char *name);
+void scheduler_free(struct scheduler *sched);
void schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
void vcpu_force_reschedule(struct vcpu *v);
int cpu_disable_scheduler(unsigned int cpu);