update_schedule_vcpus(ops);
}
-/**
- * This function allocates scheduler-specific data for a physical CPU
- *
- * We do not actually make use of any per-CPU data but the hypervisor expects
- * a non-NULL return value
- *
- * @param ops Pointer to this instance of the scheduler structure
- *
- * @return Pointer to the allocated data
- */
-static void *
-a653sched_alloc_pdata(const struct scheduler *ops, int cpu)
-{
- /* return a non-NULL value to keep schedule.c happy */
- return SCHED_PRIV(ops);
-}
-
-/**
- * This function frees scheduler-specific data for a physical CPU
- *
- * @param ops Pointer to this instance of the scheduler structure
- */
-static void
-a653sched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
-{
- /* nop */
-}
-
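The dummy per-CPU hooks can be dropped outright because scheduler hooks are dispatched through the SCHED_OP() wrapper, which turns a NULL hook into a no-op that yields a zero of the hook's return type. A sketch of that wrapper (paraphrased from memory of xen/include/xen/sched-if.h; not part of this patch) shows why a scheduler with no per-CPU state no longer needs a placeholder:

    /*
     * Sketch of the hook dispatcher. If a scheduler leaves a hook
     * NULL, the call evaluates to a zero of the hook's return type,
     * i.e. NULL for alloc_pdata -- which, after this patch, reads as
     * success, since IS_ERR(NULL) is false.
     */
    #define SCHED_OP(opsptr, fn, ...)                                        \
             ( ((opsptr)->fn != NULL) ? (opsptr)->fn(opsptr, ##__VA_ARGS__)  \
               : (typeof((opsptr)->fn(opsptr, ##__VA_ARGS__)))0 )
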
/**
* This function allocates scheduler-specific data for a domain
*
.free_vdata = a653sched_free_vdata,
.alloc_vdata = a653sched_alloc_vdata,
- .free_pdata = a653sched_free_pdata,
- .alloc_pdata = a653sched_alloc_pdata,
-
.free_domdata = a653sched_free_domdata,
.alloc_domdata = a653sched_alloc_domdata,
#include <xen/cpu.h>
#include <xen/keyhandler.h>
#include <xen/trace.h>
+#include <xen/err.h>
#include <xen/guest_access.h>
/*
spin_unlock_irqrestore(old_lock, flags);
if ( !alloc_cpumask_var(&_cpumask_scratch[cpu]) )
- return NULL;
+ return ERR_PTR(-ENOMEM);
if ( prv->repl_timer == NULL )
{
prv->repl_timer = xzalloc(struct timer);
if ( prv->repl_timer == NULL )
- return NULL;
+ return ERR_PTR(-ENOMEM);
init_timer(prv->repl_timer, repl_timer_handler, (void *)ops, cpu);
}
- /* 1 indicates alloc. succeed in schedule.c */
- return (void *)1;
+ return NULL;
}
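The new error convention comes from <xen/err.h>: a failure is encoded directly in the returned pointer, NULL now means plain success, and callers discriminate with IS_ERR()/PTR_ERR(). A minimal sketch of that helper family (modelled on Linux's <linux/err.h>; the real Xen header may differ in detail):

    /*
     * Error codes live in the top page of the address space, so any
     * pointer value >= (unsigned long)-MAX_ERRNO encodes an errno
     * rather than a real allocation.
     */
    #define MAX_ERRNO 4095
    #define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)

    static inline void *ERR_PTR(long error)
    {
        return (void *)error;            /* e.g. ERR_PTR(-ENOMEM) */
    }

    static inline long PTR_ERR(const void *ptr)
    {
        return (long)ptr;                /* recover the errno */
    }

    static inline int IS_ERR(const void *ptr)
    {
        return IS_ERR_VALUE((unsigned long)ptr);  /* false for NULL */
    }
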
static void
#include <xen/event.h>
#include <public/sched.h>
#include <xsm/xsm.h>
+#include <xen/err.h>
/* opt_sched: scheduler - default to configured value */
static char __initdata opt_sched[10] = CONFIG_SCHED_DEFAULT;
static int cpu_schedule_up(unsigned int cpu)
{
struct schedule_data *sd = &per_cpu(schedule_data, cpu);
+ void *sched_priv;
per_cpu(scheduler, cpu) = &ops;
spin_lock_init(&sd->_lock);
if ( idle_vcpu[cpu] == NULL )
return -ENOMEM;
- if ( (ops.alloc_pdata != NULL) &&
- ((sd->sched_priv = ops.alloc_pdata(&ops, cpu)) == NULL) )
- return -ENOMEM;
+ /*
+ * We don't want to risk calling xfree() on an sd->sched_priv
+ * that holds an IS_ERR() value (e.g., inside free_pdata, from
+ * cpu_schedule_down() called during CPU_UP_CANCELLED), so only
+ * assign the pointer once we know the allocation succeeded.
+ */
+ sched_priv = SCHED_OP(&ops, alloc_pdata, cpu);
+ if ( IS_ERR(sched_priv) )
+ return PTR_ERR(sched_priv);
+
+ sd->sched_priv = sched_priv;
return 0;
}
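cpu_schedule_up() is invoked from the CPU notifier chain, so the scheduler's own PTR_ERR() value now travels all the way to the code bringing the CPU up, instead of being flattened to -ENOMEM. A condensed sketch of that caller (cpu_schedule_callback() in schedule.c, abridged from memory and not part of this patch):

    static int cpu_schedule_callback(
        struct notifier_block *nfb, unsigned long action, void *hcpu)
    {
        unsigned int cpu = (unsigned long)hcpu;
        int rc = 0;

        switch ( action )
        {
        case CPU_UP_PREPARE:
            rc = cpu_schedule_up(cpu);  /* now propagates the PTR_ERR() value */
            break;
        case CPU_UP_CANCELED:
        case CPU_DEAD:
            cpu_schedule_down(cpu);     /* free_pdata may now see a NULL arg */
            break;
        default:
            break;
        }

        return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
    }
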
struct schedule_data *sd = &per_cpu(schedule_data, cpu);
struct scheduler *sched = per_cpu(scheduler, cpu);
- if ( sd->sched_priv != NULL )
- SCHED_OP(sched, free_pdata, sd->sched_priv, cpu);
+ SCHED_OP(sched, free_pdata, sd->sched_priv, cpu);
SCHED_OP(sched, free_vdata, idle_vcpu[cpu]->sched_priv);
idle_vcpu[cpu]->sched_priv = NULL;
idle_domain->max_vcpus = nr_cpu_ids;
if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
BUG();
- if ( ops.alloc_pdata &&
- !(this_cpu(schedule_data).sched_priv = ops.alloc_pdata(&ops, 0)) )
- BUG();
+ this_cpu(schedule_data).sched_priv = SCHED_OP(&ops, alloc_pdata, 0);
+ BUG_ON(IS_ERR(this_cpu(schedule_data).sched_priv));
SCHED_OP(&ops, init_pdata, this_cpu(schedule_data).sched_priv, 0);
}
idle = idle_vcpu[cpu];
ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
- if ( ppriv == NULL )
- return -ENOMEM;
+ if ( IS_ERR(ppriv) )
+ return PTR_ERR(ppriv);
SCHED_OP(new_ops, init_pdata, ppriv, cpu);
vpriv = SCHED_OP(new_ops, alloc_vdata, idle, idle->domain->sched_priv);
if ( vpriv == NULL )