* as it was obtained!
*/
static DEFINE_SPINLOCK(cpupool_lock);
+/*
+ * Serializes cpupool sysctl operations: taken at the top of the op
+ * switch and released after it, replacing the short cpupool_lock
+ * critical sections previously wrapped around cpupool_find_by_id().
+ * NOTE(review): lock ordering vs. cpupool_lock should be documented
+ * where both are ever held together — confirm against full patch.
+ */
+static DEFINE_SPINLOCK(cpupool_ctl_lock);
DEFINE_PER_CPU(struct cpupool *, cpupool);
int ret;
struct cpupool *c;
+ spin_lock(&cpupool_ctl_lock);
+
switch ( op->op )
{
case XEN_SYSCTL_CPUPOOL_OP_DESTROY:
{
- spin_lock(&cpupool_lock);
c = cpupool_find_by_id(op->cpupool_id, 1);
- spin_unlock(&cpupool_lock);
ret = -ENOENT;
if ( c == NULL )
break;
case XEN_SYSCTL_CPUPOOL_OP_INFO:
{
- spin_lock(&cpupool_lock);
c = cpupool_find_by_id(op->cpupool_id, 0);
- spin_unlock(&cpupool_lock);
ret = -ENOENT;
if ( c == NULL )
break;
{
unsigned cpu;
- spin_lock(&cpupool_lock);
c = cpupool_find_by_id(op->cpupool_id, 0);
- spin_unlock(&cpupool_lock);
ret = -ENOENT;
if ( c == NULL )
break;
}
+ spin_unlock(&cpupool_ctl_lock);
+
return ret;
}