     struct scheduler *old_ops;
     void *old_domdata;
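+    /* Domains with a temporarily pinned vcpu cannot be moved. */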
+    for_each_vcpu ( d, v )
+    {
+        if ( v->affinity_broken )
+            return -EBUSY;
+    }
+
     domdata = SCHED_OP(c->sched, alloc_domdata, d);
     if ( domdata == NULL )
         return -ENOMEM;
             if ( cpumask_empty(&online_affinity) &&
                  cpumask_test_cpu(cpu, v->cpu_hard_affinity) )
             {
+                if ( v->affinity_broken )
+                {
+                    /* The vcpu is temporarily pinned, can't move it. */
+                    vcpu_schedule_unlock_irqrestore(lock, flags, v);
+                    ret = -EBUSY;
+                    break;
+                }
+
                 if (system_state == SYS_STATE_suspend)
                 {
                     cpumask_copy(v->cpu_hard_affinity_saved,
     struct vcpu *v, const cpumask_t *affinity, cpumask_t *which)
 {
     spinlock_t *lock;
+    int ret = 0;
     lock = vcpu_schedule_lock_irq(v);
-    cpumask_copy(which, affinity);
+    if ( v->affinity_broken )
+        ret = -EBUSY;
+    else
+    {
+        cpumask_copy(which, affinity);
-    /* Always ask the scheduler to re-evaluate placement
-     * when changing the affinity */
-    set_bit(_VPF_migrating, &v->pause_flags);
+        /*
+         * Always ask the scheduler to re-evaluate placement
+         * when changing the affinity.
+         */
+        set_bit(_VPF_migrating, &v->pause_flags);
+    }
     vcpu_schedule_unlock_irq(lock, v);
         vcpu_migrate(v);
     }
-    return 0;
+    return ret;
 }
 int vcpu_set_hard_affinity(struct vcpu *v, const cpumask_t *affinity)
         kill_timer(&d->watchdog_timer[i]);
 }
+int vcpu_pin_override(struct vcpu *v, int cpu)
+{
+    spinlock_t *lock;
+    int ret = -EINVAL;
+
+    lock = vcpu_schedule_lock_irq(v);
+
+    if ( cpu < 0 )
+    {
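+        /* Undo a previous pin override, restoring the saved affinity. */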
+        if ( v->affinity_broken )
+        {
+            cpumask_copy(v->cpu_hard_affinity, v->cpu_hard_affinity_saved);
+            v->affinity_broken = 0;
+            set_bit(_VPF_migrating, &v->pause_flags);
+            ret = 0;
+        }
+    }
+    else if ( cpu < nr_cpu_ids )
+    {
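+        /* Pin to the requested pcpu unless an override is already active. */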
+        if ( v->affinity_broken )
+            ret = -EBUSY;
+        else if ( cpumask_test_cpu(cpu, VCPU2ONLINE(v)) )
+        {
+            cpumask_copy(v->cpu_hard_affinity_saved, v->cpu_hard_affinity);
+            v->affinity_broken = 1;
+            cpumask_copy(v->cpu_hard_affinity, cpumask_of(cpu));
+            set_bit(_VPF_migrating, &v->pause_flags);
+            ret = 0;
+        }
+    }
+
+    vcpu_schedule_unlock_irq(lock, v);
+
+    domain_update_node_affinity(v->domain);
+
+    if ( v->pause_flags & VPF_migrating )
+    {
+        vcpu_sleep_nosync(v);
+        vcpu_migrate(v);
+    }
+
+    return ret;
+}
+
 typedef long ret_t;
 #endif /* !COMPAT */
         break;
     }
+    case SCHEDOP_pin_override:
+    {
+        struct sched_pin_override sched_pin_override;
+
+        ret = -EPERM;
+        if ( !is_hardware_domain(current->domain) )
+            break;
+
+        ret = -EFAULT;
+        if ( copy_from_guest(&sched_pin_override, arg, 1) )
+            break;
+
+        ret = vcpu_pin_override(current, sched_pin_override.pcpu);
+
+        break;
+    }
+
     default:
         ret = -ENOSYS;
     }
  * With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
  */
 #define SCHEDOP_watchdog 6
+
+/*
+ * Override the current vcpu affinity by pinning it to one physical cpu, or
+ * undo this override, restoring the previous affinity.
+ * @arg == pointer to sched_pin_override_t structure.
+ *
+ * A negative pcpu value will undo a previous pin override and restore the
+ * previous cpu affinity.
+ * This call is allowed for the hardware domain only and requires the cpu
+ * to be part of the domain's cpupool.
+ */
+#define SCHEDOP_pin_override 7
 /* ` } */
 struct sched_shutdown {
 typedef struct sched_watchdog sched_watchdog_t;
 DEFINE_XEN_GUEST_HANDLE(sched_watchdog_t);
+struct sched_pin_override {
+    int32_t pcpu;
+};
+typedef struct sched_pin_override sched_pin_override_t;
+DEFINE_XEN_GUEST_HANDLE(sched_pin_override_t);
+
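For illustration, here is a hypothetical hardware-domain (dom0) snippet showing how a guest kernel could invoke the new operation. It assumes a guest-side HYPERVISOR_sched_op(cmd, arg) hypercall wrapper, as provided for example by Linux's Xen support; the helper name pin_current_vcpu_to() is invented for this sketch and is not part of the patch.

    static int pin_current_vcpu_to(int pcpu)
    {
        struct sched_pin_override pin = { .pcpu = pcpu };

        /* A negative pcpu undoes a previous override. */
        return HYPERVISOR_sched_op(SCHEDOP_pin_override, &pin);
    }

A caller already bound to the vcpu in question would pin it with pin_current_vcpu_to(0), run whatever must stay on that physical cpu, and release the override with pin_current_vcpu_to(-1). Xen accepts the call only from the hardware domain and only for a cpu that is online in the domain's cpupool, as enforced in do_sched_op() and vcpu_pin_override() above.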
 /*
  * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
  * software to determine the appropriate action. For the most part, Xen does