if ( test_and_clear_bit(CSCHED_FLAG_UNIT_PARKED, &svc->flags) )
{
    SCHED_STAT_CRANK(unit_unpark);
-    vcpu_unpause(svc->unit->vcpu_list);
+    sched_unit_unpause(svc->unit);
}
spin_lock_irq(&prv->lock);
     !test_and_set_bit(CSCHED_FLAG_UNIT_PARKED, &svc->flags) )
{
    SCHED_STAT_CRANK(unit_park);
-    vcpu_pause_nosync(svc->unit->vcpu_list);
+    sched_unit_pause_nosync(svc->unit);
}
/* Lower bound on credits */
* if it is woken up here.
*/
SCHED_STAT_CRANK(unit_unpark);
- vcpu_unpause(svc->unit->vcpu_list);
+ sched_unit_unpause(svc->unit);
clear_bit(CSCHED_FLAG_UNIT_PARKED, &svc->flags);
}
return s->adjust_global ? s->adjust_global(s, op) : 0;
}
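+/* Pause all vcpus of the unit, without waiting for them to stop running. */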
+static inline void sched_unit_pause_nosync(const struct sched_unit *unit)
+{
+    struct vcpu *v;
+
+    for_each_sched_unit_vcpu ( unit, v )
+        vcpu_pause_nosync(v);
+}
+
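+/* Unpause all vcpus of the unit. */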
+static inline void sched_unit_unpause(const struct sched_unit *unit)
+{
+    struct vcpu *v;
+
+    for_each_sched_unit_vcpu ( unit, v )
+        vcpu_unpause(v);
+}
+
#define REGISTER_SCHEDULER(x) static const struct scheduler *x##_entry \
__used_section(".data.schedulers") = &x;