.lock_depth = -1, \
.prio = MAX_PRIO-20, \
.static_prio = MAX_PRIO-20, \
+ .special_prio = 0, \
.normal_prio = MAX_PRIO-20, \
.policy = SCHED_NORMAL, \
.cpus_allowed = CPU_MASK_ALL, \
#define PRIO_MIN (-20)
#define PRIO_MAX 20
+#define PRIO_SPECIAL_IO (-9999) /* magic nice value: exempt task from dirty-page throttling */
#define PRIO_PROCESS 0
#define PRIO_PGRP 1
struct hlist_head preempt_notifiers;
#endif
+ int special_prio; /* XXX: per-task IO-priority marker; candidate for relocation into struct io_context */
+
/*
* fpu_counter contains the number of consecutive context switches
* that the FPU is used. If this is over a threshold, the lazy fpu
error = -EPERM;
goto out;
}
+ if (niceval == PRIO_SPECIAL_IO) {
+ p->special_prio = PRIO_SPECIAL_IO;
+ error = 0;
+ goto out;
+ }
if (niceval < task_nice(p) && !can_nice(p, niceval)) {
error = -EACCES;
goto out;
/* normalize: avoid signed division (rounding problems) */
error = -ESRCH;
- if (niceval < -20)
- niceval = -20;
- if (niceval > 19)
- niceval = 19;
+ if (niceval == PRIO_SPECIAL_IO) {
+ if (which != PRIO_PROCESS)
+ return -EINVAL;
+ } else {
+ if (niceval < -20)
+ niceval = -20;
+ if (niceval > 19)
+ niceval = 19;
+ }
read_lock(&tasklist_lock);
switch (which) {
break;
if (pages_written >= write_chunk)
break; /* We've done our duty */
+ if (current->special_prio == PRIO_SPECIAL_IO)
+ break; /* Exempt IO processes */
congestion_wait(WRITE, HZ/10);
}