ia64/linux-2.6.18-xen.hg

annotate kernel/posix-cpu-timers.c @ 240:3e8752eb6d9c

Apply patch for 2.6.18.8.

Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
author Ian Campbell <ian.campbell@xensource.com>
date Wed Oct 03 10:00:44 2007 +0100 (2007-10-03)
parents 831230e53067
children
rev   line source
ian@0 1 /*
ian@0 2 * Implement CPU time clocks for the POSIX clock interface.
ian@0 3 */
ian@0 4
ian@0 5 #include <linux/sched.h>
ian@0 6 #include <linux/posix-timers.h>
ian@0 7 #include <asm/uaccess.h>
ian@0 8 #include <linux/errno.h>
ian@0 9
ian@0 10 static int check_clock(const clockid_t which_clock)
ian@0 11 {
ian@0 12 int error = 0;
ian@0 13 struct task_struct *p;
ian@0 14 const pid_t pid = CPUCLOCK_PID(which_clock);
ian@0 15
ian@0 16 if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
ian@0 17 return -EINVAL;
ian@0 18
ian@0 19 if (pid == 0)
ian@0 20 return 0;
ian@0 21
ian@0 22 read_lock(&tasklist_lock);
ian@0 23 p = find_task_by_pid(pid);
ian@0 24 if (!p || (CPUCLOCK_PERTHREAD(which_clock) ?
ian@0 25 p->tgid != current->tgid : p->tgid != pid)) {
ian@0 26 error = -EINVAL;
ian@0 27 }
ian@0 28 read_unlock(&tasklist_lock);
ian@0 29
ian@0 30 return error;
ian@0 31 }
ian@0 32
ian@0 33 static inline union cpu_time_count
ian@0 34 timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
ian@0 35 {
ian@0 36 union cpu_time_count ret;
ian@0 37 ret.sched = 0; /* high half always zero when .cpu used */
ian@0 38 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
ian@0 39 ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
ian@0 40 } else {
ian@0 41 ret.cpu = timespec_to_cputime(tp);
ian@0 42 }
ian@0 43 return ret;
ian@0 44 }
ian@0 45
ian@0 46 static void sample_to_timespec(const clockid_t which_clock,
ian@0 47 union cpu_time_count cpu,
ian@0 48 struct timespec *tp)
ian@0 49 {
ian@0 50 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
ian@0 51 tp->tv_sec = div_long_long_rem(cpu.sched,
ian@0 52 NSEC_PER_SEC, &tp->tv_nsec);
ian@0 53 } else {
ian@0 54 cputime_to_timespec(cpu.cpu, tp);
ian@0 55 }
ian@0 56 }
ian@0 57
ian@0 58 static inline int cpu_time_before(const clockid_t which_clock,
ian@0 59 union cpu_time_count now,
ian@0 60 union cpu_time_count then)
ian@0 61 {
ian@0 62 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
ian@0 63 return now.sched < then.sched;
ian@0 64 } else {
ian@0 65 return cputime_lt(now.cpu, then.cpu);
ian@0 66 }
ian@0 67 }
ian@0 68 static inline void cpu_time_add(const clockid_t which_clock,
ian@0 69 union cpu_time_count *acc,
ian@0 70 union cpu_time_count val)
ian@0 71 {
ian@0 72 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
ian@0 73 acc->sched += val.sched;
ian@0 74 } else {
ian@0 75 acc->cpu = cputime_add(acc->cpu, val.cpu);
ian@0 76 }
ian@0 77 }
ian@0 78 static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
ian@0 79 union cpu_time_count a,
ian@0 80 union cpu_time_count b)
ian@0 81 {
ian@0 82 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
ian@0 83 a.sched -= b.sched;
ian@0 84 } else {
ian@0 85 a.cpu = cputime_sub(a.cpu, b.cpu);
ian@0 86 }
ian@0 87 return a;
ian@0 88 }
ian@0 89
ian@0 90 /*
ian@240 91 * Divide and limit the result to res >= 1
ian@240 92 *
ian@240 93 * This is necessary to prevent signal delivery starvation, when the result of
ian@240 94 * the division would be rounded down to 0.
ian@240 95 */
ian@240 96 static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
ian@240 97 {
ian@240 98 cputime_t res = cputime_div(time, div);
ian@240 99
ian@240 100 return max_t(cputime_t, res, 1);
ian@240 101 }
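/*
 * Illustrative note (added): treating cputime_t as plain ticks,
 * cputime_div(3, 8) == 0 while cputime_div_non_zero(3, 8) == 1, so the
 * per-thread share of a nearly-expired process timer never degenerates
 * to a zero step (see process_timer_rebalance and check_process_timers
 * below, which use this helper).
 */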
ian@240 102
ian@240 103 /*
ian@0 104 * Update expiry time from increment, and increase overrun count,
ian@0 105 * given the current clock sample.
ian@0 106 */
ian@0 107 static void bump_cpu_timer(struct k_itimer *timer,
ian@0 108 union cpu_time_count now)
ian@0 109 {
ian@0 110 int i;
ian@0 111
ian@0 112 if (timer->it.cpu.incr.sched == 0)
ian@0 113 return;
ian@0 114
ian@0 115 if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
ian@0 116 unsigned long long delta, incr;
ian@0 117
ian@0 118 if (now.sched < timer->it.cpu.expires.sched)
ian@0 119 return;
ian@0 120 incr = timer->it.cpu.incr.sched;
ian@0 121 delta = now.sched + incr - timer->it.cpu.expires.sched;
ian@0 122 /* Don't use (incr*2 < delta), incr*2 might overflow. */
ian@0 123 for (i = 0; incr < delta - incr; i++)
ian@0 124 incr = incr << 1;
ian@0 125 for (; i >= 0; incr >>= 1, i--) {
ian@0 126 if (delta < incr)
ian@0 127 continue;
ian@0 128 timer->it.cpu.expires.sched += incr;
ian@0 129 timer->it_overrun += 1 << i;
ian@0 130 delta -= incr;
ian@0 131 }
ian@0 132 } else {
ian@0 133 cputime_t delta, incr;
ian@0 134
ian@0 135 if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
ian@0 136 return;
ian@0 137 incr = timer->it.cpu.incr.cpu;
ian@0 138 delta = cputime_sub(cputime_add(now.cpu, incr),
ian@0 139 timer->it.cpu.expires.cpu);
ian@0 140 /* Don't use (incr*2 < delta), incr*2 might overflow. */
ian@0 141 for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
ian@0 142 incr = cputime_add(incr, incr);
ian@0 143 for (; i >= 0; incr = cputime_halve(incr), i--) {
ian@0 144 if (cputime_lt(delta, incr))
ian@0 145 continue;
ian@0 146 timer->it.cpu.expires.cpu =
ian@0 147 cputime_add(timer->it.cpu.expires.cpu, incr);
ian@0 148 timer->it_overrun += 1 << i;
ian@0 149 delta = cputime_sub(delta, incr);
ian@0 150 }
ian@0 151 }
ian@0 152 }
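/*
 * Worked example (illustrative, added -- made-up numbers for the
 * CPUCLOCK_SCHED branch): expires = 100, incr = 10, now = 137.
 *
 *	delta = 137 + 10 - 100 = 47
 *	doubling loop:  incr 10 -> 20 -> 40, ending with i = 2
 *	i = 2: 47 >= 40, so expires = 140, it_overrun += 4, delta = 7
 *	i = 1: 7 < 20, skip
 *	i = 0: 7 < 10, skip
 *
 * The expiry is pushed just past "now" (140 > 137) in O(log n) steps,
 * and it_overrun accounts for the 4 whole periods that elapsed.
 */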
ian@0 153
ian@0 154 static inline cputime_t prof_ticks(struct task_struct *p)
ian@0 155 {
ian@0 156 return cputime_add(p->utime, p->stime);
ian@0 157 }
ian@0 158 static inline cputime_t virt_ticks(struct task_struct *p)
ian@0 159 {
ian@0 160 return p->utime;
ian@0 161 }
ian@0 162 static inline unsigned long long sched_ns(struct task_struct *p)
ian@0 163 {
ian@0 164 return (p == current) ? current_sched_time(p) : p->sched_time;
ian@0 165 }
ian@0 166
ian@0 167 int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
ian@0 168 {
ian@0 169 int error = check_clock(which_clock);
ian@0 170 if (!error) {
ian@0 171 tp->tv_sec = 0;
ian@0 172 tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
ian@0 173 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
ian@0 174 /*
ian@0 175 * If sched_clock is using a cycle counter, we
ian@0 176 * don't have any idea of its true resolution
ian@0 177 * exported, but it is much finer than 1s/HZ.
ian@0 178 */
ian@0 179 tp->tv_nsec = 1;
ian@0 180 }
ian@0 181 }
ian@0 182 return error;
ian@0 183 }
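/*
 * Userspace view (illustrative sketch, added -- not part of this file):
 * the resolution computed above is what clock_getres() reports for the
 * CPU-time clocks.  For the SCHED-backed clocks registered at the
 * bottom of this file it comes out as 1 ns, otherwise as 1s/HZ.
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec res, used;
 *
 *		clock_getres(CLOCK_PROCESS_CPUTIME_ID, &res);
 *		clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &used);
 *		printf("res %ld.%09ld, cpu used %ld.%09ld\n",
 *		       (long)res.tv_sec, res.tv_nsec,
 *		       (long)used.tv_sec, used.tv_nsec);
 *		return 0;
 *	}
 */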
ian@0 184
ian@0 185 int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
ian@0 186 {
ian@0 187 /*
ian@0 188 * You can never reset a CPU clock, but we check for other errors
ian@0 189 * in the call before failing with EPERM.
ian@0 190 */
ian@0 191 int error = check_clock(which_clock);
ian@0 192 if (error == 0) {
ian@0 193 error = -EPERM;
ian@0 194 }
ian@0 195 return error;
ian@0 196 }
ian@0 197
ian@0 198
ian@0 199 /*
ian@0 200 * Sample a per-thread clock for the given task.
ian@0 201 */
ian@0 202 static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
ian@0 203 union cpu_time_count *cpu)
ian@0 204 {
ian@0 205 switch (CPUCLOCK_WHICH(which_clock)) {
ian@0 206 default:
ian@0 207 return -EINVAL;
ian@0 208 case CPUCLOCK_PROF:
ian@0 209 cpu->cpu = prof_ticks(p);
ian@0 210 break;
ian@0 211 case CPUCLOCK_VIRT:
ian@0 212 cpu->cpu = virt_ticks(p);
ian@0 213 break;
ian@0 214 case CPUCLOCK_SCHED:
ian@0 215 cpu->sched = sched_ns(p);
ian@0 216 break;
ian@0 217 }
ian@0 218 return 0;
ian@0 219 }
ian@0 220
ian@0 221 /*
ian@0 222 * Sample a process (thread group) clock for the given group_leader task.
ian@0 223 * Must be called with tasklist_lock held for reading, and with
ian@0 224 * p->sighand->siglock held.
ian@0 225 */
ian@0 226 static int cpu_clock_sample_group_locked(unsigned int clock_idx,
ian@0 227 struct task_struct *p,
ian@0 228 union cpu_time_count *cpu)
ian@0 229 {
ian@0 230 struct task_struct *t = p;
ian@0 231 switch (clock_idx) {
ian@0 232 default:
ian@0 233 return -EINVAL;
ian@0 234 case CPUCLOCK_PROF:
ian@0 235 cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
ian@0 236 do {
ian@0 237 cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
ian@0 238 t = next_thread(t);
ian@0 239 } while (t != p);
ian@0 240 break;
ian@0 241 case CPUCLOCK_VIRT:
ian@0 242 cpu->cpu = p->signal->utime;
ian@0 243 do {
ian@0 244 cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
ian@0 245 t = next_thread(t);
ian@0 246 } while (t != p);
ian@0 247 break;
ian@0 248 case CPUCLOCK_SCHED:
ian@0 249 cpu->sched = p->signal->sched_time;
ian@0 250 /* Add in each other live thread. */
ian@0 251 while ((t = next_thread(t)) != p) {
ian@0 252 cpu->sched += t->sched_time;
ian@0 253 }
ian@0 254 cpu->sched += sched_ns(p);
ian@0 255 break;
ian@0 256 }
ian@0 257 return 0;
ian@0 258 }
ian@0 259
ian@0 260 /*
ian@0 261 * Sample a process (thread group) clock for the given group_leader task.
ian@0 262 * Must be called with tasklist_lock held for reading.
ian@0 263 */
ian@0 264 static int cpu_clock_sample_group(const clockid_t which_clock,
ian@0 265 struct task_struct *p,
ian@0 266 union cpu_time_count *cpu)
ian@0 267 {
ian@0 268 int ret;
ian@0 269 unsigned long flags;
ian@0 270 spin_lock_irqsave(&p->sighand->siglock, flags);
ian@0 271 ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
ian@0 272 cpu);
ian@0 273 spin_unlock_irqrestore(&p->sighand->siglock, flags);
ian@0 274 return ret;
ian@0 275 }
ian@0 276
ian@0 277
ian@0 278 int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
ian@0 279 {
ian@0 280 const pid_t pid = CPUCLOCK_PID(which_clock);
ian@0 281 int error = -EINVAL;
ian@0 282 union cpu_time_count rtn;
ian@0 283
ian@0 284 if (pid == 0) {
ian@0 285 /*
ian@0 286 * Special case constant value for our own clocks.
ian@0 287 * We don't have to do any lookup to find ourselves.
ian@0 288 */
ian@0 289 if (CPUCLOCK_PERTHREAD(which_clock)) {
ian@0 290 /*
ian@0 291 * Sampling just ourselves we can do with no locking.
ian@0 292 */
ian@0 293 error = cpu_clock_sample(which_clock,
ian@0 294 current, &rtn);
ian@0 295 } else {
ian@0 296 read_lock(&tasklist_lock);
ian@0 297 error = cpu_clock_sample_group(which_clock,
ian@0 298 current, &rtn);
ian@0 299 read_unlock(&tasklist_lock);
ian@0 300 }
ian@0 301 } else {
ian@0 302 /*
ian@0 303 * Find the given PID, and validate that the caller
ian@0 304 * should be able to see it.
ian@0 305 */
ian@0 306 struct task_struct *p;
ian@0 307 read_lock(&tasklist_lock);
ian@0 308 p = find_task_by_pid(pid);
ian@0 309 if (p) {
ian@0 310 if (CPUCLOCK_PERTHREAD(which_clock)) {
ian@0 311 if (p->tgid == current->tgid) {
ian@0 312 error = cpu_clock_sample(which_clock,
ian@0 313 p, &rtn);
ian@0 314 }
ian@0 315 } else if (p->tgid == pid && p->signal) {
ian@0 316 error = cpu_clock_sample_group(which_clock,
ian@0 317 p, &rtn);
ian@0 318 }
ian@0 319 }
ian@0 320 read_unlock(&tasklist_lock);
ian@0 321 }
ian@0 322
ian@0 323 if (error)
ian@0 324 return error;
ian@0 325 sample_to_timespec(which_clock, rtn, tp);
ian@0 326 return 0;
ian@0 327 }
ian@0 328
ian@0 329
ian@0 330 /*
ian@0 331 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
ian@0 332 * This is called from sys_timer_create with the new timer already locked.
ian@0 333 */
ian@0 334 int posix_cpu_timer_create(struct k_itimer *new_timer)
ian@0 335 {
ian@0 336 int ret = 0;
ian@0 337 const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
ian@0 338 struct task_struct *p;
ian@0 339
ian@0 340 if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
ian@0 341 return -EINVAL;
ian@0 342
ian@0 343 INIT_LIST_HEAD(&new_timer->it.cpu.entry);
ian@0 344 new_timer->it.cpu.incr.sched = 0;
ian@0 345 new_timer->it.cpu.expires.sched = 0;
ian@0 346
ian@0 347 read_lock(&tasklist_lock);
ian@0 348 if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
ian@0 349 if (pid == 0) {
ian@0 350 p = current;
ian@0 351 } else {
ian@0 352 p = find_task_by_pid(pid);
ian@0 353 if (p && p->tgid != current->tgid)
ian@0 354 p = NULL;
ian@0 355 }
ian@0 356 } else {
ian@0 357 if (pid == 0) {
ian@0 358 p = current->group_leader;
ian@0 359 } else {
ian@0 360 p = find_task_by_pid(pid);
ian@0 361 if (p && p->tgid != pid)
ian@0 362 p = NULL;
ian@0 363 }
ian@0 364 }
ian@0 365 new_timer->it.cpu.task = p;
ian@0 366 if (p) {
ian@0 367 get_task_struct(p);
ian@0 368 } else {
ian@0 369 ret = -EINVAL;
ian@0 370 }
ian@0 371 read_unlock(&tasklist_lock);
ian@0 372
ian@0 373 return ret;
ian@0 374 }
ian@0 375
ian@0 376 /*
ian@0 377 * Clean up a CPU-clock timer that is about to be destroyed.
ian@0 378 * This is called from timer deletion with the timer already locked.
ian@0 379 * If we return TIMER_RETRY, it's necessary to release the timer's lock
ian@0 380 * and try again. (This happens when the timer is in the middle of firing.)
ian@0 381 */
ian@0 382 int posix_cpu_timer_del(struct k_itimer *timer)
ian@0 383 {
ian@0 384 struct task_struct *p = timer->it.cpu.task;
ian@0 385 int ret = 0;
ian@0 386
ian@0 387 if (likely(p != NULL)) {
ian@0 388 read_lock(&tasklist_lock);
ian@0 389 if (unlikely(p->signal == NULL)) {
ian@0 390 /*
ian@0 391 * We raced with the reaping of the task.
ian@0 392 * The deletion should have cleared us off the list.
ian@0 393 */
ian@0 394 BUG_ON(!list_empty(&timer->it.cpu.entry));
ian@0 395 } else {
ian@0 396 spin_lock(&p->sighand->siglock);
ian@0 397 if (timer->it.cpu.firing)
ian@0 398 ret = TIMER_RETRY;
ian@0 399 else
ian@0 400 list_del(&timer->it.cpu.entry);
ian@0 401 spin_unlock(&p->sighand->siglock);
ian@0 402 }
ian@0 403 read_unlock(&tasklist_lock);
ian@0 404
ian@0 405 if (!ret)
ian@0 406 put_task_struct(p);
ian@0 407 }
ian@0 408
ian@0 409 return ret;
ian@0 410 }
ian@0 411
ian@0 412 /*
ian@0 413 * Clean out CPU timers still ticking when a thread exited. The task
ian@0 414 * pointer is cleared, and the expiry time is replaced with the residual
ian@0 415 * time for later timer_gettime calls to return.
ian@0 416 * This must be called with the siglock held.
ian@0 417 */
ian@0 418 static void cleanup_timers(struct list_head *head,
ian@0 419 cputime_t utime, cputime_t stime,
ian@0 420 unsigned long long sched_time)
ian@0 421 {
ian@0 422 struct cpu_timer_list *timer, *next;
ian@0 423 cputime_t ptime = cputime_add(utime, stime);
ian@0 424
ian@0 425 list_for_each_entry_safe(timer, next, head, entry) {
ian@0 426 list_del_init(&timer->entry);
ian@0 427 if (cputime_lt(timer->expires.cpu, ptime)) {
ian@0 428 timer->expires.cpu = cputime_zero;
ian@0 429 } else {
ian@0 430 timer->expires.cpu = cputime_sub(timer->expires.cpu,
ian@0 431 ptime);
ian@0 432 }
ian@0 433 }
ian@0 434
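/*
 * (Clarifying note, added:) head steps through the per-clock lists in
 * array order -- CPUCLOCK_PROF, then CPUCLOCK_VIRT, then CPUCLOCK_SCHED
 * -- which is why the three loops compare against ptime, utime and
 * sched_time respectively.
 */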
ian@0 435 ++head;
ian@0 436 list_for_each_entry_safe(timer, next, head, entry) {
ian@0 437 list_del_init(&timer->entry);
ian@0 438 if (cputime_lt(timer->expires.cpu, utime)) {
ian@0 439 timer->expires.cpu = cputime_zero;
ian@0 440 } else {
ian@0 441 timer->expires.cpu = cputime_sub(timer->expires.cpu,
ian@0 442 utime);
ian@0 443 }
ian@0 444 }
ian@0 445
ian@0 446 ++head;
ian@0 447 list_for_each_entry_safe(timer, next, head, entry) {
ian@0 448 list_del_init(&timer->entry);
ian@0 449 if (timer->expires.sched < sched_time) {
ian@0 450 timer->expires.sched = 0;
ian@0 451 } else {
ian@0 452 timer->expires.sched -= sched_time;
ian@0 453 }
ian@0 454 }
ian@0 455 }
ian@0 456
ian@0 457 /*
ian@0 458 * These are both called with the siglock held, when the current thread
ian@0 459 * is being reaped. When the final (leader) thread in the group is reaped,
ian@0 460 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
ian@0 461 */
ian@0 462 void posix_cpu_timers_exit(struct task_struct *tsk)
ian@0 463 {
ian@0 464 cleanup_timers(tsk->cpu_timers,
ian@0 465 tsk->utime, tsk->stime, tsk->sched_time);
ian@0 466
ian@0 467 }
ian@0 468 void posix_cpu_timers_exit_group(struct task_struct *tsk)
ian@0 469 {
ian@0 470 cleanup_timers(tsk->signal->cpu_timers,
ian@0 471 cputime_add(tsk->utime, tsk->signal->utime),
ian@0 472 cputime_add(tsk->stime, tsk->signal->stime),
ian@0 473 tsk->sched_time + tsk->signal->sched_time);
ian@0 474 }
ian@0 475
ian@0 476
ian@0 477 /*
ian@0 478 * Set the expiry times of all the threads in the process so one of them
ian@0 479 * will go off before the process cumulative expiry total is reached.
ian@0 480 */
ian@0 481 static void process_timer_rebalance(struct task_struct *p,
ian@0 482 unsigned int clock_idx,
ian@0 483 union cpu_time_count expires,
ian@0 484 union cpu_time_count val)
ian@0 485 {
ian@0 486 cputime_t ticks, left;
ian@0 487 unsigned long long ns, nsleft;
ian@0 488 struct task_struct *t = p;
ian@0 489 unsigned int nthreads = atomic_read(&p->signal->live);
ian@0 490
ian@0 491 if (!nthreads)
ian@0 492 return;
ian@0 493
ian@0 494 switch (clock_idx) {
ian@0 495 default:
ian@0 496 BUG();
ian@0 497 break;
ian@0 498 case CPUCLOCK_PROF:
ian@240 499 left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
ian@240 500 nthreads);
ian@0 501 do {
ian@0 502 if (likely(!(t->flags & PF_EXITING))) {
ian@0 503 ticks = cputime_add(prof_ticks(t), left);
ian@0 504 if (cputime_eq(t->it_prof_expires,
ian@0 505 cputime_zero) ||
ian@0 506 cputime_gt(t->it_prof_expires, ticks)) {
ian@0 507 t->it_prof_expires = ticks;
ian@0 508 }
ian@0 509 }
ian@0 510 t = next_thread(t);
ian@0 511 } while (t != p);
ian@0 512 break;
ian@0 513 case CPUCLOCK_VIRT:
ian@240 514 left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
ian@240 515 nthreads);
ian@0 516 do {
ian@0 517 if (likely(!(t->flags & PF_EXITING))) {
ian@0 518 ticks = cputime_add(virt_ticks(t), left);
ian@0 519 if (cputime_eq(t->it_virt_expires,
ian@0 520 cputime_zero) ||
ian@0 521 cputime_gt(t->it_virt_expires, ticks)) {
ian@0 522 t->it_virt_expires = ticks;
ian@0 523 }
ian@0 524 }
ian@0 525 t = next_thread(t);
ian@0 526 } while (t != p);
ian@0 527 break;
ian@0 528 case CPUCLOCK_SCHED:
ian@0 529 nsleft = expires.sched - val.sched;
ian@0 530 do_div(nsleft, nthreads);
ian@240 531 nsleft = max_t(unsigned long long, nsleft, 1);
ian@0 532 do {
ian@0 533 if (likely(!(t->flags & PF_EXITING))) {
ian@0 534 ns = t->sched_time + nsleft;
ian@0 535 if (t->it_sched_expires == 0 ||
ian@0 536 t->it_sched_expires > ns) {
ian@0 537 t->it_sched_expires = ns;
ian@0 538 }
ian@0 539 }
ian@0 540 t = next_thread(t);
ian@0 541 } while (t != p);
ian@0 542 break;
ian@0 543 }
ian@0 544 }
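/*
 * Illustrative example (added, made-up numbers): a CPUCLOCK_PROF
 * process timer expires at 10s of combined CPU time, the group has
 * already used 4s (val), and 3 threads are live.  Then
 *
 *	left = cputime_div_non_zero(10s - 4s, 3) = 2s
 *
 * and each thread gets it_prof_expires = its own prof_ticks() + 2s.
 * For the group to reach 10s at least one thread must accumulate 2s
 * more, so some per-thread check fires no later than the process-wide
 * expiry, as the comment above this function promises.
 */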
ian@0 545
ian@0 546 static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
ian@0 547 {
ian@0 548 /*
ian@0 549 * That's all for this thread or process.
ian@0 550 * We leave our residual in expires to be reported.
ian@0 551 */
ian@0 552 put_task_struct(timer->it.cpu.task);
ian@0 553 timer->it.cpu.task = NULL;
ian@0 554 timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
ian@0 555 timer->it.cpu.expires,
ian@0 556 now);
ian@0 557 }
ian@0 558
ian@0 559 /*
ian@0 560 * Insert the timer on the appropriate list before any timers that
ian@0 561 * expire later. This must be called with the tasklist_lock held
ian@0 562 * for reading, and interrupts disabled.
ian@0 563 */
ian@0 564 static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
ian@0 565 {
ian@0 566 struct task_struct *p = timer->it.cpu.task;
ian@0 567 struct list_head *head, *listpos;
ian@0 568 struct cpu_timer_list *const nt = &timer->it.cpu;
ian@0 569 struct cpu_timer_list *next;
ian@0 570 unsigned long i;
ian@0 571
ian@0 572 head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
ian@0 573 p->cpu_timers : p->signal->cpu_timers);
ian@0 574 head += CPUCLOCK_WHICH(timer->it_clock);
ian@0 575
ian@0 576 BUG_ON(!irqs_disabled());
ian@0 577 spin_lock(&p->sighand->siglock);
ian@0 578
ian@0 579 listpos = head;
ian@0 580 if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
ian@0 581 list_for_each_entry(next, head, entry) {
ian@0 582 if (next->expires.sched > nt->expires.sched)
ian@0 583 break;
ian@0 584 listpos = &next->entry;
ian@0 585 }
ian@0 586 } else {
ian@0 587 list_for_each_entry(next, head, entry) {
ian@0 588 if (cputime_gt(next->expires.cpu, nt->expires.cpu))
ian@0 589 break;
ian@0 590 listpos = &next->entry;
ian@0 591 }
ian@0 592 }
ian@0 593 list_add(&nt->entry, listpos);
ian@0 594
ian@0 595 if (listpos == head) {
ian@0 596 /*
ian@0 597 * We are the new earliest-expiring timer.
ian@0 598 * If we are a thread timer, there can always
ian@0 599 * be a process timer telling us to stop earlier.
ian@0 600 */
ian@0 601
ian@0 602 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
ian@0 603 switch (CPUCLOCK_WHICH(timer->it_clock)) {
ian@0 604 default:
ian@0 605 BUG();
ian@0 606 case CPUCLOCK_PROF:
ian@0 607 if (cputime_eq(p->it_prof_expires,
ian@0 608 cputime_zero) ||
ian@0 609 cputime_gt(p->it_prof_expires,
ian@0 610 nt->expires.cpu))
ian@0 611 p->it_prof_expires = nt->expires.cpu;
ian@0 612 break;
ian@0 613 case CPUCLOCK_VIRT:
ian@0 614 if (cputime_eq(p->it_virt_expires,
ian@0 615 cputime_zero) ||
ian@0 616 cputime_gt(p->it_virt_expires,
ian@0 617 nt->expires.cpu))
ian@0 618 p->it_virt_expires = nt->expires.cpu;
ian@0 619 break;
ian@0 620 case CPUCLOCK_SCHED:
ian@0 621 if (p->it_sched_expires == 0 ||
ian@0 622 p->it_sched_expires > nt->expires.sched)
ian@0 623 p->it_sched_expires = nt->expires.sched;
ian@0 624 break;
ian@0 625 }
ian@0 626 } else {
ian@0 627 /*
ian@0 628 * For a process timer, we must balance
ian@0 629 * all the live threads' expirations.
ian@0 630 */
ian@0 631 switch (CPUCLOCK_WHICH(timer->it_clock)) {
ian@0 632 default:
ian@0 633 BUG();
ian@0 634 case CPUCLOCK_VIRT:
ian@0 635 if (!cputime_eq(p->signal->it_virt_expires,
ian@0 636 cputime_zero) &&
ian@0 637 cputime_lt(p->signal->it_virt_expires,
ian@0 638 timer->it.cpu.expires.cpu))
ian@0 639 break;
ian@0 640 goto rebalance;
ian@0 641 case CPUCLOCK_PROF:
ian@0 642 if (!cputime_eq(p->signal->it_prof_expires,
ian@0 643 cputime_zero) &&
ian@0 644 cputime_lt(p->signal->it_prof_expires,
ian@0 645 timer->it.cpu.expires.cpu))
ian@0 646 break;
ian@0 647 i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
ian@0 648 if (i != RLIM_INFINITY &&
ian@0 649 i <= cputime_to_secs(timer->it.cpu.expires.cpu))
ian@0 650 break;
ian@0 651 goto rebalance;
ian@0 652 case CPUCLOCK_SCHED:
ian@0 653 rebalance:
ian@0 654 process_timer_rebalance(
ian@0 655 timer->it.cpu.task,
ian@0 656 CPUCLOCK_WHICH(timer->it_clock),
ian@0 657 timer->it.cpu.expires, now);
ian@0 658 break;
ian@0 659 }
ian@0 660 }
ian@0 661 }
ian@0 662
ian@0 663 spin_unlock(&p->sighand->siglock);
ian@0 664 }
ian@0 665
ian@0 666 /*
ian@0 667 * The timer is locked, fire it and arrange for its reload.
ian@0 668 */
ian@0 669 static void cpu_timer_fire(struct k_itimer *timer)
ian@0 670 {
ian@0 671 if (unlikely(timer->sigq == NULL)) {
ian@0 672 /*
ian@0 673 * This a special case for clock_nanosleep,
ian@0 674 * not a normal timer from sys_timer_create.
ian@0 675 */
ian@0 676 wake_up_process(timer->it_process);
ian@0 677 timer->it.cpu.expires.sched = 0;
ian@0 678 } else if (timer->it.cpu.incr.sched == 0) {
ian@0 679 /*
ian@0 680 * One-shot timer. Clear it as soon as it's fired.
ian@0 681 */
ian@0 682 posix_timer_event(timer, 0);
ian@0 683 timer->it.cpu.expires.sched = 0;
ian@0 684 } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
ian@0 685 /*
ian@0 686 * The signal did not get queued because the signal
ian@0 687 * was ignored, so we won't get any callback to
ian@0 688 * reload the timer. But we need to keep it
ian@0 689 * ticking in case the signal is deliverable next time.
ian@0 690 */
ian@0 691 posix_cpu_timer_schedule(timer);
ian@0 692 }
ian@0 693 }
ian@0 694
ian@0 695 /*
ian@0 696 * Guts of sys_timer_settime for CPU timers.
ian@0 697 * This is called with the timer locked and interrupts disabled.
ian@0 698 * If we return TIMER_RETRY, it's necessary to release the timer's lock
ian@0 699 * and try again. (This happens when the timer is in the middle of firing.)
ian@0 700 */
ian@0 701 int posix_cpu_timer_set(struct k_itimer *timer, int flags,
ian@0 702 struct itimerspec *new, struct itimerspec *old)
ian@0 703 {
ian@0 704 struct task_struct *p = timer->it.cpu.task;
ian@0 705 union cpu_time_count old_expires, new_expires, val;
ian@0 706 int ret;
ian@0 707
ian@0 708 if (unlikely(p == NULL)) {
ian@0 709 /*
ian@0 710 * Timer refers to a dead task's clock.
ian@0 711 */
ian@0 712 return -ESRCH;
ian@0 713 }
ian@0 714
ian@0 715 new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
ian@0 716
ian@0 717 read_lock(&tasklist_lock);
ian@0 718 /*
ian@0 719 * We need the tasklist_lock to protect against reaping that
ian@0 720 * clears p->signal. If p has just been reaped, we can no
ian@0 721 * longer get any information about it at all.
ian@0 722 */
ian@0 723 if (unlikely(p->signal == NULL)) {
ian@0 724 read_unlock(&tasklist_lock);
ian@0 725 put_task_struct(p);
ian@0 726 timer->it.cpu.task = NULL;
ian@0 727 return -ESRCH;
ian@0 728 }
ian@0 729
ian@0 730 /*
ian@0 731 * Disarm any old timer after extracting its expiry time.
ian@0 732 */
ian@0 733 BUG_ON(!irqs_disabled());
ian@0 734
ian@0 735 ret = 0;
ian@0 736 spin_lock(&p->sighand->siglock);
ian@0 737 old_expires = timer->it.cpu.expires;
ian@0 738 if (unlikely(timer->it.cpu.firing)) {
ian@0 739 timer->it.cpu.firing = -1;
ian@0 740 ret = TIMER_RETRY;
ian@0 741 } else
ian@0 742 list_del_init(&timer->it.cpu.entry);
ian@0 743 spin_unlock(&p->sighand->siglock);
ian@0 744
ian@0 745 /*
ian@0 746 * We need to sample the current value to convert the new
ian@0 747 * value from relative to absolute, and to convert the
ian@0 748 * old value from absolute to relative. To set a process
ian@0 749 * timer, we need a sample to balance the thread expiry
ian@0 750 * times (in arm_timer). With an absolute time, we must
ian@0 751 * check if it's already passed. In short, we need a sample.
ian@0 752 */
ian@0 753 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
ian@0 754 cpu_clock_sample(timer->it_clock, p, &val);
ian@0 755 } else {
ian@0 756 cpu_clock_sample_group(timer->it_clock, p, &val);
ian@0 757 }
ian@0 758
ian@0 759 if (old) {
ian@0 760 if (old_expires.sched == 0) {
ian@0 761 old->it_value.tv_sec = 0;
ian@0 762 old->it_value.tv_nsec = 0;
ian@0 763 } else {
ian@0 764 /*
ian@0 765 * Update the timer in case it has
ian@0 766 * overrun already. If it has,
ian@0 767 * we'll report it as having overrun
ian@0 768 * and with the next reloaded timer
ian@0 769 * already ticking, though we are
ian@0 770 * swallowing that pending
ian@0 771 * notification here to install the
ian@0 772 * new setting.
ian@0 773 */
ian@0 774 bump_cpu_timer(timer, val);
ian@0 775 if (cpu_time_before(timer->it_clock, val,
ian@0 776 timer->it.cpu.expires)) {
ian@0 777 old_expires = cpu_time_sub(
ian@0 778 timer->it_clock,
ian@0 779 timer->it.cpu.expires, val);
ian@0 780 sample_to_timespec(timer->it_clock,
ian@0 781 old_expires,
ian@0 782 &old->it_value);
ian@0 783 } else {
ian@0 784 old->it_value.tv_nsec = 1;
ian@0 785 old->it_value.tv_sec = 0;
ian@0 786 }
ian@0 787 }
ian@0 788 }
ian@0 789
ian@0 790 if (unlikely(ret)) {
ian@0 791 /*
ian@0 792 * We are colliding with the timer actually firing.
ian@0 793 * Punt after filling in the timer's old value, and
ian@0 794 * disable this firing since we are already reporting
ian@0 795 * it as an overrun (thanks to bump_cpu_timer above).
ian@0 796 */
ian@0 797 read_unlock(&tasklist_lock);
ian@0 798 goto out;
ian@0 799 }
ian@0 800
ian@0 801 if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
ian@0 802 cpu_time_add(timer->it_clock, &new_expires, val);
ian@0 803 }
ian@0 804
ian@0 805 /*
ian@0 806 * Install the new expiry time (or zero).
ian@0 807 * For a timer with no notification action, we don't actually
ian@0 808 * arm the timer (we'll just fake it for timer_gettime).
ian@0 809 */
ian@0 810 timer->it.cpu.expires = new_expires;
ian@0 811 if (new_expires.sched != 0 &&
ian@0 812 (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
ian@0 813 cpu_time_before(timer->it_clock, val, new_expires)) {
ian@0 814 arm_timer(timer, val);
ian@0 815 }
ian@0 816
ian@0 817 read_unlock(&tasklist_lock);
ian@0 818
ian@0 819 /*
ian@0 820 * Install the new reload setting, and
ian@0 821 * set up the signal and overrun bookkeeping.
ian@0 822 */
ian@0 823 timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
ian@0 824 &new->it_interval);
ian@0 825
ian@0 826 /*
ian@0 827 * This acts as a modification timestamp for the timer,
ian@0 828 * so any automatic reload attempt will punt on seeing
ian@0 829 * that we have reset the timer manually.
ian@0 830 */
ian@0 831 timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
ian@0 832 ~REQUEUE_PENDING;
ian@0 833 timer->it_overrun_last = 0;
ian@0 834 timer->it_overrun = -1;
ian@0 835
ian@0 836 if (new_expires.sched != 0 &&
ian@0 837 (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
ian@0 838 !cpu_time_before(timer->it_clock, val, new_expires)) {
ian@0 839 /*
ian@0 840 * The designated time already passed, so we notify
ian@0 841 * immediately, even if the thread never runs to
ian@0 842 * accumulate more time on this clock.
ian@0 843 */
ian@0 844 cpu_timer_fire(timer);
ian@0 845 }
ian@0 846
ian@0 847 ret = 0;
ian@0 848 out:
ian@0 849 if (old) {
ian@0 850 sample_to_timespec(timer->it_clock,
ian@0 851 timer->it.cpu.incr, &old->it_interval);
ian@0 852 }
ian@0 853 return ret;
ian@0 854 }
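/*
 * Caller-side sketch (added; paraphrased from the generic
 * sys_timer_settime() path in kernel/posix-timers.c -- see that file
 * for the exact code): on TIMER_RETRY the caller drops the timer's
 * lock and simply calls in again:
 *
 *	retry:
 *		timr = lock_timer(timer_id, &flag);
 *		...
 *		error = CLOCK_DISPATCH(timr->it_clock, timer_set,
 *				       (timr, flags, &new_spec, rtn));
 *		unlock_timer(timr, flag);
 *		if (error == TIMER_RETRY) {
 *			rtn = NULL;	// old value already captured
 *			goto retry;
 *		}
 *
 * Meanwhile the firing path sees it.cpu.firing == -1 and suppresses the
 * duplicate event (see run_posix_cpu_timers below).
 */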
ian@0 855
ian@0 856 void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
ian@0 857 {
ian@0 858 union cpu_time_count now;
ian@0 859 struct task_struct *p = timer->it.cpu.task;
ian@0 860 int clear_dead;
ian@0 861
ian@0 862 /*
ian@0 863 * Easy part: convert the reload time.
ian@0 864 */
ian@0 865 sample_to_timespec(timer->it_clock,
ian@0 866 timer->it.cpu.incr, &itp->it_interval);
ian@0 867
ian@0 868 if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all. */
ian@0 869 itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
ian@0 870 return;
ian@0 871 }
ian@0 872
ian@0 873 if (unlikely(p == NULL)) {
ian@0 874 /*
ian@0 875 * This task already died and the timer will never fire.
ian@0 876 * In this case, expires is actually the dead value.
ian@0 877 */
ian@0 878 dead:
ian@0 879 sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
ian@0 880 &itp->it_value);
ian@0 881 return;
ian@0 882 }
ian@0 883
ian@0 884 /*
ian@0 885 * Sample the clock to take the difference with the expiry time.
ian@0 886 */
ian@0 887 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
ian@0 888 cpu_clock_sample(timer->it_clock, p, &now);
ian@0 889 clear_dead = p->exit_state;
ian@0 890 } else {
ian@0 891 read_lock(&tasklist_lock);
ian@0 892 if (unlikely(p->signal == NULL)) {
ian@0 893 /*
ian@0 894 * The process has been reaped.
ian@0 895 * We can't even collect a sample any more.
ian@0 896 * Call the timer disarmed, nothing else to do.
ian@0 897 */
ian@0 898 put_task_struct(p);
ian@0 899 timer->it.cpu.task = NULL;
ian@0 900 timer->it.cpu.expires.sched = 0;
ian@0 901 read_unlock(&tasklist_lock);
ian@0 902 goto dead;
ian@0 903 } else {
ian@0 904 cpu_clock_sample_group(timer->it_clock, p, &now);
ian@0 905 clear_dead = (unlikely(p->exit_state) &&
ian@0 906 thread_group_empty(p));
ian@0 907 }
ian@0 908 read_unlock(&tasklist_lock);
ian@0 909 }
ian@0 910
ian@0 911 if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
ian@0 912 if (timer->it.cpu.incr.sched == 0 &&
ian@0 913 cpu_time_before(timer->it_clock,
ian@0 914 timer->it.cpu.expires, now)) {
ian@0 915 /*
ian@0 916 * Do-nothing timer expired and has no reload,
ian@0 917 * so it's as if it was never set.
ian@0 918 */
ian@0 919 timer->it.cpu.expires.sched = 0;
ian@0 920 itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
ian@0 921 return;
ian@0 922 }
ian@0 923 /*
ian@0 924 * Account for any expirations and reloads that should
ian@0 925 * have happened.
ian@0 926 */
ian@0 927 bump_cpu_timer(timer, now);
ian@0 928 }
ian@0 929
ian@0 930 if (unlikely(clear_dead)) {
ian@0 931 /*
ian@0 932 * We've noticed that the thread is dead, but
ian@0 933 * not yet reaped. Take this opportunity to
ian@0 934 * drop our task ref.
ian@0 935 */
ian@0 936 clear_dead_task(timer, now);
ian@0 937 goto dead;
ian@0 938 }
ian@0 939
ian@0 940 if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
ian@0 941 sample_to_timespec(timer->it_clock,
ian@0 942 cpu_time_sub(timer->it_clock,
ian@0 943 timer->it.cpu.expires, now),
ian@0 944 &itp->it_value);
ian@0 945 } else {
ian@0 946 /*
ian@0 947 * The timer should have expired already, but the firing
ian@0 948 * hasn't taken place yet. Say it's just about to expire.
ian@0 949 */
ian@0 950 itp->it_value.tv_nsec = 1;
ian@0 951 itp->it_value.tv_sec = 0;
ian@0 952 }
ian@0 953 }
ian@0 954
ian@0 955 /*
ian@0 956 * Check for any per-thread CPU timers that have fired and move them off
ian@0 957 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
ian@0 958 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
ian@0 959 */
ian@0 960 static void check_thread_timers(struct task_struct *tsk,
ian@0 961 struct list_head *firing)
ian@0 962 {
ian@0 963 int maxfire;
ian@0 964 struct list_head *timers = tsk->cpu_timers;
ian@0 965
ian@0 966 maxfire = 20;
ian@0 967 tsk->it_prof_expires = cputime_zero;
ian@0 968 while (!list_empty(timers)) {
ian@0 969 struct cpu_timer_list *t = list_entry(timers->next,
ian@0 970 struct cpu_timer_list,
ian@0 971 entry);
ian@0 972 if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
ian@0 973 tsk->it_prof_expires = t->expires.cpu;
ian@0 974 break;
ian@0 975 }
ian@0 976 t->firing = 1;
ian@0 977 list_move_tail(&t->entry, firing);
ian@0 978 }
ian@0 979
ian@0 980 ++timers;
ian@0 981 maxfire = 20;
ian@0 982 tsk->it_virt_expires = cputime_zero;
ian@0 983 while (!list_empty(timers)) {
ian@0 984 struct cpu_timer_list *t = list_entry(timers->next,
ian@0 985 struct cpu_timer_list,
ian@0 986 entry);
ian@0 987 if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
ian@0 988 tsk->it_virt_expires = t->expires.cpu;
ian@0 989 break;
ian@0 990 }
ian@0 991 t->firing = 1;
ian@0 992 list_move_tail(&t->entry, firing);
ian@0 993 }
ian@0 994
ian@0 995 ++timers;
ian@0 996 maxfire = 20;
ian@0 997 tsk->it_sched_expires = 0;
ian@0 998 while (!list_empty(timers)) {
ian@0 999 struct cpu_timer_list *t = list_entry(timers->next,
ian@0 1000 struct cpu_timer_list,
ian@0 1001 entry);
ian@0 1002 if (!--maxfire || tsk->sched_time < t->expires.sched) {
ian@0 1003 tsk->it_sched_expires = t->expires.sched;
ian@0 1004 break;
ian@0 1005 }
ian@0 1006 t->firing = 1;
ian@0 1007 list_move_tail(&t->entry, firing);
ian@0 1008 }
ian@0 1009 }
ian@0 1010
ian@0 1011 /*
ian@0 1012 * Check for any process-wide CPU timers that have fired and move them
ian@0 1013 * off the tsk->signal->cpu_timers list onto the firing list. Per-thread timers
ian@0 1014 * have already been taken off.
ian@0 1015 */
ian@0 1016 static void check_process_timers(struct task_struct *tsk,
ian@0 1017 struct list_head *firing)
ian@0 1018 {
ian@0 1019 int maxfire;
ian@0 1020 struct signal_struct *const sig = tsk->signal;
ian@0 1021 cputime_t utime, stime, ptime, virt_expires, prof_expires;
ian@0 1022 unsigned long long sched_time, sched_expires;
ian@0 1023 struct task_struct *t;
ian@0 1024 struct list_head *timers = sig->cpu_timers;
ian@0 1025
ian@0 1026 /*
ian@0 1027 * Don't sample the current process CPU clocks if there are no timers.
ian@0 1028 */
ian@0 1029 if (list_empty(&timers[CPUCLOCK_PROF]) &&
ian@0 1030 cputime_eq(sig->it_prof_expires, cputime_zero) &&
ian@0 1031 sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
ian@0 1032 list_empty(&timers[CPUCLOCK_VIRT]) &&
ian@0 1033 cputime_eq(sig->it_virt_expires, cputime_zero) &&
ian@0 1034 list_empty(&timers[CPUCLOCK_SCHED]))
ian@0 1035 return;
ian@0 1036
ian@0 1037 /*
ian@0 1038 * Collect the current process totals.
ian@0 1039 */
ian@0 1040 utime = sig->utime;
ian@0 1041 stime = sig->stime;
ian@0 1042 sched_time = sig->sched_time;
ian@0 1043 t = tsk;
ian@0 1044 do {
ian@0 1045 utime = cputime_add(utime, t->utime);
ian@0 1046 stime = cputime_add(stime, t->stime);
ian@0 1047 sched_time += t->sched_time;
ian@0 1048 t = next_thread(t);
ian@0 1049 } while (t != tsk);
ian@0 1050 ptime = cputime_add(utime, stime);
ian@0 1051
ian@0 1052 maxfire = 20;
ian@0 1053 prof_expires = cputime_zero;
ian@0 1054 while (!list_empty(timers)) {
ian@0 1055 struct cpu_timer_list *t = list_entry(timers->next,
ian@0 1056 struct cpu_timer_list,
ian@0 1057 entry);
ian@0 1058 if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
ian@0 1059 prof_expires = t->expires.cpu;
ian@0 1060 break;
ian@0 1061 }
ian@0 1062 t->firing = 1;
ian@0 1063 list_move_tail(&t->entry, firing);
ian@0 1064 }
ian@0 1065
ian@0 1066 ++timers;
ian@0 1067 maxfire = 20;
ian@0 1068 virt_expires = cputime_zero;
ian@0 1069 while (!list_empty(timers)) {
ian@0 1070 struct cpu_timer_list *t = list_entry(timers->next,
ian@0 1071 struct cpu_timer_list,
ian@0 1072 entry);
ian@0 1073 if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
ian@0 1074 virt_expires = t->expires.cpu;
ian@0 1075 break;
ian@0 1076 }
ian@0 1077 t->firing = 1;
ian@0 1078 list_move_tail(&t->entry, firing);
ian@0 1079 }
ian@0 1080
ian@0 1081 ++timers;
ian@0 1082 maxfire = 20;
ian@0 1083 sched_expires = 0;
ian@0 1084 while (!list_empty(timers)) {
ian@0 1085 struct cpu_timer_list *t = list_entry(timers->next,
ian@0 1086 struct cpu_timer_list,
ian@0 1087 entry);
ian@0 1088 if (!--maxfire || sched_time < t->expires.sched) {
ian@0 1089 sched_expires = t->expires.sched;
ian@0 1090 break;
ian@0 1091 }
ian@0 1092 t->firing = 1;
ian@0 1093 list_move_tail(&t->entry, firing);
ian@0 1094 }
ian@0 1095
ian@0 1096 /*
ian@0 1097 * Check for the special case process timers.
ian@0 1098 */
ian@0 1099 if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
ian@0 1100 if (cputime_ge(ptime, sig->it_prof_expires)) {
ian@0 1101 /* ITIMER_PROF fires and reloads. */
ian@0 1102 sig->it_prof_expires = sig->it_prof_incr;
ian@0 1103 if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
ian@0 1104 sig->it_prof_expires = cputime_add(
ian@0 1105 sig->it_prof_expires, ptime);
ian@0 1106 }
ian@0 1107 __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
ian@0 1108 }
ian@0 1109 if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
ian@0 1110 (cputime_eq(prof_expires, cputime_zero) ||
ian@0 1111 cputime_lt(sig->it_prof_expires, prof_expires))) {
ian@0 1112 prof_expires = sig->it_prof_expires;
ian@0 1113 }
ian@0 1114 }
ian@0 1115 if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
ian@0 1116 if (cputime_ge(utime, sig->it_virt_expires)) {
ian@0 1117 /* ITIMER_VIRTUAL fires and reloads. */
ian@0 1118 sig->it_virt_expires = sig->it_virt_incr;
ian@0 1119 if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
ian@0 1120 sig->it_virt_expires = cputime_add(
ian@0 1121 sig->it_virt_expires, utime);
ian@0 1122 }
ian@0 1123 __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
ian@0 1124 }
ian@0 1125 if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
ian@0 1126 (cputime_eq(virt_expires, cputime_zero) ||
ian@0 1127 cputime_lt(sig->it_virt_expires, virt_expires))) {
ian@0 1128 virt_expires = sig->it_virt_expires;
ian@0 1129 }
ian@0 1130 }
ian@0 1131 if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
ian@0 1132 unsigned long psecs = cputime_to_secs(ptime);
ian@0 1133 cputime_t x;
ian@0 1134 if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
ian@0 1135 /*
ian@0 1136 * At the hard limit, we just die.
ian@0 1137 * No need to calculate anything else now.
ian@0 1138 */
ian@0 1139 __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
ian@0 1140 return;
ian@0 1141 }
ian@0 1142 if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
ian@0 1143 /*
ian@0 1144 * At the soft limit, send a SIGXCPU every second.
ian@0 1145 */
ian@0 1146 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
ian@0 1147 if (sig->rlim[RLIMIT_CPU].rlim_cur
ian@0 1148 < sig->rlim[RLIMIT_CPU].rlim_max) {
ian@0 1149 sig->rlim[RLIMIT_CPU].rlim_cur++;
ian@0 1150 }
ian@0 1151 }
ian@0 1152 x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
ian@0 1153 if (cputime_eq(prof_expires, cputime_zero) ||
ian@0 1154 cputime_lt(x, prof_expires)) {
ian@0 1155 prof_expires = x;
ian@0 1156 }
ian@0 1157 }
ian@0 1158
ian@0 1159 if (!cputime_eq(prof_expires, cputime_zero) ||
ian@0 1160 !cputime_eq(virt_expires, cputime_zero) ||
ian@0 1161 sched_expires != 0) {
ian@0 1162 /*
ian@0 1163 * Rebalance the threads' expiry times for the remaining
ian@0 1164 * process CPU timers.
ian@0 1165 */
ian@0 1166
ian@0 1167 cputime_t prof_left, virt_left, ticks;
ian@0 1168 unsigned long long sched_left, sched;
ian@0 1169 const unsigned int nthreads = atomic_read(&sig->live);
ian@0 1170
ian@0 1171 if (!nthreads)
ian@0 1172 return;
ian@0 1173
ian@0 1174 prof_left = cputime_sub(prof_expires, utime);
ian@0 1175 prof_left = cputime_sub(prof_left, stime);
ian@240 1176 prof_left = cputime_div_non_zero(prof_left, nthreads);
ian@0 1177 virt_left = cputime_sub(virt_expires, utime);
ian@240 1178 virt_left = cputime_div_non_zero(virt_left, nthreads);
ian@0 1179 if (sched_expires) {
ian@0 1180 sched_left = sched_expires - sched_time;
ian@0 1181 do_div(sched_left, nthreads);
ian@240 1182 sched_left = max_t(unsigned long long, sched_left, 1);
ian@0 1183 } else {
ian@0 1184 sched_left = 0;
ian@0 1185 }
ian@0 1186 t = tsk;
ian@0 1187 do {
ian@0 1188 if (unlikely(t->flags & PF_EXITING))
ian@0 1189 continue;
ian@0 1190
ian@0 1191 ticks = cputime_add(cputime_add(t->utime, t->stime),
ian@0 1192 prof_left);
ian@0 1193 if (!cputime_eq(prof_expires, cputime_zero) &&
ian@0 1194 (cputime_eq(t->it_prof_expires, cputime_zero) ||
ian@0 1195 cputime_gt(t->it_prof_expires, ticks))) {
ian@0 1196 t->it_prof_expires = ticks;
ian@0 1197 }
ian@0 1198
ian@0 1199 ticks = cputime_add(t->utime, virt_left);
ian@0 1200 if (!cputime_eq(virt_expires, cputime_zero) &&
ian@0 1201 (cputime_eq(t->it_virt_expires, cputime_zero) ||
ian@0 1202 cputime_gt(t->it_virt_expires, ticks))) {
ian@0 1203 t->it_virt_expires = ticks;
ian@0 1204 }
ian@0 1205
ian@0 1206 sched = t->sched_time + sched_left;
ian@0 1207 if (sched_expires && (t->it_sched_expires == 0 ||
ian@0 1208 t->it_sched_expires > sched)) {
ian@0 1209 t->it_sched_expires = sched;
ian@0 1210 }
ian@0 1211 } while ((t = next_thread(t)) != tsk);
ian@0 1212 }
ian@0 1213 }
ian@0 1214
ian@0 1215 /*
ian@0 1216 * This is called from the signal code (via do_schedule_next_timer)
ian@0 1217 * when the last timer signal was delivered and we have to reload the timer.
ian@0 1218 */
ian@0 1219 void posix_cpu_timer_schedule(struct k_itimer *timer)
ian@0 1220 {
ian@0 1221 struct task_struct *p = timer->it.cpu.task;
ian@0 1222 union cpu_time_count now;
ian@0 1223
ian@0 1224 if (unlikely(p == NULL))
ian@0 1225 /*
ian@0 1226 * The task was cleaned up already, no future firings.
ian@0 1227 */
ian@0 1228 goto out;
ian@0 1229
ian@0 1230 /*
ian@0 1231 * Fetch the current sample and update the timer's expiry time.
ian@0 1232 */
ian@0 1233 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
ian@0 1234 cpu_clock_sample(timer->it_clock, p, &now);
ian@0 1235 bump_cpu_timer(timer, now);
ian@0 1236 if (unlikely(p->exit_state)) {
ian@0 1237 clear_dead_task(timer, now);
ian@0 1238 goto out;
ian@0 1239 }
ian@0 1240 read_lock(&tasklist_lock); /* arm_timer needs it. */
ian@0 1241 } else {
ian@0 1242 read_lock(&tasklist_lock);
ian@0 1243 if (unlikely(p->signal == NULL)) {
ian@0 1244 /*
ian@0 1245 * The process has been reaped.
ian@0 1246 * We can't even collect a sample any more.
ian@0 1247 */
ian@0 1248 put_task_struct(p);
ian@0 1249 timer->it.cpu.task = p = NULL;
ian@0 1250 timer->it.cpu.expires.sched = 0;
ian@0 1251 goto out_unlock;
ian@0 1252 } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
ian@0 1253 /*
ian@0 1254 * We've noticed that the thread is dead, but
ian@0 1255 * not yet reaped. Take this opportunity to
ian@0 1256 * drop our task ref.
ian@0 1257 */
ian@0 1258 clear_dead_task(timer, now);
ian@0 1259 goto out_unlock;
ian@0 1260 }
ian@0 1261 cpu_clock_sample_group(timer->it_clock, p, &now);
ian@0 1262 bump_cpu_timer(timer, now);
ian@0 1263 /* Leave the tasklist_lock locked for the call below. */
ian@0 1264 }
ian@0 1265
ian@0 1266 /*
ian@0 1267 * Now re-arm for the new expiry time.
ian@0 1268 */
ian@0 1269 arm_timer(timer, now);
ian@0 1270
ian@0 1271 out_unlock:
ian@0 1272 read_unlock(&tasklist_lock);
ian@0 1273
ian@0 1274 out:
ian@0 1275 timer->it_overrun_last = timer->it_overrun;
ian@0 1276 timer->it_overrun = -1;
ian@0 1277 ++timer->it_requeue_pending;
ian@0 1278 }
ian@0 1279
ian@0 1280 /*
ian@0 1281 * This is called from the timer interrupt handler. The irq handler has
ian@0 1282 * already updated our counts. We need to check if any timers fire now.
ian@0 1283 * Interrupts are disabled.
ian@0 1284 */
ian@0 1285 void run_posix_cpu_timers(struct task_struct *tsk)
ian@0 1286 {
ian@0 1287 LIST_HEAD(firing);
ian@0 1288 struct k_itimer *timer, *next;
ian@0 1289
ian@0 1290 BUG_ON(!irqs_disabled());
ian@0 1291
ian@0 1292 #define UNEXPIRED(clock) \
ian@0 1293 (cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
ian@0 1294 cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))
ian@0 1295
ian@0 1296 if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
ian@0 1297 (tsk->it_sched_expires == 0 ||
ian@0 1298 tsk->sched_time < tsk->it_sched_expires))
ian@0 1299 return;
ian@0 1300
ian@0 1301 #undef UNEXPIRED
ian@0 1302
ian@0 1303 /*
ian@0 1304 * Double-check with locks held.
ian@0 1305 */
ian@0 1306 read_lock(&tasklist_lock);
ian@0 1307 if (likely(tsk->signal != NULL)) {
ian@0 1308 spin_lock(&tsk->sighand->siglock);
ian@0 1309
ian@0 1310 /*
ian@0 1311 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
ian@0 1312 * all the timers that are firing, and put them on the firing list.
ian@0 1313 */
ian@0 1314 check_thread_timers(tsk, &firing);
ian@0 1315 check_process_timers(tsk, &firing);
ian@0 1316
ian@0 1317 /*
ian@0 1318 * We must release these locks before taking any timer's lock.
ian@0 1319 * There is a potential race with timer deletion here, as the
ian@0 1320 * siglock now protects our private firing list. We have set
ian@0 1321 * the firing flag in each timer, so that a deletion attempt
ian@0 1322 * that gets the timer lock before we do will give it up and
ian@0 1323 * spin until we've taken care of that timer below.
ian@0 1324 */
ian@0 1325 spin_unlock(&tsk->sighand->siglock);
ian@0 1326 }
ian@0 1327 read_unlock(&tasklist_lock);
ian@0 1328
ian@0 1329 /*
ian@0 1330 * Now that all the timers on our list have the firing flag,
ian@0 1331 * no one will touch their list entries but us. We'll take
ian@0 1332 * each timer's lock before clearing its firing flag, so no
ian@0 1333 * timer call will interfere.
ian@0 1334 */
ian@0 1335 list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
ian@0 1336 int firing;
ian@0 1337 spin_lock(&timer->it_lock);
ian@0 1338 list_del_init(&timer->it.cpu.entry);
ian@0 1339 firing = timer->it.cpu.firing;
ian@0 1340 timer->it.cpu.firing = 0;
ian@0 1341 /*
ian@0 1342 * The firing flag is -1 if we collided with a reset
ian@0 1343 * of the timer, which already reported this
ian@0 1344 * almost-firing as an overrun. So don't generate an event.
ian@0 1345 */
ian@0 1346 if (likely(firing >= 0)) {
ian@0 1347 cpu_timer_fire(timer);
ian@0 1348 }
ian@0 1349 spin_unlock(&timer->it_lock);
ian@0 1350 }
ian@0 1351 }
ian@0 1352
ian@0 1353 /*
ian@0 1354 * Set one of the process-wide special case CPU timers.
ian@0 1355 * The tasklist_lock and tsk->sighand->siglock must be held by the caller.
ian@0 1356 * The oldval argument is null for the RLIMIT_CPU timer, where *newval is
ian@0 1357 * absolute; non-null for ITIMER_*, where *newval is relative and we update
ian@0 1358 * it to be absolute, *oldval is absolute and we update it to be relative.
ian@0 1359 */
ian@0 1360 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
ian@0 1361 cputime_t *newval, cputime_t *oldval)
ian@0 1362 {
ian@0 1363 union cpu_time_count now;
ian@0 1364 struct list_head *head;
ian@0 1365
ian@0 1366 BUG_ON(clock_idx == CPUCLOCK_SCHED);
ian@0 1367 cpu_clock_sample_group_locked(clock_idx, tsk, &now);
ian@0 1368
ian@0 1369 if (oldval) {
ian@0 1370 if (!cputime_eq(*oldval, cputime_zero)) {
ian@0 1371 if (cputime_le(*oldval, now.cpu)) {
ian@0 1372 /* Just about to fire. */
ian@0 1373 *oldval = jiffies_to_cputime(1);
ian@0 1374 } else {
ian@0 1375 *oldval = cputime_sub(*oldval, now.cpu);
ian@0 1376 }
ian@0 1377 }
ian@0 1378
ian@0 1379 if (cputime_eq(*newval, cputime_zero))
ian@0 1380 return;
ian@0 1381 *newval = cputime_add(*newval, now.cpu);
ian@0 1382
ian@0 1383 /*
ian@0 1384 * If the RLIMIT_CPU timer will expire before the
ian@0 1385 * ITIMER_PROF timer, we have nothing else to do.
ian@0 1386 */
ian@0 1387 if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
ian@0 1388 < cputime_to_secs(*newval))
ian@0 1389 return;
ian@0 1390 }
ian@0 1391
ian@0 1392 /*
ian@0 1393 * Check whether there are any process timers already set to fire
ian@0 1394 * before this one. If so, we don't have anything more to do.
ian@0 1395 */
ian@0 1396 head = &tsk->signal->cpu_timers[clock_idx];
ian@0 1397 if (list_empty(head) ||
ian@0 1398 cputime_ge(list_entry(head->next,
ian@0 1399 struct cpu_timer_list, entry)->expires.cpu,
ian@0 1400 *newval)) {
ian@0 1401 /*
ian@0 1402 * Rejigger each thread's expiry time so that one will
ian@0 1403 * notice before we hit the process-cumulative expiry time.
ian@0 1404 */
ian@0 1405 union cpu_time_count expires = { .sched = 0 };
ian@0 1406 expires.cpu = *newval;
ian@0 1407 process_timer_rebalance(tsk, clock_idx, expires, now);
ian@0 1408 }
ian@0 1409 }
ian@0 1410
ian@0 1411 static long posix_cpu_clock_nanosleep_restart(struct restart_block *);
ian@0 1412
ian@0 1413 int posix_cpu_nsleep(const clockid_t which_clock, int flags,
ian@0 1414 struct timespec *rqtp, struct timespec __user *rmtp)
ian@0 1415 {
ian@0 1416 struct restart_block *restart_block =
ian@0 1417 &current_thread_info()->restart_block;
ian@0 1418 struct k_itimer timer;
ian@0 1419 int error;
ian@0 1420
ian@0 1421 /*
ian@0 1422 * Diagnose required errors first.
ian@0 1423 */
ian@0 1424 if (CPUCLOCK_PERTHREAD(which_clock) &&
ian@0 1425 (CPUCLOCK_PID(which_clock) == 0 ||
ian@0 1426 CPUCLOCK_PID(which_clock) == current->pid))
ian@0 1427 return -EINVAL;
ian@0 1428
ian@0 1429 /*
ian@0 1430 * Set up a temporary timer and then wait for it to go off.
ian@0 1431 */
ian@0 1432 memset(&timer, 0, sizeof timer);
ian@0 1433 spin_lock_init(&timer.it_lock);
ian@0 1434 timer.it_clock = which_clock;
ian@0 1435 timer.it_overrun = -1;
ian@0 1436 error = posix_cpu_timer_create(&timer);
ian@0 1437 timer.it_process = current;
ian@0 1438 if (!error) {
ian@0 1439 static struct itimerspec zero_it;
ian@0 1440 struct itimerspec it = { .it_value = *rqtp,
ian@0 1441 .it_interval = {} };
ian@0 1442
ian@0 1443 spin_lock_irq(&timer.it_lock);
ian@0 1444 error = posix_cpu_timer_set(&timer, flags, &it, NULL);
ian@0 1445 if (error) {
ian@0 1446 spin_unlock_irq(&timer.it_lock);
ian@0 1447 return error;
ian@0 1448 }
ian@0 1449
ian@0 1450 while (!signal_pending(current)) {
ian@0 1451 if (timer.it.cpu.expires.sched == 0) {
ian@0 1452 /*
ian@0 1453 * Our timer fired and was reset.
ian@0 1454 */
ian@0 1455 spin_unlock_irq(&timer.it_lock);
ian@0 1456 return 0;
ian@0 1457 }
ian@0 1458
ian@0 1459 /*
ian@0 1460 * Block until cpu_timer_fire (or a signal) wakes us.
ian@0 1461 */
ian@0 1462 __set_current_state(TASK_INTERRUPTIBLE);
ian@0 1463 spin_unlock_irq(&timer.it_lock);
ian@0 1464 schedule();
ian@0 1465 spin_lock_irq(&timer.it_lock);
ian@0 1466 }
ian@0 1467
ian@0 1468 /*
ian@0 1469 * We were interrupted by a signal.
ian@0 1470 */
ian@0 1471 sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
ian@0 1472 posix_cpu_timer_set(&timer, 0, &zero_it, &it);
ian@0 1473 spin_unlock_irq(&timer.it_lock);
ian@0 1474
ian@0 1475 if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
ian@0 1476 /*
ian@0 1477 * It actually did fire already.
ian@0 1478 */
ian@0 1479 return 0;
ian@0 1480 }
ian@0 1481
ian@0 1482 /*
ian@0 1483 * Report back to the user the time still remaining.
ian@0 1484 */
ian@0 1485 if (rmtp != NULL && !(flags & TIMER_ABSTIME) &&
ian@0 1486 copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
ian@0 1487 return -EFAULT;
ian@0 1488
ian@0 1489 restart_block->fn = posix_cpu_clock_nanosleep_restart;
ian@0 1490 /* Caller already set restart_block->arg1 */
ian@0 1491 restart_block->arg0 = which_clock;
ian@0 1492 restart_block->arg1 = (unsigned long) rmtp;
ian@0 1493 restart_block->arg2 = rqtp->tv_sec;
ian@0 1494 restart_block->arg3 = rqtp->tv_nsec;
ian@0 1495
ian@0 1496 error = -ERESTART_RESTARTBLOCK;
ian@0 1497 }
ian@0 1498
ian@0 1499 return error;
ian@0 1500 }
ian@0 1501
ian@0 1502 static long
ian@0 1503 posix_cpu_clock_nanosleep_restart(struct restart_block *restart_block)
ian@0 1504 {
ian@0 1505 clockid_t which_clock = restart_block->arg0;
ian@0 1506 struct timespec __user *rmtp;
ian@0 1507 struct timespec t;
ian@0 1508
ian@0 1509 rmtp = (struct timespec __user *) restart_block->arg1;
ian@0 1510 t.tv_sec = restart_block->arg2;
ian@0 1511 t.tv_nsec = restart_block->arg3;
ian@0 1512
ian@0 1513 restart_block->fn = do_no_restart_syscall;
ian@0 1514 return posix_cpu_nsleep(which_clock, TIMER_ABSTIME, &t, rmtp);
ian@0 1515 }
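/*
 * Userspace usage sketch (added, illustrative): with the process clock,
 * clock_nanosleep() blocks until the whole process has accumulated the
 * requested CPU time.  The clock only advances while the process's
 * threads actually run, so a single-threaded caller would only be woken
 * by a signal:
 *
 *	#include <time.h>
 *
 *	struct timespec t;
 *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &t);
 *	t.tv_sec += 2;	// wake after 2 more CPU-seconds are consumed
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, TIMER_ABSTIME, &t, NULL);
 *
 * A relative sleep (flags == 0) is converted to an absolute expiry by
 * posix_cpu_timer_set() above, and an interrupted sleep restarts here
 * via posix_cpu_clock_nanosleep_restart() with TIMER_ABSTIME.
 */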
ian@0 1516
ian@0 1517
ian@0 1518 #define PROCESS_CLOCK MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
ian@0 1519 #define THREAD_CLOCK MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
ian@0 1520
ian@0 1521 static int process_cpu_clock_getres(const clockid_t which_clock,
ian@0 1522 struct timespec *tp)
ian@0 1523 {
ian@0 1524 return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
ian@0 1525 }
ian@0 1526 static int process_cpu_clock_get(const clockid_t which_clock,
ian@0 1527 struct timespec *tp)
ian@0 1528 {
ian@0 1529 return posix_cpu_clock_get(PROCESS_CLOCK, tp);
ian@0 1530 }
ian@0 1531 static int process_cpu_timer_create(struct k_itimer *timer)
ian@0 1532 {
ian@0 1533 timer->it_clock = PROCESS_CLOCK;
ian@0 1534 return posix_cpu_timer_create(timer);
ian@0 1535 }
ian@0 1536 static int process_cpu_nsleep(const clockid_t which_clock, int flags,
ian@0 1537 struct timespec *rqtp,
ian@0 1538 struct timespec __user *rmtp)
ian@0 1539 {
ian@0 1540 return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
ian@0 1541 }
ian@0 1542 static int thread_cpu_clock_getres(const clockid_t which_clock,
ian@0 1543 struct timespec *tp)
ian@0 1544 {
ian@0 1545 return posix_cpu_clock_getres(THREAD_CLOCK, tp);
ian@0 1546 }
ian@0 1547 static int thread_cpu_clock_get(const clockid_t which_clock,
ian@0 1548 struct timespec *tp)
ian@0 1549 {
ian@0 1550 return posix_cpu_clock_get(THREAD_CLOCK, tp);
ian@0 1551 }
ian@0 1552 static int thread_cpu_timer_create(struct k_itimer *timer)
ian@0 1553 {
ian@0 1554 timer->it_clock = THREAD_CLOCK;
ian@0 1555 return posix_cpu_timer_create(timer);
ian@0 1556 }
ian@0 1557 static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
ian@0 1558 struct timespec *rqtp, struct timespec __user *rmtp)
ian@0 1559 {
ian@0 1560 return -EINVAL;
ian@0 1561 }
ian@0 1562
ian@0 1563 static __init int init_posix_cpu_timers(void)
ian@0 1564 {
ian@0 1565 struct k_clock process = {
ian@0 1566 .clock_getres = process_cpu_clock_getres,
ian@0 1567 .clock_get = process_cpu_clock_get,
ian@0 1568 .clock_set = do_posix_clock_nosettime,
ian@0 1569 .timer_create = process_cpu_timer_create,
ian@0 1570 .nsleep = process_cpu_nsleep,
ian@0 1571 };
ian@0 1572 struct k_clock thread = {
ian@0 1573 .clock_getres = thread_cpu_clock_getres,
ian@0 1574 .clock_get = thread_cpu_clock_get,
ian@0 1575 .clock_set = do_posix_clock_nosettime,
ian@0 1576 .timer_create = thread_cpu_timer_create,
ian@0 1577 .nsleep = thread_cpu_nsleep,
ian@0 1578 };
ian@0 1579
ian@0 1580 register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
ian@0 1581 register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
ian@0 1582
ian@0 1583 return 0;
ian@0 1584 }
ian@0 1585 __initcall(init_posix_cpu_timers);
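
The two k_clock structures registered above back CLOCK_PROCESS_CPUTIME_ID and
CLOCK_THREAD_CPUTIME_ID as seen from userspace. A minimal consumer-side sketch
(hypothetical demo program, not part of this tree; older glibc may need -lrt)
that arms a one-shot CPU-time timer on the process clock:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

static volatile sig_atomic_t fired;

static void handler(int sig)
{
	(void)sig;
	fired = 1;
}

int main(void)
{
	struct sigevent sev;
	struct itimerspec its;
	timer_t tid;

	signal(SIGALRM, handler);

	memset(&sev, 0, sizeof sev);
	sev.sigev_notify = SIGEV_SIGNAL;	/* deliver SIGALRM on expiry */
	sev.sigev_signo = SIGALRM;

	/* The per-thread variant would use CLOCK_THREAD_CPUTIME_ID. */
	if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid) != 0) {
		perror("timer_create");
		return 1;
	}

	memset(&its, 0, sizeof its);
	its.it_value.tv_sec = 1;	/* one-shot: after 1s of consumed CPU time */
	timer_settime(tid, 0, &its, NULL);

	while (!fired)
		;	/* spin so the CPU clock actually advances */

	puts("process consumed ~1s of CPU time");
	timer_delete(tid);
	return 0;
}

timer_settime() here lands in posix_cpu_timer_set() above; once the process has
used a second of CPU time, run_posix_cpu_timers() moves the timer onto its
firing list and cpu_timer_fire() queues the signal.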