ia64/linux-2.6.18-xen.hg: kernel/taskstats.c @ 562:66faefe721eb

/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh, IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <net/genetlink.h>
#include <asm/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

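/*
 * taskstats_seqnum provides per-cpu sequence numbers for messages that
 * the kernel generates on its own (task exit events) rather than in
 * reply to a userspace request.  family_registered is set once the
 * genetlink family and its operation have been registered and gates
 * taskstats_exit_send().  taskstats_cache is the slab cache used for
 * the per-exit taskstats buffers.
 */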
static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 };
static int family_registered;
kmem_cache_t *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1]
__read_mostly = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]  = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};

struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

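/*
 * Allocate a new sk_buff and start a generic netlink message of @size
 * payload in it.  If @info is NULL the message is being generated by
 * the kernel itself (an exit event), so a per-cpu sequence number is
 * used; otherwise the requester's pid and sequence number are echoed
 * back.
 */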
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
			void **replyp, size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = nlmsg_new(size);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = get_cpu_var(taskstats_seqnum)++;
		put_cpu_var(taskstats_seqnum);

		reply = genlmsg_put(skb, 0, seq,
				family.id, 0, 0,
				cmd, family.version);
	} else
		reply = genlmsg_put(skb, info->snd_pid, info->snd_seq,
				family.id, 0, 0,
				cmd, family.version);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	*replyp = reply;
	return 0;
}

/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, pid_t pid)
{
	struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_unicast(skb, pid);
}

/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
{
	struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
	struct listener_list *listeners;
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

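	/*
	 * Fan the message out to every listener registered for this cpu.
	 * Each listener consumes its own buffer: the original skb goes to
	 * the first listener and a fresh clone is made for each one after
	 * it.  Listeners whose sockets refuse delivery (-ECONNREFUSED)
	 * are marked invalid and pruned under the write lock below.
	 */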
	rc = 0;
	listeners = &per_cpu(listener_array, cpu);
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

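/*
 * Fill @stats with the per-task statistics of the task identified by
 * @pid.  If the caller does not pass a task_struct in @pidtsk, the task
 * is looked up under tasklist_lock; in either case a reference is held
 * on the task for the duration of the fill.
 */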
static int fill_pid(pid_t pid, struct task_struct *pidtsk,
		struct taskstats *stats)
{
	int rc = 0;
	struct task_struct *tsk = pidtsk;

	if (!pidtsk) {
		read_lock(&tasklist_lock);
		tsk = find_task_by_pid(pid);
		if (!tsk) {
			read_unlock(&tasklist_lock);
			return -ESRCH;
		}
		get_task_struct(tsk);
		read_unlock(&tasklist_lock);
	} else
		get_task_struct(tsk);

	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);
	stats->version = TASKSTATS_VERSION;

	/* Define err: label here if needed */
	put_task_struct(tsk);
	return rc;

}

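/*
 * Fill @stats with statistics aggregated over the whole thread group of
 * @tgid.  The totals already accumulated for dead threads in
 * signal->stats are copied in first, then the stats of each live thread
 * are added, skipping zombie group leaders that were already counted
 * with the dead tasks.
 */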
static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
		struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	first = tgidtsk;
	if (!first) {
		read_lock(&tasklist_lock);
		first = find_task_by_pid(tgid);
		if (!first) {
			read_unlock(&tasklist_lock);
			return -ESRCH;
		}
		get_task_struct(first);
		read_unlock(&tasklist_lock);
	} else
		get_task_struct(first);

	tsk = first;
	read_lock(&tasklist_lock);
	/* Start with stats from dead tasks */
	if (first->signal) {
		spin_lock_irqsave(&first->signal->stats_lock, flags);
		if (first->signal->stats)
			memcpy(stats, first->signal->stats, sizeof(*stats));
		spin_unlock_irqrestore(&first->signal->stats_lock, flags);
	}

	do {
		if (tsk->exit_state == EXIT_ZOMBIE && thread_group_leader(tsk))
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

	} while_each_thread(first, tsk);
	read_unlock(&tasklist_lock);
	stats->version = TASKSTATS_VERSION;

	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	put_task_struct(first);
	return 0;
}

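/*
 * Fold the exiting task's per-task statistics into the thread group's
 * accumulator at tsk->signal->stats, if one has been allocated.  The
 * accumulator is updated under signal->stats_lock; called from
 * taskstats_exit_send().
 */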
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->signal->stats_lock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
	return;
}

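/*
 * Register or deregister @pid as an exit-data listener on every cpu in
 * @maskp.  If an allocation fails part-way through registration, the
 * function falls through to the cleanup path, which removes any entries
 * for @pid that were already added.
 */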
static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp;
	unsigned int cpu;
	cpumask_t mask = *maskp;

	if (!cpus_subset(mask, cpu_possible_map))
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu_mask(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
					 cpu_to_node(cpu));
			if (!s)
				goto cleanup;
			s->pid = pid;
			INIT_LIST_HEAD(&s->list);
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_add(&s->list, &listeners->list);
			up_write(&listeners->sem);
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu_mask(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return 0;
}

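/*
 * Parse a cpumask attribute into @mask.  Returns 1 if the attribute is
 * absent, 0 on a successful parse and a negative errno on error; the
 * caller uses the distinction to tell "no cpumask supplied" apart from
 * "cpumask supplied but invalid".
 */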
static int parse(struct nlattr *na, cpumask_t *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, *mask);
	kfree(data);
	return ret;
}

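/*
 * Handle TASKSTATS_CMD_GET requests from userspace: register or
 * deregister the sender as a per-cpu exit listener if a cpumask
 * attribute is present, otherwise send back a one-shot reply carrying
 * the stats for the requested pid or tgid.
 */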
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct taskstats stats;
	void *reply;
	size_t size;
	struct nlattr *na;
	cpumask_t mask;

	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask);
	if (rc < 0)
		return rc;
	if (rc == 0)
		return add_del_listener(info->snd_pid, &mask, REGISTER);

	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask);
	if (rc < 0)
		return rc;
	if (rc == 0)
		return add_del_listener(info->snd_pid, &mask, DEREGISTER);

	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	memset(&stats, 0, sizeof(stats));
	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
	if (rc < 0)
		return rc;

	if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
		u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
		rc = fill_pid(pid, NULL, &stats);
		if (rc < 0)
			goto err;

		na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
		NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, pid);
		NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
				stats);
	} else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
		u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
		rc = fill_tgid(tgid, NULL, &stats);
		if (rc < 0)
			goto err;

		na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
		NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, tgid);
		NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
				stats);
	} else {
		rc = -EINVAL;
		goto err;
	}

	nla_nest_end(rep_skb, na);

	return send_reply(rep_skb, info->snd_pid);

nla_put_failure:
	return genlmsg_cancel(rep_skb, reply);
err:
	nlmsg_free(rep_skb);
	return rc;
}

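/*
 * Called early in the exit path.  Allocate a zeroed taskstats buffer
 * for the exiting task, but only hand it back via *ptidstats if at
 * least one listener is registered on the current cpu; otherwise the
 * buffer is freed again and taskstats_exit_send() skips sending.
 */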
void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
{
	struct listener_list *listeners;
	struct taskstats *tmp;
	/*
	 * This is the cpu on which the task is exiting currently and will
	 * be the one for which the exit event is sent, even if the cpu
	 * on which this function is running changes later.
	 */
	*mycpu = raw_smp_processor_id();

	*ptidstats = NULL;
	tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
	if (!tmp)
		return;

	listeners = &per_cpu(listener_array, *mycpu);
	down_read(&listeners->sem);
	if (!list_empty(&listeners->list)) {
		*ptidstats = tmp;
		tmp = NULL;
	}
	up_read(&listeners->sem);
	kfree(tmp);
}

/* Send pid data out on exit */
void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
			int group_dead, unsigned int mycpu)
{
	int rc;
	struct sk_buff *rep_skb;
	void *reply;
	size_t size;
	int is_thread_group;
	struct nlattr *na;
	unsigned long flags;

	if (!family_registered || !tidstats)
		return;

	spin_lock_irqsave(&tsk->signal->stats_lock, flags);
	is_thread_group = tsk->signal->stats ? 1 : 0;
	spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);

	rc = 0;
	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	if (is_thread_group)
		size = 2 * size;	/* PID + STATS + TGID + STATS */

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
	if (rc < 0)
		goto ret;

	rc = fill_pid(tsk->pid, tsk, tidstats);
	if (rc < 0)
		goto err_skb;

	na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
	NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, (u32)tsk->pid);
	NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
			*tidstats);
	nla_nest_end(rep_skb, na);

	if (!is_thread_group)
		goto send;

	/*
	 * tsk has/had a thread group so fill the tsk->signal->stats structure
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */

	fill_tgid_exit(tsk);
	if (!group_dead)
		goto send;

	na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
	NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, (u32)tsk->tgid);
	/* No locking needed for tsk->signal->stats since group is dead */
	NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
			*tsk->signal->stats);
	nla_nest_end(rep_skb, na);

send:
	send_cpu_listeners(rep_skb, mycpu);
	return;

nla_put_failure:
	genlmsg_cancel(rep_skb, reply);
	goto ret;
err_skb:
	nlmsg_free(rep_skb);
ret:
	return;
}

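/*
 * A single genetlink operation is exported: TASKSTATS_CMD_GET, handled
 * by taskstats_user_cmd() and validated against taskstats_cmd_get_policy.
 */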
static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = kmem_cache_create("taskstats_cache",
						sizeof(struct taskstats),
						0, SLAB_PANIC, NULL, NULL);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	family_registered = 1;
	return 0;
err:
	genl_unregister_family(&family);
	return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);