ia64/linux-2.6.18-xen.hg

kernel/profile.c @ 0:831230e53067

Import 2.6.18 from kernel.org tarball.
author  Ian Campbell <ian.campbell@xensource.com>
date    Wed Apr 11 14:15:44 2007 +0100

/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *      Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *      William Irwin, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *      to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <asm/sections.h>
#include <asm/semaphore.h>

struct profile_hit {
        u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)

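/*
 * Illustrative geometry (editorial note, not from the original source):
 * assuming a 4 KiB PAGE_SIZE and the 8-byte struct profile_hit above,
 * each per-cpu page holds NR_PROFILE_HIT = 512 entries, organized as
 * NR_PROFILE_GRP = 64 groups of PROFILE_GRPSZ = 8 slots that are
 * probed as a unit by profile_hit() below.
 */
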
/* Oprofile timer tick hook */
int (*timer_hook)(struct pt_regs *) __read_mostly;

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;
static int prof_on __read_mostly;
static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

static int __init profile_setup(char *str)
{
        static char __initdata schedstr[] = "schedule";
        int par;

        if (!strncmp(str, schedstr, strlen(schedstr))) {
                prof_on = SCHED_PROFILING;
                if (str[strlen(schedstr)] == ',')
                        str += strlen(schedstr) + 1;
                if (get_option(&str, &par))
                        prof_shift = par;
                printk(KERN_INFO
                        "kernel schedule profiling enabled (shift: %ld)\n",
                        prof_shift);
        } else if (get_option(&str, &par)) {
                prof_shift = par;
                prof_on = CPU_PROFILING;
                printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
                        prof_shift);
        }
        return 1;
}
__setup("profile=", profile_setup);
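
/*
 * Usage sketch (illustrative, not from the original source): booting
 * with "profile=2" enables CPU-time profiling with one counter per
 * 2^2 = 4 bytes of kernel text, while "profile=schedule,2" instead
 * counts schedule() calls at the same granularity.
 */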

void __init profile_init(void)
{
        if (!prof_on)
                return;

        /* only text is profiled */
        prof_len = (_etext - _stext) >> prof_shift;
        prof_buffer = alloc_bootmem(prof_len * sizeof(atomic_t));
}
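
/*
 * Worked example (editorial, under assumed sizes): with 4 MiB of
 * kernel text and prof_shift = 2, prof_len is 1M counters, i.e.
 * roughly 4 MiB of buffer assuming a 4-byte atomic_t.
 */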

/* Profile event notifications */

#ifdef CONFIG_PROFILING

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
        blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
        int ret;
        ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
        return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
        blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
        return atomic_notifier_chain_register(&task_free_notifier, n);
}

int task_handoff_unregister(struct notifier_block *n)
{
        return atomic_notifier_chain_unregister(&task_free_notifier, n);
}

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
        int err = -EINVAL;

        switch (type) {
        case PROFILE_TASK_EXIT:
                err = blocking_notifier_chain_register(
                                &task_exit_notifier, n);
                break;
        case PROFILE_MUNMAP:
                err = blocking_notifier_chain_register(
                                &munmap_notifier, n);
                break;
        }

        return err;
}

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
        int err = -EINVAL;

        switch (type) {
        case PROFILE_TASK_EXIT:
                err = blocking_notifier_chain_unregister(
                                &task_exit_notifier, n);
                break;
        case PROFILE_MUNMAP:
                err = blocking_notifier_chain_unregister(
                                &munmap_notifier, n);
                break;
        }

        return err;
}

int register_timer_hook(int (*hook)(struct pt_regs *))
{
        if (timer_hook)
                return -EBUSY;
        timer_hook = hook;
        return 0;
}

void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
        WARN_ON(hook != timer_hook);
        timer_hook = NULL;
        /* make sure all CPUs see the NULL hook */
        synchronize_sched();  /* Allow ongoing interrupts to complete. */
}

EXPORT_SYMBOL_GPL(register_timer_hook);
EXPORT_SYMBOL_GPL(unregister_timer_hook);
EXPORT_SYMBOL_GPL(task_handoff_register);
EXPORT_SYMBOL_GPL(task_handoff_unregister);

#endif /* CONFIG_PROFILING */

EXPORT_SYMBOL_GPL(profile_event_register);
EXPORT_SYMBOL_GPL(profile_event_unregister);

#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPIs all cpus to request that they
 * flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * purpose of keeping a second hashtable is to avoid the cacheline
 * contention that would otherwise occur while flushing the pending
 * profile hits required for accurate reporting, and which would
 * resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot,
 * and each entry holds the number of pending hits to that profile
 * buffer slot on a cpu. When the hashtable overflows, all pending
 * hits are accounted to their corresponding profile buffer slots
 * with atomic_add() and the hashtable is emptied. As numerous pending
 * hits may be accounted to a profile buffer slot in a single
 * hashtable entry, this amortizes a number of atomic profile buffer
 * increments likely to be far larger than the number of entries in
 * the hashtable, particularly given that the number of distinct
 * profile buffer positions to which hits are accounted during short
 * intervals (e.g. several seconds) is usually very small. Exclusion
 * from buffer flipping is provided by interrupt disablement (note
 * that for SCHED_PROFILING profile_hit() may be called from process
 * context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- wli
 */
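/*
 * Probe-sequence note (editorial, assuming a 4 KiB PAGE_SIZE):
 * profile_hit() starts at the group selected by the low bits of the
 * slot number (primary) and, on collision, advances by a secondary
 * stride of ~(pc << 1) & (NR_PROFILE_GRP - 1) groups. Since pc << 1
 * is even, its complement is odd, so the stride is an odd number of
 * groups and the probe visits every group of the table before
 * wrapping back to the primary group.
 */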
static void __profile_flip_buffers(void *unused)
{
        int cpu = smp_processor_id();

        per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
        int i, j, cpu;

        mutex_lock(&profile_flip_mutex);
        j = per_cpu(cpu_profile_flip, get_cpu());
        put_cpu();
        on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
        for_each_online_cpu(cpu) {
                struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
                for (i = 0; i < NR_PROFILE_HIT; ++i) {
                        if (!hits[i].hits) {
                                if (hits[i].pc)
                                        hits[i].pc = 0;
                                continue;
                        }
                        atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
                        hits[i].hits = hits[i].pc = 0;
                }
        }
        mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
        int i, cpu;

        mutex_lock(&profile_flip_mutex);
        i = per_cpu(cpu_profile_flip, get_cpu());
        put_cpu();
        on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
        for_each_online_cpu(cpu) {
                struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
                memset(hits, 0, NR_PROFILE_HIT * sizeof(struct profile_hit));
        }
        mutex_unlock(&profile_flip_mutex);
}

void profile_hit(int type, void *__pc)
{
        unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
        int i, j, cpu;
        struct profile_hit *hits;

        if (prof_on != type || !prof_buffer)
                return;
        pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
        i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
        secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
        cpu = get_cpu();
        hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
        if (!hits) {
                put_cpu();
                return;
        }
        /* Interrupt disablement excludes buffer flips; see above. */
        local_irq_save(flags);
        do {
                /* Scan one group for a matching or free slot. */
                for (j = 0; j < PROFILE_GRPSZ; ++j) {
                        if (hits[i + j].pc == pc) {
                                hits[i + j].hits++;
                                goto out;
                        } else if (!hits[i + j].hits) {
                                hits[i + j].pc = pc;
                                hits[i + j].hits = 1;
                                goto out;
                        }
                }
                i = (i + secondary) & (NR_PROFILE_HIT - 1);
        } while (i != primary);
        /* Table full: account this hit and flush all pending hits. */
        atomic_inc(&prof_buffer[pc]);
        for (i = 0; i < NR_PROFILE_HIT; ++i) {
                atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
                hits[i].pc = hits[i].hits = 0;
        }
out:
        local_irq_restore(flags);
        put_cpu();
}

#ifdef CONFIG_HOTPLUG_CPU
static int __devinit profile_cpu_callback(struct notifier_block *info,
                                        unsigned long action, void *__cpu)
{
        int node, cpu = (unsigned long)__cpu;
        struct page *page;

        switch (action) {
        case CPU_UP_PREPARE:
                node = cpu_to_node(cpu);
                per_cpu(cpu_profile_flip, cpu) = 0;
                if (!per_cpu(cpu_profile_hits, cpu)[1]) {
                        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
                        if (!page)
                                return NOTIFY_BAD;
                        per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
                }
                if (!per_cpu(cpu_profile_hits, cpu)[0]) {
                        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
                        if (!page)
                                goto out_free;
                        per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
                }
                break;
        out_free:
                page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
                per_cpu(cpu_profile_hits, cpu)[1] = NULL;
                __free_page(page);
                return NOTIFY_BAD;
        case CPU_ONLINE:
                cpu_set(cpu, prof_cpu_mask);
                break;
        case CPU_UP_CANCELED:
        case CPU_DEAD:
                cpu_clear(cpu, prof_cpu_mask);
                if (per_cpu(cpu_profile_hits, cpu)[0]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
                        per_cpu(cpu_profile_hits, cpu)[0] = NULL;
                        __free_page(page);
                }
                if (per_cpu(cpu_profile_hits, cpu)[1]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
                        per_cpu(cpu_profile_hits, cpu)[1] = NULL;
                        __free_page(page);
                }
                break;
        }
        return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#else /* !CONFIG_SMP */
#define profile_flip_buffers()          do { } while (0)
#define profile_discard_flip_buffers()  do { } while (0)

void profile_hit(int type, void *__pc)
{
        unsigned long pc;

        if (prof_on != type || !prof_buffer)
                return;
        pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
        atomic_inc(&prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */

void profile_tick(int type, struct pt_regs *regs)
{
        if (type == CPU_PROFILING && timer_hook)
                timer_hook(regs);
        if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
                profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>

static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
        int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
        if (count - len < 2)
                return -EINVAL;
        len += sprintf(page + len, "\n");
        return len;
}

static int prof_cpu_mask_write_proc(struct file *file, const char __user *buffer,
                        unsigned long count, void *data)
{
        cpumask_t *mask = (cpumask_t *)data;
        unsigned long full_count = count, err;
        cpumask_t new_value;

        err = cpumask_parse(buffer, count, new_value);
        if (err)
                return err;

        *mask = new_value;
        return full_count;
}

void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
{
        struct proc_dir_entry *entry;

        /* create /proc/irq/prof_cpu_mask */
        if (!(entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir)))
                return;
        entry->nlink = 1;
        entry->data = (void *)&prof_cpu_mask;
        entry->read_proc = prof_cpu_mask_read_proc;
        entry->write_proc = prof_cpu_mask_write_proc;
}
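
/*
 * Usage sketch (illustrative, not from the original source): the mask
 * is written as a hex cpumask, so "echo 3 > /proc/irq/prof_cpu_mask"
 * would restrict profiling ticks to cpus 0 and 1.
 */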

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
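/*
 * Layout note (editorial, assuming sizeof(atomic_t) == sizeof(int)):
 * the file reads as prof_len + 1 unsigned ints, the first being the
 * sample step (1 << prof_shift) and the rest the hit counters, which
 * is why the copy below starts at p - sizeof(atomic_t) within
 * prof_buffer.
 */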
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read;
        char *pnt;
        unsigned int sample_step = 1 << prof_shift;

        profile_flip_buffers();
        if (p >= (prof_len + 1) * sizeof(unsigned int))
                return 0;
        if (count > (prof_len + 1) * sizeof(unsigned int) - p)
                count = (prof_len + 1) * sizeof(unsigned int) - p;
        read = 0;

        while (p < sizeof(unsigned int) && count > 0) {
                put_user(*((char *)(&sample_step) + p), buf);
                buf++; p++; count--; read++;
        }
        pnt = (char *)prof_buffer + p - sizeof(atomic_t);
        if (copy_to_user(buf, (void *)pnt, count))
                return -EFAULT;
        read += count;
        *ppos += read;
        return read;
}

/*
 * Writing to /proc/profile resets the counters.
 *
 * Writing a 'profiling multiplier' value into it also reconfigures
 * the profiling interrupt frequency, on architectures that support
 * this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
                        size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
        extern int setup_profiling_timer(unsigned int multiplier);

        if (count == sizeof(int)) {
                unsigned int multiplier;

                if (copy_from_user(&multiplier, buf, sizeof(int)))
                        return -EFAULT;

                if (setup_profiling_timer(multiplier))
                        return -EINVAL;
        }
#endif
        profile_discard_flip_buffers();
        memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
        return count;
}
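
/*
 * Usage sketch (illustrative, not from the original source): any
 * write clears the counters, e.g. "echo > /proc/profile"; on SMP, a
 * write of exactly sizeof(int) bytes is additionally interpreted as
 * a binary multiplier and passed to setup_profiling_timer().
 */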

static struct file_operations proc_profile_operations = {
        .read           = read_profile,
        .write          = write_profile,
};

#ifdef CONFIG_SMP
static void __init profile_nop(void *unused)
{
}

static int __init create_hash_tables(void)
{
        int cpu;

        for_each_online_cpu(cpu) {
                int node = cpu_to_node(cpu);
                struct page *page;

                page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
                if (!page)
                        goto out_cleanup;
                per_cpu(cpu_profile_hits, cpu)[1]
                                = (struct profile_hit *)page_address(page);
                page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
                if (!page)
                        goto out_cleanup;
                per_cpu(cpu_profile_hits, cpu)[0]
                                = (struct profile_hit *)page_address(page);
        }
        return 0;
out_cleanup:
        prof_on = 0;
        smp_mb();
        on_each_cpu(profile_nop, NULL, 0, 1);
        for_each_online_cpu(cpu) {
                struct page *page;

                if (per_cpu(cpu_profile_hits, cpu)[0]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
                        per_cpu(cpu_profile_hits, cpu)[0] = NULL;
                        __free_page(page);
                }
                if (per_cpu(cpu_profile_hits, cpu)[1]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
                        per_cpu(cpu_profile_hits, cpu)[1] = NULL;
                        __free_page(page);
                }
        }
        return -1;
}
#else
#define create_hash_tables()                    ({ 0; })
#endif

static int __init create_proc_profile(void)
{
        struct proc_dir_entry *entry;

        if (!prof_on)
                return 0;
        if (create_hash_tables())
                return -1;
        if (!(entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL)))
                return 0;
        entry->proc_fops = &proc_profile_operations;
        entry->size = (1 + prof_len) * sizeof(atomic_t);
        hotcpu_notifier(profile_cpu_callback, 0);
        return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */