ia64/xen-unstable

xen/arch/ia64/linux-xen/unwind.c @ 9770:ced37bea0647

[IA64] FPH enabling + cleanup

Move contents of switch_to macro from xensystem.h to context_switch function.
Initialize FPU on all processors. FPH is always enabled in Xen.
Speed up context-switch (a little bit!) by not enabling/disabling FPH.
Cleanup (unused functions/variables/fields, debug printf...)
vmx_ia64_switch_to removed (was unused).

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Tue Apr 25 22:35:41 2006 -0600 (2006-04-25)
parents bd9cb8dc97b6
children be1b7896c203
1 /*
2 * Copyright (C) 1999-2004 Hewlett-Packard Co
3 * David Mosberger-Tang <davidm@hpl.hp.com>
4 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
5 * - Change pt_regs_off() to make it less dependent on pt_regs structure.
6 */
7 /*
8 * This file implements call frame unwind support for the Linux
9 * kernel. Parsing and processing the unwind information is
10 * time-consuming, so this implementation translates the unwind
11 * descriptors into unwind scripts. These scripts are very simple
12 * (basically a sequence of assignments) and efficient to execute.
13 * They are cached for later re-use. Each script is specific for a
14 * given instruction pointer address and the set of predicate values
15 * that the script depends on (most unwind descriptors are
16 * unconditional and scripts often do not depend on predicates at
17 * all). This code is based on the unwind conventions described in
18 * the "IA-64 Software Conventions and Runtime Architecture" manual.
19 *
20 * SMP conventions:
21 * o updates to the global unwind data (in structure "unw") are serialized
22 * by the unw.lock spinlock
23 * o each unwind script has its own read-write lock; a thread must acquire
24 * a read lock before executing a script and must acquire a write lock
25 * before modifying a script
26 * o if both the unw.lock spinlock and a script's read-write lock must be
27 * acquired, then the read-write lock must be acquired first.
28 */
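/*
 * Illustrative sketch of typical usage (the actual callers live elsewhere
 * in the tree, e.g. the stack-trace code): initialize a frame-info
 * structure for a blocked task and step frame by frame until unw_unwind()
 * fails.  Here "task" and "sw" stand for the target task_struct and its
 * switch_stack:
 *
 *	struct unw_frame_info info;
 *	unsigned long ip;
 *
 *	unw_init_frame_info(&info, task, sw);
 *	do {
 *		unw_get_ip(&info, &ip);
 *		printk("ip=0x%lx\n", ip);
 *	} while (unw_unwind(&info) >= 0);
 *
 * unw_init_frame_info() and unw_unwind() are defined below; the
 * unw_get_*() accessors come from <asm/unwind.h>.
 */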
29 #ifdef XEN
30 #include <xen/types.h>
31 #include <xen/elf.h>
32 #include <xen/kernel.h>
33 #include <xen/sched.h>
34 #include <xen/xmalloc.h>
35 #include <xen/spinlock.h>
37 // work around: provide write_trylock() here (maps to _raw_write_trylock() on SMP, always succeeds on UP)
38 #ifdef CONFIG_SMP
39 #define write_trylock(lock) _raw_write_trylock(lock)
40 #else
41 #define write_trylock(lock) ({1;})
42 #endif
44 #else
45 #include <linux/module.h>
46 #include <linux/bootmem.h>
47 #include <linux/elf.h>
48 #include <linux/kernel.h>
49 #include <linux/sched.h>
50 #include <linux/slab.h>
51 #endif
53 #include <asm/unwind.h>
55 #include <asm/delay.h>
56 #include <asm/page.h>
57 #include <asm/ptrace.h>
58 #include <asm/ptrace_offsets.h>
59 #include <asm/rse.h>
60 #include <asm/sections.h>
61 #include <asm/system.h>
62 #include <asm/uaccess.h>
64 #include "entry.h"
65 #include "unwind_i.h"
67 #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
68 #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
70 #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
71 #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
73 #define UNW_STATS 0 /* WARNING: this disables interrupts for long time-spans!! */
75 #ifdef UNW_DEBUG
76 static unsigned int unw_debug_level = UNW_DEBUG;
77 # define UNW_DEBUG_ON(n) unw_debug_level >= n
78 /* Do not add a printk level; not all debug lines end in a newline */
79 # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
80 # define inline
81 #else /* !UNW_DEBUG */
82 # define UNW_DEBUG_ON(n) 0
83 # define UNW_DPRINT(n, ...)
84 #endif /* UNW_DEBUG */
86 #if UNW_STATS
87 # define STAT(x...) x
88 #else
89 # define STAT(x...)
90 #endif
92 #ifdef XEN
93 #define alloc_reg_state() xmalloc(struct unw_reg_state)
94 #define free_reg_state(usr) xfree(usr)
95 #define alloc_labeled_state() xmalloc(struct unw_labeled_state)
96 #define free_labeled_state(usr) xfree(usr)
97 #else
98 #define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
99 #define free_reg_state(usr) kfree(usr)
100 #define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
101 #define free_labeled_state(usr) kfree(usr)
102 #endif
104 typedef unsigned long unw_word;
105 typedef unsigned char unw_hash_index_t;
107 static struct {
108 spinlock_t lock; /* spinlock for unwind data */
110 /* list of unwind tables (one per load-module) */
111 struct unw_table *tables;
113 unsigned long r0; /* constant 0 for r0 */
115 /* table of registers that prologues can save (and order in which they're saved): */
116 const unsigned char save_order[8];
118 /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
119 unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
121 unsigned short lru_head; /* index of least-recently used script */
122 unsigned short lru_tail; /* index of most-recently used script */
124 /* index into unw_frame_info for preserved register i */
125 unsigned short preg_index[UNW_NUM_REGS];
127 short pt_regs_offsets[32];
129 /* unwind table for the kernel: */
130 struct unw_table kernel_table;
132 /* unwind table describing the gate page (kernel code that is mapped into user space): */
133 size_t gate_table_size;
134 unsigned long *gate_table;
136 /* hash table that maps instruction pointer to script index: */
137 unsigned short hash[UNW_HASH_SIZE];
139 /* script cache: */
140 struct unw_script cache[UNW_CACHE_SIZE];
142 # ifdef UNW_DEBUG
143 const char *preg_name[UNW_NUM_REGS];
144 # endif
145 # if UNW_STATS
146 struct {
147 struct {
148 int lookups;
149 int hinted_hits;
150 int normal_hits;
151 int collision_chain_traversals;
152 } cache;
153 struct {
154 unsigned long build_time;
155 unsigned long run_time;
156 unsigned long parse_time;
157 int builds;
158 int news;
159 int collisions;
160 int runs;
161 } script;
162 struct {
163 unsigned long init_time;
164 unsigned long unwind_time;
165 int inits;
166 int unwinds;
167 } api;
168 } stat;
169 # endif
170 } unw = {
171 .tables = &unw.kernel_table,
172 .lock = SPIN_LOCK_UNLOCKED,
173 .save_order = {
174 UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
175 UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
176 },
177 .preg_index = {
178 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
179 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
180 offsetof(struct unw_frame_info, bsp_loc)/8,
181 offsetof(struct unw_frame_info, bspstore_loc)/8,
182 offsetof(struct unw_frame_info, pfs_loc)/8,
183 offsetof(struct unw_frame_info, rnat_loc)/8,
184 offsetof(struct unw_frame_info, psp)/8,
185 offsetof(struct unw_frame_info, rp_loc)/8,
186 offsetof(struct unw_frame_info, r4)/8,
187 offsetof(struct unw_frame_info, r5)/8,
188 offsetof(struct unw_frame_info, r6)/8,
189 offsetof(struct unw_frame_info, r7)/8,
190 offsetof(struct unw_frame_info, unat_loc)/8,
191 offsetof(struct unw_frame_info, pr_loc)/8,
192 offsetof(struct unw_frame_info, lc_loc)/8,
193 offsetof(struct unw_frame_info, fpsr_loc)/8,
194 offsetof(struct unw_frame_info, b1_loc)/8,
195 offsetof(struct unw_frame_info, b2_loc)/8,
196 offsetof(struct unw_frame_info, b3_loc)/8,
197 offsetof(struct unw_frame_info, b4_loc)/8,
198 offsetof(struct unw_frame_info, b5_loc)/8,
199 offsetof(struct unw_frame_info, f2_loc)/8,
200 offsetof(struct unw_frame_info, f3_loc)/8,
201 offsetof(struct unw_frame_info, f4_loc)/8,
202 offsetof(struct unw_frame_info, f5_loc)/8,
203 offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
204 offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
205 offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
206 offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
207 offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
208 offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
209 offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
210 offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
211 offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
212 offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
213 offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
214 offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
215 offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
216 offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
217 offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
218 offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
219 },
220 .pt_regs_offsets = {
221 [0] = -1,
222 offsetof(struct pt_regs, r1),
223 offsetof(struct pt_regs, r2),
224 offsetof(struct pt_regs, r3),
225 [4] = -1, [5] = -1, [6] = -1, [7] = -1,
226 offsetof(struct pt_regs, r8),
227 offsetof(struct pt_regs, r9),
228 offsetof(struct pt_regs, r10),
229 offsetof(struct pt_regs, r11),
230 offsetof(struct pt_regs, r12),
231 offsetof(struct pt_regs, r13),
232 offsetof(struct pt_regs, r14),
233 offsetof(struct pt_regs, r15),
234 offsetof(struct pt_regs, r16),
235 offsetof(struct pt_regs, r17),
236 offsetof(struct pt_regs, r18),
237 offsetof(struct pt_regs, r19),
238 offsetof(struct pt_regs, r20),
239 offsetof(struct pt_regs, r21),
240 offsetof(struct pt_regs, r22),
241 offsetof(struct pt_regs, r23),
242 offsetof(struct pt_regs, r24),
243 offsetof(struct pt_regs, r25),
244 offsetof(struct pt_regs, r26),
245 offsetof(struct pt_regs, r27),
246 offsetof(struct pt_regs, r28),
247 offsetof(struct pt_regs, r29),
248 offsetof(struct pt_regs, r30),
249 offsetof(struct pt_regs, r31),
250 },
251 .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
252 #ifdef UNW_DEBUG
253 .preg_name = {
254 "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
255 "r4", "r5", "r6", "r7",
256 "ar.unat", "pr", "ar.lc", "ar.fpsr",
257 "b1", "b2", "b3", "b4", "b5",
258 "f2", "f3", "f4", "f5",
259 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
260 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
261 }
262 #endif
263 };
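/*
 * read_only() below tells whether ADDR points at unw.r0, the shared
 * "constant zero" location handed out (see UNW_INSN_MOVE_CONST in
 * run_script()) for registers whose value is always 0.  The unw_access_*()
 * routines use this check to drop writes through such a pointer.
 */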
265 static inline int
266 read_only (void *addr)
267 {
268 return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0);
269 }
271 /*
272 * Returns offset of rREG in struct pt_regs.
273 */
274 static inline unsigned long
275 pt_regs_off (unsigned long reg)
276 {
277 short off = -1;
279 if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
280 off = unw.pt_regs_offsets[reg];
282 if (off < 0) {
283 UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
284 off = 0;
285 }
286 return (unsigned long) off;
287 }
289 static inline struct pt_regs *
290 get_scratch_regs (struct unw_frame_info *info)
291 {
292 if (!info->pt) {
293 /* This should not happen with valid unwind info. */
294 UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
295 if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
296 info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
297 else
298 info->pt = info->sp - 16;
299 }
300 UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
301 return (struct pt_regs *) info->pt;
302 }
304 /* Unwind accessors. */
306 int
307 unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
308 {
309 unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
310 struct unw_ireg *ireg;
311 struct pt_regs *pt;
313 if ((unsigned) regnum - 1 >= 127) {
314 if (regnum == 0 && !write) {
315 *val = 0; /* read r0 always returns 0 */
316 *nat = 0;
317 return 0;
318 }
319 UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
320 __FUNCTION__, regnum);
321 return -1;
322 }
324 if (regnum < 32) {
325 if (regnum >= 4 && regnum <= 7) {
326 /* access a preserved register */
327 ireg = &info->r4 + (regnum - 4);
328 addr = ireg->loc;
329 if (addr) {
330 nat_addr = addr + ireg->nat.off;
331 switch (ireg->nat.type) {
332 case UNW_NAT_VAL:
333 /* simulate getf.sig/setf.sig */
334 if (write) {
335 if (*nat) {
336 /* write NaTVal and be done with it */
337 addr[0] = 0;
338 addr[1] = 0x1fffe;
339 return 0;
340 }
341 addr[1] = 0x1003e;
342 } else {
343 if (addr[0] == 0 && addr[1] == 0x1ffe) {
344 /* return NaT and be done with it */
345 *val = 0;
346 *nat = 1;
347 return 0;
348 }
349 }
350 /* fall through */
351 case UNW_NAT_NONE:
352 dummy_nat = 0;
353 nat_addr = &dummy_nat;
354 break;
356 case UNW_NAT_MEMSTK:
357 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
358 break;
360 case UNW_NAT_REGSTK:
361 nat_addr = ia64_rse_rnat_addr(addr);
362 if ((unsigned long) addr < info->regstk.limit
363 || (unsigned long) addr >= info->regstk.top)
364 {
365 UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
366 "[0x%lx-0x%lx)\n",
367 __FUNCTION__, (void *) addr,
368 info->regstk.limit,
369 info->regstk.top);
370 return -1;
371 }
372 if ((unsigned long) nat_addr >= info->regstk.top)
373 nat_addr = &info->sw->ar_rnat;
374 nat_mask = (1UL << ia64_rse_slot_num(addr));
375 break;
376 }
377 } else {
378 addr = &info->sw->r4 + (regnum - 4);
379 nat_addr = &info->sw->ar_unat;
380 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
381 }
382 } else {
383 /* access a scratch register */
384 pt = get_scratch_regs(info);
385 addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
386 if (info->pri_unat_loc)
387 nat_addr = info->pri_unat_loc;
388 else
389 nat_addr = &info->sw->caller_unat;
390 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
391 }
392 } else {
393 /* access a stacked register */
394 addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
395 nat_addr = ia64_rse_rnat_addr(addr);
396 if ((unsigned long) addr < info->regstk.limit
397 || (unsigned long) addr >= info->regstk.top)
398 {
399 UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
400 "of rbs\n", __FUNCTION__);
401 return -1;
402 }
403 if ((unsigned long) nat_addr >= info->regstk.top)
404 nat_addr = &info->sw->ar_rnat;
405 nat_mask = (1UL << ia64_rse_slot_num(addr));
406 }
408 if (write) {
409 if (read_only(addr)) {
410 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
411 __FUNCTION__);
412 } else {
413 *addr = *val;
414 if (*nat)
415 *nat_addr |= nat_mask;
416 else
417 *nat_addr &= ~nat_mask;
418 }
419 } else {
420 if ((*nat_addr & nat_mask) == 0) {
421 *val = *addr;
422 *nat = 0;
423 } else {
424 *val = 0; /* if register is a NaT, *addr may contain kernel data! */
425 *nat = 1;
426 }
427 }
428 return 0;
429 }
430 EXPORT_SYMBOL(unw_access_gr);
432 int
433 unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
434 {
435 unsigned long *addr;
436 struct pt_regs *pt;
438 switch (regnum) {
439 /* scratch: */
440 case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
441 case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
442 case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
444 /* preserved: */
445 case 1: case 2: case 3: case 4: case 5:
446 addr = *(&info->b1_loc + (regnum - 1));
447 if (!addr)
448 addr = &info->sw->b1 + (regnum - 1);
449 break;
451 default:
452 UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
453 __FUNCTION__, regnum);
454 return -1;
455 }
456 if (write)
457 if (read_only(addr)) {
458 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
459 __FUNCTION__);
460 } else
461 *addr = *val;
462 else
463 *val = *addr;
464 return 0;
465 }
466 EXPORT_SYMBOL(unw_access_br);
468 int
469 unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
470 {
471 struct ia64_fpreg *addr = NULL;
472 struct pt_regs *pt;
474 if ((unsigned) (regnum - 2) >= 126) {
475 UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
476 __FUNCTION__, regnum);
477 return -1;
478 }
480 if (regnum <= 5) {
481 addr = *(&info->f2_loc + (regnum - 2));
482 if (!addr)
483 addr = &info->sw->f2 + (regnum - 2);
484 } else if (regnum <= 15) {
485 if (regnum <= 11) {
486 pt = get_scratch_regs(info);
487 //XXX struct ia64_fpreg and struct pt_fpreg are the same.
488 addr = (struct ia64_fpreg*)(&pt->f6 + (regnum - 6));
489 }
490 else
491 addr = &info->sw->f12 + (regnum - 12);
492 } else if (regnum <= 31) {
493 addr = info->fr_loc[regnum - 16];
494 if (!addr)
495 addr = &info->sw->f16 + (regnum - 16);
496 } else {
497 struct task_struct *t = info->task;
499 if (write)
500 ia64_sync_fph(t);
501 else
502 ia64_flush_fph(t);
503 #ifdef XEN
504 addr = t->arch._thread.fph + (regnum - 32);
505 #else
506 addr = t->thread.fph + (regnum - 32);
507 #endif
508 }
510 if (write)
511 if (read_only(addr)) {
512 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
513 __FUNCTION__);
514 } else
515 *addr = *val;
516 else
517 *val = *addr;
518 return 0;
519 }
520 EXPORT_SYMBOL(unw_access_fr);
522 int
523 unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
524 {
525 unsigned long *addr;
526 struct pt_regs *pt;
528 switch (regnum) {
529 case UNW_AR_BSP:
530 addr = info->bsp_loc;
531 if (!addr)
532 addr = &info->sw->ar_bspstore;
533 break;
535 case UNW_AR_BSPSTORE:
536 addr = info->bspstore_loc;
537 if (!addr)
538 addr = &info->sw->ar_bspstore;
539 break;
541 case UNW_AR_PFS:
542 addr = info->pfs_loc;
543 if (!addr)
544 addr = &info->sw->ar_pfs;
545 break;
547 case UNW_AR_RNAT:
548 addr = info->rnat_loc;
549 if (!addr)
550 addr = &info->sw->ar_rnat;
551 break;
553 case UNW_AR_UNAT:
554 addr = info->unat_loc;
555 if (!addr)
556 addr = &info->sw->caller_unat;
557 break;
559 case UNW_AR_LC:
560 addr = info->lc_loc;
561 if (!addr)
562 addr = &info->sw->ar_lc;
563 break;
565 case UNW_AR_EC:
566 if (!info->cfm_loc)
567 return -1;
568 if (write)
569 *info->cfm_loc =
570 (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
571 else
572 *val = (*info->cfm_loc >> 52) & 0x3f;
573 return 0;
575 case UNW_AR_FPSR:
576 addr = info->fpsr_loc;
577 if (!addr)
578 addr = &info->sw->ar_fpsr;
579 break;
581 case UNW_AR_RSC:
582 pt = get_scratch_regs(info);
583 addr = &pt->ar_rsc;
584 break;
586 case UNW_AR_CCV:
587 pt = get_scratch_regs(info);
588 addr = &pt->ar_ccv;
589 break;
591 case UNW_AR_CSD:
592 pt = get_scratch_regs(info);
593 addr = &pt->ar_csd;
594 break;
596 case UNW_AR_SSD:
597 pt = get_scratch_regs(info);
598 addr = &pt->ar_ssd;
599 break;
601 default:
602 UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
603 __FUNCTION__, regnum);
604 return -1;
605 }
607 if (write) {
608 if (read_only(addr)) {
609 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
610 __FUNCTION__);
611 } else
612 *addr = *val;
613 } else
614 *val = *addr;
615 return 0;
616 }
617 EXPORT_SYMBOL(unw_access_ar);
619 int
620 unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
621 {
622 unsigned long *addr;
624 addr = info->pr_loc;
625 if (!addr)
626 addr = &info->sw->pr;
628 if (write) {
629 if (read_only(addr)) {
630 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
631 __FUNCTION__);
632 } else
633 *addr = *val;
634 } else
635 *val = *addr;
636 return 0;
637 }
638 EXPORT_SYMBOL(unw_access_pr);
641 /* Routines to manipulate the state stack. */
643 static inline void
644 push (struct unw_state_record *sr)
645 {
646 struct unw_reg_state *rs;
648 rs = alloc_reg_state();
649 if (!rs) {
650 printk(KERN_ERR "unwind: cannot stack reg state!\n");
651 return;
652 }
653 memcpy(rs, &sr->curr, sizeof(*rs));
654 sr->curr.next = rs;
655 }
657 static void
658 pop (struct unw_state_record *sr)
659 {
660 struct unw_reg_state *rs = sr->curr.next;
662 if (!rs) {
663 printk(KERN_ERR "unwind: stack underflow!\n");
664 return;
665 }
666 memcpy(&sr->curr, rs, sizeof(*rs));
667 free_reg_state(rs);
668 }
670 /* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
671 static struct unw_reg_state *
672 dup_state_stack (struct unw_reg_state *rs)
673 {
674 struct unw_reg_state *copy, *prev = NULL, *first = NULL;
676 while (rs) {
677 copy = alloc_reg_state();
678 if (!copy) {
679 printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
680 return NULL;
681 }
682 memcpy(copy, rs, sizeof(*copy));
683 if (first)
684 prev->next = copy;
685 else
686 first = copy;
687 rs = rs->next;
688 prev = copy;
689 }
690 return first;
691 }
693 /* Free all stacked register states (but not RS itself). */
694 static void
695 free_state_stack (struct unw_reg_state *rs)
696 {
697 struct unw_reg_state *p, *next;
699 for (p = rs->next; p != NULL; p = next) {
700 next = p->next;
701 free_reg_state(p);
702 }
703 rs->next = NULL;
704 }
706 /* Unwind decoder routines */
708 static enum unw_register_index __attribute_const__
709 decode_abreg (unsigned char abreg, int memory)
710 {
711 switch (abreg) {
712 case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
713 case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
714 case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
715 case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
716 case 0x60: return UNW_REG_PR;
717 case 0x61: return UNW_REG_PSP;
718 case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
719 case 0x63: return UNW_REG_RP;
720 case 0x64: return UNW_REG_BSP;
721 case 0x65: return UNW_REG_BSPSTORE;
722 case 0x66: return UNW_REG_RNAT;
723 case 0x67: return UNW_REG_UNAT;
724 case 0x68: return UNW_REG_FPSR;
725 case 0x69: return UNW_REG_PFS;
726 case 0x6a: return UNW_REG_LC;
727 default:
728 break;
729 }
730 UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
731 return UNW_REG_LC;
732 }
734 static void
735 set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
736 {
737 reg->val = val;
738 reg->where = where;
739 if (reg->when == UNW_WHEN_NEVER)
740 reg->when = when;
741 }
743 static void
744 alloc_spill_area (unsigned long *offp, unsigned long regsize,
745 struct unw_reg_info *lo, struct unw_reg_info *hi)
746 {
747 struct unw_reg_info *reg;
749 for (reg = hi; reg >= lo; --reg) {
750 if (reg->where == UNW_WHERE_SPILL_HOME) {
751 reg->where = UNW_WHERE_PSPREL;
752 *offp -= regsize;
753 reg->val = *offp;
754 }
755 }
756 }
758 static inline void
759 spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
760 {
761 struct unw_reg_info *reg;
763 for (reg = *regp; reg <= lim; ++reg) {
764 if (reg->where == UNW_WHERE_SPILL_HOME) {
765 reg->when = t;
766 *regp = reg + 1;
767 return;
768 }
769 }
770 UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__);
771 }
773 static inline void
774 finish_prologue (struct unw_state_record *sr)
775 {
776 struct unw_reg_info *reg;
777 unsigned long off;
778 int i;
780 /*
781 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
782 * for Using Unwind Descriptors", rule 3):
783 */
784 for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
785 reg = sr->curr.reg + unw.save_order[i];
786 if (reg->where == UNW_WHERE_GR_SAVE) {
787 reg->where = UNW_WHERE_GR;
788 reg->val = sr->gr_save_loc++;
789 }
790 }
792 /*
793 * Next, compute when the fp, general, and branch registers get
794 * saved. This must come before alloc_spill_area() because
795 * we need to know which registers are spilled to their home
796 * locations.
797 */
798 if (sr->imask) {
799 unsigned char kind, mask = 0, *cp = sr->imask;
800 int t;
801 static const unsigned char limit[3] = {
802 UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
803 };
804 struct unw_reg_info *(regs[3]);
806 regs[0] = sr->curr.reg + UNW_REG_F2;
807 regs[1] = sr->curr.reg + UNW_REG_R4;
808 regs[2] = sr->curr.reg + UNW_REG_B1;
810 for (t = 0; t < sr->region_len; ++t) {
811 if ((t & 3) == 0)
812 mask = *cp++;
813 kind = (mask >> 2*(3-(t & 3))) & 3;
814 if (kind > 0)
815 spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
816 sr->region_start + t);
817 }
818 }
819 /*
820 * Next, lay out the memory stack spill area:
821 */
822 if (sr->any_spills) {
823 off = sr->spill_offset;
824 alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
825 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
826 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
827 }
828 }
830 /*
831 * Region header descriptors.
832 */
834 static void
835 desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
836 struct unw_state_record *sr)
837 {
838 int i, region_start;
840 if (!(sr->in_body || sr->first_region))
841 finish_prologue(sr);
842 sr->first_region = 0;
844 /* check if we're done: */
845 if (sr->when_target < sr->region_start + sr->region_len) {
846 sr->done = 1;
847 return;
848 }
850 region_start = sr->region_start + sr->region_len;
852 for (i = 0; i < sr->epilogue_count; ++i)
853 pop(sr);
854 sr->epilogue_count = 0;
855 sr->epilogue_start = UNW_WHEN_NEVER;
857 sr->region_start = region_start;
858 sr->region_len = rlen;
859 sr->in_body = body;
861 if (!body) {
862 push(sr);
864 for (i = 0; i < 4; ++i) {
865 if (mask & 0x8)
866 set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
867 sr->region_start + sr->region_len - 1, grsave++);
868 mask <<= 1;
869 }
870 sr->gr_save_loc = grsave;
871 sr->any_spills = 0;
872 sr->imask = NULL;
873 sr->spill_offset = 0x10; /* default to psp+16 */
874 }
875 }
877 /*
878 * Prologue descriptors.
879 */
881 static inline void
882 desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
883 {
884 if (abi == 3 && context == 'i') {
885 sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
886 UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
887 }
888 else
889 UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
890 __FUNCTION__, abi, context);
891 }
893 static inline void
894 desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
895 {
896 int i;
898 for (i = 0; i < 5; ++i) {
899 if (brmask & 1)
900 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
901 sr->region_start + sr->region_len - 1, gr++);
902 brmask >>= 1;
903 }
904 }
906 static inline void
907 desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
908 {
909 int i;
911 for (i = 0; i < 5; ++i) {
912 if (brmask & 1) {
913 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
914 sr->region_start + sr->region_len - 1, 0);
915 sr->any_spills = 1;
916 }
917 brmask >>= 1;
918 }
919 }
921 static inline void
922 desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
923 {
924 int i;
926 for (i = 0; i < 4; ++i) {
927 if ((grmask & 1) != 0) {
928 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
929 sr->region_start + sr->region_len - 1, 0);
930 sr->any_spills = 1;
931 }
932 grmask >>= 1;
933 }
934 for (i = 0; i < 20; ++i) {
935 if ((frmask & 1) != 0) {
936 int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
937 set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
938 sr->region_start + sr->region_len - 1, 0);
939 sr->any_spills = 1;
940 }
941 frmask >>= 1;
942 }
943 }
945 static inline void
946 desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
947 {
948 int i;
950 for (i = 0; i < 4; ++i) {
951 if ((frmask & 1) != 0) {
952 set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
953 sr->region_start + sr->region_len - 1, 0);
954 sr->any_spills = 1;
955 }
956 frmask >>= 1;
957 }
958 }
960 static inline void
961 desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
962 {
963 int i;
965 for (i = 0; i < 4; ++i) {
966 if ((grmask & 1) != 0)
967 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
968 sr->region_start + sr->region_len - 1, gr++);
969 grmask >>= 1;
970 }
971 }
973 static inline void
974 desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
975 {
976 int i;
978 for (i = 0; i < 4; ++i) {
979 if ((grmask & 1) != 0) {
980 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
981 sr->region_start + sr->region_len - 1, 0);
982 sr->any_spills = 1;
983 }
984 grmask >>= 1;
985 }
986 }
988 static inline void
989 desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
990 {
991 set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
992 sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
993 }
995 static inline void
996 desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
997 {
998 sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
999 }
1001 static inline void
1002 desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
1004 set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
1007 static inline void
1008 desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
1010 set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
1011 0x10 - 4*pspoff);
1014 static inline void
1015 desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
1017 set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
1018 4*spoff);
1021 static inline void
1022 desc_rp_br (unsigned char dst, struct unw_state_record *sr)
1024 sr->return_link_reg = dst;
1027 static inline void
1028 desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
1030 struct unw_reg_info *reg = sr->curr.reg + regnum;
1032 if (reg->where == UNW_WHERE_NONE)
1033 reg->where = UNW_WHERE_GR_SAVE;
1034 reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1037 static inline void
1038 desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
1040 sr->spill_offset = 0x10 - 4*pspoff;
1043 static inline unsigned char *
1044 desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
1046 sr->imask = imaskp;
1047 return imaskp + (2*sr->region_len + 7)/8;
1050 /*
1051 * Body descriptors.
1052 */
1053 static inline void
1054 desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
1056 sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
1057 sr->epilogue_count = ecount + 1;
1060 static inline void
1061 desc_copy_state (unw_word label, struct unw_state_record *sr)
1063 struct unw_labeled_state *ls;
1065 for (ls = sr->labeled_states; ls; ls = ls->next) {
1066 if (ls->label == label) {
1067 free_state_stack(&sr->curr);
1068 memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
1069 sr->curr.next = dup_state_stack(ls->saved_state.next);
1070 return;
1073 printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
1076 static inline void
1077 desc_label_state (unw_word label, struct unw_state_record *sr)
1079 struct unw_labeled_state *ls;
1081 ls = alloc_labeled_state();
1082 if (!ls) {
1083 printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
1084 return;
1086 ls->label = label;
1087 memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
1088 ls->saved_state.next = dup_state_stack(sr->curr.next);
1090 /* insert into list of labeled states: */
1091 ls->next = sr->labeled_states;
1092 sr->labeled_states = ls;
1095 /*
1096 * General descriptors.
1097 */
1099 static inline int
1100 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
1102 if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
1103 return 0;
1104 if (qp > 0) {
1105 if ((sr->pr_val & (1UL << qp)) == 0)
1106 return 0;
1107 sr->pr_mask |= (1UL << qp);
1109 return 1;
1112 static inline void
1113 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
1115 struct unw_reg_info *r;
1117 if (!desc_is_active(qp, t, sr))
1118 return;
1120 r = sr->curr.reg + decode_abreg(abreg, 0);
1121 r->where = UNW_WHERE_NONE;
1122 r->when = UNW_WHEN_NEVER;
1123 r->val = 0;
1126 static inline void
1127 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1128 unsigned char ytreg, struct unw_state_record *sr)
1130 enum unw_where where = UNW_WHERE_GR;
1131 struct unw_reg_info *r;
1133 if (!desc_is_active(qp, t, sr))
1134 return;
1136 if (x)
1137 where = UNW_WHERE_BR;
1138 else if (ytreg & 0x80)
1139 where = UNW_WHERE_FR;
1141 r = sr->curr.reg + decode_abreg(abreg, 0);
1142 r->where = where;
1143 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1144 r->val = (ytreg & 0x7f);
1147 static inline void
1148 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1149 struct unw_state_record *sr)
1151 struct unw_reg_info *r;
1153 if (!desc_is_active(qp, t, sr))
1154 return;
1156 r = sr->curr.reg + decode_abreg(abreg, 1);
1157 r->where = UNW_WHERE_PSPREL;
1158 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1159 r->val = 0x10 - 4*pspoff;
1162 static inline void
1163 desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1164 struct unw_state_record *sr)
1166 struct unw_reg_info *r;
1168 if (!desc_is_active(qp, t, sr))
1169 return;
1171 r = sr->curr.reg + decode_abreg(abreg, 1);
1172 r->where = UNW_WHERE_SPREL;
1173 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1174 r->val = 4*spoff;
1177 #define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1178 code);
1180 /*
1181 * region headers:
1182 */
1183 #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
1184 #define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
1185 /*
1186 * prologue descriptors:
1187 */
1188 #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
1189 #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
1190 #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
1191 #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
1192 #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
1193 #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
1194 #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
1195 #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
1196 #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
1197 #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
1198 #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
1199 #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
1200 #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
1201 #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1202 #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1203 #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1204 #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1205 #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1206 #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
1207 #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
1208 #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
1209 /*
1210 * body descriptors:
1211 */
1212 #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
1213 #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
1214 #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
1215 /*
1216 * general unwind descriptors:
1217 */
1218 #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
1219 #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
1220 #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
1221 #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
1222 #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
1223 #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
1224 #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
1225 #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
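/*
 * unwind_decoder.c (included next) supplies unw_decode(), which parses one
 * byte-encoded unwind descriptor at a time and dispatches it to the
 * UNW_DEC_* macros defined above; build_script() calls it repeatedly until
 * the state record is complete or the descriptor area is exhausted.
 */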
1227 #include "unwind_decoder.c"
1230 /* Unwind scripts. */
1232 static inline unw_hash_index_t
1233 hash (unsigned long ip)
1235 # define hashmagic 0x9e3779b97f4a7c16UL /* based on (sqrt(5)/2-1)*2^64 */
1237 return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
1238 #undef hashmagic
1241 static inline long
1242 cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
1244 read_lock(&script->lock);
1245 if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1246 /* keep the read lock... */
1247 return 1;
1248 read_unlock(&script->lock);
1249 return 0;
1252 static inline struct unw_script *
1253 script_lookup (struct unw_frame_info *info)
1255 struct unw_script *script = unw.cache + info->hint;
1256 unsigned short index;
1257 unsigned long ip, pr;
1259 if (UNW_DEBUG_ON(0))
1260 return NULL; /* Always regenerate scripts in debug mode */
1262 STAT(++unw.stat.cache.lookups);
1264 ip = info->ip;
1265 pr = info->pr;
1267 if (cache_match(script, ip, pr)) {
1268 STAT(++unw.stat.cache.hinted_hits);
1269 return script;
1272 index = unw.hash[hash(ip)];
1273 if (index >= UNW_CACHE_SIZE)
1274 return NULL;
1276 script = unw.cache + index;
1277 while (1) {
1278 if (cache_match(script, ip, pr)) {
1279 /* update hint; no locking required as single-word writes are atomic */
1280 STAT(++unw.stat.cache.normal_hits);
1281 unw.cache[info->prev_script].hint = script - unw.cache;
1282 return script;
1284 if (script->coll_chain >= UNW_HASH_SIZE)
1285 return NULL;
1286 script = unw.cache + script->coll_chain;
1287 STAT(++unw.stat.cache.collision_chain_traversals);
1291 /*
1292 * On returning, a write lock for the SCRIPT is still being held.
1293 */
1294 static inline struct unw_script *
1295 script_new (unsigned long ip)
1297 struct unw_script *script, *prev, *tmp;
1298 unw_hash_index_t index;
1299 unsigned short head;
1301 STAT(++unw.stat.script.news);
1303 /*
1304 * Can't (easily) use cmpxchg() here because of ABA problem
1305 * that is intrinsic in cmpxchg()...
1306 */
1307 head = unw.lru_head;
1308 script = unw.cache + head;
1309 unw.lru_head = script->lru_chain;
1311 /*
1312 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1313 * script->lock. Thus, if the write_trylock() fails, we simply bail out. The
1314 * alternative would be to disable interrupts whenever we hold a read-lock, but
1315 * that seems silly.
1316 */
1317 if (!write_trylock(&script->lock))
1318 return NULL;
1320 /* re-insert script at the tail of the LRU chain: */
1321 unw.cache[unw.lru_tail].lru_chain = head;
1322 unw.lru_tail = head;
1324 /* remove the old script from the hash table (if it's there): */
1325 if (script->ip) {
1326 index = hash(script->ip);
1327 tmp = unw.cache + unw.hash[index];
1328 prev = NULL;
1329 while (1) {
1330 if (tmp == script) {
1331 if (prev)
1332 prev->coll_chain = tmp->coll_chain;
1333 else
1334 unw.hash[index] = tmp->coll_chain;
1335 break;
1336 } else
1337 prev = tmp;
1338 if (tmp->coll_chain >= UNW_CACHE_SIZE)
1339 /* old script wasn't in the hash-table */
1340 break;
1341 tmp = unw.cache + tmp->coll_chain;
1345 /* enter new script in the hash table */
1346 index = hash(ip);
1347 script->coll_chain = unw.hash[index];
1348 unw.hash[index] = script - unw.cache;
1350 script->ip = ip; /* set new IP while we're holding the locks */
1352 STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1354 script->flags = 0;
1355 script->hint = 0;
1356 script->count = 0;
1357 return script;
1360 static void
1361 script_finalize (struct unw_script *script, struct unw_state_record *sr)
1363 script->pr_mask = sr->pr_mask;
1364 script->pr_val = sr->pr_val;
1365 /*
1366 * We could down-grade our write-lock on script->lock here but
1367 * the rwlock API doesn't offer atomic lock downgrading, so
1368 * we'll just keep the write-lock and release it later when
1369 * we're done using the script.
1370 */
1373 static inline void
1374 script_emit (struct unw_script *script, struct unw_insn insn)
1376 if (script->count >= UNW_MAX_SCRIPT_LEN) {
1377 UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1378 __FUNCTION__, UNW_MAX_SCRIPT_LEN);
1379 return;
1381 script->insn[script->count++] = insn;
1384 static inline void
1385 emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1387 struct unw_reg_info *r = sr->curr.reg + i;
1388 enum unw_insn_opcode opc;
1389 struct unw_insn insn;
1390 unsigned long val = 0;
1392 switch (r->where) {
1393 case UNW_WHERE_GR:
1394 if (r->val >= 32) {
1395 /* register got spilled to a stacked register */
1396 opc = UNW_INSN_SETNAT_TYPE;
1397 val = UNW_NAT_REGSTK;
1398 } else
1399 /* register got spilled to a scratch register */
1400 opc = UNW_INSN_SETNAT_MEMSTK;
1401 break;
1403 case UNW_WHERE_FR:
1404 opc = UNW_INSN_SETNAT_TYPE;
1405 val = UNW_NAT_VAL;
1406 break;
1408 case UNW_WHERE_BR:
1409 opc = UNW_INSN_SETNAT_TYPE;
1410 val = UNW_NAT_NONE;
1411 break;
1413 case UNW_WHERE_PSPREL:
1414 case UNW_WHERE_SPREL:
1415 opc = UNW_INSN_SETNAT_MEMSTK;
1416 break;
1418 default:
1419 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1420 __FUNCTION__, r->where);
1421 return;
1423 insn.opc = opc;
1424 insn.dst = unw.preg_index[i];
1425 insn.val = val;
1426 script_emit(script, insn);
1429 static void
1430 compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1432 struct unw_reg_info *r = sr->curr.reg + i;
1433 enum unw_insn_opcode opc;
1434 unsigned long val, rval;
1435 struct unw_insn insn;
1436 long need_nat_info;
1438 if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1439 return;
1441 opc = UNW_INSN_MOVE;
1442 val = rval = r->val;
1443 need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1445 switch (r->where) {
1446 case UNW_WHERE_GR:
1447 if (rval >= 32) {
1448 opc = UNW_INSN_MOVE_STACKED;
1449 val = rval - 32;
1450 } else if (rval >= 4 && rval <= 7) {
1451 if (need_nat_info) {
1452 opc = UNW_INSN_MOVE2;
1453 need_nat_info = 0;
1455 val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1456 } else if (rval == 0) {
1457 opc = UNW_INSN_MOVE_CONST;
1458 val = 0;
1459 } else {
1460 /* register got spilled to a scratch register */
1461 opc = UNW_INSN_MOVE_SCRATCH;
1462 val = pt_regs_off(rval);
1464 break;
1466 case UNW_WHERE_FR:
1467 if (rval <= 5)
1468 val = unw.preg_index[UNW_REG_F2 + (rval - 2)];
1469 else if (rval >= 16 && rval <= 31)
1470 val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1471 else {
1472 opc = UNW_INSN_MOVE_SCRATCH;
1473 if (rval <= 11)
1474 val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1475 else
1476 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1477 __FUNCTION__, rval);
1479 break;
1481 case UNW_WHERE_BR:
1482 if (rval >= 1 && rval <= 5)
1483 val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1484 else {
1485 opc = UNW_INSN_MOVE_SCRATCH;
1486 if (rval == 0)
1487 val = offsetof(struct pt_regs, b0);
1488 else if (rval == 6)
1489 val = offsetof(struct pt_regs, b6);
1490 else
1491 val = offsetof(struct pt_regs, b7);
1493 break;
1495 case UNW_WHERE_SPREL:
1496 opc = UNW_INSN_ADD_SP;
1497 break;
1499 case UNW_WHERE_PSPREL:
1500 opc = UNW_INSN_ADD_PSP;
1501 break;
1503 default:
1504 UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
1505 __FUNCTION__, i, r->where);
1506 break;
1508 insn.opc = opc;
1509 insn.dst = unw.preg_index[i];
1510 insn.val = val;
1511 script_emit(script, insn);
1512 if (need_nat_info)
1513 emit_nat_info(sr, i, script);
1515 if (i == UNW_REG_PSP) {
1516 /*
1517 * info->psp must contain the _value_ of the previous
1518 * sp, not its save location. We get this by
1519 * dereferencing the value we just stored in
1520 * info->psp:
1521 */
1522 insn.opc = UNW_INSN_LOAD;
1523 insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1524 script_emit(script, insn);
1528 static inline const struct unw_table_entry *
1529 lookup (struct unw_table *table, unsigned long rel_ip)
1531 const struct unw_table_entry *e = NULL;
1532 unsigned long lo, hi, mid;
1534 /* do a binary search for right entry: */
1535 for (lo = 0, hi = table->length; lo < hi; ) {
1536 mid = (lo + hi) / 2;
1537 e = &table->array[mid];
1538 if (rel_ip < e->start_offset)
1539 hi = mid;
1540 else if (rel_ip >= e->end_offset)
1541 lo = mid + 1;
1542 else
1543 break;
1545 if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
1546 return NULL;
1547 return e;
1550 /*
1551 * Build an unwind script that unwinds from the state described by INFO
1552 * to the entry point of the function that contains INFO->ip.
1553 */
1554 static inline struct unw_script *
1555 build_script (struct unw_frame_info *info)
1557 const struct unw_table_entry *e = NULL;
1558 struct unw_script *script = NULL;
1559 struct unw_labeled_state *ls, *next;
1560 unsigned long ip = info->ip;
1561 struct unw_state_record sr;
1562 struct unw_table *table;
1563 struct unw_reg_info *r;
1564 struct unw_insn insn;
1565 u8 *dp, *desc_end;
1566 u64 hdr;
1567 int i;
1568 STAT(unsigned long start, parse_start;)
1570 STAT(++unw.stat.script.builds; start = ia64_get_itc());
1572 /* build state record */
1573 memset(&sr, 0, sizeof(sr));
1574 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1575 r->when = UNW_WHEN_NEVER;
1576 sr.pr_val = info->pr;
1578 UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
1579 script = script_new(ip);
1580 if (!script) {
1581 UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__);
1582 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1583 return NULL;
1585 unw.cache[info->prev_script].hint = script - unw.cache;
1587 /* search the kernels and the modules' unwind tables for IP: */
1589 STAT(parse_start = ia64_get_itc());
1591 for (table = unw.tables; table; table = table->next) {
1592 if (ip >= table->start && ip < table->end) {
1593 e = lookup(table, ip - table->segment_base);
1594 break;
1597 if (!e) {
1598 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
1599 UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1600 __FUNCTION__, ip, unw.cache[info->prev_script].ip);
1601 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1602 sr.curr.reg[UNW_REG_RP].when = -1;
1603 sr.curr.reg[UNW_REG_RP].val = 0;
1604 compile_reg(&sr, UNW_REG_RP, script);
1605 script_finalize(script, &sr);
1606 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1607 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1608 return script;
1611 sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
1612 + (ip & 0xfUL));
1613 hdr = *(u64 *) (table->segment_base + e->info_offset);
1614 dp = (u8 *) (table->segment_base + e->info_offset + 8);
1615 desc_end = dp + 8*UNW_LENGTH(hdr);
1617 while (!sr.done && dp < desc_end)
1618 dp = unw_decode(dp, sr.in_body, &sr);
1620 if (sr.when_target > sr.epilogue_start) {
1621 /*
1622 * sp has been restored and all values on the memory stack below
1623 * psp also have been restored.
1624 */
1625 sr.curr.reg[UNW_REG_PSP].val = 0;
1626 sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
1627 sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
1628 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1629 if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
1630 || r->where == UNW_WHERE_SPREL)
1632 r->val = 0;
1633 r->where = UNW_WHERE_NONE;
1634 r->when = UNW_WHEN_NEVER;
1638 script->flags = sr.flags;
1640 /*
1641 * If RP didn't get saved, generate entry for the return link
1642 * register.
1643 */
1644 if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
1645 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1646 sr.curr.reg[UNW_REG_RP].when = -1;
1647 sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1648 UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1649 __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
1650 sr.curr.reg[UNW_REG_RP].val);
1653 #ifdef UNW_DEBUG
1654 UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1655 __FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
1656 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1657 if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1658 UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
1659 switch (r->where) {
1660 case UNW_WHERE_GR: UNW_DPRINT(1, "r%lu", r->val); break;
1661 case UNW_WHERE_FR: UNW_DPRINT(1, "f%lu", r->val); break;
1662 case UNW_WHERE_BR: UNW_DPRINT(1, "b%lu", r->val); break;
1663 case UNW_WHERE_SPREL: UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
1664 case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
1665 case UNW_WHERE_NONE:
1666 UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
1667 break;
1669 default:
1670 UNW_DPRINT(1, "BADWHERE(%d)", r->where);
1671 break;
1673 UNW_DPRINT(1, "\t\t%d\n", r->when);
1676 #endif
1678 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1680 /* translate state record into unwinder instructions: */
1682 /*
1683 * First, set psp if we're dealing with a fixed-size frame;
1684 * subsequent instructions may depend on this value.
1685 */
1686 if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
1687 && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
1688 && sr.curr.reg[UNW_REG_PSP].val != 0) {
1689 /* new psp is sp plus frame size */
1690 insn.opc = UNW_INSN_ADD;
1691 insn.dst = offsetof(struct unw_frame_info, psp)/8;
1692 insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */
1693 script_emit(script, insn);
1696 /* determine where the primary UNaT is: */
1697 if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1698 i = UNW_REG_PRI_UNAT_MEM;
1699 else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
1700 i = UNW_REG_PRI_UNAT_GR;
1701 else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1702 i = UNW_REG_PRI_UNAT_MEM;
1703 else
1704 i = UNW_REG_PRI_UNAT_GR;
1706 compile_reg(&sr, i, script);
1708 for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
1709 compile_reg(&sr, i, script);
1711 /* free labeled register states & stack: */
1713 STAT(parse_start = ia64_get_itc());
1714 for (ls = sr.labeled_states; ls; ls = next) {
1715 next = ls->next;
1716 free_state_stack(&ls->saved_state);
1717 free_labeled_state(ls);
1719 free_state_stack(&sr.curr);
1720 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1722 script_finalize(script, &sr);
1723 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1724 return script;
1727 /*
1728 * Apply the unwind script SCRIPT and update STATE to reflect the
1729 * machine state that existed upon entry to the function that this
1730 * script describes.
1731 */
1732 static inline void
1733 run_script (struct unw_script *script, struct unw_frame_info *state)
1735 struct unw_insn *ip, *limit, next_insn;
1736 unsigned long opc, dst, val, off;
1737 unsigned long *s = (unsigned long *) state;
1738 STAT(unsigned long start;)
1740 STAT(++unw.stat.script.runs; start = ia64_get_itc());
1741 state->flags = script->flags;
1742 ip = script->insn;
1743 limit = script->insn + script->count;
1744 next_insn = *ip;
1746 while (ip++ < limit) {
1747 opc = next_insn.opc;
1748 dst = next_insn.dst;
1749 val = next_insn.val;
1750 next_insn = *ip;
1752 redo:
1753 switch (opc) {
1754 case UNW_INSN_ADD:
1755 s[dst] += val;
1756 break;
1758 case UNW_INSN_MOVE2:
1759 if (!s[val])
1760 goto lazy_init;
1761 s[dst+1] = s[val+1];
1762 s[dst] = s[val];
1763 break;
1765 case UNW_INSN_MOVE:
1766 if (!s[val])
1767 goto lazy_init;
1768 s[dst] = s[val];
1769 break;
1771 case UNW_INSN_MOVE_SCRATCH:
1772 if (state->pt) {
1773 s[dst] = (unsigned long) get_scratch_regs(state) + val;
1774 } else {
1775 s[dst] = 0;
1776 UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1777 __FUNCTION__, dst, val);
1779 break;
1781 case UNW_INSN_MOVE_CONST:
1782 if (val == 0)
1783 s[dst] = (unsigned long) &unw.r0;
1784 else {
1785 s[dst] = 0;
1786 UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
1787 __FUNCTION__, val);
1789 break;
1792 case UNW_INSN_MOVE_STACKED:
1793 s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
1794 val);
1795 break;
1797 case UNW_INSN_ADD_PSP:
1798 s[dst] = state->psp + val;
1799 break;
1801 case UNW_INSN_ADD_SP:
1802 s[dst] = state->sp + val;
1803 break;
1805 case UNW_INSN_SETNAT_MEMSTK:
1806 if (!state->pri_unat_loc)
1807 state->pri_unat_loc = &state->sw->caller_unat;
1808 /* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
1809 s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
1810 break;
1812 case UNW_INSN_SETNAT_TYPE:
1813 s[dst+1] = val;
1814 break;
1816 case UNW_INSN_LOAD:
1817 #ifdef UNW_DEBUG
1818 if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
1819 #ifndef XEN
1820 || s[val] < TASK_SIZE
1821 #endif
1824 UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1825 __FUNCTION__, s[val]);
1826 break;
1828 #endif
1829 s[dst] = *(unsigned long *) s[val];
1830 break;
1833 STAT(unw.stat.script.run_time += ia64_get_itc() - start);
1834 return;
1836 lazy_init:
1837 off = unw.sw_off[val];
1838 s[val] = (unsigned long) state->sw + off;
1839 if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
1840 /*
1841 * We're initializing a general register: init NaT info, too. Note that
1842 * the offset is a multiple of 8 which gives us the 3 bits needed for
1843 * the type field.
1844 */
1845 s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
1846 goto redo;
1849 static int
1850 find_save_locs (struct unw_frame_info *info)
1852 int have_write_lock = 0;
1853 struct unw_script *scr;
1854 unsigned long flags = 0;
1856 if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf))
1857 #ifndef XEN
1858 || info->ip < TASK_SIZE
1859 #endif
1860 ) {
1861 /* don't let obviously bad addresses pollute the cache */
1862 /* FIXME: should really be level 0 but it occurs too often. KAO */
1863 UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
1864 info->rp_loc = NULL;
1865 return -1;
1868 scr = script_lookup(info);
1869 if (!scr) {
1870 spin_lock_irqsave(&unw.lock, flags);
1871 scr = build_script(info);
1872 if (!scr) {
1873 spin_unlock_irqrestore(&unw.lock, flags);
1874 UNW_DPRINT(0,
1875 "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1876 __FUNCTION__, info->ip);
1877 return -1;
1879 have_write_lock = 1;
1881 info->hint = scr->hint;
1882 info->prev_script = scr - unw.cache;
1884 run_script(scr, info);
1886 if (have_write_lock) {
1887 write_unlock(&scr->lock);
1888 spin_unlock_irqrestore(&unw.lock, flags);
1889 } else
1890 read_unlock(&scr->lock);
1891 return 0;
1894 int
1895 unw_unwind (struct unw_frame_info *info)
1897 unsigned long prev_ip, prev_sp, prev_bsp;
1898 unsigned long ip, pr, num_regs;
1899 STAT(unsigned long start, flags;)
1900 int retval;
1902 STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
1904 prev_ip = info->ip;
1905 prev_sp = info->sp;
1906 prev_bsp = info->bsp;
1908 /* restore the ip */
1909 if (!info->rp_loc) {
1910 /* FIXME: should really be level 0 but it occurs too often. KAO */
1911 UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
1912 __FUNCTION__, info->ip);
1913 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1914 return -1;
1916 ip = info->ip = *info->rp_loc;
1917 if (ip < GATE_ADDR) {
1918 UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
1919 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1920 return -1;
1923 /* restore the cfm: */
1924 if (!info->pfs_loc) {
1925 UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
1926 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1927 return -1;
1929 info->cfm_loc = info->pfs_loc;
1931 /* restore the bsp: */
1932 pr = info->pr;
1933 num_regs = 0;
1934 if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
1935 info->pt = info->sp + 16;
1936 if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
1937 num_regs = *info->cfm_loc & 0x7f; /* size of frame */
1938 info->pfs_loc =
1939 (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
1940 UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
1941 } else
1942 num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
1943 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
1944 if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
1945 UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1946 __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
1947 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1948 return -1;
1951 /* restore the sp: */
1952 info->sp = info->psp;
1953 if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
1954 UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1955 __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
1956 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1957 return -1;
1960 if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
1961 UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
1962 __FUNCTION__, ip);
1963 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1964 return -1;
1965 }
1967 /* as we unwind, the saved ar.unat becomes the primary unat: */
1968 info->pri_unat_loc = info->unat_loc;
1970 /* finally, restore the predicates: */
1971 unw_get_pr(info, &info->pr);
1973 retval = find_save_locs(info);
1974 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1975 return retval;
1976 }
1977 EXPORT_SYMBOL(unw_unwind);
1979 int
1980 unw_unwind_to_user (struct unw_frame_info *info)
1981 {
1982 unsigned long ip, sp, pr = 0;
1984 while (unw_unwind(info) >= 0) {
1985 unw_get_sp(info, &sp);
1986 if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
1987 < IA64_PT_REGS_SIZE) {
1988 UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
1989 __FUNCTION__);
1990 break;
1991 }
1992 if (unw_is_intr_frame(info) &&
1993 (pr & (1UL << PRED_USER_STACK)))
1994 return 0;
1995 if (unw_get_pr (info, &pr) < 0) {
1996 unw_get_rp(info, &ip);
1997 UNW_DPRINT(0, "unwind.%s: failed to read "
1998 "predicate register (ip=0x%lx)\n",
1999 __FUNCTION__, ip);
2000 return -1;
2001 }
2002 }
2003 unw_get_ip(info, &ip);
2004 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
2005 __FUNCTION__, ip);
2006 return -1;
2007 }
2008 EXPORT_SYMBOL(unw_unwind_to_user);
2010 static void
2011 init_frame_info (struct unw_frame_info *info, struct task_struct *t,
2012 struct switch_stack *sw, unsigned long stktop)
2013 {
2014 unsigned long rbslimit, rbstop, stklimit;
2015 STAT(unsigned long start, flags;)
2017 STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
2019 /*
2020 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
2021 * don't want to do that because it would be slow as each preserved register would
2022 * have to be processed. Instead, what we do here is zero out the frame info and
2023 * start the unwind process at the function that created the switch_stack frame.
2024 * When a preserved value in switch_stack needs to be accessed, run_script() will
2025 * initialize the appropriate pointer on demand.
2026 */
2027 memset(info, 0, sizeof(*info));
2029 rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
2030 rbstop = sw->ar_bspstore;
2031 if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
2032 rbstop = rbslimit;
2034 stklimit = (unsigned long) t + IA64_STK_OFFSET;
2035 if (stktop <= rbstop)
2036 stktop = rbstop;
2038 info->regstk.limit = rbslimit;
2039 info->regstk.top = rbstop;
2040 info->memstk.limit = stklimit;
2041 info->memstk.top = stktop;
2042 info->task = t;
2043 info->sw = sw;
2044 info->sp = info->psp = stktop;
2045 info->pr = sw->pr;
2046 UNW_DPRINT(3, "unwind.%s:\n"
2047 " task 0x%lx\n"
2048 " rbs = [0x%lx-0x%lx)\n"
2049 " stk = [0x%lx-0x%lx)\n"
2050 " pr 0x%lx\n"
2051 " sw 0x%lx\n"
2052 " sp 0x%lx\n",
2053 __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
2054 info->pr, (unsigned long) info->sw, info->sp);
2055 STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
2056 }
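/*
 * Illustrative aside (not from the original file): a rough sketch of the
 * single allocation that the limits above describe, assuming the usual
 * ia64 arrangement where the task/vcpu structure, its register backing
 * store (RBS) and its memory stack share one kernel-stack region:
 *
 *	t + IA64_STK_OFFSET ->	+----------------------+  <- memstk.limit
 *				|     memory stack     |  (grows down; sp)
 *				|          ...         |
 *				| register backing st. |  (grows up; bsp)
 *	t + IA64_RBS_OFFSET ->	+----------------------+  <- regstk.limit
 *				|    task structure    |
 *	t                   ->	+----------------------+
 *
 * regstk.top is taken from sw->ar_bspstore and memstk.top from the
 * caller-supplied stktop, clamped to this region by the checks above.
 */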
2058 void
2059 unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
2060 {
2061 unsigned long sol;
2063 init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
2064 info->cfm_loc = &sw->ar_pfs;
2065 sol = (*info->cfm_loc >> 7) & 0x7f;
2066 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
2067 info->ip = sw->b0;
2068 UNW_DPRINT(3, "unwind.%s:\n"
2069 " bsp 0x%lx\n"
2070 " sol 0x%lx\n"
2071 " ip 0x%lx\n",
2072 __FUNCTION__, info->bsp, sol, info->ip);
2073 find_save_locs(info);
2074 }
2076 EXPORT_SYMBOL(unw_init_frame_info);
2078 void
2079 unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
2080 {
2081 #ifdef XEN
2082 struct switch_stack *sw = (struct switch_stack *) (t->arch._thread.ksp + 16);
2083 #else
2084 struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
2085 #endif
2087 UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
2088 unw_init_frame_info(info, t, sw);
2089 }
2090 EXPORT_SYMBOL(unw_init_from_blocked_task);
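/*
 * Illustrative usage (not part of this file): a minimal backtrace helper in
 * the style of the ia64 show_stack() path, built only on the interfaces
 * defined above (unw_init_from_blocked_task, unw_unwind, unw_get_ip).  The
 * function name and printk format are assumptions made for the example.
 */
static void
example_backtrace (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	do {
		unw_get_ip(&info, &ip);
		if (ip == 0)
			break;
		printk(" [<%016lx>]\n", ip);
	} while (unw_unwind(&info) >= 0);
}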
2092 static void
2093 init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
2094 unsigned long gp, const void *table_start, const void *table_end)
2095 {
2096 const struct unw_table_entry *start = table_start, *end = table_end;
2098 table->name = name;
2099 table->segment_base = segment_base;
2100 table->gp = gp;
2101 table->start = segment_base + start[0].start_offset;
2102 table->end = segment_base + end[-1].end_offset;
2103 table->array = start;
2104 table->length = end - start;
2105 }
2107 #ifndef XEN
2108 void *
2109 unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
2110 const void *table_start, const void *table_end)
2111 {
2112 const struct unw_table_entry *start = table_start, *end = table_end;
2113 struct unw_table *table;
2114 unsigned long flags;
2116 if (end - start <= 0) {
2117 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2118 __FUNCTION__);
2119 return NULL;
2120 }
2122 table = kmalloc(sizeof(*table), GFP_USER);
2123 if (!table)
2124 return NULL;
2126 init_unwind_table(table, name, segment_base, gp, table_start, table_end);
2128 spin_lock_irqsave(&unw.lock, flags);
2130 /* keep kernel unwind table at the front (it's searched most commonly): */
2131 table->next = unw.tables->next;
2132 unw.tables->next = table;
2134 spin_unlock_irqrestore(&unw.lock, flags);
2136 return table;
2137 }
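/*
 * Illustrative usage (not part of this file): pairing the call above with
 * unw_remove_unwind_table() below, roughly what the ia64 module loader does
 * when an image with unwind info is loaded and later unloaded.  The
 * identifiers (image_*, example_*) are invented for the example; only the
 * two unw_*_unwind_table() interfaces come from this file.
 */
static void *example_unw_handle;

static void
example_register_image (const char *image_name, unsigned long image_base,
			unsigned long image_gp,
			const struct unw_table_entry *tbl, unsigned long n)
{
	example_unw_handle = unw_add_unwind_table(image_name, image_base,
						  image_gp, tbl, tbl + n);
}

static void
example_unregister_image (void)
{
	if (example_unw_handle) {
		unw_remove_unwind_table(example_unw_handle);
		example_unw_handle = NULL;
	}
}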
2139 void
2140 unw_remove_unwind_table (void *handle)
2141 {
2142 struct unw_table *table, *prev;
2143 struct unw_script *tmp;
2144 unsigned long flags;
2145 long index;
2147 if (!handle) {
2148 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2149 __FUNCTION__);
2150 return;
2151 }
2153 table = handle;
2154 if (table == &unw.kernel_table) {
2155 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2156 "no-can-do!\n", __FUNCTION__);
2157 return;
2158 }
2160 spin_lock_irqsave(&unw.lock, flags);
2162 /* first, delete the table: */
2164 for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
2165 if (prev->next == table)
2166 break;
2167 if (!prev) {
2168 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2169 __FUNCTION__, (void *) table);
2170 spin_unlock_irqrestore(&unw.lock, flags);
2171 return;
2172 }
2173 prev->next = table->next;
2175 spin_unlock_irqrestore(&unw.lock, flags);
2177 /* next, remove hash table entries for this table */
2179 for (index = 0; index < UNW_HASH_SIZE; ++index) {
2180 tmp = unw.cache + unw.hash[index];
2181 if (unw.hash[index] >= UNW_CACHE_SIZE
2182 || tmp->ip < table->start || tmp->ip >= table->end)
2183 continue;
2185 write_lock(&tmp->lock);
2187 if (tmp->ip >= table->start && tmp->ip < table->end) {
2188 unw.hash[index] = tmp->coll_chain;
2189 tmp->ip = 0;
2190 }
2192 write_unlock(&tmp->lock);
2193 }
2195 kfree(table);
2196 }
2198 static int __init
2199 create_gate_table (void)
2200 {
2201 const struct unw_table_entry *entry, *start, *end;
2202 unsigned long *lp, segbase = GATE_ADDR;
2203 size_t info_size, size;
2204 char *info;
2205 Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
2206 int i;
2208 for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
2209 if (phdr->p_type == PT_IA_64_UNWIND) {
2210 punw = phdr;
2211 break;
2212 }
2214 if (!punw) {
2215 printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__);
2216 return 0;
2217 }
2219 start = (const struct unw_table_entry *) punw->p_vaddr;
2220 end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
2221 size = 0;
2223 unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
2225 for (entry = start; entry < end; ++entry)
2226 size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2227 size += 8; /* reserve space for "end of table" marker */
2229 unw.gate_table = kmalloc(size, GFP_KERNEL);
2230 if (!unw.gate_table) {
2231 unw.gate_table_size = 0;
2232 printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__);
2233 return 0;
2234 }
2235 unw.gate_table_size = size;
2237 lp = unw.gate_table;
2238 info = (char *) unw.gate_table + size;
2240 for (entry = start; entry < end; ++entry, lp += 3) {
2241 info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2242 info -= info_size;
2243 memcpy(info, (char *) segbase + entry->info_offset, info_size);
2245 lp[0] = segbase + entry->start_offset; /* start */
2246 lp[1] = segbase + entry->end_offset; /* end */
2247 lp[2] = info - (char *) unw.gate_table; /* info */
2248 }
2249 *lp = 0; /* end-of-table marker */
2250 return 0;
2251 }
2253 __initcall(create_gate_table);
2254 #endif // !XEN
2256 void __init
2257 unw_init (void)
2258 {
2259 extern char __gp[];
2260 extern void unw_hash_index_t_is_too_narrow (void);
2261 long i, off;
2263 if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
2264 unw_hash_index_t_is_too_narrow();
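/*
 * Illustrative aside (not from the original file): the call above acts as a
 * link-time assertion.  unw_hash_index_t_is_too_narrow() is declared but
 * never defined; when the size check is compile-time false the call is
 * optimized away, and when it could be true the build fails with an
 * unresolved symbol instead of silently truncating hash indices.
 */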
2266 unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT);
2267 unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
2268 unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
2269 unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
2270 unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT);
2271 unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
2272 unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
2273 unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
2274 for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
2275 unw.sw_off[unw.preg_index[i]] = off;
2276 for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
2277 unw.sw_off[unw.preg_index[i]] = off;
2278 for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
2279 unw.sw_off[unw.preg_index[i]] = off;
2280 for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
2281 unw.sw_off[unw.preg_index[i]] = off;
2283 for (i = 0; i < UNW_CACHE_SIZE; ++i) {
2284 if (i > 0)
2285 unw.cache[i].lru_chain = (i - 1);
2286 unw.cache[i].coll_chain = -1;
2287 rwlock_init(&unw.cache[i].lock);
2288 }
2289 unw.lru_head = UNW_CACHE_SIZE - 1;
2290 unw.lru_tail = 0;
2292 init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
2293 __start_unwind, __end_unwind);
2294 }
2296 /*
2297 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2299 * This system call has been deprecated. The new and improved way to get
2300 * at the kernel's unwind info is via the gate DSO. The address of the
2301 * ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
2303 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2305 * This system call copies the unwind data into the buffer pointed to by BUF and returns
2306 * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data
2307 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
2308 * unwind data.
2310 * The first portion of the unwind data contains an unwind table and the rest contains the
2311 * associated unwind info (in no particular order). The unwind table consists of a table
2312 * of entries of the form:
2314 * u64 start; (64-bit address of start of function)
2315 * u64 end; (64-bit address of end of function)
2316 * u64 info; (BUF-relative offset to unwind info)
2318 * The end of the unwind table is indicated by an entry with a START address of zero.
2320 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2321 * on the format of the unwind info.
2323 * ERRORS
2324 * EFAULT BUF points outside your accessible address space.
2325 */
2326 asmlinkage long
2327 sys_getunwind (void __user *buf, size_t buf_size)
2328 {
2329 if (buf && buf_size >= unw.gate_table_size)
2330 if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
2331 return -EFAULT;
2332 return unw.gate_table_size;
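/*
 * Illustrative user-space consumer (not part of this file), matching the
 * layout documented above: query the size with a NULL buffer, fetch the
 * data, then walk the {start, end, info-offset} triples until start == 0.
 * __NR_getunwind is the ia64 syscall number for this interface; as the
 * comment above notes, new code should locate the unwind data through
 * AT_SYSINFO_EHDR instead.
 *
 *	long size = syscall(__NR_getunwind, NULL, 0);	// size query only
 *	unsigned long *buf = malloc(size), *lp;
 *
 *	if (size > 0 && buf && syscall(__NR_getunwind, buf, size) == size)
 *		for (lp = buf; lp[0] != 0; lp += 3)
 *			printf("fn [0x%lx-0x%lx) info at offset 0x%lx\n",
 *			       lp[0], lp[1], lp[2]);
 */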