ia64/xen-unstable

xen/arch/ia64/linux-xen/unwind.c @ 18365:5bb2700e773a

[IA64] fix stack unwinder.

The stack unwinder can be called in interrupt context, but xmalloc()
must not be called there. Don't call xmalloc() when in interrupt
context (a sketch of the resulting pattern follows below).

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Aug 25 19:04:37 2008 +0900 (2008-08-25)
parents 09cd682ac68e
children 5d35b3f7898b
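In short, the fix guards the unwinder's temporary-state allocations so that they return NULL instead of calling xmalloc() from interrupt context. A minimal sketch of the pattern, taken from the XEN-side allocation macros that appear later in this file (their callers already handle a NULL return by logging an error and giving up on that step):

    /* never xmalloc() in interrupt context: return NULL and let the caller bail out */
    #define alloc_reg_state()     ({ in_irq() ? NULL : xmalloc(struct unw_reg_state); })
    #define alloc_labeled_state() ({ in_irq() ? NULL : xmalloc(struct unw_labeled_state); })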
1 /*
2 * Copyright (C) 1999-2004 Hewlett-Packard Co
3 * David Mosberger-Tang <davidm@hpl.hp.com>
4 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
5 * - Change pt_regs_off() to make it less dependent on pt_regs structure.
6 */
7 /*
8 * This file implements call frame unwind support for the Linux
9 * kernel. Parsing and processing the unwind information is
10 * time-consuming, so this implementation translates the unwind
11 * descriptors into unwind scripts. These scripts are very simple
12 * (basically a sequence of assignments) and efficient to execute.
13 * They are cached for later re-use. Each script is specific for a
14 * given instruction pointer address and the set of predicate values
15 * that the script depends on (most unwind descriptors are
16 * unconditional and scripts often do not depend on predicates at
17 * all). This code is based on the unwind conventions described in
18 * the "IA-64 Software Conventions and Runtime Architecture" manual.
19 *
20 * SMP conventions:
21 * o updates to the global unwind data (in structure "unw") are serialized
22 * by the unw.lock spinlock
23 * o each unwind script has its own read-write lock; a thread must acquire
24 * a read lock before executing a script and must acquire a write lock
25 * before modifying a script
26 * o if both the unw.lock spinlock and a script's read-write lock must be
27 * acquired, then the read-write lock must be acquired first.
28 */
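A simplified sketch of how the script cache described above is used per frame, condensed from find_save_locs() near the end of this file (declarations, statistics and error paths omitted):

    scr = script_lookup(info);              /* cache hit: returns with scr->lock held for reading */
    if (!scr) {
        spin_lock_irqsave(&unw.lock, flags);    /* serialize updates to the global unwind data */
        scr = build_script(info);               /* parse descriptors; returns with scr->lock held for writing */
        have_write_lock = 1;
    }
    info->hint = scr->hint;
    info->prev_script = scr - unw.cache;
    run_script(scr, info);                  /* execute the cached (or freshly built) script */
    if (have_write_lock) {
        write_unlock(&scr->lock);
        spin_unlock_irqrestore(&unw.lock, flags);
    } else
        read_unlock(&scr->lock);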
29 #ifdef XEN
30 #include <xen/types.h>
31 #include <xen/elf.h>
32 #include <xen/kernel.h>
33 #include <xen/sched.h>
34 #include <xen/xmalloc.h>
35 #include <xen/spinlock.h>
36 #include <xen/errno.h>
38 // work around
39 #ifdef CONFIG_SMP
40 #define write_trylock(lock) _raw_write_trylock(lock)
41 #else
42 #define write_trylock(lock) ({1;})
43 #endif
45 #else
46 #include <linux/module.h>
47 #include <linux/bootmem.h>
48 #include <linux/elf.h>
49 #include <linux/kernel.h>
50 #include <linux/sched.h>
51 #include <linux/slab.h>
52 #endif
54 #include <asm/unwind.h>
56 #include <asm/delay.h>
57 #include <asm/page.h>
58 #include <asm/ptrace.h>
59 #include <asm/ptrace_offsets.h>
60 #include <asm/rse.h>
61 #include <asm/sections.h>
62 #include <asm/system.h>
63 #include <asm/uaccess.h>
65 #include "entry.h"
66 #include "unwind_i.h"
68 #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
69 #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
71 #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
72 #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
74 #define UNW_STATS 0 /* WARNING: this disables interrupts for long time spans!! */
76 #ifdef UNW_DEBUG
77 static unsigned int unw_debug_level = UNW_DEBUG;
78 # define UNW_DEBUG_ON(n) unw_debug_level >= n
79 /* Do not code a printk level, not all debug lines end in newline */
80 # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
81 # define inline
82 #else /* !UNW_DEBUG */
83 # define UNW_DEBUG_ON(n) 0
84 # define UNW_DPRINT(n, ...)
85 #endif /* UNW_DEBUG */
87 #if UNW_STATS
88 # define STAT(x...) x
89 #else
90 # define STAT(x...)
91 #endif
93 #ifdef XEN
94 #define alloc_reg_state() ({in_irq()? NULL: xmalloc(struct unw_reg_state);})
95 #define free_reg_state(usr) xfree(usr)
96 #define alloc_labeled_state() ({in_irq()? NULL: xmalloc(struct unw_labeled_state);})
97 #define free_labeled_state(usr) xfree(usr)
98 #else
99 #define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
100 #define free_reg_state(usr) kfree(usr)
101 #define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
102 #define free_labeled_state(usr) kfree(usr)
103 #endif
105 typedef unsigned long unw_word;
106 typedef unsigned char unw_hash_index_t;
108 static struct {
109 spinlock_t lock; /* spinlock for unwind data */
111 /* list of unwind tables (one per load-module) */
112 struct unw_table *tables;
114 unsigned long r0; /* constant 0 for r0 */
116 /* table of registers that prologues can save (and order in which they're saved): */
117 const unsigned char save_order[8];
119 /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
120 unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
122 unsigned short lru_head; /* index of least-recently used script */
123 unsigned short lru_tail; /* index of most-recently used script */
125 /* index into unw_frame_info for preserved register i */
126 unsigned short preg_index[UNW_NUM_REGS];
128 short pt_regs_offsets[32];
130 /* unwind table for the kernel: */
131 struct unw_table kernel_table;
133 /* unwind table describing the gate page (kernel code that is mapped into user space): */
134 size_t gate_table_size;
135 unsigned long *gate_table;
137 /* hash table that maps instruction pointer to script index: */
138 unsigned short hash[UNW_HASH_SIZE];
140 /* script cache: */
141 struct unw_script cache[UNW_CACHE_SIZE];
143 # ifdef UNW_DEBUG
144 const char *preg_name[UNW_NUM_REGS];
145 # endif
146 # if UNW_STATS
147 struct {
148 struct {
149 int lookups;
150 int hinted_hits;
151 int normal_hits;
152 int collision_chain_traversals;
153 } cache;
154 struct {
155 unsigned long build_time;
156 unsigned long run_time;
157 unsigned long parse_time;
158 int builds;
159 int news;
160 int collisions;
161 int runs;
162 } script;
163 struct {
164 unsigned long init_time;
165 unsigned long unwind_time;
166 int inits;
167 int unwinds;
168 } api;
169 } stat;
170 # endif
171 } unw = {
172 .tables = &unw.kernel_table,
173 .lock = SPIN_LOCK_UNLOCKED,
174 .save_order = {
175 UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
176 UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
177 },
178 .preg_index = {
179 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
180 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
181 offsetof(struct unw_frame_info, bsp_loc)/8,
182 offsetof(struct unw_frame_info, bspstore_loc)/8,
183 offsetof(struct unw_frame_info, pfs_loc)/8,
184 offsetof(struct unw_frame_info, rnat_loc)/8,
185 offsetof(struct unw_frame_info, psp)/8,
186 offsetof(struct unw_frame_info, rp_loc)/8,
187 offsetof(struct unw_frame_info, r4)/8,
188 offsetof(struct unw_frame_info, r5)/8,
189 offsetof(struct unw_frame_info, r6)/8,
190 offsetof(struct unw_frame_info, r7)/8,
191 offsetof(struct unw_frame_info, unat_loc)/8,
192 offsetof(struct unw_frame_info, pr_loc)/8,
193 offsetof(struct unw_frame_info, lc_loc)/8,
194 offsetof(struct unw_frame_info, fpsr_loc)/8,
195 offsetof(struct unw_frame_info, b1_loc)/8,
196 offsetof(struct unw_frame_info, b2_loc)/8,
197 offsetof(struct unw_frame_info, b3_loc)/8,
198 offsetof(struct unw_frame_info, b4_loc)/8,
199 offsetof(struct unw_frame_info, b5_loc)/8,
200 offsetof(struct unw_frame_info, f2_loc)/8,
201 offsetof(struct unw_frame_info, f3_loc)/8,
202 offsetof(struct unw_frame_info, f4_loc)/8,
203 offsetof(struct unw_frame_info, f5_loc)/8,
204 offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
205 offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
206 offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
207 offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
208 offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
209 offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
210 offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
211 offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
212 offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
213 offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
214 offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
215 offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
216 offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
217 offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
218 offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
219 offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
220 },
221 .pt_regs_offsets = {
222 [0] = -1,
223 offsetof(struct pt_regs, r1),
224 offsetof(struct pt_regs, r2),
225 offsetof(struct pt_regs, r3),
226 [4] = -1, [5] = -1, [6] = -1, [7] = -1,
227 offsetof(struct pt_regs, r8),
228 offsetof(struct pt_regs, r9),
229 offsetof(struct pt_regs, r10),
230 offsetof(struct pt_regs, r11),
231 offsetof(struct pt_regs, r12),
232 offsetof(struct pt_regs, r13),
233 offsetof(struct pt_regs, r14),
234 offsetof(struct pt_regs, r15),
235 offsetof(struct pt_regs, r16),
236 offsetof(struct pt_regs, r17),
237 offsetof(struct pt_regs, r18),
238 offsetof(struct pt_regs, r19),
239 offsetof(struct pt_regs, r20),
240 offsetof(struct pt_regs, r21),
241 offsetof(struct pt_regs, r22),
242 offsetof(struct pt_regs, r23),
243 offsetof(struct pt_regs, r24),
244 offsetof(struct pt_regs, r25),
245 offsetof(struct pt_regs, r26),
246 offsetof(struct pt_regs, r27),
247 offsetof(struct pt_regs, r28),
248 offsetof(struct pt_regs, r29),
249 offsetof(struct pt_regs, r30),
250 offsetof(struct pt_regs, r31),
251 },
252 .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
253 #ifdef UNW_DEBUG
254 .preg_name = {
255 "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
256 "r4", "r5", "r6", "r7",
257 "ar.unat", "pr", "ar.lc", "ar.fpsr",
258 "b1", "b2", "b3", "b4", "b5",
259 "f2", "f3", "f4", "f5",
260 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
261 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
262 }
263 #endif
264 };
266 static inline int
267 read_only (void *addr)
268 {
269 return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0);
270 }
272 /*
273 * Returns offset of rREG in struct pt_regs.
274 */
275 static inline unsigned long
276 pt_regs_off (unsigned long reg)
277 {
278 short off = -1;
280 if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
281 off = unw.pt_regs_offsets[reg];
283 if (off < 0) {
284 UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
285 off = 0;
286 }
287 return (unsigned long) off;
288 }
290 static inline struct pt_regs *
291 get_scratch_regs (struct unw_frame_info *info)
292 {
293 if (!info->pt) {
294 /* This should not happen with valid unwind info. */
295 UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
296 if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
297 info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
298 else
299 info->pt = info->sp - 16;
300 }
301 UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
302 return (struct pt_regs *) info->pt;
303 }
305 /* Unwind accessors. */
307 int
308 unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
309 {
310 unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
311 struct unw_ireg *ireg;
312 struct pt_regs *pt;
314 if ((unsigned) regnum - 1 >= 127) {
315 if (regnum == 0 && !write) {
316 *val = 0; /* read r0 always returns 0 */
317 *nat = 0;
318 return 0;
319 }
320 UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
321 __FUNCTION__, regnum);
322 return -1;
323 }
325 if (regnum < 32) {
326 if (regnum >= 4 && regnum <= 7) {
327 /* access a preserved register */
328 ireg = &info->r4 + (regnum - 4);
329 addr = ireg->loc;
330 if (addr) {
331 nat_addr = addr + ireg->nat.off;
332 switch (ireg->nat.type) {
333 case UNW_NAT_VAL:
334 /* simulate getf.sig/setf.sig */
335 if (write) {
336 if (*nat) {
337 /* write NaTVal and be done with it */
338 addr[0] = 0;
339 addr[1] = 0x1fffe;
340 return 0;
341 }
342 addr[1] = 0x1003e;
343 } else {
344 if (addr[0] == 0 && addr[1] == 0x1fffe) {
345 /* return NaT and be done with it */
346 *val = 0;
347 *nat = 1;
348 return 0;
349 }
350 }
351 /* fall through */
352 case UNW_NAT_NONE:
353 dummy_nat = 0;
354 nat_addr = &dummy_nat;
355 break;
357 case UNW_NAT_MEMSTK:
358 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
359 break;
361 case UNW_NAT_REGSTK:
362 nat_addr = ia64_rse_rnat_addr(addr);
363 if ((unsigned long) addr < info->regstk.limit
364 || (unsigned long) addr >= info->regstk.top)
365 {
366 UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
367 "[0x%lx-0x%lx)\n",
368 __FUNCTION__, (void *) addr,
369 info->regstk.limit,
370 info->regstk.top);
371 return -1;
372 }
373 if ((unsigned long) nat_addr >= info->regstk.top)
374 nat_addr = &info->sw->ar_rnat;
375 nat_mask = (1UL << ia64_rse_slot_num(addr));
376 break;
377 }
378 } else {
379 addr = &info->sw->r4 + (regnum - 4);
380 nat_addr = &info->sw->ar_unat;
381 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
382 }
383 } else {
384 /* access a scratch register */
385 pt = get_scratch_regs(info);
386 addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
387 if (info->pri_unat_loc)
388 nat_addr = info->pri_unat_loc;
389 else
390 nat_addr = &info->sw->caller_unat;
391 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
392 }
393 } else {
394 /* access a stacked register */
395 addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
396 nat_addr = ia64_rse_rnat_addr(addr);
397 if ((unsigned long) addr < info->regstk.limit
398 || (unsigned long) addr >= info->regstk.top)
399 {
400 UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
401 "of rbs\n", __FUNCTION__);
402 return -1;
403 }
404 if ((unsigned long) nat_addr >= info->regstk.top)
405 nat_addr = &info->sw->ar_rnat;
406 nat_mask = (1UL << ia64_rse_slot_num(addr));
407 }
409 if (write) {
410 if (read_only(addr)) {
411 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
412 __FUNCTION__);
413 } else {
414 *addr = *val;
415 if (*nat)
416 *nat_addr |= nat_mask;
417 else
418 *nat_addr &= ~nat_mask;
419 }
420 } else {
421 if ((*nat_addr & nat_mask) == 0) {
422 *val = *addr;
423 *nat = 0;
424 } else {
425 *val = 0; /* if register is a NaT, *addr may contain kernel data! */
426 *nat = 1;
427 }
428 }
429 return 0;
430 }
431 EXPORT_SYMBOL(unw_access_gr);
433 int
434 unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
435 {
436 unsigned long *addr;
437 struct pt_regs *pt;
439 switch (regnum) {
440 /* scratch: */
441 case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
442 case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
443 case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
445 /* preserved: */
446 case 1: case 2: case 3: case 4: case 5:
447 addr = *(&info->b1_loc + (regnum - 1));
448 if (!addr)
449 addr = &info->sw->b1 + (regnum - 1);
450 break;
452 default:
453 UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
454 __FUNCTION__, regnum);
455 return -1;
456 }
457 if (write)
458 if (read_only(addr)) {
459 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
460 __FUNCTION__);
461 } else
462 *addr = *val;
463 else
464 *val = *addr;
465 return 0;
466 }
467 EXPORT_SYMBOL(unw_access_br);
469 int
470 unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
471 {
472 struct ia64_fpreg *addr = NULL;
473 struct pt_regs *pt;
475 if ((unsigned) (regnum - 2) >= 126) {
476 UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
477 __FUNCTION__, regnum);
478 return -1;
479 }
481 if (regnum <= 5) {
482 addr = *(&info->f2_loc + (regnum - 2));
483 if (!addr)
484 addr = &info->sw->f2 + (regnum - 2);
485 } else if (regnum <= 15) {
486 if (regnum <= 11) {
487 pt = get_scratch_regs(info);
488 //XXX struct ia64_fpreg and struct pt_fpreg are the same.
489 addr = (struct ia64_fpreg*)(&pt->f6 + (regnum - 6));
490 }
491 else
492 addr = &info->sw->f12 + (regnum - 12);
493 } else if (regnum <= 31) {
494 addr = info->fr_loc[regnum - 16];
495 if (!addr)
496 addr = &info->sw->f16 + (regnum - 16);
497 } else {
498 struct task_struct *t = info->task;
500 if (write)
501 ia64_sync_fph(t);
502 else
503 ia64_flush_fph(t);
504 #ifdef XEN
505 addr = t->arch._thread.fph + (regnum - 32);
506 #else
507 addr = t->thread.fph + (regnum - 32);
508 #endif
509 }
511 if (write)
512 if (read_only(addr)) {
513 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
514 __FUNCTION__);
515 } else
516 *addr = *val;
517 else
518 *val = *addr;
519 return 0;
520 }
521 EXPORT_SYMBOL(unw_access_fr);
523 int
524 unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
525 {
526 unsigned long *addr;
527 struct pt_regs *pt;
529 switch (regnum) {
530 case UNW_AR_BSP:
531 addr = info->bsp_loc;
532 if (!addr)
533 addr = &info->sw->ar_bspstore;
534 break;
536 case UNW_AR_BSPSTORE:
537 addr = info->bspstore_loc;
538 if (!addr)
539 addr = &info->sw->ar_bspstore;
540 break;
542 case UNW_AR_PFS:
543 addr = info->pfs_loc;
544 if (!addr)
545 addr = &info->sw->ar_pfs;
546 break;
548 case UNW_AR_RNAT:
549 addr = info->rnat_loc;
550 if (!addr)
551 addr = &info->sw->ar_rnat;
552 break;
554 case UNW_AR_UNAT:
555 addr = info->unat_loc;
556 if (!addr)
557 addr = &info->sw->caller_unat;
558 break;
560 case UNW_AR_LC:
561 addr = info->lc_loc;
562 if (!addr)
563 addr = &info->sw->ar_lc;
564 break;
566 case UNW_AR_EC:
567 if (!info->cfm_loc)
568 return -1;
569 if (write)
570 *info->cfm_loc =
571 (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
572 else
573 *val = (*info->cfm_loc >> 52) & 0x3f;
574 return 0;
576 case UNW_AR_FPSR:
577 addr = info->fpsr_loc;
578 if (!addr)
579 addr = &info->sw->ar_fpsr;
580 break;
582 case UNW_AR_RSC:
583 pt = get_scratch_regs(info);
584 addr = &pt->ar_rsc;
585 break;
587 case UNW_AR_CCV:
588 pt = get_scratch_regs(info);
589 addr = &pt->ar_ccv;
590 break;
592 case UNW_AR_CSD:
593 pt = get_scratch_regs(info);
594 addr = &pt->ar_csd;
595 break;
597 case UNW_AR_SSD:
598 pt = get_scratch_regs(info);
599 addr = &pt->ar_ssd;
600 break;
602 default:
603 UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
604 __FUNCTION__, regnum);
605 return -1;
606 }
608 if (write) {
609 if (read_only(addr)) {
610 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
611 __FUNCTION__);
612 } else
613 *addr = *val;
614 } else
615 *val = *addr;
616 return 0;
617 }
618 EXPORT_SYMBOL(unw_access_ar);
620 int
621 unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
622 {
623 unsigned long *addr;
625 addr = info->pr_loc;
626 if (!addr)
627 addr = &info->sw->pr;
629 if (write) {
630 if (read_only(addr)) {
631 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
632 __FUNCTION__);
633 } else
634 *addr = *val;
635 } else
636 *val = *addr;
637 return 0;
638 }
639 EXPORT_SYMBOL(unw_access_pr);
642 /* Routines to manipulate the state stack. */
644 static inline void
645 push (struct unw_state_record *sr)
646 {
647 struct unw_reg_state *rs;
649 rs = alloc_reg_state();
650 if (!rs) {
651 printk(KERN_ERR "unwind: cannot stack reg state!\n");
652 return;
653 }
654 memcpy(rs, &sr->curr, sizeof(*rs));
655 sr->curr.next = rs;
656 }
658 static void
659 pop (struct unw_state_record *sr)
660 {
661 struct unw_reg_state *rs = sr->curr.next;
663 if (!rs) {
664 printk(KERN_ERR "unwind: stack underflow!\n");
665 return;
666 }
667 memcpy(&sr->curr, rs, sizeof(*rs));
668 free_reg_state(rs);
669 }
671 /* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
672 static struct unw_reg_state *
673 dup_state_stack (struct unw_reg_state *rs)
674 {
675 struct unw_reg_state *copy, *prev = NULL, *first = NULL;
677 while (rs) {
678 copy = alloc_reg_state();
679 if (!copy) {
680 printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
681 return NULL;
682 }
683 memcpy(copy, rs, sizeof(*copy));
684 if (first)
685 prev->next = copy;
686 else
687 first = copy;
688 rs = rs->next;
689 prev = copy;
690 }
691 return first;
692 }
694 /* Free all stacked register states (but not RS itself). */
695 static void
696 free_state_stack (struct unw_reg_state *rs)
697 {
698 struct unw_reg_state *p, *next;
700 for (p = rs->next; p != NULL; p = next) {
701 next = p->next;
702 free_reg_state(p);
703 }
704 rs->next = NULL;
705 }
707 /* Unwind decoder routines */
709 static enum unw_register_index __attribute_const__
710 decode_abreg (unsigned char abreg, int memory)
711 {
712 switch (abreg) {
713 case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
714 case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
715 case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
716 case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
717 case 0x60: return UNW_REG_PR;
718 case 0x61: return UNW_REG_PSP;
719 case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
720 case 0x63: return UNW_REG_RP;
721 case 0x64: return UNW_REG_BSP;
722 case 0x65: return UNW_REG_BSPSTORE;
723 case 0x66: return UNW_REG_RNAT;
724 case 0x67: return UNW_REG_UNAT;
725 case 0x68: return UNW_REG_FPSR;
726 case 0x69: return UNW_REG_PFS;
727 case 0x6a: return UNW_REG_LC;
728 default:
729 break;
730 }
731 UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
732 return UNW_REG_LC;
733 }
735 static void
736 set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
737 {
738 reg->val = val;
739 reg->where = where;
740 if (reg->when == UNW_WHEN_NEVER)
741 reg->when = when;
742 }
744 static void
745 alloc_spill_area (unsigned long *offp, unsigned long regsize,
746 struct unw_reg_info *lo, struct unw_reg_info *hi)
747 {
748 struct unw_reg_info *reg;
750 for (reg = hi; reg >= lo; --reg) {
751 if (reg->where == UNW_WHERE_SPILL_HOME) {
752 reg->where = UNW_WHERE_PSPREL;
753 *offp -= regsize;
754 reg->val = *offp;
755 }
756 }
757 }
759 static inline void
760 spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
761 {
762 struct unw_reg_info *reg;
764 for (reg = *regp; reg <= lim; ++reg) {
765 if (reg->where == UNW_WHERE_SPILL_HOME) {
766 reg->when = t;
767 *regp = reg + 1;
768 return;
769 }
770 }
771 UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__);
772 }
774 static inline void
775 finish_prologue (struct unw_state_record *sr)
776 {
777 struct unw_reg_info *reg;
778 unsigned long off;
779 int i;
781 /*
782 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
783 * for Using Unwind Descriptors", rule 3):
784 */
785 for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
786 reg = sr->curr.reg + unw.save_order[i];
787 if (reg->where == UNW_WHERE_GR_SAVE) {
788 reg->where = UNW_WHERE_GR;
789 reg->val = sr->gr_save_loc++;
790 }
791 }
793 /*
794 * Next, compute when the fp, general, and branch registers get
795 * saved. This must come before alloc_spill_area() because
796 * we need to know which registers are spilled to their home
797 * locations.
798 */
799 if (sr->imask) {
800 unsigned char kind, mask = 0, *cp = sr->imask;
801 int t;
802 static const unsigned char limit[3] = {
803 UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
804 };
805 struct unw_reg_info *(regs[3]);
807 regs[0] = sr->curr.reg + UNW_REG_F2;
808 regs[1] = sr->curr.reg + UNW_REG_R4;
809 regs[2] = sr->curr.reg + UNW_REG_B1;
811 for (t = 0; t < sr->region_len; ++t) {
812 if ((t & 3) == 0)
813 mask = *cp++;
814 kind = (mask >> 2*(3-(t & 3))) & 3;
815 if (kind > 0)
816 spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
817 sr->region_start + t);
818 }
819 }
820 /*
821 * Next, lay out the memory stack spill area:
822 */
823 if (sr->any_spills) {
824 off = sr->spill_offset;
825 alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
826 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
827 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
828 }
829 }
831 /*
832 * Region header descriptors.
833 */
835 static void
836 desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
837 struct unw_state_record *sr)
838 {
839 int i, region_start;
841 if (!(sr->in_body || sr->first_region))
842 finish_prologue(sr);
843 sr->first_region = 0;
845 /* check if we're done: */
846 if (sr->when_target < sr->region_start + sr->region_len) {
847 sr->done = 1;
848 return;
849 }
851 region_start = sr->region_start + sr->region_len;
853 for (i = 0; i < sr->epilogue_count; ++i)
854 pop(sr);
855 sr->epilogue_count = 0;
856 sr->epilogue_start = UNW_WHEN_NEVER;
858 sr->region_start = region_start;
859 sr->region_len = rlen;
860 sr->in_body = body;
862 if (!body) {
863 push(sr);
865 for (i = 0; i < 4; ++i) {
866 if (mask & 0x8)
867 set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
868 sr->region_start + sr->region_len - 1, grsave++);
869 mask <<= 1;
870 }
871 sr->gr_save_loc = grsave;
872 sr->any_spills = 0;
873 sr->imask = NULL;
874 sr->spill_offset = 0x10; /* default to psp+16 */
875 }
876 }
878 /*
879 * Prologue descriptors.
880 */
882 static inline void
883 desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
884 {
885 if (abi == 3 && context == 'i') {
886 sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
887 UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
888 }
889 else
890 UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
891 __FUNCTION__, abi, context);
892 }
894 static inline void
895 desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
896 {
897 int i;
899 for (i = 0; i < 5; ++i) {
900 if (brmask & 1)
901 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
902 sr->region_start + sr->region_len - 1, gr++);
903 brmask >>= 1;
904 }
905 }
907 static inline void
908 desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
909 {
910 int i;
912 for (i = 0; i < 5; ++i) {
913 if (brmask & 1) {
914 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
915 sr->region_start + sr->region_len - 1, 0);
916 sr->any_spills = 1;
917 }
918 brmask >>= 1;
919 }
920 }
922 static inline void
923 desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
924 {
925 int i;
927 for (i = 0; i < 4; ++i) {
928 if ((grmask & 1) != 0) {
929 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
930 sr->region_start + sr->region_len - 1, 0);
931 sr->any_spills = 1;
932 }
933 grmask >>= 1;
934 }
935 for (i = 0; i < 20; ++i) {
936 if ((frmask & 1) != 0) {
937 int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
938 set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
939 sr->region_start + sr->region_len - 1, 0);
940 sr->any_spills = 1;
941 }
942 frmask >>= 1;
943 }
944 }
946 static inline void
947 desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
948 {
949 int i;
951 for (i = 0; i < 4; ++i) {
952 if ((frmask & 1) != 0) {
953 set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
954 sr->region_start + sr->region_len - 1, 0);
955 sr->any_spills = 1;
956 }
957 frmask >>= 1;
958 }
959 }
961 static inline void
962 desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
963 {
964 int i;
966 for (i = 0; i < 4; ++i) {
967 if ((grmask & 1) != 0)
968 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
969 sr->region_start + sr->region_len - 1, gr++);
970 grmask >>= 1;
971 }
972 }
974 static inline void
975 desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
976 {
977 int i;
979 for (i = 0; i < 4; ++i) {
980 if ((grmask & 1) != 0) {
981 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
982 sr->region_start + sr->region_len - 1, 0);
983 sr->any_spills = 1;
984 }
985 grmask >>= 1;
986 }
987 }
989 static inline void
990 desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
991 {
992 set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
993 sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
994 }
996 static inline void
997 desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
998 {
999 sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
1000 }
1002 static inline void
1003 desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
1004 {
1005 set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
1006 }
1008 static inline void
1009 desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
1010 {
1011 set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
1012 0x10 - 4*pspoff);
1013 }
1015 static inline void
1016 desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
1017 {
1018 set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
1019 4*spoff);
1020 }
1022 static inline void
1023 desc_rp_br (unsigned char dst, struct unw_state_record *sr)
1024 {
1025 sr->return_link_reg = dst;
1026 }
1028 static inline void
1029 desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
1030 {
1031 struct unw_reg_info *reg = sr->curr.reg + regnum;
1033 if (reg->where == UNW_WHERE_NONE)
1034 reg->where = UNW_WHERE_GR_SAVE;
1035 reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1036 }
1038 static inline void
1039 desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
1040 {
1041 sr->spill_offset = 0x10 - 4*pspoff;
1042 }
1044 static inline unsigned char *
1045 desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
1046 {
1047 sr->imask = imaskp;
1048 return imaskp + (2*sr->region_len + 7)/8;
1049 }
1051 /*
1052 * Body descriptors.
1053 */
1054 static inline void
1055 desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
1056 {
1057 sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
1058 sr->epilogue_count = ecount + 1;
1059 }
1061 static inline void
1062 desc_copy_state (unw_word label, struct unw_state_record *sr)
1063 {
1064 struct unw_labeled_state *ls;
1066 for (ls = sr->labeled_states; ls; ls = ls->next) {
1067 if (ls->label == label) {
1068 free_state_stack(&sr->curr);
1069 memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
1070 sr->curr.next = dup_state_stack(ls->saved_state.next);
1071 return;
1072 }
1073 }
1074 printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
1075 }
1077 static inline void
1078 desc_label_state (unw_word label, struct unw_state_record *sr)
1079 {
1080 struct unw_labeled_state *ls;
1082 ls = alloc_labeled_state();
1083 if (!ls) {
1084 printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
1085 return;
1086 }
1087 ls->label = label;
1088 memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
1089 ls->saved_state.next = dup_state_stack(sr->curr.next);
1091 /* insert into list of labeled states: */
1092 ls->next = sr->labeled_states;
1093 sr->labeled_states = ls;
1094 }
1096 /*
1097 * General descriptors.
1098 */
1100 static inline int
1101 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
1102 {
1103 if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
1104 return 0;
1105 if (qp > 0) {
1106 if ((sr->pr_val & (1UL << qp)) == 0)
1107 return 0;
1108 sr->pr_mask |= (1UL << qp);
1109 }
1110 return 1;
1111 }
1113 static inline void
1114 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
1115 {
1116 struct unw_reg_info *r;
1118 if (!desc_is_active(qp, t, sr))
1119 return;
1121 r = sr->curr.reg + decode_abreg(abreg, 0);
1122 r->where = UNW_WHERE_NONE;
1123 r->when = UNW_WHEN_NEVER;
1124 r->val = 0;
1125 }
1127 static inline void
1128 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1129 unsigned char ytreg, struct unw_state_record *sr)
1130 {
1131 enum unw_where where = UNW_WHERE_GR;
1132 struct unw_reg_info *r;
1134 if (!desc_is_active(qp, t, sr))
1135 return;
1137 if (x)
1138 where = UNW_WHERE_BR;
1139 else if (ytreg & 0x80)
1140 where = UNW_WHERE_FR;
1142 r = sr->curr.reg + decode_abreg(abreg, 0);
1143 r->where = where;
1144 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1145 r->val = (ytreg & 0x7f);
1146 }
1148 static inline void
1149 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1150 struct unw_state_record *sr)
1151 {
1152 struct unw_reg_info *r;
1154 if (!desc_is_active(qp, t, sr))
1155 return;
1157 r = sr->curr.reg + decode_abreg(abreg, 1);
1158 r->where = UNW_WHERE_PSPREL;
1159 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1160 r->val = 0x10 - 4*pspoff;
1161 }
1163 static inline void
1164 desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1165 struct unw_state_record *sr)
1166 {
1167 struct unw_reg_info *r;
1169 if (!desc_is_active(qp, t, sr))
1170 return;
1172 r = sr->curr.reg + decode_abreg(abreg, 1);
1173 r->where = UNW_WHERE_SPREL;
1174 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1175 r->val = 4*spoff;
1176 }
1178 #define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1179 code);
1181 /*
1182 * region headers:
1183 */
1184 #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
1185 #define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
1186 /*
1187 * prologue descriptors:
1188 */
1189 #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
1190 #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
1191 #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
1192 #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
1193 #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
1194 #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
1195 #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
1196 #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
1197 #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
1198 #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
1199 #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
1200 #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
1201 #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
1202 #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1203 #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1204 #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1205 #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1206 #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1207 #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
1208 #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
1209 #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
1210 /*
1211 * body descriptors:
1212 */
1213 #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
1214 #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
1215 #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
1216 /*
1217 * general unwind descriptors:
1218 */
1219 #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
1220 #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
1221 #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
1222 #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
1223 #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
1224 #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
1225 #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
1226 #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
1228 #include "unwind_decoder.c"
1231 /* Unwind scripts. */
1233 static inline unw_hash_index_t
1234 hash (unsigned long ip)
1235 {
1236 # define hashmagic 0x9e3779b97f4a7c16UL /* based on (sqrt(5)/2-1)*2^64 */
1238 return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
1239 #undef hashmagic
1240 }
1242 static inline long
1243 cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
1244 {
1245 read_lock(&script->lock);
1246 if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1247 /* keep the read lock... */
1248 return 1;
1249 read_unlock(&script->lock);
1250 return 0;
1251 }
1253 static inline struct unw_script *
1254 script_lookup (struct unw_frame_info *info)
1256 struct unw_script *script = unw.cache + info->hint;
1257 unsigned short index;
1258 unsigned long ip, pr;
1260 if (UNW_DEBUG_ON(0))
1261 return NULL; /* Always regenerate scripts in debug mode */
1263 STAT(++unw.stat.cache.lookups);
1265 ip = info->ip;
1266 pr = info->pr;
1268 if (cache_match(script, ip, pr)) {
1269 STAT(++unw.stat.cache.hinted_hits);
1270 return script;
1273 index = unw.hash[hash(ip)];
1274 if (index >= UNW_CACHE_SIZE)
1275 return NULL;
1277 script = unw.cache + index;
1278 while (1) {
1279 if (cache_match(script, ip, pr)) {
1280 /* update hint; no locking required as single-word writes are atomic */
1281 STAT(++unw.stat.cache.normal_hits);
1282 unw.cache[info->prev_script].hint = script - unw.cache;
1283 return script;
1285 if (script->coll_chain >= UNW_HASH_SIZE)
1286 return NULL;
1287 script = unw.cache + script->coll_chain;
1288 STAT(++unw.stat.cache.collision_chain_traversals);
1292 /*
1293 * On returning, a write lock for the SCRIPT is still being held.
1294 */
1295 static inline struct unw_script *
1296 script_new (unsigned long ip)
1298 struct unw_script *script, *prev, *tmp;
1299 unw_hash_index_t index;
1300 unsigned short head;
1302 STAT(++unw.stat.script.news);
1304 /*
1305 * Can't (easily) use cmpxchg() here because of ABA problem
1306 * that is intrinsic in cmpxchg()...
1307 */
1308 head = unw.lru_head;
1309 script = unw.cache + head;
1310 unw.lru_head = script->lru_chain;
1312 /*
1313 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1314 * script->lock. Thus, if the write_trylock() fails, we simply bail out. The
1315 * alternative would be to disable interrupts whenever we hold a read-lock, but
1316 * that seems silly.
1317 */
1318 if (!write_trylock(&script->lock))
1319 return NULL;
1321 /* re-insert script at the tail of the LRU chain: */
1322 unw.cache[unw.lru_tail].lru_chain = head;
1323 unw.lru_tail = head;
1325 /* remove the old script from the hash table (if it's there): */
1326 if (script->ip) {
1327 index = hash(script->ip);
1328 tmp = unw.cache + unw.hash[index];
1329 prev = NULL;
1330 while (1) {
1331 if (tmp == script) {
1332 if (prev)
1333 prev->coll_chain = tmp->coll_chain;
1334 else
1335 unw.hash[index] = tmp->coll_chain;
1336 break;
1337 } else
1338 prev = tmp;
1339 if (tmp->coll_chain >= UNW_CACHE_SIZE)
1340 /* old script wasn't in the hash-table */
1341 break;
1342 tmp = unw.cache + tmp->coll_chain;
1346 /* enter new script in the hash table */
1347 index = hash(ip);
1348 script->coll_chain = unw.hash[index];
1349 unw.hash[index] = script - unw.cache;
1351 script->ip = ip; /* set new IP while we're holding the locks */
1353 STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1355 script->flags = 0;
1356 script->hint = 0;
1357 script->count = 0;
1358 return script;
1361 static void
1362 script_finalize (struct unw_script *script, struct unw_state_record *sr)
1364 script->pr_mask = sr->pr_mask;
1365 script->pr_val = sr->pr_val;
1366 /*
1367 * We could down-grade our write-lock on script->lock here but
1368 * the rwlock API doesn't offer atomic lock downgrading, so
1369 * we'll just keep the write-lock and release it later when
1370 * we're done using the script.
1371 */
1374 static inline void
1375 script_emit (struct unw_script *script, struct unw_insn insn)
1377 if (script->count >= UNW_MAX_SCRIPT_LEN) {
1378 UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1379 __FUNCTION__, UNW_MAX_SCRIPT_LEN);
1380 return;
1382 script->insn[script->count++] = insn;
1385 static inline void
1386 emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1388 struct unw_reg_info *r = sr->curr.reg + i;
1389 enum unw_insn_opcode opc;
1390 struct unw_insn insn;
1391 unsigned long val = 0;
1393 switch (r->where) {
1394 case UNW_WHERE_GR:
1395 if (r->val >= 32) {
1396 /* register got spilled to a stacked register */
1397 opc = UNW_INSN_SETNAT_TYPE;
1398 val = UNW_NAT_REGSTK;
1399 } else
1400 /* register got spilled to a scratch register */
1401 opc = UNW_INSN_SETNAT_MEMSTK;
1402 break;
1404 case UNW_WHERE_FR:
1405 opc = UNW_INSN_SETNAT_TYPE;
1406 val = UNW_NAT_VAL;
1407 break;
1409 case UNW_WHERE_BR:
1410 opc = UNW_INSN_SETNAT_TYPE;
1411 val = UNW_NAT_NONE;
1412 break;
1414 case UNW_WHERE_PSPREL:
1415 case UNW_WHERE_SPREL:
1416 opc = UNW_INSN_SETNAT_MEMSTK;
1417 break;
1419 default:
1420 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1421 __FUNCTION__, r->where);
1422 return;
1424 insn.opc = opc;
1425 insn.dst = unw.preg_index[i];
1426 insn.val = val;
1427 script_emit(script, insn);
1430 static void
1431 compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1433 struct unw_reg_info *r = sr->curr.reg + i;
1434 enum unw_insn_opcode opc;
1435 unsigned long val, rval;
1436 struct unw_insn insn;
1437 long need_nat_info;
1439 if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1440 return;
1442 opc = UNW_INSN_MOVE;
1443 val = rval = r->val;
1444 need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1446 switch (r->where) {
1447 case UNW_WHERE_GR:
1448 if (rval >= 32) {
1449 opc = UNW_INSN_MOVE_STACKED;
1450 val = rval - 32;
1451 } else if (rval >= 4 && rval <= 7) {
1452 if (need_nat_info) {
1453 opc = UNW_INSN_MOVE2;
1454 need_nat_info = 0;
1456 val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1457 } else if (rval == 0) {
1458 opc = UNW_INSN_MOVE_CONST;
1459 val = 0;
1460 } else {
1461 /* register got spilled to a scratch register */
1462 opc = UNW_INSN_MOVE_SCRATCH;
1463 val = pt_regs_off(rval);
1465 break;
1467 case UNW_WHERE_FR:
1468 if (rval <= 5)
1469 val = unw.preg_index[UNW_REG_F2 + (rval - 2)];
1470 else if (rval >= 16 && rval <= 31)
1471 val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1472 else {
1473 opc = UNW_INSN_MOVE_SCRATCH;
1474 if (rval <= 11)
1475 val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1476 else
1477 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1478 __FUNCTION__, rval);
1480 break;
1482 case UNW_WHERE_BR:
1483 if (rval >= 1 && rval <= 5)
1484 val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1485 else {
1486 opc = UNW_INSN_MOVE_SCRATCH;
1487 if (rval == 0)
1488 val = offsetof(struct pt_regs, b0);
1489 else if (rval == 6)
1490 val = offsetof(struct pt_regs, b6);
1491 else
1492 val = offsetof(struct pt_regs, b7);
1494 break;
1496 case UNW_WHERE_SPREL:
1497 opc = UNW_INSN_ADD_SP;
1498 break;
1500 case UNW_WHERE_PSPREL:
1501 opc = UNW_INSN_ADD_PSP;
1502 break;
1504 default:
1505 UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
1506 __FUNCTION__, i, r->where);
1507 break;
1509 insn.opc = opc;
1510 insn.dst = unw.preg_index[i];
1511 insn.val = val;
1512 script_emit(script, insn);
1513 if (need_nat_info)
1514 emit_nat_info(sr, i, script);
1516 if (i == UNW_REG_PSP) {
1517 /*
1518 * info->psp must contain the _value_ of the previous
1519 * sp, not its save location. We get this by
1520 * dereferencing the value we just stored in
1521 * info->psp:
1522 */
1523 insn.opc = UNW_INSN_LOAD;
1524 insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1525 script_emit(script, insn);
1529 static inline const struct unw_table_entry *
1530 lookup (struct unw_table *table, unsigned long rel_ip)
1532 const struct unw_table_entry *e = NULL;
1533 unsigned long lo, hi, mid;
1535 /* do a binary search for right entry: */
1536 for (lo = 0, hi = table->length; lo < hi; ) {
1537 mid = (lo + hi) / 2;
1538 e = &table->array[mid];
1539 if (rel_ip < e->start_offset)
1540 hi = mid;
1541 else if (rel_ip >= e->end_offset)
1542 lo = mid + 1;
1543 else
1544 break;
1546 if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
1547 return NULL;
1548 return e;
1551 /*
1552 * Build an unwind script that unwinds from state OLD_STATE to the
1553 * entrypoint of the function that called OLD_STATE.
1554 */
1555 static inline struct unw_script *
1556 build_script (struct unw_frame_info *info)
1558 const struct unw_table_entry *e = NULL;
1559 struct unw_script *script = NULL;
1560 struct unw_labeled_state *ls, *next;
1561 unsigned long ip = info->ip;
1562 struct unw_state_record sr;
1563 struct unw_table *table;
1564 struct unw_reg_info *r;
1565 struct unw_insn insn;
1566 u8 *dp, *desc_end;
1567 u64 hdr;
1568 int i;
1569 STAT(unsigned long start, parse_start;)
1571 STAT(++unw.stat.script.builds; start = ia64_get_itc());
1573 /* build state record */
1574 memset(&sr, 0, sizeof(sr));
1575 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1576 r->when = UNW_WHEN_NEVER;
1577 sr.pr_val = info->pr;
1579 UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
1580 script = script_new(ip);
1581 if (!script) {
1582 UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__);
1583 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1584 return NULL;
1586 unw.cache[info->prev_script].hint = script - unw.cache;
1588 /* search the kernel's and the modules' unwind tables for IP: */
1590 STAT(parse_start = ia64_get_itc());
1592 for (table = unw.tables; table; table = table->next) {
1593 if (ip >= table->start && ip < table->end) {
1594 e = lookup(table, ip - table->segment_base);
1595 break;
1598 if (!e) {
1599 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
1600 UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1601 __FUNCTION__, ip, unw.cache[info->prev_script].ip);
1602 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1603 sr.curr.reg[UNW_REG_RP].when = -1;
1604 sr.curr.reg[UNW_REG_RP].val = 0;
1605 compile_reg(&sr, UNW_REG_RP, script);
1606 script_finalize(script, &sr);
1607 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1608 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1609 return script;
1612 sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
1613 + (ip & 0xfUL));
1614 hdr = *(u64 *) (table->segment_base + e->info_offset);
1615 dp = (u8 *) (table->segment_base + e->info_offset + 8);
1616 desc_end = dp + 8*UNW_LENGTH(hdr);
1618 while (!sr.done && dp < desc_end)
1619 dp = unw_decode(dp, sr.in_body, &sr);
1621 if (sr.when_target > sr.epilogue_start) {
1622 /*
1623 * sp has been restored and all values on the memory stack below
1624 * psp also have been restored.
1625 */
1626 sr.curr.reg[UNW_REG_PSP].val = 0;
1627 sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
1628 sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
1629 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1630 if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
1631 || r->where == UNW_WHERE_SPREL)
1633 r->val = 0;
1634 r->where = UNW_WHERE_NONE;
1635 r->when = UNW_WHEN_NEVER;
1639 script->flags = sr.flags;
1641 /*
1642 * If RP didn't get saved, generate entry for the return link
1643 * register.
1644 */
1645 if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
1646 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1647 sr.curr.reg[UNW_REG_RP].when = -1;
1648 sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1649 UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1650 __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
1651 sr.curr.reg[UNW_REG_RP].val);
1654 #ifdef UNW_DEBUG
1655 UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1656 __FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
1657 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1658 if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1659 UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
1660 switch (r->where) {
1661 case UNW_WHERE_GR: UNW_DPRINT(1, "r%lu", r->val); break;
1662 case UNW_WHERE_FR: UNW_DPRINT(1, "f%lu", r->val); break;
1663 case UNW_WHERE_BR: UNW_DPRINT(1, "b%lu", r->val); break;
1664 case UNW_WHERE_SPREL: UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
1665 case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
1666 case UNW_WHERE_NONE:
1667 UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
1668 break;
1670 default:
1671 UNW_DPRINT(1, "BADWHERE(%d)", r->where);
1672 break;
1674 UNW_DPRINT(1, "\t\t%d\n", r->when);
1677 #endif
1679 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1681 /* translate state record into unwinder instructions: */
1683 /*
1684 * First, set psp if we're dealing with a fixed-size frame;
1685 * subsequent instructions may depend on this value.
1686 */
1687 if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
1688 && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
1689 && sr.curr.reg[UNW_REG_PSP].val != 0) {
1690 /* new psp is sp plus frame size */
1691 insn.opc = UNW_INSN_ADD;
1692 insn.dst = offsetof(struct unw_frame_info, psp)/8;
1693 insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */
1694 script_emit(script, insn);
1697 /* determine where the primary UNaT is: */
1698 if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1699 i = UNW_REG_PRI_UNAT_MEM;
1700 else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
1701 i = UNW_REG_PRI_UNAT_GR;
1702 else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1703 i = UNW_REG_PRI_UNAT_MEM;
1704 else
1705 i = UNW_REG_PRI_UNAT_GR;
1707 compile_reg(&sr, i, script);
1709 for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
1710 compile_reg(&sr, i, script);
1712 /* free labeled register states & stack: */
1714 STAT(parse_start = ia64_get_itc());
1715 for (ls = sr.labeled_states; ls; ls = next) {
1716 next = ls->next;
1717 free_state_stack(&ls->saved_state);
1718 free_labeled_state(ls);
1720 free_state_stack(&sr.curr);
1721 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1723 script_finalize(script, &sr);
1724 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1725 return script;
1728 /*
1729 * Apply the unwinding actions represented by OPS and update SR to
1730 * reflect the state that existed upon entry to the function that this
1731 * unwinder represents.
1732 */
1733 static inline void
1734 run_script (struct unw_script *script, struct unw_frame_info *state)
1736 struct unw_insn *ip, *limit, next_insn;
1737 unsigned long opc, dst, val, off;
1738 unsigned long *s = (unsigned long *) state;
1739 STAT(unsigned long start;)
1741 STAT(++unw.stat.script.runs; start = ia64_get_itc());
1742 state->flags = script->flags;
1743 ip = script->insn;
1744 limit = script->insn + script->count;
1745 next_insn = *ip;
1747 while (ip++ < limit) {
1748 opc = next_insn.opc;
1749 dst = next_insn.dst;
1750 val = next_insn.val;
1751 next_insn = *ip;
1753 redo:
1754 switch (opc) {
1755 case UNW_INSN_ADD:
1756 s[dst] += val;
1757 break;
1759 case UNW_INSN_MOVE2:
1760 if (!s[val])
1761 goto lazy_init;
1762 s[dst+1] = s[val+1];
1763 s[dst] = s[val];
1764 break;
1766 case UNW_INSN_MOVE:
1767 if (!s[val])
1768 goto lazy_init;
1769 s[dst] = s[val];
1770 break;
1772 case UNW_INSN_MOVE_SCRATCH:
1773 if (state->pt) {
1774 s[dst] = (unsigned long) get_scratch_regs(state) + val;
1775 } else {
1776 s[dst] = 0;
1777 UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1778 __FUNCTION__, dst, val);
1780 break;
1782 case UNW_INSN_MOVE_CONST:
1783 if (val == 0)
1784 s[dst] = (unsigned long) &unw.r0;
1785 else {
1786 s[dst] = 0;
1787 UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
1788 __FUNCTION__, val);
1790 break;
1793 case UNW_INSN_MOVE_STACKED:
1794 s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
1795 val);
1796 break;
1798 case UNW_INSN_ADD_PSP:
1799 s[dst] = state->psp + val;
1800 break;
1802 case UNW_INSN_ADD_SP:
1803 s[dst] = state->sp + val;
1804 break;
1806 case UNW_INSN_SETNAT_MEMSTK:
1807 if (!state->pri_unat_loc)
1808 state->pri_unat_loc = &state->sw->caller_unat;
1809 /* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
1810 s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
1811 break;
1813 case UNW_INSN_SETNAT_TYPE:
1814 s[dst+1] = val;
1815 break;
1817 case UNW_INSN_LOAD:
1818 #ifdef UNW_DEBUG
1819 if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
1820 #ifndef XEN
1821 || s[val] < TASK_SIZE
1822 #endif
1825 UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1826 __FUNCTION__, s[val]);
1827 break;
1829 #endif
1830 s[dst] = *(unsigned long *) s[val];
1831 break;
1834 STAT(unw.stat.script.run_time += ia64_get_itc() - start);
1835 return;
1837 lazy_init:
1838 off = unw.sw_off[val];
1839 s[val] = (unsigned long) state->sw + off;
1840 if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
1841 /*
1842 * We're initializing a general register: init NaT info, too. Note that
1843 * the offset is a multiple of 8 which gives us the 3 bits needed for
1844 * the type field.
1845 */
1846 s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
1847 goto redo;
1850 #ifdef XEN
1851 static inline int
1852 is_hypervisor_virt(unsigned long addr)
1853 {
1854 return IS_VMM_ADDRESS(addr) &&
1855 (HYPERVISOR_VIRT_START <= addr) &&
1856 (addr < HYPERVISOR_VIRT_END);
1857 }
1858 #endif
1860 static int
1861 find_save_locs (struct unw_frame_info *info)
1863 int have_write_lock = 0;
1864 struct unw_script *scr;
1865 unsigned long flags = 0;
1867 if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf))
1868 #ifndef XEN
1869 || info->ip < TASK_SIZE
1870 #else
1871 || !is_hypervisor_virt(info->ip)
1872 #endif
1873 ) {
1874 /* don't let obviously bad addresses pollute the cache */
1875 /* FIXME: should really be level 0 but it occurs too often. KAO */
1876 UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
1877 info->rp_loc = NULL;
1878 return -1;
1881 scr = script_lookup(info);
1882 if (!scr) {
1883 spin_lock_irqsave(&unw.lock, flags);
1884 scr = build_script(info);
1885 if (!scr) {
1886 spin_unlock_irqrestore(&unw.lock, flags);
1887 UNW_DPRINT(0,
1888 "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1889 __FUNCTION__, info->ip);
1890 return -1;
1892 have_write_lock = 1;
1894 info->hint = scr->hint;
1895 info->prev_script = scr - unw.cache;
1897 run_script(scr, info);
1899 if (have_write_lock) {
1900 write_unlock(&scr->lock);
1901 spin_unlock_irqrestore(&unw.lock, flags);
1902 } else
1903 read_unlock(&scr->lock);
1904 return 0;
1907 int
1908 unw_unwind (struct unw_frame_info *info)
1910 unsigned long prev_ip, prev_sp, prev_bsp;
1911 unsigned long ip, pr, num_regs;
1912 STAT(unsigned long start, flags;)
1913 int retval;
1915 STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
1917 prev_ip = info->ip;
1918 prev_sp = info->sp;
1919 prev_bsp = info->bsp;
1921 /* restore the ip */
1922 if (!info->rp_loc) {
1923 /* FIXME: should really be level 0 but it occurs too often. KAO */
1924 UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
1925 __FUNCTION__, info->ip);
1926 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1927 return -1;
1929 ip = info->ip = *info->rp_loc;
1930 #ifndef XEN
1931 if (ip < GATE_ADDR) {
1932 #else
1933 if (!is_hypervisor_virt(info->ip)) {
1934 #endif
1935 UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
1936 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1937 return -1;
1940 /* restore the cfm: */
1941 if (!info->pfs_loc) {
1942 UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
1943 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1944 return -1;
1946 info->cfm_loc = info->pfs_loc;
1948 /* restore the bsp: */
1949 pr = info->pr;
1950 num_regs = 0;
1951 if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
1952 info->pt = info->sp + 16;
1953 if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
1954 num_regs = *info->cfm_loc & 0x7f; /* size of frame */
1955 info->pfs_loc =
1956 (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
1957 UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
1958 } else
1959 num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
1960 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
1961 if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
1962 UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1963 __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
1964 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1965 return -1;
1968 /* restore the sp: */
1969 info->sp = info->psp;
1970 if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
1971 UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1972 __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
1973 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1974 return -1;
1975 }
1977 if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
1978 UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
1979 __FUNCTION__, ip);
1980 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1981 return -1;
1982 }
1984 /* as we unwind, the saved ar.unat becomes the primary unat: */
1985 info->pri_unat_loc = info->unat_loc;
1987 /* finally, restore the predicates: */
1988 unw_get_pr(info, &info->pr);
1990 retval = find_save_locs(info);
1991 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1992 return retval;
1993 }
1994 EXPORT_SYMBOL(unw_unwind);
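/*
 * Illustrative sketch (not part of the original source): the usual pattern
 * for driving unw_unwind() -- read what is needed from the current frame,
 * then step to the caller until the unwinder reports failure.  The helper
 * name and printk format are hypothetical; the frame info is assumed to
 * have been set up with one of the unw_init_* entry points below.
 */
#if 0
static void
sketch_dump_frames (struct unw_frame_info *info)
{
	unsigned long ip, sp, bsp;

	do {
		unw_get_ip(info, &ip);
		if (ip == 0)
			break;
		unw_get_sp(info, &sp);
		unw_get_bsp(info, &bsp);
		printk("ip=0x%016lx sp=0x%016lx bsp=0x%016lx\n", ip, sp, bsp);
	} while (unw_unwind(info) >= 0);	/* < 0: no return link, stop */
}
#endif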
1996 int
1997 unw_unwind_to_user (struct unw_frame_info *info)
1998 {
1999 unsigned long ip, sp, pr = 0;
2001 while (unw_unwind(info) >= 0) {
2002 unw_get_sp(info, &sp);
2003 if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
2004 < IA64_PT_REGS_SIZE) {
2005 UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
2006 __FUNCTION__);
2007 break;
2008 }
2009 #ifndef XEN
2010 if (unw_is_intr_frame(info) &&
2011 (pr & (1UL << PRED_USER_STACK)))
2012 return 0;
2013 #else
2014 if (unw_is_intr_frame(info) &&
2015 !is_hvm_vcpu(info->task) &&
2016 (pr & (1UL << PRED_USER_STACK)))
2017 return 0;
2018 /*
2019 * vmx fault handlers don't go through vcpu->on_stack and leave the
2020 * (pr & (1UL << PRED_USER_STACK)) condition untouched, so we need
2021 * some other way to stop unwinding.
2022 */
2023 if (unw_is_intr_frame(info) &&
2024 is_hvm_vcpu(info->task) &&
2025 info->pr_loc == &vcpu_regs(info->task)->pr)
2026 return 0;
2027 #endif
2028 if (unw_get_pr (info, &pr) < 0) {
2029 unw_get_rp(info, &ip);
2030 UNW_DPRINT(0, "unwind.%s: failed to read "
2031 "predicate register (ip=0x%lx)\n",
2032 __FUNCTION__, ip);
2033 return -1;
2034 }
2035 }
2036 unw_get_ip(info, &ip);
2037 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
2038 __FUNCTION__, ip);
2039 return -1;
2040 }
2041 EXPORT_SYMBOL(unw_unwind_to_user);
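/*
 * Illustrative sketch (not part of the original source): how a caller might
 * use unw_unwind_to_user() to recover the user-level ip/sp of a blocked
 * task.  The helper name is hypothetical; the real callers (e.g. ptrace
 * paths) live elsewhere in the tree.
 */
#if 0
static int
sketch_user_ip_sp (struct task_struct *t, unsigned long *ip, unsigned long *sp)
{
	struct unw_frame_info info;

	unw_init_from_blocked_task(&info, t);
	if (unw_unwind_to_user(&info) < 0)
		return -1;
	unw_get_ip(&info, ip);
	unw_get_sp(&info, sp);
	return 0;
}
#endif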
2043 static void
2044 init_frame_info (struct unw_frame_info *info, struct task_struct *t,
2045 struct switch_stack *sw, unsigned long stktop)
2046 {
2047 unsigned long rbslimit, rbstop, stklimit;
2048 STAT(unsigned long start, flags;)
2050 STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
2052 /*
2053 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
2054 * don't want to do that because it would be slow as each preserved register would
2055 * have to be processed. Instead, what we do here is zero out the frame info and
2056 * start the unwind process at the function that created the switch_stack frame.
2057 * When a preserved value in switch_stack needs to be accessed, run_script() will
2058 * initialize the appropriate pointer on demand.
2059 */
2060 memset(info, 0, sizeof(*info));
2062 rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
2063 rbstop = sw->ar_bspstore;
2064 if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
2065 rbstop = rbslimit;
2067 stklimit = (unsigned long) t + IA64_STK_OFFSET;
2068 if (stktop <= rbstop)
2069 stktop = rbstop;
2071 info->regstk.limit = rbslimit;
2072 info->regstk.top = rbstop;
2073 info->memstk.limit = stklimit;
2074 info->memstk.top = stktop;
2075 info->task = t;
2076 info->sw = sw;
2077 info->sp = info->psp = stktop;
2078 info->pr = sw->pr;
2079 UNW_DPRINT(3, "unwind.%s:\n"
2080 " task 0x%lx\n"
2081 " rbs = [0x%lx-0x%lx)\n"
2082 " stk = [0x%lx-0x%lx)\n"
2083 " pr 0x%lx\n"
2084 " sw 0x%lx\n"
2085 " sp 0x%lx\n",
2086 __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
2087 info->pr, (unsigned long) info->sw, info->sp);
2088 STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
2089 }
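/*
 * Illustrative sketch (not part of the original source): the stack layout
 * init_frame_info() encodes -- the register backing store grows upward from
 * IA64_RBS_OFFSET inside the task's stack area, while the memory stack grows
 * downward from IA64_STK_OFFSET, so "limit" is the low end of regstk but the
 * high end of memstk.  The checking helper below is hypothetical.
 */
#if 0
static int
sketch_frame_bounds_ok (const struct unw_frame_info *info)
{
	unsigned long base = (unsigned long) info->task;

	return info->regstk.limit == base + IA64_RBS_OFFSET	/* rbs: low end     */
	    && info->regstk.top   >= info->regstk.limit		/* grows upward     */
	    && info->memstk.limit == base + IA64_STK_OFFSET	/* stack: high end  */
	    && info->memstk.top   <= info->memstk.limit;	/* grows downward   */
}
#endif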
2091 void
2092 unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
2093 struct pt_regs *pt, struct switch_stack *sw)
2094 {
2095 unsigned long sof;
2097 init_frame_info(info, t, sw, pt->r12);
2098 info->cfm_loc = &pt->cr_ifs;
2099 info->unat_loc = &pt->ar_unat;
2100 info->pfs_loc = &pt->ar_pfs;
2101 sof = *info->cfm_loc & 0x7f;
2102 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
2103 info->ip = pt->cr_iip + ia64_psr(pt)->ri;
2104 info->pt = (unsigned long) pt;
2105 UNW_DPRINT(3, "unwind.%s:\n"
2106 " bsp 0x%lx\n"
2107 " sof 0x%lx\n"
2108 " ip 0x%lx\n",
2109 __FUNCTION__, info->bsp, sof, info->ip);
2110 find_save_locs(info);
2111 }
2113 void
2114 unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
2115 {
2116 unsigned long sol;
2118 init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
2119 info->cfm_loc = &sw->ar_pfs;
2120 sol = (*info->cfm_loc >> 7) & 0x7f;
2121 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
2122 info->ip = sw->b0;
2123 UNW_DPRINT(3, "unwind.%s:\n"
2124 " bsp 0x%lx\n"
2125 " sol 0x%lx\n"
2126 " ip 0x%lx\n",
2127 __FUNCTION__, info->bsp, sol, info->ip);
2128 find_save_locs(info);
2129 }
2131 EXPORT_SYMBOL(unw_init_frame_info);
2133 void
2134 unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
2135 {
2136 #ifdef XEN
2137 struct switch_stack *sw = (struct switch_stack *) (t->arch._thread.ksp + 16);
2138 #else
2139 struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
2140 #endif
2142 UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
2143 unw_init_frame_info(info, t, sw);
2144 }
2145 EXPORT_SYMBOL(unw_init_from_blocked_task);
2147 static void
2148 init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
2149 unsigned long gp, const void *table_start, const void *table_end)
2150 {
2151 const struct unw_table_entry *start = table_start, *end = table_end;
2153 table->name = name;
2154 table->segment_base = segment_base;
2155 table->gp = gp;
2156 table->start = segment_base + start[0].start_offset;
2157 table->end = segment_base + end[-1].end_offset;
2158 table->array = start;
2159 table->length = end - start;
2160 }
2162 #ifndef XEN
2163 void *
2164 unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
2165 const void *table_start, const void *table_end)
2166 {
2167 const struct unw_table_entry *start = table_start, *end = table_end;
2168 struct unw_table *table;
2169 unsigned long flags;
2171 if (end - start <= 0) {
2172 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2173 __FUNCTION__);
2174 return NULL;
2175 }
2177 table = kmalloc(sizeof(*table), GFP_USER);
2178 if (!table)
2179 return NULL;
2181 init_unwind_table(table, name, segment_base, gp, table_start, table_end);
2183 spin_lock_irqsave(&unw.lock, flags);
2185 /* keep kernel unwind table at the front (it's searched most commonly): */
2186 table->next = unw.tables->next;
2187 unw.tables->next = table;
2189 spin_unlock_irqrestore(&unw.lock, flags);
2191 return table;
2192 }
2194 void
2195 unw_remove_unwind_table (void *handle)
2196 {
2197 struct unw_table *table, *prev;
2198 struct unw_script *tmp;
2199 unsigned long flags;
2200 long index;
2202 if (!handle) {
2203 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2204 __FUNCTION__);
2205 return;
2206 }
2208 table = handle;
2209 if (table == &unw.kernel_table) {
2210 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2211 "no-can-do!\n", __FUNCTION__);
2212 return;
2213 }
2215 spin_lock_irqsave(&unw.lock, flags);
2217 /* first, delete the table: */
2219 for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
2220 if (prev->next == table)
2221 break;
2222 if (!prev) {
2223 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2224 __FUNCTION__, (void *) table);
2225 spin_unlock_irqrestore(&unw.lock, flags);
2226 return;
2227 }
2228 prev->next = table->next;
2230 spin_unlock_irqrestore(&unw.lock, flags);
2232 /* next, remove hash table entries for this table */
2234 for (index = 0; index < UNW_HASH_SIZE; ++index) {
2235 tmp = unw.cache + unw.hash[index];
2236 if (unw.hash[index] >= UNW_CACHE_SIZE
2237 || tmp->ip < table->start || tmp->ip >= table->end)
2238 continue;
2240 write_lock(&tmp->lock);
2242 if (tmp->ip >= table->start && tmp->ip < table->end) {
2243 unw.hash[index] = tmp->coll_chain;
2244 tmp->ip = 0;
2245 }
2247 write_unlock(&tmp->lock);
2248 }
2250 kfree(table);
2251 }
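/*
 * Illustrative sketch (not part of the original source): the intended
 * pairing of the two calls above, as the Linux module loader uses them --
 * register a table for a newly loaded object, keep the opaque handle, and
 * hand it back on unload.  Names prefixed with sketch_ are hypothetical.
 */
#if 0
static void *sketch_unw_handle;

static void
sketch_register (const char *name, unsigned long base, unsigned long gp,
		 const void *unw_start, const void *unw_end)
{
	sketch_unw_handle = unw_add_unwind_table(name, base, gp, unw_start, unw_end);
}

static void
sketch_unregister (void)
{
	if (sketch_unw_handle)
		unw_remove_unwind_table(sketch_unw_handle);
}
#endif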
2253 static int __init
2254 create_gate_table (void)
2255 {
2256 const struct unw_table_entry *entry, *start, *end;
2257 unsigned long *lp, segbase = GATE_ADDR;
2258 size_t info_size, size;
2259 char *info;
2260 Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
2261 int i;
2263 for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
2264 if (phdr->p_type == PT_IA_64_UNWIND) {
2265 punw = phdr;
2266 break;
2267 }
2269 if (!punw) {
2270 printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__);
2271 return 0;
2272 }
2274 start = (const struct unw_table_entry *) punw->p_vaddr;
2275 end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
2276 size = 0;
2278 unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
2280 for (entry = start; entry < end; ++entry)
2281 size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2282 size += 8; /* reserve space for "end of table" marker */
2284 unw.gate_table = kmalloc(size, GFP_KERNEL);
2285 if (!unw.gate_table) {
2286 unw.gate_table_size = 0;
2287 printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__);
2288 return 0;
2289 }
2290 unw.gate_table_size = size;
2292 lp = unw.gate_table;
2293 info = (char *) unw.gate_table + size;
2295 for (entry = start; entry < end; ++entry, lp += 3) {
2296 info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2297 info -= info_size;
2298 memcpy(info, (char *) segbase + entry->info_offset, info_size);
2300 lp[0] = segbase + entry->start_offset; /* start */
2301 lp[1] = segbase + entry->end_offset; /* end */
2302 lp[2] = info - (char *) unw.gate_table; /* info */
2303 }
2304 *lp = 0; /* end-of-table marker */
2305 return 0;
2306 }
2308 __initcall(create_gate_table);
2309 #endif // !XEN
2311 void __init
2312 unw_init (void)
2313 {
2314 extern char __gp[];
2315 extern void unw_hash_index_t_is_too_narrow (void);
2316 long i, off;
2318 if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
2319 unw_hash_index_t_is_too_narrow();
2321 unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT);
2322 unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
2323 unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
2324 unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
2325 unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT);
2326 unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
2327 unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
2328 unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
2329 for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
2330 unw.sw_off[unw.preg_index[i]] = off;
2331 for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
2332 unw.sw_off[unw.preg_index[i]] = off;
2333 for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
2334 unw.sw_off[unw.preg_index[i]] = off;
2335 for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
2336 unw.sw_off[unw.preg_index[i]] = off;
2338 for (i = 0; i < UNW_CACHE_SIZE; ++i) {
2339 if (i > 0)
2340 unw.cache[i].lru_chain = (i - 1);
2341 unw.cache[i].coll_chain = -1;
2342 rwlock_init(&unw.cache[i].lock);
2343 }
2344 unw.lru_head = UNW_CACHE_SIZE - 1;
2345 unw.lru_tail = 0;
2347 init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
2348 __start_unwind, __end_unwind);
2349 }
2351 #ifndef XEN
2352 /*
2353 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2355 * This system call has been deprecated. The new and improved way to get
2356 * at the kernel's unwind info is via the gate DSO. The address of the
2357 * ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
2359 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2361 * This system call copies the unwind data into the buffer pointed to by BUF and returns
2362 * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data
2363 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
2364 * unwind data.
2366 * The first portion of the unwind data contains an unwind table and the rest contains the
2367 * associated unwind info (in no particular order). The unwind table consists of a table
2368 * of entries of the form:
2370 * u64 start; (64-bit address of start of function)
2371 * u64 end; (64-bit address of end of function)
2372 * u64 info; (BUF-relative offset to unwind info)
2374 * The end of the unwind table is indicated by an entry with a START address of zero.
2376 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2377 * on the format of the unwind info.
2379 * ERRORS
2380 * EFAULT BUF points outside your accessible address space.
2381 */
2382 asmlinkage long
2383 sys_getunwind (void __user *buf, size_t buf_size)
2384 {
2385 if (buf && buf_size >= unw.gate_table_size)
2386 if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
2387 return -EFAULT;
2388 return unw.gate_table_size;
2389 }
2390 #endif
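/*
 * Illustrative sketch (not part of the original source): a consumer of the
 * buffer layout documented above -- (start, end, info) triples, where info
 * is a BUF-relative offset, terminated by an entry with a zero start
 * address.  The helper is hypothetical user-level code.
 */
#if 0
static void
sketch_walk_unwind_buf (const unsigned long *buf)
{
	const unsigned long *lp;

	for (lp = buf; lp[0] != 0; lp += 3) {
		unsigned long start = lp[0];			/* function start   */
		unsigned long end = lp[1];			/* function end     */
		const char *info = (const char *) buf + lp[2];	/* unwind info blob */
		/* ... look up [start, end) or parse the unwind info here ... */
		(void) start; (void) end; (void) info;
	}
}
#endif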