ia64/xen-unstable

view xen/arch/ia64/linux-xen/unwind.c @ 19846:5d35b3f7898b

[IA64] remove a warning

This patch removes the following warning.
> unwind.c:40:1: warning: "write_trylock" redefined
> In file included from xen/include/xen/sched.h:7,
> from unwind.c:33:
> xen/include/xen/spinlock.h:115:1: warning: this is the location of the previous definition

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:23:31 2009 +0900 (2009-06-29)
parents 5bb2700e773a
children
line source
1 /*
2 * Copyright (C) 1999-2004 Hewlett-Packard Co
3 * David Mosberger-Tang <davidm@hpl.hp.com>
4 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
5 * - Change pt_regs_off() to make it less dependant on pt_regs structure.
6 */
7 /*
8 * This file implements call frame unwind support for the Linux
9 * kernel. Parsing and processing the unwind information is
10 * time-consuming, so this implementation translates the unwind
11 * descriptors into unwind scripts. These scripts are very simple
12 * (basically a sequence of assignments) and efficient to execute.
13 * They are cached for later re-use. Each script is specific for a
14 * given instruction pointer address and the set of predicate values
15 * that the script depends on (most unwind descriptors are
16 * unconditional and scripts often do not depend on predicates at
17 * all). This code is based on the unwind conventions described in
18 * the "IA-64 Software Conventions and Runtime Architecture" manual.
19 *
20 * SMP conventions:
21 * o updates to the global unwind data (in structure "unw") are serialized
22 * by the unw.lock spinlock
23 * o each unwind script has its own read-write lock; a thread must acquire
24 * a read lock before executing a script and must acquire a write lock
25 * before modifying a script
26 * o if both the unw.lock spinlock and a script's read-write lock must be
27 * acquired, then the read-write lock must be acquired first.
28 */
29 #ifdef XEN
30 #include <xen/types.h>
31 #include <xen/elf.h>
32 #include <xen/kernel.h>
33 #include <xen/sched.h>
34 #include <xen/xmalloc.h>
35 #include <xen/spinlock.h>
36 #include <xen/errno.h>
38 // Work around:
39 // Xen's write_trylock() does a bug check, but the stack unwinder can be
40 // called in subtle situations, so skip the bug check here.
41 #undef write_trylock
42 #ifdef CONFIG_SMP
43 #define write_trylock(lock) _raw_write_trylock(lock)
44 #else
45 #define write_trylock(lock) ({1;})
46 #endif
48 #else
49 #include <linux/module.h>
50 #include <linux/bootmem.h>
51 #include <linux/elf.h>
52 #include <linux/kernel.h>
53 #include <linux/sched.h>
54 #include <linux/slab.h>
55 #endif
57 #include <asm/unwind.h>
59 #include <asm/delay.h>
60 #include <asm/page.h>
61 #include <asm/ptrace.h>
62 #include <asm/ptrace_offsets.h>
63 #include <asm/rse.h>
64 #include <asm/sections.h>
65 #include <asm/system.h>
66 #include <asm/uaccess.h>
68 #include "entry.h"
69 #include "unwind_i.h"
71 #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
72 #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
74 #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
75 #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
77 #define UNW_STATS 0 /* WARNING: this disabled interrupts for long time-spans!! */
79 #ifdef UNW_DEBUG
80 static unsigned int unw_debug_level = UNW_DEBUG;
81 # define UNW_DEBUG_ON(n) unw_debug_level >= n
82 /* Do not code a printk level, not all debug lines end in newline */
83 # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
84 # define inline
85 #else /* !UNW_DEBUG */
86 # define UNW_DEBUG_ON(n) 0
87 # define UNW_DPRINT(n, ...)
88 #endif /* UNW_DEBUG */
90 #if UNW_STATS
91 # define STAT(x...) x
92 #else
93 # define STAT(x...)
94 #endif
96 #ifdef XEN
97 #define alloc_reg_state() ({in_irq()? NULL: xmalloc(struct unw_reg_state);})
98 #define free_reg_state(usr) xfree(usr)
99 #define alloc_labeled_state() ({in_irq()? NULL: xmalloc(struct unw_labeled_state);})
100 #define free_labeled_state(usr) xfree(usr)
101 #else
102 #define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
103 #define free_reg_state(usr) kfree(usr)
104 #define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
105 #define free_labeled_state(usr) kfree(usr)
106 #endif
108 typedef unsigned long unw_word;
109 typedef unsigned char unw_hash_index_t;
/*
 * Global unwind state.  Serialization follows the SMP conventions in the
 * header comment at the top of this file (unw.lock plus per-script locks).
 */
111 static struct {
112 spinlock_t lock; /* spinlock for unwind data */
114 /* list of unwind tables (one per load-module) */
115 struct unw_table *tables;
117 unsigned long r0; /* constant 0 for r0 */
119 /* table of registers that prologues can save (and order in which they're saved): */
120 const unsigned char save_order[8];
122 /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
123 unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
125 unsigned short lru_head; /* index of least-recently used script */
126 unsigned short lru_tail; /* index of most-recently used script */
128 /* index into unw_frame_info for preserved register i */
129 unsigned short preg_index[UNW_NUM_REGS];
/* offset of r0..r31 within struct pt_regs; -1 marks registers not saved there */
131 short pt_regs_offsets[32];
133 /* unwind table for the kernel: */
134 struct unw_table kernel_table;
136 /* unwind table describing the gate page (kernel code that is mapped into user space): */
137 size_t gate_table_size;
138 unsigned long *gate_table;
140 /* hash table that maps instruction pointer to script index: */
141 unsigned short hash[UNW_HASH_SIZE];
143 /* script cache: */
144 struct unw_script cache[UNW_CACHE_SIZE];
146 # ifdef UNW_DEBUG
147 const char *preg_name[UNW_NUM_REGS];
148 # endif
149 # if UNW_STATS
150 struct {
151 struct {
152 int lookups;
153 int hinted_hits;
154 int normal_hits;
155 int collision_chain_traversals;
156 } cache;
157 struct {
158 unsigned long build_time;
159 unsigned long run_time;
160 unsigned long parse_time;
161 int builds;
162 int news;
163 int collisions;
164 int runs;
165 } script;
166 struct {
167 unsigned long init_time;
168 unsigned long unwind_time;
169 int inits;
170 int unwinds;
171 } api;
172 } stat;
173 # endif
174 } unw = {
175 .tables = &unw.kernel_table,
176 .lock = SPIN_LOCK_UNLOCKED,
177 .save_order = {
178 UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
179 UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
180 },
181 .preg_index = {
182 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
183 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
184 offsetof(struct unw_frame_info, bsp_loc)/8,
185 offsetof(struct unw_frame_info, bspstore_loc)/8,
186 offsetof(struct unw_frame_info, pfs_loc)/8,
187 offsetof(struct unw_frame_info, rnat_loc)/8,
188 offsetof(struct unw_frame_info, psp)/8,
189 offsetof(struct unw_frame_info, rp_loc)/8,
190 offsetof(struct unw_frame_info, r4)/8,
191 offsetof(struct unw_frame_info, r5)/8,
192 offsetof(struct unw_frame_info, r6)/8,
193 offsetof(struct unw_frame_info, r7)/8,
194 offsetof(struct unw_frame_info, unat_loc)/8,
195 offsetof(struct unw_frame_info, pr_loc)/8,
196 offsetof(struct unw_frame_info, lc_loc)/8,
197 offsetof(struct unw_frame_info, fpsr_loc)/8,
198 offsetof(struct unw_frame_info, b1_loc)/8,
199 offsetof(struct unw_frame_info, b2_loc)/8,
200 offsetof(struct unw_frame_info, b3_loc)/8,
201 offsetof(struct unw_frame_info, b4_loc)/8,
202 offsetof(struct unw_frame_info, b5_loc)/8,
203 offsetof(struct unw_frame_info, f2_loc)/8,
204 offsetof(struct unw_frame_info, f3_loc)/8,
205 offsetof(struct unw_frame_info, f4_loc)/8,
206 offsetof(struct unw_frame_info, f5_loc)/8,
207 offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
208 offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
209 offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
210 offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
211 offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
212 offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
213 offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
214 offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
215 offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
216 offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
217 offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
218 offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
219 offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
220 offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
221 offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
222 offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
223 },
224 .pt_regs_offsets = {
225 [0] = -1,
226 offsetof(struct pt_regs, r1),
227 offsetof(struct pt_regs, r2),
228 offsetof(struct pt_regs, r3),
229 [4] = -1, [5] = -1, [6] = -1, [7] = -1,
230 offsetof(struct pt_regs, r8),
231 offsetof(struct pt_regs, r9),
232 offsetof(struct pt_regs, r10),
233 offsetof(struct pt_regs, r11),
234 offsetof(struct pt_regs, r12),
235 offsetof(struct pt_regs, r13),
236 offsetof(struct pt_regs, r14),
237 offsetof(struct pt_regs, r15),
238 offsetof(struct pt_regs, r16),
239 offsetof(struct pt_regs, r17),
240 offsetof(struct pt_regs, r18),
241 offsetof(struct pt_regs, r19),
242 offsetof(struct pt_regs, r20),
243 offsetof(struct pt_regs, r21),
244 offsetof(struct pt_regs, r22),
245 offsetof(struct pt_regs, r23),
246 offsetof(struct pt_regs, r24),
247 offsetof(struct pt_regs, r25),
248 offsetof(struct pt_regs, r26),
249 offsetof(struct pt_regs, r27),
250 offsetof(struct pt_regs, r28),
251 offsetof(struct pt_regs, r29),
252 offsetof(struct pt_regs, r30),
253 offsetof(struct pt_regs, r31),
254 },
255 .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
256 #ifdef UNW_DEBUG
257 .preg_name = {
258 "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
259 "r4", "r5", "r6", "r7",
260 "ar.unat", "pr", "ar.lc", "ar.fpsr",
261 "b1", "b2", "b3", "b4", "b5",
262 "f2", "f3", "f4", "f5",
263 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
264 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
265 }
266 #endif
267 };
/*
 * Return nonzero if ADDR points into unw.r0, the dummy location that backs
 * the constant register r0; writes to it must be suppressed by callers.
 */
269 static inline int
270 read_only (void *addr)
271 {
272 return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0);
273 }
275 /*
276 * Returns offset of rREG in struct pt_regs.
277 */
278 static inline unsigned long
279 pt_regs_off (unsigned long reg)
280 {
281 short off = -1;
283 if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
284 off = unw.pt_regs_offsets[reg];
/* registers not saved in pt_regs map to -1; complain and fall back to 0 */
286 if (off < 0) {
287 UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
288 off = 0;
289 }
290 return (unsigned long) off;
291 }
/*
 * Return the pt_regs area holding the scratch registers of the frame in
 * INFO.  If the unwind info failed to record info->pt, reconstruct a
 * best-effort value from psp (interrupt frame) or sp.
 */
293 static inline struct pt_regs *
294 get_scratch_regs (struct unw_frame_info *info)
295 {
296 if (!info->pt) {
297 /* This should not happen with valid unwind info. */
298 UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
299 if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
300 info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
301 else
302 info->pt = info->sp - 16;
303 }
304 UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
305 return (struct pt_regs *) info->pt;
306 }
308 /* Unwind accessors. */
/*
 * Read (write == 0) or write (write != 0) general register REGNUM of the
 * frame described by INFO, together with its NaT bit (*nat).  Returns 0 on
 * success, -1 on failure.  Reads of r0 always yield 0/no-NaT; writes to r0
 * fail.  Preserved (r4-r7), scratch (pt_regs) and stacked (RSE backing
 * store) registers are each located differently.
 */
310 int
311 unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
312 {
313 unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
314 struct unw_ireg *ireg;
315 struct pt_regs *pt;
317 if ((unsigned) regnum - 1 >= 127) {
318 if (regnum == 0 && !write) {
319 *val = 0; /* read r0 always returns 0 */
320 *nat = 0;
321 return 0;
322 }
323 UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
324 __FUNCTION__, regnum);
325 return -1;
326 }
328 if (regnum < 32) {
329 if (regnum >= 4 && regnum <= 7) {
330 /* access a preserved register */
331 ireg = &info->r4 + (regnum - 4);
332 addr = ireg->loc;
333 if (addr) {
334 nat_addr = addr + ireg->nat.off;
335 switch (ireg->nat.type) {
336 case UNW_NAT_VAL:
337 /* simulate getf.sig/setf.sig */
338 if (write) {
339 if (*nat) {
340 /* write NaTVal and be done with it */
341 addr[0] = 0;
342 addr[1] = 0x1fffe;
343 return 0;
344 }
345 addr[1] = 0x1003e;
346 } else {
/* NaTVal: significand 0, biased exponent 0x1fffe -- must match
   the value stored by the write path above (was 0x1ffe, a typo
   that could never match). */
347 if (addr[0] == 0 && addr[1] == 0x1fffe) {
348 /* return NaT and be done with it */
349 *val = 0;
350 *nat = 1;
351 return 0;
352 }
353 }
354 /* fall through */
355 case UNW_NAT_NONE:
356 dummy_nat = 0;
357 nat_addr = &dummy_nat;
358 break;
360 case UNW_NAT_MEMSTK:
361 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
362 break;
364 case UNW_NAT_REGSTK:
365 nat_addr = ia64_rse_rnat_addr(addr);
366 if ((unsigned long) addr < info->regstk.limit
367 || (unsigned long) addr >= info->regstk.top)
368 {
369 UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
370 "[0x%lx-0x%lx)\n",
371 __FUNCTION__, (void *) addr,
372 info->regstk.limit,
373 info->regstk.top);
374 return -1;
375 }
376 if ((unsigned long) nat_addr >= info->regstk.top)
377 nat_addr = &info->sw->ar_rnat;
378 nat_mask = (1UL << ia64_rse_slot_num(addr));
379 break;
380 }
381 } else {
382 addr = &info->sw->r4 + (regnum - 4);
383 nat_addr = &info->sw->ar_unat;
384 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
385 }
386 } else {
387 /* access a scratch register */
388 pt = get_scratch_regs(info);
389 addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
390 if (info->pri_unat_loc)
391 nat_addr = info->pri_unat_loc;
392 else
393 nat_addr = &info->sw->caller_unat;
394 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
395 }
396 } else {
397 /* access a stacked register */
398 addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
399 nat_addr = ia64_rse_rnat_addr(addr);
400 if ((unsigned long) addr < info->regstk.limit
401 || (unsigned long) addr >= info->regstk.top)
402 {
403 UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
404 "of rbs\n", __FUNCTION__);
405 return -1;
406 }
407 if ((unsigned long) nat_addr >= info->regstk.top)
408 nat_addr = &info->sw->ar_rnat;
409 nat_mask = (1UL << ia64_rse_slot_num(addr));
410 }
412 if (write) {
413 if (read_only(addr)) {
414 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
415 __FUNCTION__);
416 } else {
417 *addr = *val;
418 if (*nat)
419 *nat_addr |= nat_mask;
420 else
421 *nat_addr &= ~nat_mask;
422 }
423 } else {
424 if ((*nat_addr & nat_mask) == 0) {
425 *val = *addr;
426 *nat = 0;
427 } else {
428 *val = 0; /* if register is a NaT, *addr may contain kernel data! */
429 *nat = 1;
430 }
431 }
432 return 0;
433 }
434 EXPORT_SYMBOL(unw_access_gr);
/*
 * Read or write branch register b<REGNUM> of the frame in INFO.
 * b0/b6/b7 are scratch (live in pt_regs); b1-b5 are preserved (saved
 * location or switch_stack fallback).  Returns 0 on success, -1 for a
 * non-existent register.
 */
436 int
437 unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
438 {
439 unsigned long *addr;
440 struct pt_regs *pt;
442 switch (regnum) {
443 /* scratch: */
444 case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
445 case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
446 case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
448 /* preserved: */
449 case 1: case 2: case 3: case 4: case 5:
450 addr = *(&info->b1_loc + (regnum - 1));
451 if (!addr)
452 addr = &info->sw->b1 + (regnum - 1);
453 break;
455 default:
456 UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
457 __FUNCTION__, regnum);
458 return -1;
459 }
460 if (write)
461 if (read_only(addr)) {
462 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
463 __FUNCTION__);
464 } else
465 *addr = *val;
466 else
467 *val = *addr;
468 return 0;
469 }
470 EXPORT_SYMBOL(unw_access_br);
/*
 * Read or write floating-point register f<REGNUM> (f2-f127) of the frame
 * in INFO.  f2-f5 and f16-f31 are preserved, f6-f15 are scratch, and
 * f32-f127 come from the task's fph area (synced/flushed first).
 * Returns 0 on success, -1 for a non-existent register.
 */
472 int
473 unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
474 {
475 struct ia64_fpreg *addr = NULL;
476 struct pt_regs *pt;
478 if ((unsigned) (regnum - 2) >= 126) {
479 UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
480 __FUNCTION__, regnum);
481 return -1;
482 }
484 if (regnum <= 5) {
485 addr = *(&info->f2_loc + (regnum - 2));
486 if (!addr)
487 addr = &info->sw->f2 + (regnum - 2);
488 } else if (regnum <= 15) {
489 if (regnum <= 11) {
490 pt = get_scratch_regs(info);
491 //XXX struct ia64_fpreg and struct pt_fpreg have the same layout.
492 addr = (struct ia64_fpreg*)(&pt->f6 + (regnum - 6));
493 }
494 else
495 addr = &info->sw->f12 + (regnum - 12);
496 } else if (regnum <= 31) {
497 addr = info->fr_loc[regnum - 16];
498 if (!addr)
499 addr = &info->sw->f16 + (regnum - 16);
500 } else {
501 struct task_struct *t = info->task;
503 if (write)
504 ia64_sync_fph(t);
505 else
506 ia64_flush_fph(t);
507 #ifdef XEN
508 addr = t->arch._thread.fph + (regnum - 32);
509 #else
510 addr = t->thread.fph + (regnum - 32);
511 #endif
512 }
514 if (write)
515 if (read_only(addr)) {
516 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
517 __FUNCTION__);
518 } else
519 *addr = *val;
520 else
521 *val = *addr;
522 return 0;
523 }
524 EXPORT_SYMBOL(unw_access_fr);
/*
 * Read or write application register REGNUM (one of the UNW_AR_* ids) of
 * the frame in INFO.  UNW_AR_EC is packed into bits 52..57 of the cfm and
 * is handled in place; everything else resolves to a saved location with
 * a switch_stack or pt_regs fallback.  Returns 0 on success, -1 otherwise.
 */
526 int
527 unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
528 {
529 unsigned long *addr;
530 struct pt_regs *pt;
532 switch (regnum) {
533 case UNW_AR_BSP:
534 addr = info->bsp_loc;
535 if (!addr)
/* NOTE(review): fallback for BSP also reads sw->ar_bspstore (same as
   the BSPSTORE case below) -- confirm this is intended. */
536 addr = &info->sw->ar_bspstore;
537 break;
539 case UNW_AR_BSPSTORE:
540 addr = info->bspstore_loc;
541 if (!addr)
542 addr = &info->sw->ar_bspstore;
543 break;
545 case UNW_AR_PFS:
546 addr = info->pfs_loc;
547 if (!addr)
548 addr = &info->sw->ar_pfs;
549 break;
551 case UNW_AR_RNAT:
552 addr = info->rnat_loc;
553 if (!addr)
554 addr = &info->sw->ar_rnat;
555 break;
557 case UNW_AR_UNAT:
558 addr = info->unat_loc;
559 if (!addr)
560 addr = &info->sw->caller_unat;
561 break;
563 case UNW_AR_LC:
564 addr = info->lc_loc;
565 if (!addr)
566 addr = &info->sw->ar_lc;
567 break;
569 case UNW_AR_EC:
570 if (!info->cfm_loc)
571 return -1;
572 if (write)
573 *info->cfm_loc =
574 (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
575 else
576 *val = (*info->cfm_loc >> 52) & 0x3f;
577 return 0;
579 case UNW_AR_FPSR:
580 addr = info->fpsr_loc;
581 if (!addr)
582 addr = &info->sw->ar_fpsr;
583 break;
585 case UNW_AR_RSC:
586 pt = get_scratch_regs(info);
587 addr = &pt->ar_rsc;
588 break;
590 case UNW_AR_CCV:
591 pt = get_scratch_regs(info);
592 addr = &pt->ar_ccv;
593 break;
595 case UNW_AR_CSD:
596 pt = get_scratch_regs(info);
597 addr = &pt->ar_csd;
598 break;
600 case UNW_AR_SSD:
601 pt = get_scratch_regs(info);
602 addr = &pt->ar_ssd;
603 break;
605 default:
606 UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
607 __FUNCTION__, regnum);
608 return -1;
609 }
611 if (write) {
612 if (read_only(addr)) {
613 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
614 __FUNCTION__);
615 } else
616 *addr = *val;
617 } else
618 *val = *addr;
619 return 0;
620 }
621 EXPORT_SYMBOL(unw_access_ar);
/*
 * Read or write the predicate registers (pr) of the frame in INFO, using
 * the saved location or the switch_stack fallback.  Always returns 0.
 */
623 int
624 unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
625 {
626 unsigned long *addr;
628 addr = info->pr_loc;
629 if (!addr)
630 addr = &info->sw->pr;
632 if (write) {
633 if (read_only(addr)) {
634 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
635 __FUNCTION__);
636 } else
637 *addr = *val;
638 } else
639 *val = *addr;
640 return 0;
641 }
642 EXPORT_SYMBOL(unw_access_pr);
645 /* Routines to manipulate the state stack. */
/*
 * Push a copy of SR's current register state onto its state stack.  If
 * allocation fails (alloc_reg_state() returns NULL in irq context), the
 * push is dropped after logging an error.
 */
647 static inline void
648 push (struct unw_state_record *sr)
649 {
650 struct unw_reg_state *rs;
652 rs = alloc_reg_state();
653 if (!rs) {
654 printk(KERN_ERR "unwind: cannot stack reg state!\n");
655 return;
656 }
657 memcpy(rs, &sr->curr, sizeof(*rs));
658 sr->curr.next = rs;
659 }
/*
 * Pop the top of SR's state stack into sr->curr and free the popped
 * entry; logs and does nothing on underflow.
 */
661 static void
662 pop (struct unw_state_record *sr)
663 {
664 struct unw_reg_state *rs = sr->curr.next;
666 if (!rs) {
667 printk(KERN_ERR "unwind: stack underflow!\n");
668 return;
669 }
670 memcpy(&sr->curr, rs, sizeof(*rs));
671 free_reg_state(rs);
672 }
674 /* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
675 static struct unw_reg_state *
676 dup_state_stack (struct unw_reg_state *rs)
677 {
678 struct unw_reg_state *copy, *prev = NULL, *first = NULL;
680 while (rs) {
681 copy = alloc_reg_state();
682 if (!copy) {
/* NOTE(review): a partially-duplicated list is not freed here, so the
   copies made so far are leaked on allocation failure. */
683 printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
684 return NULL;
685 }
686 memcpy(copy, rs, sizeof(*copy));
687 if (first)
688 prev->next = copy;
689 else
690 first = copy;
691 rs = rs->next;
692 prev = copy;
693 }
694 return first;
695 }
697 /* Free all stacked register states (but not RS itself). */
698 static void
699 free_state_stack (struct unw_reg_state *rs)
700 {
701 struct unw_reg_state *p, *next;
703 for (p = rs->next; p != NULL; p = next) {
704 next = p->next;
705 free_reg_state(p);
706 }
707 rs->next = NULL;
708 }
710 /* Unwind decoder routines */
/*
 * Translate an abstract register number ABREG from an unwind descriptor
 * into the corresponding UNW_REG_* index.  MEMORY selects between the
 * memory and GR forms of the primary UNaT.  Complains and returns
 * UNW_REG_LC for an unrecognized encoding.
 */
712 static enum unw_register_index __attribute_const__
713 decode_abreg (unsigned char abreg, int memory)
714 {
715 switch (abreg) {
716 case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
717 case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
718 case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
719 case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
720 case 0x60: return UNW_REG_PR;
721 case 0x61: return UNW_REG_PSP;
722 case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
723 case 0x63: return UNW_REG_RP;
724 case 0x64: return UNW_REG_BSP;
725 case 0x65: return UNW_REG_BSPSTORE;
726 case 0x66: return UNW_REG_RNAT;
727 case 0x67: return UNW_REG_UNAT;
728 case 0x68: return UNW_REG_FPSR;
729 case 0x69: return UNW_REG_PFS;
730 case 0x6a: return UNW_REG_LC;
731 default:
732 break;
733 }
734 UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
735 return UNW_REG_LC;
736 }
/*
 * Record where REG is saved (WHERE/VAL); the save time is only set the
 * first time, i.e. while reg->when is still UNW_WHEN_NEVER.
 */
738 static void
739 set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
740 {
741 reg->val = val;
742 reg->where = where;
743 if (reg->when == UNW_WHEN_NEVER)
744 reg->when = when;
745 }
/*
 * Assign psp-relative spill addresses to every register in [LO..HI] that
 * is spilled to its home location, allocating REGSIZE bytes each,
 * growing downward from *OFFP (updated in place).
 */
747 static void
748 alloc_spill_area (unsigned long *offp, unsigned long regsize,
749 struct unw_reg_info *lo, struct unw_reg_info *hi)
750 {
751 struct unw_reg_info *reg;
753 for (reg = hi; reg >= lo; --reg) {
754 if (reg->where == UNW_WHERE_SPILL_HOME) {
755 reg->where = UNW_WHERE_PSPREL;
756 *offp -= regsize;
757 reg->val = *offp;
758 }
759 }
760 }
/*
 * Set the spill time of the next register (at or after *REGP, up to LIM)
 * that is spilled to its home location to T, and advance *REGP past it.
 * Complains if no such register remains.
 */
762 static inline void
763 spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
764 {
765 struct unw_reg_info *reg;
767 for (reg = *regp; reg <= lim; ++reg) {
768 if (reg->where == UNW_WHERE_SPILL_HOME) {
769 reg->when = t;
770 *regp = reg + 1;
771 return;
772 }
773 }
774 UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__);
775 }
/*
 * Finalize the current prologue region in SR: resolve implicit GR save
 * locations, compute spill times from the imask, and lay out the memory
 * stack spill area.
 */
777 static inline void
778 finish_prologue (struct unw_state_record *sr)
779 {
780 struct unw_reg_info *reg;
781 unsigned long off;
782 int i;
784 /*
785 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
786 * for Using Unwind Descriptors", rule 3):
787 */
788 for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
789 reg = sr->curr.reg + unw.save_order[i];
790 if (reg->where == UNW_WHERE_GR_SAVE) {
791 reg->where = UNW_WHERE_GR;
792 reg->val = sr->gr_save_loc++;
793 }
794 }
796 /*
797 * Next, compute when the fp, general, and branch registers get
798 * saved. This must come before alloc_spill_area() because
799 * we need to know which registers are spilled to their home
800 * locations.
801 */
802 if (sr->imask) {
803 unsigned char kind, mask = 0, *cp = sr->imask;
804 int t;
805 static const unsigned char limit[3] = {
806 UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
807 };
808 struct unw_reg_info *(regs[3]);
810 regs[0] = sr->curr.reg + UNW_REG_F2;
811 regs[1] = sr->curr.reg + UNW_REG_R4;
812 regs[2] = sr->curr.reg + UNW_REG_B1;
/* imask packs one 2-bit kind per instruction, four per byte, MSB first */
814 for (t = 0; t < sr->region_len; ++t) {
815 if ((t & 3) == 0)
816 mask = *cp++;
817 kind = (mask >> 2*(3-(t & 3))) & 3;
818 if (kind > 0)
819 spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
820 sr->region_start + t);
821 }
822 }
823 /*
824 * Next, lay out the memory stack spill area:
825 */
826 if (sr->any_spills) {
827 off = sr->spill_offset;
828 alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
829 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
830 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
831 }
832 }
834 /*
835 * Region header descriptors.
836 */
/*
 * Start a new prologue (BODY == 0) or body (BODY != 0) region of RLEN
 * instructions.  Finishes the previous prologue, stops (sr->done) once
 * the target IP falls inside the region already processed, unwinds any
 * pending epilogues, and for a prologue pushes a new state and applies
 * the MASK/GRSAVE shorthand for rp/ar.pfs/psp/pr.
 */
838 static void
839 desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
840 struct unw_state_record *sr)
841 {
842 int i, region_start;
844 if (!(sr->in_body || sr->first_region))
845 finish_prologue(sr);
846 sr->first_region = 0;
848 /* check if we're done: */
849 if (sr->when_target < sr->region_start + sr->region_len) {
850 sr->done = 1;
851 return;
852 }
854 region_start = sr->region_start + sr->region_len;
856 for (i = 0; i < sr->epilogue_count; ++i)
857 pop(sr);
858 sr->epilogue_count = 0;
859 sr->epilogue_start = UNW_WHEN_NEVER;
861 sr->region_start = region_start;
862 sr->region_len = rlen;
863 sr->in_body = body;
865 if (!body) {
866 push(sr);
/* mask bit 3..0 select save_order[0..3] (rp, ar.pfs, psp, pr) */
868 for (i = 0; i < 4; ++i) {
869 if (mask & 0x8)
870 set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
871 sr->region_start + sr->region_len - 1, grsave++);
872 mask <<= 1;
873 }
874 sr->gr_save_loc = grsave;
875 sr->any_spills = 0;
876 sr->imask = NULL;
877 sr->spill_offset = 0x10; /* default to psp+16 */
878 }
879 }
881 /*
882 * Prologue descriptors.
883 */
/*
 * Handle an unwabi descriptor: abi 3 / context 'i' marks an interrupt
 * frame; any other combination is logged and ignored.
 */
885 static inline void
886 desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
887 {
888 if (abi == 3 && context == 'i') {
889 sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
890 UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
891 }
892 else
893 UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
894 __FUNCTION__, abi, context);
895 }
/*
 * br_gr descriptor: branch registers b1-b5 selected by BRMASK are saved
 * in consecutive general registers starting at GR.
 */
897 static inline void
898 desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
899 {
900 int i;
902 for (i = 0; i < 5; ++i) {
903 if (brmask & 1)
904 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
905 sr->region_start + sr->region_len - 1, gr++);
906 brmask >>= 1;
907 }
908 }
/*
 * br_mem descriptor: branch registers b1-b5 selected by BRMASK are
 * spilled to their memory-stack home locations.
 */
910 static inline void
911 desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
912 {
913 int i;
915 for (i = 0; i < 5; ++i) {
916 if (brmask & 1) {
917 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
918 sr->region_start + sr->region_len - 1, 0);
919 sr->any_spills = 1;
920 }
921 brmask >>= 1;
922 }
923 }
/*
 * frgr_mem descriptor: r4-r7 (GRMASK) and f2-f5/f16-f31 (FRMASK) are
 * spilled to their memory-stack home locations.
 */
925 static inline void
926 desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
927 {
928 int i;
930 for (i = 0; i < 4; ++i) {
931 if ((grmask & 1) != 0) {
932 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
933 sr->region_start + sr->region_len - 1, 0);
934 sr->any_spills = 1;
935 }
936 grmask >>= 1;
937 }
/* frmask bits 0-3 map to f2-f5, bits 4-19 map to f16-f31 */
938 for (i = 0; i < 20; ++i) {
939 if ((frmask & 1) != 0) {
940 int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
941 set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
942 sr->region_start + sr->region_len - 1, 0);
943 sr->any_spills = 1;
944 }
945 frmask >>= 1;
946 }
947 }
/*
 * fr_mem descriptor: floating-point registers f2-f5 selected by FRMASK
 * are spilled to their memory-stack home locations.
 */
949 static inline void
950 desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
951 {
952 int i;
954 for (i = 0; i < 4; ++i) {
955 if ((frmask & 1) != 0) {
956 set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
957 sr->region_start + sr->region_len - 1, 0);
958 sr->any_spills = 1;
959 }
960 frmask >>= 1;
961 }
962 }
/*
 * gr_gr descriptor: general registers r4-r7 selected by GRMASK are saved
 * in consecutive general registers starting at GR.
 */
964 static inline void
965 desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
966 {
967 int i;
969 for (i = 0; i < 4; ++i) {
970 if ((grmask & 1) != 0)
971 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
972 sr->region_start + sr->region_len - 1, gr++);
973 grmask >>= 1;
974 }
975 }
/*
 * gr_mem descriptor: general registers r4-r7 selected by GRMASK are
 * spilled to their memory-stack home locations.
 */
977 static inline void
978 desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
979 {
980 int i;
982 for (i = 0; i < 4; ++i) {
983 if ((grmask & 1) != 0) {
984 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
985 sr->region_start + sr->region_len - 1, 0);
986 sr->any_spills = 1;
987 }
988 grmask >>= 1;
989 }
990 }
/*
 * mem_stack_f descriptor: fixed-size memory stack frame of 16*SIZE bytes,
 * established at time T (clamped to the current region).
 */
992 static inline void
993 desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
994 {
995 set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
996 sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
997 }
999 static inline void
1000 desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
1002 sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
1005 static inline void
1006 desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
1008 set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
1011 static inline void
1012 desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
1014 set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
1015 0x10 - 4*pspoff);
1018 static inline void
1019 desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
1021 set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
1022 4*spoff);
1025 static inline void
1026 desc_rp_br (unsigned char dst, struct unw_state_record *sr)
1028 sr->return_link_reg = dst;
1031 static inline void
1032 desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
1034 struct unw_reg_info *reg = sr->curr.reg + regnum;
1036 if (reg->where == UNW_WHERE_NONE)
1037 reg->where = UNW_WHERE_GR_SAVE;
1038 reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1041 static inline void
1042 desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
1044 sr->spill_offset = 0x10 - 4*pspoff;
1047 static inline unsigned char *
1048 desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
1050 sr->imask = imaskp;
1051 return imaskp + (2*sr->region_len + 7)/8;
1054 /*
1055 * Body descriptors.
1056 */
1057 static inline void
1058 desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
1060 sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
1061 sr->epilogue_count = ecount + 1;
1064 static inline void
1065 desc_copy_state (unw_word label, struct unw_state_record *sr)
1067 struct unw_labeled_state *ls;
1069 for (ls = sr->labeled_states; ls; ls = ls->next) {
1070 if (ls->label == label) {
1071 free_state_stack(&sr->curr);
1072 memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
1073 sr->curr.next = dup_state_stack(ls->saved_state.next);
1074 return;
1077 printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
1080 static inline void
1081 desc_label_state (unw_word label, struct unw_state_record *sr)
1083 struct unw_labeled_state *ls;
1085 ls = alloc_labeled_state();
1086 if (!ls) {
1087 printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
1088 return;
1090 ls->label = label;
1091 memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
1092 ls->saved_state.next = dup_state_stack(sr->curr.next);
1094 /* insert into list of labeled states: */
1095 ls->next = sr->labeled_states;
1096 sr->labeled_states = ls;
1099 /*
1100 * General descriptors.
1101 */
1103 static inline int
1104 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
1106 if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
1107 return 0;
1108 if (qp > 0) {
1109 if ((sr->pr_val & (1UL << qp)) == 0)
1110 return 0;
1111 sr->pr_mask |= (1UL << qp);
1113 return 1;
1116 static inline void
1117 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
1119 struct unw_reg_info *r;
1121 if (!desc_is_active(qp, t, sr))
1122 return;
1124 r = sr->curr.reg + decode_abreg(abreg, 0);
1125 r->where = UNW_WHERE_NONE;
1126 r->when = UNW_WHEN_NEVER;
1127 r->val = 0;
1130 static inline void
1131 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1132 unsigned char ytreg, struct unw_state_record *sr)
1134 enum unw_where where = UNW_WHERE_GR;
1135 struct unw_reg_info *r;
1137 if (!desc_is_active(qp, t, sr))
1138 return;
1140 if (x)
1141 where = UNW_WHERE_BR;
1142 else if (ytreg & 0x80)
1143 where = UNW_WHERE_FR;
1145 r = sr->curr.reg + decode_abreg(abreg, 0);
1146 r->where = where;
1147 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1148 r->val = (ytreg & 0x7f);
1151 static inline void
1152 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1153 struct unw_state_record *sr)
1155 struct unw_reg_info *r;
1157 if (!desc_is_active(qp, t, sr))
1158 return;
1160 r = sr->curr.reg + decode_abreg(abreg, 1);
1161 r->where = UNW_WHERE_PSPREL;
1162 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1163 r->val = 0x10 - 4*pspoff;
1166 static inline void
1167 desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1168 struct unw_state_record *sr)
1170 struct unw_reg_info *r;
1172 if (!desc_is_active(qp, t, sr))
1173 return;
1175 r = sr->curr.reg + decode_abreg(abreg, 1);
1176 r->where = UNW_WHERE_SPREL;
1177 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1178 r->val = 4*spoff;
1181 #define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1182 code);
1184 /*
1185 * region headers:
1186 */
1187 #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
1188 #define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
1189 /*
1190 * prologue descriptors:
1191 */
1192 #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
1193 #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
1194 #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
1195 #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
1196 #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
1197 #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
1198 #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
1199 #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
1200 #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
1201 #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
1202 #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
1203 #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
1204 #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
1205 #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1206 #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1207 #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1208 #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1209 #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1210 #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
1211 #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
1212 #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
1213 /*
1214 * body descriptors:
1215 */
1216 #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
1217 #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
1218 #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
1219 /*
1220 * general unwind descriptors:
1221 */
1222 #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
1223 #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
1224 #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
1225 #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
1226 #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
1227 #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
1228 #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
1229 #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
1231 #include "unwind_decoder.c"
1234 /* Unwind scripts. */
1236 static inline unw_hash_index_t
1237 hash (unsigned long ip)
1239 # define hashmagic 0x9e3779b97f4a7c16UL /* based on (sqrt(5)/2-1)*2^64 */
1241 return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
1242 #undef hashmagic
1245 static inline long
1246 cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
1248 read_lock(&script->lock);
1249 if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1250 /* keep the read lock... */
1251 return 1;
1252 read_unlock(&script->lock);
1253 return 0;
1256 static inline struct unw_script *
1257 script_lookup (struct unw_frame_info *info)
1259 struct unw_script *script = unw.cache + info->hint;
1260 unsigned short index;
1261 unsigned long ip, pr;
1263 if (UNW_DEBUG_ON(0))
1264 return NULL; /* Always regenerate scripts in debug mode */
1266 STAT(++unw.stat.cache.lookups);
1268 ip = info->ip;
1269 pr = info->pr;
1271 if (cache_match(script, ip, pr)) {
1272 STAT(++unw.stat.cache.hinted_hits);
1273 return script;
1276 index = unw.hash[hash(ip)];
1277 if (index >= UNW_CACHE_SIZE)
1278 return NULL;
1280 script = unw.cache + index;
1281 while (1) {
1282 if (cache_match(script, ip, pr)) {
1283 /* update hint; no locking required as single-word writes are atomic */
1284 STAT(++unw.stat.cache.normal_hits);
1285 unw.cache[info->prev_script].hint = script - unw.cache;
1286 return script;
1288 if (script->coll_chain >= UNW_HASH_SIZE)
1289 return NULL;
1290 script = unw.cache + script->coll_chain;
1291 STAT(++unw.stat.cache.collision_chain_traversals);
1295 /*
1296 * On returning, a write lock for the SCRIPT is still being held.
1297 */
1298 static inline struct unw_script *
1299 script_new (unsigned long ip)
1301 struct unw_script *script, *prev, *tmp;
1302 unw_hash_index_t index;
1303 unsigned short head;
1305 STAT(++unw.stat.script.news);
1307 /*
1308 * Can't (easily) use cmpxchg() here because of ABA problem
1309 * that is intrinsic in cmpxchg()...
1310 */
1311 head = unw.lru_head;
1312 script = unw.cache + head;
1313 unw.lru_head = script->lru_chain;
1315 /*
1316 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1317 * script->lock. Thus, if the write_trylock() fails, we simply bail out. The
1318 * alternative would be to disable interrupts whenever we hold a read-lock, but
1319 * that seems silly.
1320 */
1321 if (!write_trylock(&script->lock))
1322 return NULL;
1324 /* re-insert script at the tail of the LRU chain: */
1325 unw.cache[unw.lru_tail].lru_chain = head;
1326 unw.lru_tail = head;
1328 /* remove the old script from the hash table (if it's there): */
1329 if (script->ip) {
1330 index = hash(script->ip);
1331 tmp = unw.cache + unw.hash[index];
1332 prev = NULL;
1333 while (1) {
1334 if (tmp == script) {
1335 if (prev)
1336 prev->coll_chain = tmp->coll_chain;
1337 else
1338 unw.hash[index] = tmp->coll_chain;
1339 break;
1340 } else
1341 prev = tmp;
1342 if (tmp->coll_chain >= UNW_CACHE_SIZE)
1343 /* old script wasn't in the hash-table */
1344 break;
1345 tmp = unw.cache + tmp->coll_chain;
1349 /* enter new script in the hash table */
1350 index = hash(ip);
1351 script->coll_chain = unw.hash[index];
1352 unw.hash[index] = script - unw.cache;
1354 script->ip = ip; /* set new IP while we're holding the locks */
1356 STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1358 script->flags = 0;
1359 script->hint = 0;
1360 script->count = 0;
1361 return script;
1364 static void
1365 script_finalize (struct unw_script *script, struct unw_state_record *sr)
1367 script->pr_mask = sr->pr_mask;
1368 script->pr_val = sr->pr_val;
1369 /*
1370 * We could down-grade our write-lock on script->lock here but
1371 * the rwlock API doesn't offer atomic lock downgrading, so
1372 * we'll just keep the write-lock and release it later when
1373 * we're done using the script.
1374 */
1377 static inline void
1378 script_emit (struct unw_script *script, struct unw_insn insn)
1380 if (script->count >= UNW_MAX_SCRIPT_LEN) {
1381 UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1382 __FUNCTION__, UNW_MAX_SCRIPT_LEN);
1383 return;
1385 script->insn[script->count++] = insn;
1388 static inline void
1389 emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1391 struct unw_reg_info *r = sr->curr.reg + i;
1392 enum unw_insn_opcode opc;
1393 struct unw_insn insn;
1394 unsigned long val = 0;
1396 switch (r->where) {
1397 case UNW_WHERE_GR:
1398 if (r->val >= 32) {
1399 /* register got spilled to a stacked register */
1400 opc = UNW_INSN_SETNAT_TYPE;
1401 val = UNW_NAT_REGSTK;
1402 } else
1403 /* register got spilled to a scratch register */
1404 opc = UNW_INSN_SETNAT_MEMSTK;
1405 break;
1407 case UNW_WHERE_FR:
1408 opc = UNW_INSN_SETNAT_TYPE;
1409 val = UNW_NAT_VAL;
1410 break;
1412 case UNW_WHERE_BR:
1413 opc = UNW_INSN_SETNAT_TYPE;
1414 val = UNW_NAT_NONE;
1415 break;
1417 case UNW_WHERE_PSPREL:
1418 case UNW_WHERE_SPREL:
1419 opc = UNW_INSN_SETNAT_MEMSTK;
1420 break;
1422 default:
1423 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1424 __FUNCTION__, r->where);
1425 return;
1427 insn.opc = opc;
1428 insn.dst = unw.preg_index[i];
1429 insn.val = val;
1430 script_emit(script, insn);
1433 static void
1434 compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1436 struct unw_reg_info *r = sr->curr.reg + i;
1437 enum unw_insn_opcode opc;
1438 unsigned long val, rval;
1439 struct unw_insn insn;
1440 long need_nat_info;
1442 if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1443 return;
1445 opc = UNW_INSN_MOVE;
1446 val = rval = r->val;
1447 need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1449 switch (r->where) {
1450 case UNW_WHERE_GR:
1451 if (rval >= 32) {
1452 opc = UNW_INSN_MOVE_STACKED;
1453 val = rval - 32;
1454 } else if (rval >= 4 && rval <= 7) {
1455 if (need_nat_info) {
1456 opc = UNW_INSN_MOVE2;
1457 need_nat_info = 0;
1459 val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1460 } else if (rval == 0) {
1461 opc = UNW_INSN_MOVE_CONST;
1462 val = 0;
1463 } else {
1464 /* register got spilled to a scratch register */
1465 opc = UNW_INSN_MOVE_SCRATCH;
1466 val = pt_regs_off(rval);
1468 break;
1470 case UNW_WHERE_FR:
1471 if (rval <= 5)
1472 val = unw.preg_index[UNW_REG_F2 + (rval - 2)];
1473 else if (rval >= 16 && rval <= 31)
1474 val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1475 else {
1476 opc = UNW_INSN_MOVE_SCRATCH;
1477 if (rval <= 11)
1478 val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1479 else
1480 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1481 __FUNCTION__, rval);
1483 break;
1485 case UNW_WHERE_BR:
1486 if (rval >= 1 && rval <= 5)
1487 val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1488 else {
1489 opc = UNW_INSN_MOVE_SCRATCH;
1490 if (rval == 0)
1491 val = offsetof(struct pt_regs, b0);
1492 else if (rval == 6)
1493 val = offsetof(struct pt_regs, b6);
1494 else
1495 val = offsetof(struct pt_regs, b7);
1497 break;
1499 case UNW_WHERE_SPREL:
1500 opc = UNW_INSN_ADD_SP;
1501 break;
1503 case UNW_WHERE_PSPREL:
1504 opc = UNW_INSN_ADD_PSP;
1505 break;
1507 default:
1508 UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
1509 __FUNCTION__, i, r->where);
1510 break;
1512 insn.opc = opc;
1513 insn.dst = unw.preg_index[i];
1514 insn.val = val;
1515 script_emit(script, insn);
1516 if (need_nat_info)
1517 emit_nat_info(sr, i, script);
1519 if (i == UNW_REG_PSP) {
1520 /*
1521 * info->psp must contain the _value_ of the previous
1522 * sp, not it's save location. We get this by
1523 * dereferencing the value we just stored in
1524 * info->psp:
1525 */
1526 insn.opc = UNW_INSN_LOAD;
1527 insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1528 script_emit(script, insn);
1532 static inline const struct unw_table_entry *
1533 lookup (struct unw_table *table, unsigned long rel_ip)
1535 const struct unw_table_entry *e = NULL;
1536 unsigned long lo, hi, mid;
1538 /* do a binary search for right entry: */
1539 for (lo = 0, hi = table->length; lo < hi; ) {
1540 mid = (lo + hi) / 2;
1541 e = &table->array[mid];
1542 if (rel_ip < e->start_offset)
1543 hi = mid;
1544 else if (rel_ip >= e->end_offset)
1545 lo = mid + 1;
1546 else
1547 break;
1549 if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
1550 return NULL;
1551 return e;
1554 /*
1555 * Build an unwind script that unwinds from state OLD_STATE to the
1556 * entrypoint of the function that called OLD_STATE.
1557 */
1558 static inline struct unw_script *
1559 build_script (struct unw_frame_info *info)
1561 const struct unw_table_entry *e = NULL;
1562 struct unw_script *script = NULL;
1563 struct unw_labeled_state *ls, *next;
1564 unsigned long ip = info->ip;
1565 struct unw_state_record sr;
1566 struct unw_table *table;
1567 struct unw_reg_info *r;
1568 struct unw_insn insn;
1569 u8 *dp, *desc_end;
1570 u64 hdr;
1571 int i;
1572 STAT(unsigned long start, parse_start;)
1574 STAT(++unw.stat.script.builds; start = ia64_get_itc());
1576 /* build state record */
1577 memset(&sr, 0, sizeof(sr));
1578 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1579 r->when = UNW_WHEN_NEVER;
1580 sr.pr_val = info->pr;
1582 UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
1583 script = script_new(ip);
1584 if (!script) {
1585 UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__);
1586 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1587 return NULL;
1589 unw.cache[info->prev_script].hint = script - unw.cache;
1591 /* search the kernels and the modules' unwind tables for IP: */
1593 STAT(parse_start = ia64_get_itc());
1595 for (table = unw.tables; table; table = table->next) {
1596 if (ip >= table->start && ip < table->end) {
1597 e = lookup(table, ip - table->segment_base);
1598 break;
1601 if (!e) {
1602 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
1603 UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1604 __FUNCTION__, ip, unw.cache[info->prev_script].ip);
1605 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1606 sr.curr.reg[UNW_REG_RP].when = -1;
1607 sr.curr.reg[UNW_REG_RP].val = 0;
1608 compile_reg(&sr, UNW_REG_RP, script);
1609 script_finalize(script, &sr);
1610 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1611 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1612 return script;
1615 sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
1616 + (ip & 0xfUL));
1617 hdr = *(u64 *) (table->segment_base + e->info_offset);
1618 dp = (u8 *) (table->segment_base + e->info_offset + 8);
1619 desc_end = dp + 8*UNW_LENGTH(hdr);
1621 while (!sr.done && dp < desc_end)
1622 dp = unw_decode(dp, sr.in_body, &sr);
1624 if (sr.when_target > sr.epilogue_start) {
1625 /*
1626 * sp has been restored and all values on the memory stack below
1627 * psp also have been restored.
1628 */
1629 sr.curr.reg[UNW_REG_PSP].val = 0;
1630 sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
1631 sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
1632 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1633 if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
1634 || r->where == UNW_WHERE_SPREL)
1636 r->val = 0;
1637 r->where = UNW_WHERE_NONE;
1638 r->when = UNW_WHEN_NEVER;
1642 script->flags = sr.flags;
1644 /*
1645 * If RP did't get saved, generate entry for the return link
1646 * register.
1647 */
1648 if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
1649 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1650 sr.curr.reg[UNW_REG_RP].when = -1;
1651 sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1652 UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1653 __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
1654 sr.curr.reg[UNW_REG_RP].val);
1657 #ifdef UNW_DEBUG
1658 UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1659 __FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
1660 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1661 if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1662 UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
1663 switch (r->where) {
1664 case UNW_WHERE_GR: UNW_DPRINT(1, "r%lu", r->val); break;
1665 case UNW_WHERE_FR: UNW_DPRINT(1, "f%lu", r->val); break;
1666 case UNW_WHERE_BR: UNW_DPRINT(1, "b%lu", r->val); break;
1667 case UNW_WHERE_SPREL: UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
1668 case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
1669 case UNW_WHERE_NONE:
1670 UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
1671 break;
1673 default:
1674 UNW_DPRINT(1, "BADWHERE(%d)", r->where);
1675 break;
1677 UNW_DPRINT(1, "\t\t%d\n", r->when);
1680 #endif
1682 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1684 /* translate state record into unwinder instructions: */
1686 /*
1687 * First, set psp if we're dealing with a fixed-size frame;
1688 * subsequent instructions may depend on this value.
1689 */
1690 if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
1691 && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
1692 && sr.curr.reg[UNW_REG_PSP].val != 0) {
1693 /* new psp is sp plus frame size */
1694 insn.opc = UNW_INSN_ADD;
1695 insn.dst = offsetof(struct unw_frame_info, psp)/8;
1696 insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */
1697 script_emit(script, insn);
1700 /* determine where the primary UNaT is: */
1701 if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1702 i = UNW_REG_PRI_UNAT_MEM;
1703 else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
1704 i = UNW_REG_PRI_UNAT_GR;
1705 else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1706 i = UNW_REG_PRI_UNAT_MEM;
1707 else
1708 i = UNW_REG_PRI_UNAT_GR;
1710 compile_reg(&sr, i, script);
1712 for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
1713 compile_reg(&sr, i, script);
1715 /* free labeled register states & stack: */
1717 STAT(parse_start = ia64_get_itc());
1718 for (ls = sr.labeled_states; ls; ls = next) {
1719 next = ls->next;
1720 free_state_stack(&ls->saved_state);
1721 free_labeled_state(ls);
1723 free_state_stack(&sr.curr);
1724 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1726 script_finalize(script, &sr);
1727 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1728 return script;
1731 /*
1732 * Apply the unwinding actions represented by OPS and update SR to
1733 * reflect the state that existed upon entry to the function that this
1734 * unwinder represents.
1735 */
1736 static inline void
1737 run_script (struct unw_script *script, struct unw_frame_info *state)
1739 struct unw_insn *ip, *limit, next_insn;
1740 unsigned long opc, dst, val, off;
1741 unsigned long *s = (unsigned long *) state;
1742 STAT(unsigned long start;)
1744 STAT(++unw.stat.script.runs; start = ia64_get_itc());
1745 state->flags = script->flags;
1746 ip = script->insn;
1747 limit = script->insn + script->count;
1748 next_insn = *ip;
1750 while (ip++ < limit) {
1751 opc = next_insn.opc;
1752 dst = next_insn.dst;
1753 val = next_insn.val;
1754 next_insn = *ip;
1756 redo:
1757 switch (opc) {
1758 case UNW_INSN_ADD:
1759 s[dst] += val;
1760 break;
1762 case UNW_INSN_MOVE2:
1763 if (!s[val])
1764 goto lazy_init;
1765 s[dst+1] = s[val+1];
1766 s[dst] = s[val];
1767 break;
1769 case UNW_INSN_MOVE:
1770 if (!s[val])
1771 goto lazy_init;
1772 s[dst] = s[val];
1773 break;
1775 case UNW_INSN_MOVE_SCRATCH:
1776 if (state->pt) {
1777 s[dst] = (unsigned long) get_scratch_regs(state) + val;
1778 } else {
1779 s[dst] = 0;
1780 UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1781 __FUNCTION__, dst, val);
1783 break;
1785 case UNW_INSN_MOVE_CONST:
1786 if (val == 0)
1787 s[dst] = (unsigned long) &unw.r0;
1788 else {
1789 s[dst] = 0;
1790 UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
1791 __FUNCTION__, val);
1793 break;
1796 case UNW_INSN_MOVE_STACKED:
1797 s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
1798 val);
1799 break;
1801 case UNW_INSN_ADD_PSP:
1802 s[dst] = state->psp + val;
1803 break;
1805 case UNW_INSN_ADD_SP:
1806 s[dst] = state->sp + val;
1807 break;
1809 case UNW_INSN_SETNAT_MEMSTK:
1810 if (!state->pri_unat_loc)
1811 state->pri_unat_loc = &state->sw->caller_unat;
1812 /* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
1813 s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
1814 break;
1816 case UNW_INSN_SETNAT_TYPE:
1817 s[dst+1] = val;
1818 break;
1820 case UNW_INSN_LOAD:
1821 #ifdef UNW_DEBUG
1822 if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
1823 #ifndef XEN
1824 || s[val] < TASK_SIZE
1825 #endif
1828 UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1829 __FUNCTION__, s[val]);
1830 break;
1832 #endif
1833 s[dst] = *(unsigned long *) s[val];
1834 break;
1837 STAT(unw.stat.script.run_time += ia64_get_itc() - start);
1838 return;
1840 lazy_init:
1841 off = unw.sw_off[val];
1842 s[val] = (unsigned long) state->sw + off;
1843 if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
1844 /*
1845 * We're initializing a general register: init NaT info, too. Note that
1846 * the offset is a multiple of 8 which gives us the 3 bits needed for
1847 * the type field.
1848 */
1849 s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
1850 goto redo;
#ifdef XEN
/*
 * True iff ADDR lies in the hypervisor's virtual address range
 * (VMM region, bounded by HYPERVISOR_VIRT_START/END).
 */
static inline int
is_hypervisor_virt(unsigned long addr)
{
	return IS_VMM_ADDRESS(addr) &&
	       (HYPERVISOR_VIRT_START <= addr) &&
	       (addr < HYPERVISOR_VIRT_END);
}
#endif
1863 static int
1864 find_save_locs (struct unw_frame_info *info)
1866 int have_write_lock = 0;
1867 struct unw_script *scr;
1868 unsigned long flags = 0;
1870 if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf))
1871 #ifndef XEN
1872 || info->ip < TASK_SIZE
1873 #else
1874 || !is_hypervisor_virt(info->ip)
1875 #endif
1876 ) {
1877 /* don't let obviously bad addresses pollute the cache */
1878 /* FIXME: should really be level 0 but it occurs too often. KAO */
1879 UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
1880 info->rp_loc = NULL;
1881 return -1;
1884 scr = script_lookup(info);
1885 if (!scr) {
1886 spin_lock_irqsave(&unw.lock, flags);
1887 scr = build_script(info);
1888 if (!scr) {
1889 spin_unlock_irqrestore(&unw.lock, flags);
1890 UNW_DPRINT(0,
1891 "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1892 __FUNCTION__, info->ip);
1893 return -1;
1895 have_write_lock = 1;
1897 info->hint = scr->hint;
1898 info->prev_script = scr - unw.cache;
1900 run_script(scr, info);
1902 if (have_write_lock) {
1903 write_unlock(&scr->lock);
1904 spin_unlock_irqrestore(&unw.lock, flags);
1905 } else
1906 read_unlock(&scr->lock);
1907 return 0;
1910 int
1911 unw_unwind (struct unw_frame_info *info)
1913 unsigned long prev_ip, prev_sp, prev_bsp;
1914 unsigned long ip, pr, num_regs;
1915 STAT(unsigned long start, flags;)
1916 int retval;
1918 STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
1920 prev_ip = info->ip;
1921 prev_sp = info->sp;
1922 prev_bsp = info->bsp;
1924 /* restore the ip */
1925 if (!info->rp_loc) {
1926 /* FIXME: should really be level 0 but it occurs too often. KAO */
1927 UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
1928 __FUNCTION__, info->ip);
1929 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1930 return -1;
1932 ip = info->ip = *info->rp_loc;
1933 #ifndef XEN
1934 if (ip < GATE_ADDR) {
1935 #else
1936 if (!is_hypervisor_virt(info->ip)) {
1937 #endif
1938 UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
1939 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1940 return -1;
1943 /* restore the cfm: */
1944 if (!info->pfs_loc) {
1945 UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
1946 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1947 return -1;
1949 info->cfm_loc = info->pfs_loc;
1951 /* restore the bsp: */
1952 pr = info->pr;
1953 num_regs = 0;
1954 if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
1955 info->pt = info->sp + 16;
1956 if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
1957 num_regs = *info->cfm_loc & 0x7f; /* size of frame */
1958 info->pfs_loc =
1959 (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
1960 UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
1961 } else
1962 num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
1963 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
1964 if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
1965 UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1966 __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
1967 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1968 return -1;
1971 /* restore the sp: */
1972 info->sp = info->psp;
1973 if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
1974 UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1975 __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
1976 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1977 return -1;
1980 if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
1981 UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
1982 __FUNCTION__, ip);
1983 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1984 return -1;
1987 /* as we unwind, the saved ar.unat becomes the primary unat: */
1988 info->pri_unat_loc = info->unat_loc;
1990 /* finally, restore the predicates: */
1991 unw_get_pr(info, &info->pr);
1993 retval = find_save_locs(info);
1994 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1995 return retval;
1997 EXPORT_SYMBOL(unw_unwind);
1999 int
2000 unw_unwind_to_user (struct unw_frame_info *info)
2002 unsigned long ip, sp, pr = 0;
2004 while (unw_unwind(info) >= 0) {
2005 unw_get_sp(info, &sp);
2006 if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
2007 < IA64_PT_REGS_SIZE) {
2008 UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
2009 __FUNCTION__);
2010 break;
2012 #ifndef XEN
2013 if (unw_is_intr_frame(info) &&
2014 (pr & (1UL << PRED_USER_STACK)))
2015 return 0;
2016 #else
2017 if (unw_is_intr_frame(info) &&
2018 !is_hvm_vcpu(info->task) &&
2019 (pr & (1UL << PRED_USER_STACK)))
2020 return 0;
2021 /*
2022 * vmx fault handlers don't vcpu->on_stack and keep
2023 * (pr & (1UL << PRED_USER_STACK)) condition untouched.
2024 * we need to stop unwinding somehow.
2025 */
2026 if (unw_is_intr_frame(info) &&
2027 is_hvm_vcpu(info->task) &&
2028 info->pr_loc == &vcpu_regs(info->task)->pr)
2029 return 0;
2030 #endif
2031 if (unw_get_pr (info, &pr) < 0) {
2032 unw_get_rp(info, &ip);
2033 UNW_DPRINT(0, "unwind.%s: failed to read "
2034 "predicate register (ip=0x%lx)\n",
2035 __FUNCTION__, ip);
2036 return -1;
2039 unw_get_ip(info, &ip);
2040 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
2041 __FUNCTION__, ip);
2042 return -1;
2044 EXPORT_SYMBOL(unw_unwind_to_user);
2046 static void
2047 init_frame_info (struct unw_frame_info *info, struct task_struct *t,
2048 struct switch_stack *sw, unsigned long stktop)
2050 unsigned long rbslimit, rbstop, stklimit;
2051 STAT(unsigned long start, flags;)
2053 STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
2055 /*
2056 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
2057 * don't want to do that because it would be slow as each preserved register would
2058 * have to be processed. Instead, what we do here is zero out the frame info and
2059 * start the unwind process at the function that created the switch_stack frame.
2060 * When a preserved value in switch_stack needs to be accessed, run_script() will
2061 * initialize the appropriate pointer on demand.
2062 */
2063 memset(info, 0, sizeof(*info));
2065 rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
2066 rbstop = sw->ar_bspstore;
2067 if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
2068 rbstop = rbslimit;
2070 stklimit = (unsigned long) t + IA64_STK_OFFSET;
2071 if (stktop <= rbstop)
2072 stktop = rbstop;
2074 info->regstk.limit = rbslimit;
2075 info->regstk.top = rbstop;
2076 info->memstk.limit = stklimit;
2077 info->memstk.top = stktop;
2078 info->task = t;
2079 info->sw = sw;
2080 info->sp = info->psp = stktop;
2081 info->pr = sw->pr;
2082 UNW_DPRINT(3, "unwind.%s:\n"
2083 " task 0x%lx\n"
2084 " rbs = [0x%lx-0x%lx)\n"
2085 " stk = [0x%lx-0x%lx)\n"
2086 " pr 0x%lx\n"
2087 " sw 0x%lx\n"
2088 " sp 0x%lx\n",
2089 __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
2090 info->pr, (unsigned long) info->sw, info->sp);
2091 STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
2094 void
2095 unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
2096 struct pt_regs *pt, struct switch_stack *sw)
2098 unsigned long sof;
2100 init_frame_info(info, t, sw, pt->r12);
2101 info->cfm_loc = &pt->cr_ifs;
2102 info->unat_loc = &pt->ar_unat;
2103 info->pfs_loc = &pt->ar_pfs;
2104 sof = *info->cfm_loc & 0x7f;
2105 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
2106 info->ip = pt->cr_iip + ia64_psr(pt)->ri;
2107 info->pt = (unsigned long) pt;
2108 UNW_DPRINT(3, "unwind.%s:\n"
2109 " bsp 0x%lx\n"
2110 " sof 0x%lx\n"
2111 " ip 0x%lx\n",
2112 __FUNCTION__, info->bsp, sof, info->ip);
2113 find_save_locs(info);
2116 void
2117 unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
2119 unsigned long sol;
2121 init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
2122 info->cfm_loc = &sw->ar_pfs;
2123 sol = (*info->cfm_loc >> 7) & 0x7f;
2124 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
2125 info->ip = sw->b0;
2126 UNW_DPRINT(3, "unwind.%s:\n"
2127 " bsp 0x%lx\n"
2128 " sol 0x%lx\n"
2129 " ip 0x%lx\n",
2130 __FUNCTION__, info->bsp, sol, info->ip);
2131 find_save_locs(info);
2134 EXPORT_SYMBOL(unw_init_frame_info);
2136 void
2137 unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
2139 #ifdef XEN
2140 struct switch_stack *sw = (struct switch_stack *) (t->arch._thread.ksp + 16);
2141 #else
2142 struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
2143 #endif
2145 UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
2146 unw_init_frame_info(info, t, sw);
2148 EXPORT_SYMBOL(unw_init_from_blocked_task);
2150 static void
2151 init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
2152 unsigned long gp, const void *table_start, const void *table_end)
2154 const struct unw_table_entry *start = table_start, *end = table_end;
2156 table->name = name;
2157 table->segment_base = segment_base;
2158 table->gp = gp;
2159 table->start = segment_base + start[0].start_offset;
2160 table->end = segment_base + end[-1].end_offset;
2161 table->array = start;
2162 table->length = end - start;
2165 #ifndef XEN
2166 void *
2167 unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
2168 const void *table_start, const void *table_end)
2170 const struct unw_table_entry *start = table_start, *end = table_end;
2171 struct unw_table *table;
2172 unsigned long flags;
2174 if (end - start <= 0) {
2175 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2176 __FUNCTION__);
2177 return NULL;
2180 table = kmalloc(sizeof(*table), GFP_USER);
2181 if (!table)
2182 return NULL;
2184 init_unwind_table(table, name, segment_base, gp, table_start, table_end);
2186 spin_lock_irqsave(&unw.lock, flags);
2188 /* keep kernel unwind table at the front (it's searched most commonly): */
2189 table->next = unw.tables->next;
2190 unw.tables->next = table;
2192 spin_unlock_irqrestore(&unw.lock, flags);
2194 return table;
2197 void
2198 unw_remove_unwind_table (void *handle)
2200 struct unw_table *table, *prev;
2201 struct unw_script *tmp;
2202 unsigned long flags;
2203 long index;
2205 if (!handle) {
2206 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2207 __FUNCTION__);
2208 return;
2211 table = handle;
2212 if (table == &unw.kernel_table) {
2213 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2214 "no-can-do!\n", __FUNCTION__);
2215 return;
2218 spin_lock_irqsave(&unw.lock, flags);
2220 /* first, delete the table: */
2222 for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
2223 if (prev->next == table)
2224 break;
2225 if (!prev) {
2226 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2227 __FUNCTION__, (void *) table);
2228 spin_unlock_irqrestore(&unw.lock, flags);
2229 return;
2231 prev->next = table->next;
2233 spin_unlock_irqrestore(&unw.lock, flags);
2235 /* next, remove hash table entries for this table */
2237 for (index = 0; index <= UNW_HASH_SIZE; ++index) {
2238 tmp = unw.cache + unw.hash[index];
2239 if (unw.hash[index] >= UNW_CACHE_SIZE
2240 || tmp->ip < table->start || tmp->ip >= table->end)
2241 continue;
2243 write_lock(&tmp->lock);
2245 if (tmp->ip >= table->start && tmp->ip < table->end) {
2246 unw.hash[index] = tmp->coll_chain;
2247 tmp->ip = 0;
2250 write_unlock(&tmp->lock);
2253 kfree(table);
2256 static int __init
2257 create_gate_table (void)
2259 const struct unw_table_entry *entry, *start, *end;
2260 unsigned long *lp, segbase = GATE_ADDR;
2261 size_t info_size, size;
2262 char *info;
2263 Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
2264 int i;
2266 for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
2267 if (phdr->p_type == PT_IA_64_UNWIND) {
2268 punw = phdr;
2269 break;
2272 if (!punw) {
2273 printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__);
2274 return 0;
2277 start = (const struct unw_table_entry *) punw->p_vaddr;
2278 end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
2279 size = 0;
2281 unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
2283 for (entry = start; entry < end; ++entry)
2284 size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2285 size += 8; /* reserve space for "end of table" marker */
2287 unw.gate_table = kmalloc(size, GFP_KERNEL);
2288 if (!unw.gate_table) {
2289 unw.gate_table_size = 0;
2290 printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__);
2291 return 0;
2293 unw.gate_table_size = size;
2295 lp = unw.gate_table;
2296 info = (char *) unw.gate_table + size;
2298 for (entry = start; entry < end; ++entry, lp += 3) {
2299 info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2300 info -= info_size;
2301 memcpy(info, (char *) segbase + entry->info_offset, info_size);
2303 lp[0] = segbase + entry->start_offset; /* start */
2304 lp[1] = segbase + entry->end_offset; /* end */
2305 lp[2] = info - (char *) unw.gate_table; /* info */
2307 *lp = 0; /* end-of-table marker */
2308 return 0;
2311 __initcall(create_gate_table);
2312 #endif // !XEN
2314 void __init
2315 unw_init (void)
2317 extern char __gp[];
2318 extern void unw_hash_index_t_is_too_narrow (void);
2319 long i, off;
2321 if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
2322 unw_hash_index_t_is_too_narrow();
2324 unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT);
2325 unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
2326 unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
2327 unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
2328 unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT);
2329 unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
2330 unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
2331 unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
2332 for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
2333 unw.sw_off[unw.preg_index[i]] = off;
2334 for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
2335 unw.sw_off[unw.preg_index[i]] = off;
2336 for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
2337 unw.sw_off[unw.preg_index[i]] = off;
2338 for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
2339 unw.sw_off[unw.preg_index[i]] = off;
2341 for (i = 0; i < UNW_CACHE_SIZE; ++i) {
2342 if (i > 0)
2343 unw.cache[i].lru_chain = (i - 1);
2344 unw.cache[i].coll_chain = -1;
2345 rwlock_init(&unw.cache[i].lock);
2347 unw.lru_head = UNW_CACHE_SIZE - 1;
2348 unw.lru_tail = 0;
2350 init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
2351 __start_unwind, __end_unwind);
2354 #ifndef XEN
2355 /*
2356 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2358 * This system call has been deprecated. The new and improved way to get
2359 * at the kernel's unwind info is via the gate DSO. The address of the
2360 * ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
2362 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2364 * This system call copies the unwind data into the buffer pointed to by BUF and returns
2365 * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data
2366 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
2367 * unwind data.
2369 * The first portion of the unwind data contains an unwind table and rest contains the
2370 * associated unwind info (in no particular order). The unwind table consists of a table
2371 * of entries of the form:
2373 * u64 start; (64-bit address of start of function)
 *	u64 end;	(64-bit address of end of function)
2375 * u64 info; (BUF-relative offset to unwind info)
2377 * The end of the unwind table is indicated by an entry with a START address of zero.
2379 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2380 * on the format of the unwind info.
2382 * ERRORS
2383 * EFAULT BUF points outside your accessible address space.
2384 */
2385 asmlinkage long
2386 sys_getunwind (void __user *buf, size_t buf_size)
2388 if (buf && buf_size >= unw.gate_table_size)
2389 if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
2390 return -EFAULT;
2391 return unw.gate_table_size;
2393 #endif