direct-io.hg

changeset 8485:45c4e735fc8c

Enable stack trace on Xen BUG (by Isaku Yamahata)
author djm@kirby.fc.hp.com
date Fri Dec 30 23:40:13 2005 -0600 (2005-12-30)
parents f89906acd9f6
children 903fb46f240e
files xen/arch/ia64/Makefile xen/arch/ia64/linux-xen/entry.S xen/arch/ia64/linux-xen/process-linux-xen.c xen/arch/ia64/linux-xen/unwind.c xen/arch/ia64/linux-xen/unwind_decoder.c xen/arch/ia64/linux-xen/unwind_i.h xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/xen/hyperprivop.S xen/arch/ia64/xen/ivt.S xen/arch/ia64/xen/process.c xen/arch/ia64/xen/xenmisc.c
line diff
     1.1 --- a/xen/arch/ia64/Makefile	Fri Dec 30 16:11:08 2005 -0600
     1.2 +++ b/xen/arch/ia64/Makefile	Fri Dec 30 23:40:13 2005 -0600
     1.3 @@ -23,6 +23,13 @@ OBJS +=	bitop.o clear_page.o flush.o cop
     1.4  	__divsi3.o __udivsi3.o __modsi3.o __umodsi3.o			\
     1.5  	__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o
     1.6  
     1.7 +# xen stack unwinder
     1.8 +# unwind_decoder.c is included in unwind.c
     1.9 +OBJS += unwind.o
    1.10 +#unwind.o: CFLAGS += -DUNW_DEBUG=4
    1.11 +
    1.12 +OBJS += process-linux-xen.o
    1.13 +
    1.14  # perfmon.o
    1.15  # unwind.o needed for kernel unwinding (rare)
    1.16  
    1.17 @@ -31,11 +38,26 @@ OBJS := $(subst $(TARGET_ARCH)/asm-offse
    1.18  # remove following line if not privifying in memory
    1.19  # OBJS += privify.o
    1.20  
    1.21 -default: $(OBJS) head.o xen.lds.s
    1.22 -	$(LD) -r -o arch.o $(OBJS)
    1.23 +default: $(TARGET)
    1.24 +
    1.25 +$(CURDIR)/arch.o: $(OBJS)
    1.26 +	$(LD) -r -o $@ $(OBJS)
    1.27 +
    1.28 +$(TARGET)-syms: $(ALL_OBJS) head.o xen.lds.s
    1.29  	$(LD) $(LDFLAGS) -T $(BASEDIR)/arch/$(TARGET_ARCH)/xen.lds.s -N \
    1.30 -		-Map map.out head.o $(ALL_OBJS) -o $(TARGET)-syms
    1.31 -	$(OBJCOPY) -R .note -R .comment -S $(TARGET)-syms $(TARGET)
    1.32 +		-Map map.out head.o $(ALL_OBJS) -o $@
    1.33 +	$(NM) -n $@ | $(BASEDIR)/tools/symbols > $(BASEDIR)/xen-syms.S
    1.34 +	$(MAKE) $(BASEDIR)/xen-syms.o
    1.35 +	$(LD) $(LDFLAGS) -T $(BASEDIR)/arch/$(TARGET_ARCH)/xen.lds.s -N \
    1.36 +		-Map map.out head.o $(ALL_OBJS) $(BASEDIR)/xen-syms.o -o $@
    1.37 +	$(NM) -n $@ | $(BASEDIR)/tools/symbols >$(BASEDIR)/xen-syms.S
    1.38 +	$(MAKE) $(BASEDIR)/xen-syms.o
    1.39 +	$(LD) $(LDFLAGS) -T $(BASEDIR)/arch/$(TARGET_ARCH)/xen.lds.s -N \
    1.40 +		-Map map.out head.o $(ALL_OBJS) $(BASEDIR)/xen-syms.o -o $@
    1.41 +	rm -f $(BASEDIR)/xen-syms.S $(BASEDIR)/xen-syms.o
    1.42 +
    1.43 +$(TARGET): $(TARGET)-syms
    1.44 +	$(OBJCOPY) -R .note -R .comment -S $(TARGET)-syms $@
    1.45  	$(NM) -n $(TARGET)-syms | grep -v '\( [aUw] \)\|\(__crc_\)\|\( \$[adt]\)'\
    1.46  		 > $(BASEDIR)/System.map
    1.47  
     2.1 --- a/xen/arch/ia64/linux-xen/entry.S	Fri Dec 30 16:11:08 2005 -0600
     2.2 +++ b/xen/arch/ia64/linux-xen/entry.S	Fri Dec 30 23:40:13 2005 -0600
     2.3 @@ -1417,7 +1417,6 @@ GLOBAL_ENTRY(ia64_prepare_handle_unalign
     2.4  	br.cond.sptk.many rp				// goes to ia64_leave_kernel
     2.5  END(ia64_prepare_handle_unaligned)
     2.6  
     2.7 -#ifndef XEN
     2.8  	//
     2.9  	// unw_init_running(void (*callback)(info, arg), void *arg)
    2.10  	//
    2.11 @@ -1463,6 +1462,7 @@ 1:	mov gp=loc2				// restore gp
    2.12  	br.ret.sptk.many rp
    2.13  END(unw_init_running)
    2.14  
    2.15 +#ifndef XEN
    2.16  	.rodata
    2.17  	.align 8
    2.18  	.globl sys_call_table
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/arch/ia64/linux-xen/process-linux-xen.c	Fri Dec 30 23:40:13 2005 -0600
     3.3 @@ -0,0 +1,848 @@
     3.4 +/*
     3.5 + * Architecture-specific setup.
     3.6 + *
     3.7 + * Copyright (C) 1998-2003 Hewlett-Packard Co
     3.8 + *	David Mosberger-Tang <davidm@hpl.hp.com>
     3.9 + * 04/11/17 Ashok Raj	<ashok.raj@intel.com> Added CPU Hotplug Support
    3.10 + */
    3.11 +#ifdef XEN
    3.12 +#include <xen/types.h>
    3.13 +#include <xen/lib.h>
    3.14 +#include <xen/symbols.h>
    3.15 +#include <xen/smp.h>
    3.16 +#include <asm/uaccess.h>
    3.17 +#include <asm/processor.h>
    3.18 +#include <asm/ptrace.h>
    3.19 +#include <asm/unwind.h>
    3.20 +#else
    3.21 +#define __KERNEL_SYSCALLS__	/* see <asm/unistd.h> */
    3.22 +#include <linux/config.h>
    3.23 +
    3.24 +#include <linux/cpu.h>
    3.25 +#include <linux/pm.h>
    3.26 +#include <linux/elf.h>
    3.27 +#include <linux/errno.h>
    3.28 +#include <linux/kallsyms.h>
    3.29 +#include <linux/kernel.h>
    3.30 +#include <linux/mm.h>
    3.31 +#include <linux/module.h>
    3.32 +#include <linux/notifier.h>
    3.33 +#include <linux/personality.h>
    3.34 +#include <linux/sched.h>
    3.35 +#include <linux/slab.h>
    3.36 +#include <linux/smp_lock.h>
    3.37 +#include <linux/stddef.h>
    3.38 +#include <linux/thread_info.h>
    3.39 +#include <linux/unistd.h>
    3.40 +#include <linux/efi.h>
    3.41 +#include <linux/interrupt.h>
    3.42 +#include <linux/delay.h>
    3.43 +#include <linux/kprobes.h>
    3.44 +
    3.45 +#include <asm/cpu.h>
    3.46 +#include <asm/delay.h>
    3.47 +#include <asm/elf.h>
    3.48 +#include <asm/ia32.h>
    3.49 +#include <asm/irq.h>
    3.50 +#include <asm/pgalloc.h>
    3.51 +#include <asm/processor.h>
    3.52 +#include <asm/sal.h>
    3.53 +#include <asm/tlbflush.h>
    3.54 +#include <asm/uaccess.h>
    3.55 +#include <asm/unwind.h>
    3.56 +#include <asm/user.h>
    3.57 +
    3.58 +#include "entry.h"
    3.59 +
    3.60 +#ifdef CONFIG_PERFMON
    3.61 +# include <asm/perfmon.h>
    3.62 +#endif
    3.63 +
    3.64 +#include "sigframe.h"
    3.65 +
    3.66 +void (*ia64_mark_idle)(int);
    3.67 +static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
    3.68 +
    3.69 +unsigned long boot_option_idle_override = 0;
    3.70 +EXPORT_SYMBOL(boot_option_idle_override);
    3.71 +#endif
    3.72 +
    3.73 +void
    3.74 +ia64_do_show_stack (struct unw_frame_info *info, void *arg)
    3.75 +{
    3.76 +	unsigned long ip, sp, bsp;
    3.77 +	char buf[128];			/* don't make it so big that it overflows the stack! */
    3.78 +
    3.79 +	printk("\nCall Trace:\n");
    3.80 +	do {
    3.81 +		unw_get_ip(info, &ip);
    3.82 +		if (ip == 0)
    3.83 +			break;
    3.84 +
    3.85 +		unw_get_sp(info, &sp);
    3.86 +		unw_get_bsp(info, &bsp);
    3.87 +		snprintf(buf, sizeof(buf),
    3.88 +			 " [<%016lx>] %%s\n"
    3.89 +			 "                                sp=%016lx bsp=%016lx\n",
    3.90 +			 ip, sp, bsp);
    3.91 +		print_symbol(buf, ip);
    3.92 +	} while (unw_unwind(info) >= 0);
    3.93 +}
    3.94 +
    3.95 +void
    3.96 +show_stack (struct task_struct *task, unsigned long *sp)
    3.97 +{
    3.98 +	if (!task)
    3.99 +		unw_init_running(ia64_do_show_stack, NULL);
   3.100 +	else {
   3.101 +		struct unw_frame_info info;
   3.102 +
   3.103 +		unw_init_from_blocked_task(&info, task);
   3.104 +		ia64_do_show_stack(&info, NULL);
   3.105 +	}
   3.106 +}
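
The two helpers above are what the changeset summary ("Enable stack trace on Xen BUG") is about; the hunks that actually hook them into Xen's BUG/panic handling (xenmisc.c, process.c and the exception-vector files listed at the top) are not shown in this section. A minimal, illustrative sketch of such a hook-up follows; the function name dump_current_trace() is hypothetical and not part of the patch.

/* Illustrative only: a BUG/panic handler can dump the currently running
 * context by passing a NULL task, which makes show_stack() unwind the
 * current stack via unw_init_running(). */
static void dump_current_trace(void)
{
	show_stack(NULL, NULL);
}
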
   3.107 +
   3.108 +#ifndef XEN
   3.109 +void
   3.110 +dump_stack (void)
   3.111 +{
   3.112 +	show_stack(NULL, NULL);
   3.113 +}
   3.114 +
   3.115 +EXPORT_SYMBOL(dump_stack);
   3.116 +#endif
   3.117 +
   3.118 +#ifdef XEN
   3.119 +void
   3.120 +show_registers(struct pt_regs *regs)
   3.121 +#else
   3.122 +void
   3.123 +show_regs (struct pt_regs *regs)
   3.124 +#endif
   3.125 +{
   3.126 +	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
   3.127 +
   3.128 +#ifndef XEN
   3.129 +	print_modules();
   3.130 +	printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm);
   3.131 +	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]    %s\n",
   3.132 +	       regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
   3.133 +#else
   3.134 +	printk("\nCPU %d\n", smp_processor_id());
   3.135 +	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]\n",
   3.136 +	       regs->cr_ipsr, regs->cr_ifs, ip);
   3.137 +#endif
   3.138 +	print_symbol("ip is at %s\n", ip);
   3.139 +	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
   3.140 +	       regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
   3.141 +	printk("rnat: %016lx bsps: %016lx pr  : %016lx\n",
   3.142 +	       regs->ar_rnat, regs->ar_bspstore, regs->pr);
   3.143 +	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
   3.144 +	       regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
   3.145 +	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
   3.146 +	printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0, regs->b6, regs->b7);
   3.147 +	printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
   3.148 +	       regs->f6.u.bits[1], regs->f6.u.bits[0],
   3.149 +	       regs->f7.u.bits[1], regs->f7.u.bits[0]);
   3.150 +	printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
   3.151 +	       regs->f8.u.bits[1], regs->f8.u.bits[0],
   3.152 +	       regs->f9.u.bits[1], regs->f9.u.bits[0]);
   3.153 +	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
   3.154 +	       regs->f10.u.bits[1], regs->f10.u.bits[0],
   3.155 +	       regs->f11.u.bits[1], regs->f11.u.bits[0]);
   3.156 +
   3.157 +	printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1, regs->r2, regs->r3);
   3.158 +	printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10);
   3.159 +	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, regs->r12, regs->r13);
   3.160 +	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16);
   3.161 +	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19);
   3.162 +	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22);
   3.163 +	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25);
   3.164 +	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28);
   3.165 +	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31);
   3.166 +
   3.167 +	if (user_mode(regs)) {
   3.168 +		/* print the stacked registers */
   3.169 +		unsigned long val, *bsp, ndirty;
   3.170 +		int i, sof, is_nat = 0;
   3.171 +
   3.172 +		sof = regs->cr_ifs & 0x7f;	/* size of frame */
   3.173 +		ndirty = (regs->loadrs >> 19);
   3.174 +		bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty);
   3.175 +		for (i = 0; i < sof; ++i) {
   3.176 +			get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i));
   3.177 +			printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val,
   3.178 +			       ((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
   3.179 +		}
   3.180 +	} else
   3.181 +		show_stack(NULL, NULL);
   3.182 +}
   3.183 +
   3.184 +#ifndef XEN
   3.185 +void
   3.186 +do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
   3.187 +{
   3.188 +	if (fsys_mode(current, &scr->pt)) {
   3.189 +		/* defer signal-handling etc. until we return to privilege-level 0.  */
   3.190 +		if (!ia64_psr(&scr->pt)->lp)
   3.191 +			ia64_psr(&scr->pt)->lp = 1;
   3.192 +		return;
   3.193 +	}
   3.194 +
   3.195 +#ifdef CONFIG_PERFMON
   3.196 +	if (current->thread.pfm_needs_checking)
   3.197 +		pfm_handle_work();
   3.198 +#endif
   3.199 +
   3.200 +	/* deal with pending signal delivery */
   3.201 +	if (test_thread_flag(TIF_SIGPENDING))
   3.202 +		ia64_do_signal(oldset, scr, in_syscall);
   3.203 +}
   3.204 +
   3.205 +static int pal_halt        = 1;
   3.206 +static int can_do_pal_halt = 1;
   3.207 +
   3.208 +static int __init nohalt_setup(char * str)
   3.209 +{
   3.210 +	pal_halt = can_do_pal_halt = 0;
   3.211 +	return 1;
   3.212 +}
   3.213 +__setup("nohalt", nohalt_setup);
   3.214 +
   3.215 +void
   3.216 +update_pal_halt_status(int status)
   3.217 +{
   3.218 +	can_do_pal_halt = pal_halt && status;
   3.219 +}
   3.220 +
   3.221 +/*
   3.222 + * We use this if we don't have any better idle routine..
   3.223 + */
   3.224 +void
   3.225 +default_idle (void)
   3.226 +{
   3.227 +	local_irq_enable();
   3.228 +	while (!need_resched())
   3.229 +		if (can_do_pal_halt)
   3.230 +			safe_halt();
   3.231 +		else
   3.232 +			cpu_relax();
   3.233 +}
   3.234 +
   3.235 +#ifdef CONFIG_HOTPLUG_CPU
   3.236 +/* We don't actually take CPU down, just spin without interrupts. */
   3.237 +static inline void play_dead(void)
   3.238 +{
   3.239 +	extern void ia64_cpu_local_tick (void);
   3.240 +	unsigned int this_cpu = smp_processor_id();
   3.241 +
   3.242 +	/* Ack it */
   3.243 +	__get_cpu_var(cpu_state) = CPU_DEAD;
   3.244 +
   3.245 +	max_xtp();
   3.246 +	local_irq_disable();
   3.247 +	idle_task_exit();
   3.248 +	ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
   3.249 +	/*
   3.250 +	 * The above is a point of no-return, the processor is
   3.251 +	 * expected to be in SAL loop now.
   3.252 +	 */
   3.253 +	BUG();
   3.254 +}
   3.255 +#else
   3.256 +static inline void play_dead(void)
   3.257 +{
   3.258 +	BUG();
   3.259 +}
   3.260 +#endif /* CONFIG_HOTPLUG_CPU */
   3.261 +
   3.262 +void cpu_idle_wait(void)
   3.263 +{
   3.264 +	unsigned int cpu, this_cpu = get_cpu();
   3.265 +	cpumask_t map;
   3.266 +
   3.267 +	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
   3.268 +	put_cpu();
   3.269 +
   3.270 +	cpus_clear(map);
   3.271 +	for_each_online_cpu(cpu) {
   3.272 +		per_cpu(cpu_idle_state, cpu) = 1;
   3.273 +		cpu_set(cpu, map);
   3.274 +	}
   3.275 +
   3.276 +	__get_cpu_var(cpu_idle_state) = 0;
   3.277 +
   3.278 +	wmb();
   3.279 +	do {
   3.280 +		ssleep(1);
   3.281 +		for_each_online_cpu(cpu) {
   3.282 +			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
   3.283 +				cpu_clear(cpu, map);
   3.284 +		}
   3.285 +		cpus_and(map, map, cpu_online_map);
   3.286 +	} while (!cpus_empty(map));
   3.287 +}
   3.288 +EXPORT_SYMBOL_GPL(cpu_idle_wait);
   3.289 +
   3.290 +void __attribute__((noreturn))
   3.291 +cpu_idle (void)
   3.292 +{
   3.293 +	void (*mark_idle)(int) = ia64_mark_idle;
   3.294 +
   3.295 +	/* endless idle loop with no priority at all */
   3.296 +	while (1) {
   3.297 +#ifdef CONFIG_SMP
   3.298 +		if (!need_resched())
   3.299 +			min_xtp();
   3.300 +#endif
   3.301 +		while (!need_resched()) {
   3.302 +			void (*idle)(void);
   3.303 +
   3.304 +			if (__get_cpu_var(cpu_idle_state))
   3.305 +				__get_cpu_var(cpu_idle_state) = 0;
   3.306 +
   3.307 +			rmb();
   3.308 +			if (mark_idle)
   3.309 +				(*mark_idle)(1);
   3.310 +
   3.311 +			idle = pm_idle;
   3.312 +			if (!idle)
   3.313 +				idle = default_idle;
   3.314 +			(*idle)();
   3.315 +		}
   3.316 +
   3.317 +		if (mark_idle)
   3.318 +			(*mark_idle)(0);
   3.319 +
   3.320 +#ifdef CONFIG_SMP
   3.321 +		normal_xtp();
   3.322 +#endif
   3.323 +		schedule();
   3.324 +		check_pgt_cache();
   3.325 +		if (cpu_is_offline(smp_processor_id()))
   3.326 +			play_dead();
   3.327 +	}
   3.328 +}
   3.329 +
   3.330 +void
   3.331 +ia64_save_extra (struct task_struct *task)
   3.332 +{
   3.333 +#ifdef CONFIG_PERFMON
   3.334 +	unsigned long info;
   3.335 +#endif
   3.336 +
   3.337 +	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
   3.338 +		ia64_save_debug_regs(&task->thread.dbr[0]);
   3.339 +
   3.340 +#ifdef CONFIG_PERFMON
   3.341 +	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
   3.342 +		pfm_save_regs(task);
   3.343 +
   3.344 +	info = __get_cpu_var(pfm_syst_info);
   3.345 +	if (info & PFM_CPUINFO_SYST_WIDE)
   3.346 +		pfm_syst_wide_update_task(task, info, 0);
   3.347 +#endif
   3.348 +
   3.349 +#ifdef CONFIG_IA32_SUPPORT
   3.350 +	if (IS_IA32_PROCESS(ia64_task_regs(task)))
   3.351 +		ia32_save_state(task);
   3.352 +#endif
   3.353 +}
   3.354 +
   3.355 +void
   3.356 +ia64_load_extra (struct task_struct *task)
   3.357 +{
   3.358 +#ifdef CONFIG_PERFMON
   3.359 +	unsigned long info;
   3.360 +#endif
   3.361 +
   3.362 +	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
   3.363 +		ia64_load_debug_regs(&task->thread.dbr[0]);
   3.364 +
   3.365 +#ifdef CONFIG_PERFMON
   3.366 +	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
   3.367 +		pfm_load_regs(task);
   3.368 +
   3.369 +	info = __get_cpu_var(pfm_syst_info);
   3.370 +	if (info & PFM_CPUINFO_SYST_WIDE) 
   3.371 +		pfm_syst_wide_update_task(task, info, 1);
   3.372 +#endif
   3.373 +
   3.374 +#ifdef CONFIG_IA32_SUPPORT
   3.375 +	if (IS_IA32_PROCESS(ia64_task_regs(task)))
   3.376 +		ia32_load_state(task);
   3.377 +#endif
   3.378 +}
   3.379 +
   3.380 +/*
   3.381 + * Copy the state of an ia-64 thread.
   3.382 + *
   3.383 + * We get here through the following  call chain:
   3.384 + *
   3.385 + *	from user-level:	from kernel:
   3.386 + *
   3.387 + *	<clone syscall>	        <some kernel call frames>
   3.388 + *	sys_clone		   :
   3.389 + *	do_fork			do_fork
   3.390 + *	copy_thread		copy_thread
   3.391 + *
   3.392 + * This means that the stack layout is as follows:
   3.393 + *
   3.394 + *	+---------------------+ (highest addr)
   3.395 + *	|   struct pt_regs    |
   3.396 + *	+---------------------+
   3.397 + *	| struct switch_stack |
   3.398 + *	+---------------------+
   3.399 + *	|                     |
   3.400 + *	|    memory stack     |
   3.401 + *	|                     | <-- sp (lowest addr)
   3.402 + *	+---------------------+
   3.403 + *
   3.404 + * Observe that we copy the unat values that are in pt_regs and switch_stack.  Spilling an
   3.405 + * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register,
   3.406 + * with N=(X & 0x1ff)/8.  Thus, copying the unat value preserves the NaT bits ONLY if the
   3.407 + * pt_regs structure in the parent is congruent to that of the child, modulo 512.  Since
   3.408 + * the stack is page aligned and the page size is at least 4KB, this is always the case,
   3.409 + * so there is nothing to worry about.
   3.410 + */
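
To make the congruence argument in the comment above concrete, here is a stand-alone worked check (illustrative only, not part of the patch; the two addresses are made up): any two page-aligned stacks place their pt_regs areas at the same offset modulo 512, so every spill slot maps to the same ar.unat bit N = (X & 0x1ff)/8 in parent and child.

/* Worked check of the N = (X & 0x1ff)/8 argument; hypothetical addresses. */
#include <assert.h>

static unsigned long unat_bit(unsigned long spill_addr)
{
	return (spill_addr & 0x1ff) / 8;	/* bit number in ar.unat */
}

int main(void)
{
	/* Two stacks, both page aligned, so the pt_regs offsets agree mod 512. */
	unsigned long parent_regs = 0xe000000012340f00UL;
	unsigned long child_regs  = 0xe000000056780f00UL;
	int slot;

	assert(parent_regs % 512 == child_regs % 512);
	for (slot = 0; slot < 32; slot++)
		assert(unat_bit(parent_regs + 8 * slot) ==
		       unat_bit(child_regs + 8 * slot));
	return 0;
}
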
   3.411 +int
   3.412 +copy_thread (int nr, unsigned long clone_flags,
   3.413 +	     unsigned long user_stack_base, unsigned long user_stack_size,
   3.414 +	     struct task_struct *p, struct pt_regs *regs)
   3.415 +{
   3.416 +	extern char ia64_ret_from_clone, ia32_ret_from_clone;
   3.417 +	struct switch_stack *child_stack, *stack;
   3.418 +	unsigned long rbs, child_rbs, rbs_size;
   3.419 +	struct pt_regs *child_ptregs;
   3.420 +	int retval = 0;
   3.421 +
   3.422 +#ifdef CONFIG_SMP
   3.423 +	/*
   3.424 +	 * For SMP idle threads, fork_by_hand() calls do_fork with
   3.425 +	 * NULL regs.
   3.426 +	 */
   3.427 +	if (!regs)
   3.428 +		return 0;
   3.429 +#endif
   3.430 +
   3.431 +	stack = ((struct switch_stack *) regs) - 1;
   3.432 +
   3.433 +	child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
   3.434 +	child_stack = (struct switch_stack *) child_ptregs - 1;
   3.435 +
   3.436 +	/* copy parent's switch_stack & pt_regs to child: */
   3.437 +	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));
   3.438 +
   3.439 +	rbs = (unsigned long) current + IA64_RBS_OFFSET;
   3.440 +	child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
   3.441 +	rbs_size = stack->ar_bspstore - rbs;
   3.442 +
   3.443 +	/* copy the parent's register backing store to the child: */
   3.444 +	memcpy((void *) child_rbs, (void *) rbs, rbs_size);
   3.445 +
   3.446 +	if (likely(user_mode(child_ptregs))) {
   3.447 +		if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs))
   3.448 +			child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
   3.449 +		if (user_stack_base) {
   3.450 +			child_ptregs->r12 = user_stack_base + user_stack_size - 16;
   3.451 +			child_ptregs->ar_bspstore = user_stack_base;
   3.452 +			child_ptregs->ar_rnat = 0;
   3.453 +			child_ptregs->loadrs = 0;
   3.454 +		}
   3.455 +	} else {
   3.456 +		/*
   3.457 +		 * Note: we simply preserve the relative position of
   3.458 +		 * the stack pointer here.  There is no need to
   3.459 +		 * allocate a scratch area here, since that will have
   3.460 +		 * been taken care of by the caller of sys_clone()
   3.461 +		 * already.
   3.462 +		 */
   3.463 +		child_ptregs->r12 = (unsigned long) child_ptregs - 16; /* kernel sp */
   3.464 +		child_ptregs->r13 = (unsigned long) p;		/* set `current' pointer */
   3.465 +	}
   3.466 +	child_stack->ar_bspstore = child_rbs + rbs_size;
   3.467 +	if (IS_IA32_PROCESS(regs))
   3.468 +		child_stack->b0 = (unsigned long) &ia32_ret_from_clone;
   3.469 +	else
   3.470 +		child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
   3.471 +
   3.472 +	/* copy parts of thread_struct: */
   3.473 +	p->thread.ksp = (unsigned long) child_stack - 16;
   3.474 +
    3.475 +	/* Stop some PSR bits from being inherited.
    3.476 +	 * The psr.up/psr.pp bits must be cleared on fork but inherited on execve();
    3.477 +	 * therefore we must specify them explicitly here and not include them in
    3.478 +	 * IA64_PSR_BITS_TO_CLEAR.
   3.479 +	 */
   3.480 +	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
   3.481 +				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
   3.482 +
   3.483 +	/*
   3.484 +	 * NOTE: The calling convention considers all floating point
   3.485 +	 * registers in the high partition (fph) to be scratch.  Since
   3.486 +	 * the only way to get to this point is through a system call,
   3.487 +	 * we know that the values in fph are all dead.  Hence, there
   3.488 +	 * is no need to inherit the fph state from the parent to the
   3.489 +	 * child and all we have to do is to make sure that
   3.490 +	 * IA64_THREAD_FPH_VALID is cleared in the child.
   3.491 +	 *
   3.492 +	 * XXX We could push this optimization a bit further by
   3.493 +	 * clearing IA64_THREAD_FPH_VALID on ANY system call.
   3.494 +	 * However, it's not clear this is worth doing.  Also, it
   3.495 +	 * would be a slight deviation from the normal Linux system
   3.496 +	 * call behavior where scratch registers are preserved across
   3.497 +	 * system calls (unless used by the system call itself).
   3.498 +	 */
   3.499 +#	define THREAD_FLAGS_TO_CLEAR	(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \
   3.500 +					 | IA64_THREAD_PM_VALID)
   3.501 +#	define THREAD_FLAGS_TO_SET	0
   3.502 +	p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
   3.503 +			   | THREAD_FLAGS_TO_SET);
   3.504 +	ia64_drop_fpu(p);	/* don't pick up stale state from a CPU's fph */
   3.505 +#ifdef CONFIG_IA32_SUPPORT
   3.506 +	/*
   3.507 +	 * If we're cloning an IA32 task then save the IA32 extra
   3.508 +	 * state from the current task to the new task
   3.509 +	 */
   3.510 +	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
   3.511 +		ia32_save_state(p);
   3.512 +		if (clone_flags & CLONE_SETTLS)
   3.513 +			retval = ia32_clone_tls(p, child_ptregs);
   3.514 +
   3.515 +		/* Copy partially mapped page list */
   3.516 +		if (!retval)
   3.517 +			retval = ia32_copy_partial_page_list(p, clone_flags);
   3.518 +	}
   3.519 +#endif
   3.520 +
   3.521 +#ifdef CONFIG_PERFMON
   3.522 +	if (current->thread.pfm_context)
   3.523 +		pfm_inherit(p, child_ptregs);
   3.524 +#endif
   3.525 +	return retval;
   3.526 +}
   3.527 +
   3.528 +static void
   3.529 +do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
   3.530 +{
   3.531 +	unsigned long mask, sp, nat_bits = 0, ip, ar_rnat, urbs_end, cfm;
   3.532 +	elf_greg_t *dst = arg;
   3.533 +	struct pt_regs *pt;
   3.534 +	char nat;
   3.535 +	int i;
   3.536 +
   3.537 +	memset(dst, 0, sizeof(elf_gregset_t));	/* don't leak any kernel bits to user-level */
   3.538 +
   3.539 +	if (unw_unwind_to_user(info) < 0)
   3.540 +		return;
   3.541 +
   3.542 +	unw_get_sp(info, &sp);
   3.543 +	pt = (struct pt_regs *) (sp + 16);
   3.544 +
   3.545 +	urbs_end = ia64_get_user_rbs_end(task, pt, &cfm);
   3.546 +
   3.547 +	if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0)
   3.548 +		return;
   3.549 +
   3.550 +	ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),
   3.551 +		  &ar_rnat);
   3.552 +
   3.553 +	/*
   3.554 +	 * coredump format:
   3.555 +	 *	r0-r31
   3.556 +	 *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
   3.557 +	 *	predicate registers (p0-p63)
   3.558 +	 *	b0-b7
   3.559 +	 *	ip cfm user-mask
   3.560 +	 *	ar.rsc ar.bsp ar.bspstore ar.rnat
   3.561 +	 *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
   3.562 +	 */
   3.563 +
   3.564 +	/* r0 is zero */
   3.565 +	for (i = 1, mask = (1UL << i); i < 32; ++i) {
   3.566 +		unw_get_gr(info, i, &dst[i], &nat);
   3.567 +		if (nat)
   3.568 +			nat_bits |= mask;
   3.569 +		mask <<= 1;
   3.570 +	}
   3.571 +	dst[32] = nat_bits;
   3.572 +	unw_get_pr(info, &dst[33]);
   3.573 +
   3.574 +	for (i = 0; i < 8; ++i)
   3.575 +		unw_get_br(info, i, &dst[34 + i]);
   3.576 +
   3.577 +	unw_get_rp(info, &ip);
   3.578 +	dst[42] = ip + ia64_psr(pt)->ri;
   3.579 +	dst[43] = cfm;
   3.580 +	dst[44] = pt->cr_ipsr & IA64_PSR_UM;
   3.581 +
   3.582 +	unw_get_ar(info, UNW_AR_RSC, &dst[45]);
   3.583 +	/*
   3.584 +	 * For bsp and bspstore, unw_get_ar() would return the kernel
   3.585 +	 * addresses, but we need the user-level addresses instead:
   3.586 +	 */
   3.587 +	dst[46] = urbs_end;	/* note: by convention PT_AR_BSP points to the end of the urbs! */
   3.588 +	dst[47] = pt->ar_bspstore;
   3.589 +	dst[48] = ar_rnat;
   3.590 +	unw_get_ar(info, UNW_AR_CCV, &dst[49]);
   3.591 +	unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
   3.592 +	unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
   3.593 +	dst[52] = pt->ar_pfs;	/* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
   3.594 +	unw_get_ar(info, UNW_AR_LC, &dst[53]);
   3.595 +	unw_get_ar(info, UNW_AR_EC, &dst[54]);
   3.596 +	unw_get_ar(info, UNW_AR_CSD, &dst[55]);
   3.597 +	unw_get_ar(info, UNW_AR_SSD, &dst[56]);
   3.598 +}
   3.599 +
   3.600 +void
   3.601 +do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg)
   3.602 +{
   3.603 +	elf_fpreg_t *dst = arg;
   3.604 +	int i;
   3.605 +
   3.606 +	memset(dst, 0, sizeof(elf_fpregset_t));	/* don't leak any "random" bits */
   3.607 +
   3.608 +	if (unw_unwind_to_user(info) < 0)
   3.609 +		return;
   3.610 +
   3.611 +	/* f0 is 0.0, f1 is 1.0 */
   3.612 +
   3.613 +	for (i = 2; i < 32; ++i)
   3.614 +		unw_get_fr(info, i, dst + i);
   3.615 +
   3.616 +	ia64_flush_fph(task);
   3.617 +	if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0)
   3.618 +		memcpy(dst + 32, task->thread.fph, 96*16);
   3.619 +}
   3.620 +
   3.621 +void
   3.622 +do_copy_regs (struct unw_frame_info *info, void *arg)
   3.623 +{
   3.624 +	do_copy_task_regs(current, info, arg);
   3.625 +}
   3.626 +
   3.627 +void
   3.628 +do_dump_fpu (struct unw_frame_info *info, void *arg)
   3.629 +{
   3.630 +	do_dump_task_fpu(current, info, arg);
   3.631 +}
   3.632 +
   3.633 +int
   3.634 +dump_task_regs(struct task_struct *task, elf_gregset_t *regs)
   3.635 +{
   3.636 +	struct unw_frame_info tcore_info;
   3.637 +
   3.638 +	if (current == task) {
   3.639 +		unw_init_running(do_copy_regs, regs);
   3.640 +	} else {
   3.641 +		memset(&tcore_info, 0, sizeof(tcore_info));
   3.642 +		unw_init_from_blocked_task(&tcore_info, task);
   3.643 +		do_copy_task_regs(task, &tcore_info, regs);
   3.644 +	}
   3.645 +	return 1;
   3.646 +}
   3.647 +
   3.648 +void
   3.649 +ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
   3.650 +{
   3.651 +	unw_init_running(do_copy_regs, dst);
   3.652 +}
   3.653 +
   3.654 +int
   3.655 +dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst)
   3.656 +{
   3.657 +	struct unw_frame_info tcore_info;
   3.658 +
   3.659 +	if (current == task) {
   3.660 +		unw_init_running(do_dump_fpu, dst);
   3.661 +	} else {
   3.662 +		memset(&tcore_info, 0, sizeof(tcore_info));
   3.663 +		unw_init_from_blocked_task(&tcore_info, task);
   3.664 +		do_dump_task_fpu(task, &tcore_info, dst);
   3.665 +	}
   3.666 +	return 1;
   3.667 +}
   3.668 +
   3.669 +int
   3.670 +dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
   3.671 +{
   3.672 +	unw_init_running(do_dump_fpu, dst);
   3.673 +	return 1;	/* f0-f31 are always valid so we always return 1 */
   3.674 +}
   3.675 +
   3.676 +long
   3.677 +sys_execve (char __user *filename, char __user * __user *argv, char __user * __user *envp,
   3.678 +	    struct pt_regs *regs)
   3.679 +{
   3.680 +	char *fname;
   3.681 +	int error;
   3.682 +
   3.683 +	fname = getname(filename);
   3.684 +	error = PTR_ERR(fname);
   3.685 +	if (IS_ERR(fname))
   3.686 +		goto out;
   3.687 +	error = do_execve(fname, argv, envp, regs);
   3.688 +	putname(fname);
   3.689 +out:
   3.690 +	return error;
   3.691 +}
   3.692 +
   3.693 +pid_t
   3.694 +kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
   3.695 +{
   3.696 +	extern void start_kernel_thread (void);
   3.697 +	unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread;
   3.698 +	struct {
   3.699 +		struct switch_stack sw;
   3.700 +		struct pt_regs pt;
   3.701 +	} regs;
   3.702 +
   3.703 +	memset(&regs, 0, sizeof(regs));
   3.704 +	regs.pt.cr_iip = helper_fptr[0];	/* set entry point (IP) */
   3.705 +	regs.pt.r1 = helper_fptr[1];		/* set GP */
   3.706 +	regs.pt.r9 = (unsigned long) fn;	/* 1st argument */
   3.707 +	regs.pt.r11 = (unsigned long) arg;	/* 2nd argument */
   3.708 +	/* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read.  */
   3.709 +	regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
   3.710 +	regs.pt.cr_ifs = 1UL << 63;		/* mark as valid, empty frame */
   3.711 +	regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR);
   3.712 +	regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET;
   3.713 +	regs.sw.pr = (1 << PRED_KERNEL_STACK);
   3.714 +	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL);
   3.715 +}
   3.716 +EXPORT_SYMBOL(kernel_thread);
   3.717 +
   3.718 +/* This gets called from kernel_thread() via ia64_invoke_thread_helper().  */
   3.719 +int
   3.720 +kernel_thread_helper (int (*fn)(void *), void *arg)
   3.721 +{
   3.722 +#ifdef CONFIG_IA32_SUPPORT
   3.723 +	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
   3.724 +		/* A kernel thread is always a 64-bit process. */
   3.725 +		current->thread.map_base  = DEFAULT_MAP_BASE;
   3.726 +		current->thread.task_size = DEFAULT_TASK_SIZE;
   3.727 +		ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
   3.728 +		ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
   3.729 +	}
   3.730 +#endif
   3.731 +	return (*fn)(arg);
   3.732 +}
   3.733 +
   3.734 +/*
   3.735 + * Flush thread state.  This is called when a thread does an execve().
   3.736 + */
   3.737 +void
   3.738 +flush_thread (void)
   3.739 +{
   3.740 +	/*
   3.741 +	 * Remove function-return probe instances associated with this task
   3.742 +	 * and put them back on the free list. Do not insert an exit probe for
    3.743 +	 * this function; it will be disabled by kprobe_flush_task if you do.
   3.744 +	 */
   3.745 +	kprobe_flush_task(current);
   3.746 +
   3.747 +	/* drop floating-point and debug-register state if it exists: */
   3.748 +	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
   3.749 +	ia64_drop_fpu(current);
   3.750 +	if (IS_IA32_PROCESS(ia64_task_regs(current)))
   3.751 +		ia32_drop_partial_page_list(current);
   3.752 +}
   3.753 +
   3.754 +/*
   3.755 + * Clean up state associated with current thread.  This is called when
   3.756 + * the thread calls exit().
   3.757 + */
   3.758 +void
   3.759 +exit_thread (void)
   3.760 +{
   3.761 +
   3.762 +	/*
   3.763 +	 * Remove function-return probe instances associated with this task
   3.764 +	 * and put them back on the free list. Do not insert an exit probe for
    3.765 +	 * this function; it will be disabled by kprobe_flush_task if you do.
   3.766 +	 */
   3.767 +	kprobe_flush_task(current);
   3.768 +
   3.769 +	ia64_drop_fpu(current);
   3.770 +#ifdef CONFIG_PERFMON
   3.771 +       /* if needed, stop monitoring and flush state to perfmon context */
   3.772 +	if (current->thread.pfm_context)
   3.773 +		pfm_exit_thread(current);
   3.774 +
   3.775 +	/* free debug register resources */
   3.776 +	if (current->thread.flags & IA64_THREAD_DBG_VALID)
   3.777 +		pfm_release_debug_registers(current);
   3.778 +#endif
   3.779 +	if (IS_IA32_PROCESS(ia64_task_regs(current)))
   3.780 +		ia32_drop_partial_page_list(current);
   3.781 +}
   3.782 +
   3.783 +unsigned long
   3.784 +get_wchan (struct task_struct *p)
   3.785 +{
   3.786 +	struct unw_frame_info info;
   3.787 +	unsigned long ip;
   3.788 +	int count = 0;
   3.789 +
   3.790 +	/*
   3.791 +	 * Note: p may not be a blocked task (it could be current or
    3.792 +	 * another process running on some other CPU).  Rather than
   3.793 +	 * trying to determine if p is really blocked, we just assume
   3.794 +	 * it's blocked and rely on the unwind routines to fail
   3.795 +	 * gracefully if the process wasn't really blocked after all.
   3.796 +	 * --davidm 99/12/15
   3.797 +	 */
   3.798 +	unw_init_from_blocked_task(&info, p);
   3.799 +	do {
   3.800 +		if (unw_unwind(&info) < 0)
   3.801 +			return 0;
   3.802 +		unw_get_ip(&info, &ip);
   3.803 +		if (!in_sched_functions(ip))
   3.804 +			return ip;
   3.805 +	} while (count++ < 16);
   3.806 +	return 0;
   3.807 +}
   3.808 +
   3.809 +void
   3.810 +cpu_halt (void)
   3.811 +{
   3.812 +	pal_power_mgmt_info_u_t power_info[8];
   3.813 +	unsigned long min_power;
   3.814 +	int i, min_power_state;
   3.815 +
   3.816 +	if (ia64_pal_halt_info(power_info) != 0)
   3.817 +		return;
   3.818 +
   3.819 +	min_power_state = 0;
   3.820 +	min_power = power_info[0].pal_power_mgmt_info_s.power_consumption;
   3.821 +	for (i = 1; i < 8; ++i)
   3.822 +		if (power_info[i].pal_power_mgmt_info_s.im
   3.823 +		    && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) {
   3.824 +			min_power = power_info[i].pal_power_mgmt_info_s.power_consumption;
   3.825 +			min_power_state = i;
   3.826 +		}
   3.827 +
   3.828 +	while (1)
   3.829 +		ia64_pal_halt(min_power_state);
   3.830 +}
   3.831 +
   3.832 +void
   3.833 +machine_restart (char *restart_cmd)
   3.834 +{
   3.835 +	(*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
   3.836 +}
   3.837 +
   3.838 +void
   3.839 +machine_halt (void)
   3.840 +{
   3.841 +	cpu_halt();
   3.842 +}
   3.843 +
   3.844 +void
   3.845 +machine_power_off (void)
   3.846 +{
   3.847 +	if (pm_power_off)
   3.848 +		pm_power_off();
   3.849 +	machine_halt();
   3.850 +}
   3.851 +#endif // !XEN
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/arch/ia64/linux-xen/unwind.c	Fri Dec 30 23:40:13 2005 -0600
     4.3 @@ -0,0 +1,2332 @@
     4.4 +/*
     4.5 + * Copyright (C) 1999-2004 Hewlett-Packard Co
     4.6 + *	David Mosberger-Tang <davidm@hpl.hp.com>
     4.7 + * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
     4.8 + * 	- Change pt_regs_off() to make it less dependent on pt_regs structure.
     4.9 + */
    4.10 +/*
    4.11 + * This file implements call frame unwind support for the Linux
    4.12 + * kernel.  Parsing and processing the unwind information is
    4.13 + * time-consuming, so this implementation translates the unwind
    4.14 + * descriptors into unwind scripts.  These scripts are very simple
    4.15 + * (basically a sequence of assignments) and efficient to execute.
    4.16 + * They are cached for later re-use.  Each script is specific for a
    4.17 + * given instruction pointer address and the set of predicate values
    4.18 + * that the script depends on (most unwind descriptors are
    4.19 + * unconditional and scripts often do not depend on predicates at
    4.20 + * all).  This code is based on the unwind conventions described in
    4.21 + * the "IA-64 Software Conventions and Runtime Architecture" manual.
    4.22 + *
    4.23 + * SMP conventions:
    4.24 + *	o updates to the global unwind data (in structure "unw") are serialized
    4.25 + *	  by the unw.lock spinlock
    4.26 + *	o each unwind script has its own read-write lock; a thread must acquire
    4.27 + *	  a read lock before executing a script and must acquire a write lock
    4.28 + *	  before modifying a script
    4.29 + *	o if both the unw.lock spinlock and a script's read-write lock must be
    4.30 + *	  acquired, then the read-write lock must be acquired first.
    4.31 + */
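
A minimal sketch of the lock-ordering rule stated above, assuming the per-script read-write lock field is called "lock" as in the Linux unwind_i.h this file is derived from; illustrative only, not part of the patch.

/* Illustrative only: take the global unw.lock first, then the script's own
 * read-write lock, matching the SMP convention described above. */
static void modify_script_example(struct unw_script *script)
{
	unsigned long flags;

	spin_lock_irqsave(&unw.lock, flags);	/* serialize global unwind data */
	write_lock(&script->lock);		/* then lock the script for update */
	/* ... rebuild or patch the cached unwind script here ... */
	write_unlock(&script->lock);
	spin_unlock_irqrestore(&unw.lock, flags);
}
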
    4.32 +#ifdef XEN
    4.33 +#include <xen/types.h>
    4.34 +#include <xen/elf.h>
    4.35 +#include <xen/kernel.h>
    4.36 +#include <xen/sched.h>
    4.37 +#include <xen/xmalloc.h>
    4.38 +#include <xen/spinlock.h>
    4.39 +
    4.40 +// workaround
    4.41 +#ifdef CONFIG_SMP
    4.42 +#define write_trylock(lock)	_raw_write_trylock(lock)
    4.43 +#else
    4.44 +#define write_trylock(lock)	({1;})
    4.45 +#endif
    4.46 +
    4.47 +#else
    4.48 +#include <linux/module.h>
    4.49 +#include <linux/bootmem.h>
    4.50 +#include <linux/elf.h>
    4.51 +#include <linux/kernel.h>
    4.52 +#include <linux/sched.h>
    4.53 +#include <linux/slab.h>
    4.54 +#endif
    4.55 +
    4.56 +#include <asm/unwind.h>
    4.57 +
    4.58 +#include <asm/delay.h>
    4.59 +#include <asm/page.h>
    4.60 +#include <asm/ptrace.h>
    4.61 +#include <asm/ptrace_offsets.h>
    4.62 +#include <asm/rse.h>
    4.63 +#include <asm/sections.h>
    4.64 +#include <asm/system.h>
    4.65 +#include <asm/uaccess.h>
    4.66 +
    4.67 +#include "entry.h"
    4.68 +#include "unwind_i.h"
    4.69 +
    4.70 +#define UNW_LOG_CACHE_SIZE	7	/* each unw_script is ~256 bytes in size */
    4.71 +#define UNW_CACHE_SIZE		(1 << UNW_LOG_CACHE_SIZE)
    4.72 +
    4.73 +#define UNW_LOG_HASH_SIZE	(UNW_LOG_CACHE_SIZE + 1)
    4.74 +#define UNW_HASH_SIZE		(1 << UNW_LOG_HASH_SIZE)
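
For reference, the sizes these constants expand to, using the ~256-byte estimate from the comment above (illustrative arithmetic only, not part of the patch):

/* Illustrative only: what the cache/hash sizing macros above work out to. */
enum {
	example_cache_scripts = 1 << 7,		/* UNW_CACHE_SIZE: 128 scripts      */
	example_hash_buckets  = 1 << (7 + 1),	/* UNW_HASH_SIZE: 256 buckets       */
	example_cache_bytes   = 128 * 256,	/* ~32KB at ~256 bytes per script   */
};
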
    4.75 +
    4.76 +#define UNW_STATS	0	/* WARNING: this disables interrupts for long time-spans!! */
    4.77 +
    4.78 +#ifdef UNW_DEBUG
    4.79 +  static unsigned int unw_debug_level = UNW_DEBUG;
    4.80 +#  define UNW_DEBUG_ON(n)	unw_debug_level >= n
    4.81 +   /* Do not code a printk level; not all debug lines end in a newline */
    4.82 +#  define UNW_DPRINT(n, ...)  if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
    4.83 +#  define inline
    4.84 +#else /* !UNW_DEBUG */
    4.85 +#  define UNW_DEBUG_ON(n)  0
    4.86 +#  define UNW_DPRINT(n, ...)
    4.87 +#endif /* UNW_DEBUG */
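
Illustrative usage of the debug macros above (not part of the patch): enabling the commented-out "CFLAGS += -DUNW_DEBUG=4" line in the ia64 Makefile defines UNW_DEBUG as 4, so every UNW_DPRINT(n, ...) with n <= 4 prints, while without UNW_DEBUG the calls compile away entirely.

/* Illustrative only: a level-1 trace line, printed whenever UNW_DEBUG >= 1. */
static void lookup_trace_example(unsigned long ip)
{
	UNW_DPRINT(1, "unwind.%s: looking up script for ip=0x%lx\n",
		   __FUNCTION__, ip);
}
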
    4.88 +
    4.89 +#if UNW_STATS
    4.90 +# define STAT(x...)	x
    4.91 +#else
    4.92 +# define STAT(x...)
    4.93 +#endif
    4.94 +
    4.95 +#ifdef XEN
    4.96 +#define alloc_reg_state()	xmalloc(struct unw_reg_state)
    4.97 +#define free_reg_state(usr)	xfree(usr)
    4.98 +#define alloc_labeled_state()	xmalloc(struct unw_labeled_state)
    4.99 +#define free_labeled_state(usr)	xfree(usr)
   4.100 +#else
   4.101 +#define alloc_reg_state()	kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
   4.102 +#define free_reg_state(usr)	kfree(usr)
   4.103 +#define alloc_labeled_state()	kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
   4.104 +#define free_labeled_state(usr)	kfree(usr)
   4.105 +#endif
   4.106 +
   4.107 +typedef unsigned long unw_word;
   4.108 +typedef unsigned char unw_hash_index_t;
   4.109 +
   4.110 +static struct {
   4.111 +	spinlock_t lock;			/* spinlock for unwind data */
   4.112 +
   4.113 +	/* list of unwind tables (one per load-module) */
   4.114 +	struct unw_table *tables;
   4.115 +
   4.116 +	unsigned long r0;			/* constant 0 for r0 */
   4.117 +
   4.118 +	/* table of registers that prologues can save (and order in which they're saved): */
   4.119 +	const unsigned char save_order[8];
   4.120 +
   4.121 +	/* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
   4.122 +	unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
   4.123 +
    4.124 +	unsigned short lru_head;		/* index of least-recently used script */
   4.125 +	unsigned short lru_tail;		/* index of most-recently used script */
   4.126 +
   4.127 +	/* index into unw_frame_info for preserved register i */
   4.128 +	unsigned short preg_index[UNW_NUM_REGS];
   4.129 +
   4.130 +	short pt_regs_offsets[32];
   4.131 +
   4.132 +	/* unwind table for the kernel: */
   4.133 +	struct unw_table kernel_table;
   4.134 +
   4.135 +	/* unwind table describing the gate page (kernel code that is mapped into user space): */
   4.136 +	size_t gate_table_size;
   4.137 +	unsigned long *gate_table;
   4.138 +
   4.139 +	/* hash table that maps instruction pointer to script index: */
   4.140 +	unsigned short hash[UNW_HASH_SIZE];
   4.141 +
   4.142 +	/* script cache: */
   4.143 +	struct unw_script cache[UNW_CACHE_SIZE];
   4.144 +
   4.145 +# ifdef UNW_DEBUG
   4.146 +	const char *preg_name[UNW_NUM_REGS];
   4.147 +# endif
   4.148 +# if UNW_STATS
   4.149 +	struct {
   4.150 +		struct {
   4.151 +			int lookups;
   4.152 +			int hinted_hits;
   4.153 +			int normal_hits;
   4.154 +			int collision_chain_traversals;
   4.155 +		} cache;
   4.156 +		struct {
   4.157 +			unsigned long build_time;
   4.158 +			unsigned long run_time;
   4.159 +			unsigned long parse_time;
   4.160 +			int builds;
   4.161 +			int news;
   4.162 +			int collisions;
   4.163 +			int runs;
   4.164 +		} script;
   4.165 +		struct {
   4.166 +			unsigned long init_time;
   4.167 +			unsigned long unwind_time;
   4.168 +			int inits;
   4.169 +			int unwinds;
   4.170 +		} api;
   4.171 +	} stat;
   4.172 +# endif
   4.173 +} unw = {
   4.174 +	.tables = &unw.kernel_table,
   4.175 +	.lock = SPIN_LOCK_UNLOCKED,
   4.176 +	.save_order = {
   4.177 +		UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
   4.178 +		UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
   4.179 +	},
   4.180 +	.preg_index = {
   4.181 +		offsetof(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_GR */
   4.182 +		offsetof(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_MEM */
   4.183 +		offsetof(struct unw_frame_info, bsp_loc)/8,
   4.184 +		offsetof(struct unw_frame_info, bspstore_loc)/8,
   4.185 +		offsetof(struct unw_frame_info, pfs_loc)/8,
   4.186 +		offsetof(struct unw_frame_info, rnat_loc)/8,
   4.187 +		offsetof(struct unw_frame_info, psp)/8,
   4.188 +		offsetof(struct unw_frame_info, rp_loc)/8,
   4.189 +		offsetof(struct unw_frame_info, r4)/8,
   4.190 +		offsetof(struct unw_frame_info, r5)/8,
   4.191 +		offsetof(struct unw_frame_info, r6)/8,
   4.192 +		offsetof(struct unw_frame_info, r7)/8,
   4.193 +		offsetof(struct unw_frame_info, unat_loc)/8,
   4.194 +		offsetof(struct unw_frame_info, pr_loc)/8,
   4.195 +		offsetof(struct unw_frame_info, lc_loc)/8,
   4.196 +		offsetof(struct unw_frame_info, fpsr_loc)/8,
   4.197 +		offsetof(struct unw_frame_info, b1_loc)/8,
   4.198 +		offsetof(struct unw_frame_info, b2_loc)/8,
   4.199 +		offsetof(struct unw_frame_info, b3_loc)/8,
   4.200 +		offsetof(struct unw_frame_info, b4_loc)/8,
   4.201 +		offsetof(struct unw_frame_info, b5_loc)/8,
   4.202 +		offsetof(struct unw_frame_info, f2_loc)/8,
   4.203 +		offsetof(struct unw_frame_info, f3_loc)/8,
   4.204 +		offsetof(struct unw_frame_info, f4_loc)/8,
   4.205 +		offsetof(struct unw_frame_info, f5_loc)/8,
   4.206 +		offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
   4.207 +		offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
   4.208 +		offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
   4.209 +		offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
   4.210 +		offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
   4.211 +		offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
   4.212 +		offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
   4.213 +		offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
   4.214 +		offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
   4.215 +		offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
   4.216 +		offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
   4.217 +		offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
   4.218 +		offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
   4.219 +		offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
   4.220 +		offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
   4.221 +		offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
   4.222 +	},
   4.223 +	.pt_regs_offsets = {
   4.224 +		[0] = -1,
   4.225 +		offsetof(struct pt_regs,  r1),
   4.226 +		offsetof(struct pt_regs,  r2),
   4.227 +		offsetof(struct pt_regs,  r3),
   4.228 +		[4] = -1, [5] = -1, [6] = -1, [7] = -1,
   4.229 +		offsetof(struct pt_regs,  r8),
   4.230 +		offsetof(struct pt_regs,  r9),
   4.231 +		offsetof(struct pt_regs, r10),
   4.232 +		offsetof(struct pt_regs, r11),
   4.233 +		offsetof(struct pt_regs, r12),
   4.234 +		offsetof(struct pt_regs, r13),
   4.235 +		offsetof(struct pt_regs, r14),
   4.236 +		offsetof(struct pt_regs, r15),
   4.237 +		offsetof(struct pt_regs, r16),
   4.238 +		offsetof(struct pt_regs, r17),
   4.239 +		offsetof(struct pt_regs, r18),
   4.240 +		offsetof(struct pt_regs, r19),
   4.241 +		offsetof(struct pt_regs, r20),
   4.242 +		offsetof(struct pt_regs, r21),
   4.243 +		offsetof(struct pt_regs, r22),
   4.244 +		offsetof(struct pt_regs, r23),
   4.245 +		offsetof(struct pt_regs, r24),
   4.246 +		offsetof(struct pt_regs, r25),
   4.247 +		offsetof(struct pt_regs, r26),
   4.248 +		offsetof(struct pt_regs, r27),
   4.249 +		offsetof(struct pt_regs, r28),
   4.250 +		offsetof(struct pt_regs, r29),
   4.251 +		offsetof(struct pt_regs, r30),
   4.252 +		offsetof(struct pt_regs, r31),
   4.253 +	},
   4.254 +	.hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
   4.255 +#ifdef UNW_DEBUG
   4.256 +	.preg_name = {
   4.257 +		"pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
   4.258 +		"r4", "r5", "r6", "r7",
   4.259 +		"ar.unat", "pr", "ar.lc", "ar.fpsr",
   4.260 +		"b1", "b2", "b3", "b4", "b5",
   4.261 +		"f2", "f3", "f4", "f5",
   4.262 +		"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
   4.263 +		"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
   4.264 +	}
   4.265 +#endif
   4.266 +};
   4.267 +
   4.268 +static inline int
   4.269 +read_only (void *addr)
   4.270 +{
   4.271 +	return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0);
   4.272 +}
   4.273 +
   4.274 +/*
   4.275 + * Returns offset of rREG in struct pt_regs.
   4.276 + */
   4.277 +static inline unsigned long
   4.278 +pt_regs_off (unsigned long reg)
   4.279 +{
   4.280 +	short off = -1;
   4.281 +
   4.282 +	if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
   4.283 +		off = unw.pt_regs_offsets[reg];
   4.284 +
   4.285 +	if (off < 0) {
   4.286 +		UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
   4.287 +		off = 0;
   4.288 +	}
   4.289 +	return (unsigned long) off;
   4.290 +}
   4.291 +
   4.292 +static inline struct pt_regs *
   4.293 +get_scratch_regs (struct unw_frame_info *info)
   4.294 +{
   4.295 +	if (!info->pt) {
   4.296 +		/* This should not happen with valid unwind info.  */
   4.297 +		UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
   4.298 +		if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
   4.299 +			info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
   4.300 +		else
   4.301 +			info->pt = info->sp - 16;
   4.302 +	}
   4.303 +	UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
   4.304 +	return (struct pt_regs *) info->pt;
   4.305 +}
   4.306 +
   4.307 +/* Unwind accessors.  */
   4.308 +
   4.309 +int
   4.310 +unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
   4.311 +{
   4.312 +	unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
   4.313 +	struct unw_ireg *ireg;
   4.314 +	struct pt_regs *pt;
   4.315 +
   4.316 +	if ((unsigned) regnum - 1 >= 127) {
   4.317 +		if (regnum == 0 && !write) {
   4.318 +			*val = 0;	/* read r0 always returns 0 */
   4.319 +			*nat = 0;
   4.320 +			return 0;
   4.321 +		}
   4.322 +		UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
   4.323 +			   __FUNCTION__, regnum);
   4.324 +		return -1;
   4.325 +	}
   4.326 +
   4.327 +	if (regnum < 32) {
   4.328 +		if (regnum >= 4 && regnum <= 7) {
   4.329 +			/* access a preserved register */
   4.330 +			ireg = &info->r4 + (regnum - 4);
   4.331 +			addr = ireg->loc;
   4.332 +			if (addr) {
   4.333 +				nat_addr = addr + ireg->nat.off;
   4.334 +				switch (ireg->nat.type) {
   4.335 +				      case UNW_NAT_VAL:
   4.336 +					/* simulate getf.sig/setf.sig */
   4.337 +					if (write) {
   4.338 +						if (*nat) {
   4.339 +							/* write NaTVal and be done with it */
   4.340 +							addr[0] = 0;
   4.341 +							addr[1] = 0x1fffe;
   4.342 +							return 0;
   4.343 +						}
   4.344 +						addr[1] = 0x1003e;
   4.345 +					} else {
   4.346 +						if (addr[0] == 0 && addr[1] == 0x1ffe) {
   4.347 +							/* return NaT and be done with it */
   4.348 +							*val = 0;
   4.349 +							*nat = 1;
   4.350 +							return 0;
   4.351 +						}
   4.352 +					}
   4.353 +					/* fall through */
   4.354 +				      case UNW_NAT_NONE:
   4.355 +					dummy_nat = 0;
   4.356 +					nat_addr = &dummy_nat;
   4.357 +					break;
   4.358 +
   4.359 +				      case UNW_NAT_MEMSTK:
   4.360 +					nat_mask = (1UL << ((long) addr & 0x1f8)/8);
   4.361 +					break;
   4.362 +
   4.363 +				      case UNW_NAT_REGSTK:
   4.364 +					nat_addr = ia64_rse_rnat_addr(addr);
   4.365 +					if ((unsigned long) addr < info->regstk.limit
   4.366 +					    || (unsigned long) addr >= info->regstk.top)
   4.367 +					{
   4.368 +						UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
   4.369 +							"[0x%lx-0x%lx)\n",
   4.370 +							__FUNCTION__, (void *) addr,
   4.371 +							info->regstk.limit,
   4.372 +							info->regstk.top);
   4.373 +						return -1;
   4.374 +					}
   4.375 +					if ((unsigned long) nat_addr >= info->regstk.top)
   4.376 +						nat_addr = &info->sw->ar_rnat;
   4.377 +					nat_mask = (1UL << ia64_rse_slot_num(addr));
   4.378 +					break;
   4.379 +				}
   4.380 +			} else {
   4.381 +				addr = &info->sw->r4 + (regnum - 4);
   4.382 +				nat_addr = &info->sw->ar_unat;
   4.383 +				nat_mask = (1UL << ((long) addr & 0x1f8)/8);
   4.384 +			}
   4.385 +		} else {
   4.386 +			/* access a scratch register */
   4.387 +			pt = get_scratch_regs(info);
   4.388 +			addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
   4.389 +			if (info->pri_unat_loc)
   4.390 +				nat_addr = info->pri_unat_loc;
   4.391 +			else
   4.392 +				nat_addr = &info->sw->caller_unat;
   4.393 +			nat_mask = (1UL << ((long) addr & 0x1f8)/8);
   4.394 +		}
   4.395 +	} else {
   4.396 +		/* access a stacked register */
   4.397 +		addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
   4.398 +		nat_addr = ia64_rse_rnat_addr(addr);
   4.399 +		if ((unsigned long) addr < info->regstk.limit
   4.400 +		    || (unsigned long) addr >= info->regstk.top)
   4.401 +		{
   4.402 +			UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
   4.403 +				   "of rbs\n",  __FUNCTION__);
   4.404 +			return -1;
   4.405 +		}
   4.406 +		if ((unsigned long) nat_addr >= info->regstk.top)
   4.407 +			nat_addr = &info->sw->ar_rnat;
   4.408 +		nat_mask = (1UL << ia64_rse_slot_num(addr));
   4.409 +	}
   4.410 +
   4.411 +	if (write) {
   4.412 +		if (read_only(addr)) {
   4.413 +			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
   4.414 +				__FUNCTION__);
   4.415 +		} else {
   4.416 +			*addr = *val;
   4.417 +			if (*nat)
   4.418 +				*nat_addr |= nat_mask;
   4.419 +			else
   4.420 +				*nat_addr &= ~nat_mask;
   4.421 +		}
   4.422 +	} else {
   4.423 +		if ((*nat_addr & nat_mask) == 0) {
   4.424 +			*val = *addr;
   4.425 +			*nat = 0;
   4.426 +		} else {
   4.427 +			*val = 0;	/* if register is a NaT, *addr may contain kernel data! */
   4.428 +			*nat = 1;
   4.429 +		}
   4.430 +	}
   4.431 +	return 0;
   4.432 +}
   4.433 +EXPORT_SYMBOL(unw_access_gr);
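
A short usage sketch for the accessor above (illustrative only, not part of the patch); in the Linux asm/unwind.h this code is derived from, unw_get_gr() is a thin inline wrapper that calls unw_access_gr() with write == 0.

/* Illustrative only: read preserved register r4 of the current frame. */
static void read_r4_example(struct unw_frame_info *info)
{
	unsigned long val;
	char nat;

	if (unw_access_gr(info, 4, &val, &nat, /* write */ 0) == 0 && !nat)
		printk("r4 = 0x%lx\n", val);
}
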
   4.434 +
   4.435 +int
   4.436 +unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
   4.437 +{
   4.438 +	unsigned long *addr;
   4.439 +	struct pt_regs *pt;
   4.440 +
   4.441 +	switch (regnum) {
   4.442 +		/* scratch: */
   4.443 +	      case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
   4.444 +	      case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
   4.445 +	      case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
   4.446 +
   4.447 +		/* preserved: */
   4.448 +	      case 1: case 2: case 3: case 4: case 5:
   4.449 +		addr = *(&info->b1_loc + (regnum - 1));
   4.450 +		if (!addr)
   4.451 +			addr = &info->sw->b1 + (regnum - 1);
   4.452 +		break;
   4.453 +
   4.454 +	      default:
   4.455 +		UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
   4.456 +			   __FUNCTION__, regnum);
   4.457 +		return -1;
   4.458 +	}
   4.459 +	if (write)
   4.460 +		if (read_only(addr)) {
   4.461 +			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
   4.462 +				__FUNCTION__);
   4.463 +		} else
   4.464 +			*addr = *val;
   4.465 +	else
   4.466 +		*val = *addr;
   4.467 +	return 0;
   4.468 +}
   4.469 +EXPORT_SYMBOL(unw_access_br);
   4.470 +
   4.471 +int
   4.472 +unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
   4.473 +{
   4.474 +	struct ia64_fpreg *addr = NULL;
   4.475 +	struct pt_regs *pt;
   4.476 +
   4.477 +	if ((unsigned) (regnum - 2) >= 126) {
   4.478 +		UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
   4.479 +			   __FUNCTION__, regnum);
   4.480 +		return -1;
   4.481 +	}
   4.482 +
   4.483 +	if (regnum <= 5) {
   4.484 +		addr = *(&info->f2_loc + (regnum - 2));
   4.485 +		if (!addr)
   4.486 +			addr = &info->sw->f2 + (regnum - 2);
   4.487 +	} else if (regnum <= 15) {
   4.488 +		if (regnum <= 11) {
   4.489 +			pt = get_scratch_regs(info);
   4.490 +			addr = &pt->f6  + (regnum - 6);
   4.491 +		}
   4.492 +		else
   4.493 +			addr = &info->sw->f12 + (regnum - 12);
   4.494 +	} else if (regnum <= 31) {
   4.495 +		addr = info->fr_loc[regnum - 16];
   4.496 +		if (!addr)
   4.497 +			addr = &info->sw->f16 + (regnum - 16);
   4.498 +	} else {
   4.499 +		struct task_struct *t = info->task;
   4.500 +
   4.501 +		if (write)
   4.502 +			ia64_sync_fph(t);
   4.503 +		else
   4.504 +			ia64_flush_fph(t);
   4.505 +#ifdef XEN
   4.506 +		addr = t->arch._thread.fph + (regnum - 32);
   4.507 +#else
   4.508 +		addr = t->thread.fph + (regnum - 32);
   4.509 +#endif
   4.510 +	}
   4.511 +
   4.512 +	if (write)
   4.513 +		if (read_only(addr)) {
   4.514 +			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
   4.515 +				__FUNCTION__);
   4.516 +		} else
   4.517 +			*addr = *val;
   4.518 +	else
   4.519 +		*val = *addr;
   4.520 +	return 0;
   4.521 +}
   4.522 +EXPORT_SYMBOL(unw_access_fr);
   4.523 +
   4.524 +int
   4.525 +unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
   4.526 +{
   4.527 +	unsigned long *addr;
   4.528 +	struct pt_regs *pt;
   4.529 +
   4.530 +	switch (regnum) {
   4.531 +	      case UNW_AR_BSP:
   4.532 +		addr = info->bsp_loc;
   4.533 +		if (!addr)
   4.534 +			addr = &info->sw->ar_bspstore;
   4.535 +		break;
   4.536 +
   4.537 +	      case UNW_AR_BSPSTORE:
   4.538 +		addr = info->bspstore_loc;
   4.539 +		if (!addr)
   4.540 +			addr = &info->sw->ar_bspstore;
   4.541 +		break;
   4.542 +
   4.543 +	      case UNW_AR_PFS:
   4.544 +		addr = info->pfs_loc;
   4.545 +		if (!addr)
   4.546 +			addr = &info->sw->ar_pfs;
   4.547 +		break;
   4.548 +
   4.549 +	      case UNW_AR_RNAT:
   4.550 +		addr = info->rnat_loc;
   4.551 +		if (!addr)
   4.552 +			addr = &info->sw->ar_rnat;
   4.553 +		break;
   4.554 +
   4.555 +	      case UNW_AR_UNAT:
   4.556 +		addr = info->unat_loc;
   4.557 +		if (!addr)
   4.558 +			addr = &info->sw->caller_unat;
   4.559 +		break;
   4.560 +
   4.561 +	      case UNW_AR_LC:
   4.562 +		addr = info->lc_loc;
   4.563 +		if (!addr)
   4.564 +			addr = &info->sw->ar_lc;
   4.565 +		break;
   4.566 +
   4.567 +	      case UNW_AR_EC:
   4.568 +		if (!info->cfm_loc)
   4.569 +			return -1;
   4.570 +		if (write)
   4.571 +			*info->cfm_loc =
   4.572 +				(*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
   4.573 +		else
   4.574 +			*val = (*info->cfm_loc >> 52) & 0x3f;
   4.575 +		return 0;
   4.576 +
   4.577 +	      case UNW_AR_FPSR:
   4.578 +		addr = info->fpsr_loc;
   4.579 +		if (!addr)
   4.580 +			addr = &info->sw->ar_fpsr;
   4.581 +		break;
   4.582 +
   4.583 +	      case UNW_AR_RSC:
   4.584 +		pt = get_scratch_regs(info);
   4.585 +		addr = &pt->ar_rsc;
   4.586 +		break;
   4.587 +
   4.588 +	      case UNW_AR_CCV:
   4.589 +		pt = get_scratch_regs(info);
   4.590 +		addr = &pt->ar_ccv;
   4.591 +		break;
   4.592 +
   4.593 +	      case UNW_AR_CSD:
   4.594 +		pt = get_scratch_regs(info);
   4.595 +		addr = &pt->ar_csd;
   4.596 +		break;
   4.597 +
   4.598 +	      case UNW_AR_SSD:
   4.599 +		pt = get_scratch_regs(info);
   4.600 +		addr = &pt->ar_ssd;
   4.601 +		break;
   4.602 +
   4.603 +	      default:
   4.604 +		UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
   4.605 +			   __FUNCTION__, regnum);
   4.606 +		return -1;
   4.607 +	}
   4.608 +
   4.609 +	if (write) {
   4.610 +		if (read_only(addr)) {
   4.611 +			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
   4.612 +				__FUNCTION__);
   4.613 +		} else
   4.614 +			*addr = *val;
   4.615 +	} else
   4.616 +		*val = *addr;
   4.617 +	return 0;
   4.618 +}
   4.619 +EXPORT_SYMBOL(unw_access_ar);
   4.620 +
   4.621 +int
   4.622 +unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
   4.623 +{
   4.624 +	unsigned long *addr;
   4.625 +
   4.626 +	addr = info->pr_loc;
   4.627 +	if (!addr)
   4.628 +		addr = &info->sw->pr;
   4.629 +
   4.630 +	if (write) {
   4.631 +		if (read_only(addr)) {
   4.632 +			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
   4.633 +				__FUNCTION__);
   4.634 +		} else
   4.635 +			*addr = *val;
   4.636 +	} else
   4.637 +		*val = *addr;
   4.638 +	return 0;
   4.639 +}
   4.640 +EXPORT_SYMBOL(unw_access_pr);
   4.641 +
   4.642 +
   4.643 +/* Routines to manipulate the state stack.  */
   4.644 +
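         +/*
         + * push() stacks a copy of the current register state (sr->curr) and
         + * pop() restores the most recently pushed copy; dup_state_stack() and
         + * free_state_stack() below do the same for whole chains on behalf of
         + * the label-state/copy-state descriptors.
         + */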
   4.645 +static inline void
   4.646 +push (struct unw_state_record *sr)
   4.647 +{
   4.648 +	struct unw_reg_state *rs;
   4.649 +
   4.650 +	rs = alloc_reg_state();
   4.651 +	if (!rs) {
   4.652 +		printk(KERN_ERR "unwind: cannot stack reg state!\n");
   4.653 +		return;
   4.654 +	}
   4.655 +	memcpy(rs, &sr->curr, sizeof(*rs));
   4.656 +	sr->curr.next = rs;
   4.657 +}
   4.658 +
   4.659 +static void
   4.660 +pop (struct unw_state_record *sr)
   4.661 +{
   4.662 +	struct unw_reg_state *rs = sr->curr.next;
   4.663 +
   4.664 +	if (!rs) {
   4.665 +		printk(KERN_ERR "unwind: stack underflow!\n");
   4.666 +		return;
   4.667 +	}
   4.668 +	memcpy(&sr->curr, rs, sizeof(*rs));
   4.669 +	free_reg_state(rs);
   4.670 +}
   4.671 +
   4.672 +/* Make a copy of the state stack.  Non-recursive to avoid stack overflows.  */
   4.673 +static struct unw_reg_state *
   4.674 +dup_state_stack (struct unw_reg_state *rs)
   4.675 +{
   4.676 +	struct unw_reg_state *copy, *prev = NULL, *first = NULL;
   4.677 +
   4.678 +	while (rs) {
   4.679 +		copy = alloc_reg_state();
   4.680 +		if (!copy) {
   4.681 +			printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
   4.682 +			return NULL;
   4.683 +		}
   4.684 +		memcpy(copy, rs, sizeof(*copy));
   4.685 +		if (first)
   4.686 +			prev->next = copy;
   4.687 +		else
   4.688 +			first = copy;
   4.689 +		rs = rs->next;
   4.690 +		prev = copy;
   4.691 +	}
   4.692 +	return first;
   4.693 +}
   4.694 +
   4.695 +/* Free all stacked register states (but not RS itself).  */
   4.696 +static void
   4.697 +free_state_stack (struct unw_reg_state *rs)
   4.698 +{
   4.699 +	struct unw_reg_state *p, *next;
   4.700 +
   4.701 +	for (p = rs->next; p != NULL; p = next) {
   4.702 +		next = p->next;
   4.703 +		free_reg_state(p);
   4.704 +	}
   4.705 +	rs->next = NULL;
   4.706 +}
   4.707 +
   4.708 +/* Unwind decoder routines */
   4.709 +
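         +/*
         + * decode_abreg() translates the "abreg" encoding used by spill
         + * descriptors into the unwinder's internal UNW_REG_* index; MEMORY
         + * selects between the memory and GR flavours of the primary UNaT
         + * (abreg 0x62).
         + */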
   4.710 +static enum unw_register_index __attribute_const__
   4.711 +decode_abreg (unsigned char abreg, int memory)
   4.712 +{
   4.713 +	switch (abreg) {
   4.714 +	      case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
   4.715 +	      case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
   4.716 +	      case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
   4.717 +	      case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
   4.718 +	      case 0x60: return UNW_REG_PR;
   4.719 +	      case 0x61: return UNW_REG_PSP;
   4.720 +	      case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
   4.721 +	      case 0x63: return UNW_REG_RP;
   4.722 +	      case 0x64: return UNW_REG_BSP;
   4.723 +	      case 0x65: return UNW_REG_BSPSTORE;
   4.724 +	      case 0x66: return UNW_REG_RNAT;
   4.725 +	      case 0x67: return UNW_REG_UNAT;
   4.726 +	      case 0x68: return UNW_REG_FPSR;
   4.727 +	      case 0x69: return UNW_REG_PFS;
   4.728 +	      case 0x6a: return UNW_REG_LC;
   4.729 +	      default:
   4.730 +		break;
   4.731 +	}
   4.732 +	UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
   4.733 +	return UNW_REG_LC;
   4.734 +}
   4.735 +
   4.736 +static void
   4.737 +set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
   4.738 +{
   4.739 +	reg->val = val;
   4.740 +	reg->where = where;
   4.741 +	if (reg->when == UNW_WHEN_NEVER)
   4.742 +		reg->when = when;
   4.743 +}
   4.744 +
   4.745 +static void
   4.746 +alloc_spill_area (unsigned long *offp, unsigned long regsize,
   4.747 +		  struct unw_reg_info *lo, struct unw_reg_info *hi)
   4.748 +{
   4.749 +	struct unw_reg_info *reg;
   4.750 +
   4.751 +	for (reg = hi; reg >= lo; --reg) {
   4.752 +		if (reg->where == UNW_WHERE_SPILL_HOME) {
   4.753 +			reg->where = UNW_WHERE_PSPREL;
   4.754 +			*offp -= regsize;
   4.755 +			reg->val = *offp;
   4.756 +		}
   4.757 +	}
   4.758 +}
   4.759 +
   4.760 +static inline void
   4.761 +spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
   4.762 +{
   4.763 +	struct unw_reg_info *reg;
   4.764 +
   4.765 +	for (reg = *regp; reg <= lim; ++reg) {
   4.766 +		if (reg->where == UNW_WHERE_SPILL_HOME) {
   4.767 +			reg->when = t;
   4.768 +			*regp = reg + 1;
   4.769 +			return;
   4.770 +		}
   4.771 +	}
   4.772 +	UNW_DPRINT(0, "unwind.%s: excess spill!\n",  __FUNCTION__);
   4.773 +}
   4.774 +
   4.775 +static inline void
   4.776 +finish_prologue (struct unw_state_record *sr)
   4.777 +{
   4.778 +	struct unw_reg_info *reg;
   4.779 +	unsigned long off;
   4.780 +	int i;
   4.781 +
   4.782 +	/*
   4.783 +	 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
   4.784 +	 * for Using Unwind Descriptors", rule 3):
   4.785 +	 */
   4.786 +	for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
   4.787 +		reg = sr->curr.reg + unw.save_order[i];
   4.788 +		if (reg->where == UNW_WHERE_GR_SAVE) {
   4.789 +			reg->where = UNW_WHERE_GR;
   4.790 +			reg->val = sr->gr_save_loc++;
   4.791 +		}
   4.792 +	}
   4.793 +
   4.794 +	/*
   4.795 +	 * Next, compute when the fp, general, and branch registers get
   4.796 +	 * saved.  This must come before alloc_spill_area() because
   4.797 +	 * we need to know which registers are spilled to their home
   4.798 +	 * locations.
   4.799 +	 */
   4.800 +	if (sr->imask) {
   4.801 +		unsigned char kind, mask = 0, *cp = sr->imask;
   4.802 +		int t;
   4.803 +		static const unsigned char limit[3] = {
   4.804 +			UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
   4.805 +		};
   4.806 +		struct unw_reg_info *(regs[3]);
   4.807 +
   4.808 +		regs[0] = sr->curr.reg + UNW_REG_F2;
   4.809 +		regs[1] = sr->curr.reg + UNW_REG_R4;
   4.810 +		regs[2] = sr->curr.reg + UNW_REG_B1;
   4.811 +
   4.812 +		for (t = 0; t < sr->region_len; ++t) {
   4.813 +			if ((t & 3) == 0)
   4.814 +				mask = *cp++;
   4.815 +			kind = (mask >> 2*(3-(t & 3))) & 3;
   4.816 +			if (kind > 0)
   4.817 +				spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
   4.818 +						sr->region_start + t);
   4.819 +		}
   4.820 +	}
   4.821 +	/*
   4.822 +	 * Next, lay out the memory stack spill area:
   4.823 +	 */
   4.824 +	if (sr->any_spills) {
   4.825 +		off = sr->spill_offset;
   4.826 +		alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
   4.827 +		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
   4.828 +		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
   4.829 +	}
   4.830 +}
   4.831 +
   4.832 +/*
   4.833 + * Region header descriptors.
   4.834 + */
   4.835 +
   4.836 +static void
   4.837 +desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
   4.838 +	       struct unw_state_record *sr)
   4.839 +{
   4.840 +	int i, region_start;
   4.841 +
   4.842 +	if (!(sr->in_body || sr->first_region))
   4.843 +		finish_prologue(sr);
   4.844 +	sr->first_region = 0;
   4.845 +
   4.846 +	/* check if we're done: */
   4.847 +	if (sr->when_target < sr->region_start + sr->region_len) {
   4.848 +		sr->done = 1;
   4.849 +		return;
   4.850 +	}
   4.851 +
   4.852 +	region_start = sr->region_start + sr->region_len;
   4.853 +
   4.854 +	for (i = 0; i < sr->epilogue_count; ++i)
   4.855 +		pop(sr);
   4.856 +	sr->epilogue_count = 0;
   4.857 +	sr->epilogue_start = UNW_WHEN_NEVER;
   4.858 +
   4.859 +	sr->region_start = region_start;
   4.860 +	sr->region_len = rlen;
   4.861 +	sr->in_body = body;
   4.862 +
   4.863 +	if (!body) {
   4.864 +		push(sr);
   4.865 +
   4.866 +		for (i = 0; i < 4; ++i) {
   4.867 +			if (mask & 0x8)
   4.868 +				set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
   4.869 +					sr->region_start + sr->region_len - 1, grsave++);
   4.870 +			mask <<= 1;
   4.871 +		}
   4.872 +		sr->gr_save_loc = grsave;
   4.873 +		sr->any_spills = 0;
   4.874 +		sr->imask = NULL;
   4.875 +		sr->spill_offset = 0x10;	/* default to psp+16 */
   4.876 +	}
   4.877 +}
   4.878 +
   4.879 +/*
   4.880 + * Prologue descriptors.
   4.881 + */
   4.882 +
   4.883 +static inline void
   4.884 +desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
   4.885 +{
   4.886 +	if (abi == 3 && context == 'i') {
   4.887 +		sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
   4.888 +		UNW_DPRINT(3, "unwind.%s: interrupt frame\n",  __FUNCTION__);
   4.889 +	}
   4.890 +	else
   4.891 +		UNW_DPRINT(0, "unwind.%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
   4.892 +				__FUNCTION__, abi, context);
   4.893 +}
   4.894 +
   4.895 +static inline void
   4.896 +desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
   4.897 +{
   4.898 +	int i;
   4.899 +
   4.900 +	for (i = 0; i < 5; ++i) {
   4.901 +		if (brmask & 1)
   4.902 +			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
   4.903 +				sr->region_start + sr->region_len - 1, gr++);
   4.904 +		brmask >>= 1;
   4.905 +	}
   4.906 +}
   4.907 +
   4.908 +static inline void
   4.909 +desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
   4.910 +{
   4.911 +	int i;
   4.912 +
   4.913 +	for (i = 0; i < 5; ++i) {
   4.914 +		if (brmask & 1) {
   4.915 +			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
   4.916 +				sr->region_start + sr->region_len - 1, 0);
   4.917 +			sr->any_spills = 1;
   4.918 +		}
   4.919 +		brmask >>= 1;
   4.920 +	}
   4.921 +}
   4.922 +
   4.923 +static inline void
   4.924 +desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
   4.925 +{
   4.926 +	int i;
   4.927 +
   4.928 +	for (i = 0; i < 4; ++i) {
   4.929 +		if ((grmask & 1) != 0) {
   4.930 +			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
   4.931 +				sr->region_start + sr->region_len - 1, 0);
   4.932 +			sr->any_spills = 1;
   4.933 +		}
   4.934 +		grmask >>= 1;
   4.935 +	}
   4.936 +	for (i = 0; i < 20; ++i) {
   4.937 +		if ((frmask & 1) != 0) {
   4.938 +			int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
   4.939 +			set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
   4.940 +				sr->region_start + sr->region_len - 1, 0);
   4.941 +			sr->any_spills = 1;
   4.942 +		}
   4.943 +		frmask >>= 1;
   4.944 +	}
   4.945 +}
   4.946 +
   4.947 +static inline void
   4.948 +desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
   4.949 +{
   4.950 +	int i;
   4.951 +
   4.952 +	for (i = 0; i < 4; ++i) {
   4.953 +		if ((frmask & 1) != 0) {
   4.954 +			set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
   4.955 +				sr->region_start + sr->region_len - 1, 0);
   4.956 +			sr->any_spills = 1;
   4.957 +		}
   4.958 +		frmask >>= 1;
   4.959 +	}
   4.960 +}
   4.961 +
   4.962 +static inline void
   4.963 +desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
   4.964 +{
   4.965 +	int i;
   4.966 +
   4.967 +	for (i = 0; i < 4; ++i) {
   4.968 +		if ((grmask & 1) != 0)
   4.969 +			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
   4.970 +				sr->region_start + sr->region_len - 1, gr++);
   4.971 +		grmask >>= 1;
   4.972 +	}
   4.973 +}
   4.974 +
   4.975 +static inline void
   4.976 +desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
   4.977 +{
   4.978 +	int i;
   4.979 +
   4.980 +	for (i = 0; i < 4; ++i) {
   4.981 +		if ((grmask & 1) != 0) {
   4.982 +			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
   4.983 +				sr->region_start + sr->region_len - 1, 0);
   4.984 +			sr->any_spills = 1;
   4.985 +		}
   4.986 +		grmask >>= 1;
   4.987 +	}
   4.988 +}
   4.989 +
   4.990 +static inline void
   4.991 +desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
   4.992 +{
   4.993 +	set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
   4.994 +		sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
   4.995 +}
   4.996 +
   4.997 +static inline void
   4.998 +desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
   4.999 +{
  4.1000 +	sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
  4.1001 +}
  4.1002 +
  4.1003 +static inline void
  4.1004 +desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
  4.1005 +{
  4.1006 +	set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
  4.1007 +}
  4.1008 +
  4.1009 +static inline void
  4.1010 +desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
  4.1011 +{
  4.1012 +	set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
  4.1013 +		0x10 - 4*pspoff);
  4.1014 +}
  4.1015 +
  4.1016 +static inline void
  4.1017 +desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
  4.1018 +{
  4.1019 +	set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
  4.1020 +		4*spoff);
  4.1021 +}
  4.1022 +
  4.1023 +static inline void
  4.1024 +desc_rp_br (unsigned char dst, struct unw_state_record *sr)
  4.1025 +{
  4.1026 +	sr->return_link_reg = dst;
  4.1027 +}
  4.1028 +
  4.1029 +static inline void
  4.1030 +desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
  4.1031 +{
  4.1032 +	struct unw_reg_info *reg = sr->curr.reg + regnum;
  4.1033 +
  4.1034 +	if (reg->where == UNW_WHERE_NONE)
  4.1035 +		reg->where = UNW_WHERE_GR_SAVE;
  4.1036 +	reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
  4.1037 +}
  4.1038 +
  4.1039 +static inline void
  4.1040 +desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
  4.1041 +{
  4.1042 +	sr->spill_offset = 0x10 - 4*pspoff;
  4.1043 +}
  4.1044 +
  4.1045 +static inline unsigned char *
  4.1046 +desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
  4.1047 +{
  4.1048 +	sr->imask = imaskp;
  4.1049 +	return imaskp + (2*sr->region_len + 7)/8;
  4.1050 +}
  4.1051 +
  4.1052 +/*
  4.1053 + * Body descriptors.
  4.1054 + */
  4.1055 +static inline void
  4.1056 +desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
  4.1057 +{
  4.1058 +	sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
  4.1059 +	sr->epilogue_count = ecount + 1;
  4.1060 +}
  4.1061 +
  4.1062 +static inline void
  4.1063 +desc_copy_state (unw_word label, struct unw_state_record *sr)
  4.1064 +{
  4.1065 +	struct unw_labeled_state *ls;
  4.1066 +
  4.1067 +	for (ls = sr->labeled_states; ls; ls = ls->next) {
  4.1068 +		if (ls->label == label) {
  4.1069 +			free_state_stack(&sr->curr);
  4.1070 +			memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
  4.1071 +			sr->curr.next = dup_state_stack(ls->saved_state.next);
  4.1072 +			return;
  4.1073 +		}
  4.1074 +	}
  4.1075 +	printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
  4.1076 +}
  4.1077 +
  4.1078 +static inline void
  4.1079 +desc_label_state (unw_word label, struct unw_state_record *sr)
  4.1080 +{
  4.1081 +	struct unw_labeled_state *ls;
  4.1082 +
  4.1083 +	ls = alloc_labeled_state();
  4.1084 +	if (!ls) {
  4.1085 +		printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
  4.1086 +		return;
  4.1087 +	}
  4.1088 +	ls->label = label;
  4.1089 +	memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
  4.1090 +	ls->saved_state.next = dup_state_stack(sr->curr.next);
  4.1091 +
  4.1092 +	/* insert into list of labeled states: */
  4.1093 +	ls->next = sr->labeled_states;
  4.1094 +	sr->labeled_states = ls;
  4.1095 +}
  4.1096 +
  4.1097 +/*
  4.1098 + * General descriptors.
  4.1099 + */
  4.1100 +
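         +/*
         + * desc_is_active() evaluates the qualifying predicate of a descriptor:
         + * the descriptor takes effect only if the target instruction lies past
         + * time T and, for QP > 0, predicate QP was set; pr_mask records which
         + * predicates the resulting script depends on.
         + */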
  4.1101 +static inline int
  4.1102 +desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
  4.1103 +{
  4.1104 +	if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
  4.1105 +		return 0;
  4.1106 +	if (qp > 0) {
  4.1107 +		if ((sr->pr_val & (1UL << qp)) == 0)
  4.1108 +			return 0;
  4.1109 +		sr->pr_mask |= (1UL << qp);
  4.1110 +	}
  4.1111 +	return 1;
  4.1112 +}
  4.1113 +
  4.1114 +static inline void
  4.1115 +desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
  4.1116 +{
  4.1117 +	struct unw_reg_info *r;
  4.1118 +
  4.1119 +	if (!desc_is_active(qp, t, sr))
  4.1120 +		return;
  4.1121 +
  4.1122 +	r = sr->curr.reg + decode_abreg(abreg, 0);
  4.1123 +	r->where = UNW_WHERE_NONE;
  4.1124 +	r->when = UNW_WHEN_NEVER;
  4.1125 +	r->val = 0;
  4.1126 +}
  4.1127 +
  4.1128 +static inline void
  4.1129 +desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
  4.1130 +		     unsigned char ytreg, struct unw_state_record *sr)
  4.1131 +{
  4.1132 +	enum unw_where where = UNW_WHERE_GR;
  4.1133 +	struct unw_reg_info *r;
  4.1134 +
  4.1135 +	if (!desc_is_active(qp, t, sr))
  4.1136 +		return;
  4.1137 +
  4.1138 +	if (x)
  4.1139 +		where = UNW_WHERE_BR;
  4.1140 +	else if (ytreg & 0x80)
  4.1141 +		where = UNW_WHERE_FR;
  4.1142 +
  4.1143 +	r = sr->curr.reg + decode_abreg(abreg, 0);
  4.1144 +	r->where = where;
  4.1145 +	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
  4.1146 +	r->val = (ytreg & 0x7f);
  4.1147 +}
  4.1148 +
  4.1149 +static inline void
  4.1150 +desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
  4.1151 +		     struct unw_state_record *sr)
  4.1152 +{
  4.1153 +	struct unw_reg_info *r;
  4.1154 +
  4.1155 +	if (!desc_is_active(qp, t, sr))
  4.1156 +		return;
  4.1157 +
  4.1158 +	r = sr->curr.reg + decode_abreg(abreg, 1);
  4.1159 +	r->where = UNW_WHERE_PSPREL;
  4.1160 +	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
  4.1161 +	r->val = 0x10 - 4*pspoff;
  4.1162 +}
  4.1163 +
  4.1164 +static inline void
  4.1165 +desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
  4.1166 +		       struct unw_state_record *sr)
  4.1167 +{
  4.1168 +	struct unw_reg_info *r;
  4.1169 +
  4.1170 +	if (!desc_is_active(qp, t, sr))
  4.1171 +		return;
  4.1172 +
  4.1173 +	r = sr->curr.reg + decode_abreg(abreg, 1);
  4.1174 +	r->where = UNW_WHERE_SPREL;
  4.1175 +	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
  4.1176 +	r->val = 4*spoff;
  4.1177 +}
  4.1178 +
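         +/*
         + * The UNW_DEC_* macros below bind the table-driven descriptor decoder
         + * in unwind_decoder.c (included at the end of this block) to the
         + * desc_* handlers above.
         + */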
  4.1179 +#define UNW_DEC_BAD_CODE(code)			printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
  4.1180 +						       code);
  4.1181 +
  4.1182 +/*
  4.1183 + * region headers:
  4.1184 + */
  4.1185 +#define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg)	desc_prologue(0,r,m,gr,arg)
  4.1186 +#define UNW_DEC_PROLOGUE(fmt,b,r,arg)		desc_prologue(b,r,0,32,arg)
  4.1187 +/*
  4.1188 + * prologue descriptors:
  4.1189 + */
  4.1190 +#define UNW_DEC_ABI(fmt,a,c,arg)		desc_abi(a,c,arg)
  4.1191 +#define UNW_DEC_BR_GR(fmt,b,g,arg)		desc_br_gr(b,g,arg)
  4.1192 +#define UNW_DEC_BR_MEM(fmt,b,arg)		desc_br_mem(b,arg)
  4.1193 +#define UNW_DEC_FRGR_MEM(fmt,g,f,arg)		desc_frgr_mem(g,f,arg)
  4.1194 +#define UNW_DEC_FR_MEM(fmt,f,arg)		desc_fr_mem(f,arg)
  4.1195 +#define UNW_DEC_GR_GR(fmt,m,g,arg)		desc_gr_gr(m,g,arg)
  4.1196 +#define UNW_DEC_GR_MEM(fmt,m,arg)		desc_gr_mem(m,arg)
  4.1197 +#define UNW_DEC_MEM_STACK_F(fmt,t,s,arg)	desc_mem_stack_f(t,s,arg)
  4.1198 +#define UNW_DEC_MEM_STACK_V(fmt,t,arg)		desc_mem_stack_v(t,arg)
  4.1199 +#define UNW_DEC_REG_GR(fmt,r,d,arg)		desc_reg_gr(r,d,arg)
  4.1200 +#define UNW_DEC_REG_PSPREL(fmt,r,o,arg)		desc_reg_psprel(r,o,arg)
  4.1201 +#define UNW_DEC_REG_SPREL(fmt,r,o,arg)		desc_reg_sprel(r,o,arg)
  4.1202 +#define UNW_DEC_REG_WHEN(fmt,r,t,arg)		desc_reg_when(r,t,arg)
  4.1203 +#define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
  4.1204 +#define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
  4.1205 +#define UNW_DEC_PRIUNAT_GR(fmt,r,arg)		desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
  4.1206 +#define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg)	desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
  4.1207 +#define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg)	desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
  4.1208 +#define UNW_DEC_RP_BR(fmt,d,arg)		desc_rp_br(d,arg)
  4.1209 +#define UNW_DEC_SPILL_BASE(fmt,o,arg)		desc_spill_base(o,arg)
  4.1210 +#define UNW_DEC_SPILL_MASK(fmt,m,arg)		(m = desc_spill_mask(m,arg))
  4.1211 +/*
  4.1212 + * body descriptors:
  4.1213 + */
  4.1214 +#define UNW_DEC_EPILOGUE(fmt,t,c,arg)		desc_epilogue(t,c,arg)
  4.1215 +#define UNW_DEC_COPY_STATE(fmt,l,arg)		desc_copy_state(l,arg)
  4.1216 +#define UNW_DEC_LABEL_STATE(fmt,l,arg)		desc_label_state(l,arg)
  4.1217 +/*
  4.1218 + * general unwind descriptors:
  4.1219 + */
  4.1220 +#define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg)	desc_spill_reg_p(p,t,a,x,y,arg)
  4.1221 +#define UNW_DEC_SPILL_REG(f,t,a,x,y,arg)	desc_spill_reg_p(0,t,a,x,y,arg)
  4.1222 +#define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg)	desc_spill_psprel_p(p,t,a,o,arg)
  4.1223 +#define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg)	desc_spill_psprel_p(0,t,a,o,arg)
  4.1224 +#define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg)	desc_spill_sprel_p(p,t,a,o,arg)
  4.1225 +#define UNW_DEC_SPILL_SPREL(f,t,a,o,arg)	desc_spill_sprel_p(0,t,a,o,arg)
  4.1226 +#define UNW_DEC_RESTORE_P(f,p,t,a,arg)		desc_restore_p(p,t,a,arg)
  4.1227 +#define UNW_DEC_RESTORE(f,t,a,arg)		desc_restore_p(0,t,a,arg)
  4.1228 +
  4.1229 +#include "unwind_decoder.c"
  4.1230 +
  4.1231 +
  4.1232 +/* Unwind scripts. */
  4.1233 +
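         +/*
         + * An unwind script is a short sequence of struct unw_insn operations
         + * that, when interpreted by run_script(), fills in the save-location
         + * pointers of a struct unw_frame_info.  Scripts are kept in a small
         + * LRU-managed cache (unw.cache) hashed by instruction pointer so that
         + * repeated unwinds through the same code avoid re-parsing the unwind
         + * descriptors.
         + */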
  4.1234 +static inline unw_hash_index_t
  4.1235 +hash (unsigned long ip)
  4.1236 +{
  4.1237 +#	define hashmagic	0x9e3779b97f4a7c16UL	/* based on ((sqrt(5)-1)/2)*2^64 */
  4.1238 +
  4.1239 +	return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
  4.1240 +#undef hashmagic
  4.1241 +}
  4.1242 +
  4.1243 +static inline long
  4.1244 +cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
  4.1245 +{
  4.1246 +	read_lock(&script->lock);
  4.1247 +	if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
  4.1248 +		/* keep the read lock... */
  4.1249 +		return 1;
  4.1250 +	read_unlock(&script->lock);
  4.1251 +	return 0;
  4.1252 +}
  4.1253 +
  4.1254 +static inline struct unw_script *
  4.1255 +script_lookup (struct unw_frame_info *info)
  4.1256 +{
  4.1257 +	struct unw_script *script = unw.cache + info->hint;
  4.1258 +	unsigned short index;
  4.1259 +	unsigned long ip, pr;
  4.1260 +
  4.1261 +	if (UNW_DEBUG_ON(0))
  4.1262 +		return NULL;	/* Always regenerate scripts in debug mode */
  4.1263 +
  4.1264 +	STAT(++unw.stat.cache.lookups);
  4.1265 +
  4.1266 +	ip = info->ip;
  4.1267 +	pr = info->pr;
  4.1268 +
  4.1269 +	if (cache_match(script, ip, pr)) {
  4.1270 +		STAT(++unw.stat.cache.hinted_hits);
  4.1271 +		return script;
  4.1272 +	}
  4.1273 +
  4.1274 +	index = unw.hash[hash(ip)];
  4.1275 +	if (index >= UNW_CACHE_SIZE)
  4.1276 +		return NULL;
  4.1277 +
  4.1278 +	script = unw.cache + index;
  4.1279 +	while (1) {
  4.1280 +		if (cache_match(script, ip, pr)) {
  4.1281 +			/* update hint; no locking required as single-word writes are atomic */
  4.1282 +			STAT(++unw.stat.cache.normal_hits);
  4.1283 +			unw.cache[info->prev_script].hint = script - unw.cache;
  4.1284 +			return script;
  4.1285 +		}
  4.1286 +		if (script->coll_chain >= UNW_HASH_SIZE)
  4.1287 +			return NULL;
  4.1288 +		script = unw.cache + script->coll_chain;
  4.1289 +		STAT(++unw.stat.cache.collision_chain_traversals);
  4.1290 +	}
  4.1291 +}
  4.1292 +
  4.1293 +/*
  4.1294 + * On returning, a write lock for the SCRIPT is still being held.
  4.1295 + */
  4.1296 +static inline struct unw_script *
  4.1297 +script_new (unsigned long ip)
  4.1298 +{
  4.1299 +	struct unw_script *script, *prev, *tmp;
  4.1300 +	unw_hash_index_t index;
  4.1301 +	unsigned short head;
  4.1302 +
  4.1303 +	STAT(++unw.stat.script.news);
  4.1304 +
  4.1305 +	/*
  4.1306 +	 * Can't (easily) use cmpxchg() here because of ABA problem
  4.1307 +	 * that is intrinsic in cmpxchg()...
  4.1308 +	 */
  4.1309 +	head = unw.lru_head;
  4.1310 +	script = unw.cache + head;
  4.1311 +	unw.lru_head = script->lru_chain;
  4.1312 +
  4.1313 +	/*
  4.1314 +	 * We'd deadlock here if we interrupted a thread that is holding a read lock on
  4.1315 +	 * script->lock.  Thus, if the write_trylock() fails, we simply bail out.  The
  4.1316 +	 * alternative would be to disable interrupts whenever we hold a read-lock, but
  4.1317 +	 * that seems silly.
  4.1318 +	 */
  4.1319 +	if (!write_trylock(&script->lock))
  4.1320 +		return NULL;
  4.1321 +
  4.1322 +	/* re-insert script at the tail of the LRU chain: */
  4.1323 +	unw.cache[unw.lru_tail].lru_chain = head;
  4.1324 +	unw.lru_tail = head;
  4.1325 +
  4.1326 +	/* remove the old script from the hash table (if it's there): */
  4.1327 +	if (script->ip) {
  4.1328 +		index = hash(script->ip);
  4.1329 +		tmp = unw.cache + unw.hash[index];
  4.1330 +		prev = NULL;
  4.1331 +		while (1) {
  4.1332 +			if (tmp == script) {
  4.1333 +				if (prev)
  4.1334 +					prev->coll_chain = tmp->coll_chain;
  4.1335 +				else
  4.1336 +					unw.hash[index] = tmp->coll_chain;
  4.1337 +				break;
  4.1338 +			} else
  4.1339 +				prev = tmp;
  4.1340 +			if (tmp->coll_chain >= UNW_CACHE_SIZE)
  4.1341 +				/* old script wasn't in the hash-table */
  4.1342 +				break;
  4.1343 +			tmp = unw.cache + tmp->coll_chain;
  4.1344 +		}
  4.1345 +	}
  4.1346 +
  4.1347 +	/* enter new script in the hash table */
  4.1348 +	index = hash(ip);
  4.1349 +	script->coll_chain = unw.hash[index];
  4.1350 +	unw.hash[index] = script - unw.cache;
  4.1351 +
  4.1352 +	script->ip = ip;	/* set new IP while we're holding the locks */
  4.1353 +
  4.1354 +	STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
  4.1355 +
  4.1356 +	script->flags = 0;
  4.1357 +	script->hint = 0;
  4.1358 +	script->count = 0;
  4.1359 +	return script;
  4.1360 +}
  4.1361 +
  4.1362 +static void
  4.1363 +script_finalize (struct unw_script *script, struct unw_state_record *sr)
  4.1364 +{
  4.1365 +	script->pr_mask = sr->pr_mask;
  4.1366 +	script->pr_val = sr->pr_val;
  4.1367 +	/*
  4.1368 +	 * We could down-grade our write-lock on script->lock here but
  4.1369 +	 * the rwlock API doesn't offer atomic lock downgrading, so
  4.1370 +	 * we'll just keep the write-lock and release it later when
  4.1371 +	 * we're done using the script.
  4.1372 +	 */
  4.1373 +}
  4.1374 +
  4.1375 +static inline void
  4.1376 +script_emit (struct unw_script *script, struct unw_insn insn)
  4.1377 +{
  4.1378 +	if (script->count >= UNW_MAX_SCRIPT_LEN) {
  4.1379 +		UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
  4.1380 +			__FUNCTION__, UNW_MAX_SCRIPT_LEN);
  4.1381 +		return;
  4.1382 +	}
  4.1383 +	script->insn[script->count++] = insn;
  4.1384 +}
  4.1385 +
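         +/*
         + * emit_nat_info() appends the instruction that records where the NaT
         + * bit of a preserved general register can be recovered from, based on
         + * where the register itself was saved.
         + */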
  4.1386 +static inline void
  4.1387 +emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
  4.1388 +{
  4.1389 +	struct unw_reg_info *r = sr->curr.reg + i;
  4.1390 +	enum unw_insn_opcode opc;
  4.1391 +	struct unw_insn insn;
  4.1392 +	unsigned long val = 0;
  4.1393 +
  4.1394 +	switch (r->where) {
  4.1395 +	      case UNW_WHERE_GR:
  4.1396 +		if (r->val >= 32) {
  4.1397 +			/* register got spilled to a stacked register */
  4.1398 +			opc = UNW_INSN_SETNAT_TYPE;
  4.1399 +			val = UNW_NAT_REGSTK;
  4.1400 +		} else
  4.1401 +			/* register got spilled to a scratch register */
  4.1402 +			opc = UNW_INSN_SETNAT_MEMSTK;
  4.1403 +		break;
  4.1404 +
  4.1405 +	      case UNW_WHERE_FR:
  4.1406 +		opc = UNW_INSN_SETNAT_TYPE;
  4.1407 +		val = UNW_NAT_VAL;
  4.1408 +		break;
  4.1409 +
  4.1410 +	      case UNW_WHERE_BR:
  4.1411 +		opc = UNW_INSN_SETNAT_TYPE;
  4.1412 +		val = UNW_NAT_NONE;
  4.1413 +		break;
  4.1414 +
  4.1415 +	      case UNW_WHERE_PSPREL:
  4.1416 +	      case UNW_WHERE_SPREL:
  4.1417 +		opc = UNW_INSN_SETNAT_MEMSTK;
  4.1418 +		break;
  4.1419 +
  4.1420 +	      default:
  4.1421 +		UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
  4.1422 +			   __FUNCTION__, r->where);
  4.1423 +		return;
  4.1424 +	}
  4.1425 +	insn.opc = opc;
  4.1426 +	insn.dst = unw.preg_index[i];
  4.1427 +	insn.val = val;
  4.1428 +	script_emit(script, insn);
  4.1429 +}
  4.1430 +
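         +/*
         + * compile_reg() translates the (where, when, val) triple accumulated
         + * for register I in the state record into the script instruction that
         + * computes the register's save location at the target instruction.
         + */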
  4.1431 +static void
  4.1432 +compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
  4.1433 +{
  4.1434 +	struct unw_reg_info *r = sr->curr.reg + i;
  4.1435 +	enum unw_insn_opcode opc;
  4.1436 +	unsigned long val, rval;
  4.1437 +	struct unw_insn insn;
  4.1438 +	long need_nat_info;
  4.1439 +
  4.1440 +	if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
  4.1441 +		return;
  4.1442 +
  4.1443 +	opc = UNW_INSN_MOVE;
  4.1444 +	val = rval = r->val;
  4.1445 +	need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
  4.1446 +
  4.1447 +	switch (r->where) {
  4.1448 +	      case UNW_WHERE_GR:
  4.1449 +		if (rval >= 32) {
  4.1450 +			opc = UNW_INSN_MOVE_STACKED;
  4.1451 +			val = rval - 32;
  4.1452 +		} else if (rval >= 4 && rval <= 7) {
  4.1453 +			if (need_nat_info) {
  4.1454 +				opc = UNW_INSN_MOVE2;
  4.1455 +				need_nat_info = 0;
  4.1456 +			}
  4.1457 +			val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
  4.1458 +		} else if (rval == 0) {
  4.1459 +			opc = UNW_INSN_MOVE_CONST;
  4.1460 +			val = 0;
  4.1461 +		} else {
  4.1462 +			/* register got spilled to a scratch register */
  4.1463 +			opc = UNW_INSN_MOVE_SCRATCH;
  4.1464 +			val = pt_regs_off(rval);
  4.1465 +		}
  4.1466 +		break;
  4.1467 +
  4.1468 +	      case UNW_WHERE_FR:
  4.1469 +		if (rval <= 5)
  4.1470 +			val = unw.preg_index[UNW_REG_F2  + (rval -  2)];
  4.1471 +		else if (rval >= 16 && rval <= 31)
  4.1472 +			val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
  4.1473 +		else {
  4.1474 +			opc = UNW_INSN_MOVE_SCRATCH;
  4.1475 +			if (rval <= 11)
  4.1476 +				val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
  4.1477 +			else
  4.1478 +				UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
  4.1479 +					   __FUNCTION__, rval);
  4.1480 +		}
  4.1481 +		break;
  4.1482 +
  4.1483 +	      case UNW_WHERE_BR:
  4.1484 +		if (rval >= 1 && rval <= 5)
  4.1485 +			val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
  4.1486 +		else {
  4.1487 +			opc = UNW_INSN_MOVE_SCRATCH;
  4.1488 +			if (rval == 0)
  4.1489 +				val = offsetof(struct pt_regs, b0);
  4.1490 +			else if (rval == 6)
  4.1491 +				val = offsetof(struct pt_regs, b6);
  4.1492 +			else
  4.1493 +				val = offsetof(struct pt_regs, b7);
  4.1494 +		}
  4.1495 +		break;
  4.1496 +
  4.1497 +	      case UNW_WHERE_SPREL:
  4.1498 +		opc = UNW_INSN_ADD_SP;
  4.1499 +		break;
  4.1500 +
  4.1501 +	      case UNW_WHERE_PSPREL:
  4.1502 +		opc = UNW_INSN_ADD_PSP;
  4.1503 +		break;
  4.1504 +
  4.1505 +	      default:
  4.1506 +		UNW_DPRINT(0, "unwind.%s: register %u has unexpected `where' value of %u\n",
  4.1507 +			   __FUNCTION__, i, r->where);
  4.1508 +		break;
  4.1509 +	}
  4.1510 +	insn.opc = opc;
  4.1511 +	insn.dst = unw.preg_index[i];
  4.1512 +	insn.val = val;
  4.1513 +	script_emit(script, insn);
  4.1514 +	if (need_nat_info)
  4.1515 +		emit_nat_info(sr, i, script);
  4.1516 +
  4.1517 +	if (i == UNW_REG_PSP) {
  4.1518 +		/*
  4.1519 +		 * info->psp must contain the _value_ of the previous
  4.1520 +		 * sp, not its save location.  We get this by
  4.1521 +		 * dereferencing the value we just stored in
  4.1522 +		 * info->psp:
  4.1523 +		 */
  4.1524 +		insn.opc = UNW_INSN_LOAD;
  4.1525 +		insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
  4.1526 +		script_emit(script, insn);
  4.1527 +	}
  4.1528 +}
  4.1529 +
  4.1530 +static inline const struct unw_table_entry *
  4.1531 +lookup (struct unw_table *table, unsigned long rel_ip)
  4.1532 +{
  4.1533 +	const struct unw_table_entry *e = NULL;
  4.1534 +	unsigned long lo, hi, mid;
  4.1535 +
  4.1536 +	/* do a binary search for right entry: */
  4.1537 +	for (lo = 0, hi = table->length; lo < hi; ) {
  4.1538 +		mid = (lo + hi) / 2;
  4.1539 +		e = &table->array[mid];
  4.1540 +		if (rel_ip < e->start_offset)
  4.1541 +			hi = mid;
  4.1542 +		else if (rel_ip >= e->end_offset)
  4.1543 +			lo = mid + 1;
  4.1544 +		else
  4.1545 +			break;
  4.1546 +	}
  4.1547 +	if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
  4.1548 +		return NULL;
  4.1549 +	return e;
  4.1550 +}
  4.1551 +
  4.1552 +/*
  4.1553 + * Build an unwind script that unwinds from the state described by INFO to
  4.1554 + * the entrypoint of the function that created that state.
  4.1555 + */
  4.1556 +static inline struct unw_script *
  4.1557 +build_script (struct unw_frame_info *info)
  4.1558 +{
  4.1559 +	const struct unw_table_entry *e = NULL;
  4.1560 +	struct unw_script *script = NULL;
  4.1561 +	struct unw_labeled_state *ls, *next;
  4.1562 +	unsigned long ip = info->ip;
  4.1563 +	struct unw_state_record sr;
  4.1564 +	struct unw_table *table;
  4.1565 +	struct unw_reg_info *r;
  4.1566 +	struct unw_insn insn;
  4.1567 +	u8 *dp, *desc_end;
  4.1568 +	u64 hdr;
  4.1569 +	int i;
  4.1570 +	STAT(unsigned long start, parse_start;)
  4.1571 +
  4.1572 +	STAT(++unw.stat.script.builds; start = ia64_get_itc());
  4.1573 +
  4.1574 +	/* build state record */
  4.1575 +	memset(&sr, 0, sizeof(sr));
  4.1576 +	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
  4.1577 +		r->when = UNW_WHEN_NEVER;
  4.1578 +	sr.pr_val = info->pr;
  4.1579 +
  4.1580 +	UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
  4.1581 +	script = script_new(ip);
  4.1582 +	if (!script) {
  4.1583 +		UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n",  __FUNCTION__);
  4.1584 +		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
  4.1585 +		return NULL;
  4.1586 +	}
  4.1587 +	unw.cache[info->prev_script].hint = script - unw.cache;
  4.1588 +
  4.1589 +	/* search the kernel's and the modules' unwind tables for IP: */
  4.1590 +
  4.1591 +	STAT(parse_start = ia64_get_itc());
  4.1592 +
  4.1593 +	for (table = unw.tables; table; table = table->next) {
  4.1594 +		if (ip >= table->start && ip < table->end) {
  4.1595 +			e = lookup(table, ip - table->segment_base);
  4.1596 +			break;
  4.1597 +		}
  4.1598 +	}
  4.1599 +	if (!e) {
  4.1600 +		/* no info, return default unwinder (leaf proc, no mem stack, no saved regs)  */
  4.1601 +		UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
  4.1602 +			__FUNCTION__, ip, unw.cache[info->prev_script].ip);
  4.1603 +		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
  4.1604 +		sr.curr.reg[UNW_REG_RP].when = -1;
  4.1605 +		sr.curr.reg[UNW_REG_RP].val = 0;
  4.1606 +		compile_reg(&sr, UNW_REG_RP, script);
  4.1607 +		script_finalize(script, &sr);
  4.1608 +		STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
  4.1609 +		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
  4.1610 +		return script;
  4.1611 +	}
  4.1612 +
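         +	/*
         +	 * when_target is the "time" of the target instruction, counted in
         +	 * instruction slots from the start of the procedure (three slots
         +	 * per 16-byte bundle).  The unwind info block starts with a 64-bit
         +	 * header; UNW_LENGTH(hdr) gives the number of 8-byte descriptor
         +	 * words that follow it.
         +	 */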
  4.1613 +	sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
  4.1614 +			  + (ip & 0xfUL));
  4.1615 +	hdr = *(u64 *) (table->segment_base + e->info_offset);
  4.1616 +	dp =   (u8 *)  (table->segment_base + e->info_offset + 8);
  4.1617 +	desc_end = dp + 8*UNW_LENGTH(hdr);
  4.1618 +
  4.1619 +	while (!sr.done && dp < desc_end)
  4.1620 +		dp = unw_decode(dp, sr.in_body, &sr);
  4.1621 +
  4.1622 +	if (sr.when_target > sr.epilogue_start) {
  4.1623 +		/*
  4.1624 +		 * sp has been restored and all values on the memory stack below
  4.1625 +		 * psp also have been restored.
  4.1626 +		 */
  4.1627 +		sr.curr.reg[UNW_REG_PSP].val = 0;
  4.1628 +		sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
  4.1629 +		sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
  4.1630 +		for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
  4.1631 +			if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
  4.1632 +			    || r->where == UNW_WHERE_SPREL)
  4.1633 +			{
  4.1634 +				r->val = 0;
  4.1635 +				r->where = UNW_WHERE_NONE;
  4.1636 +				r->when = UNW_WHEN_NEVER;
  4.1637 +			}
  4.1638 +	}
  4.1639 +
  4.1640 +	script->flags = sr.flags;
  4.1641 +
  4.1642 +	/*
  4.1643 +	 * If RP didn't get saved, generate entry for the return link
  4.1644 +	 * register.
  4.1645 +	 */
  4.1646 +	if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
  4.1647 +		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
  4.1648 +		sr.curr.reg[UNW_REG_RP].when = -1;
  4.1649 +		sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
  4.1650 +		UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
  4.1651 +			   __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
  4.1652 +			   sr.curr.reg[UNW_REG_RP].val);
  4.1653 +	}
  4.1654 +
  4.1655 +#ifdef UNW_DEBUG
  4.1656 +	UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
  4.1657 +		__FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
  4.1658 +	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
  4.1659 +		if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
  4.1660 +			UNW_DPRINT(1, "  %s <- ", unw.preg_name[r - sr.curr.reg]);
  4.1661 +			switch (r->where) {
  4.1662 +			      case UNW_WHERE_GR:     UNW_DPRINT(1, "r%lu", r->val); break;
  4.1663 +			      case UNW_WHERE_FR:     UNW_DPRINT(1, "f%lu", r->val); break;
  4.1664 +			      case UNW_WHERE_BR:     UNW_DPRINT(1, "b%lu", r->val); break;
  4.1665 +			      case UNW_WHERE_SPREL:  UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
  4.1666 +			      case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
  4.1667 +			      case UNW_WHERE_NONE:
  4.1668 +				UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
  4.1669 +				break;
  4.1670 +
  4.1671 +			      default:
  4.1672 +				UNW_DPRINT(1, "BADWHERE(%d)", r->where);
  4.1673 +				break;
  4.1674 +			}
  4.1675 +			UNW_DPRINT(1, "\t\t%d\n", r->when);
  4.1676 +		}
  4.1677 +	}
  4.1678 +#endif
  4.1679 +
  4.1680 +	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
  4.1681 +
  4.1682 +	/* translate state record into unwinder instructions: */
  4.1683 +
  4.1684 +	/*
  4.1685 +	 * First, set psp if we're dealing with a fixed-size frame;
  4.1686 +	 * subsequent instructions may depend on this value.
  4.1687 +	 */
  4.1688 +	if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
  4.1689 +	    && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
  4.1690 +	    && sr.curr.reg[UNW_REG_PSP].val != 0) {
  4.1691 +		/* new psp is sp plus frame size */
  4.1692 +		insn.opc = UNW_INSN_ADD;
  4.1693 +		insn.dst = offsetof(struct unw_frame_info, psp)/8;
  4.1694 +		insn.val = sr.curr.reg[UNW_REG_PSP].val;	/* frame size */
  4.1695 +		script_emit(script, insn);
  4.1696 +	}
  4.1697 +
  4.1698 +	/* determine where the primary UNaT is: */
  4.1699 +	if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
  4.1700 +		i = UNW_REG_PRI_UNAT_MEM;
  4.1701 +	else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
  4.1702 +		i = UNW_REG_PRI_UNAT_GR;
  4.1703 +	else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
  4.1704 +		i = UNW_REG_PRI_UNAT_MEM;
  4.1705 +	else
  4.1706 +		i = UNW_REG_PRI_UNAT_GR;
  4.1707 +
  4.1708 +	compile_reg(&sr, i, script);
  4.1709 +
  4.1710 +	for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
  4.1711 +		compile_reg(&sr, i, script);
  4.1712 +
  4.1713 +	/* free labeled register states & stack: */
  4.1714 +
  4.1715 +	STAT(parse_start = ia64_get_itc());
  4.1716 +	for (ls = sr.labeled_states; ls; ls = next) {
  4.1717 +		next = ls->next;
  4.1718 +		free_state_stack(&ls->saved_state);
  4.1719 +		free_labeled_state(ls);
  4.1720 +	}
  4.1721 +	free_state_stack(&sr.curr);
  4.1722 +	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
  4.1723 +
  4.1724 +	script_finalize(script, &sr);
  4.1725 +	STAT(unw.stat.script.build_time += ia64_get_itc() - start);
  4.1726 +	return script;
  4.1727 +}
  4.1728 +
  4.1729 +/*
  4.1730 + * Apply the unwinding actions encoded in SCRIPT and update STATE to
  4.1731 + * reflect the state that existed upon entry to the function that this
  4.1732 + * script represents.
  4.1733 + */
  4.1734 +static inline void
  4.1735 +run_script (struct unw_script *script, struct unw_frame_info *state)
  4.1736 +{
  4.1737 +	struct unw_insn *ip, *limit, next_insn;
  4.1738 +	unsigned long opc, dst, val, off;
  4.1739 +	unsigned long *s = (unsigned long *) state;
  4.1740 +	STAT(unsigned long start;)
  4.1741 +
  4.1742 +	STAT(++unw.stat.script.runs; start = ia64_get_itc());
  4.1743 +	state->flags = script->flags;
  4.1744 +	ip = script->insn;
  4.1745 +	limit = script->insn + script->count;
  4.1746 +	next_insn = *ip;
  4.1747 +
  4.1748 +	while (ip++ < limit) {
  4.1749 +		opc = next_insn.opc;
  4.1750 +		dst = next_insn.dst;
  4.1751 +		val = next_insn.val;
  4.1752 +		next_insn = *ip;
  4.1753 +
  4.1754 +	  redo:
  4.1755 +		switch (opc) {
  4.1756 +		      case UNW_INSN_ADD:
  4.1757 +			s[dst] += val;
  4.1758 +			break;
  4.1759 +
  4.1760 +		      case UNW_INSN_MOVE2:
  4.1761 +			if (!s[val])
  4.1762 +				goto lazy_init;
  4.1763 +			s[dst+1] = s[val+1];
  4.1764 +			s[dst] = s[val];
  4.1765 +			break;
  4.1766 +
  4.1767 +		      case UNW_INSN_MOVE:
  4.1768 +			if (!s[val])
  4.1769 +				goto lazy_init;
  4.1770 +			s[dst] = s[val];
  4.1771 +			break;
  4.1772 +
  4.1773 +		      case UNW_INSN_MOVE_SCRATCH:
  4.1774 +			if (state->pt) {
  4.1775 +				s[dst] = (unsigned long) get_scratch_regs(state) + val;
  4.1776 +			} else {
  4.1777 +				s[dst] = 0;
  4.1778 +				UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
  4.1779 +					   __FUNCTION__, dst, val);
  4.1780 +			}
  4.1781 +			break;
  4.1782 +
  4.1783 +		      case UNW_INSN_MOVE_CONST:
  4.1784 +			if (val == 0)
  4.1785 +				s[dst] = (unsigned long) &unw.r0;
  4.1786 +			else {
  4.1787 +				s[dst] = 0;
  4.1788 +				UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
  4.1789 +					   __FUNCTION__, val);
  4.1790 +			}
  4.1791 +			break;
  4.1792 +
  4.1793 +
  4.1794 +		      case UNW_INSN_MOVE_STACKED:
  4.1795 +			s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
  4.1796 +								    val);
  4.1797 +			break;
  4.1798 +
  4.1799 +		      case UNW_INSN_ADD_PSP:
  4.1800 +			s[dst] = state->psp + val;
  4.1801 +			break;
  4.1802 +
  4.1803 +		      case UNW_INSN_ADD_SP:
  4.1804 +			s[dst] = state->sp + val;
  4.1805 +			break;
  4.1806 +
  4.1807 +		      case UNW_INSN_SETNAT_MEMSTK:
  4.1808 +			if (!state->pri_unat_loc)
  4.1809 +				state->pri_unat_loc = &state->sw->caller_unat;
  4.1810 +			/* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
  4.1811 +			s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
  4.1812 +			break;
  4.1813 +
  4.1814 +		      case UNW_INSN_SETNAT_TYPE:
  4.1815 +			s[dst+1] = val;
  4.1816 +			break;
  4.1817 +
  4.1818 +		      case UNW_INSN_LOAD:
  4.1819 +#ifdef UNW_DEBUG
  4.1820 +			if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
  4.1821 +#ifndef XEN
  4.1822 +			    || s[val] < TASK_SIZE
  4.1823 +#endif
  4.1824 +				)
  4.1825 +			{
  4.1826 +				UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
  4.1827 +					   __FUNCTION__, s[val]);
  4.1828 +				break;
  4.1829 +			}
  4.1830 +#endif
  4.1831 +			s[dst] = *(unsigned long *) s[val];
  4.1832 +			break;
  4.1833 +		}
  4.1834 +	}
  4.1835 +	STAT(unw.stat.script.run_time += ia64_get_itc() - start);
  4.1836 +	return;
  4.1837 +
  4.1838 +  lazy_init:
  4.1839 +	off = unw.sw_off[val];
  4.1840 +	s[val] = (unsigned long) state->sw + off;
  4.1841 +	if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
  4.1842 +		/*
  4.1843 +		 * We're initializing a general register: init NaT info, too.  Note that
  4.1844 +		 * the offset is a multiple of 8 which gives us the 3 bits needed for
  4.1845 +		 * the type field.
  4.1846 +		 */
  4.1847 +		s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
  4.1848 +	goto redo;
  4.1849 +}
  4.1850 +
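         +/*
         + * find_save_locs() looks up (or, on a cache miss, builds) the unwind
         + * script for info->ip and runs it, filling in the save-location
         + * pointers (rp_loc, pfs_loc, etc.) that unw_unwind() and the accessor
         + * routines rely on.
         + */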
  4.1851 +static int
  4.1852 +find_save_locs (struct unw_frame_info *info)
  4.1853 +{
  4.1854 +	int have_write_lock = 0;
  4.1855 +	struct unw_script *scr;
  4.1856 +	unsigned long flags = 0;
  4.1857 +
  4.1858 +	if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf))
  4.1859 +#ifndef XEN
  4.1860 +	    || info->ip < TASK_SIZE
  4.1861 +#endif
  4.1862 +		) {
  4.1863 +		/* don't let obviously bad addresses pollute the cache */
  4.1864 +		/* FIXME: should really be level 0 but it occurs too often. KAO */
  4.1865 +		UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
  4.1866 +		info->rp_loc = NULL;
  4.1867 +		return -1;
  4.1868 +	}
  4.1869 +
  4.1870 +	scr = script_lookup(info);
  4.1871 +	if (!scr) {
  4.1872 +		spin_lock_irqsave(&unw.lock, flags);
  4.1873 +		scr = build_script(info);
  4.1874 +		if (!scr) {
  4.1875 +			spin_unlock_irqrestore(&unw.lock, flags);
  4.1876 +			UNW_DPRINT(0,
  4.1877 +				   "unwind.%s: failed to locate/build unwind script for ip %lx\n",
  4.1878 +				   __FUNCTION__, info->ip);
  4.1879 +			return -1;
  4.1880 +		}
  4.1881 +		have_write_lock = 1;
  4.1882 +	}
  4.1883 +	info->hint = scr->hint;
  4.1884 +	info->prev_script = scr - unw.cache;
  4.1885 +
  4.1886 +	run_script(scr, info);
  4.1887 +
  4.1888 +	if (have_write_lock) {
  4.1889 +		write_unlock(&scr->lock);
  4.1890 +		spin_unlock_irqrestore(&unw.lock, flags);
  4.1891 +	} else
  4.1892 +		read_unlock(&scr->lock);
  4.1893 +	return 0;
  4.1894 +}
  4.1895 +
  4.1896 +int
  4.1897 +unw_unwind (struct unw_frame_info *info)
  4.1898 +{
  4.1899 +	unsigned long prev_ip, prev_sp, prev_bsp;
  4.1900 +	unsigned long ip, pr, num_regs;
  4.1901 +	STAT(unsigned long start, flags;)
  4.1902 +	int retval;
  4.1903 +
  4.1904 +	STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
  4.1905 +
  4.1906 +	prev_ip = info->ip;
  4.1907 +	prev_sp = info->sp;
  4.1908 +	prev_bsp = info->bsp;
  4.1909 +
  4.1910 +	/* restore the ip */
  4.1911 +	if (!info->rp_loc) {
  4.1912 +		/* FIXME: should really be level 0 but it occurs too often. KAO */
  4.1913 +		UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
  4.1914 +			   __FUNCTION__, info->ip);
  4.1915 +		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
  4.1916 +		return -1;
  4.1917 +	}
  4.1918 +	ip = info->ip = *info->rp_loc;
  4.1919 +	if (ip < GATE_ADDR) {
  4.1920 +		UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
  4.1921 +		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
  4.1922 +		return -1;
  4.1923 +	}
  4.1924 +
  4.1925 +	/* restore the cfm: */
  4.1926 +	if (!info->pfs_loc) {
  4.1927 +		UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
  4.1928 +		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
  4.1929 +		return -1;
  4.1930 +	}
  4.1931 +	info->cfm_loc = info->pfs_loc;
  4.1932 +
  4.1933 +	/* restore the bsp: */
  4.1934 +	pr = info->pr;
  4.1935 +	num_regs = 0;
  4.1936 +	if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
  4.1937 +		info->pt = info->sp + 16;
  4.1938 +		if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
  4.1939 +			num_regs = *info->cfm_loc & 0x7f;		/* size of frame */
  4.1940 +		info->pfs_loc =
  4.1941 +			(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
  4.1942 +		UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
  4.1943 +	} else
  4.1944 +		num_regs = (*info->cfm_loc >> 7) & 0x7f;	/* size of locals */
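         +	/*
         +	 * Walk bsp back over the registers the current frame allocated on
         +	 * the register backing store; ia64_rse_skip_regs() accounts for
         +	 * the NaT collection slot that occupies every 64th word.
         +	 */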
  4.1945 +	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
  4.1946 +	if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
  4.1947 +		UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
  4.1948 +			__FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
  4.1949 +		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
  4.1950 +		return -1;
  4.1951 +	}
  4.1952 +
  4.1953 +	/* restore the sp: */
  4.1954 +	info->sp = info->psp;
  4.1955 +	if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
  4.1956 +		UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
  4.1957 +			__FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
  4.1958 +		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
  4.1959 +		return -1;
  4.1960 +	}
  4.1961 +
  4.1962 +	if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
  4.1963 +		UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
  4.1964 +			   __FUNCTION__, ip);
  4.1965 +		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
  4.1966 +		return -1;
  4.1967 +	}
  4.1968 +
  4.1969 +	/* as we unwind, the saved ar.unat becomes the primary unat: */
  4.1970 +	info->pri_unat_loc = info->unat_loc;
  4.1971 +
  4.1972 +	/* finally, restore the predicates: */
  4.1973 +	unw_get_pr(info, &info->pr);
  4.1974 +
  4.1975 +	retval = find_save_locs(info);
  4.1976 +	STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
  4.1977 +	return retval;
  4.1978 +}
  4.1979 +EXPORT_SYMBOL(unw_unwind);
  4.1980 +
  4.1981 +int
  4.1982 +unw_unwind_to_user (struct unw_frame_info *info)
  4.1983 +{
  4.1984 +	unsigned long ip, sp, pr = 0;
  4.1985 +
  4.1986 +	while (unw_unwind(info) >= 0) {
  4.1987 +		unw_get_sp(info, &sp);
  4.1988 +		if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
  4.1989 +		    < IA64_PT_REGS_SIZE) {
  4.1990 +			UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
  4.1991 +				   __FUNCTION__);
  4.1992 +			break;
  4.1993 +		}
  4.1994 +		if (unw_is_intr_frame(info) &&
  4.1995 +		    (pr & (1UL << PRED_USER_STACK)))
  4.1996 +			return 0;
  4.1997 +		if (unw_get_pr (info, &pr) < 0) {
  4.1998 +			unw_get_rp(info, &ip);
  4.1999 +			UNW_DPRINT(0, "unwind.%s: failed to read "
  4.2000 +				   "predicate register (ip=0x%lx)\n",
  4.2001 +				__FUNCTION__, ip);
  4.2002 +			return -1;
  4.2003 +		}
  4.2004 +	}
  4.2005 +	unw_get_ip(info, &ip);
  4.2006 +	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
  4.2007 +		   __FUNCTION__, ip);
  4.2008 +	return -1;
  4.2009 +}
  4.2010 +EXPORT_SYMBOL(unw_unwind_to_user);
  4.2011 +
  4.2012 +static void
  4.2013 +init_frame_info (struct unw_frame_info *info, struct task_struct *t,
  4.2014 +		 struct switch_stack *sw, unsigned long stktop)
  4.2015 +{
  4.2016 +	unsigned long rbslimit, rbstop, stklimit;
  4.2017 +	STAT(unsigned long start, flags;)
  4.2018 +
  4.2019 +	STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
  4.2020 +
  4.2021 +	/*
  4.2022 +	 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
  4.2023 +	 * don't want to do that because it would be slow as each preserved register would
  4.2024 +	 * have to be processed.  Instead, what we do here is zero out the frame info and
  4.2025 +	 * start the unwind process at the function that created the switch_stack frame.
  4.2026 +	 * When a preserved value in switch_stack needs to be accessed, run_script() will
  4.2027 +	 * initialize the appropriate pointer on demand.
  4.2028 +	 */
  4.2029 +	memset(info, 0, sizeof(*info));
  4.2030 +
  4.2031 +	rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
  4.2032 +	rbstop   = sw->ar_bspstore;
  4.2033 +	if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
  4.2034 +		rbstop = rbslimit;
  4.2035 +
  4.2036 +	stklimit = (unsigned long) t + IA64_STK_OFFSET;
  4.2037 +	if (stktop <= rbstop)
  4.2038 +		stktop = rbstop;
  4.2039 +
  4.2040 +	info->regstk.limit = rbslimit;
  4.2041 +	info->regstk.top   = rbstop;
  4.2042 +	info->memstk.limit = stklimit;
  4.2043 +	info->memstk.top   = stktop;
  4.2044 +	info->task = t;
  4.2045 +	info->sw  = sw;
  4.2046 +	info->sp = info->psp = stktop;
  4.2047 +	info->pr = sw->pr;
  4.2048 +	UNW_DPRINT(3, "unwind.%s:\n"
  4.2049 +		   "  task   0x%lx\n"
  4.2050 +		   "  rbs = [0x%lx-0x%lx)\n"
  4.2051 +		   "  stk = [0x%lx-0x%lx)\n"
  4.2052 +		   "  pr     0x%lx\n"
  4.2053 +		   "  sw     0x%lx\n"
  4.2054 +		   "  sp     0x%lx\n",
  4.2055 +		   __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
  4.2056 +		   info->pr, (unsigned long) info->sw, info->sp);
  4.2057 +	STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
  4.2058 +}
  4.2059 +
  4.2060 +void
  4.2061 +unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
  4.2062 +{
  4.2063 +	unsigned long sol;
  4.2064 +
  4.2065 +	init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
  4.2066 +	info->cfm_loc = &sw->ar_pfs;
  4.2067 +	sol = (*info->cfm_loc >> 7) & 0x7f;
  4.2068 +	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
  4.2069 +	info->ip = sw->b0;
  4.2070 +	UNW_DPRINT(3, "unwind.%s:\n"
  4.2071 +		   "  bsp    0x%lx\n"
  4.2072 +		   "  sol    0x%lx\n"
  4.2073 +		   "  ip     0x%lx\n",
  4.2074 +		   __FUNCTION__, info->bsp, sol, info->ip);
  4.2075 +	find_save_locs(info);
  4.2076 +}
  4.2077 +
  4.2078 +EXPORT_SYMBOL(unw_init_frame_info);
  4.2079 +
  4.2080 +void
  4.2081 +unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
  4.2082 +{
  4.2083 +#ifdef XEN
  4.2084 +	struct switch_stack *sw = (struct switch_stack *) (t->arch._thread.ksp + 16);
  4.2085 +#else
  4.2086 +	struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
  4.2087 +#endif
  4.2088 +
  4.2089 +	UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
  4.2090 +	unw_init_frame_info(info, t, sw);
  4.2091 +}
  4.2092 +EXPORT_SYMBOL(unw_init_from_blocked_task);
  4.2093 +
  4.2094 +static void
  4.2095 +init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
  4.2096 +		   unsigned long gp, const void *table_start, const void *table_end)
  4.2097 +{
  4.2098 +	const struct unw_table_entry *start = table_start, *end = table_end;
  4.2099 +
  4.2100 +	table->name = name;
  4.2101 +	table->segment_base = segment_base;
  4.2102 +	table->gp = gp;
  4.2103 +	table->start = segment_base + start[0].start_offset;
  4.2104 +	table->end = segment_base + end[-1].end_offset;
  4.2105 +	table->array = start;
  4.2106 +	table->length = end - start;
  4.2107 +}
  4.2108 +
  4.2109 +#ifndef XEN
  4.2110 +void *
  4.2111 +unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
  4.2112 +		      const void *table_start, const void *table_end)
  4.2113 +{
  4.2114 +	const struct unw_table_entry *start = table_start, *end = table_end;
  4.2115 +	struct unw_table *table;
  4.2116 +	unsigned long flags;
  4.2117 +
  4.2118 +	if (end - start <= 0) {
  4.2119 +		UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
  4.2120 +			   __FUNCTION__);
  4.2121 +		return NULL;
  4.2122 +	}
  4.2123 +
  4.2124 +	table = kmalloc(sizeof(*table), GFP_USER);
  4.2125 +	if (!table)
  4.2126 +		return NULL;
  4.2127 +
  4.2128 +	init_unwind_table(table, name, segment_base, gp, table_start, table_end);
  4.2129 +
  4.2130 +	spin_lock_irqsave(&unw.lock, flags);
  4.2131 +	{
  4.2132 +		/* keep kernel unwind table at the front (it's searched most commonly): */
  4.2133 +		table->next = unw.tables->next;
  4.2134 +		unw.tables->next = table;
  4.2135 +	}
  4.2136 +	spin_unlock_irqrestore(&unw.lock, flags);
  4.2137 +
  4.2138 +	return table;
  4.2139 +}
  4.2140 +
  4.2141 +void
  4.2142 +unw_remove_unwind_table (void *handle)
  4.2143 +{
  4.2144 +	struct unw_table *table, *prev;
  4.2145 +	struct unw_script *tmp;
  4.2146 +	unsigned long flags;
  4.2147 +	long index;
  4.2148 +
  4.2149 +	if (!handle) {
  4.2150 +		UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
  4.2151 +			   __FUNCTION__);
  4.2152 +		return;
  4.2153 +	}
  4.2154 +
  4.2155 +	table = handle;
  4.2156 +	if (table == &unw.kernel_table) {
  4.2157 +		UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
  4.2158 +			   "no-can-do!\n", __FUNCTION__);
  4.2159 +		return;
  4.2160 +	}
  4.2161 +
  4.2162 +	spin_lock_irqsave(&unw.lock, flags);
  4.2163 +	{
  4.2164 +		/* first, delete the table: */
  4.2165 +
  4.2166 +		for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
  4.2167 +			if (prev->next == table)
  4.2168 +				break;
  4.2169 +		if (!prev) {
  4.2170 +			UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
  4.2171 +				   __FUNCTION__, (void *) table);
  4.2172 +			spin_unlock_irqrestore(&unw.lock, flags);
  4.2173 +			return;
  4.2174 +		}
  4.2175 +		prev->next = table->next;
  4.2176 +	}
  4.2177 +	spin_unlock_irqrestore(&unw.lock, flags);
  4.2178 +
  4.2179 +	/* next, remove hash table entries for this table */
  4.2180 +
  4.2181 +	for (index = 0; index < UNW_HASH_SIZE; ++index) {
  4.2182 +		tmp = unw.cache + unw.hash[index];
  4.2183 +		if (unw.hash[index] >= UNW_CACHE_SIZE
  4.2184 +		    || tmp->ip < table->start || tmp->ip >= table->end)
  4.2185 +			continue;
  4.2186 +
  4.2187 +		write_lock(&tmp->lock);
  4.2188 +		{
  4.2189 +			if (tmp->ip >= table->start && tmp->ip < table->end) {
  4.2190 +				unw.hash[index] = tmp->coll_chain;
  4.2191 +				tmp->ip = 0;
  4.2192 +			}
  4.2193 +		}
  4.2194 +		write_unlock(&tmp->lock);
  4.2195 +	}
  4.2196 +
  4.2197 +	kfree(table);
  4.2198 +}
  4.2199 +
  4.2200 +static int __init
  4.2201 +create_gate_table (void)
  4.2202 +{
  4.2203 +	const struct unw_table_entry *entry, *start, *end;
  4.2204 +	unsigned long *lp, segbase = GATE_ADDR;
  4.2205 +	size_t info_size, size;
  4.2206 +	char *info;
  4.2207 +	Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
  4.2208 +	int i;
  4.2209 +
  4.2210 +	for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
  4.2211 +		if (phdr->p_type == PT_IA_64_UNWIND) {
  4.2212 +			punw = phdr;
  4.2213 +			break;
  4.2214 +		}
  4.2215 +
  4.2216 +	if (!punw) {
  4.2217 +		printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__);
  4.2218 +		return 0;
  4.2219 +	}
  4.2220 +
  4.2221 +	start = (const struct unw_table_entry *) punw->p_vaddr;
  4.2222 +	end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
  4.2223 +	size  = 0;
  4.2224 +
  4.2225 +	unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
  4.2226 +
  4.2227 +	for (entry = start; entry < end; ++entry)
  4.2228 +		size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
  4.2229 +	size += 8;	/* reserve space for "end of table" marker */
  4.2230 +
  4.2231 +	unw.gate_table = kmalloc(size, GFP_KERNEL);
  4.2232 +	if (!unw.gate_table) {
  4.2233 +		unw.gate_table_size = 0;
  4.2234 +		printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__);
  4.2235 +		return 0;
  4.2236 +	}
  4.2237 +	unw.gate_table_size = size;
  4.2238 +
  4.2239 +	lp = unw.gate_table;
  4.2240 +	info = (char *) unw.gate_table + size;
  4.2241 +
  4.2242 +	for (entry = start; entry < end; ++entry, lp += 3) {
  4.2243 +		info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
  4.2244 +		info -= info_size;
  4.2245 +		memcpy(info, (char *) segbase + entry->info_offset, info_size);
  4.2246 +
  4.2247 +		lp[0] = segbase + entry->start_offset;		/* start */
  4.2248 +		lp[1] = segbase + entry->end_offset;		/* end */
  4.2249 +		lp[2] = info - (char *) unw.gate_table;		/* info */
  4.2250 +	}
  4.2251 +	*lp = 0;	/* end-of-table marker */
  4.2252 +	return 0;
  4.2253 +}
  4.2254 +
  4.2255 +__initcall(create_gate_table);
  4.2256 +#endif // !XEN
  4.2257 +
  4.2258 +void __init
  4.2259 +unw_init (void)
  4.2260 +{
  4.2261 +	extern char __gp[];
  4.2262 +	extern void unw_hash_index_t_is_too_narrow (void);
  4.2263 +	long i, off;
  4.2264 +
  4.2265 +	if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
  4.2266 +		unw_hash_index_t_is_too_narrow();
  4.2267 +
  4.2268 +	unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT);
  4.2269 +	unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
  4.2270 +	unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
  4.2271 +	unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
  4.2272 +	unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT);
  4.2273 +	unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
  4.2274 +	unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
  4.2275 +	unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
  4.2276 +	for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
  4.2277 +		unw.sw_off[unw.preg_index[i]] = off;
  4.2278 +	for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
  4.2279 +		unw.sw_off[unw.preg_index[i]] = off;
  4.2280 +	for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
  4.2281 +		unw.sw_off[unw.preg_index[i]] = off;
  4.2282 +	for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
  4.2283 +		unw.sw_off[unw.preg_index[i]] = off;
  4.2284 +
  4.2285 +	for (i = 0; i < UNW_CACHE_SIZE; ++i) {
  4.2286 +		if (i > 0)
  4.2287 +			unw.cache[i].lru_chain = (i - 1);
  4.2288 +		unw.cache[i].coll_chain = -1;
  4.2289 +		rwlock_init(&unw.cache[i].lock);
  4.2290 +	}
  4.2291 +	unw.lru_head = UNW_CACHE_SIZE - 1;
  4.2292 +	unw.lru_tail = 0;
  4.2293 +
  4.2294 +	init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
  4.2295 +			  __start_unwind, __end_unwind);
  4.2296 +}
  4.2297 +
  4.2298 +/*
  4.2299 + * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
  4.2300 + *
  4.2301 + *	This system call has been deprecated.  The new and improved way to get
  4.2302 + *	at the kernel's unwind info is via the gate DSO.  The address of the
  4.2303 + *	ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
  4.2304 + *
  4.2305 + * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
  4.2306 + *
  4.2307 + * This system call copies the unwind data into the buffer pointed to by BUF and returns
  4.2308 + * the size of the unwind data.  If BUF_SIZE is smaller than the size of the unwind data
  4.2309 + * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
  4.2310 + * unwind data.
  4.2311 + *
  4.2312 + * The first portion of the unwind data contains an unwind table and the rest contains the
  4.2313 + * associated unwind info (in no particular order).  The unwind table consists of a table
  4.2314 + * of entries of the form:
  4.2315 + *
  4.2316 + *	u64 start;	(64-bit address of start of function)
  4.2317 + *	u64 end;	(64-bit address of end of function)
  4.2318 + *	u64 info;	(BUF-relative offset to unwind info)
  4.2319 + *
  4.2320 + * The end of the unwind table is indicated by an entry with a START address of zero.
  4.2321 + *
  4.2322 + * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
  4.2323 + * on the format of the unwind info.
  4.2324 + *
  4.2325 + * ERRORS
  4.2326 + *	EFAULT	BUF points outside your accessible address space.
  4.2327 + */
  4.2328 +asmlinkage long
  4.2329 +sys_getunwind (void __user *buf, size_t buf_size)
  4.2330 +{
  4.2331 +	if (buf && buf_size >= unw.gate_table_size)
  4.2332 +		if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
  4.2333 +			return -EFAULT;
  4.2334 +	return unw.gate_table_size;
  4.2335 +}
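
For illustration only (not part of this changeset): the sketch below fabricates a buffer in exactly the layout documented above, three 64-bit words per function terminated by a zero start address, and walks it; every name and address in it is made up.

#include <stdio.h>

/* Walk a buffer laid out as described for sys_getunwind(): 3-word entries of
 * (function start, function end, BUF-relative offset of the unwind info),
 * terminated by an entry whose start address is zero. */
static void walk_unwind_buf(const unsigned long long *buf)
{
	const unsigned long long *lp;

	for (lp = buf; lp[0] != 0; lp += 3)
		printf("fn [0x%llx-0x%llx) unwind info at offset 0x%llx\n",
		       lp[0], lp[1], lp[2]);
}

int main(void)
{
	/* Two made-up entries plus the end-of-table marker. */
	unsigned long long buf[] = {
		0xa000000100010000ULL, 0xa000000100010200ULL, 56,
		0xa000000100010200ULL, 0xa000000100010480ULL, 96,
		0
	};

	walk_unwind_buf(buf);
	return 0;
}
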
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/arch/ia64/linux-xen/unwind_decoder.c	Fri Dec 30 23:40:13 2005 -0600
     5.3 @@ -0,0 +1,459 @@
     5.4 +/*
     5.5 + * Copyright (C) 2000 Hewlett-Packard Co
     5.6 + * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
     5.7 + *
     5.8 + * Generic IA-64 unwind info decoder.
     5.9 + *
    5.10 + * This file is used both by the Linux kernel and objdump.  Please keep
    5.11 + * the two copies of this file in sync.
    5.12 + *
    5.13 + * You need to customize the decoder by defining the following
    5.14 + * macros/constants before including this file:
    5.15 + *
    5.16 + *  Types:
    5.17 + *	unw_word	Unsigned integer type with at least 64 bits 
    5.18 + *
    5.19 + *  Register names:
    5.20 + *	UNW_REG_BSP
    5.21 + *	UNW_REG_BSPSTORE
    5.22 + *	UNW_REG_FPSR
    5.23 + *	UNW_REG_LC
    5.24 + *	UNW_REG_PFS
    5.25 + *	UNW_REG_PR
    5.26 + *	UNW_REG_RNAT
    5.27 + *	UNW_REG_PSP
    5.28 + *	UNW_REG_RP
    5.29 + *	UNW_REG_UNAT
    5.30 + *
    5.31 + *  Decoder action macros:
    5.32 + *	UNW_DEC_BAD_CODE(code)
    5.33 + *	UNW_DEC_ABI(fmt,abi,context,arg)
    5.34 + *	UNW_DEC_BR_GR(fmt,brmask,gr,arg)
    5.35 + *	UNW_DEC_BR_MEM(fmt,brmask,arg)
    5.36 + *	UNW_DEC_COPY_STATE(fmt,label,arg)
    5.37 + *	UNW_DEC_EPILOGUE(fmt,t,ecount,arg)
    5.38 + *	UNW_DEC_FRGR_MEM(fmt,grmask,frmask,arg)
    5.39 + *	UNW_DEC_FR_MEM(fmt,frmask,arg)
    5.40 + *	UNW_DEC_GR_GR(fmt,grmask,gr,arg)
    5.41 + *	UNW_DEC_GR_MEM(fmt,grmask,arg)
    5.42 + *	UNW_DEC_LABEL_STATE(fmt,label,arg)
    5.43 + *	UNW_DEC_MEM_STACK_F(fmt,t,size,arg)
    5.44 + *	UNW_DEC_MEM_STACK_V(fmt,t,arg)
    5.45 + *	UNW_DEC_PRIUNAT_GR(fmt,r,arg)
    5.46 + *	UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)
    5.47 + *	UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)
    5.48 + *	UNW_DEC_PRIUNAT_WHEN_PSPREL(fmt,pspoff,arg)
    5.49 + *	UNW_DEC_PRIUNAT_WHEN_SPREL(fmt,spoff,arg)
    5.50 + *	UNW_DEC_PROLOGUE(fmt,body,rlen,arg)
    5.51 + *	UNW_DEC_PROLOGUE_GR(fmt,rlen,mask,grsave,arg)
    5.52 + *	UNW_DEC_REG_PSPREL(fmt,reg,pspoff,arg)
    5.53 + *	UNW_DEC_REG_REG(fmt,src,dst,arg)
    5.54 + *	UNW_DEC_REG_SPREL(fmt,reg,spoff,arg)
    5.55 + *	UNW_DEC_REG_WHEN(fmt,reg,t,arg)
    5.56 + *	UNW_DEC_RESTORE(fmt,t,abreg,arg)
    5.57 + *	UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg)
    5.58 + *	UNW_DEC_SPILL_BASE(fmt,pspoff,arg)
    5.59 + *	UNW_DEC_SPILL_MASK(fmt,imaskp,arg)
    5.60 + *	UNW_DEC_SPILL_PSPREL(fmt,t,abreg,pspoff,arg)
    5.61 + *	UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg)
    5.62 + *	UNW_DEC_SPILL_REG(fmt,t,abreg,x,ytreg,arg)
    5.63 + *	UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg)
    5.64 + *	UNW_DEC_SPILL_SPREL(fmt,t,abreg,spoff,arg)
    5.65 + *	UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,pspoff,arg)
    5.66 + */
    5.67 +
    5.68 +static unw_word
    5.69 +unw_decode_uleb128 (unsigned char **dpp)
    5.70 +{
    5.71 +  unsigned shift = 0;
    5.72 +  unw_word byte, result = 0;
    5.73 +  unsigned char *bp = *dpp;
    5.74 +
    5.75 +  while (1)
    5.76 +    {
    5.77 +      byte = *bp++;
    5.78 +      result |= (byte & 0x7f) << shift;
    5.79 +      if ((byte & 0x80) == 0)
    5.80 +	break;
    5.81 +      shift += 7;
    5.82 +    }
    5.83 +  *dpp = bp;
    5.84 +  return result;
    5.85 +}
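
The variable-length operands consumed by the decoders below all use this ULEB128 scheme: 7 payload bits per byte, least-significant group first, high bit set on every byte except the last. A self-contained sketch, not part of this changeset and with a made-up helper name, decoding the three-byte sequence 0xe5 0x8e 0x26 back to 624485:

#include <stdio.h>

/* Same ULEB128 scheme as unw_decode_uleb128 above, written standalone. */
static unsigned long long uleb128_decode(const unsigned char **dpp)
{
	const unsigned char *bp = *dpp;
	unsigned long long byte, result = 0;
	unsigned shift = 0;

	do {
		byte = *bp++;
		result |= (byte & 0x7f) << shift;
		shift += 7;
	} while (byte & 0x80);

	*dpp = bp;
	return result;
}

int main(void)
{
	const unsigned char buf[] = { 0xe5, 0x8e, 0x26 };	/* encodes 624485 */
	const unsigned char *dp = buf;

	printf("%llu\n", uleb128_decode(&dp));	/* prints 624485 */
	return 0;
}
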
    5.86 +
    5.87 +static unsigned char *
    5.88 +unw_decode_x1 (unsigned char *dp, unsigned char code, void *arg)
    5.89 +{
    5.90 +  unsigned char byte1, abreg;
    5.91 +  unw_word t, off;
    5.92 +
    5.93 +  byte1 = *dp++;
    5.94 +  t = unw_decode_uleb128 (&dp);
    5.95 +  off = unw_decode_uleb128 (&dp);
    5.96 +  abreg = (byte1 & 0x7f);
    5.97 +  if (byte1 & 0x80)
    5.98 +	  UNW_DEC_SPILL_SPREL(X1, t, abreg, off, arg);
    5.99 +  else
   5.100 +	  UNW_DEC_SPILL_PSPREL(X1, t, abreg, off, arg);
   5.101 +  return dp;
   5.102 +}
   5.103 +
   5.104 +static unsigned char *
   5.105 +unw_decode_x2 (unsigned char *dp, unsigned char code, void *arg)
   5.106 +{
   5.107 +  unsigned char byte1, byte2, abreg, x, ytreg;
   5.108 +  unw_word t;
   5.109 +
   5.110 +  byte1 = *dp++; byte2 = *dp++;
   5.111 +  t = unw_decode_uleb128 (&dp);
   5.112 +  abreg = (byte1 & 0x7f);
   5.113 +  ytreg = byte2;
   5.114 +  x = (byte1 >> 7) & 1;
   5.115 +  if ((byte1 & 0x80) == 0 && ytreg == 0)
   5.116 +    UNW_DEC_RESTORE(X2, t, abreg, arg);
   5.117 +  else
   5.118 +    UNW_DEC_SPILL_REG(X2, t, abreg, x, ytreg, arg);
   5.119 +  return dp;
   5.120 +}
   5.121 +
   5.122 +static unsigned char *
   5.123 +unw_decode_x3 (unsigned char *dp, unsigned char code, void *arg)
   5.124 +{
   5.125 +  unsigned char byte1, byte2, abreg, qp;
   5.126 +  unw_word t, off;
   5.127 +
   5.128 +  byte1 = *dp++; byte2 = *dp++;
   5.129 +  t = unw_decode_uleb128 (&dp);
   5.130 +  off = unw_decode_uleb128 (&dp);
   5.131 +
   5.132 +  qp = (byte1 & 0x3f);
   5.133 +  abreg = (byte2 & 0x7f);
   5.134 +
   5.135 +  if (byte1 & 0x80)
   5.136 +    UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg);
   5.137 +  else
   5.138 +    UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg);
   5.139 +  return dp;
   5.140 +}
   5.141 +
   5.142 +static unsigned char *
   5.143 +unw_decode_x4 (unsigned char *dp, unsigned char code, void *arg)
   5.144 +{
   5.145 +  unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg;
   5.146 +  unw_word t;
   5.147 +
   5.148 +  byte1 = *dp++; byte2 = *dp++; byte3 = *dp++;
   5.149 +  t = unw_decode_uleb128 (&dp);
   5.150 +
   5.151 +  qp = (byte1 & 0x3f);
   5.152 +  abreg = (byte2 & 0x7f);
   5.153 +  x = (byte2 >> 7) & 1;
   5.154 +  ytreg = byte3;
   5.155 +
   5.156 +  if ((byte2 & 0x80) == 0 && byte3 == 0)
   5.157 +    UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg);
   5.158 +  else
   5.159 +    UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg);
   5.160 +  return dp;
   5.161 +}
   5.162 +
   5.163 +static unsigned char *
   5.164 +unw_decode_r1 (unsigned char *dp, unsigned char code, void *arg)
   5.165 +{
   5.166 +  int body = (code & 0x20) != 0;
   5.167 +  unw_word rlen;
   5.168 +
   5.169 +  rlen = (code & 0x1f);
   5.170 +  UNW_DEC_PROLOGUE(R1, body, rlen, arg);
   5.171 +  return dp;
   5.172 +}
   5.173 +
   5.174 +static unsigned char *
   5.175 +unw_decode_r2 (unsigned char *dp, unsigned char code, void *arg)
   5.176 +{
   5.177 +  unsigned char byte1, mask, grsave;
   5.178 +  unw_word rlen;
   5.179 +
   5.180 +  byte1 = *dp++;
   5.181 +
   5.182 +  mask = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
   5.183 +  grsave = (byte1 & 0x7f);
   5.184 +  rlen = unw_decode_uleb128 (&dp);
   5.185 +  UNW_DEC_PROLOGUE_GR(R2, rlen, mask, grsave, arg);
   5.186 +  return dp;
   5.187 +}
   5.188 +
   5.189 +static unsigned char *
   5.190 +unw_decode_r3 (unsigned char *dp, unsigned char code, void *arg)
   5.191 +{
   5.192 +  unw_word rlen;
   5.193 +
   5.194 +  rlen = unw_decode_uleb128 (&dp);
   5.195 +  UNW_DEC_PROLOGUE(R3, ((code & 0x3) == 1), rlen, arg);
   5.196 +  return dp;
   5.197 +}
   5.198 +
   5.199 +static unsigned char *
   5.200 +unw_decode_p1 (unsigned char *dp, unsigned char code, void *arg)
   5.201 +{
   5.202 +  unsigned char brmask = (code & 0x1f);
   5.203 +
   5.204 +  UNW_DEC_BR_MEM(P1, brmask, arg);
   5.205 +  return dp;
   5.206 +}
   5.207 +
   5.208 +static unsigned char *
   5.209 +unw_decode_p2_p5 (unsigned char *dp, unsigned char code, void *arg)
   5.210 +{
   5.211 +  if ((code & 0x10) == 0)
   5.212 +    {
   5.213 +      unsigned char byte1 = *dp++;
   5.214 +
   5.215 +      UNW_DEC_BR_GR(P2, ((code & 0xf) << 1) | ((byte1 >> 7) & 1),
   5.216 +		    (byte1 & 0x7f), arg);
   5.217 +    }
   5.218 +  else if ((code & 0x08) == 0)
   5.219 +    {
   5.220 +      unsigned char byte1 = *dp++, r, dst;
   5.221 +
   5.222 +      r = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
   5.223 +      dst = (byte1 & 0x7f);
   5.224 +      switch (r)
   5.225 +	{
   5.226 +	case 0: UNW_DEC_REG_GR(P3, UNW_REG_PSP, dst, arg); break;
   5.227 +	case 1: UNW_DEC_REG_GR(P3, UNW_REG_RP, dst, arg); break;
   5.228 +	case 2: UNW_DEC_REG_GR(P3, UNW_REG_PFS, dst, arg); break;
   5.229 +	case 3: UNW_DEC_REG_GR(P3, UNW_REG_PR, dst, arg); break;
   5.230 +	case 4: UNW_DEC_REG_GR(P3, UNW_REG_UNAT, dst, arg); break;
   5.231 +	case 5: UNW_DEC_REG_GR(P3, UNW_REG_LC, dst, arg); break;
   5.232 +	case 6: UNW_DEC_RP_BR(P3, dst, arg); break;
   5.233 +	case 7: UNW_DEC_REG_GR(P3, UNW_REG_RNAT, dst, arg); break;
   5.234 +	case 8: UNW_DEC_REG_GR(P3, UNW_REG_BSP, dst, arg); break;
   5.235 +	case 9: UNW_DEC_REG_GR(P3, UNW_REG_BSPSTORE, dst, arg); break;
   5.236 +	case 10: UNW_DEC_REG_GR(P3, UNW_REG_FPSR, dst, arg); break;
   5.237 +	case 11: UNW_DEC_PRIUNAT_GR(P3, dst, arg); break;
   5.238 +	default: UNW_DEC_BAD_CODE(r); break;
   5.239 +	}
   5.240 +    }
   5.241 +  else if ((code & 0x7) == 0)
   5.242 +    UNW_DEC_SPILL_MASK(P4, dp, arg);
   5.243 +  else if ((code & 0x7) == 1)
   5.244 +    {
   5.245 +      unw_word grmask, frmask, byte1, byte2, byte3;
   5.246 +
   5.247 +      byte1 = *dp++; byte2 = *dp++; byte3 = *dp++;
   5.248 +      grmask = ((byte1 >> 4) & 0xf);
   5.249 +      frmask = ((byte1 & 0xf) << 16) | (byte2 << 8) | byte3;
   5.250 +      UNW_DEC_FRGR_MEM(P5, grmask, frmask, arg);
   5.251 +    }
   5.252 +  else
   5.253 +    UNW_DEC_BAD_CODE(code);
   5.254 +  return dp;
   5.255 +}
   5.256 +
   5.257 +static unsigned char *
   5.258 +unw_decode_p6 (unsigned char *dp, unsigned char code, void *arg)
   5.259 +{
   5.260 +  int gregs = (code & 0x10) != 0;
   5.261 +  unsigned char mask = (code & 0x0f);
   5.262 +
   5.263 +  if (gregs)
   5.264 +    UNW_DEC_GR_MEM(P6, mask, arg);
   5.265 +  else
   5.266 +    UNW_DEC_FR_MEM(P6, mask, arg);
   5.267 +  return dp;
   5.268 +}
   5.269 +
   5.270 +static unsigned char *
   5.271 +unw_decode_p7_p10 (unsigned char *dp, unsigned char code, void *arg)
   5.272 +{
   5.273 +  unsigned char r, byte1, byte2;
   5.274 +  unw_word t, size;
   5.275 +
   5.276 +  if ((code & 0x10) == 0)
   5.277 +    {
   5.278 +      r = (code & 0xf);
   5.279 +      t = unw_decode_uleb128 (&dp);
   5.280 +      switch (r)
   5.281 +	{
   5.282 +	case 0:
   5.283 +	  size = unw_decode_uleb128 (&dp);
   5.284 +	  UNW_DEC_MEM_STACK_F(P7, t, size, arg);
   5.285 +	  break;
   5.286 +
   5.287 +	case 1: UNW_DEC_MEM_STACK_V(P7, t, arg); break;
   5.288 +	case 2: UNW_DEC_SPILL_BASE(P7, t, arg); break;
   5.289 +	case 3: UNW_DEC_REG_SPREL(P7, UNW_REG_PSP, t, arg); break;
   5.290 +	case 4: UNW_DEC_REG_WHEN(P7, UNW_REG_RP, t, arg); break;
   5.291 +	case 5: UNW_DEC_REG_PSPREL(P7, UNW_REG_RP, t, arg); break;
   5.292 +	case 6: UNW_DEC_REG_WHEN(P7, UNW_REG_PFS, t, arg); break;
   5.293 +	case 7: UNW_DEC_REG_PSPREL(P7, UNW_REG_PFS, t, arg); break;
   5.294 +	case 8: UNW_DEC_REG_WHEN(P7, UNW_REG_PR, t, arg); break;
   5.295 +	case 9: UNW_DEC_REG_PSPREL(P7, UNW_REG_PR, t, arg); break;
   5.296 +	case 10: UNW_DEC_REG_WHEN(P7, UNW_REG_LC, t, arg); break;
   5.297 +	case 11: UNW_DEC_REG_PSPREL(P7, UNW_REG_LC, t, arg); break;
   5.298 +	case 12: UNW_DEC_REG_WHEN(P7, UNW_REG_UNAT, t, arg); break;
   5.299 +	case 13: UNW_DEC_REG_PSPREL(P7, UNW_REG_UNAT, t, arg); break;
   5.300 +	case 14: UNW_DEC_REG_WHEN(P7, UNW_REG_FPSR, t, arg); break;
   5.301 +	case 15: UNW_DEC_REG_PSPREL(P7, UNW_REG_FPSR, t, arg); break;
   5.302 +	default: UNW_DEC_BAD_CODE(r); break;
   5.303 +	}
   5.304 +    }
   5.305 +  else
   5.306 +    {
   5.307 +      switch (code & 0xf)
   5.308 +	{
   5.309 +	case 0x0: /* p8 */
   5.310 +	  {
   5.311 +	    r = *dp++;
   5.312 +	    t = unw_decode_uleb128 (&dp);
   5.313 +	    switch (r)
   5.314 +	      {
   5.315 +	      case  1: UNW_DEC_REG_SPREL(P8, UNW_REG_RP, t, arg); break;
   5.316 +	      case  2: UNW_DEC_REG_SPREL(P8, UNW_REG_PFS, t, arg); break;
   5.317 +	      case  3: UNW_DEC_REG_SPREL(P8, UNW_REG_PR, t, arg); break;
   5.318 +	      case  4: UNW_DEC_REG_SPREL(P8, UNW_REG_LC, t, arg); break;
   5.319 +	      case  5: UNW_DEC_REG_SPREL(P8, UNW_REG_UNAT, t, arg); break;
   5.320 +	      case  6: UNW_DEC_REG_SPREL(P8, UNW_REG_FPSR, t, arg); break;
   5.321 +	      case  7: UNW_DEC_REG_WHEN(P8, UNW_REG_BSP, t, arg); break;
   5.322 +	      case  8: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSP, t, arg); break;
   5.323 +	      case  9: UNW_DEC_REG_SPREL(P8, UNW_REG_BSP, t, arg); break;
   5.324 +	      case 10: UNW_DEC_REG_WHEN(P8, UNW_REG_BSPSTORE, t, arg); break;
   5.325 +	      case 11: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
   5.326 +	      case 12: UNW_DEC_REG_SPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
   5.327 +	      case 13: UNW_DEC_REG_WHEN(P8, UNW_REG_RNAT, t, arg); break;
   5.328 +	      case 14: UNW_DEC_REG_PSPREL(P8, UNW_REG_RNAT, t, arg); break;
   5.329 +	      case 15: UNW_DEC_REG_SPREL(P8, UNW_REG_RNAT, t, arg); break;
   5.330 +	      case 16: UNW_DEC_PRIUNAT_WHEN_GR(P8, t, arg); break;
   5.331 +	      case 17: UNW_DEC_PRIUNAT_PSPREL(P8, t, arg); break;
   5.332 +	      case 18: UNW_DEC_PRIUNAT_SPREL(P8, t, arg); break;
   5.333 +	      case 19: UNW_DEC_PRIUNAT_WHEN_MEM(P8, t, arg); break;
   5.334 +	      default: UNW_DEC_BAD_CODE(r); break;
   5.335 +	    }
   5.336 +	  }
   5.337 +	  break;
   5.338 +
   5.339 +	case 0x1:
   5.340 +	  byte1 = *dp++; byte2 = *dp++;
   5.341 +	  UNW_DEC_GR_GR(P9, (byte1 & 0xf), (byte2 & 0x7f), arg);
   5.342 +	  break;
   5.343 +
   5.344 +	case 0xf: /* p10 */
   5.345 +	  byte1 = *dp++; byte2 = *dp++;
   5.346 +	  UNW_DEC_ABI(P10, byte1, byte2, arg);
   5.347 +	  break;
   5.348 +
   5.349 +	case 0x9:
   5.350 +	  return unw_decode_x1 (dp, code, arg);
   5.351 +
   5.352 +	case 0xa:
   5.353 +	  return unw_decode_x2 (dp, code, arg);
   5.354 +
   5.355 +	case 0xb:
   5.356 +	  return unw_decode_x3 (dp, code, arg);
   5.357 +
   5.358 +	case 0xc:
   5.359 +	  return unw_decode_x4 (dp, code, arg);
   5.360 +
   5.361 +	default:
   5.362 +	  UNW_DEC_BAD_CODE(code);
   5.363 +	  break;
   5.364 +	}
   5.365 +    }
   5.366 +  return dp;
   5.367 +}
   5.368 +
   5.369 +static unsigned char *
   5.370 +unw_decode_b1 (unsigned char *dp, unsigned char code, void *arg)
   5.371 +{
   5.372 +  unw_word label = (code & 0x1f);
   5.373 +
   5.374 +  if ((code & 0x20) != 0)
   5.375 +    UNW_DEC_COPY_STATE(B1, label, arg);
   5.376 +  else
   5.377 +    UNW_DEC_LABEL_STATE(B1, label, arg);
   5.378 +  return dp;
   5.379 +}
   5.380 +
   5.381 +static unsigned char *
   5.382 +unw_decode_b2 (unsigned char *dp, unsigned char code, void *arg)
   5.383 +{
   5.384 +  unw_word t;
   5.385 +
   5.386 +  t = unw_decode_uleb128 (&dp);
   5.387 +  UNW_DEC_EPILOGUE(B2, t, (code & 0x1f), arg);
   5.388 +  return dp;
   5.389 +}
   5.390 +
   5.391 +static unsigned char *
   5.392 +unw_decode_b3_x4 (unsigned char *dp, unsigned char code, void *arg)
   5.393 +{
   5.394 +  unw_word t, ecount, label;
   5.395 +
   5.396 +  if ((code & 0x10) == 0)
   5.397 +    {
   5.398 +      t = unw_decode_uleb128 (&dp);
   5.399 +      ecount = unw_decode_uleb128 (&dp);
   5.400 +      UNW_DEC_EPILOGUE(B3, t, ecount, arg);
   5.401 +    }
   5.402 +  else if ((code & 0x07) == 0)
   5.403 +    {
   5.404 +      label = unw_decode_uleb128 (&dp);
   5.405 +      if ((code & 0x08) != 0)
   5.406 +	UNW_DEC_COPY_STATE(B4, label, arg);
   5.407 +      else
   5.408 +	UNW_DEC_LABEL_STATE(B4, label, arg);
   5.409 +    }
   5.410 +  else
   5.411 +    switch (code & 0x7)
   5.412 +      {
   5.413 +      case 1: return unw_decode_x1 (dp, code, arg);
   5.414 +      case 2: return unw_decode_x2 (dp, code, arg);
   5.415 +      case 3: return unw_decode_x3 (dp, code, arg);
   5.416 +      case 4: return unw_decode_x4 (dp, code, arg);
   5.417 +      default: UNW_DEC_BAD_CODE(code); break;
   5.418 +      }
   5.419 +  return dp;
   5.420 +}
   5.421 +
   5.422 +typedef unsigned char *(*unw_decoder) (unsigned char *, unsigned char, void *);
   5.423 +
   5.424 +static unw_decoder unw_decode_table[2][8] =
   5.425 +{
   5.426 +  /* prologue table: */
   5.427 +  {
   5.428 +    unw_decode_r1,	/* 0 */
   5.429 +    unw_decode_r1,
   5.430 +    unw_decode_r2,
   5.431 +    unw_decode_r3,
   5.432 +    unw_decode_p1,	/* 4 */
   5.433 +    unw_decode_p2_p5,
   5.434 +    unw_decode_p6,
   5.435 +    unw_decode_p7_p10
   5.436 +  },
   5.437 +  {
   5.438 +    unw_decode_r1,	/* 0 */
   5.439 +    unw_decode_r1,
   5.440 +    unw_decode_r2,
   5.441 +    unw_decode_r3,
   5.442 +    unw_decode_b1,	/* 4 */
   5.443 +    unw_decode_b1,
   5.444 +    unw_decode_b2,
   5.445 +    unw_decode_b3_x4
   5.446 +  }
   5.447 +};
   5.448 +
   5.449 +/*
   5.450 + * Decode one descriptor and return address of next descriptor.
   5.451 + */
   5.452 +static inline unsigned char *
   5.453 +unw_decode (unsigned char *dp, int inside_body, void *arg)
   5.454 +{
   5.455 +  unw_decoder decoder;
   5.456 +  unsigned char code;
   5.457 +
   5.458 +  code = *dp++;
   5.459 +  decoder = unw_decode_table[inside_body][code >> 5];
   5.460 +  dp = (*decoder) (dp, code, arg);
   5.461 +  return dp;
   5.462 +}
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/arch/ia64/linux-xen/unwind_i.h	Fri Dec 30 23:40:13 2005 -0600
     6.3 @@ -0,0 +1,164 @@
     6.4 +/*
     6.5 + * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co
     6.6 + *	David Mosberger-Tang <davidm@hpl.hp.com>
     6.7 + *
     6.8 + * Kernel unwind support.
     6.9 + */
    6.10 +
    6.11 +#define UNW_VER(x)		((x) >> 48)
    6.12 +#define UNW_FLAG_MASK		0x0000ffff00000000
    6.13 +#define UNW_FLAG_OSMASK		0x0000f00000000000
    6.14 +#define UNW_FLAG_EHANDLER(x)	((x) & 0x0000000100000000L)
    6.15 +#define UNW_FLAG_UHANDLER(x)	((x) & 0x0000000200000000L)
    6.16 +#define UNW_LENGTH(x)		((x) & 0x00000000ffffffffL)
    6.17 +
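
These macros pick apart the 64-bit header word of an unwind info block (see struct unw_info_block below): the version sits in bits 63..48, the flags in bits 47..32, and the low 32 bits give the length of the descriptor area in 8-byte words (which is why create_gate_table above multiplies UNW_LENGTH() by 8). A throwaway demo, not part of this changeset, decoding a fabricated header word:

#include <stdio.h>

/* Local copies of the header-word macros above, for a standalone demo. */
#define UNW_VER(x)		((x) >> 48)
#define UNW_FLAG_EHANDLER(x)	((x) & 0x0000000100000000L)
#define UNW_FLAG_UHANDLER(x)	((x) & 0x0000000200000000L)
#define UNW_LENGTH(x)		((x) & 0x00000000ffffffffL)

int main(void)
{
	/* Fabricated header: version 1, exception handler present,
	 * three 8-byte descriptor words follow. */
	unsigned long long header = (1ULL << 48) | (1ULL << 32) | 3;

	printf("version %llu, ehandler %s, uhandler %s, %llu descriptor words\n",
	       UNW_VER(header),
	       UNW_FLAG_EHANDLER(header) ? "yes" : "no",
	       UNW_FLAG_UHANDLER(header) ? "yes" : "no",
	       UNW_LENGTH(header));
	return 0;
}
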
    6.18 +enum unw_register_index {
    6.19 +	/* primary unat: */
    6.20 +	UNW_REG_PRI_UNAT_GR,
    6.21 +	UNW_REG_PRI_UNAT_MEM,
    6.22 +
    6.23 +	/* register stack */
    6.24 +	UNW_REG_BSP,					/* register stack pointer */
    6.25 +	UNW_REG_BSPSTORE,
    6.26 +	UNW_REG_PFS,					/* previous function state */
    6.27 +	UNW_REG_RNAT,
    6.28 +	/* memory stack */
    6.29 +	UNW_REG_PSP,					/* previous memory stack pointer */
    6.30 +	/* return pointer: */
    6.31 +	UNW_REG_RP,
    6.32 +
    6.33 +	/* preserved registers: */
    6.34 +	UNW_REG_R4, UNW_REG_R5, UNW_REG_R6, UNW_REG_R7,
    6.35 +	UNW_REG_UNAT, UNW_REG_PR, UNW_REG_LC, UNW_REG_FPSR,
    6.36 +	UNW_REG_B1, UNW_REG_B2, UNW_REG_B3, UNW_REG_B4, UNW_REG_B5,
    6.37 +	UNW_REG_F2, UNW_REG_F3, UNW_REG_F4, UNW_REG_F5,
    6.38 +	UNW_REG_F16, UNW_REG_F17, UNW_REG_F18, UNW_REG_F19,
    6.39 +	UNW_REG_F20, UNW_REG_F21, UNW_REG_F22, UNW_REG_F23,
    6.40 +	UNW_REG_F24, UNW_REG_F25, UNW_REG_F26, UNW_REG_F27,
    6.41 +	UNW_REG_F28, UNW_REG_F29, UNW_REG_F30, UNW_REG_F31,
    6.42 +	UNW_NUM_REGS
    6.43 +};
    6.44 +
    6.45 +struct unw_info_block {
    6.46 +	u64 header;
    6.47 +	u64 desc[0];		/* unwind descriptors */
    6.48 +	/* personality routine and language-specific data follow behind descriptors */
    6.49 +};
    6.50 +
    6.51 +struct unw_table {
    6.52 +	struct unw_table *next;		/* must be first member! */
    6.53 +	const char *name;
    6.54 +	unsigned long gp;		/* global pointer for this load-module */
    6.55 +	unsigned long segment_base;	/* base for offsets in the unwind table entries */
    6.56 +	unsigned long start;
    6.57 +	unsigned long end;
    6.58 +	const struct unw_table_entry *array;
    6.59 +	unsigned long length;
    6.60 +};
    6.61 +
    6.62 +enum unw_where {
    6.63 +	UNW_WHERE_NONE,			/* register isn't saved at all */
    6.64 +	UNW_WHERE_GR,			/* register is saved in a general register */
    6.65 +	UNW_WHERE_FR,			/* register is saved in a floating-point register */
    6.66 +	UNW_WHERE_BR,			/* register is saved in a branch register */
    6.67 +	UNW_WHERE_SPREL,		/* register is saved on memstack (sp-relative) */
    6.68 +	UNW_WHERE_PSPREL,		/* register is saved on memstack (psp-relative) */
    6.69 +	/*
    6.70 +	 * At the end of each prologue these locations get resolved to
    6.71 +	 * UNW_WHERE_PSPREL and UNW_WHERE_GR, respectively:
    6.72 +	 */
    6.73 +	UNW_WHERE_SPILL_HOME,		/* register is saved in its spill home */
    6.74 +	UNW_WHERE_GR_SAVE		/* register is saved in next general register */
    6.75 +};
    6.76 +
    6.77 +#define UNW_WHEN_NEVER	0x7fffffff
    6.78 +
    6.79 +struct unw_reg_info {
    6.80 +	unsigned long val;		/* save location: register number or offset */
    6.81 +	enum unw_where where;		/* where the register gets saved */
    6.82 +	int when;			/* when the register gets saved */
    6.83 +};
    6.84 +
    6.85 +struct unw_reg_state {
    6.86 +	struct unw_reg_state *next;		/* next (outer) element on state stack */
    6.87 +	struct unw_reg_info reg[UNW_NUM_REGS];	/* register save locations */
    6.88 +};
    6.89 +
    6.90 +struct unw_labeled_state {
    6.91 +	struct unw_labeled_state *next;		/* next labeled state (or NULL) */
    6.92 +	unsigned long label;			/* label for this state */
    6.93 +	struct unw_reg_state saved_state;
    6.94 +};
    6.95 +
    6.96 +struct unw_state_record {
    6.97 +	unsigned int first_region : 1;	/* is this the first region? */
    6.98 +	unsigned int done : 1;		/* are we done scanning descriptors? */
    6.99 +	unsigned int any_spills : 1;	/* got any register spills? */
   6.100 +	unsigned int in_body : 1;	/* are we inside a body (as opposed to a prologue)? */
   6.101 +	unsigned long flags;		/* see UNW_FLAG_* in unwind.h */
   6.102 +
   6.103 +	u8 *imask;			/* imask of spill_mask record or NULL */
   6.104 +	unsigned long pr_val;		/* predicate values */
   6.105 +	unsigned long pr_mask;		/* predicate mask */
   6.106 +	long spill_offset;		/* psp-relative offset for spill base */
   6.107 +	int region_start;
   6.108 +	int region_len;
   6.109 +	int epilogue_start;
   6.110 +	int epilogue_count;
   6.111 +	int when_target;
   6.112 +
   6.113 +	u8 gr_save_loc;			/* next general register to use for saving a register */
   6.114 +	u8 return_link_reg;		/* branch register in which the return link is passed */
   6.115 +
   6.116 +	struct unw_labeled_state *labeled_states;	/* list of all labeled states */
   6.117 +	struct unw_reg_state curr;	/* current state */
   6.118 +};
   6.119 +
   6.120 +enum unw_nat_type {
   6.121 +	UNW_NAT_NONE,		/* NaT not represented */
   6.122 +	UNW_NAT_VAL,		/* NaT represented by NaT value (fp reg) */
   6.123 +	UNW_NAT_MEMSTK,		/* NaT value is in unat word at offset OFF  */
   6.124 +	UNW_NAT_REGSTK		/* NaT is in rnat */
   6.125 +};
   6.126 +
   6.127 +enum unw_insn_opcode {
   6.128 +	UNW_INSN_ADD,			/* s[dst] += val */
   6.129 +	UNW_INSN_ADD_PSP,		/* s[dst] = (s.psp + val) */
   6.130 +	UNW_INSN_ADD_SP,		/* s[dst] = (s.sp + val) */
   6.131 +	UNW_INSN_MOVE,			/* s[dst] = s[val] */
   6.132 +	UNW_INSN_MOVE2,			/* s[dst] = s[val]; s[dst+1] = s[val+1] */
   6.133 +	UNW_INSN_MOVE_STACKED,		/* s[dst] = ia64_rse_skip(*s.bsp, val) */
   6.134 +	UNW_INSN_SETNAT_MEMSTK,		/* s[dst+1].nat.type = MEMSTK;
   6.135 +					   s[dst+1].nat.off = *s.pri_unat - s[dst] */
   6.136 +	UNW_INSN_SETNAT_TYPE,		/* s[dst+1].nat.type = val */
   6.137 +	UNW_INSN_LOAD,			/* s[dst] = *s[val] */
   6.138 +	UNW_INSN_MOVE_SCRATCH,		/* s[dst] = scratch reg "val" */
   6.139 +	UNW_INSN_MOVE_CONST,            /* s[dst] = constant reg "val" */
   6.140 +};
   6.141 +
   6.142 +struct unw_insn {
   6.143 +	unsigned int opc	:  4;
   6.144 +	unsigned int dst	:  9;
   6.145 +	signed int val		: 19;
   6.146 +};
   6.147 +
   6.148 +/*
   6.149 + * Preserved general static registers (r4-r7) give rise to two script
   6.150 + * instructions; everything else yields at most one instruction; at
   6.151 + * the end of the script, the psp gets popped, accounting for one more
   6.152 + * instruction.
   6.153 + */
   6.154 +#define UNW_MAX_SCRIPT_LEN	(UNW_NUM_REGS + 5)
   6.155 +
   6.156 +struct unw_script {
   6.157 +	unsigned long ip;		/* ip this script is for */
   6.158 +	unsigned long pr_mask;		/* mask of predicates script depends on */
   6.159 +	unsigned long pr_val;		/* predicate values this script is for */
   6.160 +	rwlock_t lock;
   6.161 +	unsigned int flags;		/* see UNW_FLAG_* in unwind.h */
   6.162 +	unsigned short lru_chain;	/* used for least-recently-used chain */
   6.163 +	unsigned short coll_chain;	/* used for hash collisions */
   6.164 +	unsigned short hint;		/* hint for next script to try (or -1) */
   6.165 +	unsigned short count;		/* number of instructions in script */
   6.166 +	struct unw_insn insn[UNW_MAX_SCRIPT_LEN];
   6.167 +};
     7.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Fri Dec 30 16:11:08 2005 -0600
     7.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Fri Dec 30 23:40:13 2005 -0600
     7.3 @@ -53,6 +53,7 @@
     7.4  #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
     7.5  
     7.6  
     7.7 +extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
     7.8  extern void rnat_consumption (VCPU *vcpu);
     7.9  #define DOMN_PAL_REQUEST    0x110000
    7.10  
    7.11 @@ -185,8 +186,11 @@ vmx_ia64_handle_break (unsigned long ifa
    7.12  	}else if(iim == DOMN_PAL_REQUEST){
    7.13          pal_emul(current);
    7.14  		vmx_vcpu_increment_iip(current);
    7.15 -    }  else
    7.16 +    } else {
    7.17 +		if (iim == 0)
    7.18 +			die_if_kernel("bug check", regs, iim);
    7.19  		vmx_reflect_interruption(ifa,isr,iim,11,regs);
    7.20 +    }
    7.21  }
    7.22  
    7.23  
     8.1 --- a/xen/arch/ia64/xen/hyperprivop.S	Fri Dec 30 16:11:08 2005 -0600
     8.2 +++ b/xen/arch/ia64/xen/hyperprivop.S	Fri Dec 30 23:40:13 2005 -0600
     8.3 @@ -543,6 +543,13 @@ GLOBAL_ENTRY(fast_break_reflect)
     8.4  	extr.u r21=r30,IA64_PSR_PP_BIT,1 ;;
     8.5  	cmp.ne p7,p0=r21,r0 ;;
     8.6  (p7)	br.spnt.few dispatch_break_fault ;;
     8.7 +        movl r20=IA64_PSR_CPL ;; 
     8.8 +        and r22=r20,r30 ;;
     8.9 +        cmp.ne p7,p0=r22,r0
    8.10 +(p7)    br.spnt.many 1f ;;
    8.11 +        cmp.eq p7,p0=r17,r0
    8.12 +(p7)    br.spnt.few dispatch_break_fault ;;
    8.13 +1:
    8.14  #if 1 /* special handling in case running on simulator */
    8.15  	movl r20=first_break;;
    8.16  	ld4 r23=[r20];;
     9.1 --- a/xen/arch/ia64/xen/ivt.S	Fri Dec 30 16:11:08 2005 -0600
     9.2 +++ b/xen/arch/ia64/xen/ivt.S	Fri Dec 30 23:40:13 2005 -0600
     9.3 @@ -839,6 +839,8 @@ ENTRY(break_fault)
     9.4  	mov r17=cr.iim
     9.5  	mov r31=pr
     9.6  	;;
     9.7 +	cmp.eq p7,p0=r17,r0
     9.8 +(p7)	br.spnt.few dispatch_break_fault ;;
     9.9  	movl r18=XSI_PSR_IC
    9.10  	;;
    9.11  	ld8 r19=[r18]
    10.1 --- a/xen/arch/ia64/xen/process.c	Fri Dec 30 16:11:08 2005 -0600
    10.2 +++ b/xen/arch/ia64/xen/process.c	Fri Dec 30 23:40:13 2005 -0600
    10.3 @@ -33,6 +33,7 @@
    10.4  #include <xen/multicall.h>
    10.5  
    10.6  extern unsigned long vcpu_get_itir_on_fault(struct vcpu *, UINT64);
    10.7 +extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
    10.8  
    10.9  extern unsigned long dom0_start, dom0_size;
   10.10  
   10.11 @@ -686,6 +687,8 @@ ia64_handle_break (unsigned long ifa, st
   10.12  			vcpu_increment_iip(current);
   10.13  	}
   10.14  	else {
   10.15 +		if (iim == 0)
   10.16 +			die_if_kernel("bug check", regs, iim);
   10.17  		PSCB(v,iim) = iim;
   10.18  		reflect_interruption(isr,regs,IA64_BREAK_VECTOR);
   10.19  	}
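
Both this hunk and the matching one in vmx_process.c above key on a break immediate (iim) of zero. For illustration only, and not part of this changeset: a BUG-style check on ia64 can raise exactly that condition with a break 0 instruction, which these hunks now route to die_if_kernel() rather than reflecting to the guest; the function name below is made up.

/* Illustration only: raise the ia64 "break" fault with immediate 0,
 * the condition the hunks above hand to die_if_kernel(). */
static inline void my_bug_trap(void)
{
	asm volatile ("break 0");
}
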
    11.1 --- a/xen/arch/ia64/xen/xenmisc.c	Fri Dec 30 16:11:08 2005 -0600
    11.2 +++ b/xen/arch/ia64/xen/xenmisc.c	Fri Dec 30 23:40:13 2005 -0600
    11.3 @@ -25,7 +25,6 @@ unsigned long wait_init_idle;
    11.4  int phys_proc_id[NR_CPUS];
    11.5  unsigned long loops_per_jiffy = (1<<12);	// from linux/init/main.c
    11.6  
    11.7 -void unw_init(void) { printf("unw_init() skipped (NEED FOR KERNEL UNWIND)\n"); }
    11.8  void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
    11.9  void ia64_mca_cpu_init(void *x) { }
   11.10  void ia64_patch_mckinley_e9(unsigned long a, unsigned long b) { }
   11.11 @@ -180,11 +179,6 @@ void pgtable_quicklist_free(void *pgtabl
   11.12  // from arch/ia64/traps.c
   11.13  ///////////////////////////////
   11.14  
   11.15 -void show_registers(struct pt_regs *regs)
   11.16 -{
   11.17 -	printf("*** ADD REGISTER DUMP HERE FOR DEBUGGING\n");
   11.18 -}
   11.19 -
   11.20  int is_kernel_text(unsigned long addr)
   11.21  {
   11.22  	extern char _stext[], _etext[];
   11.23 @@ -236,7 +230,13 @@ void sys_exit(void)
   11.24  
   11.25  void die_if_kernel(char *str, struct pt_regs *regs, long err) /* __attribute__ ((noreturn)) */
   11.26  {
   11.27 -	printk("die_if_kernel: called, not implemented\n");
   11.28 +	if (user_mode(regs))
   11.29 +		return;
   11.30 +
   11.31 +	printk("%s: %s %ld\n", __func__, str, err);
   11.32 +	debugtrace_dump();
   11.33 +	show_registers(regs);
   11.34 +	domain_crash_synchronous();
   11.35  }
   11.36  
   11.37  long