ia64/xen-unstable

changeset 8498:f22ea989389d

merge
author kaf24@firebug.cl.cam.ac.uk
date Tue Jan 03 19:06:14 2006 +0100 (2006-01-03)
parents 9e03e60f2d46 f7c2cbb5368f
children d186157615d6
files
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/drivers/xenia64_init.c	Tue Jan 03 16:57:41 2006 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/drivers/xenia64_init.c	Tue Jan 03 19:06:14 2006 +0100
     1.3 @@ -25,8 +25,9 @@ int xen_init(void)
     1.4  
     1.5  	xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
     1.6  	xen_start_info->flags = s->arch.flags;
     1.7 -	printk("Running on Xen! start_info_pfn=0x%lx lags=0x%x\n",
     1.8 -		s->arch.start_info_pfn, xen_start_info->flags);
     1.9 +	printk("Running on Xen! start_info_pfn=0x%lx nr_pages=%d flags=0x%x\n",
    1.10 +		s->arch.start_info_pfn, xen_start_info->nr_pages,
    1.11 +		xen_start_info->flags);
    1.12  
    1.13  	evtchn_init();
    1.14  	initialized = 1;
     2.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypercall.h	Tue Jan 03 16:57:41 2006 +0000
     2.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypercall.h	Tue Jan 03 19:06:14 2006 +0100
     2.3 @@ -372,7 +372,7 @@ HYPERVISOR_memory_op(
     2.4      int ret;
     2.5      __asm__ __volatile__ ( ";; mov r14=%2 ; mov r15=%3 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
     2.6          : "=r" (ret)
     2.7 -        : "i" (__HYPERVISOR_console_io), "r"(cmd), "r"(arg)
     2.8 +        : "i" (__HYPERVISOR_memory_op), "r"(cmd), "r"(arg)
     2.9          : "r14","r15","r2","r8","memory" );
    2.10      return ret;
    2.11  }
     3.1 --- a/tools/libxc/xc_ia64_stubs.c	Tue Jan 03 16:57:41 2006 +0000
     3.2 +++ b/tools/libxc/xc_ia64_stubs.c	Tue Jan 03 19:06:14 2006 +0100
     3.3 @@ -627,6 +627,7 @@ int xc_vmx_build(int xc_handle,
     3.4                   unsigned int control_evtchn,
     3.5                   unsigned int lapic,
     3.6                   unsigned int vcpus,
     3.7 +                 unsigned int acpi,
     3.8                   unsigned int store_evtchn,
     3.9                   unsigned long *store_mfn)
    3.10  {
     4.1 --- a/tools/libxc/xc_linux_build.c	Tue Jan 03 16:57:41 2006 +0000
     4.2 +++ b/tools/libxc/xc_linux_build.c	Tue Jan 03 19:06:14 2006 +0100
     4.3 @@ -393,10 +393,14 @@ static int setup_guest(int xc_handle,
     4.4      start_info->store_evtchn = store_evtchn;
     4.5      start_info->console_mfn   = nr_pages - 1;
     4.6      start_info->console_evtchn = console_evtchn;
     4.7 +    start_info->nr_pages       = nr_pages;	// FIXME?: nr_pages - 2 ????
     4.8      if ( initrd_len != 0 )
     4.9      {
    4.10          ctxt->initrd.start    = vinitrd_start;
    4.11          ctxt->initrd.size     = initrd_len;
    4.12 +    } else {
    4.13 +        ctxt->initrd.start    = 0;
    4.14 +        ctxt->initrd.size     = 0;
    4.15      }
    4.16      strncpy((char *)ctxt->cmdline, cmdline, IA64_COMMAND_LINE_SIZE);
    4.17      ctxt->cmdline[IA64_COMMAND_LINE_SIZE-1] = '\0';
     5.1 --- a/xen/arch/ia64/Makefile	Tue Jan 03 16:57:41 2006 +0000
     5.2 +++ b/xen/arch/ia64/Makefile	Tue Jan 03 19:06:14 2006 +0100
     5.3 @@ -23,6 +23,13 @@ OBJS +=	bitop.o clear_page.o flush.o cop
     5.4  	__divsi3.o __udivsi3.o __modsi3.o __umodsi3.o			\
     5.5  	__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o
     5.6  
     5.7 +# xen stack unwinder
     5.8 +# unwind_decoder.c is included in unwind.c
     5.9 +OBJS += unwind.o
    5.10 +#unwind.o: CFLAGS += -DUNW_DEBUG=4
    5.11 +
    5.12 +OBJS += process-linux-xen.o
    5.13 +
    5.14  # perfmon.o
    5.15  # unwind.o needed for kernel unwinding (rare)
    5.16  
    5.17 @@ -31,11 +38,26 @@ OBJS := $(subst $(TARGET_ARCH)/asm-offse
    5.18  # remove following line if not privifying in memory
    5.19  # OBJS += privify.o
    5.20  
    5.21 -default: $(OBJS) head.o xen.lds.s
    5.22 -	$(LD) -r -o arch.o $(OBJS)
    5.23 +default: $(TARGET)
    5.24 +
    5.25 +$(CURDIR)/arch.o: $(OBJS)
    5.26 +	$(LD) -r -o $@ $(OBJS)
    5.27 +
    5.28 +$(TARGET)-syms: $(ALL_OBJS) head.o xen.lds.s
    5.29  	$(LD) $(LDFLAGS) -T $(BASEDIR)/arch/$(TARGET_ARCH)/xen.lds.s -N \
    5.30 -		-Map map.out head.o $(ALL_OBJS) -o $(TARGET)-syms
    5.31 -	$(OBJCOPY) -R .note -R .comment -S $(TARGET)-syms $(TARGET)
    5.32 +		-Map map.out head.o $(ALL_OBJS) -o $@
    5.33 +	$(NM) -n $@ | $(BASEDIR)/tools/symbols > $(BASEDIR)/xen-syms.S
    5.34 +	$(MAKE) $(BASEDIR)/xen-syms.o
    5.35 +	$(LD) $(LDFLAGS) -T $(BASEDIR)/arch/$(TARGET_ARCH)/xen.lds.s -N \
    5.36 +		-Map map.out head.o $(ALL_OBJS) $(BASEDIR)/xen-syms.o -o $@
    5.37 +	$(NM) -n $@ | $(BASEDIR)/tools/symbols >$(BASEDIR)/xen-syms.S
    5.38 +	$(MAKE) $(BASEDIR)/xen-syms.o
    5.39 +	$(LD) $(LDFLAGS) -T $(BASEDIR)/arch/$(TARGET_ARCH)/xen.lds.s -N \
    5.40 +		-Map map.out head.o $(ALL_OBJS) $(BASEDIR)/xen-syms.o -o $@
    5.41 +	rm -f $(BASEDIR)/xen-syms.S $(BASEDIR)/xen-syms.o
    5.42 +
    5.43 +$(TARGET): $(TARGET)-syms
    5.44 +	$(OBJCOPY) -R .note -R .comment -S $(TARGET)-syms $@
    5.45  	$(NM) -n $(TARGET)-syms | grep -v '\( [aUw] \)\|\(__crc_\)\|\( \$[adt]\)'\
    5.46  		 > $(BASEDIR)/System.map
    5.47  
     6.1 --- a/xen/arch/ia64/linux-xen/entry.S	Tue Jan 03 16:57:41 2006 +0000
     6.2 +++ b/xen/arch/ia64/linux-xen/entry.S	Tue Jan 03 19:06:14 2006 +0100
     6.3 @@ -1417,7 +1417,6 @@ GLOBAL_ENTRY(ia64_prepare_handle_unalign
     6.4  	br.cond.sptk.many rp				// goes to ia64_leave_kernel
     6.5  END(ia64_prepare_handle_unaligned)
     6.6  
     6.7 -#ifndef XEN
     6.8  	//
     6.9  	// unw_init_running(void (*callback)(info, arg), void *arg)
    6.10  	//
    6.11 @@ -1463,6 +1462,7 @@ 1:	mov gp=loc2				// restore gp
    6.12  	br.ret.sptk.many rp
    6.13  END(unw_init_running)
    6.14  
    6.15 +#ifndef XEN
    6.16  	.rodata
    6.17  	.align 8
    6.18  	.globl sys_call_table
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xen/arch/ia64/linux-xen/process-linux-xen.c	Tue Jan 03 19:06:14 2006 +0100
     7.3 @@ -0,0 +1,848 @@
     7.4 +/*
     7.5 + * Architecture-specific setup.
     7.6 + *
     7.7 + * Copyright (C) 1998-2003 Hewlett-Packard Co
     7.8 + *	David Mosberger-Tang <davidm@hpl.hp.com>
     7.9 + * 04/11/17 Ashok Raj	<ashok.raj@intel.com> Added CPU Hotplug Support
    7.10 + */
    7.11 +#ifdef XEN
    7.12 +#include <xen/types.h>
    7.13 +#include <xen/lib.h>
    7.14 +#include <xen/symbols.h>
    7.15 +#include <xen/smp.h>
    7.16 +#include <asm/uaccess.h>
    7.17 +#include <asm/processor.h>
    7.18 +#include <asm/ptrace.h>
    7.19 +#include <asm/unwind.h>
    7.20 +#else
    7.21 +#define __KERNEL_SYSCALLS__	/* see <asm/unistd.h> */
    7.22 +#include <linux/config.h>
    7.23 +
    7.24 +#include <linux/cpu.h>
    7.25 +#include <linux/pm.h>
    7.26 +#include <linux/elf.h>
    7.27 +#include <linux/errno.h>
    7.28 +#include <linux/kallsyms.h>
    7.29 +#include <linux/kernel.h>
    7.30 +#include <linux/mm.h>
    7.31 +#include <linux/module.h>
    7.32 +#include <linux/notifier.h>
    7.33 +#include <linux/personality.h>
    7.34 +#include <linux/sched.h>
    7.35 +#include <linux/slab.h>
    7.36 +#include <linux/smp_lock.h>
    7.37 +#include <linux/stddef.h>
    7.38 +#include <linux/thread_info.h>
    7.39 +#include <linux/unistd.h>
    7.40 +#include <linux/efi.h>
    7.41 +#include <linux/interrupt.h>
    7.42 +#include <linux/delay.h>
    7.43 +#include <linux/kprobes.h>
    7.44 +
    7.45 +#include <asm/cpu.h>
    7.46 +#include <asm/delay.h>
    7.47 +#include <asm/elf.h>
    7.48 +#include <asm/ia32.h>
    7.49 +#include <asm/irq.h>
    7.50 +#include <asm/pgalloc.h>
    7.51 +#include <asm/processor.h>
    7.52 +#include <asm/sal.h>
    7.53 +#include <asm/tlbflush.h>
    7.54 +#include <asm/uaccess.h>
    7.55 +#include <asm/unwind.h>
    7.56 +#include <asm/user.h>
    7.57 +
    7.58 +#include "entry.h"
    7.59 +
    7.60 +#ifdef CONFIG_PERFMON
    7.61 +# include <asm/perfmon.h>
    7.62 +#endif
    7.63 +
    7.64 +#include "sigframe.h"
    7.65 +
    7.66 +void (*ia64_mark_idle)(int);
    7.67 +static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
    7.68 +
    7.69 +unsigned long boot_option_idle_override = 0;
    7.70 +EXPORT_SYMBOL(boot_option_idle_override);
    7.71 +#endif
    7.72 +
    7.73 +void
    7.74 +ia64_do_show_stack (struct unw_frame_info *info, void *arg)
    7.75 +{
    7.76 +	unsigned long ip, sp, bsp;
    7.77 +	char buf[128];			/* don't make it so big that it overflows the stack! */
    7.78 +
    7.79 +	printk("\nCall Trace:\n");
    7.80 +	do {
    7.81 +		unw_get_ip(info, &ip);
    7.82 +		if (ip == 0)
    7.83 +			break;
    7.84 +
    7.85 +		unw_get_sp(info, &sp);
    7.86 +		unw_get_bsp(info, &bsp);
    7.87 +		snprintf(buf, sizeof(buf),
    7.88 +			 " [<%016lx>] %%s\n"
    7.89 +			 "                                sp=%016lx bsp=%016lx\n",
    7.90 +			 ip, sp, bsp);
    7.91 +		print_symbol(buf, ip);
    7.92 +	} while (unw_unwind(info) >= 0);
    7.93 +}
    7.94 +
    7.95 +void
    7.96 +show_stack (struct task_struct *task, unsigned long *sp)
    7.97 +{
    7.98 +	if (!task)
    7.99 +		unw_init_running(ia64_do_show_stack, NULL);
   7.100 +	else {
   7.101 +		struct unw_frame_info info;
   7.102 +
   7.103 +		unw_init_from_blocked_task(&info, task);
   7.104 +		ia64_do_show_stack(&info, NULL);
   7.105 +	}
   7.106 +}
   7.107 +
   7.108 +#ifndef XEN
   7.109 +void
   7.110 +dump_stack (void)
   7.111 +{
   7.112 +	show_stack(NULL, NULL);
   7.113 +}
   7.114 +
   7.115 +EXPORT_SYMBOL(dump_stack);
   7.116 +#endif
   7.117 +
   7.118 +#ifdef XEN
   7.119 +void
   7.120 +show_registers(struct pt_regs *regs)
   7.121 +#else
   7.122 +void
   7.123 +show_regs (struct pt_regs *regs)
   7.124 +#endif
   7.125 +{
   7.126 +	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
   7.127 +
   7.128 +#ifndef XEN
   7.129 +	print_modules();
   7.130 +	printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm);
   7.131 +	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]    %s\n",
   7.132 +	       regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
   7.133 +#else
   7.134 +	printk("\nCPU %d\n", smp_processor_id());
   7.135 +	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]\n",
   7.136 +	       regs->cr_ipsr, regs->cr_ifs, ip);
   7.137 +#endif
   7.138 +	print_symbol("ip is at %s\n", ip);
   7.139 +	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
   7.140 +	       regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
   7.141 +	printk("rnat: %016lx bsps: %016lx pr  : %016lx\n",
   7.142 +	       regs->ar_rnat, regs->ar_bspstore, regs->pr);
   7.143 +	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
   7.144 +	       regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
   7.145 +	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
   7.146 +	printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0, regs->b6, regs->b7);
   7.147 +	printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
   7.148 +	       regs->f6.u.bits[1], regs->f6.u.bits[0],
   7.149 +	       regs->f7.u.bits[1], regs->f7.u.bits[0]);
   7.150 +	printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
   7.151 +	       regs->f8.u.bits[1], regs->f8.u.bits[0],
   7.152 +	       regs->f9.u.bits[1], regs->f9.u.bits[0]);
   7.153 +	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
   7.154 +	       regs->f10.u.bits[1], regs->f10.u.bits[0],
   7.155 +	       regs->f11.u.bits[1], regs->f11.u.bits[0]);
   7.156 +
   7.157 +	printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1, regs->r2, regs->r3);
   7.158 +	printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10);
   7.159 +	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, regs->r12, regs->r13);
   7.160 +	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16);
   7.161 +	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19);
   7.162 +	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22);
   7.163 +	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25);
   7.164 +	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28);
   7.165 +	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31);
   7.166 +
   7.167 +	if (user_mode(regs)) {
   7.168 +		/* print the stacked registers */
   7.169 +		unsigned long val, *bsp, ndirty;
   7.170 +		int i, sof, is_nat = 0;
   7.171 +
   7.172 +		sof = regs->cr_ifs & 0x7f;	/* size of frame */
   7.173 +		ndirty = (regs->loadrs >> 19);
   7.174 +		bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty);
   7.175 +		for (i = 0; i < sof; ++i) {
   7.176 +			get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i));
   7.177 +			printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val,
   7.178 +			       ((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
   7.179 +		}
   7.180 +	} else
   7.181 +		show_stack(NULL, NULL);
   7.182 +}
   7.183 +
   7.184 +#ifndef XEN
   7.185 +void
   7.186 +do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
   7.187 +{
   7.188 +	if (fsys_mode(current, &scr->pt)) {
   7.189 +		/* defer signal-handling etc. until we return to privilege-level 0.  */
   7.190 +		if (!ia64_psr(&scr->pt)->lp)
   7.191 +			ia64_psr(&scr->pt)->lp = 1;
   7.192 +		return;
   7.193 +	}
   7.194 +
   7.195 +#ifdef CONFIG_PERFMON
   7.196 +	if (current->thread.pfm_needs_checking)
   7.197 +		pfm_handle_work();
   7.198 +#endif
   7.199 +
   7.200 +	/* deal with pending signal delivery */
   7.201 +	if (test_thread_flag(TIF_SIGPENDING))
   7.202 +		ia64_do_signal(oldset, scr, in_syscall);
   7.203 +}
   7.204 +
   7.205 +static int pal_halt        = 1;
   7.206 +static int can_do_pal_halt = 1;
   7.207 +
   7.208 +static int __init nohalt_setup(char * str)
   7.209 +{
   7.210 +	pal_halt = can_do_pal_halt = 0;
   7.211 +	return 1;
   7.212 +}
   7.213 +__setup("nohalt", nohalt_setup);
   7.214 +
   7.215 +void
   7.216 +update_pal_halt_status(int status)
   7.217 +{
   7.218 +	can_do_pal_halt = pal_halt && status;
   7.219 +}
   7.220 +
   7.221 +/*
   7.222 + * We use this if we don't have any better idle routine..
   7.223 + */
   7.224 +void
   7.225 +default_idle (void)
   7.226 +{
   7.227 +	local_irq_enable();
   7.228 +	while (!need_resched())
   7.229 +		if (can_do_pal_halt)
   7.230 +			safe_halt();
   7.231 +		else
   7.232 +			cpu_relax();
   7.233 +}
   7.234 +
   7.235 +#ifdef CONFIG_HOTPLUG_CPU
   7.236 +/* We don't actually take CPU down, just spin without interrupts. */
   7.237 +static inline void play_dead(void)
   7.238 +{
   7.239 +	extern void ia64_cpu_local_tick (void);
   7.240 +	unsigned int this_cpu = smp_processor_id();
   7.241 +
   7.242 +	/* Ack it */
   7.243 +	__get_cpu_var(cpu_state) = CPU_DEAD;
   7.244 +
   7.245 +	max_xtp();
   7.246 +	local_irq_disable();
   7.247 +	idle_task_exit();
   7.248 +	ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
   7.249 +	/*
   7.250 +	 * The above is a point of no-return, the processor is
   7.251 +	 * expected to be in SAL loop now.
   7.252 +	 */
   7.253 +	BUG();
   7.254 +}
   7.255 +#else
   7.256 +static inline void play_dead(void)
   7.257 +{
   7.258 +	BUG();
   7.259 +}
   7.260 +#endif /* CONFIG_HOTPLUG_CPU */
   7.261 +
   7.262 +void cpu_idle_wait(void)
   7.263 +{
   7.264 +	unsigned int cpu, this_cpu = get_cpu();
   7.265 +	cpumask_t map;
   7.266 +
   7.267 +	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
   7.268 +	put_cpu();
   7.269 +
   7.270 +	cpus_clear(map);
   7.271 +	for_each_online_cpu(cpu) {
   7.272 +		per_cpu(cpu_idle_state, cpu) = 1;
   7.273 +		cpu_set(cpu, map);
   7.274 +	}
   7.275 +
   7.276 +	__get_cpu_var(cpu_idle_state) = 0;
   7.277 +
   7.278 +	wmb();
   7.279 +	do {
   7.280 +		ssleep(1);
   7.281 +		for_each_online_cpu(cpu) {
   7.282 +			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
   7.283 +				cpu_clear(cpu, map);
   7.284 +		}
   7.285 +		cpus_and(map, map, cpu_online_map);
   7.286 +	} while (!cpus_empty(map));
   7.287 +}
   7.288 +EXPORT_SYMBOL_GPL(cpu_idle_wait);
   7.289 +
   7.290 +void __attribute__((noreturn))
   7.291 +cpu_idle (void)
   7.292 +{
   7.293 +	void (*mark_idle)(int) = ia64_mark_idle;
   7.294 +
   7.295 +	/* endless idle loop with no priority at all */
   7.296 +	while (1) {
   7.297 +#ifdef CONFIG_SMP
   7.298 +		if (!need_resched())
   7.299 +			min_xtp();
   7.300 +#endif
   7.301 +		while (!need_resched()) {
   7.302 +			void (*idle)(void);
   7.303 +
   7.304 +			if (__get_cpu_var(cpu_idle_state))
   7.305 +				__get_cpu_var(cpu_idle_state) = 0;
   7.306 +
   7.307 +			rmb();
   7.308 +			if (mark_idle)
   7.309 +				(*mark_idle)(1);
   7.310 +
   7.311 +			idle = pm_idle;
   7.312 +			if (!idle)
   7.313 +				idle = default_idle;
   7.314 +			(*idle)();
   7.315 +		}
   7.316 +
   7.317 +		if (mark_idle)
   7.318 +			(*mark_idle)(0);
   7.319 +
   7.320 +#ifdef CONFIG_SMP
   7.321 +		normal_xtp();
   7.322 +#endif
   7.323 +		schedule();
   7.324 +		check_pgt_cache();
   7.325 +		if (cpu_is_offline(smp_processor_id()))
   7.326 +			play_dead();
   7.327 +	}
   7.328 +}
   7.329 +
   7.330 +void
   7.331 +ia64_save_extra (struct task_struct *task)
   7.332 +{
   7.333 +#ifdef CONFIG_PERFMON
   7.334 +	unsigned long info;
   7.335 +#endif
   7.336 +
   7.337 +	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
   7.338 +		ia64_save_debug_regs(&task->thread.dbr[0]);
   7.339 +
   7.340 +#ifdef CONFIG_PERFMON
   7.341 +	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
   7.342 +		pfm_save_regs(task);
   7.343 +
   7.344 +	info = __get_cpu_var(pfm_syst_info);
   7.345 +	if (info & PFM_CPUINFO_SYST_WIDE)
   7.346 +		pfm_syst_wide_update_task(task, info, 0);
   7.347 +#endif
   7.348 +
   7.349 +#ifdef CONFIG_IA32_SUPPORT
   7.350 +	if (IS_IA32_PROCESS(ia64_task_regs(task)))
   7.351 +		ia32_save_state(task);
   7.352 +#endif
   7.353 +}
   7.354 +
   7.355 +void
   7.356 +ia64_load_extra (struct task_struct *task)
   7.357 +{
   7.358 +#ifdef CONFIG_PERFMON
   7.359 +	unsigned long info;
   7.360 +#endif
   7.361 +
   7.362 +	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
   7.363 +		ia64_load_debug_regs(&task->thread.dbr[0]);
   7.364 +
   7.365 +#ifdef CONFIG_PERFMON
   7.366 +	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
   7.367 +		pfm_load_regs(task);
   7.368 +
   7.369 +	info = __get_cpu_var(pfm_syst_info);
   7.370 +	if (info & PFM_CPUINFO_SYST_WIDE) 
   7.371 +		pfm_syst_wide_update_task(task, info, 1);
   7.372 +#endif
   7.373 +
   7.374 +#ifdef CONFIG_IA32_SUPPORT
   7.375 +	if (IS_IA32_PROCESS(ia64_task_regs(task)))
   7.376 +		ia32_load_state(task);
   7.377 +#endif
   7.378 +}
   7.379 +
   7.380 +/*
   7.381 + * Copy the state of an ia-64 thread.
   7.382 + *
   7.383 + * We get here through the following  call chain:
   7.384 + *
   7.385 + *	from user-level:	from kernel:
   7.386 + *
   7.387 + *	<clone syscall>	        <some kernel call frames>
   7.388 + *	sys_clone		   :
   7.389 + *	do_fork			do_fork
   7.390 + *	copy_thread		copy_thread
   7.391 + *
   7.392 + * This means that the stack layout is as follows:
   7.393 + *
   7.394 + *	+---------------------+ (highest addr)
   7.395 + *	|   struct pt_regs    |
   7.396 + *	+---------------------+
   7.397 + *	| struct switch_stack |
   7.398 + *	+---------------------+
   7.399 + *	|                     |
   7.400 + *	|    memory stack     |
   7.401 + *	|                     | <-- sp (lowest addr)
   7.402 + *	+---------------------+
   7.403 + *
   7.404 + * Observe that we copy the unat values that are in pt_regs and switch_stack.  Spilling an
   7.405 + * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register,
   7.406 + * with N=(X & 0x1ff)/8.  Thus, copying the unat value preserves the NaT bits ONLY if the
   7.407 + * pt_regs structure in the parent is congruent to that of the child, modulo 512.  Since
   7.408 + * the stack is page aligned and the page size is at least 4KB, this is always the case,
   7.409 + * so there is nothing to worry about.
   7.410 + */
   7.411 +int
   7.412 +copy_thread (int nr, unsigned long clone_flags,
   7.413 +	     unsigned long user_stack_base, unsigned long user_stack_size,
   7.414 +	     struct task_struct *p, struct pt_regs *regs)
   7.415 +{
   7.416 +	extern char ia64_ret_from_clone, ia32_ret_from_clone;
   7.417 +	struct switch_stack *child_stack, *stack;
   7.418 +	unsigned long rbs, child_rbs, rbs_size;
   7.419 +	struct pt_regs *child_ptregs;
   7.420 +	int retval = 0;
   7.421 +
   7.422 +#ifdef CONFIG_SMP
   7.423 +	/*
   7.424 +	 * For SMP idle threads, fork_by_hand() calls do_fork with
   7.425 +	 * NULL regs.
   7.426 +	 */
   7.427 +	if (!regs)
   7.428 +		return 0;
   7.429 +#endif
   7.430 +
   7.431 +	stack = ((struct switch_stack *) regs) - 1;
   7.432 +
   7.433 +	child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
   7.434 +	child_stack = (struct switch_stack *) child_ptregs - 1;
   7.435 +
   7.436 +	/* copy parent's switch_stack & pt_regs to child: */
   7.437 +	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));
   7.438 +
   7.439 +	rbs = (unsigned long) current + IA64_RBS_OFFSET;
   7.440 +	child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
   7.441 +	rbs_size = stack->ar_bspstore - rbs;
   7.442 +
   7.443 +	/* copy the parent's register backing store to the child: */
   7.444 +	memcpy((void *) child_rbs, (void *) rbs, rbs_size);
   7.445 +
   7.446 +	if (likely(user_mode(child_ptregs))) {
   7.447 +		if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs))
   7.448 +			child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
   7.449 +		if (user_stack_base) {
   7.450 +			child_ptregs->r12 = user_stack_base + user_stack_size - 16;
   7.451 +			child_ptregs->ar_bspstore = user_stack_base;
   7.452 +			child_ptregs->ar_rnat = 0;
   7.453 +			child_ptregs->loadrs = 0;
   7.454 +		}
   7.455 +	} else {
   7.456 +		/*
   7.457 +		 * Note: we simply preserve the relative position of
   7.458 +		 * the stack pointer here.  There is no need to
   7.459 +		 * allocate a scratch area here, since that will have
   7.460 +		 * been taken care of by the caller of sys_clone()
   7.461 +		 * already.
   7.462 +		 */
   7.463 +		child_ptregs->r12 = (unsigned long) child_ptregs - 16; /* kernel sp */
   7.464 +		child_ptregs->r13 = (unsigned long) p;		/* set `current' pointer */
   7.465 +	}
   7.466 +	child_stack->ar_bspstore = child_rbs + rbs_size;
   7.467 +	if (IS_IA32_PROCESS(regs))
   7.468 +		child_stack->b0 = (unsigned long) &ia32_ret_from_clone;
   7.469 +	else
   7.470 +		child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
   7.471 +
   7.472 +	/* copy parts of thread_struct: */
   7.473 +	p->thread.ksp = (unsigned long) child_stack - 16;
   7.474 +
   7.475 +	/* stop some PSR bits from being inherited.
   7.476 +	 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
   7.477 +	 * therefore we must specify them explicitly here and not include them in
   7.478 +	 * IA64_PSR_BITS_TO_CLEAR.
   7.479 +	 */
   7.480 +	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
   7.481 +				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
   7.482 +
   7.483 +	/*
   7.484 +	 * NOTE: The calling convention considers all floating point
   7.485 +	 * registers in the high partition (fph) to be scratch.  Since
   7.486 +	 * the only way to get to this point is through a system call,
   7.487 +	 * we know that the values in fph are all dead.  Hence, there
   7.488 +	 * is no need to inherit the fph state from the parent to the
   7.489 +	 * child and all we have to do is to make sure that
   7.490 +	 * IA64_THREAD_FPH_VALID is cleared in the child.
   7.491 +	 *
   7.492 +	 * XXX We could push this optimization a bit further by
   7.493 +	 * clearing IA64_THREAD_FPH_VALID on ANY system call.
   7.494 +	 * However, it's not clear this is worth doing.  Also, it
   7.495 +	 * would be a slight deviation from the normal Linux system
   7.496 +	 * call behavior where scratch registers are preserved across
   7.497 +	 * system calls (unless used by the system call itself).
   7.498 +	 */
   7.499 +#	define THREAD_FLAGS_TO_CLEAR	(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \
   7.500 +					 | IA64_THREAD_PM_VALID)
   7.501 +#	define THREAD_FLAGS_TO_SET	0
   7.502 +	p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
   7.503 +			   | THREAD_FLAGS_TO_SET);
   7.504 +	ia64_drop_fpu(p);	/* don't pick up stale state from a CPU's fph */
   7.505 +#ifdef CONFIG_IA32_SUPPORT
   7.506 +	/*
   7.507 +	 * If we're cloning an IA32 task then save the IA32 extra
   7.508 +	 * state from the current task to the new task
   7.509 +	 */
   7.510 +	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
   7.511 +		ia32_save_state(p);
   7.512 +		if (clone_flags & CLONE_SETTLS)
   7.513 +			retval = ia32_clone_tls(p, child_ptregs);
   7.514 +
   7.515 +		/* Copy partially mapped page list */
   7.516 +		if (!retval)
   7.517 +			retval = ia32_copy_partial_page_list(p, clone_flags);
   7.518 +	}
   7.519 +#endif
   7.520 +
   7.521 +#ifdef CONFIG_PERFMON
   7.522 +	if (current->thread.pfm_context)
   7.523 +		pfm_inherit(p, child_ptregs);
   7.524 +#endif
   7.525 +	return retval;
   7.526 +}
   7.527 +
   7.528 +static void
   7.529 +do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
   7.530 +{
   7.531 +	unsigned long mask, sp, nat_bits = 0, ip, ar_rnat, urbs_end, cfm;
   7.532 +	elf_greg_t *dst = arg;
   7.533 +	struct pt_regs *pt;
   7.534 +	char nat;
   7.535 +	int i;
   7.536 +
   7.537 +	memset(dst, 0, sizeof(elf_gregset_t));	/* don't leak any kernel bits to user-level */
   7.538 +
   7.539 +	if (unw_unwind_to_user(info) < 0)
   7.540 +		return;
   7.541 +
   7.542 +	unw_get_sp(info, &sp);
   7.543 +	pt = (struct pt_regs *) (sp + 16);
   7.544 +
   7.545 +	urbs_end = ia64_get_user_rbs_end(task, pt, &cfm);
   7.546 +
   7.547 +	if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0)
   7.548 +		return;
   7.549 +
   7.550 +	ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),
   7.551 +		  &ar_rnat);
   7.552 +
   7.553 +	/*
   7.554 +	 * coredump format:
   7.555 +	 *	r0-r31
   7.556 +	 *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
   7.557 +	 *	predicate registers (p0-p63)
   7.558 +	 *	b0-b7
   7.559 +	 *	ip cfm user-mask
   7.560 +	 *	ar.rsc ar.bsp ar.bspstore ar.rnat
   7.561 +	 *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
   7.562 +	 */
   7.563 +
   7.564 +	/* r0 is zero */
   7.565 +	for (i = 1, mask = (1UL << i); i < 32; ++i) {
   7.566 +		unw_get_gr(info, i, &dst[i], &nat);
   7.567 +		if (nat)
   7.568 +			nat_bits |= mask;
   7.569 +		mask <<= 1;
   7.570 +	}
   7.571 +	dst[32] = nat_bits;
   7.572 +	unw_get_pr(info, &dst[33]);
   7.573 +
   7.574 +	for (i = 0; i < 8; ++i)
   7.575 +		unw_get_br(info, i, &dst[34 + i]);
   7.576 +
   7.577 +	unw_get_rp(info, &ip);
   7.578 +	dst[42] = ip + ia64_psr(pt)->ri;
   7.579 +	dst[43] = cfm;
   7.580 +	dst[44] = pt->cr_ipsr & IA64_PSR_UM;
   7.581 +
   7.582 +	unw_get_ar(info, UNW_AR_RSC, &dst[45]);
   7.583 +	/*
   7.584 +	 * For bsp and bspstore, unw_get_ar() would return the kernel
   7.585 +	 * addresses, but we need the user-level addresses instead:
   7.586 +	 */
   7.587 +	dst[46] = urbs_end;	/* note: by convention PT_AR_BSP points to the end of the urbs! */
   7.588 +	dst[47] = pt->ar_bspstore;
   7.589 +	dst[48] = ar_rnat;
   7.590 +	unw_get_ar(info, UNW_AR_CCV, &dst[49]);
   7.591 +	unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
   7.592 +	unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
   7.593 +	dst[52] = pt->ar_pfs;	/* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
   7.594 +	unw_get_ar(info, UNW_AR_LC, &dst[53]);
   7.595 +	unw_get_ar(info, UNW_AR_EC, &dst[54]);
   7.596 +	unw_get_ar(info, UNW_AR_CSD, &dst[55]);
   7.597 +	unw_get_ar(info, UNW_AR_SSD, &dst[56]);
   7.598 +}
   7.599 +
   7.600 +void
   7.601 +do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg)
   7.602 +{
   7.603 +	elf_fpreg_t *dst = arg;
   7.604 +	int i;
   7.605 +
   7.606 +	memset(dst, 0, sizeof(elf_fpregset_t));	/* don't leak any "random" bits */
   7.607 +
   7.608 +	if (unw_unwind_to_user(info) < 0)
   7.609 +		return;
   7.610 +
   7.611 +	/* f0 is 0.0, f1 is 1.0 */
   7.612 +
   7.613 +	for (i = 2; i < 32; ++i)
   7.614 +		unw_get_fr(info, i, dst + i);
   7.615 +
   7.616 +	ia64_flush_fph(task);
   7.617 +	if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0)
   7.618 +		memcpy(dst + 32, task->thread.fph, 96*16);
   7.619 +}
   7.620 +
   7.621 +void
   7.622 +do_copy_regs (struct unw_frame_info *info, void *arg)
   7.623 +{
   7.624 +	do_copy_task_regs(current, info, arg);
   7.625 +}
   7.626 +
   7.627 +void
   7.628 +do_dump_fpu (struct unw_frame_info *info, void *arg)
   7.629 +{
   7.630 +	do_dump_task_fpu(current, info, arg);
   7.631 +}
   7.632 +
   7.633 +int
   7.634 +dump_task_regs(struct task_struct *task, elf_gregset_t *regs)
   7.635 +{
   7.636 +	struct unw_frame_info tcore_info;
   7.637 +
   7.638 +	if (current == task) {
   7.639 +		unw_init_running(do_copy_regs, regs);
   7.640 +	} else {
   7.641 +		memset(&tcore_info, 0, sizeof(tcore_info));
   7.642 +		unw_init_from_blocked_task(&tcore_info, task);
   7.643 +		do_copy_task_regs(task, &tcore_info, regs);
   7.644 +	}
   7.645 +	return 1;
   7.646 +}
   7.647 +
   7.648 +void
   7.649 +ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
   7.650 +{
   7.651 +	unw_init_running(do_copy_regs, dst);
   7.652 +}
   7.653 +
   7.654 +int
   7.655 +dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst)
   7.656 +{
   7.657 +	struct unw_frame_info tcore_info;
   7.658 +
   7.659 +	if (current == task) {
   7.660 +		unw_init_running(do_dump_fpu, dst);
   7.661 +	} else {
   7.662 +		memset(&tcore_info, 0, sizeof(tcore_info));
   7.663 +		unw_init_from_blocked_task(&tcore_info, task);
   7.664 +		do_dump_task_fpu(task, &tcore_info, dst);
   7.665 +	}
   7.666 +	return 1;
   7.667 +}
   7.668 +
   7.669 +int
   7.670 +dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
   7.671 +{
   7.672 +	unw_init_running(do_dump_fpu, dst);
   7.673 +	return 1;	/* f0-f31 are always valid so we always return 1 */
   7.674 +}
   7.675 +
   7.676 +long
   7.677 +sys_execve (char __user *filename, char __user * __user *argv, char __user * __user *envp,
   7.678 +	    struct pt_regs *regs)
   7.679 +{
   7.680 +	char *fname;
   7.681 +	int error;
   7.682 +
   7.683 +	fname = getname(filename);
   7.684 +	error = PTR_ERR(fname);
   7.685 +	if (IS_ERR(fname))
   7.686 +		goto out;
   7.687 +	error = do_execve(fname, argv, envp, regs);
   7.688 +	putname(fname);
   7.689 +out:
   7.690 +	return error;
   7.691 +}
   7.692 +
/*
 * Create a kernel thread running FN(ARG).  A fake switch_stack/pt_regs
 * pair is built on the local stack and handed to do_fork(); the child
 * starts executing at start_kernel_thread with FN and ARG delivered in
 * r9/r11 (see kernel_thread_helper below).  Returns the new thread's pid
 * (do_fork's return value).
 */
pid_t
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
	extern void start_kernel_thread (void);
	/* On ia64 a function pointer is a descriptor: [0] = entry IP, [1] = gp. */
	unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread;
	struct {
		struct switch_stack sw;
		struct pt_regs pt;
	} regs;

	memset(&regs, 0, sizeof(regs));
	regs.pt.cr_iip = helper_fptr[0];	/* set entry point (IP) */
	regs.pt.r1 = helper_fptr[1];		/* set GP */
	regs.pt.r9 = (unsigned long) fn;	/* 1st argument */
	regs.pt.r11 = (unsigned long) arg;	/* 2nd argument */
	/* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read.  */
	regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
	regs.pt.cr_ifs = 1UL << 63;		/* mark as valid, empty frame */
	regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR);
	/* child's register backing store starts at the base of its kernel stack */
	regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET;
	regs.sw.pr = (1 << PRED_KERNEL_STACK);
	/* CLONE_VM | CLONE_UNTRACED: share the kernel address space, never ptraced */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
   7.717 +
/* This gets called from kernel_thread() via ia64_invoke_thread_helper().  */
int
kernel_thread_helper (int (*fn)(void *), void *arg)
{
#ifdef CONFIG_IA32_SUPPORT
	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
		/* A kernel thread is always a 64-bit process. */
		current->thread.map_base  = DEFAULT_MAP_BASE;
		current->thread.task_size = DEFAULT_TASK_SIZE;
		/* restore the saved 64-bit I/O base and TSS descriptor kernel registers */
		ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
		ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
	}
#endif
	/* run the thread body; its return value becomes the thread's exit code */
	return (*fn)(arg);
}
   7.733 +
/*
 * Flush thread state.  This is called when a thread does an execve().
 */
void
flush_thread (void)
{
	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe for
	 * this function, it will be disabled by kprobe_flush_task if you do.
	 */
	kprobe_flush_task(current);

	/* drop floating-point and debug-register state if it exists: */
	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
	ia64_drop_fpu(current);
	/* an ia32 task additionally carries a partial-page list that must be freed */
	if (IS_IA32_PROCESS(ia64_task_regs(current)))
		ia32_drop_partial_page_list(current);
}
   7.753 +
/*
 * Clean up state associated with current thread.  This is called when
 * the thread calls exit().
 */
void
exit_thread (void)
{

	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe for
	 * this function, it will be disabled by kprobe_flush_task if you do.
	 */
	kprobe_flush_task(current);

	/* drop floating-point state (as in flush_thread above) */
	ia64_drop_fpu(current);
#ifdef CONFIG_PERFMON
       /* if needed, stop monitoring and flush state to perfmon context */
	if (current->thread.pfm_context)
		pfm_exit_thread(current);

	/* free debug register resources */
	if (current->thread.flags & IA64_THREAD_DBG_VALID)
		pfm_release_debug_registers(current);
#endif
	if (IS_IA32_PROCESS(ia64_task_regs(current)))
		ia32_drop_partial_page_list(current);
}
   7.782 +
   7.783 +unsigned long
   7.784 +get_wchan (struct task_struct *p)
   7.785 +{
   7.786 +	struct unw_frame_info info;
   7.787 +	unsigned long ip;
   7.788 +	int count = 0;
   7.789 +
   7.790 +	/*
   7.791 +	 * Note: p may not be a blocked task (it could be current or
   7.792 +	 * another process running on some other CPU.  Rather than
   7.793 +	 * trying to determine if p is really blocked, we just assume
   7.794 +	 * it's blocked and rely on the unwind routines to fail
   7.795 +	 * gracefully if the process wasn't really blocked after all.
   7.796 +	 * --davidm 99/12/15
   7.797 +	 */
   7.798 +	unw_init_from_blocked_task(&info, p);
   7.799 +	do {
   7.800 +		if (unw_unwind(&info) < 0)
   7.801 +			return 0;
   7.802 +		unw_get_ip(&info, &ip);
   7.803 +		if (!in_sched_functions(ip))
   7.804 +			return ip;
   7.805 +	} while (count++ < 16);
   7.806 +	return 0;
   7.807 +}
   7.808 +
   7.809 +void
   7.810 +cpu_halt (void)
   7.811 +{
   7.812 +	pal_power_mgmt_info_u_t power_info[8];
   7.813 +	unsigned long min_power;
   7.814 +	int i, min_power_state;
   7.815 +
   7.816 +	if (ia64_pal_halt_info(power_info) != 0)
   7.817 +		return;
   7.818 +
   7.819 +	min_power_state = 0;
   7.820 +	min_power = power_info[0].pal_power_mgmt_info_s.power_consumption;
   7.821 +	for (i = 1; i < 8; ++i)
   7.822 +		if (power_info[i].pal_power_mgmt_info_s.im
   7.823 +		    && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) {
   7.824 +			min_power = power_info[i].pal_power_mgmt_info_s.power_consumption;
   7.825 +			min_power_state = i;
   7.826 +		}
   7.827 +
   7.828 +	while (1)
   7.829 +		ia64_pal_halt(min_power_state);
   7.830 +}
   7.831 +
/* Reboot the machine via an EFI warm reset; RESTART_CMD is ignored. */
void
machine_restart (char *restart_cmd)
{
	(*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
}
   7.837 +
/* Halt the machine by parking this CPU in its lowest-power PAL halt state. */
void
machine_halt (void)
{
	cpu_halt();
}
   7.843 +
/* Power off via the registered pm_power_off hook (if any), then halt. */
void
machine_power_off (void)
{
	if (pm_power_off)
		pm_power_off();
	/* reached if no hook is registered or it returned: fall back to halting */
	machine_halt();
}
   7.851 +#endif // !XEN
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/xen/arch/ia64/linux-xen/unwind.c	Tue Jan 03 19:06:14 2006 +0100
     8.3 @@ -0,0 +1,2332 @@
     8.4 +/*
     8.5 + * Copyright (C) 1999-2004 Hewlett-Packard Co
     8.6 + *	David Mosberger-Tang <davidm@hpl.hp.com>
     8.7 + * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
 * 	- Change pt_regs_off() to make it less dependent on pt_regs structure.
     8.9 + */
    8.10 +/*
    8.11 + * This file implements call frame unwind support for the Linux
    8.12 + * kernel.  Parsing and processing the unwind information is
    8.13 + * time-consuming, so this implementation translates the unwind
    8.14 + * descriptors into unwind scripts.  These scripts are very simple
    8.15 + * (basically a sequence of assignments) and efficient to execute.
    8.16 + * They are cached for later re-use.  Each script is specific for a
    8.17 + * given instruction pointer address and the set of predicate values
    8.18 + * that the script depends on (most unwind descriptors are
    8.19 + * unconditional and scripts often do not depend on predicates at
    8.20 + * all).  This code is based on the unwind conventions described in
    8.21 + * the "IA-64 Software Conventions and Runtime Architecture" manual.
    8.22 + *
    8.23 + * SMP conventions:
    8.24 + *	o updates to the global unwind data (in structure "unw") are serialized
    8.25 + *	  by the unw.lock spinlock
    8.26 + *	o each unwind script has its own read-write lock; a thread must acquire
    8.27 + *	  a read lock before executing a script and must acquire a write lock
    8.28 + *	  before modifying a script
    8.29 + *	o if both the unw.lock spinlock and a script's read-write lock must be
    8.30 + *	  acquired, then the read-write lock must be acquired first.
    8.31 + */
    8.32 +#ifdef XEN
    8.33 +#include <xen/types.h>
    8.34 +#include <xen/elf.h>
    8.35 +#include <xen/kernel.h>
    8.36 +#include <xen/sched.h>
    8.37 +#include <xen/xmalloc.h>
    8.38 +#include <xen/spinlock.h>
    8.39 +
    8.40 +// work around
    8.41 +#ifdef CONFIG_SMP
    8.42 +#define write_trylock(lock)	_raw_write_trylock(lock)
    8.43 +#else
    8.44 +#define write_trylock(lock)	({1;})
    8.45 +#endif
    8.46 +
    8.47 +#else
    8.48 +#include <linux/module.h>
    8.49 +#include <linux/bootmem.h>
    8.50 +#include <linux/elf.h>
    8.51 +#include <linux/kernel.h>
    8.52 +#include <linux/sched.h>
    8.53 +#include <linux/slab.h>
    8.54 +#endif
    8.55 +
    8.56 +#include <asm/unwind.h>
    8.57 +
    8.58 +#include <asm/delay.h>
    8.59 +#include <asm/page.h>
    8.60 +#include <asm/ptrace.h>
    8.61 +#include <asm/ptrace_offsets.h>
    8.62 +#include <asm/rse.h>
    8.63 +#include <asm/sections.h>
    8.64 +#include <asm/system.h>
    8.65 +#include <asm/uaccess.h>
    8.66 +
    8.67 +#include "entry.h"
    8.68 +#include "unwind_i.h"
    8.69 +
    8.70 +#define UNW_LOG_CACHE_SIZE	7	/* each unw_script is ~256 bytes in size */
    8.71 +#define UNW_CACHE_SIZE		(1 << UNW_LOG_CACHE_SIZE)
    8.72 +
    8.73 +#define UNW_LOG_HASH_SIZE	(UNW_LOG_CACHE_SIZE + 1)
    8.74 +#define UNW_HASH_SIZE		(1 << UNW_LOG_HASH_SIZE)
    8.75 +
    8.76 +#define UNW_STATS	0	/* WARNING: this disabled interrupts for long time-spans!! */
    8.77 +
    8.78 +#ifdef UNW_DEBUG
    8.79 +  static unsigned int unw_debug_level = UNW_DEBUG;
    8.80 +#  define UNW_DEBUG_ON(n)	unw_debug_level >= n
    8.81 +   /* Do not code a printk level, not all debug lines end in newline */
    8.82 +#  define UNW_DPRINT(n, ...)  if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
    8.83 +#  define inline
    8.84 +#else /* !UNW_DEBUG */
    8.85 +#  define UNW_DEBUG_ON(n)  0
    8.86 +#  define UNW_DPRINT(n, ...)
    8.87 +#endif /* UNW_DEBUG */
    8.88 +
    8.89 +#if UNW_STATS
    8.90 +# define STAT(x...)	x
    8.91 +#else
    8.92 +# define STAT(x...)
    8.93 +#endif
    8.94 +
    8.95 +#ifdef XEN
    8.96 +#define alloc_reg_state()	xmalloc(struct unw_reg_state)
    8.97 +#define free_reg_state(usr)	xfree(usr)
    8.98 +#define alloc_labeled_state()	xmalloc(struct unw_labeled_state)
    8.99 +#define free_labeled_state(usr)	xfree(usr)
   8.100 +#else
   8.101 +#define alloc_reg_state()	kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
   8.102 +#define free_reg_state(usr)	kfree(usr)
   8.103 +#define alloc_labeled_state()	kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
   8.104 +#define free_labeled_state(usr)	kfree(usr)
   8.105 +#endif
   8.106 +
   8.107 +typedef unsigned long unw_word;
   8.108 +typedef unsigned char unw_hash_index_t;
   8.109 +
/* Global unwinder state; updates are serialized by unw.lock (see file header). */
static struct {
	spinlock_t lock;			/* spinlock for unwind data */

	/* list of unwind tables (one per load-module) */
	struct unw_table *tables;

	unsigned long r0;			/* constant 0 for r0 */

	/* table of registers that prologues can save (and order in which they're saved): */
	const unsigned char save_order[8];

	/* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
	unsigned short sw_off[sizeof(struct unw_frame_info) / 8];

	unsigned short lru_head;		/* index of least-recently used script */
	unsigned short lru_tail;		/* index of most-recently used script */

	/* index into unw_frame_info for preserved register i */
	unsigned short preg_index[UNW_NUM_REGS];

	/* byte offset of scratch register rN within struct pt_regs; -1 if not saved there */
	short pt_regs_offsets[32];

	/* unwind table for the kernel: */
	struct unw_table kernel_table;

	/* unwind table describing the gate page (kernel code that is mapped into user space): */
	size_t gate_table_size;
	unsigned long *gate_table;

	/* hash table that maps instruction pointer to script index: */
	unsigned short hash[UNW_HASH_SIZE];

	/* script cache: */
	struct unw_script cache[UNW_CACHE_SIZE];

# ifdef UNW_DEBUG
	const char *preg_name[UNW_NUM_REGS];
# endif
# if UNW_STATS
	struct {
		struct {
			int lookups;
			int hinted_hits;
			int normal_hits;
			int collision_chain_traversals;
		} cache;
		struct {
			unsigned long build_time;
			unsigned long run_time;
			unsigned long parse_time;
			int builds;
			int news;
			int collisions;
			int runs;
		} script;
		struct {
			unsigned long init_time;
			unsigned long unwind_time;
			int inits;
			int unwinds;
		} api;
	} stat;
# endif
} unw = {
	.tables = &unw.kernel_table,
	.lock = SPIN_LOCK_UNLOCKED,
	.save_order = {
		UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
		UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
	},
	/* qword index (offset/8) of each preserved register's slot in unw_frame_info */
	.preg_index = {
		offsetof(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_GR */
		offsetof(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_MEM */
		offsetof(struct unw_frame_info, bsp_loc)/8,
		offsetof(struct unw_frame_info, bspstore_loc)/8,
		offsetof(struct unw_frame_info, pfs_loc)/8,
		offsetof(struct unw_frame_info, rnat_loc)/8,
		offsetof(struct unw_frame_info, psp)/8,
		offsetof(struct unw_frame_info, rp_loc)/8,
		offsetof(struct unw_frame_info, r4)/8,
		offsetof(struct unw_frame_info, r5)/8,
		offsetof(struct unw_frame_info, r6)/8,
		offsetof(struct unw_frame_info, r7)/8,
		offsetof(struct unw_frame_info, unat_loc)/8,
		offsetof(struct unw_frame_info, pr_loc)/8,
		offsetof(struct unw_frame_info, lc_loc)/8,
		offsetof(struct unw_frame_info, fpsr_loc)/8,
		offsetof(struct unw_frame_info, b1_loc)/8,
		offsetof(struct unw_frame_info, b2_loc)/8,
		offsetof(struct unw_frame_info, b3_loc)/8,
		offsetof(struct unw_frame_info, b4_loc)/8,
		offsetof(struct unw_frame_info, b5_loc)/8,
		offsetof(struct unw_frame_info, f2_loc)/8,
		offsetof(struct unw_frame_info, f3_loc)/8,
		offsetof(struct unw_frame_info, f4_loc)/8,
		offsetof(struct unw_frame_info, f5_loc)/8,
		offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
		offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
	},
	/* registers r0, r4-r7 have no pt_regs slot, hence the -1 entries */
	.pt_regs_offsets = {
		[0] = -1,
		offsetof(struct pt_regs,  r1),
		offsetof(struct pt_regs,  r2),
		offsetof(struct pt_regs,  r3),
		[4] = -1, [5] = -1, [6] = -1, [7] = -1,
		offsetof(struct pt_regs,  r8),
		offsetof(struct pt_regs,  r9),
		offsetof(struct pt_regs, r10),
		offsetof(struct pt_regs, r11),
		offsetof(struct pt_regs, r12),
		offsetof(struct pt_regs, r13),
		offsetof(struct pt_regs, r14),
		offsetof(struct pt_regs, r15),
		offsetof(struct pt_regs, r16),
		offsetof(struct pt_regs, r17),
		offsetof(struct pt_regs, r18),
		offsetof(struct pt_regs, r19),
		offsetof(struct pt_regs, r20),
		offsetof(struct pt_regs, r21),
		offsetof(struct pt_regs, r22),
		offsetof(struct pt_regs, r23),
		offsetof(struct pt_regs, r24),
		offsetof(struct pt_regs, r25),
		offsetof(struct pt_regs, r26),
		offsetof(struct pt_regs, r27),
		offsetof(struct pt_regs, r28),
		offsetof(struct pt_regs, r29),
		offsetof(struct pt_regs, r30),
		offsetof(struct pt_regs, r31),
	},
	.hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
#ifdef UNW_DEBUG
	.preg_name = {
		"pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
		"r4", "r5", "r6", "r7",
		"ar.unat", "pr", "ar.lc", "ar.fpsr",
		"b1", "b2", "b3", "b4", "b5",
		"f2", "f3", "f4", "f5",
		"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
		"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
	}
#endif
};
   8.267 +
   8.268 +static inline int
   8.269 +read_only (void *addr)
   8.270 +{
   8.271 +	return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0);
   8.272 +}
   8.273 +
   8.274 +/*
   8.275 + * Returns offset of rREG in struct pt_regs.
   8.276 + */
   8.277 +static inline unsigned long
   8.278 +pt_regs_off (unsigned long reg)
   8.279 +{
   8.280 +	short off = -1;
   8.281 +
   8.282 +	if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
   8.283 +		off = unw.pt_regs_offsets[reg];
   8.284 +
   8.285 +	if (off < 0) {
   8.286 +		UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
   8.287 +		off = 0;
   8.288 +	}
   8.289 +	return (unsigned long) off;
   8.290 +}
   8.291 +
   8.292 +static inline struct pt_regs *
   8.293 +get_scratch_regs (struct unw_frame_info *info)
   8.294 +{
   8.295 +	if (!info->pt) {
   8.296 +		/* This should not happen with valid unwind info.  */
   8.297 +		UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
   8.298 +		if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
   8.299 +			info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
   8.300 +		else
   8.301 +			info->pt = info->sp - 16;
   8.302 +	}
   8.303 +	UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
   8.304 +	return (struct pt_regs *) info->pt;
   8.305 +}
   8.306 +
   8.307 +/* Unwind accessors.  */
   8.308 +
   8.309 +int
   8.310 +unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
   8.311 +{
   8.312 +	unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
   8.313 +	struct unw_ireg *ireg;
   8.314 +	struct pt_regs *pt;
   8.315 +
   8.316 +	if ((unsigned) regnum - 1 >= 127) {
   8.317 +		if (regnum == 0 && !write) {
   8.318 +			*val = 0;	/* read r0 always returns 0 */
   8.319 +			*nat = 0;
   8.320 +			return 0;
   8.321 +		}
   8.322 +		UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
   8.323 +			   __FUNCTION__, regnum);
   8.324 +		return -1;
   8.325 +	}
   8.326 +
   8.327 +	if (regnum < 32) {
   8.328 +		if (regnum >= 4 && regnum <= 7) {
   8.329 +			/* access a preserved register */
   8.330 +			ireg = &info->r4 + (regnum - 4);
   8.331 +			addr = ireg->loc;
   8.332 +			if (addr) {
   8.333 +				nat_addr = addr + ireg->nat.off;
   8.334 +				switch (ireg->nat.type) {
   8.335 +				      case UNW_NAT_VAL:
   8.336 +					/* simulate getf.sig/setf.sig */
   8.337 +					if (write) {
   8.338 +						if (*nat) {
   8.339 +							/* write NaTVal and be done with it */
   8.340 +							addr[0] = 0;
   8.341 +							addr[1] = 0x1fffe;
   8.342 +							return 0;
   8.343 +						}
   8.344 +						addr[1] = 0x1003e;
   8.345 +					} else {
   8.346 +						if (addr[0] == 0 && addr[1] == 0x1ffe) {
   8.347 +							/* return NaT and be done with it */
   8.348 +							*val = 0;
   8.349 +							*nat = 1;
   8.350 +							return 0;
   8.351 +						}
   8.352 +					}
   8.353 +					/* fall through */
   8.354 +				      case UNW_NAT_NONE:
   8.355 +					dummy_nat = 0;
   8.356 +					nat_addr = &dummy_nat;
   8.357 +					break;
   8.358 +
   8.359 +				      case UNW_NAT_MEMSTK:
   8.360 +					nat_mask = (1UL << ((long) addr & 0x1f8)/8);
   8.361 +					break;
   8.362 +
   8.363 +				      case UNW_NAT_REGSTK:
   8.364 +					nat_addr = ia64_rse_rnat_addr(addr);
   8.365 +					if ((unsigned long) addr < info->regstk.limit
   8.366 +					    || (unsigned long) addr >= info->regstk.top)
   8.367 +					{
   8.368 +						UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
   8.369 +							"[0x%lx-0x%lx)\n",
   8.370 +							__FUNCTION__, (void *) addr,
   8.371 +							info->regstk.limit,
   8.372 +							info->regstk.top);
   8.373 +						return -1;
   8.374 +					}
   8.375 +					if ((unsigned long) nat_addr >= info->regstk.top)
   8.376 +						nat_addr = &info->sw->ar_rnat;
   8.377 +					nat_mask = (1UL << ia64_rse_slot_num(addr));
   8.378 +					break;
   8.379 +				}
   8.380 +			} else {
   8.381 +				addr = &info->sw->r4 + (regnum - 4);
   8.382 +				nat_addr = &info->sw->ar_unat;
   8.383 +				nat_mask = (1UL << ((long) addr & 0x1f8)/8);
   8.384 +			}
   8.385 +		} else {
   8.386 +			/* access a scratch register */
   8.387 +			pt = get_scratch_regs(info);
   8.388 +			addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
   8.389 +			if (info->pri_unat_loc)
   8.390 +				nat_addr = info->pri_unat_loc;
   8.391 +			else
   8.392 +				nat_addr = &info->sw->caller_unat;
   8.393 +			nat_mask = (1UL << ((long) addr & 0x1f8)/8);
   8.394 +		}
   8.395 +	} else {
   8.396 +		/* access a stacked register */
   8.397 +		addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
   8.398 +		nat_addr = ia64_rse_rnat_addr(addr);
   8.399 +		if ((unsigned long) addr < info->regstk.limit
   8.400 +		    || (unsigned long) addr >= info->regstk.top)
   8.401 +		{
   8.402 +			UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
   8.403 +				   "of rbs\n",  __FUNCTION__);
   8.404 +			return -1;
   8.405 +		}
   8.406 +		if ((unsigned long) nat_addr >= info->regstk.top)
   8.407 +			nat_addr = &info->sw->ar_rnat;
   8.408 +		nat_mask = (1UL << ia64_rse_slot_num(addr));
   8.409 +	}
   8.410 +
   8.411 +	if (write) {
   8.412 +		if (read_only(addr)) {
   8.413 +			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
   8.414 +				__FUNCTION__);
   8.415 +		} else {
   8.416 +			*addr = *val;
   8.417 +			if (*nat)
   8.418 +				*nat_addr |= nat_mask;
   8.419 +			else
   8.420 +				*nat_addr &= ~nat_mask;
   8.421 +		}
   8.422 +	} else {
   8.423 +		if ((*nat_addr & nat_mask) == 0) {
   8.424 +			*val = *addr;
   8.425 +			*nat = 0;
   8.426 +		} else {
   8.427 +			*val = 0;	/* if register is a NaT, *addr may contain kernel data! */
   8.428 +			*nat = 1;
   8.429 +		}
   8.430 +	}
   8.431 +	return 0;
   8.432 +}
   8.433 +EXPORT_SYMBOL(unw_access_gr);
   8.434 +
   8.435 +int
   8.436 +unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
   8.437 +{
   8.438 +	unsigned long *addr;
   8.439 +	struct pt_regs *pt;
   8.440 +
   8.441 +	switch (regnum) {
   8.442 +		/* scratch: */
   8.443 +	      case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
   8.444 +	      case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
   8.445 +	      case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
   8.446 +
   8.447 +		/* preserved: */
   8.448 +	      case 1: case 2: case 3: case 4: case 5:
   8.449 +		addr = *(&info->b1_loc + (regnum - 1));
   8.450 +		if (!addr)
   8.451 +			addr = &info->sw->b1 + (regnum - 1);
   8.452 +		break;
   8.453 +
   8.454 +	      default:
   8.455 +		UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
   8.456 +			   __FUNCTION__, regnum);
   8.457 +		return -1;
   8.458 +	}
   8.459 +	if (write)
   8.460 +		if (read_only(addr)) {
   8.461 +			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
   8.462 +				__FUNCTION__);
   8.463 +		} else
   8.464 +			*addr = *val;
   8.465 +	else
   8.466 +		*val = *addr;
   8.467 +	return 0;
   8.468 +}
   8.469 +EXPORT_SYMBOL(unw_access_br);
   8.470 +
   8.471 +int
   8.472 +unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
   8.473 +{
   8.474 +	struct ia64_fpreg *addr = NULL;
   8.475 +	struct pt_regs *pt;
   8.476 +
   8.477 +	if ((unsigned) (regnum - 2) >= 126) {
   8.478 +		UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
   8.479 +			   __FUNCTION__, regnum);
   8.480 +		return -1;
   8.481 +	}
   8.482 +
   8.483 +	if (regnum <= 5) {
   8.484 +		addr = *(&info->f2_loc + (regnum - 2));
   8.485 +		if (!addr)
   8.486 +			addr = &info->sw->f2 + (regnum - 2);
   8.487 +	} else if (regnum <= 15) {
   8.488 +		if (regnum <= 11) {
   8.489 +			pt = get_scratch_regs(info);
   8.490 +			addr = &pt->f6  + (regnum - 6);
   8.491 +		}
   8.492 +		else
   8.493 +			addr = &info->sw->f12 + (regnum - 12);
   8.494 +	} else if (regnum <= 31) {
   8.495 +		addr = info->fr_loc[regnum - 16];
   8.496 +		if (!addr)
   8.497 +			addr = &info->sw->f16 + (regnum - 16);
   8.498 +	} else {
   8.499 +		struct task_struct *t = info->task;
   8.500 +
   8.501 +		if (write)
   8.502 +			ia64_sync_fph(t);
   8.503 +		else
   8.504 +			ia64_flush_fph(t);
   8.505 +#ifdef XEN
   8.506 +		addr = t->arch._thread.fph + (regnum - 32);
   8.507 +#else
   8.508 +		addr = t->thread.fph + (regnum - 32);
   8.509 +#endif
   8.510 +	}
   8.511 +
   8.512 +	if (write)
   8.513 +		if (read_only(addr)) {
   8.514 +			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
   8.515 +				__FUNCTION__);
   8.516 +		} else
   8.517 +			*addr = *val;
   8.518 +	else
   8.519 +		*val = *addr;
   8.520 +	return 0;
   8.521 +}
   8.522 +EXPORT_SYMBOL(unw_access_fr);
   8.523 +
/*
 * Read or write application register REGNUM (a UNW_AR_* index) of the
 * frame described by INFO.  Preserved ARs come from the saved location
 * recorded by the unwinder when one exists, else from the switch_stack;
 * scratch ARs (RSC/CCV/CSD/SSD) come from the interrupted context's
 * pt_regs.  Returns 0 on success, -1 for a bad register or missing CFM.
 */
int
unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
{
	unsigned long *addr;
	struct pt_regs *pt;

	switch (regnum) {
	      case UNW_AR_BSP:
		addr = info->bsp_loc;
		if (!addr)
			addr = &info->sw->ar_bspstore;
		break;

	      case UNW_AR_BSPSTORE:
		addr = info->bspstore_loc;
		if (!addr)
			addr = &info->sw->ar_bspstore;
		break;

	      case UNW_AR_PFS:
		addr = info->pfs_loc;
		if (!addr)
			addr = &info->sw->ar_pfs;
		break;

	      case UNW_AR_RNAT:
		addr = info->rnat_loc;
		if (!addr)
			addr = &info->sw->ar_rnat;
		break;

	      case UNW_AR_UNAT:
		addr = info->unat_loc;
		if (!addr)
			addr = &info->sw->caller_unat;
		break;

	      case UNW_AR_LC:
		addr = info->lc_loc;
		if (!addr)
			addr = &info->sw->ar_lc;
		break;

	      case UNW_AR_EC:
		/* EC lives in bits 52-57 of the saved CFM; update it in place. */
		if (!info->cfm_loc)
			return -1;
		if (write)
			*info->cfm_loc =
				(*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
		else
			*val = (*info->cfm_loc >> 52) & 0x3f;
		return 0;

	      case UNW_AR_FPSR:
		addr = info->fpsr_loc;
		if (!addr)
			addr = &info->sw->ar_fpsr;
		break;

	      case UNW_AR_RSC:
		pt = get_scratch_regs(info);
		addr = &pt->ar_rsc;
		break;

	      case UNW_AR_CCV:
		pt = get_scratch_regs(info);
		addr = &pt->ar_ccv;
		break;

	      case UNW_AR_CSD:
		pt = get_scratch_regs(info);
		addr = &pt->ar_csd;
		break;

	      case UNW_AR_SSD:
		pt = get_scratch_regs(info);
		addr = &pt->ar_ssd;
		break;

	      default:
		UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
			   __FUNCTION__, regnum);
		return -1;
	}

	if (write) {
		if (read_only(addr)) {
			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
				__FUNCTION__);
		} else
			*addr = *val;
	} else
		*val = *addr;
	return 0;
}
EXPORT_SYMBOL(unw_access_ar);
   8.620 +
   8.621 +int
   8.622 +unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
   8.623 +{
   8.624 +	unsigned long *addr;
   8.625 +
   8.626 +	addr = info->pr_loc;
   8.627 +	if (!addr)
   8.628 +		addr = &info->sw->pr;
   8.629 +
   8.630 +	if (write) {
   8.631 +		if (read_only(addr)) {
   8.632 +			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
   8.633 +				__FUNCTION__);
   8.634 +		} else
   8.635 +			*addr = *val;
   8.636 +	} else
   8.637 +		*val = *addr;
   8.638 +	return 0;
   8.639 +}
   8.640 +EXPORT_SYMBOL(unw_access_pr);
   8.641 +
   8.642 +
   8.643 +/* Routines to manipulate the state stack.  */
   8.644 +
   8.645 +static inline void
   8.646 +push (struct unw_state_record *sr)
   8.647 +{
   8.648 +	struct unw_reg_state *rs;
   8.649 +
   8.650 +	rs = alloc_reg_state();
   8.651 +	if (!rs) {
   8.652 +		printk(KERN_ERR "unwind: cannot stack reg state!\n");
   8.653 +		return;
   8.654 +	}
   8.655 +	memcpy(rs, &sr->curr, sizeof(*rs));
   8.656 +	sr->curr.next = rs;
   8.657 +}
   8.658 +
   8.659 +static void
   8.660 +pop (struct unw_state_record *sr)
   8.661 +{
   8.662 +	struct unw_reg_state *rs = sr->curr.next;
   8.663 +
   8.664 +	if (!rs) {
   8.665 +		printk(KERN_ERR "unwind: stack underflow!\n");
   8.666 +		return;
   8.667 +	}
   8.668 +	memcpy(&sr->curr, rs, sizeof(*rs));
   8.669 +	free_reg_state(rs);
   8.670 +}
   8.671 +
   8.672 +/* Make a copy of the state stack.  Non-recursive to avoid stack overflows.  */
   8.673 +static struct unw_reg_state *
   8.674 +dup_state_stack (struct unw_reg_state *rs)
   8.675 +{
   8.676 +	struct unw_reg_state *copy, *prev = NULL, *first = NULL;
   8.677 +
   8.678 +	while (rs) {
   8.679 +		copy = alloc_reg_state();
   8.680 +		if (!copy) {
   8.681 +			printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
   8.682 +			return NULL;
   8.683 +		}
   8.684 +		memcpy(copy, rs, sizeof(*copy));
   8.685 +		if (first)
   8.686 +			prev->next = copy;
   8.687 +		else
   8.688 +			first = copy;
   8.689 +		rs = rs->next;
   8.690 +		prev = copy;
   8.691 +	}
   8.692 +	return first;
   8.693 +}
   8.694 +
   8.695 +/* Free all stacked register states (but not RS itself).  */
   8.696 +static void
   8.697 +free_state_stack (struct unw_reg_state *rs)
   8.698 +{
   8.699 +	struct unw_reg_state *p, *next;
   8.700 +
   8.701 +	for (p = rs->next; p != NULL; p = next) {
   8.702 +		next = p->next;
   8.703 +		free_reg_state(p);
   8.704 +	}
   8.705 +	rs->next = NULL;
   8.706 +}
   8.707 +
   8.708 +/* Unwind decoder routines */
   8.709 +
/*
 * Translate an "abreg" register encoding from the unwind descriptors into
 * the corresponding UNW_REG_* index.  MEMORY selects between the memory
 * and GR flavors of the primary UNAT (abreg 0x62).  Invalid encodings
 * fall through to a diagnostic and return UNW_REG_LC.
 */
static enum unw_register_index __attribute_const__
decode_abreg (unsigned char abreg, int memory)
{
	switch (abreg) {
	      case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
	      case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
	      case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
	      case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
	      case 0x60: return UNW_REG_PR;
	      case 0x61: return UNW_REG_PSP;
	      case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
	      case 0x63: return UNW_REG_RP;
	      case 0x64: return UNW_REG_BSP;
	      case 0x65: return UNW_REG_BSPSTORE;
	      case 0x66: return UNW_REG_RNAT;
	      case 0x67: return UNW_REG_UNAT;
	      case 0x68: return UNW_REG_FPSR;
	      case 0x69: return UNW_REG_PFS;
	      case 0x6a: return UNW_REG_LC;
	      default:
		break;
	}
	UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
	return UNW_REG_LC;
}
   8.735 +
   8.736 +static void
   8.737 +set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
   8.738 +{
   8.739 +	reg->val = val;
   8.740 +	reg->where = where;
   8.741 +	if (reg->when == UNW_WHEN_NEVER)
   8.742 +		reg->when = when;
   8.743 +}
   8.744 +
   8.745 +static void
   8.746 +alloc_spill_area (unsigned long *offp, unsigned long regsize,
   8.747 +		  struct unw_reg_info *lo, struct unw_reg_info *hi)
   8.748 +{
   8.749 +	struct unw_reg_info *reg;
   8.750 +
   8.751 +	for (reg = hi; reg >= lo; --reg) {
   8.752 +		if (reg->where == UNW_WHERE_SPILL_HOME) {
   8.753 +			reg->where = UNW_WHERE_PSPREL;
   8.754 +			*offp -= regsize;
   8.755 +			reg->val = *offp;
   8.756 +		}
   8.757 +	}
   8.758 +}
   8.759 +
   8.760 +static inline void
   8.761 +spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
   8.762 +{
   8.763 +	struct unw_reg_info *reg;
   8.764 +
   8.765 +	for (reg = *regp; reg <= lim; ++reg) {
   8.766 +		if (reg->where == UNW_WHERE_SPILL_HOME) {
   8.767 +			reg->when = t;
   8.768 +			*regp = reg + 1;
   8.769 +			return;
   8.770 +		}
   8.771 +	}
   8.772 +	UNW_DPRINT(0, "unwind.%s: excess spill!\n",  __FUNCTION__);
   8.773 +}
   8.774 +
/*
 * Called when a prologue region is complete: resolve the register save
 * locations the descriptors left implicit and lay out the memory spill
 * area for this frame.
 */
static inline void
finish_prologue (struct unw_state_record *sr)
{
	struct unw_reg_info *reg;
	unsigned long off;
	int i;

	/*
	 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
	 * for Using Unwind Descriptors", rule 3):
	 */
	for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
		reg = sr->curr.reg + unw.save_order[i];
		if (reg->where == UNW_WHERE_GR_SAVE) {
			reg->where = UNW_WHERE_GR;
			reg->val = sr->gr_save_loc++;	/* consume the next save GR */
		}
	}

	/*
	 * Next, compute when the fp, general, and branch registers get
	 * saved.  This must come before alloc_spill_area() because
	 * we need to know which registers are spilled to their home
	 * locations.
	 */
	if (sr->imask) {
		unsigned char kind, mask = 0, *cp = sr->imask;
		int t;
		static const unsigned char limit[3] = {
			UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
		};
		struct unw_reg_info *(regs[3]);

		regs[0] = sr->curr.reg + UNW_REG_F2;
		regs[1] = sr->curr.reg + UNW_REG_R4;
		regs[2] = sr->curr.reg + UNW_REG_B1;

		/* imask packs four 2-bit entries per byte, msb first:
		 * 0 = no save, 1 = FR, 2 = GR, 3 = BR at slot t */
		for (t = 0; t < sr->region_len; ++t) {
			if ((t & 3) == 0)
				mask = *cp++;
			kind = (mask >> 2*(3-(t & 3))) & 3;
			if (kind > 0)
				spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
						sr->region_start + t);
		}
	}
	/*
	 * Next, lay out the memory stack spill area:
	 */
	if (sr->any_spills) {
		off = sr->spill_offset;
		alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
	}
}
   8.831 +
   8.832 +/*
   8.833 + * Region header descriptors.
   8.834 + */
   8.835 +
/*
 * Open a new region of RLEN instruction slots: a body if BODY is set,
 * otherwise a prologue.  For a prologue, MASK (msb first, per
 * unw.save_order) says which of the four standard registers are saved
 * to consecutive GRs starting at GRSAVE.  Sets sr->done once the
 * target instruction precedes the new region.
 */
static void
desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
	       struct unw_state_record *sr)
{
	int i, region_start;

	/* the previous prologue region is complete now; finalize it */
	if (!(sr->in_body || sr->first_region))
		finish_prologue(sr);
	sr->first_region = 0;

	/* check if we're done: */
	if (sr->when_target < sr->region_start + sr->region_len) {
		sr->done = 1;
		return;
	}

	region_start = sr->region_start + sr->region_len;

	/* unwind any prologues ended by the preceding epilogue: */
	for (i = 0; i < sr->epilogue_count; ++i)
		pop(sr);
	sr->epilogue_count = 0;
	sr->epilogue_start = UNW_WHEN_NEVER;

	sr->region_start = region_start;
	sr->region_len = rlen;
	sr->in_body = body;

	if (!body) {
		push(sr);

		/* MASK bit 3..0 correspond to unw.save_order[0..3] */
		for (i = 0; i < 4; ++i) {
			if (mask & 0x8)
				set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
					sr->region_start + sr->region_len - 1, grsave++);
			mask <<= 1;
		}
		sr->gr_save_loc = grsave;
		sr->any_spills = 0;
		sr->imask = NULL;
		sr->spill_offset = 0x10;	/* default to psp+16 */
	}
}
   8.878 +
   8.879 +/*
   8.880 + * Prologue descriptors.
   8.881 + */
   8.882 +
   8.883 +static inline void
   8.884 +desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
   8.885 +{
   8.886 +	if (abi == 3 && context == 'i') {
   8.887 +		sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
   8.888 +		UNW_DPRINT(3, "unwind.%s: interrupt frame\n",  __FUNCTION__);
   8.889 +	}
   8.890 +	else
   8.891 +		UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
   8.892 +				__FUNCTION__, abi, context);
   8.893 +}
   8.894 +
   8.895 +static inline void
   8.896 +desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
   8.897 +{
   8.898 +	int i;
   8.899 +
   8.900 +	for (i = 0; i < 5; ++i) {
   8.901 +		if (brmask & 1)
   8.902 +			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
   8.903 +				sr->region_start + sr->region_len - 1, gr++);
   8.904 +		brmask >>= 1;
   8.905 +	}
   8.906 +}
   8.907 +
   8.908 +static inline void
   8.909 +desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
   8.910 +{
   8.911 +	int i;
   8.912 +
   8.913 +	for (i = 0; i < 5; ++i) {
   8.914 +		if (brmask & 1) {
   8.915 +			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
   8.916 +				sr->region_start + sr->region_len - 1, 0);
   8.917 +			sr->any_spills = 1;
   8.918 +		}
   8.919 +		brmask >>= 1;
   8.920 +	}
   8.921 +}
   8.922 +
/*
 * P6-style descriptor: GRMASK selects r4-r7 and FRMASK selects
 * f2-f5/f16-f31 to be spilled to their memory home locations.
 */
static inline void
desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
{
	int i;

	for (i = 0; i < 4; ++i) {
		if ((grmask & 1) != 0) {
			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
				sr->region_start + sr->region_len - 1, 0);
			sr->any_spills = 1;
		}
		grmask >>= 1;
	}
	for (i = 0; i < 20; ++i) {
		if ((frmask & 1) != 0) {
			/* frmask bits 0-3 map to f2-f5, bits 4-19 to f16-f31;
			 * the -4 re-bases the index for the second range */
			int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
			set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
				sr->region_start + sr->region_len - 1, 0);
			sr->any_spills = 1;
		}
		frmask >>= 1;
	}
}
   8.946 +
   8.947 +static inline void
   8.948 +desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
   8.949 +{
   8.950 +	int i;
   8.951 +
   8.952 +	for (i = 0; i < 4; ++i) {
   8.953 +		if ((frmask & 1) != 0) {
   8.954 +			set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
   8.955 +				sr->region_start + sr->region_len - 1, 0);
   8.956 +			sr->any_spills = 1;
   8.957 +		}
   8.958 +		frmask >>= 1;
   8.959 +	}
   8.960 +}
   8.961 +
   8.962 +static inline void
   8.963 +desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
   8.964 +{
   8.965 +	int i;
   8.966 +
   8.967 +	for (i = 0; i < 4; ++i) {
   8.968 +		if ((grmask & 1) != 0)
   8.969 +			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
   8.970 +				sr->region_start + sr->region_len - 1, gr++);
   8.971 +		grmask >>= 1;
   8.972 +	}
   8.973 +}
   8.974 +
   8.975 +static inline void
   8.976 +desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
   8.977 +{
   8.978 +	int i;
   8.979 +
   8.980 +	for (i = 0; i < 4; ++i) {
   8.981 +		if ((grmask & 1) != 0) {
   8.982 +			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
   8.983 +				sr->region_start + sr->region_len - 1, 0);
   8.984 +			sr->any_spills = 1;
   8.985 +		}
   8.986 +		grmask >>= 1;
   8.987 +	}
   8.988 +}
   8.989 +
/*
 * Fixed-size memory stack frame: the previous sp is sp + 16*SIZE,
 * established at slot T (clamped to the current region).
 */
static inline void
desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
{
	set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
		sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
}
   8.996 +
/* Variable-size frame: only the time at which psp was saved is known;
 * its location is supplied by a separate descriptor. */
static inline void
desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
{
	sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
}
  8.1002 +
/* REG is saved in general register DST (save time: end of prologue). */
static inline void
desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
{
	set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
}
  8.1008 +
/* REG is saved at psp+16 - 4*PSPOFF (psp-relative; offsets are encoded
 * in 4-byte units). */
static inline void
desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
{
	set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
		0x10 - 4*pspoff);
}
  8.1015 +
/* REG is saved at sp + 4*SPOFF (sp-relative; offsets in 4-byte units). */
static inline void
desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
{
	set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
		4*spoff);
}
  8.1022 +
/* The return link is found in branch register DST rather than b0. */
static inline void
desc_rp_br (unsigned char dst, struct unw_state_record *sr)
{
	sr->return_link_reg = dst;
}
  8.1028 +
/*
 * Record the time at which REGNUM gets saved.  If no location is known
 * yet, default to GR_SAVE; finish_prologue() later resolves that to a
 * concrete general register.
 */
static inline void
desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
{
	struct unw_reg_info *reg = sr->curr.reg + regnum;

	if (reg->where == UNW_WHERE_NONE)
		reg->where = UNW_WHERE_GR_SAVE;
	reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
}
  8.1038 +
/* The spill area starts at psp+16 - 4*PSPOFF (overrides the psp+16
 * default set by desc_prologue()). */
static inline void
desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
{
	sr->spill_offset = 0x10 - 4*pspoff;
}
  8.1044 +
/*
 * Remember the imask bit-vector (2 bits per instruction slot of the
 * region, rounded up to whole bytes) and return a pointer just past it
 * so descriptor decoding can continue.
 */
static inline unsigned char *
desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
{
	sr->imask = imaskp;
	return imaskp + (2*sr->region_len + 7)/8;
}
  8.1051 +
  8.1052 +/*
  8.1053 + * Body descriptors.
  8.1054 + */
/*
 * The epilogue starts T slots before the end of the current region;
 * ECOUNT+1 is the number of nested prologues it unwinds (popped by the
 * next desc_prologue()).
 */
static inline void
desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
{
	sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
	sr->epilogue_count = ecount + 1;
}
  8.1061 +
/*
 * Restore the register state previously recorded under LABEL by
 * desc_label_state().  The current stack is freed first, and the saved
 * stack is deep-copied so the labeled copy can be restored again.
 */
static inline void
desc_copy_state (unw_word label, struct unw_state_record *sr)
{
	struct unw_labeled_state *ls;

	for (ls = sr->labeled_states; ls; ls = ls->next) {
		if (ls->label == label) {
			free_state_stack(&sr->curr);
			memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
			sr->curr.next = dup_state_stack(ls->saved_state.next);
			return;
		}
	}
	printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
}
  8.1077 +
/*
 * Snapshot the current register state under LABEL so a later
 * desc_copy_state() can restore it.  The stacked part is deep-copied;
 * on allocation failure the label is silently dropped (with a printk).
 */
static inline void
desc_label_state (unw_word label, struct unw_state_record *sr)
{
	struct unw_labeled_state *ls;

	ls = alloc_labeled_state();
	if (!ls) {
		printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
		return;
	}
	ls->label = label;
	memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
	ls->saved_state.next = dup_state_stack(sr->curr.next);

	/* insert into list of labeled states: */
	ls->next = sr->labeled_states;
	sr->labeled_states = ls;
}
  8.1096 +
  8.1097 +/*
  8.1098 + * General descriptors.
  8.1099 + */
  8.1100 +
/*
 * Returns non-zero if a descriptor predicated on QP at slot T affects
 * the target instruction.  Side effect: predicate QP is folded into
 * sr->pr_mask so the generated script is only reused when that
 * predicate has the same value.
 */
static inline int
desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
{
	if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
		return 0;
	if (qp > 0) {
		if ((sr->pr_val & (1UL << qp)) == 0)
			return 0;
		sr->pr_mask |= (1UL << qp);
	}
	return 1;
}
  8.1113 +
  8.1114 +static inline void
  8.1115 +desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
  8.1116 +{
  8.1117 +	struct unw_reg_info *r;
  8.1118 +
  8.1119 +	if (!desc_is_active(qp, t, sr))
  8.1120 +		return;
  8.1121 +
  8.1122 +	r = sr->curr.reg + decode_abreg(abreg, 0);
  8.1123 +	r->where = UNW_WHERE_NONE;
  8.1124 +	r->when = UNW_WHEN_NEVER;
  8.1125 +	r->val = 0;
  8.1126 +}
  8.1127 +
/*
 * Predicated register-to-register spill: ABREG is saved in a branch
 * register when X is set, in a floating-point register when bit 7 of
 * YTREG is set, otherwise in a general register.  The low 7 bits of
 * YTREG give the target register number.
 */
static inline void
desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
		     unsigned char ytreg, struct unw_state_record *sr)
{
	enum unw_where where = UNW_WHERE_GR;
	struct unw_reg_info *r;

	if (!desc_is_active(qp, t, sr))
		return;

	if (x)
		where = UNW_WHERE_BR;
	else if (ytreg & 0x80)
		where = UNW_WHERE_FR;

	r = sr->curr.reg + decode_abreg(abreg, 0);
	r->where = where;
	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
	r->val = (ytreg & 0x7f);
}
  8.1148 +
/* Predicated spill of ABREG to psp+16 - 4*PSPOFF (psp-relative). */
static inline void
desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
		     struct unw_state_record *sr)
{
	struct unw_reg_info *r;

	if (!desc_is_active(qp, t, sr))
		return;

	r = sr->curr.reg + decode_abreg(abreg, 1);
	r->where = UNW_WHERE_PSPREL;
	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
	r->val = 0x10 - 4*pspoff;
}
  8.1163 +
/* Predicated spill of ABREG to sp + 4*SPOFF (sp-relative). */
static inline void
desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
		       struct unw_state_record *sr)
{
	struct unw_reg_info *r;

	if (!desc_is_active(qp, t, sr))
		return;

	r = sr->curr.reg + decode_abreg(abreg, 1);
	r->where = UNW_WHERE_SPREL;
	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
	r->val = 4*spoff;
}
  8.1178 +
/*
 * Hooks consumed by the table-driven decoder in unwind_decoder.c: each
 * maps a decoded descriptor to the corresponding desc_*() handler
 * above.  (The "fmt"/"f" argument is the descriptor format and is
 * unused here.)
 */

/* NOTE(review): the expansion below carries its own trailing semicolon. */
#define UNW_DEC_BAD_CODE(code)			printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
						       code);

/*
 * region headers:
 */
#define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg)	desc_prologue(0,r,m,gr,arg)
#define UNW_DEC_PROLOGUE(fmt,b,r,arg)		desc_prologue(b,r,0,32,arg)
/*
 * prologue descriptors:
 */
#define UNW_DEC_ABI(fmt,a,c,arg)		desc_abi(a,c,arg)
#define UNW_DEC_BR_GR(fmt,b,g,arg)		desc_br_gr(b,g,arg)
#define UNW_DEC_BR_MEM(fmt,b,arg)		desc_br_mem(b,arg)
#define UNW_DEC_FRGR_MEM(fmt,g,f,arg)		desc_frgr_mem(g,f,arg)
#define UNW_DEC_FR_MEM(fmt,f,arg)		desc_fr_mem(f,arg)
#define UNW_DEC_GR_GR(fmt,m,g,arg)		desc_gr_gr(m,g,arg)
#define UNW_DEC_GR_MEM(fmt,m,arg)		desc_gr_mem(m,arg)
#define UNW_DEC_MEM_STACK_F(fmt,t,s,arg)	desc_mem_stack_f(t,s,arg)
#define UNW_DEC_MEM_STACK_V(fmt,t,arg)		desc_mem_stack_v(t,arg)
#define UNW_DEC_REG_GR(fmt,r,d,arg)		desc_reg_gr(r,d,arg)
#define UNW_DEC_REG_PSPREL(fmt,r,o,arg)		desc_reg_psprel(r,o,arg)
#define UNW_DEC_REG_SPREL(fmt,r,o,arg)		desc_reg_sprel(r,o,arg)
#define UNW_DEC_REG_WHEN(fmt,r,t,arg)		desc_reg_when(r,t,arg)
#define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
#define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
#define UNW_DEC_PRIUNAT_GR(fmt,r,arg)		desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
#define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg)	desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
#define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg)	desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
#define UNW_DEC_RP_BR(fmt,d,arg)		desc_rp_br(d,arg)
#define UNW_DEC_SPILL_BASE(fmt,o,arg)		desc_spill_base(o,arg)
/* advances the decoder's descriptor cursor past the imask it consumes: */
#define UNW_DEC_SPILL_MASK(fmt,m,arg)		(m = desc_spill_mask(m,arg))
/*
 * body descriptors:
 */
#define UNW_DEC_EPILOGUE(fmt,t,c,arg)		desc_epilogue(t,c,arg)
#define UNW_DEC_COPY_STATE(fmt,l,arg)		desc_copy_state(l,arg)
#define UNW_DEC_LABEL_STATE(fmt,l,arg)		desc_label_state(l,arg)
/*
 * general unwind descriptors:
 */
#define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg)	desc_spill_reg_p(p,t,a,x,y,arg)
#define UNW_DEC_SPILL_REG(f,t,a,x,y,arg)	desc_spill_reg_p(0,t,a,x,y,arg)
#define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg)	desc_spill_psprel_p(p,t,a,o,arg)
#define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg)	desc_spill_psprel_p(0,t,a,o,arg)
#define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg)	desc_spill_sprel_p(p,t,a,o,arg)
#define UNW_DEC_SPILL_SPREL(f,t,a,o,arg)	desc_spill_sprel_p(0,t,a,o,arg)
#define UNW_DEC_RESTORE_P(f,p,t,a,arg)		desc_restore_p(p,t,a,arg)
#define UNW_DEC_RESTORE(f,t,a,arg)		desc_restore_p(0,t,a,arg)

#include "unwind_decoder.c"
  8.1230 +
  8.1231 +
  8.1232 +/* Unwind scripts. */
  8.1233 +
  8.1234 +static inline unw_hash_index_t
  8.1235 +hash (unsigned long ip)
  8.1236 +{
  8.1237 +#	define hashmagic	0x9e3779b97f4a7c16UL	/* based on (sqrt(5)/2-1)*2^64 */
  8.1238 +
  8.1239 +	return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
  8.1240 +#undef hashmagic
  8.1241 +}
  8.1242 +
/*
 * Returns 1 - with script->lock still read-locked - if SCRIPT matches
 * (IP, PR) under the script's predicate mask; the caller must release
 * the read lock when done with the script.  Returns 0 with the lock
 * already dropped otherwise.
 */
static inline long
cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
{
	read_lock(&script->lock);
	if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
		/* keep the read lock... */
		return 1;
	read_unlock(&script->lock);
	return 0;
}
  8.1253 +
/*
 * Find the cached unwind script for (info->ip, info->pr): first try
 * the hint left by the previously used script, then walk the hash
 * chain for the IP.  On a hit the script is returned with its read
 * lock held (see cache_match()); NULL means not cached.
 */
static inline struct unw_script *
script_lookup (struct unw_frame_info *info)
{
	struct unw_script *script = unw.cache + info->hint;
	unsigned short index;
	unsigned long ip, pr;

	if (UNW_DEBUG_ON(0))
		return NULL;	/* Always regenerate scripts in debug mode */

	STAT(++unw.stat.cache.lookups);

	ip = info->ip;
	pr = info->pr;

	if (cache_match(script, ip, pr)) {
		STAT(++unw.stat.cache.hinted_hits);
		return script;
	}

	index = unw.hash[hash(ip)];
	if (index >= UNW_CACHE_SIZE)
		return NULL;

	script = unw.cache + index;
	while (1) {
		if (cache_match(script, ip, pr)) {
			/* update hint; no locking required as single-word writes are atomic */
			STAT(++unw.stat.cache.normal_hits);
			unw.cache[info->prev_script].hint = script - unw.cache;
			return script;
		}
		if (script->coll_chain >= UNW_HASH_SIZE)
			return NULL;
		script = unw.cache + script->coll_chain;
		STAT(++unw.stat.cache.collision_chain_traversals);
	}
}
  8.1292 +
/*
 * Allocate a script for IP by recycling the least-recently-used cache
 * entry.  Returns NULL if the victim's lock cannot be taken (see
 * below).  On returning, a write lock for the SCRIPT is still being
 * held.
 */
static inline struct unw_script *
script_new (unsigned long ip)
{
	struct unw_script *script, *prev, *tmp;
	unw_hash_index_t index;
	unsigned short head;

	STAT(++unw.stat.script.news);

	/*
	 * Can't (easily) use cmpxchg() here because of ABA problem
	 * that is intrinsic in cmpxchg()...
	 */
	head = unw.lru_head;
	script = unw.cache + head;
	unw.lru_head = script->lru_chain;

	/*
	 * We'd deadlock here if we interrupted a thread that is holding a read lock on
	 * script->lock.  Thus, if the write_trylock() fails, we simply bail out.  The
	 * alternative would be to disable interrupts whenever we hold a read-lock, but
	 * that seems silly.
	 */
	if (!write_trylock(&script->lock))
		return NULL;

	/* re-insert script at the tail of the LRU chain: */
	unw.cache[unw.lru_tail].lru_chain = head;
	unw.lru_tail = head;

	/* remove the old script from the hash table (if it's there): */
	if (script->ip) {
		index = hash(script->ip);
		tmp = unw.cache + unw.hash[index];
		prev = NULL;
		while (1) {
			if (tmp == script) {
				/* unlink from the collision chain */
				if (prev)
					prev->coll_chain = tmp->coll_chain;
				else
					unw.hash[index] = tmp->coll_chain;
				break;
			} else
				prev = tmp;
			if (tmp->coll_chain >= UNW_CACHE_SIZE)
			/* old script wasn't in the hash-table */
				break;
			tmp = unw.cache + tmp->coll_chain;
		}
	}

	/* enter new script in the hash table */
	index = hash(ip);
	script->coll_chain = unw.hash[index];
	unw.hash[index] = script - unw.cache;

	script->ip = ip;	/* set new IP while we're holding the locks */

	STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);

	script->flags = 0;
	script->hint = 0;
	script->count = 0;
	return script;
}
  8.1361 +
/* Record the predicate mask/values the script's validity depends on;
 * after this the script can be matched by cache_match(). */
static void
script_finalize (struct unw_script *script, struct unw_state_record *sr)
{
	script->pr_mask = sr->pr_mask;
	script->pr_val = sr->pr_val;
	/*
	 * We could down-grade our write-lock on script->lock here but
	 * the rwlock API doesn't offer atomic lock downgrading, so
	 * we'll just keep the write-lock and release it later when
	 * we're done using the script.
	 */
}
  8.1374 +
  8.1375 +static inline void
  8.1376 +script_emit (struct unw_script *script, struct unw_insn insn)
  8.1377 +{
  8.1378 +	if (script->count >= UNW_MAX_SCRIPT_LEN) {
  8.1379 +		UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
  8.1380 +			__FUNCTION__, UNW_MAX_SCRIPT_LEN);
  8.1381 +		return;
  8.1382 +	}
  8.1383 +	script->insn[script->count++] = insn;
  8.1384 +}
  8.1385 +
/*
 * Emit the script instruction that recovers the NaT bit for preserved
 * register I (r4-r7), chosen according to where the register's value
 * itself was saved.
 */
static inline void
emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
{
	struct unw_reg_info *r = sr->curr.reg + i;
	enum unw_insn_opcode opc;
	struct unw_insn insn;
	unsigned long val = 0;

	switch (r->where) {
	      case UNW_WHERE_GR:
		if (r->val >= 32) {
			/* register got spilled to a stacked register */
			opc = UNW_INSN_SETNAT_TYPE;
			val = UNW_NAT_REGSTK;
		} else
			/* register got spilled to a scratch register */
			opc = UNW_INSN_SETNAT_MEMSTK;
		break;

	      case UNW_WHERE_FR:
		opc = UNW_INSN_SETNAT_TYPE;
		val = UNW_NAT_VAL;
		break;

	      case UNW_WHERE_BR:
		opc = UNW_INSN_SETNAT_TYPE;
		val = UNW_NAT_NONE;
		break;

	      case UNW_WHERE_PSPREL:
	      case UNW_WHERE_SPREL:
		opc = UNW_INSN_SETNAT_MEMSTK;
		break;

	      default:
		UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
			   __FUNCTION__, r->where);
		return;
	}
	insn.opc = opc;
	insn.dst = unw.preg_index[i];
	insn.val = val;
	script_emit(script, insn);
}
  8.1430 +
/*
 * Translate the state-record entry for register I into one or more
 * script instructions that restore it.  Nothing is emitted if the
 * register is not saved, or is saved at or after the target
 * instruction.  For r4-r7 a NaT-recovery instruction is appended; for
 * PSP an extra load dereferences the save location to yield the
 * previous sp value.
 */
static void
compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
{
	struct unw_reg_info *r = sr->curr.reg + i;
	enum unw_insn_opcode opc;
	unsigned long val, rval;
	struct unw_insn insn;
	long need_nat_info;

	if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
		return;

	opc = UNW_INSN_MOVE;
	val = rval = r->val;
	need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);

	switch (r->where) {
	      case UNW_WHERE_GR:
		if (rval >= 32) {
			/* saved in a stacked register */
			opc = UNW_INSN_MOVE_STACKED;
			val = rval - 32;
		} else if (rval >= 4 && rval <= 7) {
			/* saved in another preserved GR: combined move+NaT */
			if (need_nat_info) {
				opc = UNW_INSN_MOVE2;
				need_nat_info = 0;
			}
			val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
		} else if (rval == 0) {
			opc = UNW_INSN_MOVE_CONST;
			val = 0;
		} else {
			/* register got spilled to a scratch register */
			opc = UNW_INSN_MOVE_SCRATCH;
			val = pt_regs_off(rval);
		}
		break;

	      case UNW_WHERE_FR:
		if (rval <= 5)
			val = unw.preg_index[UNW_REG_F2  + (rval -  2)];
		else if (rval >= 16 && rval <= 31)
			val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
		else {
			/* f6-f11 live in pt_regs; anything else is an error */
			opc = UNW_INSN_MOVE_SCRATCH;
			if (rval <= 11)
				val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
			else
				UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
					   __FUNCTION__, rval);
		}
		break;

	      case UNW_WHERE_BR:
		if (rval >= 1 && rval <= 5)
			val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
		else {
			/* b0, b6, b7 are scratch and live in pt_regs */
			opc = UNW_INSN_MOVE_SCRATCH;
			if (rval == 0)
				val = offsetof(struct pt_regs, b0);
			else if (rval == 6)
				val = offsetof(struct pt_regs, b6);
			else
				val = offsetof(struct pt_regs, b7);
		}
		break;

	      case UNW_WHERE_SPREL:
		opc = UNW_INSN_ADD_SP;
		break;

	      case UNW_WHERE_PSPREL:
		opc = UNW_INSN_ADD_PSP;
		break;

	      default:
		UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
			   __FUNCTION__, i, r->where);
		break;
	}
	insn.opc = opc;
	insn.dst = unw.preg_index[i];
	insn.val = val;
	script_emit(script, insn);
	if (need_nat_info)
		emit_nat_info(sr, i, script);

	if (i == UNW_REG_PSP) {
		/*
		 * info->psp must contain the _value_ of the previous
		 * sp, not it's save location.  We get this by
		 * dereferencing the value we just stored in
		 * info->psp:
		 */
		insn.opc = UNW_INSN_LOAD;
		insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
		script_emit(script, insn);
	}
}
  8.1529 +
  8.1530 +static inline const struct unw_table_entry *
  8.1531 +lookup (struct unw_table *table, unsigned long rel_ip)
  8.1532 +{
  8.1533 +	const struct unw_table_entry *e = NULL;
  8.1534 +	unsigned long lo, hi, mid;
  8.1535 +
  8.1536 +	/* do a binary search for right entry: */
  8.1537 +	for (lo = 0, hi = table->length; lo < hi; ) {
  8.1538 +		mid = (lo + hi) / 2;
  8.1539 +		e = &table->array[mid];
  8.1540 +		if (rel_ip < e->start_offset)
  8.1541 +			hi = mid;
  8.1542 +		else if (rel_ip >= e->end_offset)
  8.1543 +			lo = mid + 1;
  8.1544 +		else
  8.1545 +			break;
  8.1546 +	}
  8.1547 +	if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
  8.1548 +		return NULL;
  8.1549 +	return e;
  8.1550 +}
  8.1551 +
  8.1552 +/*
  8.1553 + * Build an unwind script that unwinds from state OLD_STATE to the
  8.1554 + * entrypoint of the function that called OLD_STATE.
  8.1555 + */
  8.1556 +static inline struct unw_script *
  8.1557 +build_script (struct unw_frame_info *info)
  8.1558 +{
  8.1559 +	const struct unw_table_entry *e = NULL;
  8.1560 +	struct unw_script *script = NULL;
  8.1561 +	struct unw_labeled_state *ls, *next;
  8.1562 +	unsigned long ip = info->ip;
  8.1563 +	struct unw_state_record sr;
  8.1564 +	struct unw_table *table;
  8.1565 +	struct unw_reg_info *r;
  8.1566 +	struct unw_insn insn;
  8.1567 +	u8 *dp, *desc_end;
  8.1568 +	u64 hdr;
  8.1569 +	int i;
  8.1570 +	STAT(unsigned long start, parse_start;)
  8.1571 +
  8.1572 +	STAT(++unw.stat.script.builds; start = ia64_get_itc());
  8.1573 +
  8.1574 +	/* build state record */
  8.1575 +	memset(&sr, 0, sizeof(sr));
  8.1576 +	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
  8.1577 +		r->when = UNW_WHEN_NEVER;
  8.1578 +	sr.pr_val = info->pr;
  8.1579 +
  8.1580 +	UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
  8.1581 +	script = script_new(ip);
  8.1582 +	if (!script) {
  8.1583 +		UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n",  __FUNCTION__);
  8.1584 +		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
  8.1585 +		return NULL;
  8.1586 +	}
  8.1587 +	unw.cache[info->prev_script].hint = script - unw.cache;
  8.1588 +
  8.1589 +	/* search the kernels and the modules' unwind tables for IP: */
  8.1590 +
  8.1591 +	STAT(parse_start = ia64_get_itc());
  8.1592 +
  8.1593 +	for (table = unw.tables; table; table = table->next) {
  8.1594 +		if (ip >= table->start && ip < table->end) {
  8.1595 +			e = lookup(table, ip - table->segment_base);
  8.1596 +			break;
  8.1597 +		}
  8.1598 +	}
  8.1599 +	if (!e) {
  8.1600 +		/* no info, return default unwinder (leaf proc, no mem stack, no saved regs)  */
  8.1601 +		UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
  8.1602 +			__FUNCTION__, ip, unw.cache[info->prev_script].ip);
  8.1603 +		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
  8.1604 +		sr.curr.reg[UNW_REG_RP].when = -1;
  8.1605 +		sr.curr.reg[UNW_REG_RP].val = 0;
  8.1606 +		compile_reg(&sr, UNW_REG_RP, script);
  8.1607 +		script_finalize(script, &sr);
  8.1608 +		STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
  8.1609 +		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
  8.1610 +		return script;
  8.1611 +	}
  8.1612 +
  8.1613 +	sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
  8.1614 +			  + (ip & 0xfUL));
  8.1615 +	hdr = *(u64 *) (table->segment_base + e->info_offset);
  8.1616 +	dp =   (u8 *)  (table->segment_base + e->info_offset + 8);
  8.1617 +	desc_end = dp + 8*UNW_LENGTH(hdr);
  8.1618 +
  8.1619 +	while (!sr.done && dp < desc_end)
  8.1620 +		dp = unw_decode(dp, sr.in_body, &sr);
  8.1621 +
  8.1622 +	if (sr.when_target > sr.epilogue_start) {
  8.1623 +		/*
  8.1624 +		 * sp has been restored and all values on the memory stack below
  8.1625 +		 * psp also have been restored.
  8.1626 +		 */
  8.1627 +		sr.curr.reg[UNW_REG_PSP].val = 0;
  8.1628 +		sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
  8.1629 +		sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
  8.1630 +		for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
  8.1631 +			if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
  8.1632 +			    || r->where == UNW_WHERE_SPREL)
  8.1633 +			{
  8.1634 +				r->val = 0;
  8.1635 +				r->where = UNW_WHERE_NONE;
  8.1636 +				r->when = UNW_WHEN_NEVER;
  8.1637 +			}
  8.1638 +	}
  8.1639 +
  8.1640 +	script->flags = sr.flags;
  8.1641 +
  8.1642 +	/*
  8.1643 +	 * If RP didn't get saved, generate entry for the return link
  8.1644 +	 * register.
  8.1645 +	 */
  8.1646 +	if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
  8.1647 +		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
  8.1648 +		sr.curr.reg[UNW_REG_RP].when = -1;
  8.1649 +		sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
  8.1650 +		UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
  8.1651 +			   __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
  8.1652 +			   sr.curr.reg[UNW_REG_RP].val);
  8.1653 +	}
  8.1654 +
  8.1655 +#ifdef UNW_DEBUG
  8.1656 +	UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
  8.1657 +		__FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
  8.1658 +	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
  8.1659 +		if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
  8.1660 +			UNW_DPRINT(1, "  %s <- ", unw.preg_name[r - sr.curr.reg]);
  8.1661 +			switch (r->where) {
  8.1662 +			      case UNW_WHERE_GR:     UNW_DPRINT(1, "r%lu", r->val); break;
  8.1663 +			      case UNW_WHERE_FR:     UNW_DPRINT(1, "f%lu", r->val); break;
  8.1664 +			      case UNW_WHERE_BR:     UNW_DPRINT(1, "b%lu", r->val); break;
  8.1665 +			      case UNW_WHERE_SPREL:  UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
  8.1666 +			      case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
  8.1667 +			      case UNW_WHERE_NONE:
  8.1668 +				UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
  8.1669 +				break;
  8.1670 +
  8.1671 +			      default:
  8.1672 +				UNW_DPRINT(1, "BADWHERE(%d)", r->where);
  8.1673 +				break;
  8.1674 +			}
  8.1675 +			UNW_DPRINT(1, "\t\t%d\n", r->when);
  8.1676 +		}
  8.1677 +	}
  8.1678 +#endif
  8.1679 +
  8.1680 +	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
  8.1681 +
  8.1682 +	/* translate state record into unwinder instructions: */
  8.1683 +
  8.1684 +	/*
  8.1685 +	 * First, set psp if we're dealing with a fixed-size frame;
  8.1686 +	 * subsequent instructions may depend on this value.
  8.1687 +	 */
  8.1688 +	if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
  8.1689 +	    && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
  8.1690 +	    && sr.curr.reg[UNW_REG_PSP].val != 0) {
  8.1691 +		/* new psp is sp plus frame size */
  8.1692 +		insn.opc = UNW_INSN_ADD;
  8.1693 +		insn.dst = offsetof(struct unw_frame_info, psp)/8;
  8.1694 +		insn.val = sr.curr.reg[UNW_REG_PSP].val;	/* frame size */
  8.1695 +		script_emit(script, insn);
  8.1696 +	}
  8.1697 +
  8.1698 +	/* determine where the primary UNaT is: */
  8.1699 +	if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
  8.1700 +		i = UNW_REG_PRI_UNAT_MEM;
  8.1701 +	else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
  8.1702 +		i = UNW_REG_PRI_UNAT_GR;
  8.1703 +	else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
  8.1704 +		i = UNW_REG_PRI_UNAT_MEM;
  8.1705 +	else
  8.1706 +		i = UNW_REG_PRI_UNAT_GR;
  8.1707 +
  8.1708 +	compile_reg(&sr, i, script);
  8.1709 +
  8.1710 +	for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
  8.1711 +		compile_reg(&sr, i, script);
  8.1712 +
  8.1713 +	/* free labeled register states & stack: */
  8.1714 +
  8.1715 +	STAT(parse_start = ia64_get_itc());
  8.1716 +	for (ls = sr.labeled_states; ls; ls = next) {
  8.1717 +		next = ls->next;
  8.1718 +		free_state_stack(&ls->saved_state);
  8.1719 +		free_labeled_state(ls);
  8.1720 +	}
  8.1721 +	free_state_stack(&sr.curr);
  8.1722 +	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
  8.1723 +
  8.1724 +	script_finalize(script, &sr);
  8.1725 +	STAT(unw.stat.script.build_time += ia64_get_itc() - start);
  8.1726 +	return script;
  8.1727 +}
  8.1728 +
  8.1729 +/*
  8.1730 + * Apply the unwinding actions in SCRIPT and update STATE to
  8.1731 + * reflect the machine state that existed upon entry to the function
  8.1732 + * that this unwind script represents.
  8.1733 + */
  8.1734 +static inline void
  8.1735 +run_script (struct unw_script *script, struct unw_frame_info *state)
  8.1736 +{
  8.1737 +	struct unw_insn *ip, *limit, next_insn;
  8.1738 +	unsigned long opc, dst, val, off;
  8.1739 +	unsigned long *s = (unsigned long *) state;
  8.1740 +	STAT(unsigned long start;)
  8.1741 +
  8.1742 +	STAT(++unw.stat.script.runs; start = ia64_get_itc());
  8.1743 +	state->flags = script->flags;
  8.1744 +	ip = script->insn;
  8.1745 +	limit = script->insn + script->count;
  8.1746 +	next_insn = *ip;
  8.1747 +
  8.1748 +	while (ip++ < limit) {
  8.1749 +		opc = next_insn.opc;
  8.1750 +		dst = next_insn.dst;
  8.1751 +		val = next_insn.val;
  8.1752 +		next_insn = *ip;
  8.1753 +
  8.1754 +	  redo:
  8.1755 +		switch (opc) {
  8.1756 +		      case UNW_INSN_ADD:
  8.1757 +			s[dst] += val;
  8.1758 +			break;
  8.1759 +
  8.1760 +		      case UNW_INSN_MOVE2:
  8.1761 +			if (!s[val])
  8.1762 +				goto lazy_init;
  8.1763 +			s[dst+1] = s[val+1];
  8.1764 +			s[dst] = s[val];
  8.1765 +			break;
  8.1766 +
  8.1767 +		      case UNW_INSN_MOVE:
  8.1768 +			if (!s[val])
  8.1769 +				goto lazy_init;
  8.1770 +			s[dst] = s[val];
  8.1771 +			break;
  8.1772 +
  8.1773 +		      case UNW_INSN_MOVE_SCRATCH:
  8.1774 +			if (state->pt) {
  8.1775 +				s[dst] = (unsigned long) get_scratch_regs(state) + val;
  8.1776 +			} else {
  8.1777 +				s[dst] = 0;
  8.1778 +				UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
  8.1779 +					   __FUNCTION__, dst, val);
  8.1780 +			}
  8.1781 +			break;
  8.1782 +
  8.1783 +		      case UNW_INSN_MOVE_CONST:
  8.1784 +			if (val == 0)
  8.1785 +				s[dst] = (unsigned long) &unw.r0;
  8.1786 +			else {
  8.1787 +				s[dst] = 0;
  8.1788 +				UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
  8.1789 +					   __FUNCTION__, val);
  8.1790 +			}
  8.1791 +			break;
  8.1792 +
  8.1793 +
  8.1794 +		      case UNW_INSN_MOVE_STACKED:
  8.1795 +			s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
  8.1796 +								    val);
  8.1797 +			break;
  8.1798 +
  8.1799 +		      case UNW_INSN_ADD_PSP:
  8.1800 +			s[dst] = state->psp + val;
  8.1801 +			break;
  8.1802 +
  8.1803 +		      case UNW_INSN_ADD_SP:
  8.1804 +			s[dst] = state->sp + val;
  8.1805 +			break;
  8.1806 +
  8.1807 +		      case UNW_INSN_SETNAT_MEMSTK:
  8.1808 +			if (!state->pri_unat_loc)
  8.1809 +				state->pri_unat_loc = &state->sw->caller_unat;
  8.1810 +			/* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
  8.1811 +			s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
  8.1812 +			break;
  8.1813 +
  8.1814 +		      case UNW_INSN_SETNAT_TYPE:
  8.1815 +			s[dst+1] = val;
  8.1816 +			break;
  8.1817 +
  8.1818 +		      case UNW_INSN_LOAD:
  8.1819 +#ifdef UNW_DEBUG
  8.1820 +			if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
  8.1821 +#ifndef XEN
  8.1822 +			    || s[val] < TASK_SIZE
  8.1823 +#endif
  8.1824 +				)
  8.1825 +			{
  8.1826 +				UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
  8.1827 +					   __FUNCTION__, s[val]);
  8.1828 +				break;
  8.1829 +			}
  8.1830 +#endif
  8.1831 +			s[dst] = *(unsigned long *) s[val];
  8.1832 +			break;
  8.1833 +		}
  8.1834 +	}
  8.1835 +	STAT(unw.stat.script.run_time += ia64_get_itc() - start);
  8.1836 +	return;
  8.1837 +
  8.1838 +  lazy_init:
  8.1839 +	off = unw.sw_off[val];
  8.1840 +	s[val] = (unsigned long) state->sw + off;
  8.1841 +	if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
  8.1842 +		/*
  8.1843 +		 * We're initializing a general register: init NaT info, too.  Note that
  8.1844 +		 * the offset is a multiple of 8 which gives us the 3 bits needed for
  8.1845 +		 * the type field.
  8.1846 +		 */
  8.1847 +		s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
  8.1848 +	goto redo;
  8.1849 +}
  8.1850 +
  8.1851 +static int
  8.1852 +find_save_locs (struct unw_frame_info *info)
  8.1853 +{
  8.1854 +	int have_write_lock = 0;
  8.1855 +	struct unw_script *scr;
  8.1856 +	unsigned long flags = 0;
  8.1857 +
  8.1858 +	if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf))
  8.1859 +#ifndef XEN
  8.1860 +	    || info->ip < TASK_SIZE
  8.1861 +#endif
  8.1862 +		) {
  8.1863 +		/* don't let obviously bad addresses pollute the cache */
  8.1864 +		/* FIXME: should really be level 0 but it occurs too often. KAO */
  8.1865 +		UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
  8.1866 +		info->rp_loc = NULL;
  8.1867 +		return -1;
  8.1868 +	}
  8.1869 +
  8.1870 +	scr = script_lookup(info);
  8.1871 +	if (!scr) {
  8.1872 +		spin_lock_irqsave(&unw.lock, flags);
  8.1873 +		scr = build_script(info);
  8.1874 +		if (!scr) {
  8.1875 +			spin_unlock_irqrestore(&unw.lock, flags);
  8.1876 +			UNW_DPRINT(0,
  8.1877 +				   "unwind.%s: failed to locate/build unwind script for ip %lx\n",
  8.1878 +				   __FUNCTION__, info->ip);
  8.1879 +			return -1;
  8.1880 +		}
  8.1881 +		have_write_lock = 1;
  8.1882 +	}
  8.1883 +	info->hint = scr->hint;
  8.1884 +	info->prev_script = scr - unw.cache;
  8.1885 +
  8.1886 +	run_script(scr, info);
  8.1887 +
  8.1888 +	if (have_write_lock) {
  8.1889 +		write_unlock(&scr->lock);
  8.1890 +		spin_unlock_irqrestore(&unw.lock, flags);
  8.1891 +	} else
  8.1892 +		read_unlock(&scr->lock);
  8.1893 +	return 0;
  8.1894 +}
  8.1895 +
  8.1896 +int
  8.1897 +unw_unwind (struct unw_frame_info *info)
  8.1898 +{
  8.1899 +	unsigned long prev_ip, prev_sp, prev_bsp;
  8.1900 +	unsigned long ip, pr, num_regs;
  8.1901 +	STAT(unsigned long start, flags;)
  8.1902 +	int retval;
  8.1903 +
  8.1904 +	STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
  8.1905 +
  8.1906 +	prev_ip = info->ip;
  8.1907 +	prev_sp = info->sp;
  8.1908 +	prev_bsp = info->bsp;
  8.1909 +
  8.1910 +	/* restore the ip */
  8.1911 +	if (!info->rp_loc) {
  8.1912 +		/* FIXME: should really be level 0 but it occurs too often. KAO */
  8.1913 +		UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
  8.1914 +			   __FUNCTION__, info->ip);
  8.1915 +		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
  8.1916 +		return -1;
  8.1917 +	}
  8.1918 +	ip = info->ip = *info->rp_loc;
  8.1919 +	if (ip < GATE_ADDR) {
  8.1920 +		UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
  8.1921 +		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
  8.1922 +		return -1;
  8.1923 +	}
  8.1924 +
  8.1925 +	/* restore the cfm: */
  8.1926 +	if (!info->pfs_loc) {
  8.1927 +		UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
  8.1928 +		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
  8.1929 +		return -1;
  8.1930 +	}
  8.1931 +	info->cfm_loc = info->pfs_loc;
  8.1932 +
  8.1933 +	/* restore the bsp: */
  8.1934 +	pr = info->pr;
  8.1935 +	num_regs = 0;
  8.1936 +	if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
  8.1937 +		info->pt = info->sp + 16;
  8.1938 +		if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
  8.1939 +			num_regs = *info->cfm_loc & 0x7f;		/* size of frame */
  8.1940 +		info->pfs_loc =
  8.1941 +			(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
  8.1942 +		UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
  8.1943 +	} else
  8.1944 +		num_regs = (*info->cfm_loc >> 7) & 0x7f;	/* size of locals */
  8.1945 +	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
  8.1946 +	if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
  8.1947 +		UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
  8.1948 +			__FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
  8.1949 +		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
  8.1950 +		return -1;
  8.1951 +	}
  8.1952 +
  8.1953 +	/* restore the sp: */
  8.1954 +	info->sp = info->psp;
  8.1955 +	if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
  8.1956 +		UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
  8.1957 +			__FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
  8.1958 +		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
  8.1959 +		return -1;
  8.1960 +	}
  8.1961 +
  8.1962 +	if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
  8.1963 +		UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
  8.1964 +			   __FUNCTION__, ip);
  8.1965 +		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
  8.1966 +		return -1;
  8.1967 +	}
  8.1968 +
  8.1969 +	/* as we unwind, the saved ar.unat becomes the primary unat: */
  8.1970 +	info->pri_unat_loc = info->unat_loc;
  8.1971 +
  8.1972 +	/* finally, restore the predicates: */
  8.1973 +	unw_get_pr(info, &info->pr);
  8.1974 +
  8.1975 +	retval = find_save_locs(info);
  8.1976 +	STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
  8.1977 +	return retval;
  8.1978 +}
  8.1979 +EXPORT_SYMBOL(unw_unwind);
  8.1980 +
  8.1981 +int
  8.1982 +unw_unwind_to_user (struct unw_frame_info *info)
  8.1983 +{
  8.1984 +	unsigned long ip, sp, pr = 0;
  8.1985 +
  8.1986 +	while (unw_unwind(info) >= 0) {
  8.1987 +		unw_get_sp(info, &sp);
  8.1988 +		if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
  8.1989 +		    < IA64_PT_REGS_SIZE) {
  8.1990 +			UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
  8.1991 +				   __FUNCTION__);
  8.1992 +			break;
  8.1993 +		}
  8.1994 +		if (unw_is_intr_frame(info) &&
  8.1995 +		    (pr & (1UL << PRED_USER_STACK)))
  8.1996 +			return 0;
  8.1997 +		if (unw_get_pr (info, &pr) < 0) {
  8.1998 +			unw_get_rp(info, &ip);
  8.1999 +			UNW_DPRINT(0, "unwind.%s: failed to read "
  8.2000 +				   "predicate register (ip=0x%lx)\n",
  8.2001 +				__FUNCTION__, ip);
  8.2002 +			return -1;
  8.2003 +		}
  8.2004 +	}
  8.2005 +	unw_get_ip(info, &ip);
  8.2006 +	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
  8.2007 +		   __FUNCTION__, ip);
  8.2008 +	return -1;
  8.2009 +}
  8.2010 +EXPORT_SYMBOL(unw_unwind_to_user);
  8.2011 +
  8.2012 +static void
  8.2013 +init_frame_info (struct unw_frame_info *info, struct task_struct *t,
  8.2014 +		 struct switch_stack *sw, unsigned long stktop)
  8.2015 +{
  8.2016 +	unsigned long rbslimit, rbstop, stklimit;
  8.2017 +	STAT(unsigned long start, flags;)
  8.2018 +
  8.2019 +	STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
  8.2020 +
  8.2021 +	/*
  8.2022 +	 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
  8.2023 +	 * don't want to do that because it would be slow as each preserved register would
  8.2024 +	 * have to be processed.  Instead, what we do here is zero out the frame info and
  8.2025 +	 * start the unwind process at the function that created the switch_stack frame.
  8.2026 +	 * When a preserved value in switch_stack needs to be accessed, run_script() will
  8.2027 +	 * initialize the appropriate pointer on demand.
  8.2028 +	 */
  8.2029 +	memset(info, 0, sizeof(*info));
  8.2030 +
  8.2031 +	rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
  8.2032 +	rbstop   = sw->ar_bspstore;
  8.2033 +	if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
  8.2034 +		rbstop = rbslimit;
  8.2035 +
  8.2036 +	stklimit = (unsigned long) t + IA64_STK_OFFSET;
  8.2037 +	if (stktop <= rbstop)
  8.2038 +		stktop = rbstop;
  8.2039 +
  8.2040 +	info->regstk.limit = rbslimit;
  8.2041 +	info->regstk.top   = rbstop;
  8.2042 +	info->memstk.limit = stklimit;
  8.2043 +	info->memstk.top   = stktop;
  8.2044 +	info->task = t;
  8.2045 +	info->sw  = sw;
  8.2046 +	info->sp = info->psp = stktop;
  8.2047 +	info->pr = sw->pr;
  8.2048 +	UNW_DPRINT(3, "unwind.%s:\n"
  8.2049 +		   "  task   0x%lx\n"
  8.2050 +		   "  rbs = [0x%lx-0x%lx)\n"
  8.2051 +		   "  stk = [0x%lx-0x%lx)\n"
  8.2052 +		   "  pr     0x%lx\n"
  8.2053 +		   "  sw     0x%lx\n"
  8.2054 +		   "  sp     0x%lx\n",
  8.2055 +		   __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
  8.2056 +		   info->pr, (unsigned long) info->sw, info->sp);
  8.2057 +	STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
  8.2058 +}
  8.2059 +
  8.2060 +void
  8.2061 +unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
  8.2062 +{
  8.2063 +	unsigned long sol;
  8.2064 +
  8.2065 +	init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
  8.2066 +	info->cfm_loc = &sw->ar_pfs;
  8.2067 +	sol = (*info->cfm_loc >> 7) & 0x7f;
  8.2068 +	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
  8.2069 +	info->ip = sw->b0;
  8.2070 +	UNW_DPRINT(3, "unwind.%s:\n"
  8.2071 +		   "  bsp    0x%lx\n"
  8.2072 +		   "  sol    0x%lx\n"
  8.2073 +		   "  ip     0x%lx\n",
  8.2074 +		   __FUNCTION__, info->bsp, sol, info->ip);
  8.2075 +	find_save_locs(info);
  8.2076 +}
  8.2077 +
  8.2078 +EXPORT_SYMBOL(unw_init_frame_info);
  8.2079 +
  8.2080 +void
  8.2081 +unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
  8.2082 +{
  8.2083 +#ifdef XEN
  8.2084 +	struct switch_stack *sw = (struct switch_stack *) (t->arch._thread.ksp + 16);
  8.2085 +#else
  8.2086 +	struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
  8.2087 +#endif
  8.2088 +
  8.2089 +	UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
  8.2090 +	unw_init_frame_info(info, t, sw);
  8.2091 +}
  8.2092 +EXPORT_SYMBOL(unw_init_from_blocked_task);
  8.2093 +
  8.2094 +static void
  8.2095 +init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
  8.2096 +		   unsigned long gp, const void *table_start, const void *table_end)
  8.2097 +{
  8.2098 +	const struct unw_table_entry *start = table_start, *end = table_end;
  8.2099 +
  8.2100 +	table->name = name;
  8.2101 +	table->segment_base = segment_base;
  8.2102 +	table->gp = gp;
  8.2103 +	table->start = segment_base + start[0].start_offset;
  8.2104 +	table->end = segment_base + end[-1].end_offset;
  8.2105 +	table->array = start;
  8.2106 +	table->length = end - start;
  8.2107 +}
  8.2108 +
  8.2109 +#ifndef XEN
  8.2110 +void *
  8.2111 +unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
  8.2112 +		      const void *table_start, const void *table_end)
  8.2113 +{
  8.2114 +	const struct unw_table_entry *start = table_start, *end = table_end;
  8.2115 +	struct unw_table *table;
  8.2116 +	unsigned long flags;
  8.2117 +
  8.2118 +	if (end - start <= 0) {
  8.2119 +		UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
  8.2120 +			   __FUNCTION__);
  8.2121 +		return NULL;
  8.2122 +	}
  8.2123 +
  8.2124 +	table = kmalloc(sizeof(*table), GFP_USER);
  8.2125 +	if (!table)
  8.2126 +		return NULL;
  8.2127 +
  8.2128 +	init_unwind_table(table, name, segment_base, gp, table_start, table_end);
  8.2129 +
  8.2130 +	spin_lock_irqsave(&unw.lock, flags);
  8.2131 +	{
  8.2132 +		/* keep kernel unwind table at the front (it's searched most commonly): */
  8.2133 +		table->next = unw.tables->next;
  8.2134 +		unw.tables->next = table;
  8.2135 +	}
  8.2136 +	spin_unlock_irqrestore(&unw.lock, flags);
  8.2137 +
  8.2138 +	return table;
  8.2139 +}
  8.2140 +
  8.2141 +void
  8.2142 +unw_remove_unwind_table (void *handle)
  8.2143 +{
  8.2144 +	struct unw_table *table, *prev;
  8.2145 +	struct unw_script *tmp;
  8.2146 +	unsigned long flags;
  8.2147 +	long index;
  8.2148 +
  8.2149 +	if (!handle) {
  8.2150 +		UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
  8.2151 +			   __FUNCTION__);
  8.2152 +		return;
  8.2153 +	}
  8.2154 +
  8.2155 +	table = handle;
  8.2156 +	if (table == &unw.kernel_table) {
  8.2157 +		UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
  8.2158 +			   "no-can-do!\n", __FUNCTION__);
  8.2159 +		return;
  8.2160 +	}
  8.2161 +
  8.2162 +	spin_lock_irqsave(&unw.lock, flags);
  8.2163 +	{
  8.2164 +		/* first, delete the table: */
  8.2165 +
  8.2166 +		for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
  8.2167 +			if (prev->next == table)
  8.2168 +				break;
  8.2169 +		if (!prev) {
  8.2170 +			UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
  8.2171 +				   __FUNCTION__, (void *) table);
  8.2172 +			spin_unlock_irqrestore(&unw.lock, flags);
  8.2173 +			return;
  8.2174 +		}
  8.2175 +		prev->next = table->next;
  8.2176 +	}
  8.2177 +	spin_unlock_irqrestore(&unw.lock, flags);
  8.2178 +
  8.2179 +	/* next, remove hash table entries for this table */
  8.2180 +
  8.2181 +	for (index = 0; index < UNW_HASH_SIZE; ++index) {	/* '<' not '<=': unw.hash[] has UNW_HASH_SIZE entries */
  8.2182 +		tmp = unw.cache + unw.hash[index];
  8.2183 +		if (unw.hash[index] >= UNW_CACHE_SIZE
  8.2184 +		    || tmp->ip < table->start || tmp->ip >= table->end)
  8.2185 +			continue;
  8.2186 +
  8.2187 +		write_lock(&tmp->lock);
  8.2188 +		{
  8.2189 +			if (tmp->ip >= table->start && tmp->ip < table->end) {
  8.2190 +				unw.hash[index] = tmp->coll_chain;
  8.2191 +				tmp->ip = 0;
  8.2192 +			}
  8.2193 +		}
  8.2194 +		write_unlock(&tmp->lock);
  8.2195 +	}
  8.2196 +
  8.2197 +	kfree(table);
  8.2198 +}
  8.2199 +
  8.2200 +static int __init
  8.2201 +create_gate_table (void)
  8.2202 +{
  8.2203 +	const struct unw_table_entry *entry, *start, *end;
  8.2204 +	unsigned long *lp, segbase = GATE_ADDR;
  8.2205 +	size_t info_size, size;
  8.2206 +	char *info;
  8.2207 +	Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
  8.2208 +	int i;
  8.2209 +
  8.2210 +	for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
  8.2211 +		if (phdr->p_type == PT_IA_64_UNWIND) {
  8.2212 +			punw = phdr;
  8.2213 +			break;
  8.2214 +		}
  8.2215 +
  8.2216 +	if (!punw) {
  8.2217 +		printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__);
  8.2218 +		return 0;
  8.2219 +	}
  8.2220 +
  8.2221 +	start = (const struct unw_table_entry *) punw->p_vaddr;
  8.2222 +	end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
  8.2223 +	size  = 0;
  8.2224 +
  8.2225 +	unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
  8.2226 +
  8.2227 +	for (entry = start; entry < end; ++entry)
  8.2228 +		size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
  8.2229 +	size += 8;	/* reserve space for "end of table" marker */
  8.2230 +
  8.2231 +	unw.gate_table = kmalloc(size, GFP_KERNEL);
  8.2232 +	if (!unw.gate_table) {
  8.2233 +		unw.gate_table_size = 0;
  8.2234 +		printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__);
  8.2235 +		return 0;
  8.2236 +	}
  8.2237 +	unw.gate_table_size = size;
  8.2238 +
  8.2239 +	lp = unw.gate_table;
  8.2240 +	info = (char *) unw.gate_table + size;
  8.2241 +
  8.2242 +	for (entry = start; entry < end; ++entry, lp += 3) {
  8.2243 +		info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
  8.2244 +		info -= info_size;
  8.2245 +		memcpy(info, (char *) segbase + entry->info_offset, info_size);
  8.2246 +
  8.2247 +		lp[0] = segbase + entry->start_offset;		/* start */
  8.2248 +		lp[1] = segbase + entry->end_offset;		/* end */
  8.2249 +		lp[2] = info - (char *) unw.gate_table;		/* info */
  8.2250 +	}
  8.2251 +	*lp = 0;	/* end-of-table marker */
  8.2252 +	return 0;
  8.2253 +}
  8.2254 +
  8.2255 +__initcall(create_gate_table);
  8.2256 +#endif // !XEN
  8.2257 +
  8.2258 +void __init
  8.2259 +unw_init (void)
  8.2260 +{
  8.2261 +	extern char __gp[];
  8.2262 +	extern void unw_hash_index_t_is_too_narrow (void);
  8.2263 +	long i, off;
  8.2264 +
  8.2265 +	if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
  8.2266 +		unw_hash_index_t_is_too_narrow();
  8.2267 +
  8.2268 +	unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT);
  8.2269 +	unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
  8.2270 +	unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
  8.2271 +	unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
  8.2272 +	unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT);
  8.2273 +	unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
  8.2274 +	unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
  8.2275 +	unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
  8.2276 +	for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
  8.2277 +		unw.sw_off[unw.preg_index[i]] = off;
  8.2278 +	for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
  8.2279 +		unw.sw_off[unw.preg_index[i]] = off;
  8.2280 +	for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
  8.2281 +		unw.sw_off[unw.preg_index[i]] = off;
  8.2282 +	for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
  8.2283 +		unw.sw_off[unw.preg_index[i]] = off;
  8.2284 +
  8.2285 +	for (i = 0; i < UNW_CACHE_SIZE; ++i) {
  8.2286 +		if (i > 0)
  8.2287 +			unw.cache[i].lru_chain = (i - 1);
  8.2288 +		unw.cache[i].coll_chain = -1;
  8.2289 +		rwlock_init(&unw.cache[i].lock);
  8.2290 +	}
  8.2291 +	unw.lru_head = UNW_CACHE_SIZE - 1;
  8.2292 +	unw.lru_tail = 0;
  8.2293 +
  8.2294 +	init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
  8.2295 +			  __start_unwind, __end_unwind);
  8.2296 +}
  8.2297 +
  8.2298 +/*
  8.2299 + * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
  8.2300 + *
  8.2301 + *	This system call has been deprecated.  The new and improved way to get
  8.2302 + *	at the kernel's unwind info is via the gate DSO.  The address of the
  8.2303 + *	ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
  8.2304 + *
  8.2305 + * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
  8.2306 + *
  8.2307 + * This system call copies the unwind data into the buffer pointed to by BUF and returns
  8.2308 + * the size of the unwind data.  If BUF_SIZE is smaller than the size of the unwind data
  8.2309 + * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
  8.2310 + * unwind data.
  8.2311 + *
  8.2312 + * The first portion of the unwind data contains an unwind table and rest contains the
  8.2313 + * associated unwind info (in no particular order).  The unwind table consists of a table
  8.2314 + * of entries of the form:
  8.2315 + *
  8.2316 + *	u64 start;	(64-bit address of start of function)
  8.2317 + *	u64 end;	(64-bit address of start of function)
  8.2318 + *	u64 info;	(BUF-relative offset to unwind info)
  8.2319 + *
  8.2320 + * The end of the unwind table is indicated by an entry with a START address of zero.
  8.2321 + *
  8.2322 + * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
  8.2323 + * on the format of the unwind info.
  8.2324 + *
  8.2325 + * ERRORS
  8.2326 + *	EFAULT	BUF points outside your accessible address space.
  8.2327 + */
  8.2328 +asmlinkage long
  8.2329 +sys_getunwind (void __user *buf, size_t buf_size)
  8.2330 +{
  8.2331 +	if (buf && buf_size >= unw.gate_table_size)
  8.2332 +		if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
  8.2333 +			return -EFAULT;
  8.2334 +	return unw.gate_table_size;
  8.2335 +}
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/xen/arch/ia64/linux-xen/unwind_decoder.c	Tue Jan 03 19:06:14 2006 +0100
     9.3 @@ -0,0 +1,459 @@
     9.4 +/*
     9.5 + * Copyright (C) 2000 Hewlett-Packard Co
     9.6 + * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
     9.7 + *
     9.8 + * Generic IA-64 unwind info decoder.
     9.9 + *
    9.10 + * This file is used both by the Linux kernel and objdump.  Please keep
    9.11 + * the two copies of this file in sync.
    9.12 + *
    9.13 + * You need to customize the decoder by defining the following
    9.14 + * macros/constants before including this file:
    9.15 + *
    9.16 + *  Types:
    9.17 + *	unw_word	Unsigned integer type with at least 64 bits 
    9.18 + *
    9.19 + *  Register names:
    9.20 + *	UNW_REG_BSP
    9.21 + *	UNW_REG_BSPSTORE
    9.22 + *	UNW_REG_FPSR
    9.23 + *	UNW_REG_LC
    9.24 + *	UNW_REG_PFS
    9.25 + *	UNW_REG_PR
    9.26 + *	UNW_REG_RNAT
    9.27 + *	UNW_REG_PSP
    9.28 + *	UNW_REG_RP
    9.29 + *	UNW_REG_UNAT
    9.30 + *
    9.31 + *  Decoder action macros:
    9.32 + *	UNW_DEC_BAD_CODE(code)
    9.33 + *	UNW_DEC_ABI(fmt,abi,context,arg)
    9.34 + *	UNW_DEC_BR_GR(fmt,brmask,gr,arg)
    9.35 + *	UNW_DEC_BR_MEM(fmt,brmask,arg)
    9.36 + *	UNW_DEC_COPY_STATE(fmt,label,arg)
    9.37 + *	UNW_DEC_EPILOGUE(fmt,t,ecount,arg)
    9.38 + *	UNW_DEC_FRGR_MEM(fmt,grmask,frmask,arg)
    9.39 + *	UNW_DEC_FR_MEM(fmt,frmask,arg)
    9.40 + *	UNW_DEC_GR_GR(fmt,grmask,gr,arg)
    9.41 + *	UNW_DEC_GR_MEM(fmt,grmask,arg)
    9.42 + *	UNW_DEC_LABEL_STATE(fmt,label,arg)
    9.43 + *	UNW_DEC_MEM_STACK_F(fmt,t,size,arg)
    9.44 + *	UNW_DEC_MEM_STACK_V(fmt,t,arg)
    9.45 + *	UNW_DEC_PRIUNAT_GR(fmt,r,arg)
    9.46 + *	UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)
    9.47 + *	UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)
    9.48 + *	UNW_DEC_PRIUNAT_WHEN_PSPREL(fmt,pspoff,arg)
    9.49 + *	UNW_DEC_PRIUNAT_WHEN_SPREL(fmt,spoff,arg)
    9.50 + *	UNW_DEC_PROLOGUE(fmt,body,rlen,arg)
    9.51 + *	UNW_DEC_PROLOGUE_GR(fmt,rlen,mask,grsave,arg)
    9.52 + *	UNW_DEC_REG_PSPREL(fmt,reg,pspoff,arg)
    9.53 + *	UNW_DEC_REG_REG(fmt,src,dst,arg)
    9.54 + *	UNW_DEC_REG_SPREL(fmt,reg,spoff,arg)
    9.55 + *	UNW_DEC_REG_WHEN(fmt,reg,t,arg)
    9.56 + *	UNW_DEC_RESTORE(fmt,t,abreg,arg)
    9.57 + *	UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg)
    9.58 + *	UNW_DEC_SPILL_BASE(fmt,pspoff,arg)
    9.59 + *	UNW_DEC_SPILL_MASK(fmt,imaskp,arg)
    9.60 + *	UNW_DEC_SPILL_PSPREL(fmt,t,abreg,pspoff,arg)
    9.61 + *	UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg)
    9.62 + *	UNW_DEC_SPILL_REG(fmt,t,abreg,x,ytreg,arg)
    9.63 + *	UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg)
    9.64 + *	UNW_DEC_SPILL_SPREL(fmt,t,abreg,spoff,arg)
    9.65 + *	UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,pspoff,arg)
    9.66 + */
    9.67 +
    9.68 +static unw_word
    9.69 +unw_decode_uleb128 (unsigned char **dpp)
    9.70 +{
    9.71 +  unsigned shift = 0;
    9.72 +  unw_word byte, result = 0;
    9.73 +  unsigned char *bp = *dpp;
    9.74 +
    9.75 +  while (1)
    9.76 +    {
    9.77 +      byte = *bp++;
    9.78 +      result |= (byte & 0x7f) << shift;
    9.79 +      if ((byte & 0x80) == 0)
    9.80 +	break;
    9.81 +      shift += 7;
    9.82 +    }
    9.83 +  *dpp = bp;
    9.84 +  return result;
    9.85 +}
    9.86 +
    9.87 +static unsigned char *
    9.88 +unw_decode_x1 (unsigned char *dp, unsigned char code, void *arg)
    9.89 +{
    9.90 +  unsigned char byte1, abreg;
    9.91 +  unw_word t, off;
    9.92 +
    9.93 +  byte1 = *dp++;
    9.94 +  t = unw_decode_uleb128 (&dp);
    9.95 +  off = unw_decode_uleb128 (&dp);
    9.96 +  abreg = (byte1 & 0x7f);
    9.97 +  if (byte1 & 0x80)
    9.98 +	  UNW_DEC_SPILL_SPREL(X1, t, abreg, off, arg);
    9.99 +  else
   9.100 +	  UNW_DEC_SPILL_PSPREL(X1, t, abreg, off, arg);
   9.101 +  return dp;
   9.102 +}
   9.103 +
   9.104 +static unsigned char *
   9.105 +unw_decode_x2 (unsigned char *dp, unsigned char code, void *arg)
   9.106 +{
   9.107 +  unsigned char byte1, byte2, abreg, x, ytreg;
   9.108 +  unw_word t;
   9.109 +
   9.110 +  byte1 = *dp++; byte2 = *dp++;
   9.111 +  t = unw_decode_uleb128 (&dp);
   9.112 +  abreg = (byte1 & 0x7f);
   9.113 +  ytreg = byte2;
   9.114 +  x = (byte1 >> 7) & 1;
   9.115 +  if ((byte1 & 0x80) == 0 && ytreg == 0)
   9.116 +    UNW_DEC_RESTORE(X2, t, abreg, arg);
   9.117 +  else
   9.118 +    UNW_DEC_SPILL_REG(X2, t, abreg, x, ytreg, arg);
   9.119 +  return dp;
   9.120 +}
   9.121 +
   9.122 +static unsigned char *
   9.123 +unw_decode_x3 (unsigned char *dp, unsigned char code, void *arg)
   9.124 +{
   9.125 +  unsigned char byte1, byte2, abreg, qp;
   9.126 +  unw_word t, off;
   9.127 +
   9.128 +  byte1 = *dp++; byte2 = *dp++;
   9.129 +  t = unw_decode_uleb128 (&dp);
   9.130 +  off = unw_decode_uleb128 (&dp);
   9.131 +
   9.132 +  qp = (byte1 & 0x3f);
   9.133 +  abreg = (byte2 & 0x7f);
   9.134 +
   9.135 +  if (byte1 & 0x80)
   9.136 +    UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg);
   9.137 +  else
   9.138 +    UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg);
   9.139 +  return dp;
   9.140 +}
   9.141 +
   9.142 +static unsigned char *
   9.143 +unw_decode_x4 (unsigned char *dp, unsigned char code, void *arg)
   9.144 +{
   9.145 +  unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg;
   9.146 +  unw_word t;
   9.147 +
   9.148 +  byte1 = *dp++; byte2 = *dp++; byte3 = *dp++;
   9.149 +  t = unw_decode_uleb128 (&dp);
   9.150 +
   9.151 +  qp = (byte1 & 0x3f);
   9.152 +  abreg = (byte2 & 0x7f);
   9.153 +  x = (byte2 >> 7) & 1;
   9.154 +  ytreg = byte3;
   9.155 +
   9.156 +  if ((byte2 & 0x80) == 0 && byte3 == 0)
   9.157 +    UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg);
   9.158 +  else
   9.159 +    UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg);
   9.160 +  return dp;
   9.161 +}
   9.162 +
   9.163 +static unsigned char *
   9.164 +unw_decode_r1 (unsigned char *dp, unsigned char code, void *arg)
   9.165 +{
   9.166 +  int body = (code & 0x20) != 0;
   9.167 +  unw_word rlen;
   9.168 +
   9.169 +  rlen = (code & 0x1f);
   9.170 +  UNW_DEC_PROLOGUE(R1, body, rlen, arg);
   9.171 +  return dp;
   9.172 +}
   9.173 +
   9.174 +static unsigned char *
   9.175 +unw_decode_r2 (unsigned char *dp, unsigned char code, void *arg)
   9.176 +{
   9.177 +  unsigned char byte1, mask, grsave;
   9.178 +  unw_word rlen;
   9.179 +
   9.180 +  byte1 = *dp++;
   9.181 +
   9.182 +  mask = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
   9.183 +  grsave = (byte1 & 0x7f);
   9.184 +  rlen = unw_decode_uleb128 (&dp);
   9.185 +  UNW_DEC_PROLOGUE_GR(R2, rlen, mask, grsave, arg);
   9.186 +  return dp;
   9.187 +}
   9.188 +
   9.189 +static unsigned char *
   9.190 +unw_decode_r3 (unsigned char *dp, unsigned char code, void *arg)
   9.191 +{
   9.192 +  unw_word rlen;
   9.193 +
   9.194 +  rlen = unw_decode_uleb128 (&dp);
   9.195 +  UNW_DEC_PROLOGUE(R3, ((code & 0x3) == 1), rlen, arg);
   9.196 +  return dp;
   9.197 +}
   9.198 +
   9.199 +static unsigned char *
   9.200 +unw_decode_p1 (unsigned char *dp, unsigned char code, void *arg)
   9.201 +{
   9.202 +  unsigned char brmask = (code & 0x1f);
   9.203 +
   9.204 +  UNW_DEC_BR_MEM(P1, brmask, arg);
   9.205 +  return dp;
   9.206 +}
   9.207 +
   9.208 +static unsigned char *
   9.209 +unw_decode_p2_p5 (unsigned char *dp, unsigned char code, void *arg)
   9.210 +{
   9.211 +  if ((code & 0x10) == 0)
   9.212 +    {
   9.213 +      unsigned char byte1 = *dp++;
   9.214 +
   9.215 +      UNW_DEC_BR_GR(P2, ((code & 0xf) << 1) | ((byte1 >> 7) & 1),
   9.216 +		    (byte1 & 0x7f), arg);
   9.217 +    }
   9.218 +  else if ((code & 0x08) == 0)
   9.219 +    {
   9.220 +      unsigned char byte1 = *dp++, r, dst;
   9.221 +
   9.222 +      r = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
   9.223 +      dst = (byte1 & 0x7f);
   9.224 +      switch (r)
   9.225 +	{
   9.226 +	case 0: UNW_DEC_REG_GR(P3, UNW_REG_PSP, dst, arg); break;
   9.227 +	case 1: UNW_DEC_REG_GR(P3, UNW_REG_RP, dst, arg); break;
   9.228 +	case 2: UNW_DEC_REG_GR(P3, UNW_REG_PFS, dst, arg); break;
   9.229 +	case 3: UNW_DEC_REG_GR(P3, UNW_REG_PR, dst, arg); break;
   9.230 +	case 4: UNW_DEC_REG_GR(P3, UNW_REG_UNAT, dst, arg); break;
   9.231 +	case 5: UNW_DEC_REG_GR(P3, UNW_REG_LC, dst, arg); break;
   9.232 +	case 6: UNW_DEC_RP_BR(P3, dst, arg); break;
   9.233 +	case 7: UNW_DEC_REG_GR(P3, UNW_REG_RNAT, dst, arg); break;
   9.234 +	case 8: UNW_DEC_REG_GR(P3, UNW_REG_BSP, dst, arg); break;
   9.235 +	case 9: UNW_DEC_REG_GR(P3, UNW_REG_BSPSTORE, dst, arg); break;
   9.236 +	case 10: UNW_DEC_REG_GR(P3, UNW_REG_FPSR, dst, arg); break;
   9.237 +	case 11: UNW_DEC_PRIUNAT_GR(P3, dst, arg); break;
   9.238 +	default: UNW_DEC_BAD_CODE(r); break;
   9.239 +	}
   9.240 +    }
   9.241 +  else if ((code & 0x7) == 0)
   9.242 +    UNW_DEC_SPILL_MASK(P4, dp, arg);
   9.243 +  else if ((code & 0x7) == 1)
   9.244 +    {
   9.245 +      unw_word grmask, frmask, byte1, byte2, byte3;
   9.246 +
   9.247 +      byte1 = *dp++; byte2 = *dp++; byte3 = *dp++;
   9.248 +      grmask = ((byte1 >> 4) & 0xf);
   9.249 +      frmask = ((byte1 & 0xf) << 16) | (byte2 << 8) | byte3;
   9.250 +      UNW_DEC_FRGR_MEM(P5, grmask, frmask, arg);
   9.251 +    }
   9.252 +  else
   9.253 +    UNW_DEC_BAD_CODE(code);
   9.254 +  return dp;
   9.255 +}
   9.256 +
   9.257 +static unsigned char *
   9.258 +unw_decode_p6 (unsigned char *dp, unsigned char code, void *arg)
   9.259 +{
   9.260 +  int gregs = (code & 0x10) != 0;
   9.261 +  unsigned char mask = (code & 0x0f);
   9.262 +
   9.263 +  if (gregs)
   9.264 +    UNW_DEC_GR_MEM(P6, mask, arg);
   9.265 +  else
   9.266 +    UNW_DEC_FR_MEM(P6, mask, arg);
   9.267 +  return dp;
   9.268 +}
   9.269 +
   9.270 +static unsigned char *
   9.271 +unw_decode_p7_p10 (unsigned char *dp, unsigned char code, void *arg)
   9.272 +{
   9.273 +  unsigned char r, byte1, byte2;
   9.274 +  unw_word t, size;
   9.275 +
   9.276 +  if ((code & 0x10) == 0)
   9.277 +    {
   9.278 +      r = (code & 0xf);
   9.279 +      t = unw_decode_uleb128 (&dp);
   9.280 +      switch (r)
   9.281 +	{
   9.282 +	case 0:
   9.283 +	  size = unw_decode_uleb128 (&dp);
   9.284 +	  UNW_DEC_MEM_STACK_F(P7, t, size, arg);
   9.285 +	  break;
   9.286 +
   9.287 +	case 1: UNW_DEC_MEM_STACK_V(P7, t, arg); break;
   9.288 +	case 2: UNW_DEC_SPILL_BASE(P7, t, arg); break;
   9.289 +	case 3: UNW_DEC_REG_SPREL(P7, UNW_REG_PSP, t, arg); break;
   9.290 +	case 4: UNW_DEC_REG_WHEN(P7, UNW_REG_RP, t, arg); break;
   9.291 +	case 5: UNW_DEC_REG_PSPREL(P7, UNW_REG_RP, t, arg); break;
   9.292 +	case 6: UNW_DEC_REG_WHEN(P7, UNW_REG_PFS, t, arg); break;
   9.293 +	case 7: UNW_DEC_REG_PSPREL(P7, UNW_REG_PFS, t, arg); break;
   9.294 +	case 8: UNW_DEC_REG_WHEN(P7, UNW_REG_PR, t, arg); break;
   9.295 +	case 9: UNW_DEC_REG_PSPREL(P7, UNW_REG_PR, t, arg); break;
   9.296 +	case 10: UNW_DEC_REG_WHEN(P7, UNW_REG_LC, t, arg); break;
   9.297 +	case 11: UNW_DEC_REG_PSPREL(P7, UNW_REG_LC, t, arg); break;
   9.298 +	case 12: UNW_DEC_REG_WHEN(P7, UNW_REG_UNAT, t, arg); break;
   9.299 +	case 13: UNW_DEC_REG_PSPREL(P7, UNW_REG_UNAT, t, arg); break;
   9.300 +	case 14: UNW_DEC_REG_WHEN(P7, UNW_REG_FPSR, t, arg); break;
   9.301 +	case 15: UNW_DEC_REG_PSPREL(P7, UNW_REG_FPSR, t, arg); break;
   9.302 +	default: UNW_DEC_BAD_CODE(r); break;
   9.303 +	}
   9.304 +    }
   9.305 +  else
   9.306 +    {
   9.307 +      switch (code & 0xf)
   9.308 +	{
   9.309 +	case 0x0: /* p8 */
   9.310 +	  {
   9.311 +	    r = *dp++;
   9.312 +	    t = unw_decode_uleb128 (&dp);
   9.313 +	    switch (r)
   9.314 +	      {
   9.315 +	      case  1: UNW_DEC_REG_SPREL(P8, UNW_REG_RP, t, arg); break;
   9.316 +	      case  2: UNW_DEC_REG_SPREL(P8, UNW_REG_PFS, t, arg); break;
   9.317 +	      case  3: UNW_DEC_REG_SPREL(P8, UNW_REG_PR, t, arg); break;
   9.318 +	      case  4: UNW_DEC_REG_SPREL(P8, UNW_REG_LC, t, arg); break;
   9.319 +	      case  5: UNW_DEC_REG_SPREL(P8, UNW_REG_UNAT, t, arg); break;
   9.320 +	      case  6: UNW_DEC_REG_SPREL(P8, UNW_REG_FPSR, t, arg); break;
   9.321 +	      case  7: UNW_DEC_REG_WHEN(P8, UNW_REG_BSP, t, arg); break;
   9.322 +	      case  8: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSP, t, arg); break;
   9.323 +	      case  9: UNW_DEC_REG_SPREL(P8, UNW_REG_BSP, t, arg); break;
   9.324 +	      case 10: UNW_DEC_REG_WHEN(P8, UNW_REG_BSPSTORE, t, arg); break;
   9.325 +	      case 11: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
   9.326 +	      case 12: UNW_DEC_REG_SPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
   9.327 +	      case 13: UNW_DEC_REG_WHEN(P8, UNW_REG_RNAT, t, arg); break;
   9.328 +	      case 14: UNW_DEC_REG_PSPREL(P8, UNW_REG_RNAT, t, arg); break;
   9.329 +	      case 15: UNW_DEC_REG_SPREL(P8, UNW_REG_RNAT, t, arg); break;
   9.330 +	      case 16: UNW_DEC_PRIUNAT_WHEN_GR(P8, t, arg); break;
   9.331 +	      case 17: UNW_DEC_PRIUNAT_PSPREL(P8, t, arg); break;
   9.332 +	      case 18: UNW_DEC_PRIUNAT_SPREL(P8, t, arg); break;
   9.333 +	      case 19: UNW_DEC_PRIUNAT_WHEN_MEM(P8, t, arg); break;
   9.334 +	      default: UNW_DEC_BAD_CODE(r); break;
   9.335 +	    }
   9.336 +	  }
   9.337 +	  break;
   9.338 +
   9.339 +	case 0x1:
   9.340 +	  byte1 = *dp++; byte2 = *dp++;
   9.341 +	  UNW_DEC_GR_GR(P9, (byte1 & 0xf), (byte2 & 0x7f), arg);
   9.342 +	  break;
   9.343 +
   9.344 +	case 0xf: /* p10 */
   9.345 +	  byte1 = *dp++; byte2 = *dp++;
   9.346 +	  UNW_DEC_ABI(P10, byte1, byte2, arg);
   9.347 +	  break;
   9.348 +
   9.349 +	case 0x9:
   9.350 +	  return unw_decode_x1 (dp, code, arg);
   9.351 +
   9.352 +	case 0xa:
   9.353 +	  return unw_decode_x2 (dp, code, arg);
   9.354 +
   9.355 +	case 0xb:
   9.356 +	  return unw_decode_x3 (dp, code, arg);
   9.357 +
   9.358 +	case 0xc:
   9.359 +	  return unw_decode_x4 (dp, code, arg);
   9.360 +
   9.361 +	default:
   9.362 +	  UNW_DEC_BAD_CODE(code);
   9.363 +	  break;
   9.364 +	}
   9.365 +    }
   9.366 +  return dp;
   9.367 +}
   9.368 +
   9.369 +static unsigned char *
   9.370 +unw_decode_b1 (unsigned char *dp, unsigned char code, void *arg)
   9.371 +{
   9.372 +  unw_word label = (code & 0x1f);
   9.373 +
   9.374 +  if ((code & 0x20) != 0)
   9.375 +    UNW_DEC_COPY_STATE(B1, label, arg);
   9.376 +  else
   9.377 +    UNW_DEC_LABEL_STATE(B1, label, arg);
   9.378 +  return dp;
   9.379 +}
   9.380 +
   9.381 +static unsigned char *
   9.382 +unw_decode_b2 (unsigned char *dp, unsigned char code, void *arg)
   9.383 +{
   9.384 +  unw_word t;
   9.385 +
   9.386 +  t = unw_decode_uleb128 (&dp);
   9.387 +  UNW_DEC_EPILOGUE(B2, t, (code & 0x1f), arg);
   9.388 +  return dp;
   9.389 +}
   9.390 +
   9.391 +static unsigned char *
   9.392 +unw_decode_b3_x4 (unsigned char *dp, unsigned char code, void *arg)
   9.393 +{
   9.394 +  unw_word t, ecount, label;
   9.395 +
   9.396 +  if ((code & 0x10) == 0)
   9.397 +    {
   9.398 +      t = unw_decode_uleb128 (&dp);
   9.399 +      ecount = unw_decode_uleb128 (&dp);
   9.400 +      UNW_DEC_EPILOGUE(B3, t, ecount, arg);
   9.401 +    }
   9.402 +  else if ((code & 0x07) == 0)
   9.403 +    {
   9.404 +      label = unw_decode_uleb128 (&dp);
   9.405 +      if ((code & 0x08) != 0)
   9.406 +	UNW_DEC_COPY_STATE(B4, label, arg);
   9.407 +      else
   9.408 +	UNW_DEC_LABEL_STATE(B4, label, arg);
   9.409 +    }
   9.410 +  else
   9.411 +    switch (code & 0x7)
   9.412 +      {
   9.413 +      case 1: return unw_decode_x1 (dp, code, arg);
   9.414 +      case 2: return unw_decode_x2 (dp, code, arg);
   9.415 +      case 3: return unw_decode_x3 (dp, code, arg);
   9.416 +      case 4: return unw_decode_x4 (dp, code, arg);
   9.417 +      default: UNW_DEC_BAD_CODE(code); break;
   9.418 +      }
   9.419 +  return dp;
   9.420 +}
   9.421 +
   9.422 +typedef unsigned char *(*unw_decoder) (unsigned char *, unsigned char, void *);
   9.423 +
   9.424 +static unw_decoder unw_decode_table[2][8] =
   9.425 +{
   9.426 +  /* prologue table: */
   9.427 +  {
   9.428 +    unw_decode_r1,	/* 0 */
   9.429 +    unw_decode_r1,
   9.430 +    unw_decode_r2,
   9.431 +    unw_decode_r3,
   9.432 +    unw_decode_p1,	/* 4 */
   9.433 +    unw_decode_p2_p5,
   9.434 +    unw_decode_p6,
   9.435 +    unw_decode_p7_p10
   9.436 +  },
   9.437 +  {
   9.438 +    unw_decode_r1,	/* 0 */
   9.439 +    unw_decode_r1,
   9.440 +    unw_decode_r2,
   9.441 +    unw_decode_r3,
   9.442 +    unw_decode_b1,	/* 4 */
   9.443 +    unw_decode_b1,
   9.444 +    unw_decode_b2,
   9.445 +    unw_decode_b3_x4
   9.446 +  }
   9.447 +};
   9.448 +
   9.449 +/*
   9.450 + * Decode one descriptor and return address of next descriptor.
   9.451 + */
   9.452 +static inline unsigned char *
   9.453 +unw_decode (unsigned char *dp, int inside_body, void *arg)
   9.454 +{
   9.455 +  unw_decoder decoder;
   9.456 +  unsigned char code;
   9.457 +
   9.458 +  code = *dp++;
   9.459 +  decoder = unw_decode_table[inside_body][code >> 5];
   9.460 +  dp = (*decoder) (dp, code, arg);
   9.461 +  return dp;
   9.462 +}
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xen/arch/ia64/linux-xen/unwind_i.h	Tue Jan 03 19:06:14 2006 +0100
    10.3 @@ -0,0 +1,164 @@
    10.4 +/*
    10.5 + * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co
    10.6 + *	David Mosberger-Tang <davidm@hpl.hp.com>
    10.7 + *
    10.8 + * Kernel unwind support.
    10.9 + */
   10.10 +
   10.11 +#define UNW_VER(x)		((x) >> 48)
   10.12 +#define UNW_FLAG_MASK		0x0000ffff00000000
   10.13 +#define UNW_FLAG_OSMASK		0x0000f00000000000
   10.14 +#define UNW_FLAG_EHANDLER(x)	((x) & 0x0000000100000000L)
   10.15 +#define UNW_FLAG_UHANDLER(x)	((x) & 0x0000000200000000L)
   10.16 +#define UNW_LENGTH(x)		((x) & 0x00000000ffffffffL)
   10.17 +
   10.18 +enum unw_register_index {
   10.19 +	/* primary unat: */
   10.20 +	UNW_REG_PRI_UNAT_GR,
   10.21 +	UNW_REG_PRI_UNAT_MEM,
   10.22 +
   10.23 +	/* register stack */
   10.24 +	UNW_REG_BSP,					/* register stack pointer */
   10.25 +	UNW_REG_BSPSTORE,
   10.26 +	UNW_REG_PFS,					/* previous function state */
   10.27 +	UNW_REG_RNAT,
   10.28 +	/* memory stack */
   10.29 +	UNW_REG_PSP,					/* previous memory stack pointer */
   10.30 +	/* return pointer: */
   10.31 +	UNW_REG_RP,
   10.32 +
   10.33 +	/* preserved registers: */
   10.34 +	UNW_REG_R4, UNW_REG_R5, UNW_REG_R6, UNW_REG_R7,
   10.35 +	UNW_REG_UNAT, UNW_REG_PR, UNW_REG_LC, UNW_REG_FPSR,
   10.36 +	UNW_REG_B1, UNW_REG_B2, UNW_REG_B3, UNW_REG_B4, UNW_REG_B5,
   10.37 +	UNW_REG_F2, UNW_REG_F3, UNW_REG_F4, UNW_REG_F5,
   10.38 +	UNW_REG_F16, UNW_REG_F17, UNW_REG_F18, UNW_REG_F19,
   10.39 +	UNW_REG_F20, UNW_REG_F21, UNW_REG_F22, UNW_REG_F23,
   10.40 +	UNW_REG_F24, UNW_REG_F25, UNW_REG_F26, UNW_REG_F27,
   10.41 +	UNW_REG_F28, UNW_REG_F29, UNW_REG_F30, UNW_REG_F31,
   10.42 +	UNW_NUM_REGS
   10.43 +};
   10.44 +
   10.45 +struct unw_info_block {
   10.46 +	u64 header;
   10.47 +	u64 desc[0];		/* unwind descriptors */
   10.48 +	/* personality routine and language-specific data follow behind descriptors */
   10.49 +};
   10.50 +
   10.51 +struct unw_table {
   10.52 +	struct unw_table *next;		/* must be first member! */
   10.53 +	const char *name;
   10.54 +	unsigned long gp;		/* global pointer for this load-module */
   10.55 +	unsigned long segment_base;	/* base for offsets in the unwind table entries */
   10.56 +	unsigned long start;
   10.57 +	unsigned long end;
   10.58 +	const struct unw_table_entry *array;
   10.59 +	unsigned long length;
   10.60 +};
   10.61 +
   10.62 +enum unw_where {
   10.63 +	UNW_WHERE_NONE,			/* register isn't saved at all */
   10.64 +	UNW_WHERE_GR,			/* register is saved in a general register */
   10.65 +	UNW_WHERE_FR,			/* register is saved in a floating-point register */
   10.66 +	UNW_WHERE_BR,			/* register is saved in a branch register */
   10.67 +	UNW_WHERE_SPREL,		/* register is saved on memstack (sp-relative) */
   10.68 +	UNW_WHERE_PSPREL,		/* register is saved on memstack (psp-relative) */
   10.69 +	/*
   10.70 +	 * At the end of each prologue these locations get resolved to
   10.71 +	 * UNW_WHERE_PSPREL and UNW_WHERE_GR, respectively:
   10.72 +	 */
   10.73 +	UNW_WHERE_SPILL_HOME,		/* register is saved in its spill home */
   10.74 +	UNW_WHERE_GR_SAVE		/* register is saved in next general register */
   10.75 +};
   10.76 +
   10.77 +#define UNW_WHEN_NEVER	0x7fffffff
   10.78 +
   10.79 +struct unw_reg_info {
   10.80 +	unsigned long val;		/* save location: register number or offset */
   10.81 +	enum unw_where where;		/* where the register gets saved */
   10.82 +	int when;			/* when the register gets saved */
   10.83 +};
   10.84 +
   10.85 +struct unw_reg_state {
   10.86 +	struct unw_reg_state *next;		/* next (outer) element on state stack */
   10.87 +	struct unw_reg_info reg[UNW_NUM_REGS];	/* register save locations */
   10.88 +};
   10.89 +
   10.90 +struct unw_labeled_state {
   10.91 +	struct unw_labeled_state *next;		/* next labeled state (or NULL) */
   10.92 +	unsigned long label;			/* label for this state */
   10.93 +	struct unw_reg_state saved_state;
   10.94 +};
   10.95 +
   10.96 +struct unw_state_record {
   10.97 +	unsigned int first_region : 1;	/* is this the first region? */
   10.98 +	unsigned int done : 1;		/* are we done scanning descriptors? */
   10.99 +	unsigned int any_spills : 1;	/* got any register spills? */
  10.100 +	unsigned int in_body : 1;	/* are we inside a body (as opposed to a prologue)? */
  10.101 +	unsigned long flags;		/* see UNW_FLAG_* in unwind.h */
  10.102 +
  10.103 +	u8 *imask;			/* imask of spill_mask record or NULL */
  10.104 +	unsigned long pr_val;		/* predicate values */
  10.105 +	unsigned long pr_mask;		/* predicate mask */
  10.106 +	long spill_offset;		/* psp-relative offset for spill base */
  10.107 +	int region_start;
  10.108 +	int region_len;
  10.109 +	int epilogue_start;
  10.110 +	int epilogue_count;
  10.111 +	int when_target;
  10.112 +
  10.113 +	u8 gr_save_loc;			/* next general register to use for saving a register */
  10.114 +	u8 return_link_reg;		/* branch register in which the return link is passed */
  10.115 +
  10.116 +	struct unw_labeled_state *labeled_states;	/* list of all labeled states */
  10.117 +	struct unw_reg_state curr;	/* current state */
  10.118 +};
  10.119 +
  10.120 +enum unw_nat_type {
  10.121 +	UNW_NAT_NONE,		/* NaT not represented */
  10.122 +	UNW_NAT_VAL,		/* NaT represented by NaT value (fp reg) */
  10.123 +	UNW_NAT_MEMSTK,		/* NaT value is in unat word at offset OFF  */
  10.124 +	UNW_NAT_REGSTK		/* NaT is in rnat */
  10.125 +};
  10.126 +
  10.127 +enum unw_insn_opcode {
  10.128 +	UNW_INSN_ADD,			/* s[dst] += val */
  10.129 +	UNW_INSN_ADD_PSP,		/* s[dst] = (s.psp + val) */
  10.130 +	UNW_INSN_ADD_SP,		/* s[dst] = (s.sp + val) */
  10.131 +	UNW_INSN_MOVE,			/* s[dst] = s[val] */
  10.132 +	UNW_INSN_MOVE2,			/* s[dst] = s[val]; s[dst+1] = s[val+1] */
  10.133 +	UNW_INSN_MOVE_STACKED,		/* s[dst] = ia64_rse_skip(*s.bsp, val) */
  10.134 +	UNW_INSN_SETNAT_MEMSTK,		/* s[dst+1].nat.type = MEMSTK;
  10.135 +					   s[dst+1].nat.off = *s.pri_unat - s[dst] */
  10.136 +	UNW_INSN_SETNAT_TYPE,		/* s[dst+1].nat.type = val */
  10.137 +	UNW_INSN_LOAD,			/* s[dst] = *s[val] */
  10.138 +	UNW_INSN_MOVE_SCRATCH,		/* s[dst] = scratch reg "val" */
  10.139 +	UNW_INSN_MOVE_CONST,            /* s[dst] = constant reg "val" */
  10.140 +};
  10.141 +
  10.142 +struct unw_insn {
  10.143 +	unsigned int opc	:  4;
  10.144 +	unsigned int dst	:  9;
  10.145 +	signed int val		: 19;
  10.146 +};
  10.147 +
  10.148 +/*
  10.149 + * Preserved general static registers (r4-r7) give rise to two script
  10.150 + * instructions; everything else yields at most one instruction; at
  10.151 + * the end of the script, the psp gets popped, accounting for one more
  10.152 + * instruction.
  10.153 + */
  10.154 +#define UNW_MAX_SCRIPT_LEN	(UNW_NUM_REGS + 5)
  10.155 +
  10.156 +struct unw_script {
  10.157 +	unsigned long ip;		/* ip this script is for */
  10.158 +	unsigned long pr_mask;		/* mask of predicates script depends on */
  10.159 +	unsigned long pr_val;		/* predicate values this script is for */
  10.160 +	rwlock_t lock;
  10.161 +	unsigned int flags;		/* see UNW_FLAG_* in unwind.h */
  10.162 +	unsigned short lru_chain;	/* used for least-recently-used chain */
  10.163 +	unsigned short coll_chain;	/* used for hash collisions */
  10.164 +	unsigned short hint;		/* hint for next script to try (or -1) */
  10.165 +	unsigned short count;		/* number of instructions in script */
  10.166 +	struct unw_insn insn[UNW_MAX_SCRIPT_LEN];
  10.167 +};
    11.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Tue Jan 03 16:57:41 2006 +0000
    11.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Tue Jan 03 19:06:14 2006 +0100
    11.3 @@ -53,6 +53,7 @@
    11.4  #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
    11.5  
    11.6  
    11.7 +extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
    11.8  extern void rnat_consumption (VCPU *vcpu);
    11.9  #define DOMN_PAL_REQUEST    0x110000
   11.10  
   11.11 @@ -185,8 +186,11 @@ vmx_ia64_handle_break (unsigned long ifa
   11.12  	}else if(iim == DOMN_PAL_REQUEST){
   11.13          pal_emul(current);
   11.14  		vmx_vcpu_increment_iip(current);
   11.15 -    }  else
   11.16 +    } else {
   11.17 +		if (iim == 0) 
   11.18 +			die_if_kernel("bug check", regs, iim);
   11.19  		vmx_reflect_interruption(ifa,isr,iim,11,regs);
   11.20 +    }
   11.21  }
   11.22  
   11.23  
    12.1 --- a/xen/arch/ia64/xen/dom_fw.c	Tue Jan 03 16:57:41 2006 +0000
    12.2 +++ b/xen/arch/ia64/xen/dom_fw.c	Tue Jan 03 19:06:14 2006 +0100
    12.3 @@ -861,12 +861,16 @@ dom_fw_init (struct domain *d, char *arg
    12.4  	bp->console_info.orig_x = 0;
    12.5  	bp->console_info.orig_y = 24;
    12.6  	bp->fpswa = 0;
    12.7 -        bp->initrd_start = (dom0_start+dom0_size) -
    12.8 -                (PAGE_ALIGN(ia64_boot_param->initrd_size) + 4*1024*1024);
    12.9 -        bp->initrd_size = ia64_boot_param->initrd_size;
   12.10 -                printf(" initrd start %0xlx", bp->initrd_start);
   12.11 -                printf(" initrd size %0xlx", bp->initrd_size);
   12.12 -
   12.13 -
   12.14 +	if (d == dom0) {
   12.15 +		bp->initrd_start = (dom0_start+dom0_size) -
   12.16 +		  (PAGE_ALIGN(ia64_boot_param->initrd_size) + 4*1024*1024);
   12.17 +		bp->initrd_size = ia64_boot_param->initrd_size;
   12.18 +	}
   12.19 +	else {
   12.20 +		bp->initrd_start = d->arch.initrd_start;
   12.21 +		bp->initrd_size  = d->arch.initrd_len;
   12.22 +	}
    12.23 +	printf(" initrd start 0x%lx", bp->initrd_start);
    12.24 +	printf(" initrd size 0x%lx", bp->initrd_size);
   12.25  	return bp;
   12.26  }
    13.1 --- a/xen/arch/ia64/xen/domain.c	Tue Jan 03 16:57:41 2006 +0000
    13.2 +++ b/xen/arch/ia64/xen/domain.c	Tue Jan 03 19:06:14 2006 +0100
    13.3 @@ -19,6 +19,7 @@
    13.4  #include <xen/delay.h>
    13.5  #include <xen/softirq.h>
    13.6  #include <xen/mm.h>
    13.7 +#include <xen/iocap.h>
    13.8  #include <asm/ptrace.h>
    13.9  #include <asm/system.h>
   13.10  #include <asm/io.h>
   13.11 @@ -293,16 +294,7 @@ int arch_set_info_guest(struct vcpu *v, 
   13.12  	d->arch.cmdline      = c->cmdline;
   13.13  	new_thread(v, regs->cr_iip, 0, 0);
   13.14  
   13.15 -#ifdef CONFIG_IA64_SPLIT_CACHE
   13.16 -    /* Sync d/i cache conservatively */
   13.17 -    if (!running_on_sim) {
   13.18 -        ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
   13.19 -        if ((ret!=PAL_STATUS_SUCCESS)&& (ret!=PAL_STATUS_UNIMPLEMENTED))
   13.20 -            printk("PAL CACHE FLUSH failed for dom0.\n");
   13.21 -        else
   13.22 -            printk("Sync i/d cache for guest SUCC\n");
   13.23 -    }
   13.24 -#endif
   13.25 +	sync_split_caches();
   13.26   	v->vcpu_info->arch.evtchn_vector = c->vcpu.evtchn_vector;
   13.27  	if ( c->vcpu.privregs && copy_from_user(v->arch.privregs,
   13.28  			   c->vcpu.privregs, sizeof(mapped_regs_t))) {
   13.29 @@ -430,7 +422,7 @@ extern unsigned long vhpt_paddr, vhpt_pe
   13.30  		{
   13.31  			p = alloc_domheap_page(d);
   13.32  			// zero out pages for security reasons
   13.33 -			memset(__va(page_to_phys(p)),0,PAGE_SIZE);
   13.34 +			if (p) memset(__va(page_to_phys(p)),0,PAGE_SIZE);
   13.35  		}
   13.36  		if (unlikely(!p)) {
   13.37  printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
   13.38 @@ -917,9 +909,9 @@ int construct_dom0(struct domain *d,
   13.39  	memset(si, 0, PAGE_SIZE);
   13.40  	d->shared_info->arch.start_info_pfn = __pa(si) >> PAGE_SHIFT;
   13.41  	sprintf(si->magic, "xen-%i.%i-ia64", XEN_VERSION, XEN_SUBVERSION);
   13.42 +	si->nr_pages     = d->tot_pages;
   13.43  
   13.44  #if 0
   13.45 -	si->nr_pages     = d->tot_pages;
   13.46  	si->shared_info  = virt_to_phys(d->shared_info);
   13.47  	si->flags        = SIF_PRIVILEGED | SIF_INITDOMAIN;
   13.48  	//si->pt_base      = vpt_start;
   13.49 @@ -964,16 +956,7 @@ int construct_dom0(struct domain *d,
   13.50  
   13.51  	new_thread(v, pkern_entry, 0, 0);
   13.52  	physdev_init_dom0(d);
   13.53 -#ifdef CONFIG_IA64_SPLIT_CACHE
   13.54 -    /* Sync d/i cache conservatively */
   13.55 -    if (!running_on_sim) {
   13.56 -        ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
   13.57 -        if ((ret!=PAL_STATUS_SUCCESS)&& (ret!=PAL_STATUS_UNIMPLEMENTED))
   13.58 -            printk("PAL CACHE FLUSH failed for dom0.\n");
   13.59 -        else
   13.60 -            printk("Sync i/d cache for guest SUCC\n");
   13.61 -    }
   13.62 -#endif
   13.63 +	sync_split_caches();
   13.64  
   13.65  	// FIXME: Hack for keyboard input
   13.66  #ifdef CLONE_DOMAIN0
   13.67 @@ -1032,16 +1015,7 @@ int construct_domU(struct domain *d,
   13.68  #endif
   13.69  	new_thread(v, pkern_entry, 0, 0);
   13.70  	printk("new_thread returns\n");
   13.71 -#ifdef CONFIG_IA64_SPLIT_CACHE
   13.72 -    /* Sync d/i cache conservatively */
   13.73 -    if (!running_on_sim) {
   13.74 -        ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
   13.75 -        if ((ret!=PAL_STATUS_SUCCESS)&& (ret!=PAL_STATUS_UNIMPLEMENTED))
   13.76 -            printk("PAL CACHE FLUSH failed for dom0.\n");
   13.77 -        else
   13.78 -            printk("Sync i/d cache for guest SUCC\n");
   13.79 -    }
   13.80 -#endif
   13.81 +	sync_split_caches();
   13.82  	__set_bit(0x30, VCPU(v, delivery_mask));
   13.83  
   13.84  	return 0;
   13.85 @@ -1055,16 +1029,7 @@ void reconstruct_domU(struct vcpu *v)
   13.86  		v->domain->domain_id);
   13.87  	loaddomainelfimage(v->domain,v->domain->arch.image_start);
   13.88  	new_thread(v, v->domain->arch.entry, 0, 0);
   13.89 -#ifdef CONFIG_IA64_SPLIT_CACHE
   13.90 -    /* Sync d/i cache conservatively */
   13.91 -    if (!running_on_sim) {
   13.92 -        ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
   13.93 -        if ((ret!=PAL_STATUS_SUCCESS)&& (ret!=PAL_STATUS_UNIMPLEMENTED))
   13.94 -            printk("PAL CACHE FLUSH failed for dom0.\n");
   13.95 -        else
   13.96 -            printk("Sync i/d cache for guest SUCC\n");
   13.97 -    }
   13.98 -#endif
   13.99 +	sync_split_caches();
  13.100  }
  13.101  #endif
  13.102  
    14.1 --- a/xen/arch/ia64/xen/hyperprivop.S	Tue Jan 03 16:57:41 2006 +0000
    14.2 +++ b/xen/arch/ia64/xen/hyperprivop.S	Tue Jan 03 19:06:14 2006 +0100
    14.3 @@ -543,6 +543,13 @@ GLOBAL_ENTRY(fast_break_reflect)
    14.4  	extr.u r21=r30,IA64_PSR_PP_BIT,1 ;;
    14.5  	cmp.ne p7,p0=r21,r0 ;;
    14.6  (p7)	br.spnt.few dispatch_break_fault ;;
    14.7 +        movl r20=IA64_PSR_CPL ;; 
    14.8 +        and r22=r20,r30 ;;
    14.9 +        cmp.ne p7,p0=r22,r0
   14.10 +(p7)    br.spnt.many 1f ;;
   14.11 +        cmp.eq p7,p0=r17,r0
   14.12 +(p7)    br.spnt.few dispatch_break_fault ;;
   14.13 +1:
   14.14  #if 1 /* special handling in case running on simulator */
   14.15  	movl r20=first_break;;
   14.16  	ld4 r23=[r20];;
    15.1 --- a/xen/arch/ia64/xen/ivt.S	Tue Jan 03 16:57:41 2006 +0000
    15.2 +++ b/xen/arch/ia64/xen/ivt.S	Tue Jan 03 19:06:14 2006 +0100
    15.3 @@ -839,6 +839,8 @@ ENTRY(break_fault)
    15.4  	mov r17=cr.iim
    15.5  	mov r31=pr
    15.6  	;;
    15.7 +	cmp.eq p7,p0=r17,r0
    15.8 +(p7)	br.spnt.few dispatch_break_fault ;;
    15.9  	movl r18=XSI_PSR_IC
   15.10  	;;
   15.11  	ld8 r19=[r18]
    16.1 --- a/xen/arch/ia64/xen/process.c	Tue Jan 03 16:57:41 2006 +0000
    16.2 +++ b/xen/arch/ia64/xen/process.c	Tue Jan 03 19:06:14 2006 +0100
    16.3 @@ -33,6 +33,7 @@
    16.4  #include <xen/multicall.h>
    16.5  
    16.6  extern unsigned long vcpu_get_itir_on_fault(struct vcpu *, UINT64);
    16.7 +extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
    16.8  
    16.9  extern unsigned long dom0_start, dom0_size;
   16.10  
   16.11 @@ -686,6 +687,8 @@ ia64_handle_break (unsigned long ifa, st
   16.12  			vcpu_increment_iip(current);
   16.13  	}
   16.14  	else {
   16.15 +		if (iim == 0) 
   16.16 +			die_if_kernel("bug check", regs, iim);
   16.17  		PSCB(v,iim) = iim;
   16.18  		reflect_interruption(isr,regs,IA64_BREAK_VECTOR);
   16.19  	}
    17.1 --- a/xen/arch/ia64/xen/xenmisc.c	Tue Jan 03 16:57:41 2006 +0000
    17.2 +++ b/xen/arch/ia64/xen/xenmisc.c	Tue Jan 03 19:06:14 2006 +0100
    17.3 @@ -25,7 +25,6 @@ unsigned long wait_init_idle;
    17.4  int phys_proc_id[NR_CPUS];
    17.5  unsigned long loops_per_jiffy = (1<<12);	// from linux/init/main.c
    17.6  
    17.7 -void unw_init(void) { printf("unw_init() skipped (NEED FOR KERNEL UNWIND)\n"); }
    17.8  void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
    17.9  void ia64_mca_cpu_init(void *x) { }
   17.10  void ia64_patch_mckinley_e9(unsigned long a, unsigned long b) { }
   17.11 @@ -180,11 +179,6 @@ void pgtable_quicklist_free(void *pgtabl
   17.12  // from arch/ia64/traps.c
   17.13  ///////////////////////////////
   17.14  
   17.15 -void show_registers(struct pt_regs *regs)
   17.16 -{
   17.17 -	printf("*** ADD REGISTER DUMP HERE FOR DEBUGGING\n");
   17.18 -}
   17.19 -
   17.20  int is_kernel_text(unsigned long addr)
   17.21  {
   17.22  	extern char _stext[], _etext[];
   17.23 @@ -236,7 +230,13 @@ void sys_exit(void)
   17.24  
   17.25  void die_if_kernel(char *str, struct pt_regs *regs, long err) /* __attribute__ ((noreturn)) */
   17.26  {
   17.27 -	printk("die_if_kernel: called, not implemented\n");
   17.28 +	if (user_mode(regs))
   17.29 +		return;
   17.30 +
   17.31 +	printk("%s: %s %ld\n", __func__, str, err);
   17.32 +	debugtrace_dump();
   17.33 +	show_registers(regs);
   17.34 +	domain_crash_synchronous();
   17.35  }
   17.36  
   17.37  long
   17.38 @@ -368,3 +368,23 @@ loop:
   17.39  		goto loop;
   17.40  	}
   17.41  }
   17.42 +
   17.43 +/* FIXME: for the forseeable future, all cpu's that enable VTi have split
   17.44 + *  caches and all cpu's that have split caches enable VTi.  This may
   17.45 + *  eventually be untrue though. */
   17.46 +#define cpu_has_split_cache	vmx_enabled
   17.47 +extern unsigned int vmx_enabled;
   17.48 +
   17.49 +void sync_split_caches(void)
   17.50 +{
   17.51 +	unsigned long ret, progress = 0;
   17.52 +
   17.53 +	if (cpu_has_split_cache) {
   17.54 +		/* Sync d/i cache conservatively */
   17.55 +		ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
   17.56 +		if ((ret!=PAL_STATUS_SUCCESS)&& (ret!=PAL_STATUS_UNIMPLEMENTED))
   17.57 +			printk("PAL CACHE FLUSH failed\n");
   17.58 +		else printk("Sync i/d cache for guest SUCC\n");
   17.59 +	}
   17.60 +	else printk("sync_split_caches ignored for CPU with no split cache\n");
   17.61 +}
    18.1 --- a/xen/include/asm-ia64/linux-xen/asm/pal.h	Tue Jan 03 16:57:41 2006 +0000
    18.2 +++ b/xen/include/asm-ia64/linux-xen/asm/pal.h	Tue Jan 03 19:06:14 2006 +0100
    18.3 @@ -925,7 +925,11 @@ static inline s64
    18.4  ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector)
    18.5  {
    18.6  	struct ia64_pal_retval iprv;
    18.7 +#ifdef XEN	/* fix a bug in Linux... PAL has changed */
    18.8 +	PAL_CALL(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, *progress);
    18.9 +#else
   18.10  	PAL_CALL_IC_OFF(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, *progress);
   18.11 +#endif
   18.12  	if (vector)
   18.13  		*vector = iprv.v0;
   18.14  	*progress = iprv.v1;