ia64/xen-unstable

changeset 10682:efdfbb40db3f

[IA64] INIT handler to support the coredump feature

This patch adds support for the coredump feature.
A dump facility needs each CPU's registers saved into memory,
so this patch implements that and adds a hook that calls the dump function.
Because all CPUs have to save their registers,
the monarch and the slaves run the same handler.

This patch is useful not only for the dump feature but also for debugging Xen,
because the INIT handler can always show the registers and a call trace.
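
As a rough illustration of the data layout this change introduces (the
struct below uses simplified names and types, not the exact Xen
ia64_mca_sal_to_os_state_t definition), each CPU now gets its own
SAL-to-OS handoff area plus a per-CPU physical pointer to it, so the
physical-mode INIT/MCA entry code can locate its own area instead of
one shared global:

	/*
	 * Illustrative sketch only -- simplified names and types, not the
	 * exact Xen definitions.  The field order follows the register list
	 * saved by SAL_TO_OS_MCA_HANDOFF_STATE_SAVE in mca_asm.S
	 * (GR1, GR8-GR12, r17, r18).
	 */
	#define NR_CPUS 64

	struct sal_to_os_handoff {
		unsigned long os_gp;            /* GR1:  OS GP                       */
		unsigned long pal_proc;         /* GR8:  PAL_PROC entry point        */
		unsigned long sal_proc;         /* GR9:  SAL_PROC entry point        */
		unsigned long sal_gp;           /* GR10: SAL GP                      */
		unsigned long rendez_state;     /* GR11: rendezvous state            */
		unsigned long sal_check_ra;     /* GR12: return address in SAL_CHECK */
		unsigned long pal_min_state;    /* r17:  PAL minstate area           */
		unsigned long proc_state_param; /* r18:  processor state parameter   */
	};

	/* one handoff area per CPU instead of a single shared global ... */
	static struct sal_to_os_handoff handoff[NR_CPUS];

	/* ... plus the physical address of each CPU's area, published at
	 * CPU init so the physical-mode assembly stub can load it */
	static unsigned long handoff_pa[NR_CPUS];

	static void publish_handoff_area(int cpu)
	{
		/* stands in for __pa(&ia64_sal_to_os_handoff_state[cpu]) */
		handoff_pa[cpu] = (unsigned long)&handoff[cpu];
	}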

Signed-off-by: Akio Takebe <takebe_akio@jp.fujitsu.com>
[Updated for linux-xen location]
Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author awilliam@xenbuild.aw
date Thu Jul 06 14:07:49 2006 -0600 (2006-07-06)
parents 00f20f7c0e23
children 97c290c7b015
files xen/arch/ia64/linux-xen/mca.c xen/arch/ia64/linux-xen/mca_asm.S
line diff
     1.1 --- a/xen/arch/ia64/linux-xen/mca.c	Thu Jul 06 13:27:47 2006 -0600
     1.2 +++ b/xen/arch/ia64/linux-xen/mca.c	Thu Jul 06 14:07:49 2006 -0600
     1.3 @@ -88,7 +88,12 @@
     1.4  #endif
     1.5  
     1.6  /* Used by mca_asm.S */
     1.7 +#ifndef XEN
     1.8  ia64_mca_sal_to_os_state_t	ia64_sal_to_os_handoff_state;
     1.9 +#else
    1.10 +ia64_mca_sal_to_os_state_t	ia64_sal_to_os_handoff_state[NR_CPUS];
    1.11 +DEFINE_PER_CPU(u64, ia64_sal_to_os_handoff_state_addr); 
    1.12 +#endif
    1.13  ia64_mca_os_to_sal_state_t	ia64_os_to_sal_handoff_state;
    1.14  u64				ia64_mca_serialize;
    1.15  DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
    1.16 @@ -478,6 +483,43 @@ fetch_min_state (pal_min_state_area_t *m
    1.17  	PUT_NAT_BIT(sw->caller_unat, &pt->r30);	PUT_NAT_BIT(sw->caller_unat, &pt->r31);
    1.18  }
    1.19  
    1.20 +#ifdef XEN
    1.21 +static spinlock_t init_dump_lock = SPIN_LOCK_UNLOCKED;
    1.22 +static spinlock_t show_stack_lock = SPIN_LOCK_UNLOCKED;
    1.23 +
    1.24 +static void
    1.25 +save_ksp (struct unw_frame_info *info, void *arg)
    1.26 +{
    1.27 +	current->arch._thread.ksp = (__u64)(info->sw) - 16;
    1.28 +	wmb();
    1.29 +}
    1.30 +
    1.31 +/* FIXME */
    1.32 +int try_crashdump(struct pt_regs *a) { return 0; }
    1.33 +
    1.34 +#define CPU_FLUSH_RETRY_MAX 5
    1.35 +static void
    1.36 +init_cache_flush (void)
    1.37 +{
    1.38 +	unsigned long flags;
    1.39 +	int i;
    1.40 +	s64 rval = 0;
    1.41 +	u64 vector, progress = 0;
    1.42 +
    1.43 +	for (i = 0; i < CPU_FLUSH_RETRY_MAX; i++) {
    1.44 +		local_irq_save(flags);
    1.45 +		rval = ia64_pal_cache_flush(PAL_CACHE_TYPE_INSTRUCTION_DATA,
    1.46 +		                            0, &progress, &vector);
    1.47 +		local_irq_restore(flags);
    1.48 +		if (rval == 0){
    1.49 +			printk("\nPAL cache flush success\n");
    1.50 +			return;
    1.51 +		}
    1.52 +	}
    1.53 +	printk("\nPAL cache flush failed. status=%ld\n",rval);
    1.54 +}
    1.55 +#endif /* XEN */
    1.56 +
    1.57  static void
    1.58  init_handler_platform (pal_min_state_area_t *ms,
    1.59  		       struct pt_regs *pt, struct switch_stack *sw)
    1.60 @@ -494,18 +536,36 @@ init_handler_platform (pal_min_state_are
    1.61  	 */
    1.62  	printk("Delaying for 5 seconds...\n");
    1.63  	udelay(5*1000000);
    1.64 +#ifdef XEN
    1.65 +	fetch_min_state(ms, pt, sw);
    1.66 +	spin_lock(&show_stack_lock);
    1.67 +#endif
    1.68  	show_min_state(ms);
    1.69  
    1.70 -#ifndef XEN
    1.71 -	printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
    1.72 +#ifdef XEN
    1.73 +	printk("Backtrace of current vcpu (vcpu_id %d)\n", current->vcpu_id);
    1.74  #else
    1.75 -	printk("Backtrace of current vcpu (vcpu_id %d)\n", current->vcpu_id);
    1.76 +	printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
    1.77 +	fetch_min_state(ms, pt, sw);
    1.78  #endif
    1.79 -	fetch_min_state(ms, pt, sw);
    1.80  	unw_init_from_interruption(&info, current, pt, sw);
    1.81  	ia64_do_show_stack(&info, NULL);
    1.82 +#ifdef XEN
    1.83 +	unw_init_running(save_ksp, NULL);
    1.84 +	spin_unlock(&show_stack_lock);
    1.85 +	wmb();
    1.86 +	init_cache_flush();
    1.87  
    1.88 -#ifndef XEN
    1.89 +	if (spin_trylock(&init_dump_lock)) {
    1.90 +#ifdef CONFIG_SMP
    1.91 +		udelay(5*1000000);
    1.92 +#endif
    1.93 +		if (try_crashdump(pt) == 0)
    1.94 +			printk("\nINIT dump complete.  Please reboot now.\n");
    1.95 +	}
    1.96 +	printk("%s: CPU%d init handler done\n",
    1.97 +	       __FUNCTION__, smp_processor_id());
    1.98 +#else /* XEN */
    1.99  #ifdef CONFIG_SMP
   1.100  	/* read_trylock() would be handy... */
   1.101  	if (!tasklist_lock.write_lock)
   1.102 @@ -525,9 +585,9 @@ init_handler_platform (pal_min_state_are
   1.103  	if (!tasklist_lock.write_lock)
   1.104  		read_unlock(&tasklist_lock);
   1.105  #endif
   1.106 -#endif /* !XEN */
   1.107  
   1.108  	printk("\nINIT dump complete.  Please reboot now.\n");
   1.109 +#endif /* XEN */
   1.110  	while (1);			/* hang city if no debugger */
   1.111  }
   1.112  
   1.113 @@ -1158,16 +1218,20 @@ void
   1.114  ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
   1.115  {
   1.116  	pal_min_state_area_t *ms;
   1.117 +#ifdef XEN
   1.118 +	int cpu = smp_processor_id();
   1.119 +
   1.120 +	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
   1.121 +	       ia64_sal_to_os_handoff_state[cpu].proc_state_param);
   1.122 +#endif
   1.123  
   1.124  #ifndef XEN
   1.125  	oops_in_progress = 1;	/* avoid deadlock in printk, but it makes recovery dodgy */
   1.126  	console_loglevel = 15;	/* make sure printks make it to console */
   1.127 -#endif
   1.128  
   1.129  	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
   1.130  		ia64_sal_to_os_handoff_state.proc_state_param);
   1.131  
   1.132 -#ifndef XEN
   1.133  	/*
   1.134  	 * Address of minstate area provided by PAL is physical,
   1.135  	 * uncacheable (bit 63 set). Convert to Linux virtual
   1.136 @@ -1176,7 +1240,7 @@ ia64_init_handler (struct pt_regs *pt, s
   1.137  	ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
   1.138  #else
   1.139  	/* Xen virtual address in region 7. */
   1.140 -	ms = __va((pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state));
   1.141 +	ms = __va((pal_min_state_area_t *)(ia64_sal_to_os_handoff_state[cpu].pal_min_state));
   1.142  #endif
   1.143  
   1.144  	init_handler_platform(ms, pt, sw);	/* call platform specific routines */
   1.145 @@ -1273,7 +1337,14 @@ ia64_mca_cpu_init(void *cpu_data)
   1.146  	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
   1.147  #ifdef XEN
   1.148  	IA64_MCA_DEBUG("%s: CPU#%d, ia64_mca_data=%lx\n", __FUNCTION__,
   1.149 -	               smp_processor_id(),__get_cpu_var(ia64_mca_data));
   1.150 +	               smp_processor_id(), __get_cpu_var(ia64_mca_data));
   1.151 +
   1.152 +	/* sal_to_os_handoff for smp support */
   1.153 +	__get_cpu_var(ia64_sal_to_os_handoff_state_addr) =
   1.154 +	              __pa(&ia64_sal_to_os_handoff_state[smp_processor_id()]);
   1.155 +	IA64_MCA_DEBUG("%s: CPU#%d, ia64_sal_to_os=%lx\n", __FUNCTION__,
   1.156 +	               smp_processor_id(),
   1.157 +		       __get_cpu_var(ia64_sal_to_os_handoff_state_addr));
   1.158  #endif
   1.159  
   1.160  	/*
   1.161 @@ -1325,6 +1396,8 @@ ia64_mca_init(void)
   1.162  #ifdef XEN
   1.163  	s64 rc;
   1.164  
   1.165 +	slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
   1.166 +
   1.167  	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
   1.168  #else
   1.169  	int i;
     2.1 --- a/xen/arch/ia64/linux-xen/mca_asm.S	Thu Jul 06 13:27:47 2006 -0600
     2.2 +++ b/xen/arch/ia64/linux-xen/mca_asm.S	Thu Jul 06 14:07:49 2006 -0600
     2.3 @@ -48,6 +48,20 @@
     2.4   *		5. GR11 = Rendez state
     2.5   *		6. GR12 = Return address to location within SAL_CHECK
     2.6   */
     2.7 +#ifdef XEN
     2.8 +#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)		\
     2.9 +	movl	_tmp=THIS_CPU(ia64_sal_to_os_handoff_state_addr);;	\
    2.10 +	tpa	_tmp=_tmp;;				\
    2.11 +	ld8	_tmp=[_tmp];;				\
    2.12 +	st8	[_tmp]=r1,0x08;;			\
    2.13 +	st8	[_tmp]=r8,0x08;;			\
    2.14 +	st8	[_tmp]=r9,0x08;;			\
    2.15 +	st8	[_tmp]=r10,0x08;;			\
    2.16 +	st8	[_tmp]=r11,0x08;;			\
    2.17 +	st8	[_tmp]=r12,0x08;;			\
    2.18 +	st8	[_tmp]=r17,0x08;;			\
    2.19 +	st8	[_tmp]=r18,0x08
    2.20 +#else
    2.21  #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)		\
    2.22  	LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
    2.23  	st8	[_tmp]=r1,0x08;;			\
    2.24 @@ -59,7 +73,6 @@
    2.25  	st8	[_tmp]=r17,0x08;;			\
    2.26  	st8	[_tmp]=r18,0x08
    2.27  
    2.28 -#ifndef XEN
    2.29  /*
    2.30   * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
    2.31   * (p6) is executed if we never entered virtual mode (TLB error)
    2.32 @@ -107,11 +120,11 @@
    2.33  	;;								\
    2.34  	ld8 reg=[reg]
    2.35  
    2.36 -#endif /* !XEN */
    2.37 +#endif /* XEN */
    2.38  	.global ia64_os_mca_dispatch
    2.39  	.global ia64_os_mca_dispatch_end
    2.40 +#ifndef XEN
    2.41  	.global ia64_sal_to_os_handoff_state
    2.42 -#ifndef XEN
    2.43  	.global	ia64_os_to_sal_handoff_state
    2.44  	.global ia64_do_tlb_purge
    2.45  #endif
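
For reference, the monarch/slave coordination that the new XEN path in
init_handler_platform() relies on can be modelled in plain C: every CPU
saves its own state and prints its backtrace under one lock, and only
the first CPU to win the dump lock performs the (single) core dump.
The sketch below is a standalone illustration with threads standing in
for CPUs, not Xen code:

	/* Standalone illustration (not Xen code) of the pattern above. */
	#include <pthread.h>
	#include <stdio.h>

	#define NCPUS 4

	static pthread_spinlock_t show_stack_lock; /* serializes per-CPU output    */
	static pthread_spinlock_t init_dump_lock;  /* only one winner dumps        */
	static unsigned long saved_state[NCPUS];   /* stand-in for per-CPU handoff */

	static void *init_handler(void *arg)
	{
		int cpu = (int)(long)arg;

		saved_state[cpu] = 0x1000 + cpu;      /* "save registers" for this CPU */

		pthread_spin_lock(&show_stack_lock);  /* keep the backtraces readable  */
		printf("CPU%d: state saved (%lx)\n", cpu, saved_state[cpu]);
		pthread_spin_unlock(&show_stack_lock);

		/* exactly one CPU wins and writes the dump; the others skip it */
		if (pthread_spin_trylock(&init_dump_lock) == 0)
			printf("CPU%d: performing the dump\n", cpu);

		return NULL;
	}

	int main(void)
	{
		pthread_t t[NCPUS];

		pthread_spin_init(&show_stack_lock, PTHREAD_PROCESS_PRIVATE);
		pthread_spin_init(&init_dump_lock, PTHREAD_PROCESS_PRIVATE);

		for (int i = 0; i < NCPUS; i++)
			pthread_create(&t[i], NULL, init_handler, (void *)(long)i);
		for (int i = 0; i < NCPUS; i++)
			pthread_join(t[i], NULL);
		return 0;
	}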