ia64/xen-unstable

changeset 16113:2e13bfcf4abb

[IA64] Fix MCA error handler problems

This fixes MCA issues related to changes from the kexec patch series...

[From "Kexec: Fix ia64_do_tlb_purge so that it works with XEN"]

> 2. Use the per_cpu variable to derive CURRENT_STACK_OFFSET rather
> than reading it from a kernel register. See 1) for explanation
> of why.

I added the same code to the "Reload DTR for stack" part, and also added
code to avoid overlapping with the kernel TR.
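
For reference, the intent of the new check can be written in C as a minimal
sketch (illustrative only, not Xen code; the helper name and its parameters
are made up for this example):

    #include <stdint.h>

    /* Returns nonzero when the current stack already lies inside the
     * pinned kernel TR mapping; in that case inserting a second DTR for
     * the stack would overlap the kernel TR, so the reload is skipped. */
    static int stack_inside_kernel_tr(uint64_t cur_stack_va,
                                      uint64_t kernel_start,
                                      unsigned int kernel_tr_page_shift)
    {
        uint64_t base = cur_stack_va & ~((1UL << kernel_tr_page_shift) - 1);
        return base == kernel_start;
    }

In the patch this corresponds to the dep / cmp.eq / (p7) br.cond.sptk
.reload_vhpt sequence inserted just before the CURRENT_STACK reload.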

> 3. In the VHPT pruning code, don't use r25 as ia64_jump_to_sal,
> which branches to ia64_do_tlb_purge expects r25 to be preserved.
> There seems no reason not to use r2 as per the other purges
> done in ia64_do_tlb_purge. Furthermore use r16 and r18 instead
> of r20 and r24 for consistency reasons.

Register r25 held the value of __va_ul(vcpu_vhpt_maddr(v)), which was
referenced by the following lines:

468 // r25 = __va_ul(vcpu_vhpt_maddr(v));
469 dep r20=0,r25,0,IA64_GRANULE_SHIFT
470 movl r26=PAGE_KERNEL
471 ;;
472 mov r21=IA64_TR_VHPT
473 dep r22=0,r20,60,4 // physical address of

I defined a GET_VA_VCPU_VHPT_MADDR() macro to re-calculate the value of
__va_ul(vcpu_vhpt_maddr(v)) in each place, and renamed the registers for
the same consistency reasons.
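
In C terms the macro computes roughly the following (a sketch only: the
structures are reduced to the fields the macro reaches through the
IA64_*_OFFSET asm-offsets, and va_of / this_cpu_vhpt_paddr /
va_vcpu_vhpt_maddr are illustrative names):

    #include <stdint.h>

    #define HAS_PERVCPU_VHPT_MASK 0x2          /* same mask as in the macro */

    /* Reduced stand-ins for the real structures; the macro reaches these
     * fields via IA64_VCPU_DOMAIN_OFFSET, IA64_DOMAIN_FLAGS_OFFSET and
     * IA64_VCPU_VHPT_MADDR_OFFSET. */
    struct domain { uint64_t flags; };
    struct vcpu   { struct domain *domain; uint64_t vhpt_maddr; };

    extern uint64_t this_cpu_vhpt_paddr;       /* per-CPU VHPT, physical */

    /* Stand-in for __va_ul(): P -> V, mirroring the
     * "dep reg=-1,reg,60,4" in the macro. */
    static inline uint64_t va_of(uint64_t maddr)
    {
        return maddr | (0xfUL << 60);
    }

    /* Roughly what GET_VA_VCPU_VHPT_MADDR(reg,tmp) computes.  The assembly
     * additionally clears the region bits ("V to P") before each load
     * because it runs in physical mode. */
    static uint64_t va_vcpu_vhpt_maddr(struct vcpu *v)
    {
        if (v->domain->flags & HAS_PERVCPU_VHPT_MASK)
            return va_of(v->vhpt_maddr);        /* per-vCPU VHPT */
        return va_of(this_cpu_vhpt_paddr);      /* per-pCPU VHPT */
    }

Recomputing this value inside ia64_reload_tr, rather than relying on r25
surviving from the purge path, lets the reload code use scratch registers
freely.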

Signed-off-by: Kazuhiro Suzuki <kaz@jp.fujitsu.com>
author Alex Williamson <alex.williamson@hp.com>
date Fri Oct 12 15:02:06 2007 -0600 (2007-10-12)
parents 52d9f5028397
children 503756587ccf
files xen/arch/ia64/linux-xen/mca_asm.S
line diff
     1.1 --- a/xen/arch/ia64/linux-xen/mca_asm.S	Fri Oct 12 14:49:37 2007 -0600
     1.2 +++ b/xen/arch/ia64/linux-xen/mca_asm.S	Fri Oct 12 15:02:06 2007 -0600
     1.3 @@ -185,6 +185,41 @@ 2:	br 2b;;			/* Endless loop on error */
     1.4  3:	add r4 = r6, r3;;					\
     1.5  	ld8 r4 = [r4];;						\
     1.6  	mov ar.k3=r4
     1.7 +
     1.8 +/*
     1.9 + * GET_VA_VCPU_VHPT_MADDR() emulates 'reg = __va_ul(vcpu_vhpt_maddr(v))'.
    1.10 + */
    1.11 +#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
    1.12 +#define HAS_PERVCPU_VHPT_MASK	0x2
    1.13 +#define GET_VA_VCPU_VHPT_MADDR(reg,tmp)				\
    1.14 +	GET_THIS_PADDR(reg,cpu_kr);;				\
    1.15 +	add reg=IA64_KR_CURRENT_OFFSET,reg;;			\
    1.16 +	ld8 reg=[reg];;						\
    1.17 +	dep tmp=0,reg,60,4;;			/* V to P */	\
    1.18 +	add tmp=IA64_VCPU_DOMAIN_OFFSET,tmp;;			\
    1.19 +	ld8 tmp=[tmp];;						\
    1.20 +	dep tmp=0,tmp,60,4;;			/* V to P */	\
    1.21 +	add tmp=IA64_DOMAIN_FLAGS_OFFSET,tmp;;			\
    1.22 +	ld8 tmp=[tmp];;						\
    1.23 +	and tmp=HAS_PERVCPU_VHPT_MASK,tmp;;			\
    1.24 +	cmp.eq p6,p0=tmp,r0;					\
    1.25 +(p6)	br.cond.sptk 1f;					\
    1.26 +	add reg=IA64_VCPU_VHPT_MADDR_OFFSET,reg;;		\
    1.27 +	dep reg=0,reg,60,4;;			/* V to P */	\
    1.28 +	ld8 reg=[reg];;						\
    1.29 +	dep reg=-1,reg,60,4;			/* P to V */	\
    1.30 +	br.sptk	2f;						\
    1.31 +1:								\
    1.32 +	GET_THIS_PADDR(reg, vhpt_paddr);;			\
    1.33 +	ld8 reg=[reg];;						\
    1.34 +	dep reg=-1,reg,60,4;			/* P to V */	\
    1.35 +2:
    1.36 +#else /* CONFIG_XEN_IA64_PERVCPU_VHPT */
    1.37 +#define GET_VA_VCPU_VHPT_MADDR(reg,tmp)				\
    1.38 +	GET_THIS_PADDR(reg, vhpt_paddr);;			\
    1.39 +	ld8 reg=[reg];;						\
    1.40 +	dep reg=-1,reg,60,4			/* P to V */
    1.41 +#endif /* CONFIG_XEN_IA64_PERVCPU_VHPT */
    1.42  #endif	/* XEN */
    1.43  
    1.44  /*
    1.45 @@ -290,33 +325,8 @@ 4:
    1.46  	;;
    1.47  #ifdef XEN
    1.48  	// 5. VHPT
    1.49 -	// r2 = __va_ul(vcpu_vhpt_maddr(v));
    1.50  #if VHPT_ENABLED
    1.51 -	GET_THIS_PADDR(r2,cpu_kr);;
    1.52 -	add r2=IA64_KR_CURRENT_OFFSET,r2;;
    1.53 -	ld8 r2=[r2];;
    1.54 -#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
    1.55 -#define HAS_PERVCPU_VHPT_MASK	0x2
    1.56 -	dep r3=0,r2,60,4;;			// virtual to physical
    1.57 -	add r3=IA64_VCPU_DOMAIN_OFFSET,r3;;
    1.58 -	ld8 r3=[r3];; 
    1.59 -	dep r3=0,r3,60,4;;			// virtual to physical
    1.60 -	add r3=IA64_DOMAIN_FLAGS_OFFSET,r3;;
    1.61 -	ld8 r3=[r3];; 
    1.62 -	and r3=HAS_PERVCPU_VHPT_MASK,r3;;
    1.63 -	cmp.eq p6,p0=r3,r0;;
    1.64 -(p6)	br.cond.sptk	.not_pervcpu_vhpt
    1.65 -	add r2=IA64_VCPU_VHPT_MADDR_OFFSET,r2;;
    1.66 -	dep r2=0,r2,60,4;;			// virtual to physical
    1.67 -	ld8 r2=[r2];; 
    1.68 -	dep r2=-1,r2,60,4;;			// physical to virtual
    1.69 -	br.sptk		.percpu_vhpt_done
    1.70 -#endif
    1.71 -.not_pervcpu_vhpt:
    1.72 -	GET_THIS_PADDR(r2, vhpt_paddr);; 
    1.73 -	ld8 r2=[r2];; 
    1.74 -	dep r2=-1,r2,60,4;;			// physical to virtual
    1.75 -.percpu_vhpt_done:
    1.76 +	GET_VA_VCPU_VHPT_MADDR(r2,r3);;
    1.77  	dep r16=0,r2,0,IA64_GRANULE_SHIFT
    1.78  	mov r18=IA64_GRANULE_SHIFT<<2
    1.79  	;;
    1.80 @@ -443,7 +453,27 @@ ia64_reload_tr:
    1.81  	srlz.i
    1.82  	;;
    1.83  	// 4. Reload DTR for stack.
    1.84 +#ifdef XEN
    1.85 +	// avoid overlapping with kernel TR
    1.86 +	movl r17=KERNEL_START
    1.87 +	GET_THIS_PADDR(r2,cpu_kr);;
    1.88 +	add r2=IA64_KR_CURRENT_OFFSET,r2;;
    1.89 +	ld8 r16=[r2];;
    1.90 +	;;
    1.91 +	dep  r16=0,r16,0,KERNEL_TR_PAGE_SHIFT
    1.92 +	;;
    1.93 +	cmp.eq p7,p0=r17,r16
    1.94 +(p7)	br.cond.sptk	.reload_vhpt
    1.95 +	
    1.96 +	// Kernel registers are saved in a per_cpu cpu_kr_ia64_t
    1.97 +	// to allow the kernel registers themselves to be used by domains.
    1.98 +	GET_THIS_PADDR(r2, cpu_kr);;
    1.99 +	add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
   1.100 +	;;
   1.101 +	ld8 r16=[r2]
   1.102 +#else
   1.103  	mov r16=IA64_KR(CURRENT_STACK)
   1.104 +#endif
   1.105  	;;
   1.106  	shl r16=r16,IA64_GRANULE_SHIFT
   1.107  	movl r19=PAGE_OFFSET
   1.108 @@ -463,22 +493,23 @@ ia64_reload_tr:
   1.109  	srlz.d
   1.110  	;;
   1.111  #ifdef XEN
   1.112 +.reload_vhpt:
   1.113  	// 5. VHPT
   1.114  #if VHPT_ENABLED
   1.115 -	// r25 = __va_ul(vcpu_vhpt_maddr(v));
   1.116 -	dep r20=0,r25,0,IA64_GRANULE_SHIFT
   1.117 -	movl r26=PAGE_KERNEL
   1.118 +	GET_VA_VCPU_VHPT_MADDR(r2,r3);;
   1.119 +	dep r16=0,r2,0,IA64_GRANULE_SHIFT
   1.120 +	movl r20=PAGE_KERNEL
   1.121  	;;
   1.122 -	mov r21=IA64_TR_VHPT
   1.123 -	dep r22=0,r20,60,4		// physical address of
   1.124 +	mov r18=IA64_TR_VHPT
   1.125 +	dep r17=0,r16,60,4		// physical address of
   1.126  	                                // va_vhpt & ~(IA64_GRANULE_SIZE - 1)
   1.127 -	mov r24=IA64_GRANULE_SHIFT<<2
   1.128 +	mov r19=IA64_GRANULE_SHIFT<<2
   1.129  	;;
   1.130 -	or r23=r22,r26			// construct PA | page properties
   1.131 -	mov cr.itir=r24
   1.132 -	mov cr.ifa=r20
   1.133 +	or r17=r17,r20			// construct PA | page properties
   1.134 +	mov cr.itir=r19
   1.135 +	mov cr.ifa=r16
   1.136  	;;
   1.137 -	itr.d dtr[r21]=r23		// wire in new mapping...
   1.138 +	itr.d dtr[r18]=r17		// wire in new mapping...
   1.139  	;;
   1.140  	srlz.d
   1.141  	;;