ia64/xen-unstable

changeset 8908:f94931b07c67

[IA64] tlb miss fix

make the dtlb miss handler handle the xen/ia64 identity mapping area.
xen/ia64 enables the vhpt walker for all regions, unlike Linux.
So dtlb misses on the identity mapping area are caught by the
dtlb miss handler, not the alt dtlb miss handler.

- dtlb miss on the identity mapping area must be handled
- alt dtlb miss must be handled
- itlb miss on the identity mapping area must not occur;
  panic via page_fault().
- alt itlb miss by a guest must be handled;
  it occurs during dom0 boot.
- alt itlb miss by xen must not occur;
  panic via FORCE_CRASH

vmx_ivt.S already has such tweaks by checking psr.vm bit.

TODO: optimization
dtlb miss handlers are performance critical so that
it should be heavily optimized like alt_dtlb_miss.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Fri Feb 24 08:34:11 2006 -0700 (2006-02-24)
parents 5de0ee4ae76b
children c1ba4af23ec9
files xen/arch/ia64/xen/ivt.S xen/include/asm-ia64/config.h
line diff
     1.1 --- a/xen/arch/ia64/xen/ivt.S	Fri Feb 24 08:29:52 2006 -0700
     1.2 +++ b/xen/arch/ia64/xen/ivt.S	Fri Feb 24 08:34:11 2006 -0700
     1.3 @@ -298,12 +298,83 @@ ENTRY(dtlb_miss)
     1.4  	DBG_FAULT(2)
     1.5  #ifdef XEN
     1.6  	VHPT_CCHAIN_LOOKUP(dtlb_miss,d)
     1.7 +#if VHPT_ENABLED
     1.8 +	// XXX TODO optimization
     1.9 +	mov r31=pr				// save predicates
    1.10 +	mov r30=cr.ipsr
    1.11 +	mov r28=cr.iip			
    1.12 +	mov r16=cr.ifa				// get virtual address
    1.13 +	mov r17=cr.isr				// save predicates
    1.14 +	;;
    1.15 +
    1.16 +	extr.u r18 = r30, IA64_PSR_CPL0_BIT, 2	// extract psr.cpl
    1.17 +	;; 
    1.18 +	cmp.ne p6, p0 = r0, r18			// cpl == 0?
    1.19 +(p6)	br.cond.sptk 2f
    1.20 +
    1.21 +	// is speculation bit on?
    1.22 +	tbit.nz p7,p0=r17,IA64_ISR_SP_BIT	
    1.23 +	;; 
    1.24 +(p7)	br.cond.spnt 2f
    1.25 +
    1.26 +	// is non-access bit on?
    1.27 +	tbit.nz p8,p0=r17,IA64_ISR_NA_BIT	
    1.28 +	;;
    1.29 +(p8)	br.cond.spnt 2f
    1.30 +
    1.31 +	// cr.isr.code == IA64_ISR_CODE_LFETCH?
    1.32 +	and r18=IA64_ISR_CODE_MASK,r17		// get the isr.code field
    1.33 +	;; 
    1.34 +	cmp.eq p9,p0=IA64_ISR_CODE_LFETCH,r18	// check isr.code field
    1.35 +(p9)	br.cond.spnt 2f
    1.36 +
    1.37 +	// Is the faulted iip in vmm area?
    1.38 +	// check [59:58] bit
    1.39 +	// 00, 11: guest
    1.40 +	// 01, 10: vmm
    1.41 +	extr.u r19 = r28, 58, 2
    1.42 +	;; 
    1.43 +	cmp.eq p10, p0 = 0x0, r19
    1.44 +(p10)	br.cond.sptk 2f
    1.45 +	cmp.eq p11, p0 = 0x3, r19
    1.46 +(p11)	br.cond.sptk 2f
    1.47 +
    1.48 +	// Is the faulted address is in the identity mapping area?
    1.49 +	// 0xf000... or 0xe8000...
    1.50 +	extr.u r20 = r16, 59, 5
    1.51 +	;; 
    1.52 +	cmp.eq p12, p0 = 0x1e, r20 // (0xf0 >> 3) = 0x1e
    1.53 +(p12)	br.cond.spnt 1f
    1.54 +	cmp.eq p0, p13 = 0x1d, r20 // (0xe8 >> 3) = 0x1d
    1.55 +(p13)	br.cond.sptk 2f
    1.56 +
    1.57 +1:
    1.58 +	// xen identity mappin area.
    1.59 +	movl r24=PAGE_KERNEL
    1.60 +	movl r25=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
    1.61 +	;;
    1.62 +	shr.u r26=r16,55	// move address bit 59 to bit 4
    1.63 +	and r25=r25,r16		// clear ed, reserved bits, and PTE control bits
    1.64 +	;;
    1.65 +	and r26=0x10,r26	// bit 4=address-bit(59)
    1.66 +	;; 
    1.67 +	or r25=r25,r24		// insert PTE control bits into r25
    1.68 +	;;
    1.69 +	or r25=r25,r26		// set bit 4 (uncached) if the access was to region 6
    1.70 +	;;
    1.71 +	itc.d r25		// insert the TLB entry
    1.72 +	mov pr=r31,-1
    1.73 +	rfi
    1.74 +
    1.75 +2:
    1.76 +#endif	
    1.77  #ifdef VHPT_GLOBAL
    1.78  //	br.cond.sptk page_fault
    1.79  	br.cond.sptk fast_tlb_miss_reflect
    1.80  	;;
    1.81  #endif
    1.82 -#endif
    1.83 +	mov r29=b0				// save b0
    1.84 +#else	
    1.85  	/*
    1.86  	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
    1.87  	 * page table.  If a nested TLB miss occurs, we switch into physical
    1.88 @@ -313,6 +384,7 @@ ENTRY(dtlb_miss)
    1.89  	mov r16=cr.ifa				// get virtual address
    1.90  	mov r29=b0				// save b0
    1.91  	mov r31=pr				// save predicates
    1.92 +#endif
    1.93  dtlb_fault:
    1.94  	mov r17=cr.iha				// get virtual address of L3 PTE
    1.95  	movl r30=1f				// load nested fault continuation point
    1.96 @@ -399,6 +471,9 @@ late_alt_itlb_miss:
    1.97  	;;
    1.98  	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
    1.99  (p8)	br.cond.spnt page_fault
   1.100 +#ifdef XEN
   1.101 +	FORCE_CRASH
   1.102 +#endif	
   1.103  	;;
   1.104  	itc.i r19		// insert the TLB entry
   1.105  	mov pr=r31,-1
     2.1 --- a/xen/include/asm-ia64/config.h	Fri Feb 24 08:29:52 2006 -0700
     2.2 +++ b/xen/include/asm-ia64/config.h	Fri Feb 24 08:34:11 2006 -0700
     2.3 @@ -251,8 +251,6 @@ struct screen_info { };
     2.4  #define seq_printf(a,b...) printf(b)
     2.5  #define CONFIG_BLK_DEV_INITRD // needed to reserve memory for domain0
     2.6  
     2.7 -#define FORCE_CRASH()	asm("break 0;;");
     2.8 -
     2.9  void dummy_called(char *function);
    2.10  #define dummy()	dummy_called(__FUNCTION__)
    2.11  
    2.12 @@ -301,6 +299,9 @@ extern int ht_per_core;
    2.13  
    2.14  #ifndef __ASSEMBLY__
    2.15  #include <linux/linkage.h>
    2.16 +#define FORCE_CRASH()	asm("break.m 0;;");
    2.17 +#else
    2.18 +#define FORCE_CRASH	break.m 0;;
    2.19  #endif
    2.20  
    2.21  #endif	/* _IA64_CONFIG_H_ */