ia64/xen-unstable

changeset 5381:ba7d283dc8dd

bitkeeper revision 1.1668.1.5 (42a7aa43Fmo2V-QK6GDh3TF5foWBdQ)

Initial work on fast reflection (break), still disabled
Signed-off-by: Dan Magenheimer <dan.magenheimer@hp.com>
author djm@kirby.fc.hp.com
date Thu Jun 09 02:32:35 2005 +0000 (2005-06-09)
parents 7aa93c8e8029
children f40340412b5a
files xen/arch/ia64/hyperprivop.S xen/arch/ia64/ivt.S xen/arch/ia64/patch/linux-2.6.11/efi.c
line diff
     1.1 --- a/xen/arch/ia64/hyperprivop.S	Tue Jun 07 22:00:55 2005 +0000
     1.2 +++ b/xen/arch/ia64/hyperprivop.S	Thu Jun 09 02:32:35 2005 +0000
     1.3 @@ -15,10 +15,12 @@
     1.4  #include <public/arch-ia64.h>
     1.5  
     1.6  #define FAST_HYPERPRIVOP_CNT
     1.7 +#define FAST_REFLECT_CNT
     1.8  
     1.9  // Should be included from common header file (also in process.c)
    1.10  //  NO PSR_CLR IS DIFFERENT! (CPL)
    1.11  #define IA64_PSR_CPL1	(__IA64_UL(1) << IA64_PSR_CPL1_BIT)
    1.12 +#define IA64_PSR_CPL0	(__IA64_UL(1) << IA64_PSR_CPL0_BIT)
    1.13  // note IA64_PSR_PK removed from following, why is this necessary?
    1.14  #define	DELIVER_PSR_SET	(IA64_PSR_IC | IA64_PSR_I | \
    1.15  			IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
    1.16 @@ -116,7 +118,6 @@ GLOBAL_ENTRY(fast_hyperprivop)
    1.17  //	r17 == cr.iim
    1.18  //	r18 == XSI_PSR_IC
    1.19  //	r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
    1.20 -//	r22 == IA64_KR(CURRENT)+IA64_VCPU_BREAKIMM_OFFSET
    1.21  //	r31 == pr
    1.22  ENTRY(hyper_ssm_i)
    1.23  	// give up for now if: ipsr.be==1, ipsr.pp==1
    1.24 @@ -135,8 +136,8 @@ ENTRY(hyper_ssm_i)
    1.25  	st8 [r20]=r21;;
    1.26  #endif
    1.27  	// set shared_mem iip to instruction after HYPER_SSM_I
    1.28 -extr.u r20=r30,41,2 ;;
    1.29 -cmp.eq p6,p7=2,r20 ;;
    1.30 +	extr.u r20=r30,41,2 ;;
    1.31 +	cmp.eq p6,p7=2,r20 ;;
    1.32  (p6)	mov r20=0
    1.33  (p6)	adds r29=16,r29
    1.34  (p7)	adds r20=1,r20 ;;
    1.35 @@ -218,59 +219,82 @@ cmp.eq p6,p7=2,r20 ;;
    1.36  //	r17 == cr.iim
    1.37  //	r18 == XSI_PSR_IC
    1.38  //	r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
    1.39 -//	r22 == IA64_KR(CURRENT)+IA64_VCPU_BREAKIMM_OFFSET
    1.40  //	r31 == pr
    1.41  GLOBAL_ENTRY(fast_break_reflect)
    1.42 -	mov r20=cr.ipsr;;
    1.43 -	// if big-endian domain or privileged-perfmon bits set, do slow way
    1.44 -	extr.u r21=r20,IA64_PSR_BE_BIT,1 ;;
    1.45 -	cmp.ne p7,p0=r21,r0
    1.46 -(p7)	br.sptk.many dispatch_break_fault ;;
    1.47 -	extr.u r21=r20,IA64_PSR_PP_BIT,1 ;;
    1.48 -	cmp.ne p7,p0=r21,r0
    1.49 +//#define FAST_BREAK
    1.50 +#ifndef FAST_BREAK
    1.51 +	br.sptk.many dispatch_break_fault ;;
    1.52 +#endif
    1.53 +	mov r30=cr.ipsr;;
    1.54 +	mov r29=cr.iip;;
    1.55 +	extr.u r21=r30,IA64_PSR_BE_BIT,1 ;;
    1.56 +	cmp.ne p7,p0=r21,r0 ;;
    1.57  (p7)	br.sptk.many dispatch_break_fault ;;
    1.58 -	// ensure ipsr.cpl==2, ipsr.ri==0
    1.59 -	// FIXME: any other psr bits need to be properly set/validated?
    1.60 -	//   ...see process.c: DELIVER_PSR_CLR/SET
    1.61 -	extr.u r21=r20,IA64_PSR_CPL0_BIT,2;;
    1.62 -	extr.u r23=r20,IA64_PSR_RI_BIT,2;;
    1.63 -	dep r20=-1,r20,IA64_PSR_CPL1_BIT,1 ;;
    1.64 -	dep r20=0,r20,IA64_PSR_CPL0_BIT,1 ;;
    1.65 -	dep r20=0,r20,IA64_PSR_RI_BIT,2 ;;
    1.66 -	mov cr.ipsr=r20;;
    1.67 -	// save ipsr in shared_info, vipsr.cpl==(ipsr.cpl==3)?3:0
    1.68 -	cmp.ne p7,p0=3,r21;;
    1.69 -(p7)	mov r21=r0 ;;
    1.70 -	dep r20=r21,r20,IA64_PSR_CPL0_BIT,2 ;;
    1.71 -	dep r20=r23,r20,IA64_PSR_RI_BIT,2 ;;
    1.72 -	// vipsr.i=vpsr.i
    1.73 -	adds r21=XSI_PSR_I_OFS-XSI_PSR_IC_OFS,r18 ;;
    1.74 -	ld4 r21=[r21];;
    1.75 -	dep r20=r21,r20,IA64_PSR_I_BIT,1 ;;
    1.76 -	adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
    1.77 -	// FIXME: any other vpsr bits need to be properly set/validated?
    1.78 -	st8 [r21]=r20;;
    1.79 +	extr.u r21=r30,IA64_PSR_PP_BIT,1 ;;
    1.80 +	cmp.ne p7,p0=r21,r0 ;;
    1.81 +(p7)	br.sptk.many dispatch_break_fault ;;
    1.82 +#ifdef FAST_REFLECT_CNT
    1.83 +	movl r20=fast_reflect_count+((0x2c00>>8)*8);;
    1.84 +	ld8 r21=[r20];;
    1.85 +	adds r21=1,r21;;
    1.86 +	st8 [r20]=r21;;
    1.87 +#endif
    1.88  	// save iim in shared_info
    1.89  	adds r21=XSI_IIM_OFS-XSI_PSR_IC_OFS,r18 ;;
    1.90  	st8 [r21]=r17;;
    1.91 -	// save iip in shared_info
    1.92 -	mov r20=cr.iip;;
    1.93 +	// save iip in shared_info (DON'T POINT TO NEXT INSTRUCTION!)
    1.94  	adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
    1.95 -	st8 [r21]=r20;;
    1.96 -	// save ifs in shared_info
    1.97 +	st8 [r21]=r29;;
    1.98 +	// set shared_mem isr
    1.99 +	adds r21=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;; 
   1.100 +	st8 [r21]=r16 ;;
   1.101 +	// set cr.ipsr
   1.102 +	mov r29=r30 ;;
   1.103 +	movl r28=DELIVER_PSR_SET;;
   1.104 +	movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
   1.105 +	or r29=r29,r28;;
   1.106 +	and r29=r29,r27;;
   1.107 +	mov cr.ipsr=r29;;
   1.108 +	// set shared_mem ipsr (from ipsr in r30 with ipsr.ri already set)
   1.109 +	extr.u r29=r30,IA64_PSR_CPL0_BIT,2;;
   1.110 +	cmp.eq p6,p7=3,r29;;
   1.111 +(p6)	dep r30=-1,r30,IA64_PSR_CPL0_BIT,2
   1.112 +(p7)	dep r30=0,r30,IA64_PSR_CPL0_BIT,2
   1.113 +	;;
   1.114 +	movl r28=(IA64_PSR_DT|IA64_PSR_IT|IA64_PSR_RT);;
   1.115 +	movl r27=~(IA64_PSR_BE|IA64_PSR_PP|IA64_PSR_BN);;
   1.116 +	or r30=r30,r28;;
   1.117 +	and r30=r30,r27;;
   1.118 +	// also set shared_mem ipsr.i and ipsr.ic appropriately
   1.119 +	ld8 r20=[r18];;
   1.120 +	extr.u r22=r20,32,32
   1.121 +	cmp4.eq p6,p7=r20,r0;;
   1.122 +(p6)	dep r30=0,r30,IA64_PSR_IC_BIT,1
   1.123 +(p7)	dep r30=-1,r30,IA64_PSR_IC_BIT,1 ;;
   1.124 +	cmp4.eq p6,p7=r22,r0;;
   1.125 +(p6)	dep r30=0,r30,IA64_PSR_I_BIT,1
   1.126 +(p7)	dep r30=-1,r30,IA64_PSR_I_BIT,1 ;;
   1.127 +	adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.128 +	st8 [r21]=r30 ;;
   1.129 +	// set shared_mem interrupt_delivery_enabled to 0
   1.130 +	// set shared_mem interrupt_collection_enabled to 0
   1.131 +	st8 [r18]=r0;;
   1.132 +	// cover and set shared_mem precover_ifs to cr.ifs
   1.133 +	// set shared_mem ifs and incomplete_regframe to 0
   1.134 +	cover ;;
   1.135 +	mov r20=cr.ifs;;
   1.136  	adds r21=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.137  	st4 [r21]=r0 ;;
   1.138  	adds r21=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.139  	st8 [r21]=r0 ;;
   1.140 -	cover ;;
   1.141 -	mov r20=cr.ifs;;
   1.142  	adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.143 -	st8 [r21]=r20;;
   1.144 +	st8 [r21]=r20 ;;
   1.145  	// vpsr.i = vpsr.ic = 0 on delivery of interruption
   1.146  	st8 [r18]=r0;;
   1.147  	// FIXME: need to save iipa and isr to be arch-compliant
   1.148  	// set iip to go to domain IVA break instruction vector
   1.149 -	adds r22=IA64_VCPU_IVA_OFFSET-IA64_VCPU_BREAKIMM_OFFSET,r22;;
   1.150 +	mov r22=IA64_KR(CURRENT);;
   1.151 +	adds r22=IA64_VCPU_IVA_OFFSET,r22;;
   1.152  	ld8 r23=[r22];;
   1.153  	movl r24=0x2c00;;
   1.154  	add r24=r24,r23;;
     2.1 --- a/xen/arch/ia64/ivt.S	Tue Jun 07 22:00:55 2005 +0000
     2.2 +++ b/xen/arch/ia64/ivt.S	Thu Jun 09 02:32:35 2005 +0000
     2.3 @@ -798,12 +798,7 @@ ENTRY(break_fault)
     2.4  	cmp4.eq p6,p7=r23,r17			// Xen-reserved breakimm?
     2.5  (p6)	br.spnt.many dispatch_break_fault
     2.6  	;;
     2.7 -//#define FAST_BREAK
     2.8 -#ifdef FAST_BREAK
     2.9  	br.sptk.many fast_break_reflect
    2.10 -#else
    2.11 -	br.spnt.many dispatch_break_fault
    2.12 -#endif
    2.13  	;;
    2.14  #endif
    2.15  	mov r16=IA64_KR(CURRENT)		// r16 = current task; 12 cycle read lat.
     3.1 --- a/xen/arch/ia64/patch/linux-2.6.11/efi.c	Tue Jun 07 22:00:55 2005 +0000
     3.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/efi.c	Thu Jun 09 02:32:35 2005 +0000
     3.3 @@ -1,17 +1,19 @@
     3.4  --- ../../linux-2.6.11/arch/ia64/kernel/efi.c	2005-03-02 00:37:47.000000000 -0700
     3.5 -+++ arch/ia64/efi.c	2005-04-29 14:09:24.000000000 -0600
     3.6 -@@ -320,6 +320,10 @@
     3.7 ++++ arch/ia64/efi.c	2005-06-08 20:23:39.000000000 -0600
     3.8 +@@ -320,6 +320,12 @@
     3.9   		if (!(md->attribute & EFI_MEMORY_WB))
    3.10   			continue;
    3.11   
    3.12  +#ifdef XEN
    3.13 ++// this works around a problem in the ski bootloader
    3.14 ++		if (md->type != EFI_CONVENTIONAL_MEMORY)  continue;
    3.15  +// this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP
    3.16  +		if (md->phys_addr >= 0x100000000) continue;
    3.17  +#endif
    3.18   		/*
    3.19   		 * granule_addr is the base of md's first granule.
    3.20   		 * [granule_addr - first_non_wb_addr) is guaranteed to
    3.21 -@@ -719,6 +723,30 @@
    3.22 +@@ -719,6 +725,30 @@
    3.23   	return 0;
    3.24   }
    3.25