ia64/xen-unstable

changeset 5345:c061e9a30cdf

bitkeeper revision 1.1668.1.1 (42a4842f6oumXgOUTTVUMx7uABT97w)

More hyperprivop stuff
Signed-off-by: Dan Magenheimer <dan.magenheimer@hp.com>
author djm@kirby.fc.hp.com
date Mon Jun 06 17:13:19 2005 +0000 (2005-06-06)
parents fea2f7f8df31 a0723a72717d
children 507ef62e9bec
files xen/arch/ia64/asm-offsets.c xen/arch/ia64/dom_fw.c xen/arch/ia64/domain.c xen/arch/ia64/hyperprivop.S xen/arch/ia64/ivt.S xen/arch/ia64/privop.c xen/arch/ia64/process.c xen/arch/ia64/regionreg.c xen/arch/ia64/vcpu.c xen/include/asm-ia64/domain.h xen/include/public/arch-ia64.h
line diff
     1.1 --- a/xen/arch/ia64/asm-offsets.c	Fri Jun 03 22:22:23 2005 +0000
     1.2 +++ b/xen/arch/ia64/asm-offsets.c	Mon Jun 06 17:13:19 2005 +0000
     1.3 @@ -46,12 +46,18 @@ void foo(void)
     1.4  	DEFINE(XSI_PSR_IC, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.interrupt_collection_enabled)));
     1.5  	DEFINE(XSI_PSR_I_OFS, offsetof(vcpu_info_t, arch.interrupt_delivery_enabled));
     1.6  	DEFINE(XSI_IIP_OFS, offsetof(vcpu_info_t, arch.iip));
     1.7 +	DEFINE(XSI_IPSR, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.ipsr)));
     1.8  	DEFINE(XSI_IPSR_OFS, offsetof(vcpu_info_t, arch.ipsr));
     1.9  	DEFINE(XSI_IFS_OFS, offsetof(vcpu_info_t, arch.ifs));
    1.10 +	DEFINE(XSI_IIM_OFS, offsetof(vcpu_info_t, arch.iim));
    1.11  	DEFINE(XSI_BANKNUM_OFS, offsetof(vcpu_info_t, arch.banknum));
    1.12 +	DEFINE(XSI_BANK0_OFS, offsetof(vcpu_info_t, arch.bank0_regs[0]));
    1.13 +	DEFINE(XSI_BANK1_OFS, offsetof(vcpu_info_t, arch.bank1_regs[0]));
    1.14  	DEFINE(XSI_METAPHYS_OFS, offsetof(vcpu_info_t, arch.metaphysical_mode));
    1.15 +	DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(vcpu_info_t, arch.precover_ifs));
    1.16  	DEFINE(XSI_INCOMPL_REG_OFS, offsetof(vcpu_info_t, arch.incomplete_regframe));
    1.17  	DEFINE(XSI_PEND_OFS, offsetof(vcpu_info_t, arch.pending_interruption));
    1.18 +	DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
    1.19  	//DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
    1.20  	//DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
    1.21  	//DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
    1.22 @@ -64,6 +70,11 @@ void foo(void)
    1.23  	DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct vcpu, arch._thread.ksp));
    1.24  	DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct vcpu, arch._thread.on_ustack));
    1.25  
    1.26 +	DEFINE(IA64_VCPU_META_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_rr0));
    1.27 +	DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
    1.28 +	DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
    1.29 +	DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
    1.30 +
    1.31  	BLANK();
    1.32  
    1.33  	//DEFINE(IA64_SIGHAND_SIGLOCK_OFFSET,offsetof (struct sighand_struct, siglock));
     2.1 --- a/xen/arch/ia64/dom_fw.c	Fri Jun 03 22:22:23 2005 +0000
     2.2 +++ b/xen/arch/ia64/dom_fw.c	Mon Jun 06 17:13:19 2005 +0000
     2.3 @@ -50,7 +50,7 @@ void dom_efi_hypercall_patch(struct doma
     2.4  
     2.5  	if (d == dom0) paddr += dom0_start;
     2.6  	imva = domain_mpa_to_imva(d,paddr);
     2.7 -	build_hypercall_bundle(imva,d->breakimm,hypercall,1);
     2.8 +	build_hypercall_bundle(imva,d->arch.breakimm,hypercall,1);
     2.9  }
    2.10  
    2.11  
    2.12 @@ -61,7 +61,7 @@ void dom_fw_hypercall_patch(struct domai
    2.13  
    2.14  	if (d == dom0) paddr += dom0_start;
    2.15  	imva = domain_mpa_to_imva(d,paddr);
    2.16 -	build_hypercall_bundle(imva,d->breakimm,hypercall,ret);
    2.17 +	build_hypercall_bundle(imva,d->arch.breakimm,hypercall,ret);
    2.18  }
    2.19  
    2.20  
     3.1 --- a/xen/arch/ia64/domain.c	Fri Jun 03 22:22:23 2005 +0000
     3.2 +++ b/xen/arch/ia64/domain.c	Mon Jun 06 17:13:19 2005 +0000
     3.3 @@ -210,7 +210,7 @@ void arch_do_createdomain(struct vcpu *v
     3.4  	 */
     3.5  	d->xen_vastart = 0xf000000000000000;
     3.6  	d->xen_vaend = 0xf300000000000000;
     3.7 -	d->breakimm = 0x1000;
     3.8 +	d->arch.breakimm = 0x1000;
     3.9  
    3.10  	// stay on kernel stack because may get interrupts!
    3.11  	// ia64_ret_from_clone (which b0 gets in new_thread) switches
    3.12 @@ -244,9 +244,11 @@ void arch_do_createdomain(struct vcpu *v
    3.13  	}
    3.14  #endif
    3.15  	d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
    3.16 -	if ((d->metaphysical_rid = allocate_metaphysical_rid()) == -1UL)
    3.17 +	if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
    3.18  		BUG();
    3.19  	v->vcpu_info->arch.metaphysical_mode = 1;
    3.20 +	v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
    3.21 +	v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
    3.22  #define DOMAIN_RID_BITS_DEFAULT 18
    3.23  	if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
    3.24  		BUG();
    3.25 @@ -254,7 +256,8 @@ void arch_do_createdomain(struct vcpu *v
    3.26  	d->xen_vastart = 0xf000000000000000;
    3.27  	d->xen_vaend = 0xf300000000000000;
    3.28  	d->shared_info_va = 0xf100000000000000;
    3.29 -	d->breakimm = 0x1000;
    3.30 +	d->arch.breakimm = 0x1000;
    3.31 +	ed->arch.breakimm = d->arch.breakimm;
    3.32  	// stay on kernel stack because may get interrupts!
    3.33  	// ia64_ret_from_clone (which b0 gets in new_thread) switches
    3.34  	// to user stack
     4.1 --- a/xen/arch/ia64/hyperprivop.S	Fri Jun 03 22:22:23 2005 +0000
     4.2 +++ b/xen/arch/ia64/hyperprivop.S	Mon Jun 06 17:13:19 2005 +0000
     4.3 @@ -32,11 +32,131 @@ GLOBAL_ENTRY(fast_hyperprivop)
     4.4  	// HYPERPRIVOP_RFI?
     4.5  	cmp.eq p7,p6=XEN_HYPER_RFI,r17
     4.6  (p7)	br.sptk.many hyper_rfi;;
     4.7 -	// if not rfi, give up for now and do it the slow way
     4.8 +
     4.9 +#if 0
    4.10 +	// HYPERPRIVOP_SSM_I?
    4.11 +	cmp.eq p7,p6=XEN_HYPER_SSM_I,r17
    4.12 +(p7)	br.sptk.many hyper_ssm_i;;
    4.13 +#endif
    4.14 +
    4.15 +#if 1
    4.16 +// hard to test, because only called from rbs_switch
    4.17 +	// HYPERPRIVOP_COVER?
    4.18 +	cmp.eq p7,p6=XEN_HYPER_COVER,r17
    4.19 +(p7)	br.sptk.many hyper_cover;;
    4.20 +#endif
    4.21 +
    4.22 +#if 0 // FIXME: This inexplicably causes the number of ssm_dt's to
    4.23 +      // skyrocket, thus slowing down everything
    4.24 +	// HYPERPRIVOP_SSM_DT?
    4.25 +	cmp.eq p7,p6=XEN_HYPER_SSM_DT,r17
    4.26 +(p7)	br.sptk.many hyper_ssm_dt;;
    4.27 +#endif
    4.28 +
    4.29 +#if 1
    4.30 +	// HYPERPRIVOP_RSM_DT?
    4.31 +	cmp.eq p7,p6=XEN_HYPER_RSM_DT,r17
    4.32 +(p7)	br.sptk.many hyper_rsm_dt;;
    4.33 +#endif
    4.34 +
    4.35 +	// if not one of the above, give up for now and do it the slow way
    4.36  	br.sptk.many dispatch_break_fault ;;
    4.37  
    4.38 +// reflect domain breaks directly to domain
    4.39 +// FIXME: DOES NOT WORK YET
    4.40 +//	r16 == cr.isr
    4.41 +//	r17 == cr.iim
    4.42 +//	r18 == XSI_PSR_IC
    4.43 +//	r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
    4.44 +//	r22 == IA64_KR(CURRENT)+IA64_VCPU_BREAKIMM_OFFSET
    4.45 +//	r31 == pr
    4.46 +GLOBAL_ENTRY(fast_break_reflect)
    4.47 +	mov r20=cr.ipsr;;
    4.48 +	// if big-endian domain or privileged-perfmon bits set, do slow way
    4.49 +	extr.u r21=r20,IA64_PSR_BE_BIT,1 ;;
    4.50 +	cmp.ne p7,p0=r21,r0
    4.51 +(p7)	br.sptk.many dispatch_break_fault ;;
    4.52 +	extr.u r21=r20,IA64_PSR_PP_BIT,1 ;;
    4.53 +	cmp.ne p7,p0=r21,r0
    4.54 +(p7)	br.sptk.many dispatch_break_fault ;;
    4.55 +	// ensure ipsr.cpl==2, ipsr.ri==0
    4.56 +	// FIXME: any other psr bits need to be properly set/validated?
    4.57 +	//   ...see process.c: DELIVER_PSR_CLR/SET
    4.58 +	extr.u r21=r20,IA64_PSR_CPL0_BIT,2;;
    4.59 +	extr.u r23=r20,IA64_PSR_RI_BIT,2;;
    4.60 +	dep r20=-1,r20,IA64_PSR_CPL1_BIT,1 ;;
    4.61 +	dep r20=0,r20,IA64_PSR_CPL0_BIT,1 ;;
    4.62 +	dep r20=0,r20,IA64_PSR_RI_BIT,2 ;;
    4.63 +	mov cr.ipsr=r20;;
    4.64 +	// save ipsr in shared_info, vipsr.cpl==(ipsr.cpl==3)?3:0
    4.65 +	cmp.ne p7,p0=3,r21;;
    4.66 +(p7)	mov r21=r0
    4.67 +	dep r20=r21,r20,IA64_PSR_CPL0_BIT,2 ;;
    4.68 +	dep r20=r23,r20,IA64_PSR_RI_BIT,2 ;;
    4.69 +	// vipsr.i=vpsr.i
    4.70 +	adds r21=XSI_PSR_I_OFS-XSI_PSR_IC_OFS,r18 ;;
    4.71 +	ld4 r21=[r21];;
    4.72 +	dep r20=r21,r20,IA64_PSR_I_BIT,1 ;;
    4.73 +	adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
    4.74 +	// FIXME: any other vpsr bits need to be properly set/validated?
    4.75 +	st8 [r21]=r20;;
    4.76 +	// save iim in shared_info
    4.77 +	adds r21=XSI_IIM_OFS-XSI_PSR_IC_OFS,r18 ;;
    4.78 +	st8 [r21]=r17;;
    4.79 +	// save iip in shared_info
    4.80 +	mov r20=cr.iip;;
    4.81 +	adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
    4.82 +	st8 [r21]=r20;;
    4.83 +	// save ifs in shared_info
    4.84 +	adds r21=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
    4.85 +	st4 [r21]=r0 ;;
    4.86 +	adds r21=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18
    4.87 +	st8 [r21]=r0 ;;
    4.88 +	cover ;;
    4.89 +	mov r20=cr.ifs;;
    4.90 +	adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
    4.91 +	st8 [r21]=r20;;
    4.92 +	// vpsr.i = vpsr.ic = 0 on delivery of interruption
    4.93 +	st8 [r18]=r0;;
    4.94 +	// FIXME: need to save iipa and isr to be arch-compliant
    4.95 +	// set iip to go to domain IVA break instruction vector
    4.96 +	adds r22=IA64_VCPU_IVA_OFFSET-IA64_VCPU_BREAKIMM_OFFSET,r22;;
    4.97 +	ld8 r23=[r22];;
    4.98 +	movl r24=0x2c00;;
    4.99 +	add r24=r24,r23;;
   4.100 +	mov cr.iip=r24;;
   4.101 +	// OK, now all set to go except for switch to virtual bank0
   4.102 +	mov r30=r2; mov r29=r3;;
   4.103 +	adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
   4.104 +	adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
   4.105 +	bsw.1;;
   4.106 +	st8 [r2]=r16,16; st8 [r3]=r17,16 ;;
   4.107 +	st8 [r2]=r18,16; st8 [r3]=r19,16 ;;
   4.108 +	st8 [r2]=r20,16; st8 [r3]=r21,16 ;;
   4.109 +	st8 [r2]=r22,16; st8 [r3]=r23,16 ;;
   4.110 +	st8 [r2]=r24,16; st8 [r3]=r25,16 ;;
   4.111 +	st8 [r2]=r26,16; st8 [r3]=r27,16 ;;
   4.112 +	st8 [r2]=r28,16; st8 [r3]=r29,16 ;;
   4.113 +	st8 [r2]=r30,16; st8 [r3]=r31,16 ;;
   4.114 +	movl r31=XSI_IPSR;;
   4.115 +	bsw.0 ;;
   4.116 +	mov r2=r30; mov r3=r29;;
   4.117 +	adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
   4.118 +	st4 [r20]=r0 ;;
   4.119 +	mov pr=r31,-1 ;;
   4.120 +	rfi
   4.121 +	;;
   4.122 +
   4.123 +
   4.124  // ensure that, if giving up, registers at entry to fast_hyperprivop unchanged
   4.125  ENTRY(hyper_rfi)
   4.126 +#define FAST_HYPERPRIVOP_CNT
   4.127 +#ifdef FAST_HYPERPRIVOP_CNT
   4.128 +	movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_RFI);;
   4.129 +	ld8 r21=[r20];;
   4.130 +	adds r21=1,r21;;
   4.131 +	st8 [r20]=r21;;
   4.132 +#endif
   4.133  	adds r20=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
   4.134  	ld8 r21=[r20];;		// r21 = vcr.ipsr
   4.135  	extr.u r22=r21,IA64_PSR_BE_BIT,1 ;;
   4.136 @@ -78,8 +198,6 @@ ENTRY(hyper_rfi)
   4.137  	ld8 r20=[r20];;
   4.138  	dep r20=0,r20,38,25;; // ensure ifs has no reserved bits set
   4.139  	mov cr.ifs=r20 ;;
   4.140 -// TODO: increment a counter so we can count how many rfi's go the fast way
   4.141 -//    but where?  counter must be pinned
   4.142  	// ipsr.cpl == (vcr.ipsr.cpl == 0) 2 : 3;
   4.143  	dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
   4.144  	// vpsr.i = vcr.ipsr.i; vpsr.ic = vcr.ipsr.ic
   4.145 @@ -101,3 +219,129 @@ ENTRY(hyper_rfi)
   4.146  	;;
   4.147  	rfi
   4.148  	;;
   4.149 +
   4.150 +ENTRY(hyper_cover)
   4.151 +#ifdef FAST_HYPERPRIVOP_CNT
   4.152 +	movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_COVER);;
   4.153 +	ld8 r21=[r20];;
   4.154 +	adds r21=1,r21;;
   4.155 +	st8 [r20]=r21;;
   4.156 +#endif
   4.157 +	mov r24=cr.ipsr
   4.158 +	mov r25=cr.iip;;
   4.159 +	// skip test for vpsr.ic.. it's a prerequisite for hyperprivops
   4.160 +	cover ;;
   4.161 +	adds r20=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
   4.162 +	mov r30=cr.ifs;;
   4.163 +	adds r22=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18
   4.164 +	ld4 r21=[r20] ;;
   4.165 +	cmp.eq p6,p7=r21,r0 ;;
   4.166 +(p6)	st8 [r22]=r30;;
   4.167 +(p7)	st4 [r20]=r0;;
   4.168 +	mov cr.ifs=r0;;
   4.169 +	// adjust return address to skip over break instruction
   4.170 +	extr.u r26=r24,41,2 ;;
   4.171 +	cmp.eq p6,p7=2,r26 ;;
   4.172 +(p6)	mov r26=0
   4.173 +(p6)	adds r25=16,r25
   4.174 +(p7)	adds r26=1,r26
   4.175 +	;;
   4.176 +	dep r24=r26,r24,41,2
   4.177 +	;;
   4.178 +	mov cr.ipsr=r24
   4.179 +	mov cr.iip=r25
   4.180 +	mov pr=r31,-1 ;;
   4.181 +	rfi
   4.182 +	;;
   4.183 +
   4.184 +#if 1
   4.185 +// return from metaphysical mode (meta=1) to virtual mode (meta=0)
   4.186 +ENTRY(hyper_ssm_dt)
   4.187 +#ifdef FAST_HYPERPRIVOP_CNT
   4.188 +	movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_SSM_DT);;
   4.189 +	ld8 r21=[r20];;
   4.190 +	adds r21=1,r21;;
   4.191 +	st8 [r20]=r21;;
   4.192 +#endif
   4.193 +	mov r24=cr.ipsr
   4.194 +	mov r25=cr.iip;;
   4.195 +	adds r20=XSI_METAPHYS_OFS-XSI_PSR_IC_OFS,r18 ;;
   4.196 +	ld4 r21=[r20];;
   4.197 +	cmp.eq p7,p0=r21,r0	// meta==0?
   4.198 +(p7)	br.spnt.many	1f ;;	// already in virtual mode
   4.199 +	mov r22=IA64_KR(CURRENT);;
   4.200 +	adds r22=IA64_VCPU_META_SAVED_RR0_OFFSET,r22;;
   4.201 +	ld4 r23=[r22];;
   4.202 +	mov rr[r0]=r23;;
   4.203 +	srlz.i;;
   4.204 +	st4 [r20]=r0 ;;
   4.205 +	// adjust return address to skip over break instruction
   4.206 +	extr.u r26=r24,41,2 ;;
   4.207 +	cmp.eq p6,p7=2,r26 ;;
   4.208 +(p6)	mov r26=0
   4.209 +(p6)	adds r25=16,r25
   4.210 +(p7)	adds r26=1,r26
   4.211 +	;;
   4.212 +	dep r24=r26,r24,41,2
   4.213 +	;;
   4.214 +	mov cr.ipsr=r24
   4.215 +	mov cr.iip=r25
   4.216 +1:	mov pr=r31,-1 ;;
   4.217 +	rfi
   4.218 +	;;
   4.219 +
   4.220 +// go to metaphysical mode (meta=1) from virtual mode (meta=0)
   4.221 +ENTRY(hyper_rsm_dt)
   4.222 +#ifdef FAST_HYPERPRIVOP_CNT
   4.223 +	movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_RSM_DT);;
   4.224 +	ld8 r21=[r20];;
   4.225 +	adds r21=1,r21;;
   4.226 +	st8 [r20]=r21;;
   4.227 +#endif
   4.228 +	mov r24=cr.ipsr
   4.229 +	mov r25=cr.iip;;
   4.230 +	adds r20=XSI_METAPHYS_OFS-XSI_PSR_IC_OFS,r18 ;;
   4.231 +	ld4 r21=[r20];;
   4.232 +	cmp.ne p7,p0=r21,r0	// meta==0?
   4.233 +(p7)	br.spnt.many	1f ;;	// already in metaphysical mode
   4.234 +	mov r22=IA64_KR(CURRENT);;
   4.235 +	adds r22=IA64_VCPU_META_RR0_OFFSET,r22;;
   4.236 +	ld4 r23=[r22];;
   4.237 +	mov rr[r0]=r23;;
   4.238 +	srlz.i;;
   4.239 +	adds r21=1,r0 ;;
   4.240 +	st4 [r20]=r21 ;;
   4.241 +	// adjust return address to skip over break instruction
   4.242 +	extr.u r26=r24,41,2 ;;
   4.243 +	cmp.eq p6,p7=2,r26 ;;
   4.244 +(p6)	mov r26=0
   4.245 +(p6)	adds r25=16,r25
   4.246 +(p7)	adds r26=1,r26
   4.247 +	;;
   4.248 +	dep r24=r26,r24,41,2
   4.249 +	;;
   4.250 +	mov cr.ipsr=r24
   4.251 +	mov cr.iip=r25
   4.252 +1:	mov pr=r31,-1 ;;
   4.253 +	rfi
   4.254 +	;;
   4.255 +
   4.256 +// enable interrupts (and also interrupt collection)
   4.257 +ENTRY(hyper_ssm_i)
   4.258 +#ifdef FAST_HYPERPRIVOP_CNT
   4.259 +	movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_SSM_I);;
   4.260 +	ld8 r21=[r20];;
   4.261 +	adds r21=1,r21;;
   4.262 +	st8 [r20]=r21;;
   4.263 +#endif
   4.264 +	mov r24=cr.ipsr
   4.265 +	mov r25=cr.iip;;
   4.266 +	movl r20=0x100000001;;
   4.267 +	st8 [r18]=r20;;	// turn on both vpsr.i and vpsr.ic
   4.268 +// FIXME: NEED TO UPDATE IPSR/IIP TO SKIP BREAK INST
   4.269 +// FIXME: NEED TO CHECK FOR PENDING INTERRUPTS AND DELIVER THEM!
   4.270 +1:	mov pr=r31,-1 ;;
   4.271 +	rfi
   4.272 +	;;
   4.273 +#endif
   4.274 +
     5.1 --- a/xen/arch/ia64/ivt.S	Fri Jun 03 22:22:23 2005 +0000
     5.2 +++ b/xen/arch/ia64/ivt.S	Mon Jun 06 17:13:19 2005 +0000
     5.3 @@ -783,20 +783,26 @@ ENTRY(break_fault)
     5.4  	ld8 r19=[r18]
     5.5  	;;
     5.6  	cmp.eq p7,p0=r0,r17			// is this a psuedo-cover?
     5.7 -(p7)	br.sptk.many dispatch_privop_fault
     5.8 +(p7)	br.spnt.many dispatch_privop_fault
     5.9  	;;
    5.10 -	cmp4.ne p7,p0=r0,r19
    5.11 -(p7)	br.sptk.many dispatch_break_fault
    5.12 -	// If we get to here, we have a hyperprivop
    5.13 -	// For now, hyperprivops are handled through the break mechanism
    5.14 -	// Later, they will be fast hand-coded assembly with psr.ic off
    5.15 +	// if vpsr.ic is off, we have a hyperprivop
    5.16 +	// A hyperprivop is hand-coded assembly with psr.ic off
    5.17  	// which means no calls, no use of r1-r15 and no memory accesses
    5.18  	// except to pinned addresses!
    5.19 -#define FAST_HYPERPRIVOPS
    5.20 -#ifdef FAST_HYPERPRIVOPS
    5.21 -	br.sptk.many fast_hyperprivop
    5.22 +	cmp4.eq p7,p0=r0,r19
    5.23 +(p7)	br.sptk.many fast_hyperprivop
    5.24 +	;;
    5.25 +	mov r22=IA64_KR(CURRENT);;
    5.26 +	adds r22=IA64_VCPU_BREAKIMM_OFFSET,r22;;
    5.27 +	ld4 r23=[r22];;
    5.28 +	cmp4.eq p6,p7=r23,r17			// Xen-reserved breakimm?
    5.29 +(p6)	br.spnt.many dispatch_break_fault
    5.30 +	;;
    5.31 +//#define FAST_BREAK
    5.32 +#ifdef FAST_BREAK
    5.33 +	br.sptk.many fast_break_reflect
    5.34  #else
    5.35 -	br.sptk.many dispatch_break_fault
    5.36 +	br.spnt.many dispatch_break_fault
    5.37  #endif
    5.38  	;;
    5.39  #endif
     6.1 --- a/xen/arch/ia64/privop.c	Fri Jun 03 22:22:23 2005 +0000
     6.2 +++ b/xen/arch/ia64/privop.c	Mon Jun 06 17:13:19 2005 +0000
     6.3 @@ -747,14 +747,16 @@ priv_emulate(VCPU *vcpu, REGS *regs, UIN
     6.4  #define HYPERPRIVOP_COVER		0x4
     6.5  #define HYPERPRIVOP_ITC_D		0x5
     6.6  #define HYPERPRIVOP_ITC_I		0x6
     6.7 -#define HYPERPRIVOP_MAX			0x6
     6.8 +#define HYPERPRIVOP_SSM_I		0x7
     6.9 +#define HYPERPRIVOP_MAX			0x7
    6.10  
    6.11  char *hyperpriv_str[HYPERPRIVOP_MAX+1] = {
    6.12 -	0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i",
    6.13 +	0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i", "ssm.i",
    6.14  	0
    6.15  };
    6.16  
    6.17 -unsigned long hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
    6.18 +unsigned long slow_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
    6.19 +unsigned long fast_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
    6.20  
    6.21  /* hyperprivops are generally executed in assembly (with physical psr.ic off)
    6.22   * so this code is primarily used for debugging them */
    6.23 @@ -765,13 +767,12 @@ ia64_hyperprivop(unsigned long iim, REGS
    6.24  	INST64 inst;
    6.25  	UINT64 val;
    6.26  
    6.27 -// FIXME: Add instrumentation for these
    6.28  // FIXME: Handle faults appropriately for these
    6.29  	if (!iim || iim > HYPERPRIVOP_MAX) {
    6.30  		printf("bad hyperprivop; ignored\n");
    6.31  		return 1;
    6.32  	}
    6.33 -	hyperpriv_cnt[iim]++;
    6.34 +	slow_hyperpriv_cnt[iim]++;
    6.35  	switch(iim) {
    6.36  	    case HYPERPRIVOP_RFI:
    6.37  		(void)vcpu_rfi(v);
    6.38 @@ -793,6 +794,9 @@ ia64_hyperprivop(unsigned long iim, REGS
    6.39  		inst.inst = 0;
    6.40  		(void)priv_itc_i(v,inst);
    6.41  		return 1;
    6.42 +	    case HYPERPRIVOP_SSM_I:
    6.43 +		(void)vcpu_set_psr_i(ed);
    6.44 +		return 1;
    6.45  	}
    6.46  	return 0;
    6.47  }
    6.48 @@ -981,18 +985,28 @@ int dump_hyperprivop_counts(char *buf)
    6.49  {
    6.50  	int i;
    6.51  	char *s = buf;
    6.52 -	s += sprintf(s,"Slow hyperprivops:\n");
    6.53 +	unsigned long total = 0;
    6.54 +	for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += slow_hyperpriv_cnt[i];
    6.55 +	s += sprintf(s,"Slow hyperprivops (total %d):\n",total);
    6.56  	for (i = 1; i <= HYPERPRIVOP_MAX; i++)
    6.57 -		if (hyperpriv_cnt[i])
    6.58 +		if (slow_hyperpriv_cnt[i])
    6.59  			s += sprintf(s,"%10d %s\n",
    6.60 -				hyperpriv_cnt[i], hyperpriv_str[i]);
    6.61 +				slow_hyperpriv_cnt[i], hyperpriv_str[i]);
    6.62 +	total = 0;
    6.63 +	for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += fast_hyperpriv_cnt[i];
    6.64 +	s += sprintf(s,"Fast hyperprivops (total %d):\n",total);
    6.65 +	for (i = 1; i <= HYPERPRIVOP_MAX; i++)
    6.66 +		if (fast_hyperpriv_cnt[i])
    6.67 +			s += sprintf(s,"%10d %s\n",
    6.68 +				fast_hyperpriv_cnt[i], hyperpriv_str[i]);
    6.69  	return s - buf;
    6.70  }
    6.71  
    6.72  void zero_hyperprivop_counts(void)
    6.73  {
    6.74  	int i;
    6.75 -	for (i = 0; i <= HYPERPRIVOP_MAX; i++) hyperpriv_cnt[i] = 0;
    6.76 +	for (i = 0; i <= HYPERPRIVOP_MAX; i++) slow_hyperpriv_cnt[i] = 0;
    6.77 +	for (i = 0; i <= HYPERPRIVOP_MAX; i++) fast_hyperpriv_cnt[i] = 0;
    6.78  }
    6.79  
    6.80  #define TMPBUFLEN 8*1024
    6.81 @@ -1002,6 +1016,7 @@ int dump_privop_counts_to_user(char __us
    6.82  	int n = dump_privop_counts(buf);
    6.83  
    6.84  	n += dump_hyperprivop_counts(buf + n);
    6.85 +	n += dump_reflect_counts(buf + n);
    6.86  #ifdef PRIVOP_ADDR_COUNT
    6.87  	n += dump_privop_addrs(buf + n);
    6.88  #endif
    6.89 @@ -1019,6 +1034,7 @@ int zero_privop_counts_to_user(char __us
    6.90  #ifdef PRIVOP_ADDR_COUNT
    6.91  	zero_privop_addrs();
    6.92  #endif
    6.93 +	zero_reflect_counts();
    6.94  	if (len < TMPBUFLEN) return -1;
    6.95  	if (__copy_to_user(ubuf,buf,n)) return -1;
    6.96  	return n;
     7.1 --- a/xen/arch/ia64/process.c	Fri Jun 03 22:22:23 2005 +0000
     7.2 +++ b/xen/arch/ia64/process.c	Mon Jun 06 17:13:19 2005 +0000
     7.3 @@ -130,6 +130,42 @@ unsigned long translate_domain_mpaddr(un
     7.4  	return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
     7.5  }
     7.6  
     7.7 +unsigned long slow_reflect_count[0x80] = { 0 };
     7.8 +unsigned long fast_reflect_count[0x80] = { 0 };
     7.9 +
    7.10 +#define inc_slow_reflect_count(vec) slow_reflect_count[vec>>8]++;
    7.11 +
    7.12 +void zero_reflect_counts(void)
    7.13 +{
    7.14 +	int i;
    7.15 +	for (i=0; i<0x80; i++) slow_reflect_count[i] = 0;
    7.16 +	for (i=0; i<0x80; i++) fast_reflect_count[i] = 0;
    7.17 +}
    7.18 +
    7.19 +int dump_reflect_counts(char *buf)
    7.20 +{
    7.21 +	int i,j,cnt;
    7.22 +	char *s = buf;
    7.23 +
    7.24 +	s += sprintf(s,"Slow reflections by vector:\n");
    7.25 +	for (i = 0, j = 0; i < 0x80; i++) {
    7.26 +		if (cnt = slow_reflect_count[i]) {
    7.27 +			s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
    7.28 +			if ((j++ & 3) == 3) s += sprintf(s,"\n");
    7.29 +		}
    7.30 +	}
    7.31 +	if (j & 3) s += sprintf(s,"\n");
    7.32 +	s += sprintf(s,"Fast reflections by vector:\n");
    7.33 +	for (i = 0, j = 0; i < 0x80; i++) {
    7.34 +		if (cnt = fast_reflect_count[i]) {
    7.35 +			s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
    7.36 +			if ((j++ & 3) == 3) s += sprintf(s,"\n");
    7.37 +		}
    7.38 +	}
    7.39 +	if (j & 3) s += sprintf(s,"\n");
    7.40 +	return s - buf;
    7.41 +}
    7.42 +
    7.43  void reflect_interruption(unsigned long ifa, unsigned long isr, unsigned long itiriim, struct pt_regs *regs, unsigned long vector)
    7.44  {
    7.45  	unsigned long vcpu_get_ipsr_int_state(struct vcpu *,unsigned long);
    7.46 @@ -165,6 +201,7 @@ panic_domain(regs,"psr.ic off, deliverin
    7.47  		regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    7.48  // NOTE: nested trap must NOT pass PSCB address
    7.49  		//regs->r31 = (unsigned long) &PSCB(v);
    7.50 +		inc_slow_reflect_count(vector);
    7.51  		return;
    7.52  
    7.53  	}
    7.54 @@ -195,6 +232,8 @@ panic_domain(regs,"psr.ic off, deliverin
    7.55  
    7.56  	PSCB(v,interrupt_delivery_enabled) = 0;
    7.57  	PSCB(v,interrupt_collection_enabled) = 0;
    7.58 +
    7.59 +	inc_slow_reflect_count(vector);
    7.60  }
    7.61  
    7.62  void foodpi(void) {}
    7.63 @@ -748,7 +787,7 @@ ia64_handle_break (unsigned long ifa, st
    7.64  		if (running_on_sim) do_ssc(vcpu_get_gr(current,36), regs);
    7.65  		else do_ssc(vcpu_get_gr(current,36), regs);
    7.66  	}
    7.67 -	else if (iim == d->breakimm) {
    7.68 +	else if (iim == d->arch.breakimm) {
    7.69  		if (ia64_hypercall(regs))
    7.70  			vcpu_increment_iip(current);
    7.71  	}
     8.1 --- a/xen/arch/ia64/regionreg.c	Fri Jun 03 22:22:23 2005 +0000
     8.2 +++ b/xen/arch/ia64/regionreg.c	Mon Jun 06 17:13:19 2005 +0000
     8.3 @@ -63,9 +63,14 @@ unsigned long allocate_reserved_rid(void
     8.4  
     8.5  
     8.6  // returns -1 if none available
     8.7 -unsigned long allocate_metaphysical_rid(void)
     8.8 +unsigned long allocate_metaphysical_rr0(void)
     8.9  {
    8.10 -	unsigned long rid = allocate_reserved_rid();
    8.11 +	ia64_rr rrv;
    8.12 +
    8.13 +	rrv.rid = allocate_reserved_rid();
    8.14 +	rrv.ps = PAGE_SHIFT;
    8.15 +	rrv.ve = 0;
    8.16 +	return rrv.rrval;
    8.17  }
    8.18  
    8.19  int deallocate_metaphysical_rid(unsigned long rid)
    8.20 @@ -282,22 +287,20 @@ int set_one_rr(unsigned long rr, unsigne
    8.21  		if (rreg == 6) newrrv.ve = VHPT_ENABLED_REGION_7;
    8.22  		else newrrv.ve = VHPT_ENABLED_REGION_0_TO_6;
    8.23  		newrrv.ps = PAGE_SHIFT;
    8.24 +		if (rreg == 0) ed->arch.metaphysical_saved_rr0 = newrrv.rrval;
    8.25  		set_rr(rr,newrrv.rrval);
    8.26  	}
    8.27  	return 1;
    8.28  }
    8.29  
    8.30  // set rr0 to the passed rid (for metaphysical mode so don't use domain offset
    8.31 -int set_metaphysical_rr(unsigned long rr, unsigned long rid)
    8.32 +int set_metaphysical_rr0(void)
    8.33  {
    8.34 +	struct exec_domain *ed = current;
    8.35  	ia64_rr rrv;
    8.36  	
    8.37 -	rrv.rrval = 0;
    8.38 -	rrv.rid = rid;
    8.39 -	rrv.ps = PAGE_SHIFT;
    8.40  //	rrv.ve = 1; 	FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
    8.41 -	rrv.ve = 0;
    8.42 -	set_rr(rr,rrv.rrval);
    8.43 +	set_rr(0,ed->arch.metaphysical_rr0);
    8.44  }
    8.45  
    8.46  // validates/changes region registers 0-6 in the currently executing domain
    8.47 @@ -323,6 +326,7 @@ void init_all_rr(struct vcpu *v)
    8.48  
    8.49  	rrv.rrval = 0;
    8.50  	rrv.rid = v->domain->metaphysical_rid;
    8.51 +	rrv.rrval = ed->domain->arch.metaphysical_rr0;
    8.52  	rrv.ps = PAGE_SHIFT;
    8.53  	rrv.ve = 1;
    8.54  if (!v->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
    8.55 @@ -378,6 +382,7 @@ unsigned long load_region_regs(struct vc
    8.56  		rrv.rrval = 0;
    8.57  		rrv.rid = v->domain->metaphysical_rid;
    8.58  		rrv.ps = PAGE_SHIFT;
    8.59 +		rrv.rrval = v->domain->arch.metaphysical_rr0;
    8.60  		rrv.ve = 1;
    8.61  		rr0 = rrv.rrval;
    8.62  		set_rr_no_srlz(0x0000000000000000L, rr0);
     9.1 --- a/xen/arch/ia64/vcpu.c	Fri Jun 03 22:22:23 2005 +0000
     9.2 +++ b/xen/arch/ia64/vcpu.c	Mon Jun 06 17:13:19 2005 +0000
     9.3 @@ -117,7 +117,7 @@ void vcpu_set_metaphysical_mode(VCPU *vc
     9.4  {
     9.5  	/* only do something if mode changes */
     9.6  	if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
     9.7 -		if (newmode) set_metaphysical_rr(0,vcpu->domain->metaphysical_rid);
     9.8 +		if (newmode) set_metaphysical_rr0();
     9.9  		else if (PSCB(vcpu,rrs[0]) != -1)
    9.10  			set_one_rr(0, PSCB(vcpu,rrs[0]));
    9.11  		PSCB(vcpu,metaphysical_mode) = newmode;
    9.12 @@ -170,6 +170,13 @@ IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
    9.13  	return IA64_NO_FAULT;
    9.14  }
    9.15  
    9.16 +IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
    9.17 +{
    9.18 +	PSCB(vcpu,interrupt_delivery_enabled) = 1;
    9.19 +	PSCB(vcpu,interrupt_collection_enabled) = 1;
    9.20 +	return IA64_NO_FAULT;
    9.21 +}
    9.22 +
    9.23  IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
    9.24  {
    9.25  	struct ia64_psr psr, imm, *ipsr;
    9.26 @@ -643,6 +650,7 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT6
    9.27  #ifdef HEARTBEAT_FREQ
    9.28  #define N_DOMS 16	// period in seconds
    9.29  	static long count[N_DOMS] = { 0 };
    9.30 +	static long nonclockcount[N_DOMS] = { 0 };
    9.31  	REGS *regs = vcpu_regs(vcpu);
    9.32  	unsigned domid = vcpu->domain->domain_id;
    9.33  #endif
    9.34 @@ -664,15 +672,15 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT6
    9.35  	}
    9.36  #ifdef HEARTBEAT_FREQ
    9.37  	if (domid >= N_DOMS) domid = N_DOMS-1;
    9.38 -	if (vector == (PSCB(vcpu,itv) & 0xff) &&
    9.39 -	    !(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
    9.40 -		printf("Dom%d heartbeat... iip=%p,psr.i=%d,pend=%d\n",
    9.41 -			domid, regs->cr_iip,
    9.42 -			current->vcpu_info->arch.interrupt_delivery_enabled,
    9.43 -			current->vcpu_info->arch.pending_interruption);
    9.44 -		count[domid] = 0;
    9.45 -		dump_runq();
    9.46 +	if (vector == (PSCB(vcpu,itv) & 0xff)) {
    9.47 +	    if (!(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
    9.48 +		printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
    9.49 +			domid, count[domid], nonclockcount[domid]);
    9.50 +		//count[domid] = 0;
    9.51 +		//dump_runq();
    9.52 +	    }
    9.53  	}
    9.54 +	else nonclockcount[domid]++;
    9.55  #endif
    9.56  	// now have an unmasked, pending, deliverable vector!
    9.57  	// getting ivr has "side effects"
    10.1 --- a/xen/include/asm-ia64/domain.h	Fri Jun 03 22:22:23 2005 +0000
    10.2 +++ b/xen/include/asm-ia64/domain.h	Mon Jun 06 17:13:19 2005 +0000
    10.3 @@ -27,7 +27,7 @@ struct trap_bounce {
    10.4  struct arch_domain {
    10.5      struct mm_struct *active_mm;
    10.6      struct mm_struct *mm;
    10.7 -    int metaphysical_rid;
    10.8 +    int metaphysical_rr0;
    10.9      int starting_rid;		/* first RID assigned to domain */
   10.10      int ending_rid;		/* one beyond highest RID assigned to domain */
   10.11      int rid_bits;		/* number of virtual rid bits (default: 18) */
   10.12 @@ -47,11 +47,9 @@ struct arch_domain {
   10.13      u64 entry;
   10.14  #endif
   10.15  };
   10.16 -#define metaphysical_rid arch.metaphysical_rid
   10.17  #define starting_rid arch.starting_rid
   10.18  #define ending_rid arch.ending_rid
   10.19  #define rid_bits arch.rid_bits
   10.20 -#define breakimm arch.breakimm
   10.21  #define xen_vastart arch.xen_vastart
   10.22  #define xen_vaend arch.xen_vaend
   10.23  #define shared_info_va arch.shared_info_va
   10.24 @@ -75,6 +73,9 @@ struct arch_vcpu {
   10.25  	unsigned long xen_timer_interval;
   10.26  #endif
   10.27      void *regs;	/* temporary until find a better way to do privops */
   10.28 +    int metaphysical_rr0;		// from arch_domain (so is pinned)
   10.29 +    int metaphysical_saved_rr0;		// from arch_domain (so is pinned)
   10.30 +    int breakimm;			// from arch_domain (so is pinned)
   10.31      struct mm_struct *active_mm;
   10.32      struct thread_struct _thread;	// this must be last
   10.33  #ifdef CONFIG_VTI
    11.1 --- a/xen/include/public/arch-ia64.h	Fri Jun 03 22:22:23 2005 +0000
    11.2 +++ b/xen/include/public/arch-ia64.h	Mon Jun 06 17:13:19 2005 +0000
    11.3 @@ -81,10 +81,11 @@ typedef struct vcpu_guest_context {
    11.4  #endif /* !__ASSEMBLY__ */
    11.5  
    11.6  #define	XEN_HYPER_RFI			1
    11.7 -#define	XEN_HYPER_RSM_PSR_DT		2
    11.8 -#define	XEN_HYPER_SSM_PSR_DT		3
    11.9 +#define	XEN_HYPER_RSM_DT		2
   11.10 +#define	XEN_HYPER_SSM_DT		3
   11.11  #define	XEN_HYPER_COVER			4
   11.12  #define	XEN_HYPER_ITC_D			5
   11.13  #define	XEN_HYPER_ITC_I			6
   11.14 +#define	XEN_HYPER_SSM_I			7
   11.15  
   11.16  #endif /* __HYPERVISOR_IF_IA64_H__ */