direct-io.hg

changeset 13084:ea2dc4a3c8eb

[IA64] evtchn_callback fix and cleanup

Since we have changed to use the event callback to deliver interrupts:

1. pending_interruption is changed to pending_event, i.e.
vcpu_info->evtchn_upcall_pending (see the sketch below).

2. get_ivr, get_tpr, set_tpr and eoi are either unused or used only
during the initialization phase, so there is no need to implement them
in assembly. This code is deleted.

3. hyper_ssm_i needs to be rewritten to jump to the evtchn callback handler
instead of the iva+0x3000 interrupt handler. I will do this later.
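
For reference, a minimal C sketch (illustration only, not part of this patch)
of how the pending flag is now located: in the shared vcpu_info structure,
evtchn_upcall_pending and evtchn_upcall_mask are adjacent single bytes, so the
pending byte sits one byte below the mask address that XSI_PSR_I_ADDR /
interrupt_mask_addr points at. This is what the new xen_get_virtual_pend()
and the "adds rX=-1,..." / "ld1" sequences in the diff rely on; the two field
names follow the public Xen interface, all other names below are made up for
the sketch.

    /* Illustration only -- layout of the two flag bytes in vcpu_info
     * (see the public Xen headers); names other than the two fields
     * are hypothetical. */
    #include <stdint.h>

    struct vcpu_event_flags {
            uint8_t evtchn_upcall_pending;  /* offset 0: event pending   */
            uint8_t evtchn_upcall_mask;     /* offset 1: delivery masked */
    };

    /* interrupt_mask_addr (what XSI_PSR_I_ADDR is loaded from) points at
     * evtchn_upcall_mask, so the pending flag is one byte below it. */
    static inline uint8_t xen_pending_event(const uint8_t *interrupt_mask_addr)
    {
            return *(interrupt_mask_addr - 1);  /* evtchn_upcall_pending */
    }
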

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild2.aw
date Mon Dec 18 10:20:34 2006 -0700 (2006-12-18)
parents 893b786cc66a
children 6e68e8a8cc99
files linux-2.6-xen-sparse/arch/ia64/kernel/asm-offsets.c linux-2.6-xen-sparse/arch/ia64/kernel/gate.S linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h xen/arch/ia64/asm-offsets.c xen/arch/ia64/asm-xsi-offsets.c xen/arch/ia64/xen/hyperprivop.S
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/ia64/kernel/asm-offsets.c	Mon Dec 18 10:04:49 2006 -0700
     1.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/asm-offsets.c	Mon Dec 18 10:20:34 2006 -0700
     1.3 @@ -280,7 +280,6 @@ void foo(void)
     1.4  	DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
     1.5  	DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
     1.6  	DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
     1.7 -	DEFINE_MAPPED_REG_OFS(XSI_PEND_OFS, pending_interruption);
     1.8  	DEFINE_MAPPED_REG_OFS(XSI_INCOMPL_REGFR_OFS, incomplete_regframe);
     1.9  	DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
    1.10  	DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
     2.1 --- a/linux-2.6-xen-sparse/arch/ia64/kernel/gate.S	Mon Dec 18 10:04:49 2006 -0700
     2.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/gate.S	Mon Dec 18 10:20:34 2006 -0700
     2.3 @@ -128,9 +128,9 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
     2.4  	;;
     2.5  #ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
     2.6  	// r20 = 1
     2.7 -	// r22 = &vcpu->evtchn_mask
     2.8 +	// r22 = &vcpu->vcpu_info->evtchn_upcall_mask
     2.9  	// r23 = &vpsr.ic
    2.10 -	// r24 = &vcpu->pending_interruption
    2.11 +	// r24 = &vcpu->vcpu_info->evtchn_upcall_pending
    2.12  	// r25 = tmp
    2.13  	// r28 = &running_on_xen
    2.14  	// r30 = running_on_xen
    2.15 @@ -144,8 +144,11 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
    2.16  #define isRaw	p13
    2.17  	LOAD_RUNNING_ON_XEN(r28)
    2.18  	movl r22=XSI_PSR_I_ADDR
    2.19 +	;;
    2.20 +	ld8 r22=[r22]
    2.21 +	;;
    2.22  	movl r23=XSI_PSR_IC
    2.23 -	movl r24=XSI_PSR_I_ADDR+(XSI_PEND_OFS-XSI_PSR_I_ADDR_OFS)
    2.24 +	adds r24=-1,r22
    2.25  	mov r20=1
    2.26  	;;
    2.27  	ld4 r30=[r28]
     3.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S	Mon Dec 18 10:04:49 2006 -0700
     3.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S	Mon Dec 18 10:20:34 2006 -0700
     3.3 @@ -356,8 +356,6 @@ END(xen_send_ipi)
     3.4  // Those are vdso specialized.
     3.5  // In fsys mode, call, ret can't be used.
     3.6  GLOBAL_ENTRY(xen_rsm_be_i)
     3.7 -	ld8 r22=[r22]
     3.8 -	;; 
     3.9  	st1 [r22]=r20
    3.10  	st4 [r23]=r0
    3.11  	XEN_HYPER_RSM_BE
    3.12 @@ -380,23 +378,23 @@ GLOBAL_ENTRY(xen_get_psr)
    3.13  END(xen_get_psr)
    3.14  
    3.15  	// see xen_ssm_i() in privop.h
    3.16 -	// r22 = &vcpu->evtchn_mask
    3.17 +	// r22 = &vcpu->vcpu_info->evtchn_upcall_mask
    3.18  	// r23 = &vpsr.ic
    3.19 -	// r24 = &vcpu->pending_interruption
    3.20 +	// r24 = &vcpu->vcpu_info->evtchn_upcall_pending
    3.21  	// r25 = tmp
    3.22  	// r31 = tmp
    3.23  	// p11 = tmp
    3.24  	// p14 = tmp
    3.25  #define XEN_SET_PSR_I			\
    3.26 -	ld4 r31=[r22];			\
    3.27 -	ld4 r25=[r24];			\
    3.28 +	ld1 r31=[r22];			\
    3.29 +	ld1 r25=[r24];			\
    3.30  	;;				\
    3.31 -	st4 [r22]=r0;			\
    3.32 +	st1 [r22]=r0;			\
    3.33  	cmp.ne.unc p14,p0=r0,r31;	\
    3.34  	;;				\
    3.35  (p14)	cmp.ne.unc p11,p0=r0,r25;	\
    3.36  	;;				\
    3.37 -(p11)	st4 [r22]=r20;			\
    3.38 +(p11)	st1 [r22]=r20;			\
    3.39  (p11)	st4 [r23]=r0;			\
    3.40  (p11)	XEN_HYPER_SSM_I;
    3.41  		
     4.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S	Mon Dec 18 10:04:49 2006 -0700
     4.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S	Mon Dec 18 10:20:34 2006 -0700
     4.3 @@ -737,10 +737,10 @@ xen_page_fault:
     4.4  	;;
     4.5  (p15)	ld8 r3=[r3]
     4.6  	;;
     4.7 -(p15)	st1 [r3]=r0,XSI_PEND_OFS-XSI_PSR_I_ADDR_OFS	// if (p15) vpsr.i = 1
     4.8 +(p15)	st1 [r3]=r0,-1	// if (p15) vpsr.i = 1
     4.9  	mov r14=r0
    4.10  	;;
    4.11 -(p15)	ld4 r14=[r3]				// if (pending_interrupts)
    4.12 +(p15)	ld1 r14=[r3]				// if (pending_events)
    4.13  	adds r3=8,r2				// re-set up second base pointer
    4.14  	;;
    4.15  (p15)	cmp.ne	p15,p0=r14,r0
    4.16 @@ -1170,10 +1170,10 @@ 1:
    4.17  #ifdef CONFIG_XEN
    4.18  (p15)	ld8 r16=[r16]				// vpsr.i
    4.19  	;;
    4.20 -(p15)	st1 [r16]=r0,XSI_PEND_OFS-XSI_PSR_I_ADDR_OFS	// if (p15) vpsr.i = 1
    4.21 +(p15)	st1 [r16]=r0,-1		// if (p15) vpsr.i = 1
    4.22  	mov r2=r0
    4.23  	;;
    4.24 -(p15)	ld4 r2=[r16]				// if (pending_interrupts)
    4.25 +(p15)	ld1 r2=[r16]				// if (pending_events)
    4.26  	;;
    4.27  	cmp.ne	p6,p0=r2,r0
    4.28  	;;
    4.29 @@ -2159,13 +2159,22 @@ GLOBAL_ENTRY(xen_event_callback)
    4.30  	;;
    4.31  	SAVE_REST
    4.32  	;;
    4.33 +1:
    4.34  	alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
    4.35  	add out0=16,sp		// pass pointer to pt_regs as first arg
    4.36  	;;
    4.37 -	srlz.d			// make sure we see the effect of cr.ivr
    4.38 -	movl r14=ia64_leave_kernel
    4.39 +	br.call.sptk.many b0=evtchn_do_upcall
    4.40 +	;;
    4.41 +	movl r20=XSI_PSR_I_ADDR
    4.42 +	;;
    4.43 +	ld8 r20=[r20]
    4.44  	;;
    4.45 -	mov rp=r14
    4.46 -	br.call.sptk.many b6=evtchn_do_upcall
    4.47 +	adds r20=-1,r20		// vcpu_info->evtchn_upcall_pending
    4.48 +	;;
    4.49 +	ld1 r20=[r20]
    4.50 +	;;
    4.51 +	cmp.ne p6,p0=r20,r0	// if there are pending events, 
    4.52 +	(p6) br.spnt.few 1b	// call evtchn_do_upcall again.
    4.53 +	br.sptk.many ia64_leave_kernel   
    4.54  END(xen_event_callback)
    4.55  #endif
     5.1 --- a/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h	Mon Dec 18 10:04:49 2006 -0700
     5.2 +++ b/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h	Mon Dec 18 10:20:34 2006 -0700
     5.3 @@ -113,7 +113,8 @@ extern void xen_set_eflag(unsigned long)
     5.4  	({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
     5.5  #define xen_set_virtual_psr_ic(_val)	\
     5.6  	({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
     5.7 -#define xen_get_virtual_pend()		(XEN_MAPPEDREGS->pending_interruption)
     5.8 +#define xen_get_virtual_pend()		\
     5.9 +	(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
    5.10  
    5.11  /* Hyperprivops are "break" instructions with a well-defined API.
    5.12   * In particular, the virtual psr.ic bit must be off; in this way
     6.1 --- a/xen/arch/ia64/asm-offsets.c	Mon Dec 18 10:04:49 2006 -0700
     6.2 +++ b/xen/arch/ia64/asm-offsets.c	Mon Dec 18 10:20:34 2006 -0700
     6.3 @@ -41,6 +41,8 @@ void foo(void)
     6.4  	DEFINE(VCPU_VTM_OFFSET_OFS, offsetof(struct vcpu, arch.arch_vmx.vtm.vtm_offset));
     6.5  	DEFINE(VCPU_VTM_LAST_ITC_OFS, offsetof(struct vcpu, arch.arch_vmx.vtm.last_itc));
     6.6  	DEFINE(VCPU_VRR0_OFS, offsetof(struct vcpu, arch.arch_vmx.vrr[0]));
     6.7 +	DEFINE(VCPU_ITR0_OFS, offsetof(struct vcpu, arch.itrs[0]));
     6.8 +	DEFINE(VCPU_CALLBACK_OFS, offsetof(struct vcpu, arch.event_callback_ip));
     6.9  #ifdef   VTI_DEBUG
    6.10  	DEFINE(IVT_CUR_OFS, offsetof(struct vcpu, arch.arch_vmx.ivt_current));
    6.11  	DEFINE(IVT_DBG_OFS, offsetof(struct vcpu, arch.arch_vmx.ivt_debug));
     7.1 --- a/xen/arch/ia64/asm-xsi-offsets.c	Mon Dec 18 10:04:49 2006 -0700
     7.2 +++ b/xen/arch/ia64/asm-xsi-offsets.c	Mon Dec 18 10:20:34 2006 -0700
     7.3 @@ -62,7 +62,6 @@ void foo(void)
     7.4  	DEFINE_MAPPED_REG_OFS(XSI_ITV_OFS, itv);
     7.5  	DEFINE_MAPPED_REG_OFS(XSI_PTA_OFS, pta);
     7.6  	DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
     7.7 -	DEFINE_MAPPED_REG_OFS(XSI_PEND_OFS, pending_interruption);
     7.8  	DEFINE_MAPPED_REG_OFS(XSI_INCOMPL_REGFR_OFS, incomplete_regframe);
     7.9  	DEFINE_MAPPED_REG_OFS(XSI_METAPHYS_OFS, metaphysical_mode);
    7.10  	DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
     8.1 --- a/xen/arch/ia64/xen/hyperprivop.S	Mon Dec 18 10:04:49 2006 -0700
     8.2 +++ b/xen/arch/ia64/xen/hyperprivop.S	Mon Dec 18 10:20:34 2006 -0700
     8.3 @@ -37,8 +37,10 @@
     8.4  # define FAST_BREAK
     8.5  # undef FAST_ACCESS_REFLECT 	//XXX TODO fast_access_reflect
     8.6                              	//    doesn't support dom0 vp yet.
     8.7 -# define FAST_RFI
     8.8 -# define FAST_SSM_I
     8.9 +//# define FAST_RFI
    8.10 +// TODO: Since we use callback to deliver interrupt, 
    8.11 +//       FAST_SSM_I needs to be rewritten.
    8.12 +//# define FAST_SSM_I
    8.13  # define FAST_PTC_GA
    8.14  # undef RFI_TO_INTERRUPT // not working yet
    8.15  #endif
    8.16 @@ -87,29 +89,13 @@ GLOBAL_ENTRY(fast_hyperprivop)
    8.17  	cmp.eq p7,p6=HYPERPRIVOP_SSM_I,r17
    8.18  (p7)	br.sptk.many hyper_ssm_i;;
    8.19  
    8.20 -	// FIXME. This algorithm gives up (goes to the slow path) if there
    8.21 -	// are ANY interrupts pending, even if they are currently
    8.22 -	// undeliverable.  This should be improved later...
    8.23 -	adds r20=XSI_PEND_OFS-XSI_PSR_IC_OFS,r18 ;;
    8.24 -	ld4 r20=[r20] ;;
    8.25 -	cmp.eq p7,p0=r0,r20
    8.26 -(p7)	br.cond.sptk.many 1f
    8.27 -	movl r20=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
    8.28 -	ld8 r20=[r20];;
    8.29 -	adds r21=IA64_VCPU_IRR0_OFFSET,r20;
    8.30 -	adds r22=IA64_VCPU_IRR0_OFFSET+8,r20;;
    8.31 -	ld8 r23=[r21],16; ld8 r24=[r22],16;;
    8.32 -	ld8 r21=[r21]; ld8 r22=[r22];;
    8.33 -	or r23=r23,r24; or r21=r21,r22;;
    8.34 -	or r20=r23,r21;;
    8.35 -1:	// when we get to here r20=~=interrupts pending
    8.36  	// Check pending event indication
    8.37 -(p7)	movl r20=THIS_CPU(current_psr_i_addr);;
    8.38 -(p7)	ld8 r20=[r20]
    8.39 +	adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS, r18;;
    8.40 +	ld8 r20=[r20]
    8.41  	;;
    8.42 -(p7)	adds r20=-1,r20				// evtchn_upcall_pending
    8.43 +	ld1 r22=[r20],-1	// evtchn_upcall_mask
    8.44  	;;
    8.45 -(p7)	ld1 r20=[r20]
    8.46 +	ld1 r20=[r20]		// evtchn_upcall_pending
    8.47  	;;
    8.48  
    8.49  	// HYPERPRIVOP_RFI?
    8.50 @@ -117,12 +103,10 @@ 1:	// when we get to here r20=~=interrup
    8.51  (p7)	br.sptk.many hyper_rfi
    8.52  	;;
    8.53  
    8.54 -	// HYPERPRIVOP_GET_IVR?
    8.55 -	cmp.eq p7,p6=HYPERPRIVOP_GET_IVR,r17
    8.56 -(p7)	br.sptk.many hyper_get_ivr
    8.57 +	// if event enabled and there are pending events
    8.58 +	cmp.ne p7,p0=r20,r0
    8.59  	;;
    8.60 -
    8.61 -	cmp.ne p7,p0=r20,r0
    8.62 +	cmp.eq.and p7,p0=r22,r0
    8.63  (p7)	br.spnt.many dispatch_break_fault
    8.64  	;;
    8.65  
    8.66 @@ -141,21 +125,6 @@ 1:	// when we get to here r20=~=interrup
    8.67  (p7)	br.sptk.many hyper_rsm_dt
    8.68  	;;
    8.69  
    8.70 -	// HYPERPRIVOP_GET_TPR?
    8.71 -	cmp.eq p7,p6=HYPERPRIVOP_GET_TPR,r17
    8.72 -(p7)	br.sptk.many hyper_get_tpr
    8.73 -	;;
    8.74 -
    8.75 -	// HYPERPRIVOP_SET_TPR?
    8.76 -	cmp.eq p7,p6=HYPERPRIVOP_SET_TPR,r17
    8.77 -(p7)	br.sptk.many hyper_set_tpr
    8.78 -	;;
    8.79 -
    8.80 -	// HYPERPRIVOP_EOI?
    8.81 -	cmp.eq p7,p6=HYPERPRIVOP_EOI,r17
    8.82 -(p7)	br.sptk.many hyper_eoi
    8.83 -	;;
    8.84 -
    8.85  	// HYPERPRIVOP_SET_ITM?
    8.86  	cmp.eq p7,p6=HYPERPRIVOP_SET_ITM,r17
    8.87  (p7)	br.sptk.many hyper_set_itm
    8.88 @@ -425,10 +394,11 @@ GLOBAL_ENTRY(fast_tick_reflect)
    8.89  	ld8 r23=[r21];;
    8.90  	or r22=r22,r23;;
    8.91  	st8 [r21]=r22;;
    8.92 -	// set PSCB(pending_interruption)!
    8.93 -	adds r20=XSI_PEND_OFS-XSI_PSR_IC_OFS,r18 ;;
    8.94 -	st4 [r20]=r25;;
    8.95 -	
    8.96 +	// set evtchn_upcall_pending!
    8.97 +	adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18;;
    8.98 +	ld8 r20=[r20];;
    8.99 +	adds r20=-1,r20;;		// evtchn_upcall_pending
   8.100 +	st1 [r20]=r25;;
   8.101  	// if interrupted at pl0, we're done
   8.102  	extr.u r16=r17,IA64_PSR_CPL0_BIT,2;;
   8.103  	cmp.eq p6,p0=r16,r0;;
   8.104 @@ -1465,236 +1435,6 @@ 1:	extr.u r26=r24,41,2 ;;
   8.105  	;;
   8.106  END(hyper_rsm_dt)
   8.107  
   8.108 -ENTRY(hyper_get_tpr)
   8.109 -#ifdef FAST_HYPERPRIVOP_CNT
   8.110 -	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_GET_TPR);;
   8.111 -	ld4 r21=[r20];;
   8.112 -	adds r21=1,r21;;
   8.113 -	st4 [r20]=r21;;
   8.114 -#endif
   8.115 -	mov r24=cr.ipsr
   8.116 -	mov r25=cr.iip;;
   8.117 -	adds r20=XSI_TPR_OFS-XSI_PSR_IC_OFS,r18 ;;
   8.118 -	ld8 r8=[r20];;
   8.119 -	extr.u r26=r24,41,2 ;;
   8.120 -	cmp.eq p6,p7=2,r26 ;;
   8.121 -(p6)	mov r26=0
   8.122 -(p6)	adds r25=16,r25
   8.123 -(p7)	adds r26=1,r26
   8.124 -	;;
   8.125 -	dep r24=r26,r24,41,2
   8.126 -	;;
   8.127 -	mov cr.ipsr=r24
   8.128 -	mov cr.iip=r25
   8.129 -	mov pr=r31,-1 ;;
   8.130 -	rfi
   8.131 -	;;
   8.132 -END(hyper_get_tpr)
   8.133 -
   8.134 -// if we get to here, there are no interrupts pending so we
   8.135 -// can change virtual tpr to any value without fear of provoking
   8.136 -// (or accidentally missing) delivering an interrupt
   8.137 -ENTRY(hyper_set_tpr)
   8.138 -#ifdef FAST_HYPERPRIVOP_CNT
   8.139 -	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SET_TPR);;
   8.140 -	ld4 r21=[r20];;
   8.141 -	adds r21=1,r21;;
   8.142 -	st4 [r20]=r21;;
   8.143 -#endif
   8.144 -	mov r24=cr.ipsr
   8.145 -	mov r25=cr.iip;;
   8.146 -	movl r27=0xff00;;
   8.147 -	adds r20=XSI_TPR_OFS-XSI_PSR_IC_OFS,r18 ;;
   8.148 -	andcm r8=r8,r27;;
   8.149 -	st8 [r20]=r8;;
   8.150 -	extr.u r26=r24,41,2 ;;
   8.151 -	cmp.eq p6,p7=2,r26 ;;
   8.152 -(p6)	mov r26=0
   8.153 -(p6)	adds r25=16,r25
   8.154 -(p7)	adds r26=1,r26
   8.155 -	;;
   8.156 -	dep r24=r26,r24,41,2
   8.157 -	;;
   8.158 -	mov cr.ipsr=r24
   8.159 -	mov cr.iip=r25
   8.160 -	mov pr=r31,-1 ;;
   8.161 -	rfi
   8.162 -	;;
   8.163 -END(hyper_set_tpr)
   8.164 -
   8.165 -ENTRY(hyper_get_ivr)
   8.166 -#ifdef FAST_HYPERPRIVOP_CNT
   8.167 -	movl r22=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_GET_IVR);;
   8.168 -	ld4 r21=[r22];;
   8.169 -	adds r21=1,r21;;
   8.170 -	st4 [r22]=r21;;
   8.171 -#endif
   8.172 -	mov r8=15;;
   8.173 -	// when we get to here r20=~=interrupts pending
   8.174 -	cmp.eq p7,p0=r20,r0;;
   8.175 -(p7)	adds r20=XSI_PEND_OFS-XSI_PSR_IC_OFS,r18 ;;
   8.176 -(p7)	st4 [r20]=r0;;
   8.177 -(p7)	br.spnt.many 1f ;;
   8.178 -	movl r30=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
   8.179 -	ld8 r30=[r30];;
   8.180 -	adds r24=IA64_VCPU_INSVC3_OFFSET,r30;;
   8.181 -	mov r25=192
   8.182 -	adds r22=IA64_VCPU_IRR3_OFFSET,r30;;
   8.183 -	ld8 r23=[r22];;
   8.184 -	cmp.eq p6,p0=r23,r0;;
   8.185 -(p6)	adds r22=-8,r22;;
   8.186 -(p6)	adds r24=-8,r24;;
   8.187 -(p6)	adds r25=-64,r25;;
   8.188 -(p6)	ld8 r23=[r22];;
   8.189 -(p6)	cmp.eq p6,p0=r23,r0;;
   8.190 -(p6)	adds r22=-8,r22;;
   8.191 -(p6)	adds r24=-8,r24;;
   8.192 -(p6)	adds r25=-64,r25;;
   8.193 -(p6)	ld8 r23=[r22];;
   8.194 -(p6)	cmp.eq p6,p0=r23,r0;;
   8.195 -(p6)	adds r22=-8,r22;;
   8.196 -(p6)	adds r24=-8,r24;;
   8.197 -(p6)	adds r25=-64,r25;;
   8.198 -(p6)	ld8 r23=[r22];;
   8.199 -(p6)	cmp.eq p6,p0=r23,r0;;
   8.200 -	cmp.eq p6,p0=r23,r0
   8.201 -(p6)	br.cond.spnt.few 1f;	// this is actually an error
   8.202 -	// r22 points to non-zero element of irr, r23 has value
   8.203 -	// r24 points to corr element of insvc, r25 has elt*64
   8.204 -	ld8 r26=[r24];;
   8.205 -	cmp.geu p6,p0=r26,r23
   8.206 -(p6)	br.cond.spnt.many 1f;
   8.207 -	// not masked by insvc, get vector number
   8.208 -	shr.u r26=r23,1;;
   8.209 -	or r26=r23,r26;;
   8.210 -	shr.u r27=r26,2;;
   8.211 -	or r26=r26,r27;;
   8.212 -	shr.u r27=r26,4;;
   8.213 -	or r26=r26,r27;;
   8.214 -	shr.u r27=r26,8;;
   8.215 -	or r26=r26,r27;;
   8.216 -	shr.u r27=r26,16;;
   8.217 -	or r26=r26,r27;;
   8.218 -	shr.u r27=r26,32;;
   8.219 -	or r26=r26,r27;;
   8.220 -	andcm r26=0xffffffffffffffff,r26;;
   8.221 -	popcnt r26=r26;;
   8.222 -	sub r26=63,r26;;
   8.223 -	// r26 now contains the bit index (mod 64)
   8.224 -	mov r27=1;;
   8.225 -	shl r27=r27,r26;;
   8.226 -	// r27 now contains the (within the proper word) bit mask 
   8.227 -	add r26=r25,r26
   8.228 -	// r26 now contains the vector [0..255]
   8.229 -	adds r20=XSI_TPR_OFS-XSI_PSR_IC_OFS,r18 ;;
   8.230 -	ld8 r20=[r20] ;;
   8.231 -	extr.u r28=r20,16,1
   8.232 -	extr.u r29=r20,4,4 ;;
   8.233 -	cmp.ne p6,p0=r28,r0	// if tpr.mmi is set, return SPURIOUS
   8.234 -(p6)	br.cond.spnt.few 1f;
   8.235 -	shl r29=r29,4;;
   8.236 -	adds r29=15,r29;;
   8.237 -	cmp.ge p6,p0=r29,r26
   8.238 -(p6)	br.cond.spnt.few 1f;
   8.239 -	// OK, have an unmasked vector to process/return
   8.240 -	ld8 r25=[r24];;
   8.241 -	or r25=r25,r27;;
   8.242 -	st8 [r24]=r25;;
   8.243 -	ld8 r25=[r22];;
   8.244 -	andcm r25=r25,r27;;
   8.245 -	st8 [r22]=r25;;
   8.246 -	mov r8=r26;;
   8.247 -	// if its a clock tick, remember itm to avoid delivering it twice
   8.248 -	adds r20=XSI_ITV_OFS-XSI_PSR_IC_OFS,r18 ;;
   8.249 -	ld8 r20=[r20];;
   8.250 -	extr.u r20=r20,0,8;;
   8.251 -	cmp.eq p6,p0=r20,r8
   8.252 -	adds r22=IA64_VCPU_DOMAIN_ITM_LAST_OFFSET,r30
   8.253 -	adds r23=IA64_VCPU_DOMAIN_ITM_OFFSET,r30;;
   8.254 -	ld8 r23=[r23];;
   8.255 -(p6)	st8 [r22]=r23;;
   8.256 -	// all done
   8.257 -1:	mov r24=cr.ipsr
   8.258 -	mov r25=cr.iip;;
   8.259 -	extr.u r26=r24,41,2 ;;
   8.260 -	cmp.eq p6,p7=2,r26 ;;
   8.261 -(p6)	mov r26=0
   8.262 -(p6)	adds r25=16,r25
   8.263 -(p7)	adds r26=1,r26
   8.264 -	;;
   8.265 -	dep r24=r26,r24,41,2
   8.266 -	;;
   8.267 -	mov cr.ipsr=r24
   8.268 -	mov cr.iip=r25
   8.269 -	mov pr=r31,-1 ;;
   8.270 -	rfi
   8.271 -	;;
   8.272 -END(hyper_get_ivr)
   8.273 -
   8.274 -ENTRY(hyper_eoi)
   8.275 -	// when we get to here r20=~=interrupts pending
   8.276 -	cmp.ne p7,p0=r20,r0
   8.277 -(p7)	br.spnt.many dispatch_break_fault ;;
   8.278 -#ifdef FAST_HYPERPRIVOP_CNT
   8.279 -	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_EOI);;
   8.280 -	ld4 r21=[r20];;
   8.281 -	adds r21=1,r21;;
   8.282 -	st4 [r20]=r21;;
   8.283 -#endif
   8.284 -	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
   8.285 -	ld8 r22=[r22];;
   8.286 -	adds r22=IA64_VCPU_INSVC3_OFFSET,r22;;
   8.287 -	ld8 r23=[r22];;
   8.288 -	cmp.eq p6,p0=r23,r0;;
   8.289 -(p6)	adds r22=-8,r22;;
   8.290 -(p6)	ld8 r23=[r22];;
   8.291 -(p6)	cmp.eq p6,p0=r23,r0;;
   8.292 -(p6)	adds r22=-8,r22;;
   8.293 -(p6)	ld8 r23=[r22];;
   8.294 -(p6)	cmp.eq p6,p0=r23,r0;;
   8.295 -(p6)	adds r22=-8,r22;;
   8.296 -(p6)	ld8 r23=[r22];;
   8.297 -(p6)	cmp.eq p6,p0=r23,r0;;
   8.298 -	cmp.eq p6,p0=r23,r0
   8.299 -(p6)	br.cond.spnt.few 1f;	// this is actually an error
   8.300 -	// r22 points to non-zero element of insvc, r23 has value
   8.301 -	shr.u r24=r23,1;;
   8.302 -	or r24=r23,r24;;
   8.303 -	shr.u r25=r24,2;;
   8.304 -	or r24=r24,r25;;
   8.305 -	shr.u r25=r24,4;;
   8.306 -	or r24=r24,r25;;
   8.307 -	shr.u r25=r24,8;;
   8.308 -	or r24=r24,r25;;
   8.309 -	shr.u r25=r24,16;;
   8.310 -	or r24=r24,r25;;
   8.311 -	shr.u r25=r24,32;;
   8.312 -	or r24=r24,r25;;
   8.313 -	andcm r24=0xffffffffffffffff,r24;;
   8.314 -	popcnt r24=r24;;
   8.315 -	sub r24=63,r24;;
   8.316 -	// r24 now contains the bit index
   8.317 -	mov r25=1;;
   8.318 -	shl r25=r25,r24;;
   8.319 -	andcm r23=r23,r25;;
   8.320 -	st8 [r22]=r23;;
   8.321 -1:	mov r24=cr.ipsr
   8.322 -	mov r25=cr.iip;;
   8.323 -	extr.u r26=r24,41,2 ;;
   8.324 -	cmp.eq p6,p7=2,r26 ;;
   8.325 -(p6)	mov r26=0
   8.326 -(p6)	adds r25=16,r25
   8.327 -(p7)	adds r26=1,r26
   8.328 -	;;
   8.329 -	dep r24=r26,r24,41,2
   8.330 -	;;
   8.331 -	mov cr.ipsr=r24
   8.332 -	mov cr.iip=r25
   8.333 -	mov pr=r31,-1 ;;
   8.334 -	rfi
   8.335 -	;;
   8.336 -END(hyper_eoi)
   8.337 -
   8.338  ENTRY(hyper_set_itm)
   8.339  	// when we get to here r20=~=interrupts pending
   8.340  	cmp.ne p7,p0=r20,r0