ia64/xen-unstable

changeset 5526:83f563ab42f6

bitkeeper revision 1.1713.2.13 (42b8385f7G_ZWD4cCDAhRx2ry3L3QA)

More hyperprivop stuff: dispatch XEN_HYPER_SET_RR to a new fast-path
handler, hyper_set_rr (checked in disabled, still punting to
dispatch_break_fault); replace the starting_rid/ending_rid/rid_bits
#define aliases with explicit d->arch.* references in regionreg.c; and
cache the domain's RID range in each vcpu so the assembly fast path can
reach it through IA64_KR(CURRENT).
Signed-off-by: Dan Magenheimer <dan.magenheimer@hp.com>
author djm@kirby.fc.hp.com
date Tue Jun 21 15:55:11 2005 +0000 (2005-06-21)
parents b58f7c4ead49
children a787d0c6f797
files xen/arch/ia64/asm-offsets.c xen/arch/ia64/domain.c xen/arch/ia64/hyperprivop.S xen/arch/ia64/regionreg.c xen/include/asm-ia64/domain.h
line diff
     1.1 --- a/xen/arch/ia64/asm-offsets.c	Mon Jun 20 20:36:39 2005 +0000
     1.2 +++ b/xen/arch/ia64/asm-offsets.c	Tue Jun 21 15:55:11 2005 +0000
     1.3 @@ -54,6 +54,7 @@ void foo(void)
     1.4  	DEFINE(XSI_BANKNUM_OFS, offsetof(vcpu_info_t, arch.banknum));
     1.5  	DEFINE(XSI_BANK0_OFS, offsetof(vcpu_info_t, arch.bank0_regs[0]));
     1.6  	DEFINE(XSI_BANK1_OFS, offsetof(vcpu_info_t, arch.bank1_regs[0]));
     1.7 +	DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
     1.8  	DEFINE(XSI_METAPHYS_OFS, offsetof(vcpu_info_t, arch.metaphysical_mode));
     1.9  	DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(vcpu_info_t, arch.precover_ifs));
    1.10  	DEFINE(XSI_INCOMPL_REG_OFS, offsetof(vcpu_info_t, arch.incomplete_regframe));
    1.11 @@ -79,6 +80,8 @@ void foo(void)
    1.12  	DEFINE(IA64_VCPU_IRR0_OFFSET, offsetof (struct vcpu, arch.irr[0]));
    1.13  	DEFINE(IA64_VCPU_IRR3_OFFSET, offsetof (struct vcpu, arch.irr[3]));
    1.14  	DEFINE(IA64_VCPU_INSVC3_OFFSET, offsetof (struct vcpu, arch.insvc[3]));
    1.15 +	DEFINE(IA64_VCPU_STARTING_RID_OFFSET, offsetof (struct vcpu, arch.starting_rid));
    1.16 +	DEFINE(IA64_VCPU_ENDING_RID_OFFSET, offsetof (struct vcpu, arch.ending_rid));
    1.17  	DEFINE(IA64_VCPU_DOMAIN_ITM_OFFSET, offsetof (struct vcpu, arch.domain_itm));
    1.18  
    1.19  	BLANK();
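
The asm-offsets.c additions are what let the new assembly fast path find arch.starting_rid, arch.ending_rid and arch.rrs[0] at all: the file exists only to turn C offsetof() expressions into assembler-visible constants. A minimal sketch of the usual mechanism, with a toy struct standing in for the real struct vcpu (the exact DEFINE macro in this tree may differ): the file is compiled to assembly, and the "->" markers in the output are post-processed into the #defines that hyperprivop.S includes.

    #include <stddef.h>

    struct arch_vcpu_sketch { int starting_rid; int ending_rid; };
    struct vcpu_sketch { struct arch_vcpu_sketch arch; };

    /* Emit a marker line into the generated .s file; a later sed pass
     * rewrites "->SYM value" into "#define SYM value". */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    void foo(void)
    {
            DEFINE(IA64_VCPU_STARTING_RID_OFFSET,
                   offsetof(struct vcpu_sketch, arch.starting_rid));
            DEFINE(IA64_VCPU_ENDING_RID_OFFSET,
                   offsetof(struct vcpu_sketch, arch.ending_rid));
    }
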
     2.1 --- a/xen/arch/ia64/domain.c	Mon Jun 20 20:36:39 2005 +0000
     2.2 +++ b/xen/arch/ia64/domain.c	Tue Jun 21 15:55:11 2005 +0000
     2.3 @@ -258,6 +258,8 @@ void arch_do_createdomain(struct vcpu *v
     2.4  #define DOMAIN_RID_BITS_DEFAULT 18
     2.5  	if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
     2.6  		BUG();
     2.7 +	v->arch.starting_rid = d->arch.starting_rid;
     2.8 +	v->arch.ending_rid = d->arch.ending_rid;
     2.9  	// the following will eventually need to be negotiated dynamically
    2.10  	d->xen_vastart = 0xf000000000000000;
    2.11  	d->xen_vaend = 0xf300000000000000;
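
The two new assignments in arch_do_createdomain duplicate the domain-wide RID range into each vcpu. The point is locality for the assembly fast path: with per-vcpu copies, hyper_set_rr can range-check a guest rid using only IA64_KR(CURRENT) plus the two ld4 loads seen below, with no v->domain pointer chase. Roughly the C equivalent of that check, as an illustrative sketch against the struct vcpu fields added in this changeset:

    /* Illustrative equivalent of hyper_set_rr's ld4/cmp.geu range check:
     * reject a guest rid that would land at or past the domain's block. */
    static inline int vrid_in_range(struct vcpu *v, unsigned long vrid)
    {
            return v->arch.starting_rid + vrid
                    < (unsigned long) v->arch.ending_rid;
    }
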
     3.1 --- a/xen/arch/ia64/hyperprivop.S	Mon Jun 20 20:36:39 2005 +0000
     3.2 +++ b/xen/arch/ia64/hyperprivop.S	Tue Jun 21 15:55:11 2005 +0000
     3.3 @@ -101,6 +101,10 @@ 1:	// when we get to here r20=~=interrup
     3.4  	cmp.eq p7,p6=XEN_HYPER_SET_ITM,r17
     3.5  (p7)	br.sptk.many hyper_set_itm;;
     3.6  
     3.7 +	// HYPERPRIVOP_SET_RR?
     3.8 +	cmp.eq p7,p6=XEN_HYPER_SET_RR,r17
     3.9 +(p7)	br.sptk.many hyper_set_rr;;
    3.10 +
    3.11  	// if not one of the above, give up for now and do it the slow way
    3.12  	br.sptk.many dispatch_break_fault ;;
    3.13  
    3.14 @@ -849,3 +853,63 @@ 1:	mov r24=cr.ipsr
    3.15  	rfi
    3.16  	;;
    3.17  END(hyper_set_itm)
    3.18 +
    3.19 +ENTRY(hyper_set_rr)
     3.20 +#if 1	// fast path not yet enabled: always punt to the slow path
    3.21 +	br.sptk.many dispatch_break_fault ;;
    3.22 +#endif
    3.23 +	extr.u r25=r8,61,3;;
    3.24 +	cmp.leu p7,p0=7,r25	// punt on setting rr7
    3.25 +(p7)	br.spnt.many dispatch_break_fault ;;
    3.26 +#ifdef FAST_HYPERPRIVOP_CNT
    3.27 +	movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_SET_RR);;
    3.28 +	ld8 r21=[r20];;
    3.29 +	adds r21=1,r21;;
    3.30 +	st8 [r20]=r21;;
    3.31 +#endif
    3.32 +	extr.u r26=r9,8,24	// r26 = r9.rid
    3.33 +	mov r20=IA64_KR(CURRENT);;
    3.34 +	adds r21=IA64_VCPU_STARTING_RID_OFFSET,r20;;
    3.35 +	ld4 r22=[r21];;
    3.36 +	adds r21=IA64_VCPU_ENDING_RID_OFFSET,r20;;
    3.37 +	ld4 r23=[r21];;
    3.38 +	adds r24=IA64_VCPU_META_SAVED_RR0_OFFSET,r20;;	// r24 = &v->arch.metaphysical_saved_rr0
    3.39 +	add r22=r26,r22;;
    3.40 +	cmp.geu p6,p0=r22,r23	// if r9.rid + starting_rid >= ending_rid
    3.41 +(p6)	br.cond.sptk.many 1f;	// this is an error, but just ignore/return
    3.42 +	// r22 = adjusted rid (r9.rid + starting_rid)
    3.43 +	adds r20=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18 ;;
    3.44 +	shl r25=r25,3;;
    3.45 +	add r20=r20,r25;;
    3.46 +	st8 [r20]=r9;;		// store away exactly what was passed
    3.47 +	// but adjust value actually placed in rr[r8]
    3.48 +	// r22 contains adjusted rid, "mangle" it (see regionreg.c)
    3.49 +	// and set ps to PAGE_SHIFT and ve to 1
    3.50 +	extr.u r27=r22,0,8
    3.51 +	extr.u r28=r22,8,8
    3.52 +	extr.u r29=r22,16,8;;
    3.53 +	dep.z r23=PAGE_SHIFT,2,6;;	// ps=PAGE_SHIFT
    3.54 +	dep r23=-1,r23,0,1;;	// ve=1; mangling is swapping bytes 1 & 3
    3.55 +	dep r23=r27,r23,24,8;;
    3.56 +	dep r23=r28,r23,16,8;;
    3.57 +	dep r23=r29,r23,8,8
    3.58 +	cmp.eq p6,p0=r25,r0;;	// if rr0, save for metaphysical
    3.59 +(p6)	st4 [r24]=r23
    3.60 +	mov rr[r8]=r23;;
    3.61 +	// done, mosey on back
    3.62 +1:	mov r24=cr.ipsr
    3.63 +	mov r25=cr.iip;;
    3.64 +	extr.u r26=r24,41,2 ;;
    3.65 +	cmp.eq p6,p7=2,r26 ;;
    3.66 +(p6)	mov r26=0
    3.67 +(p6)	adds r25=16,r25
    3.68 +(p7)	adds r26=1,r26
    3.69 +	;;
    3.70 +	dep r24=r26,r24,41,2
    3.71 +	;;
    3.72 +	mov cr.ipsr=r24
    3.73 +	mov cr.iip=r25
    3.74 +	mov pr=r31,-1 ;;
    3.75 +	rfi
    3.76 +	;;
    3.77 +END(hyper_set_rr)
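
Two pieces of hyper_set_rr are worth unpacking in C. First, the extr.u/dep sequence builds the value actually written to rr[r8]: ve=1 in bit 0, ps=PAGE_SHIFT in bits 2..7, and the 24-bit adjusted rid deposited with bytes 1 and 3 of the result swapped relative to the architected layout, which is the same mangling regionreg.c performs. An illustrative rendering (the PAGE_SHIFT value here is an assumption):

    #ifndef PAGE_SHIFT
    #define PAGE_SHIFT 14	/* assumption: 16KB pages */
    #endif

    /* Build the mangled region-register value from an adjusted rid. */
    static unsigned long mangled_rr(unsigned long rid)
    {
            unsigned long rr = 1UL | ((unsigned long) PAGE_SHIFT << 2);
            rr |= (rid & 0xff) << 24;               /* rid byte 0 -> rr byte 3 */
            rr |= ((rid >> 8) & 0xff) << 16;        /* rid byte 1 -> rr byte 2 */
            rr |= ((rid >> 16) & 0xff) << 8;        /* rid byte 2 -> rr byte 1 */
            return rr;
    }

Second, the epilogue at label 1 steps the guest past the break instruction before the rfi by advancing ipsr.ri (bits 41:42); slot 2 wraps to slot 0 of the next 16-byte bundle. Illustratively:

    /* Advance ipsr.ri/iip one instruction slot, as the epilogue does. */
    static void skip_one_slot(unsigned long *ipsr, unsigned long *iip)
    {
            unsigned long ri = (*ipsr >> 41) & 3;
            if (ri == 2) {          /* last slot: move to the next bundle */
                    ri = 0;
                    *iip += 16;
            } else
                    ri++;
            *ipsr = (*ipsr & ~(3UL << 41)) | (ri << 41);
    }
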
     4.1 --- a/xen/arch/ia64/regionreg.c	Mon Jun 20 20:36:39 2005 +0000
     4.2 +++ b/xen/arch/ia64/regionreg.c	Tue Jun 21 15:55:11 2005 +0000
     4.3 @@ -148,11 +148,11 @@ int allocate_rid_range(struct domain *d,
     4.4  	for (j = i; j < i + n_rid_blocks; ++j) ridblock_owner[j] = d;
     4.5  	
     4.6  	// setup domain struct
     4.7 -	d->rid_bits = ridbits;
     4.8 -	d->starting_rid = i << IA64_MIN_IMPL_RID_BITS;
     4.9 -	d->ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
    4.10 +	d->arch.rid_bits = ridbits;
    4.11 +	d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS;
         +	d->arch.ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
    4.12  printf("###allocating rid_range, domain %p: starting_rid=%lx, ending_rid=%lx\n",
    4.13 -d,d->starting_rid, d->ending_rid);
    4.14 +d,d->arch.starting_rid, d->arch.ending_rid);
    4.15  	
    4.16  	return 1;
    4.17  }
    4.18 @@ -161,14 +160,14 @@ d,d->starting_rid, d->ending_rid);
    4.19  int deallocate_rid_range(struct domain *d)
    4.20  {
    4.21  	int i;
    4.22 -	int rid_block_end = d->ending_rid >> IA64_MIN_IMPL_RID_BITS;
    4.23 -	int rid_block_start = d->starting_rid >> IA64_MIN_IMPL_RID_BITS;
    4.24 +	int rid_block_end = d->arch.ending_rid >> IA64_MIN_IMPL_RID_BITS;
    4.25 +	int rid_block_start = d->arch.starting_rid >> IA64_MIN_IMPL_RID_BITS;
    4.26  
    4.27  	return 1;  // KLUDGE ALERT
    4.28  	//
    4.29  	// not all domains will have allocated RIDs (physical mode loaders for instance)
    4.30  	//
    4.31 -	if (d->rid_bits == 0) return 1;
    4.32 +	if (d->arch.rid_bits == 0) return 1;
    4.33  
    4.34  #ifdef DEBUG
    4.35  	for (i = rid_block_start; i < rid_block_end; ++i) {
    4.36 @@ -179,9 +178,9 @@ int deallocate_rid_range(struct domain *
    4.37  	for (i = rid_block_start; i < rid_block_end; ++i)
    4.38  	ridblock_owner[i] = NULL;
    4.39  	
    4.40 -	d->rid_bits = 0;
    4.41 -	d->starting_rid = 0;
    4.42 -	d->ending_rid = 0;
    4.43 +	d->arch.rid_bits = 0;
    4.44 +	d->arch.starting_rid = 0;
    4.45 +	d->arch.ending_rid = 0;
    4.46  	return 1;
    4.47  }
    4.48  
    4.49 @@ -193,9 +192,8 @@ int deallocate_rid_range(struct domain *
    4.50  //  a region register; anytime it is "viewable" outside of this module,
    4.51  //  it should be unmangled
    4.52  
    4.53 -//This appears to work in Xen... turn it on later so no complications yet
    4.54 -#define CONFIG_MANGLE_RIDS
    4.55 -#ifdef CONFIG_MANGLE_RIDS
    4.56 +// NOTE: this function is also implemented in assembly code in hyper_set_rr!!
    4.57 +// Must ensure these two remain consistent!
    4.58  static inline unsigned long
    4.59  vmMangleRID(unsigned long RIDVal)
    4.60  {
    4.61 @@ -214,11 +212,6 @@ vmMangleRID(unsigned long RIDVal)
    4.62  
    4.63  // since vmMangleRID is symmetric, use it for unmangling also
    4.64  #define vmUnmangleRID(x)	vmMangleRID(x)
    4.65 -#else
    4.66 -// no mangling/unmangling
    4.67 -#define vmMangleRID(x)	(x)
    4.68 -#define vmUnmangleRID(x) (x)
    4.69 -#endif
    4.70  
    4.71  static inline void
    4.72  set_rr_no_srlz(unsigned long rr, unsigned long rrval)
    4.73 @@ -265,12 +258,12 @@ int set_one_rr(unsigned long rr, unsigne
    4.74  
    4.75  	rrv.rrval = val;
    4.76  	newrrv.rrval = 0;
    4.77 -	newrid = v->domain->starting_rid + rrv.rid;
    4.78 +	newrid = v->arch.starting_rid + rrv.rid;
    4.79  
    4.80 -	if (newrid > v->domain->ending_rid) {
    4.81 +	if (newrid > v->arch.ending_rid) {
    4.82  		printk("can't set rr%d to %lx, starting_rid=%lx,"
    4.83  			"ending_rid=%lx, val=%lx\n", rreg, newrid,
    4.84 -			v->domain->starting_rid,v->domain->ending_rid,val);
    4.85 +			v->arch.starting_rid,v->arch.ending_rid,val);
    4.86  		return 0;
    4.87  	}
    4.88  
    4.89 @@ -358,7 +351,7 @@ unsigned long physicalize_rid(struct vcp
    4.90  	ia64_rr rrv;
    4.91  	    
    4.92  	rrv.rrval = rrval;
    4.93 -	rrv.rid += v->domain->starting_rid;
    4.94 +	rrv.rid += v->arch.starting_rid;
    4.95  	return rrv.rrval;
    4.96  }
    4.97  
    4.98 @@ -368,7 +361,7 @@ virtualize_rid(struct vcpu *v, unsigned 
    4.99  	ia64_rr rrv;
   4.100  	    
   4.101  	rrv.rrval = rrval;
   4.102 -	rrv.rid -= v->domain->starting_rid;
   4.103 +	rrv.rid -= v->arch.starting_rid;
   4.104  	return rrv.rrval;
   4.105  }
   4.106  
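
The new warning comment above vmMangleRID is the contract that hyper_set_rr depends on. The function body falls outside this hunk, but the operation it names is just a byte swap, and swapping twice is the identity, which is why vmUnmangleRID can simply be defined as vmMangleRID. A sketch of the effect, assuming the "swap bytes 1 & 3" described in the hyperprivop.S comment:

    /* Exchange bytes 1 and 3 of the value (its own inverse). */
    static inline unsigned long vmMangleRID_sketch(unsigned long ridval)
    {
            unsigned long b1 = (ridval >> 8) & 0xff;
            unsigned long b3 = (ridval >> 24) & 0xff;
            ridval &= ~((0xffUL << 8) | (0xffUL << 24));
            return ridval | (b3 << 8) | (b1 << 24);
    }
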
     5.1 --- a/xen/include/asm-ia64/domain.h	Mon Jun 20 20:36:39 2005 +0000
     5.2 +++ b/xen/include/asm-ia64/domain.h	Tue Jun 21 15:55:11 2005 +0000
     5.3 @@ -54,9 +54,6 @@ struct arch_domain {
     5.4      u64 entry;
     5.5  #endif
     5.6  };
     5.7 -#define starting_rid arch.starting_rid
     5.8 -#define ending_rid arch.ending_rid
     5.9 -#define rid_bits arch.rid_bits
    5.10  #define xen_vastart arch.xen_vastart
    5.11  #define xen_vaend arch.xen_vaend
    5.12  #define shared_info_va arch.shared_info_va
    5.13 @@ -83,6 +80,8 @@ struct arch_vcpu {
    5.14      int metaphysical_rr0;		// from arch_domain (so is pinned)
    5.15      int metaphysical_saved_rr0;		// from arch_domain (so is pinned)
    5.16      int breakimm;			// from arch_domain (so is pinned)
    5.17 +    int starting_rid;		/* first RID assigned to domain */
    5.18 +    int ending_rid;		/* one beyond highest RID assigned to domain */
    5.19      struct mm_struct *active_mm;
    5.20      struct thread_struct _thread;	// this must be last
    5.21  #ifdef CONFIG_VTI
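
One layout detail worth noting: the fast path fetches starting_rid and ending_rid with ld4, so the two new arch_vcpu fields must remain exactly 4 bytes wide at the offsets exported by asm-offsets.c. An illustrative compile-time guard (not in the source) that would pin that width assumption down:

    /* Fail the build if either field stops matching the ld4 width. */
    typedef char starting_rid_is_32bit[
            (sizeof(((struct vcpu *) 0)->arch.starting_rid) == 4) ? 1 : -1];
    typedef char ending_rid_is_32bit[
            (sizeof(((struct vcpu *) 0)->arch.ending_rid) == 4) ? 1 : -1];
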