ia64/xen-unstable

changeset 7335:70de2b71f439

Fix itir bugs in vcpu_translate and do some cleanup
to avoid a gazillion unnecessary function calls.
Signed-off-by: Dan Magenheimer <dan.magenheimer@hp.com>
author djm@kirby.fc.hp.com
date Fri Oct 14 11:56:18 2005 -0600 (2005-10-14)
parents c03dc328bf3a
children f1dc942257e5
files xen/arch/ia64/xen/vcpu.c xen/include/asm-ia64/domain.h
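
The cleanup in the vcpu.c hunks below removes the out-of-line match_tr()/match_dtlb() helpers and lets vcpu_translate() scan the TR arrays inline, guarded by a new per-vcpu region bitmap (itr_regions/dtr_regions, added to struct arch_vcpu in domain.h). An ia64 virtual address carries its region number in bits 63:61, so eight bits suffice to record which regions currently hold at least one TR entry, and the vcpu_quick_region_check() macro tests that bit before any scan is attempted. The fragment below is an illustrative sketch only, not code from the patch; the demo_* names are invented for the example:

	/* Standalone illustration of the region-bitmap trick used by this patch. */
	#include <stdio.h>

	/* ia64 keeps the region number in bits 63:61 of a virtual address */
	#define DEMO_REGION(addr)	((unsigned long)(addr) >> 61)

	static unsigned int demo_dtr_regions;	/* one bit per region, as in arch_vcpu */

	/* counterpart of vcpu_quick_region_set(): note that this region now has a TR entry */
	static void demo_region_set(unsigned long vadr)
	{
		demo_dtr_regions |= 1u << DEMO_REGION(vadr);
	}

	/* counterpart of vcpu_quick_region_check(): is a scan of the TR array needed at all? */
	static int demo_region_check(unsigned long vadr)
	{
		return (demo_dtr_regions & (1u << DEMO_REGION(vadr))) != 0;
	}

	int main(void)
	{
		demo_region_set(0x2000000000001000UL);	/* a mapping registered in region 1 */

		printf("region 1 address: scan TRs = %d\n",
		       demo_region_check(0x2000000000005000UL));	/* 1: must scan */
		printf("region 3 address: scan TRs = %d\n",
		       demo_region_check(0x6000000000000000UL));	/* 0: scan skipped */
		return 0;
	}

Setting a bit is a single OR in vcpu_itr_d()/vcpu_itr_i(); bits are never cleared by this patch, which is why the vcpu_ptr_d()/vcpu_ptr_i() stubs gain a reminder that the bitmaps must be recomputed once TLB purging is implemented.
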
line diff
     1.1 --- a/xen/arch/ia64/xen/vcpu.c	Thu Oct 13 16:02:35 2005 -0600
     1.2 +++ b/xen/arch/ia64/xen/vcpu.c	Fri Oct 14 11:56:18 2005 -0600
     1.3 @@ -66,8 +66,16 @@ unsigned long phys_translate_count = 0;
     1.4  unsigned long vcpu_verbose = 0;
     1.5  #define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
     1.6  
     1.7 -extern TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa);
     1.8 -extern TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa);
     1.9 +//#define vcpu_quick_region_check(_tr_regions,_ifa)	1
    1.10 +#define vcpu_quick_region_check(_tr_regions,_ifa)			\
    1.11 +	(_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
    1.12 +#define vcpu_quick_region_set(_tr_regions,_ifa)				\
    1.13 +	do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
    1.14 +
    1.15 +// FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
    1.16 +#define vcpu_match_tr_entry(_trp,_ifa,_rid)				\
    1.17 +	((_trp->p && (_trp->rid==_rid) && (_ifa >= _trp->vadr) &&	\
    1.18 +	(_ifa < (_trp->vadr + (1L<< _trp->ps)) - 1)))
    1.19  
    1.20  /**************************************************************************
    1.21   VCPU general register access routines
    1.22 @@ -620,7 +628,7 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
    1.23  		return;
    1.24  	}
    1.25      if ( VMX_DOMAIN(vcpu) ) {
    1.26 - 	    set_bit(vector,VCPU(vcpu,irr));
    1.27 +	    set_bit(vector,VCPU(vcpu,irr));
    1.28      } else
    1.29      {
    1.30  	/* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
    1.31 @@ -630,16 +638,6 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
    1.32  	set_bit(vector,PSCBX(vcpu,irr));
    1.33  	PSCB(vcpu,pending_interruption) = 1;
    1.34      }
    1.35 -
    1.36 -#if 0
    1.37 -    /* Keir: I think you should unblock when an interrupt is pending. */
    1.38 -    {
    1.39 -        int running = test_bit(_VCPUF_running, &vcpu->vcpu_flags);
    1.40 -        vcpu_unblock(vcpu);
    1.41 -        if ( running )
    1.42 -            smp_send_event_check_cpu(vcpu->processor);
    1.43 -    }
    1.44 -#endif
    1.45  }
    1.46  
    1.47  void early_tick(VCPU *vcpu)
    1.48 @@ -710,14 +708,6 @@ UINT64 vcpu_check_pending_interrupts(VCP
    1.49  	}
    1.50  
    1.51  //printf("returned to caller\n");
    1.52 -#if 0
    1.53 -if (vector == (PSCB(vcpu,itv) & 0xff)) {
    1.54 -	UINT64 now = ia64_get_itc();
    1.55 -	UINT64 itm = PSCBX(vcpu,domain_itm);
    1.56 -	if (now < itm) early_tick(vcpu);
    1.57 -
    1.58 -}
    1.59 -#endif
    1.60  	return vector;
    1.61  }
    1.62  
    1.63 @@ -1070,23 +1060,6 @@ void vcpu_set_next_timer(VCPU *vcpu)
    1.64  	/* gloss over the wraparound problem for now... we know it exists
    1.65  	 * but it doesn't matter right now */
    1.66  
    1.67 -#if 0
    1.68 -	/* ensure at least next SP tick is in the future */
    1.69 -	if (!interval) PSCBX(vcpu,xen_itm) = now +
    1.70 -#if 0
    1.71 -		(running_on_sim() ? SIM_DEFAULT_CLOCK_RATE :
    1.72 -					DEFAULT_CLOCK_RATE);
    1.73 -#else
    1.74 -	3000000;
    1.75 -//printf("vcpu_set_next_timer: HACK!\n");
    1.76 -#endif
    1.77 -#if 0
    1.78 -	if (PSCBX(vcpu,xen_itm) < now)
    1.79 -		while (PSCBX(vcpu,xen_itm) < now + (interval>>1))
    1.80 -			PSCBX(vcpu,xen_itm) += interval;
    1.81 -#endif
    1.82 -#endif
    1.83 -
    1.84  	if (is_idle_task(vcpu->domain)) {
    1.85  //		printf("****** vcpu_set_next_timer called during idle!!\n");
    1.86  		vcpu_safe_set_itm(s);
    1.87 @@ -1177,14 +1150,6 @@ void vcpu_pend_timer(VCPU *vcpu)
    1.88  		// don't deliver another
    1.89  		return;
    1.90  	}
    1.91 -#if 0
    1.92 -	// attempt to flag "timer tick before its due" source
    1.93 -	{
    1.94 -	UINT64 itm = PSCBX(vcpu,domain_itm);
    1.95 -	UINT64 now = ia64_get_itc();
    1.96 -	if (now < itm) printf("******* vcpu_pend_timer: pending before due!\n");
    1.97 -	}
    1.98 -#endif
    1.99  	vcpu_pend_interrupt(vcpu, itv);
   1.100  }
   1.101  
   1.102 @@ -1199,33 +1164,6 @@ UINT64 vcpu_timer_pending_early(VCPU *vc
   1.103  	return (vcpu_deliverable_timer(vcpu) && (now < itm));
   1.104  }
   1.105  
   1.106 -//FIXME: This is a hack because everything dies if a timer tick is lost
   1.107 -void vcpu_poke_timer(VCPU *vcpu)
   1.108 -{
   1.109 -	UINT64 itv = PSCB(vcpu,itv) & 0xff;
   1.110 -	UINT64 now = ia64_get_itc();
   1.111 -	UINT64 itm = PSCBX(vcpu,domain_itm);
   1.112 -	UINT64 irr;
   1.113 -
   1.114 -	if (vcpu_timer_disabled(vcpu)) return;
   1.115 -	if (!itm) return;
   1.116 -	if (itv != 0xefL) {
   1.117 -		printf("vcpu_poke_timer: unimplemented itv=%lx!\n",itv);
   1.118 -		while(1);
   1.119 -	}
   1.120 -	// using 0xef instead of itv so can get real irr
   1.121 -	if (now > itm && !test_bit(0xefL, PSCBX(vcpu,insvc))) {
   1.122 -		if (!test_bit(0xefL,PSCBX(vcpu,irr))) {
   1.123 -			irr = ia64_getreg(_IA64_REG_CR_IRR3);
   1.124 -			if (irr & (1L<<(0xef-0xc0))) return;
   1.125 -if (now-itm>0x800000)
   1.126 -printf("*** poking timer: now=%lx,vitm=%lx,xitm=%lx,itm=%lx\n",now,itm,local_cpu_data->itm_next,ia64_get_itm());
   1.127 -			vcpu_pend_timer(vcpu);
   1.128 -		}
   1.129 -	}
   1.130 -}
   1.131 -
   1.132 -
   1.133  /**************************************************************************
   1.134  Privileged operation emulation routines
   1.135  **************************************************************************/
   1.136 @@ -1318,13 +1256,6 @@ IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 
   1.137  	UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
   1.138  			VHPT_addr3;
   1.139  
   1.140 -#if 0
   1.141 -	if (VHPT_addr1 == 0xe000000000000000L) {
   1.142 -	    printf("vcpu_thash: thash unsupported with rr7 @%lx\n",
   1.143 -		PSCB(vcpu,iip));
   1.144 -	    return (IA64_ILLOP_FAULT);
   1.145 -	}
   1.146 -#endif
   1.147  //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
   1.148  	*pval = VHPT_addr;
   1.149  	return (IA64_NO_FAULT);
   1.150 @@ -1343,8 +1274,10 @@ unsigned long vhpt_translate_count = 0;
   1.151  
   1.152  IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
   1.153  {
   1.154 -	unsigned long pta, pta_mask, pte, ps;
   1.155 +	unsigned long pta, pta_mask, pte, ps, rid, itir_addr;
   1.156 +	int i;
   1.157  	TR_ENTRY *trp;
   1.158 +	IA64FAULT fault;
   1.159  	ia64_rr rr;
   1.160  
   1.161  	if (!(address >> 61)) {
   1.162 @@ -1362,68 +1295,83 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
   1.163  		phys_translate_count++;
   1.164  		return IA64_NO_FAULT;
   1.165  	}
   1.166 -
   1.167 -	/* check translation registers */
   1.168 -	if ((trp = match_tr(vcpu,address))) {
   1.169 -			tr_translate_count++;
   1.170 -		*pteval = trp->page_flags;
   1.171 -		*itir = trp->itir;
   1.172 -		return IA64_NO_FAULT;
   1.173 +	rid = virtualize_rid(vcpu,get_rr(address) & RR_RID_MASK);
   1.174 +	if (is_data) {
   1.175 +		if (vcpu_quick_region_check(vcpu->arch.dtr_regions,address)) {
   1.176 +			for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++) {
   1.177 +				if (vcpu_match_tr_entry(trp,address,rid)) {
   1.178 +					*pteval = trp->page_flags;
   1.179 +					*itir = trp->itir;
   1.180 +					tr_translate_count++;
   1.181 +					return IA64_NO_FAULT;
   1.182 +				}
   1.183 +			}
   1.184 +		}
   1.185 +	}
   1.186 +	// FIXME?: check itr's for data accesses too, else bad things happen?
   1.187 +	/* else */ {
   1.188 +		if (vcpu_quick_region_check(vcpu->arch.itr_regions,address)) {
   1.189 +			for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++) {
   1.190 +				if (vcpu_match_tr_entry(trp,address,rid)) {
   1.191 +					*pteval = trp->page_flags;
   1.192 +					*itir = trp->itir;
   1.193 +					tr_translate_count++;
   1.194 +					return IA64_NO_FAULT;
   1.195 +				}
   1.196 +			}
   1.197 +		}
   1.198  	}
   1.199  
   1.200  	/* check 1-entry TLB */
   1.201 -	if ((trp = match_dtlb(vcpu,address))) {
   1.202 -		dtlb_translate_count++;
   1.203 +	// FIXME?: check dtlb for inst accesses too, else bad things happen?
   1.204 +	trp = &vcpu->arch.dtlb;
   1.205 +	if (/* is_data && */ vcpu_match_tr_entry(trp,address,rid)) {
   1.206  		if (vcpu->domain==dom0 && !in_tpa) *pteval = trp->page_flags;
   1.207  		else *pteval = vcpu->arch.dtlb_pte;
   1.208 -//		printf("DTLB MATCH... NEW, DOM%s, %s\n", vcpu->domain==dom0?
   1.209 -//			"0":"U", in_tpa?"vcpu_tpa":"ia64_do_page_fault");
   1.210  		*itir = trp->itir;
   1.211 +		dtlb_translate_count++;
   1.212  		return IA64_NO_FAULT;
   1.213  	}
   1.214  
   1.215  	/* check guest VHPT */
   1.216  	pta = PSCB(vcpu,pta);
   1.217 -	rr.rrval = PSCB(vcpu,rrs)[address>>61];
   1.218 -	if (!rr.ve || !(pta & IA64_PTA_VE)) {
   1.219 -// FIXME? does iha get set for alt faults? does xenlinux depend on it?
   1.220 -		vcpu_thash(vcpu, address, iha);
   1.221 -// FIXME?: does itir get set for alt faults?
   1.222 -		*itir = vcpu_get_itir_on_fault(vcpu,address);
   1.223 -		return (is_data ? IA64_ALT_DATA_TLB_VECTOR :
   1.224 -				IA64_ALT_INST_TLB_VECTOR);
   1.225 -	}
   1.226  	if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
   1.227 -		// thash won't work right?
   1.228  		panic_domain(vcpu_regs(vcpu),"can't do long format VHPT\n");
   1.229  		//return (is_data ? IA64_DATA_TLB_VECTOR:IA64_INST_TLB_VECTOR);
   1.230  	}
   1.231  
   1.232 -	/* avoid recursively walking (short format) VHPT */
   1.233 -	pta_mask = (itir_mask(pta) << 3) >> 3;
   1.234 -	if (((address ^ pta) & pta_mask) == 0)
   1.235 -		return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
   1.236 -
   1.237  	vcpu_thash(vcpu, address, iha);
   1.238 -	if (__copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0) {
   1.239 -// FIXME?: does itir get set for vhpt faults?
   1.240 -		*itir = vcpu_get_itir_on_fault(vcpu,*iha);
   1.241 -		return IA64_VHPT_FAULT;
   1.242 +	rr.rrval = PSCB(vcpu,rrs)[address>>61];
   1.243 +	fault = is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
   1.244 +	if (!rr.ve || !(pta & IA64_PTA_VE)) {
   1.245 +	// architecturally, iha is optionally set for alt faults but xenlinux
   1.246 +	// depends on it so should document it as part of PV interface
   1.247 +		fault += IA64_ALT_INST_TLB_VECTOR - IA64_INST_TLB_VECTOR;
   1.248  	}
   1.249  
   1.250 -	/*
   1.251 -	 * Optimisation: this VHPT walker aborts on not-present pages
   1.252 -	 * instead of inserting a not-present translation, this allows
   1.253 -	 * vectoring directly to the miss handler.
   1.254 -	 */
   1.255 -	if (pte & _PAGE_P) {
   1.256 -		*pteval = pte;
   1.257 -		*itir = vcpu_get_itir_on_fault(vcpu,address);
   1.258 -		vhpt_translate_count++;
   1.259 -		return IA64_NO_FAULT;
   1.260 +	/* avoid recursively walking (short format) VHPT */
   1.261 +	else if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) != 0) {
   1.262 +
   1.263 +		if (__copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
   1.264 +			// virtual VHPT walker "missed" in TLB
   1.265 +			fault = IA64_VHPT_FAULT;
   1.266 +
   1.267 +		/*
   1.268 +		* Optimisation: this VHPT walker aborts on not-present pages
   1.269 +		* instead of inserting a not-present translation, this allows
   1.270 +		* vectoring directly to the miss handler.
   1.271 +		*/
   1.272 +		else if (pte & _PAGE_P) {
   1.273 +			*pteval = pte;
   1.274 +			vhpt_translate_count++;
   1.275 +			return IA64_NO_FAULT;
   1.276 +		}
   1.277  	}
   1.278 -	*itir = vcpu_get_itir_on_fault(vcpu,address);
   1.279 -	return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
   1.280 +
   1.281 +	// for VHPT fault, use itir based on iha, not on fault address
   1.282 +	itir_addr = (fault == IA64_VHPT_FAULT) ? *iha : address;
   1.283 +	*itir = vcpu_get_itir_on_fault(vcpu,itir_addr);
   1.284 +	return fault;
   1.285  }
   1.286  
   1.287  IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
   1.288 @@ -1738,33 +1686,6 @@ static void vcpu_set_tr_entry(TR_ENTRY *
   1.289  	}
   1.290  }
   1.291  
   1.292 -TR_ENTRY *vcpu_match_tr_entry(VCPU *vcpu, TR_ENTRY *trp, UINT64 ifa, int count)
   1.293 -{
   1.294 -	unsigned long rid = (get_rr(ifa) & RR_RID_MASK);
   1.295 -	int i;
   1.296 -
   1.297 -	for (i = 0; i < count; i++, trp++) {
   1.298 -		if (!trp->p) continue;
   1.299 -		if (physicalize_rid(vcpu,trp->rid) != rid) continue;
   1.300 -		if (ifa < trp->vadr) continue;
   1.301 -		if (ifa >= (trp->vadr + (1L << trp->ps)) - 1) continue;
   1.302 -		//if (trp->key && !match_pkr(vcpu,trp->key)) continue;
   1.303 -		return trp;
   1.304 -	}
   1.305 -	return 0;
   1.306 -}
   1.307 -
   1.308 -TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa)
   1.309 -{
   1.310 -	TR_ENTRY *trp;
   1.311 -
   1.312 -	trp = vcpu_match_tr_entry(vcpu,vcpu->arch.dtrs,ifa,NDTRS);
   1.313 -	if (trp) return trp;
   1.314 -	trp = vcpu_match_tr_entry(vcpu,vcpu->arch.itrs,ifa,NITRS);
   1.315 -	if (trp) return trp;
   1.316 -	return 0;
   1.317 -}
   1.318 -
   1.319  IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
   1.320  		UINT64 itir, UINT64 ifa)
   1.321  {
   1.322 @@ -1774,6 +1695,7 @@ IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 
   1.323  	trp = &PSCBX(vcpu,dtrs[slot]);
   1.324  //printf("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
   1.325  	vcpu_set_tr_entry(trp,pte,itir,ifa);
   1.326 +	vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),ifa);
   1.327  	return IA64_NO_FAULT;
   1.328  }
   1.329  
   1.330 @@ -1786,6 +1708,7 @@ IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 
   1.331  	trp = &PSCBX(vcpu,itrs[slot]);
   1.332  //printf("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
   1.333  	vcpu_set_tr_entry(trp,pte,itir,ifa);
   1.334 +	vcpu_quick_region_set(PSCBX(vcpu,itr_regions),ifa);
   1.335  	return IA64_NO_FAULT;
   1.336  }
   1.337  
   1.338 @@ -1837,17 +1760,6 @@ void vcpu_itc_no_srlz(VCPU *vcpu, UINT64
   1.339  	}
   1.340  }
   1.341  
   1.342 -// NOTE: returns a physical pte, NOT a "metaphysical" pte, so do not check
   1.343 -// the physical address contained for correctness
   1.344 -TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa)
   1.345 -{
   1.346 -	TR_ENTRY *trp;
   1.347 -
   1.348 -	if (trp = vcpu_match_tr_entry(vcpu,&vcpu->arch.dtlb,ifa,1))
   1.349 -		return (&vcpu->arch.dtlb);
   1.350 -	return 0UL;
   1.351 -}
   1.352 -
   1.353  IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
   1.354  {
   1.355  	unsigned long pteval, logps = (itir >> 2) & 0x3f;
   1.356 @@ -1954,12 +1866,14 @@ IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 
   1.357  IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
   1.358  {
   1.359  	printf("vcpu_ptr_d: Purging TLB is unsupported\n");
   1.360 +	// don't forget to recompute dtr_regions
   1.361  	return (IA64_ILLOP_FAULT);
   1.362  }
   1.363  
   1.364  IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
   1.365  {
   1.366  	printf("vcpu_ptr_i: Purging TLB is unsupported\n");
   1.367 +	// don't forget to recompute itr_regions
   1.368  	return (IA64_ILLOP_FAULT);
   1.369  }
   1.370  
     2.1 --- a/xen/include/asm-ia64/domain.h	Thu Oct 13 16:02:35 2005 -0600
     2.2 +++ b/xen/include/asm-ia64/domain.h	Fri Oct 14 11:56:18 2005 -0600
     2.3 @@ -49,6 +49,8 @@ struct arch_vcpu {
     2.4  	TR_ENTRY dtrs[NDTRS];
     2.5  	TR_ENTRY itlb;
     2.6  	TR_ENTRY dtlb;
     2.7 +	unsigned int itr_regions;
     2.8 +	unsigned int dtr_regions;
     2.9  	unsigned long itlb_pte;
    2.10  	unsigned long dtlb_pte;
    2.11  	unsigned long irr[4];
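
The domain.h hunk above adds the itr_regions/dtr_regions bitmaps that vcpu_quick_region_set() populates, but nothing in this changeset ever clears a bit; the vcpu_ptr_d()/vcpu_ptr_i() stubs only carry a "don't forget to recompute" reminder. Below is a hypothetical sketch of that recomputation, not part of the patch; demo_tr_entry is a cut-down stand-in for Xen's TR_ENTRY, using only the p and vadr fields that vcpu_match_tr_entry() already relies on:

	#include <stdio.h>

	struct demo_tr_entry {
		unsigned int  p;	/* present bit, as tested by vcpu_match_tr_entry() */
		unsigned long vadr;	/* virtual address of the mapping */
	};

	/* Rebuild the region bitmap from whichever entries survive a purge. */
	static unsigned int demo_recompute_regions(const struct demo_tr_entry *trs, int count)
	{
		unsigned int regions = 0;
		int i;

		for (i = 0; i < count; i++)
			if (trs[i].p)
				regions |= 1u << (trs[i].vadr >> 61);	/* region = bits 63:61 */
		return regions;
	}

	int main(void)
	{
		struct demo_tr_entry dtrs[2] = {
			{ 1, 0x2000000000000000UL },	/* live entry in region 1 */
			{ 0, 0x6000000000000000UL },	/* purged entry in region 3 */
		};

		/* prints 0x2: only region 1 still needs to be scanned */
		printf("dtr_regions = 0x%x\n", demo_recompute_regions(dtrs, 2));
		return 0;
	}

In the real code something along these lines would presumably run after a successful purge, e.g. assigning the result to PSCBX(vcpu,dtr_regions) over vcpu->arch.dtrs/NDTRS, mirroring how vcpu_itr_d() sets a bit whenever an entry is inserted.
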