ia64/xen-unstable

changeset 4806:e24efdd35ac2

bitkeeper revision 1.1389.9.3 (427fb12c-ZTnIDL6puEsDOsDVeEKJg)

Move per-domain-per-cpu variables out of shared page for security
author djm@kirby.fc.hp.com
date Mon May 09 18:51:24 2005 +0000 (2005-05-09)
parents 214e5c4b003d
children 1bc0400523f0
files xen/arch/ia64/asm-offsets.c xen/arch/ia64/process.c xen/arch/ia64/vcpu.c xen/include/asm-ia64/domain.h xen/include/public/arch-ia64.h
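
The whole changeset turns on one distinction, introduced as a second accessor macro next to the existing PSCB(): state that Xen itself must later trust (the iva and dcr control registers, the irr[]/insvc[] interrupt bit arrays, the TR/TLB shadow entries and the domain timer fields) moves out of arch_vcpu_info_t, which sits in the shared page the guest maps writable, and into the Xen-private struct arch_exec_domain, reached through the new PSCBX() macro. A minimal sketch of the two access paths, using deliberately simplified stand-in structures rather than the real Xen/ia64 definitions:

    /* Simplified sketch: field lists abbreviated, layouts hypothetical. */
    #include <stdio.h>

    struct arch_vcpu_info   { unsigned long ipsr, iip, itv;       /* guest-visible state */ };
    struct arch_exec_domain { unsigned long iva, dcr, domain_itm;  /* Xen-private state   */ };

    struct vcpu_info   { struct arch_vcpu_info arch; };      /* lives in the shared page */
    struct exec_domain {
        struct vcpu_info        *vcpu_info;                   /* guest-writable mapping  */
        struct arch_exec_domain  arch;                        /* Xen-only memory         */
    };

    #define PSCB(x,y)   x->vcpu_info->arch.y    /* shared page: a guest can overwrite it */
    #define PSCBX(x,y)  x->arch.y               /* private: only Xen can modify it       */

    int main(void)
    {
        struct vcpu_info vi = {0};
        struct exec_domain edv = { .vcpu_info = &vi };
        struct exec_domain *ed = &edv;

        PSCB(ed, itv)  = 0xef;                  /* stored in the shared page     */
        PSCBX(ed, iva) = 0xa000000000040000UL;  /* stored in Xen-private memory  */
        printf("itv=%#lx iva=%#lx\n", PSCB(ed, itv), PSCBX(ed, iva));
        return 0;
    }

Fields the guest legitimately owns (ipsr, iip, itv, ifs, the region registers, and so on) keep using PSCB() throughout the diff; only the fields listed in the new arch_exec_domain block added to domain.h are switched over to PSCBX().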
line diff
     1.1 --- a/xen/arch/ia64/asm-offsets.c	Fri May 06 21:20:51 2005 +0000
     1.2 +++ b/xen/arch/ia64/asm-offsets.c	Mon May 09 18:51:24 2005 +0000
     1.3 @@ -50,8 +50,8 @@ void foo(void)
     1.4  	//DEFINE(IA64_TASK_SIGHAND_OFFSET,offsetof (struct task_struct, sighand));
     1.5  	//DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal));
     1.6  	//DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
     1.7 -	DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct task_struct, thread.ksp));
     1.8 -	DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct task_struct, thread.on_ustack));
     1.9 +	DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct exec_domain, arch._thread.ksp));
    1.10 +	DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct exec_domain, arch._thread.on_ustack));
    1.11  
    1.12  	BLANK();
    1.13  
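
With thread_struct relocated into arch_exec_domain (see the domain.h hunk further down), the kernel-stack-pointer and on_ustack offsets consumed by assembly code are now measured from struct exec_domain rather than the Linux task_struct. A compile-and-run sketch of what the two DEFINE() lines compute, with hypothetical, much-simplified structure layouts:

    #include <stdio.h>
    #include <stddef.h>

    struct thread_struct    { unsigned long ksp, on_ustack; };
    struct arch_exec_domain { void *regs; void *active_mm; struct thread_struct _thread; };
    struct exec_domain      { unsigned long generic_state[32];  /* stand-in for the common fields */
                              struct arch_exec_domain arch; };

    int main(void)
    {
        /* asm-offsets.c emits these as assembler constants through DEFINE();
         * here we simply print the same offsetof() expressions. */
        printf("IA64_TASK_THREAD_KSP_OFFSET       = %zu\n",
               offsetof(struct exec_domain, arch._thread.ksp));
        printf("IA64_TASK_THREAD_ON_USTACK_OFFSET = %zu\n",
               offsetof(struct exec_domain, arch._thread.on_ustack));
        return 0;
    }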
     2.1 --- a/xen/arch/ia64/process.c	Fri May 06 21:20:51 2005 +0000
     2.2 +++ b/xen/arch/ia64/process.c	Mon May 09 18:51:24 2005 +0000
     2.3 @@ -51,6 +51,7 @@ extern unsigned long dom0_start, dom0_si
     2.4  			IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
     2.5  
     2.6  #define PSCB(x,y)	x->vcpu_info->arch.y
     2.7 +#define PSCBX(x,y)	x->arch.y
     2.8  
     2.9  extern unsigned long vcpu_verbose;
    2.10  
    2.11 @@ -154,7 +155,7 @@ panic_domain(regs,"psr.ic off, deliverin
    2.12  		}
    2.13  //printf("Delivering NESTED DATA TLB fault\n");
    2.14  		vector = IA64_DATA_NESTED_TLB_VECTOR;
    2.15 -		regs->cr_iip = ((unsigned long) PSCB(ed,iva) + vector) & ~0xffUL;
    2.16 +		regs->cr_iip = ((unsigned long) PSCBX(ed,iva) + vector) & ~0xffUL;
    2.17  		regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    2.18  // NOTE: nested trap must NOT pass PSCB address
    2.19  		//regs->r31 = (unsigned long) &PSCB(ed);
    2.20 @@ -187,7 +188,7 @@ panic_domain(regs,"psr.ic off, deliverin
    2.21  	PSCB(ed,ifs) = 0;
    2.22  	PSCB(ed,incomplete_regframe) = 0;
    2.23  
    2.24 -	regs->cr_iip = ((unsigned long) PSCB(ed,iva) + vector) & ~0xffUL;
    2.25 +	regs->cr_iip = ((unsigned long) PSCBX(ed,iva) + vector) & ~0xffUL;
    2.26  	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    2.27  #ifdef CONFIG_SMP
    2.28  #error "sharedinfo doesn't handle smp yet"
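
Both converted lines in process.c compute the address of the guest's interruption handler from the now Xen-private iva. A self-contained illustration of that arithmetic follows; the vector offset and the example IVT base are assumptions made for the demo, and vcpu_set_iva() in vcpu.c is what enforces the ~0x7fff alignment mask:

    #include <stdio.h>

    /* Assumed offset of the data-nested-TLB entry in the ia64 IVT; the real
     * constant comes from Xen's ia64 headers. */
    #define IA64_DATA_NESTED_TLB_VECTOR  0x1400UL

    static unsigned long guest_handler(unsigned long iva, unsigned long vector)
    {
        iva &= ~0x7fffUL;                  /* vcpu_set_iva() keeps iva aligned like this */
        return (iva + vector) & ~0xffUL;   /* align to the 256-byte IVT slot granularity */
    }

    int main(void)
    {
        unsigned long iva = 0xa000000000040000UL;   /* made-up guest IVT base */
        printf("cr.iip would be set to %#lx\n",
               guest_handler(iva, IA64_DATA_NESTED_TLB_VECTOR));
        return 0;
    }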
     3.1 --- a/xen/arch/ia64/vcpu.c	Fri May 06 21:20:51 2005 +0000
     3.2 +++ b/xen/arch/ia64/vcpu.c	Mon May 09 18:51:24 2005 +0000
     3.3 @@ -1,6 +1,6 @@
     3.4  /*
     3.5   * Virtualized CPU functions
     3.6 - * 
     3.7 + *
     3.8   * Copyright (C) 2004 Hewlett-Packard Co.
     3.9   *	Dan Magenheimer (dan.magenheimer@hp.com)
    3.10   *
    3.11 @@ -26,6 +26,7 @@ typedef	union {
    3.12  // this def for vcpu_regs won't work if kernel stack is present
    3.13  #define	vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
    3.14  #define	PSCB(x,y)	x->vcpu_info->arch.y
    3.15 +#define	PSCBX(x,y)	x->arch.y
    3.16  
    3.17  #define	TRUE	1
    3.18  #define	FALSE	0
    3.19 @@ -289,9 +290,9 @@ BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
    3.20  
    3.21  UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
    3.22  {
    3.23 -	UINT64 dcr = PSCB(vcpu,dcr);
    3.24 +	UINT64 dcr = PSCBX(vcpu,dcr);
    3.25  	PSR psr = {0};
    3.26 -	
    3.27 +
    3.28  	//printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
    3.29  	psr.i64 = prevpsr;
    3.30  	psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
    3.31 @@ -317,13 +318,13 @@ extern unsigned long privop_trace;
    3.32  //verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
    3.33  	// Reads of cr.dcr on Xen always have the sign bit set, so
    3.34  	// a domain can differentiate whether it is running on SP or not
    3.35 -	*pval = PSCB(vcpu,dcr) | 0x8000000000000000L;
    3.36 +	*pval = PSCBX(vcpu,dcr) | 0x8000000000000000L;
    3.37  	return (IA64_NO_FAULT);
    3.38  }
    3.39  
    3.40  IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
    3.41  {
    3.42 -	*pval = PSCB(vcpu,iva) & ~0x7fffL;
    3.43 +	*pval = PSCBX(vcpu,iva) & ~0x7fffL;
    3.44  	return (IA64_NO_FAULT);
    3.45  }
    3.46  
    3.47 @@ -423,13 +424,13 @@ extern unsigned long privop_trace;
    3.48  	// a domain can differentiate whether it is running on SP or not
    3.49  	// Thus, writes of DCR should ignore the sign bit
    3.50  //verbose("vcpu_set_dcr: called\n");
    3.51 -	PSCB(vcpu,dcr) = val & ~0x8000000000000000L;
    3.52 +	PSCBX(vcpu,dcr) = val & ~0x8000000000000000L;
    3.53  	return (IA64_NO_FAULT);
    3.54  }
    3.55  
    3.56  IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
    3.57  {
    3.58 -	PSCB(vcpu,iva) = val & ~0x7fffL;
    3.59 +	PSCBX(vcpu,iva) = val & ~0x7fffL;
    3.60  	return (IA64_NO_FAULT);
    3.61  }
    3.62  
    3.63 @@ -523,16 +524,16 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
    3.64  		return;
    3.65  	}
    3.66  	if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return;
    3.67 -	if (test_bit(vector,PSCB(vcpu,irr))) {
    3.68 +	if (test_bit(vector,PSCBX(vcpu,irr))) {
    3.69  //printf("vcpu_pend_interrupt: overrun\n");
    3.70  	}
    3.71 -	set_bit(vector,PSCB(vcpu,irr));
    3.72 +	set_bit(vector,PSCBX(vcpu,irr));
    3.73  	PSCB(vcpu,pending_interruption) = 1;
    3.74  }
    3.75  
    3.76  void early_tick(VCPU *vcpu)
    3.77  {
    3.78 -	UINT64 *p = &PSCB(vcpu,irr[3]);
    3.79 +	UINT64 *p = &PSCBX(vcpu,irr[3]);
    3.80  	printf("vcpu_check_pending: about to deliver early tick\n");
    3.81  	printf("&irr[0]=%p, irr[0]=0x%lx\n",p,*p);
    3.82  }
    3.83 @@ -550,9 +551,9 @@ UINT64 vcpu_check_pending_interrupts(VCP
    3.84  {
    3.85  	UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;
    3.86  
    3.87 -	p = &PSCB(vcpu,irr[3]);
    3.88 +	p = &PSCBX(vcpu,irr[3]);
    3.89  	q = &PSCB(vcpu,delivery_mask[3]);
    3.90 -	r = &PSCB(vcpu,insvc[3]);
    3.91 +	r = &PSCBX(vcpu,insvc[3]);
    3.92  	for (i = 3; ; p--, q--, r--, i--) {
    3.93  		bits = *p & *q;
    3.94  		if (bits) break; // got a potential interrupt
    3.95 @@ -592,9 +593,9 @@ UINT64 vcpu_check_pending_interrupts(VCP
    3.96  #if 0
    3.97  if (vector == (PSCB(vcpu,itv) & 0xff)) {
    3.98  	UINT64 now = ia64_get_itc();
    3.99 -	UINT64 itm = PSCB(vcpu,domain_itm);
   3.100 +	UINT64 itm = PSCBX(vcpu,domain_itm);
   3.101  	if (now < itm) early_tick(vcpu);
   3.102 -	
   3.103 +
   3.104  }
   3.105  #endif
   3.106  	return vector;
   3.107 @@ -654,13 +655,13 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT6
   3.108  	i = vector >> 6;
   3.109  	mask = 1L << (vector & 0x3f);
   3.110  //printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n",vector);
   3.111 -	PSCB(vcpu,insvc[i]) |= mask;
   3.112 -	PSCB(vcpu,irr[i]) &= ~mask;
   3.113 +	PSCBX(vcpu,insvc[i]) |= mask;
   3.114 +	PSCBX(vcpu,irr[i]) &= ~mask;
   3.115  	//PSCB(vcpu,pending_interruption)--;
   3.116  	*pval = vector;
   3.117  	// if delivering a timer interrupt, remember domain_itm
   3.118  	if (vector == (PSCB(vcpu,itv) & 0xff)) {
   3.119 -		PSCB(vcpu,domain_itm_last) = PSCB(vcpu,domain_itm);
   3.120 +		PSCBX(vcpu,domain_itm_last) = PSCBX(vcpu,domain_itm);
   3.121  	}
   3.122  	return IA64_NO_FAULT;
   3.123  }
   3.124 @@ -775,7 +776,7 @@ IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT6
   3.125  	UINT64 *p, bits, vec, bitnum;
   3.126  	int i;
   3.127  
   3.128 -	p = &PSCB(vcpu,insvc[3]);
   3.129 +	p = &PSCBX(vcpu,insvc[3]);
   3.130  	for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
   3.131  	if (i < 0) {
   3.132  		printf("Trying to EOI interrupt when none are in-service.\r\n");
   3.133 @@ -826,8 +827,8 @@ extern unsigned long privop_trace;
   3.134  	if (val & 0xef00) return (IA64_ILLOP_FAULT);
   3.135  	PSCB(vcpu,itv) = val;
   3.136  	if (val & 0x10000) {
   3.137 -printf("**** vcpu_set_itv(%d): vitm=%lx, setting to 0\n",val,PSCB(vcpu,domain_itm));
   3.138 -		PSCB(vcpu,domain_itm) = 0;
   3.139 +printf("**** vcpu_set_itv(%d): vitm=%lx, setting to 0\n",val,PSCBX(vcpu,domain_itm));
   3.140 +		PSCBX(vcpu,domain_itm) = 0;
   3.141  	}
   3.142  	else vcpu_enable_timer(vcpu,1000000L);
   3.143  	return (IA64_NO_FAULT);
   3.144 @@ -860,14 +861,14 @@ BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
   3.145  BOOLEAN vcpu_timer_inservice(VCPU *vcpu)
   3.146  {
   3.147  	UINT64 itv = PSCB(vcpu,itv);
   3.148 -	return (test_bit(itv, PSCB(vcpu,insvc)));
   3.149 +	return (test_bit(itv, PSCBX(vcpu,insvc)));
   3.150  }
   3.151  
   3.152  BOOLEAN vcpu_timer_expired(VCPU *vcpu)
   3.153  {
   3.154 -	unsigned long domain_itm = PSCB(vcpu,domain_itm);
   3.155 +	unsigned long domain_itm = PSCBX(vcpu,domain_itm);
   3.156  	unsigned long now = ia64_get_itc();
   3.157 - 
   3.158 +
   3.159  	if (!domain_itm) return FALSE;
   3.160  	if (now < domain_itm) return FALSE;
   3.161  	if (vcpu_timer_disabled(vcpu)) return FALSE;
   3.162 @@ -892,36 +893,36 @@ void vcpu_safe_set_itm(unsigned long val
   3.163  
   3.164  void vcpu_set_next_timer(VCPU *vcpu)
   3.165  {
   3.166 -	UINT64 d = PSCB(vcpu,domain_itm);
   3.167 -	//UINT64 s = PSCB(vcpu,xen_itm);
   3.168 +	UINT64 d = PSCBX(vcpu,domain_itm);
   3.169 +	//UINT64 s = PSCBX(vcpu,xen_itm);
   3.170  	UINT64 s = local_cpu_data->itm_next;
   3.171  	UINT64 now = ia64_get_itc();
   3.172 -	//UINT64 interval = PSCB(vcpu,xen_timer_interval);
   3.173 +	//UINT64 interval = PSCBX(vcpu,xen_timer_interval);
   3.174  
   3.175  	/* gloss over the wraparound problem for now... we know it exists
   3.176  	 * but it doesn't matter right now */
   3.177  
   3.178  #if 0
   3.179  	/* ensure at least next SP tick is in the future */
   3.180 -	if (!interval) PSCB(vcpu,xen_itm) = now +
   3.181 +	if (!interval) PSCBX(vcpu,xen_itm) = now +
   3.182  #if 0
   3.183  		(running_on_sim() ? SIM_DEFAULT_CLOCK_RATE :
   3.184 -		 			DEFAULT_CLOCK_RATE);
   3.185 +					DEFAULT_CLOCK_RATE);
   3.186  #else
   3.187  	3000000;
   3.188  //printf("vcpu_set_next_timer: HACK!\n");
   3.189  #endif
   3.190  #if 0
   3.191 -	if (PSCB(vcpu,xen_itm) < now)
   3.192 -		while (PSCB(vcpu,xen_itm) < now + (interval>>1))
   3.193 -			PSCB(vcpu,xen_itm) += interval;
   3.194 +	if (PSCBX(vcpu,xen_itm) < now)
   3.195 +		while (PSCBX(vcpu,xen_itm) < now + (interval>>1))
   3.196 +			PSCBX(vcpu,xen_itm) += interval;
   3.197  #endif
   3.198  #endif
   3.199  
   3.200  	if (is_idle_task(vcpu->domain)) {
   3.201  		printf("****** vcpu_set_next_timer called during idle!!\n");
   3.202  	}
   3.203 -	//s = PSCB(vcpu,xen_itm);
   3.204 +	//s = PSCBX(vcpu,xen_itm);
   3.205  	if (d && (d > now) && (d < s)) {
   3.206  		vcpu_safe_set_itm(d);
   3.207  		//using_domain_as_itm++;
   3.208 @@ -935,10 +936,10 @@ void vcpu_set_next_timer(VCPU *vcpu)
   3.209  // parameter is a time interval specified in cycles
   3.210  void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
   3.211  {
   3.212 -    PSCB(vcpu,xen_timer_interval) = cycles;
   3.213 +    PSCBX(vcpu,xen_timer_interval) = cycles;
   3.214      vcpu_set_next_timer(vcpu);
   3.215      printf("vcpu_enable_timer(%d): interval set to %d cycles\n",
   3.216 -             PSCB(vcpu,xen_timer_interval));
   3.217 +             PSCBX(vcpu,xen_timer_interval));
   3.218      __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
   3.219  }
   3.220  
   3.221 @@ -948,30 +949,30 @@ IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT6
   3.222  
   3.223  	//if (val < now) val = now + 1000;
   3.224  //printf("*** vcpu_set_itm: called with %lx\n",val);
   3.225 -	PSCB(vcpu,domain_itm) = val;
   3.226 +	PSCBX(vcpu,domain_itm) = val;
   3.227  	vcpu_set_next_timer(vcpu);
   3.228  	return (IA64_NO_FAULT);
   3.229  }
   3.230  
   3.231  IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
   3.232  {
   3.233 -	
   3.234 +
   3.235  	UINT64 oldnow = ia64_get_itc();
   3.236 -	UINT64 olditm = PSCB(vcpu,domain_itm);
   3.237 +	UINT64 olditm = PSCBX(vcpu,domain_itm);
   3.238  	unsigned long d = olditm - oldnow;
   3.239  	unsigned long x = local_cpu_data->itm_next - oldnow;
   3.240 -	
   3.241 +
   3.242  	UINT64 newnow = val, min_delta;
   3.243  
   3.244  	local_irq_disable();
   3.245  	if (olditm) {
   3.246  printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
   3.247 -		PSCB(vcpu,domain_itm) = newnow + d;
   3.248 +		PSCBX(vcpu,domain_itm) = newnow + d;
   3.249  	}
   3.250  	local_cpu_data->itm_next = newnow + x;
   3.251 -	d = PSCB(vcpu,domain_itm);
   3.252 +	d = PSCBX(vcpu,domain_itm);
   3.253  	x = local_cpu_data->itm_next;
   3.254 -	
   3.255 +
   3.256  	ia64_set_itc(newnow);
   3.257  	if (d && (d > newnow) && (d < x)) {
   3.258  		vcpu_safe_set_itm(d);
   3.259 @@ -1006,7 +1007,7 @@ void vcpu_pend_timer(VCPU *vcpu)
   3.260  
   3.261  	if (vcpu_timer_disabled(vcpu)) return;
   3.262  	//if (vcpu_timer_inservice(vcpu)) return;
   3.263 -	if (PSCB(vcpu,domain_itm_last) == PSCB(vcpu,domain_itm)) {
   3.264 +	if (PSCBX(vcpu,domain_itm_last) == PSCBX(vcpu,domain_itm)) {
   3.265  		// already delivered an interrupt for this so
   3.266  		// don't deliver another
   3.267  		return;
   3.268 @@ -1014,7 +1015,7 @@ void vcpu_pend_timer(VCPU *vcpu)
   3.269  #if 0
   3.270  	// attempt to flag "timer tick before its due" source
   3.271  	{
   3.272 -	UINT64 itm = PSCB(vcpu,domain_itm);
   3.273 +	UINT64 itm = PSCBX(vcpu,domain_itm);
   3.274  	UINT64 now = ia64_get_itc();
   3.275  	if (now < itm) printf("******* vcpu_pend_timer: pending before due!\n");
   3.276  	}
   3.277 @@ -1026,7 +1027,7 @@ void vcpu_pend_timer(VCPU *vcpu)
   3.278  UINT64 vcpu_timer_pending_early(VCPU *vcpu)
   3.279  {
   3.280  	UINT64 now = ia64_get_itc();
   3.281 -	UINT64 itm = PSCB(vcpu,domain_itm);
   3.282 +	UINT64 itm = PSCBX(vcpu,domain_itm);
   3.283  
   3.284  	if (vcpu_timer_disabled(vcpu)) return 0;
   3.285  	if (!itm) return 0;
   3.286 @@ -1038,7 +1039,7 @@ void vcpu_poke_timer(VCPU *vcpu)
   3.287  {
   3.288  	UINT64 itv = PSCB(vcpu,itv) & 0xff;
   3.289  	UINT64 now = ia64_get_itc();
   3.290 -	UINT64 itm = PSCB(vcpu,domain_itm);
   3.291 +	UINT64 itm = PSCBX(vcpu,domain_itm);
   3.292  	UINT64 irr;
   3.293  
   3.294  	if (vcpu_timer_disabled(vcpu)) return;
   3.295 @@ -1048,8 +1049,8 @@ void vcpu_poke_timer(VCPU *vcpu)
   3.296  		while(1);
   3.297  	}
   3.298  	// using 0xef instead of itv so can get real irr
   3.299 -	if (now > itm && !test_bit(0xefL, PSCB(vcpu,insvc))) {
   3.300 -		if (!test_bit(0xefL,PSCB(vcpu,irr))) {
   3.301 +	if (now > itm && !test_bit(0xefL, PSCBX(vcpu,insvc))) {
   3.302 +		if (!test_bit(0xefL,PSCBX(vcpu,irr))) {
   3.303  			irr = ia64_getreg(_IA64_REG_CR_IRR3);
   3.304  			if (irr & (1L<<(0xef-0xc0))) return;
   3.305  if (now-itm>0x800000)
   3.306 @@ -1106,7 +1107,7 @@ printf("SI_CR_IIP/IPSR/IFS_OFFSET CHANGE
   3.307  printf("SI_CR_IIP=0x%x,IPSR=0x%x,IFS_OFFSET=0x%x\n",SI_OFS(iip),SI_OFS(ipsr),SI_OFS(ifs));
   3.308  while(1);
   3.309  }
   3.310 -		// TODO: validate PSCB(vcpu,iip) 
   3.311 +		// TODO: validate PSCB(vcpu,iip)
   3.312  		// TODO: PSCB(vcpu,ipsr) = psr;
   3.313  		PSCB(vcpu,ipsr) = psr.i64;
   3.314  		// now set up the trampoline
   3.315 @@ -1353,7 +1354,6 @@ IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UIN
   3.316  
   3.317  unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
   3.318  {
   3.319 -	
   3.320  	ia64_rr rr;
   3.321  
   3.322  	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
   3.323 @@ -1363,7 +1363,6 @@ unsigned long vcpu_get_rr_ve(VCPU *vcpu,
   3.324  
   3.325  unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
   3.326  {
   3.327 -	
   3.328  	ia64_rr rr;
   3.329  
   3.330  	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
   3.331 @@ -1373,7 +1372,6 @@ unsigned long vcpu_get_rr_ps(VCPU *vcpu,
   3.332  
   3.333  unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
   3.334  {
   3.335 -	
   3.336  	ia64_rr rr;
   3.337  
   3.338  	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
   3.339 @@ -1460,8 +1458,8 @@ TR_ENTRY *vcpu_match_tr_entry(VCPU *vcpu
   3.340  	for (i = 0; i < count; i++, trp++) {
   3.341  		if (!trp->p) continue;
   3.342  		if (physicalize_rid(vcpu,trp->rid) != rid) continue;
   3.343 -        	if (ifa < trp->vadr) continue;
   3.344 -        	if (ifa >= (trp->vadr + (1L << trp->ps)) - 1) continue;
   3.345 +		if (ifa < trp->vadr) continue;
   3.346 +		if (ifa >= (trp->vadr + (1L << trp->ps)) - 1) continue;
   3.347  		//if (trp->key && !match_pkr(vcpu,trp->key)) continue;
   3.348  		return trp;
   3.349  	}
   3.350 @@ -1472,9 +1470,9 @@ TR_ENTRY *match_tr(VCPU *vcpu, unsigned 
   3.351  {
   3.352  	TR_ENTRY *trp;
   3.353  
   3.354 -	trp = vcpu_match_tr_entry(vcpu,vcpu->vcpu_info->arch.dtrs,ifa,NDTRS);
   3.355 +	trp = vcpu_match_tr_entry(vcpu,vcpu->arch.dtrs,ifa,NDTRS);
   3.356  	if (trp) return trp;
   3.357 -	trp = vcpu_match_tr_entry(vcpu,vcpu->vcpu_info->arch.itrs,ifa,NITRS);
   3.358 +	trp = vcpu_match_tr_entry(vcpu,vcpu->arch.itrs,ifa,NITRS);
   3.359  	if (trp) return trp;
   3.360  	return 0;
   3.361  }
   3.362 @@ -1485,7 +1483,8 @@ IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 
   3.363  	TR_ENTRY *trp;
   3.364  
   3.365  	if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
   3.366 -	trp = &PSCB(vcpu,dtrs[slot]);
   3.367 +	trp = &PSCBX(vcpu,dtrs[slot]);
   3.368 +//printf("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
   3.369  	vcpu_set_tr_entry(trp,pte,itir,ifa);
   3.370  	return IA64_NO_FAULT;
   3.371  }
   3.372 @@ -1496,7 +1495,8 @@ IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 
   3.373  	TR_ENTRY *trp;
   3.374  
   3.375  	if (slot >= NITRS) return IA64_RSVDREG_FAULT;
   3.376 -	trp = &PSCB(vcpu,itrs[slot]);
   3.377 +	trp = &PSCBX(vcpu,itrs[slot]);
   3.378 +//printf("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
   3.379  	vcpu_set_tr_entry(trp,pte,itir,ifa);
   3.380  	return IA64_NO_FAULT;
   3.381  }
   3.382 @@ -1539,12 +1539,12 @@ void vcpu_itc_no_srlz(VCPU *vcpu, UINT64
   3.383  #endif
   3.384  	if (IorD & 0x4) return;  // don't place in 1-entry TLB
   3.385  	if (IorD & 0x1) {
   3.386 -		vcpu_set_tr_entry(&PSCB(vcpu,itlb),pte,ps<<2,vaddr);
   3.387 -		PSCB(vcpu,itlb_pte) = mp_pte;
   3.388 +		vcpu_set_tr_entry(&PSCBX(vcpu,itlb),pte,ps<<2,vaddr);
   3.389 +		PSCBX(vcpu,itlb_pte) = mp_pte;
   3.390  	}
   3.391  	if (IorD & 0x2) {
   3.392 -		vcpu_set_tr_entry(&PSCB(vcpu,dtlb),pte,ps<<2,vaddr);
   3.393 -		PSCB(vcpu,dtlb_pte) = mp_pte;
   3.394 +		vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),pte,ps<<2,vaddr);
   3.395 +		PSCBX(vcpu,dtlb_pte) = mp_pte;
   3.396  	}
   3.397  }
   3.398  
   3.399 @@ -1554,9 +1554,9 @@ unsigned long match_dtlb(VCPU *vcpu, uns
   3.400  {
   3.401  	TR_ENTRY *trp;
   3.402  
   3.403 -	if (trp = vcpu_match_tr_entry(vcpu,&vcpu->vcpu_info->arch.dtlb,ifa,1)) {
   3.404 +	if (trp = vcpu_match_tr_entry(vcpu,&vcpu->arch.dtlb,ifa,1)) {
   3.405  		if (ps) *ps = trp->ps;
   3.406 -		if (mp_pte) *mp_pte = vcpu->vcpu_info->arch.dtlb_pte;
   3.407 +		if (mp_pte) *mp_pte = vcpu->arch.dtlb_pte;
   3.408  		return (trp->page_flags);
   3.409  	}
   3.410  	return 0UL;
   3.411 @@ -1660,8 +1660,8 @@ IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 
   3.412  #endif
   3.413  	local_flush_tlb_all();
   3.414  	// just invalidate the "whole" tlb
   3.415 -	vcpu_purge_tr_entry(&PSCB(vcpu,dtlb));
   3.416 -	vcpu_purge_tr_entry(&PSCB(vcpu,itlb));
   3.417 +	vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
   3.418 +	vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
   3.419  	return IA64_NO_FAULT;
   3.420  }
   3.421  
   3.422 @@ -1681,8 +1681,8 @@ IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 
   3.423  	vhpt_flush_address(vadr,addr_range);
   3.424  #endif
   3.425  	ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
   3.426 -	vcpu_purge_tr_entry(&PSCB(vcpu,dtlb));
   3.427 -	vcpu_purge_tr_entry(&PSCB(vcpu,itlb));
   3.428 +	vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
   3.429 +	vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
   3.430  	return IA64_NO_FAULT;
   3.431  }
   3.432  
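
Most of the vcpu.c churn is the irr[] (pending) and insvc[] (in-service) interrupt bit arrays moving behind PSCBX(). The sketch below is a plain-C stand-in for that bookkeeping, using the same vector>>6 / vector&0x3f split as vcpu_get_ivr() and a top-down scan like the loop in vcpu_check_pending_interrupts(); it is illustrative only and uses a GCC builtin in place of Xen's test_bit/set_bit helpers:

    #include <stdio.h>

    static unsigned long irr[4];    /* 256 pending bits, one per interrupt vector */

    static void pend(unsigned vector) { irr[vector >> 6] |=  (1UL << (vector & 0x3f)); }
    static void ack(unsigned vector)  { irr[vector >> 6] &= ~(1UL << (vector & 0x3f)); }

    /* Highest pending vector, scanning irr[3] down to irr[0]. */
    static int highest_pending(void)
    {
        for (int i = 3; i >= 0; i--)
            if (irr[i])
                return i * 64 + (63 - __builtin_clzl(irr[i]));
        return -1;              /* nothing pending */
    }

    int main(void)
    {
        pend(0xef);             /* e.g. the vector vcpu_poke_timer() tests for */
        pend(0x30);
        printf("deliver vector %#x\n", highest_pending());   /* 0xef comes first */
        ack(0xef);
        printf("then vector    %#x\n", highest_pending());   /* then 0x30        */
        return 0;
    }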
     4.1 --- a/xen/include/asm-ia64/domain.h	Fri May 06 21:20:51 2005 +0000
     4.2 +++ b/xen/include/asm-ia64/domain.h	Mon May 09 18:51:24 2005 +0000
     4.3 @@ -37,10 +37,28 @@ struct arch_domain {
     4.4  #define shared_info_va arch.shared_info_va
     4.5  
     4.6  struct arch_exec_domain {
     4.7 +#if 1
     4.8 +	TR_ENTRY itrs[NITRS];
     4.9 +	TR_ENTRY dtrs[NDTRS];
    4.10 +	TR_ENTRY itlb;
    4.11 +	TR_ENTRY dtlb;
    4.12 +	unsigned long itlb_pte;
    4.13 +	unsigned long dtlb_pte;
    4.14 +	unsigned long irr[4];
    4.15 +	unsigned long insvc[4];
    4.16 +	unsigned long iva;
    4.17 +	unsigned long dcr;
    4.18 +	unsigned long itc;
    4.19 +	unsigned long domain_itm;
    4.20 +	unsigned long domain_itm_last;
    4.21 +	unsigned long xen_itm;
    4.22 +	unsigned long xen_timer_interval;
    4.23 +#endif
    4.24      void *regs;	/* temporary until find a better way to do privops */
    4.25 -    struct thread_struct _thread;
    4.26      struct mm_struct *active_mm;
    4.27 +    struct thread_struct _thread;	// this must be last
    4.28  };
    4.29 +
    4.30  #define active_mm arch.active_mm
    4.31  #define thread arch._thread
    4.32  
     5.1 --- a/xen/include/public/arch-ia64.h	Fri May 06 21:20:51 2005 +0000
     5.2 +++ b/xen/include/public/arch-ia64.h	Mon May 09 18:51:24 2005 +0000
     5.3 @@ -63,24 +63,8 @@ typedef struct {
     5.4  	unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
     5.5  	unsigned long rrs[8];	// region registers
     5.6  	unsigned long krs[8];	// kernel registers
     5.7 -	unsigned long pkrs[8]; // protection key registers
     5.8 -	// FIXME:  These shouldn't be here as they can be overwritten by guests
     5.9 -	// and validation at TLB miss time would be too expensive.
    5.10 -	TR_ENTRY itrs[NITRS];
    5.11 -	TR_ENTRY dtrs[NDTRS];
    5.12 -	TR_ENTRY itlb;
    5.13 -	TR_ENTRY dtlb;
    5.14 -	unsigned long itlb_pte;
    5.15 -	unsigned long dtlb_pte;
    5.16 -	unsigned long irr[4];
    5.17 -	unsigned long insvc[4];
    5.18 -	unsigned long iva;
    5.19 -	unsigned long dcr;
    5.20 -	unsigned long itc;
    5.21 -	unsigned long domain_itm;
    5.22 -	unsigned long domain_itm_last;
    5.23 -	unsigned long xen_itm;
    5.24 -	unsigned long xen_timer_interval;
    5.25 +	unsigned long pkrs[8];	// protection key registers
    5.26 +	unsigned long tmp[8];	// temp registers (e.g. for hyperprivops)
    5.27  //} PACKED arch_shared_info_t;
    5.28  } arch_vcpu_info_t;		// DON'T PACK 
    5.29