ia64/xen-unstable

changeset 15726:b5dbf184df6c

[IA64] Support of 4k page size for individual guests

4k page size support per vcpu

Signed-off-by: Juergen Gross <juergen.gross@fujitsu-siemens.com>
author Alex Williamson <alex.williamson@hp.com>
date Thu Aug 16 10:03:26 2007 -0600 (2007-08-16)
parents f317c27973f5
children 778985f246a0
files xen/arch/ia64/asm-offsets.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/faults.c xen/arch/ia64/xen/hyperprivop.S xen/arch/ia64/xen/mm.c xen/arch/ia64/xen/regionreg.c xen/arch/ia64/xen/vcpu.c xen/arch/ia64/xen/vhpt.c xen/include/asm-ia64/domain.h
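
The patch replaces the compile-time PAGE_SHIFT with a per-vcpu field, arch.vhpt_pg_shift, in every path that derives a VHPT or TLB page size, so a guest built for 4k pages can run on a Xen built with larger pages. A minimal C sketch of the field's life cycle (types reduced for illustration; PAGE_SHIFT of 14 assumes a 16k Xen build):

    #define PAGE_SHIFT 14   /* assumption: a 16k-page Xen build */

    struct vcpu {
            struct {
                    unsigned char vhpt_pg_shift;   /* the new field */
            } arch;
    };

    /* vcpu_initialise(): default to the old behaviour. */
    static void init_sketch(struct vcpu *v)
    {
            v->arch.vhpt_pg_shift = PAGE_SHIFT;
    }

    /* vcpu_itc_i/vcpu_itc_d: a guest insert with a smaller page size
       downgrades the shift (the real code also rebuilds the VHPT). */
    static void insert_sketch(struct vcpu *v, unsigned int itir_ps)
    {
            if (itir_ps < v->arch.vhpt_pg_shift)
                    v->arch.vhpt_pg_shift = (unsigned char)itir_ps; /* e.g. 12 */
    }
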
line diff
     1.1 --- a/xen/arch/ia64/asm-offsets.c	Thu Aug 16 09:37:54 2007 -0600
     1.2 +++ b/xen/arch/ia64/asm-offsets.c	Thu Aug 16 10:03:26 2007 -0600
     1.3 @@ -72,6 +72,7 @@ void foo(void)
     1.4  	DEFINE(IA64_VCPU_DOMAIN_ITM_LAST_OFFSET, offsetof (struct vcpu, arch.domain_itm_last));
     1.5  	DEFINE(IA64_VCPU_ITLB_OFFSET, offsetof (struct vcpu, arch.itlb));
     1.6  	DEFINE(IA64_VCPU_DTLB_OFFSET, offsetof (struct vcpu, arch.dtlb));
     1.7 +	DEFINE(IA64_VCPU_VHPT_PG_SHIFT_OFFSET, offsetof (struct vcpu, arch.vhpt_pg_shift));
     1.8  
     1.9  	BLANK();
    1.10  
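
The new offset constant exists so the hand-written assembly in hyperprivop.S can reach the field with a single ld1. For readers unfamiliar with the asm-offsets mechanism, this is roughly how it works (struct layout reduced; the DEFINE shape follows the usual Linux/Xen asm-offsets pattern, not copied from this tree):

    #include <stddef.h>

    struct vcpu { struct { unsigned char vhpt_pg_shift; } arch; };

    /* Emits a marker that the build greps out of the generated assembly
       and turns into '#define IA64_VCPU_VHPT_PG_SHIFT_OFFSET <n>'. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " : : "i" (val))

    void foo(void)
    {
            DEFINE(IA64_VCPU_VHPT_PG_SHIFT_OFFSET,
                   offsetof(struct vcpu, arch.vhpt_pg_shift));
    }
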
     2.1 --- a/xen/arch/ia64/xen/domain.c	Thu Aug 16 09:37:54 2007 -0600
     2.2 +++ b/xen/arch/ia64/xen/domain.c	Thu Aug 16 10:03:26 2007 -0600
     2.3 @@ -446,6 +446,7 @@ int vcpu_initialise(struct vcpu *v)
     2.4  	    v->arch.ending_rid = d->arch.ending_rid;
     2.5  	    v->arch.breakimm = d->arch.breakimm;
     2.6  	    v->arch.last_processor = INVALID_PROCESSOR;
     2.7 +	    v->arch.vhpt_pg_shift = PAGE_SHIFT;
     2.8  	}
     2.9  
    2.10  	if (!VMX_DOMAIN(v))
     3.1 --- a/xen/arch/ia64/xen/faults.c	Thu Aug 16 09:37:54 2007 -0600
     3.2 +++ b/xen/arch/ia64/xen/faults.c	Thu Aug 16 10:03:26 2007 -0600
     3.3 @@ -239,6 +239,8 @@ void ia64_do_page_fault(unsigned long ad
     3.4  		    (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
     3.5  		regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr,
     3.6  					       IA64_PSR_CPL0_BIT);
     3.7 +		if (PSCB(current, dcr) & IA64_DCR_BE)
     3.8 +			regs->cr_ipsr |= IA64_PSR_BE;
     3.9  
    3.10  		if (PSCB(current, hpsr_dfh))
    3.11  			regs->cr_ipsr |= IA64_PSR_DFH;  
    3.12 @@ -741,7 +743,8 @@ ia64_shadow_fault(unsigned long ifa, uns
    3.13  	pte = vlfe->page_flags;
    3.14  	if (vlfe->ti_tag == ia64_ttag(ifa)) {
    3.15  		/* The VHPT entry is valid.  */
    3.16 -		gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
    3.17 +		gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >>
    3.18 +					 v->arch.vhpt_pg_shift);
    3.19  		BUG_ON(gpfn == INVALID_M2P_ENTRY);
    3.20  	} else {
    3.21  		unsigned long itir, iha;
    3.22 @@ -757,10 +760,10 @@ ia64_shadow_fault(unsigned long ifa, uns
    3.23  		/* Try again!  */
    3.24  		if (fault != IA64_NO_FAULT) {
    3.25  			/* This will trigger a dtlb miss.  */
    3.26 -			ia64_ptcl(ifa, PAGE_SHIFT << 2);
    3.27 +			ia64_ptcl(ifa, v->arch.vhpt_pg_shift << 2);
    3.28  			return;
    3.29  		}
    3.30 -		gpfn = ((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
    3.31 +		gpfn = ((pte & _PAGE_PPN_MASK) >> v->arch.vhpt_pg_shift);
    3.32  		if (pte & _PAGE_D)
    3.33  			pte |= _PAGE_VIRT_D;
    3.34  	}
    3.35 @@ -788,7 +791,7 @@ ia64_shadow_fault(unsigned long ifa, uns
    3.36  			/* Purge the TC locally.
    3.37  			   It will be reloaded from the VHPT iff the
    3.38  			   VHPT entry is still valid.  */
    3.39 -			ia64_ptcl(ifa, PAGE_SHIFT << 2);
    3.40 +			ia64_ptcl(ifa, v->arch.vhpt_pg_shift << 2);
    3.41  
    3.42  			atomic64_inc(&d->arch.shadow_fault_count);
    3.43  		} else {
    3.44 @@ -800,6 +803,6 @@ ia64_shadow_fault(unsigned long ifa, uns
     3.45  		/* We don't know whether or not the fault must be
    3.46  		   reflected.  The VHPT entry is not valid.  */
    3.47  		/* FIXME: in metaphysical mode, we could do an ITC now.  */
    3.48 -		ia64_ptcl(ifa, PAGE_SHIFT << 2);
    3.49 +		ia64_ptcl(ifa, v->arch.vhpt_pg_shift << 2);
    3.50  	}
    3.51  }
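
Apart from the first hunk (which propagates the guest's dcr.be into the delivered ipsr), the changes here make one substitution: frame numbers and purge sizes are derived from the per-vcpu shift instead of PAGE_SHIFT. A sketch of the pfn extraction being changed (the ppn mask value shown is illustrative for ia64):

    #include <stdint.h>

    #define _PAGE_PPN_MASK 0x0003fffffffff000UL  /* ppn bits of an ia64 pte */

    static inline uint64_t pte_to_pfn(uint64_t pte, unsigned int pg_shift)
    {
            /* pg_shift is v->arch.vhpt_pg_shift: 12 once a guest runs
               with 4k pages, PAGE_SHIFT otherwise. */
            return (pte & _PAGE_PPN_MASK) >> pg_shift;
    }

The ia64_ptcl() calls encode the purge page size as shift << 2, i.e. in bits 2..7 of the second operand, so the purge granularity must match what was inserted.
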
     4.1 --- a/xen/arch/ia64/xen/hyperprivop.S	Thu Aug 16 09:37:54 2007 -0600
     4.2 +++ b/xen/arch/ia64/xen/hyperprivop.S	Thu Aug 16 10:03:26 2007 -0600
     4.3 @@ -1604,26 +1604,27 @@ ENTRY(hyper_set_rr)
     4.4  	extr.u r26=r9,IA64_RR_RID,IA64_RR_RID_LEN	// r26 = r9.rid
     4.5  	movl r20=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
     4.6  	ld8 r20=[r20];;
     4.7 -	adds r21=IA64_VCPU_STARTING_RID_OFFSET,r20;;
     4.8 -	ld4 r22=[r21];;
     4.9 -	adds r21=IA64_VCPU_ENDING_RID_OFFSET,r20;;
    4.10 -	ld4 r23=[r21];;
    4.11 -	adds r24=IA64_VCPU_META_SAVED_RR0_OFFSET,r20;;
    4.12 +	adds r22=IA64_VCPU_STARTING_RID_OFFSET,r20
    4.13 +	adds r23=IA64_VCPU_ENDING_RID_OFFSET,r20
    4.14 +	adds r24=IA64_VCPU_META_SAVED_RR0_OFFSET,r20
    4.15 +	adds r21=IA64_VCPU_VHPT_PG_SHIFT_OFFSET,r20;;
    4.16 +	ld4 r22=[r22]
    4.17 +	ld4 r23=[r23]
    4.18 +	ld1 r21=[r21];;
    4.19  	add r22=r26,r22;;
    4.20  	cmp.geu p6,p0=r22,r23	// if r9.rid + starting_rid >= ending_rid
    4.21  (p6)	br.cond.spnt.few 1f;	// this is an error, but just ignore/return
    4.22 -	// r21=starting_rid
    4.23  	adds r20=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18
    4.24  	shl r25=r25,3;;
    4.25  	add r20=r20,r25;;
    4.26  	st8 [r20]=r9;;		// store away exactly what was passed
    4.27  	// but adjust value actually placed in rr[r8]
    4.28  	// r22 contains adjusted rid, "mangle" it (see regionreg.c)
    4.29 -	// and set ps to PAGE_SHIFT and ve to 1
    4.30 +	// and set ps to v->arch.vhpt_pg_shift and ve to 1
    4.31  	extr.u r27=r22,0,8
    4.32  	extr.u r28=r22,8,8
    4.33 -	extr.u r29=r22,16,8;;
    4.34 -	dep.z r23=PAGE_SHIFT,IA64_RR_PS,IA64_RR_PS_LEN;;
    4.35 +	extr.u r29=r22,16,8
    4.36 +	dep.z r23=r21,IA64_RR_PS,IA64_RR_PS_LEN;;
    4.37  	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
    4.38  	dep r23=r27,r23,24,8;;
    4.39  	dep r23=r28,r23,16,8;;
    4.40 @@ -1673,34 +1674,38 @@ ENTRY(hyper_set_rr0_to_rr4)
    4.41  	ld8 r17=[r17];;
    4.42  
    4.43  	adds r21=IA64_VCPU_STARTING_RID_OFFSET,r17
    4.44 -	adds r25=IA64_VCPU_ENDING_RID_OFFSET,r17
    4.45 +	adds r22=IA64_VCPU_ENDING_RID_OFFSET,r17
    4.46 +	adds r23=IA64_VCPU_VHPT_PG_SHIFT_OFFSET,r17
    4.47  	;; 
    4.48 -	ld4 r22=[r21] // r22 = current->starting_rid
    4.49 +	ld4 r21=[r21] // r21 = current->starting_rid
    4.50  	extr.u r26=r8,IA64_RR_RID,IA64_RR_RID_LEN	// r26 = r8.rid
    4.51  	extr.u r27=r9,IA64_RR_RID,IA64_RR_RID_LEN	// r27 = r9.rid
    4.52 -	ld4 r23=[r25] // r23 = current->ending_rid
    4.53 +	ld4 r22=[r22] // r22 = current->ending_rid
    4.54  	extr.u r28=r10,IA64_RR_RID,IA64_RR_RID_LEN	// r28 = r10.rid
    4.55  	extr.u r29=r11,IA64_RR_RID,IA64_RR_RID_LEN	// r29 = r11.rid
    4.56  	adds r24=IA64_VCPU_META_SAVED_RR0_OFFSET,r17
    4.57  	extr.u r30=r14,IA64_RR_RID,IA64_RR_RID_LEN	// r30 = r14.rid
    4.58 +	ld1 r23=[r23] // r23 = current->vhpt_pg_shift
    4.59  	;; 
    4.60 -	add r16=r26,r22
    4.61 -	add r17=r27,r22
    4.62 -	add r19=r28,r22
    4.63 -	add r20=r29,r22
    4.64 -	add r21=r30,r22	
    4.65 +	add r16=r26,r21
    4.66 +	add r17=r27,r21
    4.67 +	add r19=r28,r21
    4.68 +	add r20=r29,r21
    4.69 +	add r21=r30,r21	
    4.70 +	dep.z r23=r23,IA64_RR_PS,IA64_RR_PS_LEN		// r23 = rr.ps
    4.71  	;; 
    4.72 -	cmp.geu p6,p0=r16,r23	// if r8.rid + starting_rid >= ending_rid
    4.73 -	cmp.geu p7,p0=r17,r23	// if r9.rid + starting_rid >= ending_rid
    4.74 -	cmp.geu p8,p0=r19,r23	// if r10.rid + starting_rid >= ending_rid
    4.75 +	cmp.geu p6,p0=r16,r22	// if r8.rid + starting_rid >= ending_rid
    4.76 +	cmp.geu p7,p0=r17,r22	// if r9.rid + starting_rid >= ending_rid
    4.77 +	cmp.geu p8,p0=r19,r22	// if r10.rid + starting_rid >= ending_rid
    4.78  (p6)	br.cond.spnt.few 1f	// this is an error, but just ignore/return
    4.79  (p7)	br.cond.spnt.few 1f	// this is an error, but just ignore/return
    4.80 -	cmp.geu p9,p0=r20,r23	// if r11.rid + starting_rid >= ending_rid
    4.81 +	cmp.geu p9,p0=r20,r22	// if r11.rid + starting_rid >= ending_rid
    4.82  (p8)	br.cond.spnt.few 1f	// this is an error, but just ignore/return
    4.83  (p9)	br.cond.spnt.few 1f	// this is an error, but just ignore/return
    4.84 -	cmp.geu p10,p0=r21,r23	// if r14.rid + starting_rid >= ending_rid
    4.85 +	cmp.geu p10,p0=r21,r22	// if r14.rid + starting_rid >= ending_rid
    4.86  (p10)	br.cond.spnt.few 1f	// this is an error, but just ignore/return
    4.87 -	
    4.88 +	dep r23=-1,r23,0,1	// add rr.ve
    4.89 +	;;
    4.90  	mov r25=1
    4.91  	adds r22=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18
    4.92  	;;
    4.93 @@ -1715,13 +1720,11 @@ ENTRY(hyper_set_rr0_to_rr4)
    4.94  	extr.u r27=r16,0,8
    4.95  	extr.u r28=r16,8,8
    4.96  	extr.u r29=r16,16,8;;
    4.97 -	dep.z r23=PAGE_SHIFT,2,6;;
    4.98 -	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
    4.99 -	dep r23=r27,r23,24,8;;
   4.100 -	dep r23=r28,r23,16,8;;
   4.101 -	dep r23=r29,r23,8,8;; 
   4.102 -	st8 [r24]=r23		// save for metaphysical
   4.103 -	mov rr[r26]=r23
   4.104 +	dep r25=r27,r23,24,8;;	// mangling is swapping bytes 1 & 3
   4.105 +	dep r25=r28,r25,16,8;;
   4.106 +	dep r25=r29,r25,8,8;; 
   4.107 +	st8 [r24]=r25		// save for metaphysical
   4.108 +	mov rr[r26]=r25
   4.109  	dv_serialize_data
   4.110  
   4.111  	// rr1
   4.112 @@ -1730,12 +1733,10 @@ ENTRY(hyper_set_rr0_to_rr4)
   4.113  	extr.u r27=r17,0,8
   4.114  	extr.u r28=r17,8,8
   4.115  	extr.u r29=r17,16,8;;
   4.116 -	dep.z r23=PAGE_SHIFT,2,6;;
   4.117 -	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   4.118 -	dep r23=r27,r23,24,8;;
   4.119 -	dep r23=r28,r23,16,8;;
   4.120 -	dep r23=r29,r23,8,8;; 
   4.121 -	mov rr[r26]=r23
   4.122 +	dep r25=r27,r23,24,8;;	// mangling is swapping bytes 1 & 3
   4.123 +	dep r25=r28,r25,16,8;;
   4.124 +	dep r25=r29,r25,8,8;; 
   4.125 +	mov rr[r26]=r25
   4.126  	dv_serialize_data
   4.127  
   4.128  	// rr2
   4.129 @@ -1744,12 +1745,10 @@ ENTRY(hyper_set_rr0_to_rr4)
   4.130  	extr.u r27=r19,0,8
   4.131  	extr.u r28=r19,8,8
   4.132  	extr.u r29=r19,16,8;;
   4.133 -	dep.z r23=PAGE_SHIFT,2,6;;
   4.134 -	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   4.135 -	dep r23=r27,r23,24,8;;
   4.136 -	dep r23=r28,r23,16,8;;
   4.137 -	dep r23=r29,r23,8,8;; 
   4.138 -	mov rr[r26]=r23
   4.139 +	dep r25=r27,r23,24,8;;	// mangling is swapping bytes 1 & 3
   4.140 +	dep r25=r28,r25,16,8;;
   4.141 +	dep r25=r29,r25,8,8;; 
   4.142 +	mov rr[r26]=r25
   4.143  	dv_serialize_data
   4.144  
   4.145  	// rr3
   4.146 @@ -1759,12 +1758,10 @@ ENTRY(hyper_set_rr0_to_rr4)
   4.147  	extr.u r27=r20,0,8
   4.148  	extr.u r28=r20,8,8
   4.149  	extr.u r29=r20,16,8;;
   4.150 -	dep.z r23=PAGE_SHIFT,2,6;;
   4.151 -	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   4.152 -	dep r23=r27,r23,24,8;;
   4.153 -	dep r23=r28,r23,16,8;;
   4.154 -	dep r23=r29,r23,8,8;; 
   4.155 -	mov rr[r26]=r23
   4.156 +	dep r25=r27,r23,24,8;;	// mangling is swapping bytes 1 & 3
   4.157 +	dep r25=r28,r25,16,8;;
   4.158 +	dep r25=r29,r25,8,8;; 
   4.159 +	mov rr[r26]=r25
   4.160  	dv_serialize_data
   4.161  	
   4.162  	// rr4
   4.163 @@ -1774,49 +1771,43 @@ ENTRY(hyper_set_rr0_to_rr4)
   4.164  	extr.u r27=r21,0,8
   4.165  	extr.u r28=r21,8,8
   4.166  	extr.u r29=r21,16,8;;
   4.167 -	dep.z r23=PAGE_SHIFT,2,6;;
   4.168 -	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   4.169 -	dep r23=r27,r23,24,8;;
   4.170 -	dep r23=r28,r23,16,8;;
   4.171 -	dep r23=r29,r23,8,8;; 
   4.172 -	mov rr[r26]=r23
   4.173 +	dep r25=r27,r23,24,8;;	// mangling is swapping bytes 1 & 3
   4.174 +	dep r25=r28,r25,16,8;;
   4.175 +	dep r25=r29,r25,8,8;; 
   4.176 +	mov rr[r26]=r25
   4.177  	dv_serialize_data
   4.178  #else
   4.179  	// shuffled version
   4.180  	// rr0
   4.181  	// uses r27, r28, r29 for mangling
   4.182 -	//      r23           for mangled value
   4.183 +	//      r25           for mangled value
   4.184  	st8 [r22]=r8, 8 // current->rrs[0] = r8
   4.185  	mov r26=0	// r26=0x0000000000000000
   4.186  	extr.u r27=r16,0,8
   4.187  	extr.u r28=r16,8,8
   4.188 -	extr.u r29=r16,16,8
   4.189 -	dep.z r23=PAGE_SHIFT,2,6;;
   4.190 -	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   4.191 -	extr.u r25=r17,0,8
   4.192 -	dep r23=r27,r23,24,8;;
   4.193 -	dep r23=r28,r23,16,8;;
   4.194 -	dep r23=r29,r23,8,8;; 
   4.195 -	st8 [r24]=r23		// save for metaphysical
   4.196 -	mov rr[r26]=r23
   4.197 +	extr.u r29=r16,16,8;;
   4.198 +	dep r25=r27,r23,24,8;;	// mangling is swapping bytes 1 & 3
   4.199 +	dep r25=r28,r25,16,8;;
   4.200 +	dep r25=r29,r25,8,8;; 
   4.201 +	st8 [r24]=r25		// save for metaphysical
   4.202 +	mov rr[r26]=r25
   4.203  	dv_serialize_data
   4.204  
   4.205  	// r16, r24, r25 is usable.
   4.206  	// rr1
   4.207  	// uses r25, r28, r29 for mangling
   4.208 -	//      r23           for mangled value
   4.209 +	//      r25           for mangled value
   4.210 +	extr.u r25=r17,0,8
   4.211  	extr.u r28=r17,8,8
   4.212  	st8 [r22]=r9, 8 // current->rrs[1] = r9
   4.213  	extr.u r29=r17,16,8 ;; 
   4.214 -	dep.z r23=PAGE_SHIFT,2,6;;
   4.215  	add r26=r26,r30	// r26 = 0x2000000000000000
   4.216 -	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   4.217  	extr.u r24=r19,8,8
   4.218  	extr.u r16=r19,0,8
   4.219 -	dep r23=r25,r23,24,8;;
   4.220 -	dep r23=r28,r23,16,8;;
   4.221 -	dep r23=r29,r23,8,8;; 
   4.222 -	mov rr[r26]=r23
   4.223 +	dep r25=r25,r23,24,8;;	// mangling is swapping bytes 1 & 3
   4.224 +	dep r25=r28,r25,16,8;;
   4.225 +	dep r25=r29,r25,8,8;; 
   4.226 +	mov rr[r26]=r25
   4.227  	dv_serialize_data
   4.228  
   4.229  	// r16, r17, r24, r25 is usable
   4.230 @@ -1826,10 +1817,8 @@ ENTRY(hyper_set_rr0_to_rr4)
   4.231  	extr.u r29=r19,16,8
   4.232  	extr.u r27=r20,0,8
   4.233  	st8 [r22]=r10, 8 // current->rrs[2] = r10
   4.234 -	dep.z r17=PAGE_SHIFT,2,6;;
   4.235  	add r26=r26,r30	// r26 = 0x4000000000000000	
   4.236 -	dep r17=-1,r17,0,1;;	// mangling is swapping bytes 1 & 3
   4.237 -	dep r17=r16,r17,24,8;;
   4.238 +	dep r17=r16,r23,24,8;;	// mangling is swapping bytes 1 & 3
   4.239  	dep r17=r24,r17,16,8;;
   4.240  	dep r17=r29,r17,8,8;; 
   4.241  	mov rr[r26]=r17
   4.242 @@ -1838,18 +1827,16 @@ ENTRY(hyper_set_rr0_to_rr4)
   4.243  	// r16, r17, r19, r24, r25 is usable
   4.244  	// rr3
   4.245  	// uses r27, r28, r29 for mangling
   4.246 -	//      r23           for mangled value
   4.247 +	//      r25           for mangled value
   4.248  	extr.u r28=r20,8,8
   4.249  	extr.u r29=r20,16,8
   4.250  	st8 [r22]=r11, 8 // current->rrs[3] = r11
   4.251  	extr.u r16=r21,0,8
   4.252 -	dep.z r23=PAGE_SHIFT,2,6;;
   4.253  	add r26=r26,r30	// r26 = 0x6000000000000000
   4.254 -	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   4.255 -	dep r23=r27,r23,24,8;;
   4.256 -	dep r23=r28,r23,16,8;;
   4.257 -	dep r23=r29,r23,8,8;; 
   4.258 -	mov rr[r26]=r23
   4.259 +	dep r25=r27,r23,24,8;;	// mangling is swapping bytes 1 & 3
   4.260 +	dep r25=r28,r25,16,8;;
   4.261 +	dep r25=r29,r25,8,8;; 
   4.262 +	mov rr[r26]=r25
   4.263  	dv_serialize_data
   4.264  	
   4.265  	// r16, r17, r19, r20, r24, r25
   4.266 @@ -1859,10 +1846,8 @@ ENTRY(hyper_set_rr0_to_rr4)
   4.267  	extr.u r17=r21,8,8
   4.268  	extr.u r24=r21,16,8
   4.269  	st8 [r22]=r14 // current->rrs[4] = r14
   4.270 -	dep.z r25=PAGE_SHIFT,2,6;;
   4.271  	add r26=r26,r30	// r26 = 0x8000000000000000
   4.272 -	dep r25=-1,r25,0,1;;	// mangling is swapping bytes 1 & 3
   4.273 -	dep r25=r16,r25,24,8;;
   4.274 +	dep r25=r16,r23,24,8;;	// mangling is swapping bytes 1 & 3
   4.275  	dep r25=r17,r25,16,8;;
   4.276  	dep r25=r24,r25,8,8;; 
   4.277  	mov rr[r26]=r25
   4.278 @@ -2024,26 +2009,30 @@ ENTRY(hyper_ptc_ga)
   4.279  	adds r21=1,r21;;
   4.280  	st4 [r20]=r21;;
   4.281  #endif
   4.282 +	movl r21=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
   4.283 +	ld8 r21=[r21];;
   4.284 +	adds r22=IA64_VCPU_VHPT_PG_SHIFT_OFFSET,r21
   4.285  	mov r28=r8
   4.286  	extr.u r19=r9,2,6		// addr_range=1<<((r9&0xfc)>>2)
   4.287  	mov r20=1
   4.288  	shr.u r24=r8,61
   4.289 -	addl r27=56,r0			// PAGE_SHIFT<<2 (for ptc.ga)
   4.290  	movl r26=0x8000000000000000	// INVALID_TI_TAG
   4.291  	mov r30=ar.lc
   4.292  	;;
   4.293 +	ld1 r22=[r22]			// current->arch.vhpt_pg_shift
   4.294  	shl r19=r20,r19
   4.295  	cmp.eq p7,p0=7,r24
   4.296  (p7)	br.spnt.many dispatch_break_fault ;;	// slow way for rr7
   4.297  	;;
   4.298 +	shl r27=r22,2			// vhpt_pg_shift<<2 (for ptc.ga)
   4.299 +	shr.u r23=r19,r22		// repeat loop for n pages
   4.300  	cmp.le p7,p0=r19,r0		// skip flush if size<=0
   4.301  (p7)	br.cond.dpnt 2f ;;
   4.302 -	extr.u r24=r19,0,PAGE_SHIFT
   4.303 -	shr.u r23=r19,PAGE_SHIFT ;;	// repeat loop for n pages
   4.304 -	cmp.ne p7,p0=r24,r0 ;;
   4.305 +	shl r24=r23,r22;;
   4.306 +	cmp.ne p7,p0=r24,r23 ;;
   4.307  (p7)	adds r23=1,r23 ;;		// n_pages<size<n_pages+1? extra iter
   4.308  	mov ar.lc=r23
   4.309 -	movl r29=PAGE_SIZE;;
   4.310 +	shl r29=r20,r22;;		// page_size
   4.311  1:
   4.312  	thash r25=r28 ;;
   4.313  	adds r25=16,r25 ;;
   4.314 @@ -2060,10 +2049,8 @@ 2:
   4.315  	mov ar.lc=r30 ;;
   4.316  	mov r29=cr.ipsr
   4.317  	mov r30=cr.iip;;
   4.318 -	movl r27=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
   4.319 -	ld8 r27=[r27];;
   4.320 -	adds r25=IA64_VCPU_DTLB_OFFSET,r27
   4.321 -	adds r26=IA64_VCPU_ITLB_OFFSET,r27;;
   4.322 +	adds r25=IA64_VCPU_DTLB_OFFSET,r21
   4.323 +	adds r26=IA64_VCPU_ITLB_OFFSET,r21;;
   4.324  	ld8 r24=[r25]
   4.325  	ld8 r27=[r26] ;;
   4.326  	and r24=-2,r24
   4.327 @@ -2110,10 +2097,14 @@ hyper_itc_d:
   4.328  	br.sptk.many dispatch_break_fault ;;
   4.329  #else
   4.330  	// ensure itir.ps >= xen's pagesize
   4.331 +	movl r27=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
   4.332 +	ld8 r27=[r27];;
   4.333 +	adds r22=IA64_VCPU_VHPT_PG_SHIFT_OFFSET,r27
   4.334  	adds r23=XSI_ITIR_OFS-XSI_PSR_IC_OFS,r18 ;;
   4.335 +	ld1 r22=[r22]
   4.336  	ld8 r23=[r23];;
   4.337  	extr.u r24=r23,IA64_ITIR_PS,IA64_ITIR_PS_LEN;;		// r24==logps
   4.338 -	cmp.gt p7,p0=PAGE_SHIFT,r24
   4.339 +	cmp.gt p7,p0=r22,r24
   4.340  (p7)	br.spnt.many dispatch_break_fault ;;
   4.341  	adds r21=XSI_IFA_OFS-XSI_PSR_IC_OFS,r18 ;;
   4.342  	ld8 r21=[r21];;
   4.343 @@ -2121,8 +2112,6 @@ hyper_itc_d:
   4.344  	extr.u r21=r21,61,3;;
   4.345  	cmp.eq p7,p0=r21,r0
   4.346  (p7)	br.spnt.many dispatch_break_fault ;;
   4.347 -	movl r27=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
   4.348 -	ld8 r27=[r27];;
   4.349  	adds r27=IA64_VCPU_DOMAIN_OFFSET,r27;;
   4.350  	ld8 r27=[r27]
   4.351  // FIXME: is the global var dom0 always pinned? assume so for now
   4.352 @@ -2160,18 +2149,24 @@ END(hyper_itc)
   4.353  //	r31 == pr
   4.354  ENTRY(fast_insert)
   4.355  	// translate_domain_pte(r16=pteval,PSCB(ifa)=address,r24=itir)
   4.356 -	mov r19=1;;
   4.357 -	shl r20=r19,r24;;
   4.358 +	mov r19=1
   4.359 +	movl r27=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
   4.360 +	shl r20=r19,r24
   4.361 +	ld8 r27=[r27];;
   4.362 +	adds r23=IA64_VCPU_VHPT_PG_SHIFT_OFFSET,r27
   4.363  	adds r20=-1,r20		// r20 == mask
   4.364  	movl r19=_PAGE_PPN_MASK;;
   4.365 +	ld1 r23=[r23]
   4.366 +	mov r25=-1
   4.367  	and r22=r16,r19;;	// r22 == pteval & _PAGE_PPN_MASK
   4.368  	andcm r19=r22,r20
   4.369 +	shl r25=r25,r23		// -1 << current->arch.vhpt_pg_shift
   4.370  	adds r21=XSI_IFA_OFS-XSI_PSR_IC_OFS,r18 ;;
   4.371  	ld8 r21=[r21];;
   4.372  	and r20=r21,r20;;
   4.373  	or r19=r19,r20;;	// r19 == mpaddr
   4.374  // FIXME: for now, just do domain0 and skip mpaddr range checks
   4.375 -	dep r20=r0,r19,0,PAGE_SHIFT
   4.376 +	and r20=r25,r19
   4.377  	movl r21=PAGE_PHYS ;;
   4.378  	or r20=r20,r21 ;;	// r20==return value from lookup_domain_mpa
   4.379  	// r16=pteval,r20=pteval2
   4.380 @@ -2208,8 +2203,6 @@ ENTRY(fast_insert)
   4.381  	// vcpu_set_tr_entry(trp,r22=pte|1,r24=itir,r23=ifa)
   4.382  	// TR_ENTRY = {page_flags,itir,addr,rid}
   4.383  	tbit.z p6,p7=r17,0
   4.384 -	movl r27=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
   4.385 -	ld8 r27=[r27];;
   4.386  	adds r28=IA64_VCPU_STARTING_RID_OFFSET,r27
   4.387  (p6)	adds r27=IA64_VCPU_DTLB_OFFSET,r27
   4.388  (p7)	adds r27=IA64_VCPU_ITLB_OFFSET,r27;;
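
In C terms, the fast paths above now build each region-register value from the loaded shift rather than a PAGE_SHIFT immediate. A sketch of the value the dep sequences assemble (field positions per the ia64 region register: ve at bit 0, ps at bits 2..7, rid at bits 8..31; the byte swap mirrors the "mangling" comments and is a simplification of vmMangleRID in regionreg.c):

    #include <stdint.h>

    static inline uint64_t make_rr(uint32_t rid, unsigned int ps)
    {
            uint64_t rr = 1;                    /* ve = 1 */
            rr |= (uint64_t)ps << 2;            /* ps = vhpt_pg_shift */
            /* "mangling is swapping bytes 1 & 3" of the rr value: */
            rr |= (uint64_t)(rid & 0x0000ffUL) << 24;  /* rid byte 0 -> rr byte 3 */
            rr |= (uint64_t)(rid & 0x00ff00UL) << 8;   /* rid byte 1 -> rr byte 2 */
            rr |= (uint64_t)(rid & 0xff0000UL) >> 8;   /* rid byte 2 -> rr byte 1 */
            return rr;
    }

Note how hyper_set_rr0_to_rr4 now computes the ps/ve template into r23 once and reuses it for all five registers, which is what lets each per-rr dep.z/dep sequence collapse to three dep instructions.
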
     5.1 --- a/xen/arch/ia64/xen/mm.c	Thu Aug 16 09:37:54 2007 -0600
     5.2 +++ b/xen/arch/ia64/xen/mm.c	Thu Aug 16 10:03:26 2007 -0600
     5.3 @@ -447,7 +447,7 @@ gmfn_to_mfn_foreign(struct domain *d, un
     5.4  // given a domain virtual address, pte and pagesize, extract the metaphysical
     5.5  // address, convert the pte for a physical address for (possibly different)
     5.6  // Xen PAGE_SIZE and return modified pte.  (NOTE: TLB insert should use
     5.7 -// PAGE_SIZE!)
     5.8 +// current->arch.vhpt_pg_shift!)
     5.9  u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* itir,
    5.10                           struct p2m_entry* entry)
    5.11  {
    5.12 @@ -457,20 +457,25 @@ u64 translate_domain_pte(u64 pteval, u64
    5.13  	u64 arflags;
    5.14  	u64 arflags2;
    5.15  	u64 maflags2;
    5.16 +	u64 ps;
    5.17  
    5.18  	pteval &= ((1UL << 53) - 1);// ignore [63:53] bits
    5.19  
    5.20  	// FIXME address had better be pre-validated on insert
    5.21  	mask = ~itir_mask(_itir.itir);
    5.22  	mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
    5.23 +	ps = current->arch.vhpt_pg_shift ? current->arch.vhpt_pg_shift :
    5.24 +					   PAGE_SHIFT;
    5.25  
    5.26 -	if (_itir.ps > PAGE_SHIFT)
    5.27 -		_itir.ps = PAGE_SHIFT;
    5.28 +	if (_itir.ps > ps)
    5.29 +		_itir.ps = ps;
    5.30  
    5.31  	((ia64_itir_t*)itir)->itir = _itir.itir;/* Copy the whole register. */
    5.32  	((ia64_itir_t*)itir)->ps = _itir.ps;	/* Overwrite ps part! */
    5.33  
    5.34  	pteval2 = lookup_domain_mpa(d, mpaddr, entry);
    5.35 +	if (ps < PAGE_SHIFT)
    5.36 +		pteval2 |= address & (PAGE_SIZE - 1) & ~((1L << ps) - 1);
    5.37  
    5.38  	/* Check access rights.  */
    5.39  	arflags  = pteval  & _PAGE_AR_MASK;
    5.40 @@ -544,10 +549,11 @@ u64 translate_domain_pte(u64 pteval, u64
    5.41      			pteval &= ~_PAGE_D;
    5.42  	}
    5.43      
    5.44 -	/* Ignore non-addr bits of pteval2 and force PL0->2
    5.45 +	/* Ignore non-addr bits of pteval2 and force PL0->1
    5.46  	   (PL3 is unaffected) */
    5.47 -	return (pteval & ~_PAGE_PPN_MASK) |
    5.48 -	       (pteval2 & _PAGE_PPN_MASK) | _PAGE_PL_PRIV;
    5.49 +	return (pteval & ~(_PAGE_PPN_MASK | _PAGE_PL_MASK)) |
    5.50 +	       (pteval2 & _PAGE_PPN_MASK) |
    5.51 +	       (vcpu_pl_adjust(pteval, 7) & _PAGE_PL_MASK);
    5.52  }
    5.53  
    5.54  // given a current domain metaphysical address, return the physical address
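
The essential fix-up in translate_domain_pte: when the guest's page shift is smaller than Xen's, the bits of the faulting address between the two shifts must be carried into the machine pte, so each small guest page maps the correct slice of the larger machine frame. Standalone (PAGE_SHIFT of 14 assumed, matching a 16k Xen build):

    #include <stdint.h>

    #define PAGE_SHIFT 14
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    static inline uint64_t carry_subpage_bits(uint64_t pteval2,
                                              uint64_t address, unsigned int ps)
    {
            /* e.g. ps == 12: keep address bits 12..13, selecting one of
               the four 4k slices inside the 16k machine page. */
            if (ps < PAGE_SHIFT)
                    pteval2 |= address & (PAGE_SIZE - 1) & ~((1UL << ps) - 1);
            return pteval2;
    }
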
     6.1 --- a/xen/arch/ia64/xen/regionreg.c	Thu Aug 16 09:37:54 2007 -0600
     6.2 +++ b/xen/arch/ia64/xen/regionreg.c	Thu Aug 16 10:03:26 2007 -0600
     6.3 @@ -72,7 +72,7 @@ static unsigned long allocate_metaphysic
     6.4  
     6.5  	rrv.rrval = 0;	// Or else may see reserved bit fault
     6.6  	rrv.rid = d->arch.starting_mp_rid + n;
     6.7 -	rrv.ps = PAGE_SHIFT;
     6.8 +	rrv.ps = PAGE_SHIFT;	// only used at domain creation
     6.9  	rrv.ve = 0;
    6.10  	/* Mangle metaphysical rid */
    6.11  	rrv.rrval = vmMangleRID(rrv.rrval);
    6.12 @@ -254,7 +254,7 @@ int set_one_rr(unsigned long rr, unsigne
    6.13  	memrrv.rrval = rrv.rrval;
    6.14  	newrrv.rid = newrid;
    6.15  	newrrv.ve = 1;  // VHPT now enabled for region 7!!
    6.16 -	newrrv.ps = PAGE_SHIFT;
    6.17 +	newrrv.ps = v->arch.vhpt_pg_shift;
    6.18  
    6.19  	if (rreg == 0) {
    6.20  		v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
    6.21 @@ -288,7 +288,7 @@ void init_all_rr(struct vcpu *v)
    6.22  
    6.23  	rrv.rrval = 0;
    6.24  	//rrv.rrval = v->domain->arch.metaphysical_rr0;
    6.25 -	rrv.ps = PAGE_SHIFT;
    6.26 +	rrv.ps = v->arch.vhpt_pg_shift;
    6.27  	rrv.ve = 1;
    6.28  if (!v->vcpu_info) { panic("Stopping in init_all_rr\n"); }
    6.29  	VCPU(v,rrs[0]) = -1;
     7.1 --- a/xen/arch/ia64/xen/vcpu.c	Thu Aug 16 09:37:54 2007 -0600
     7.2 +++ b/xen/arch/ia64/xen/vcpu.c	Thu Aug 16 10:03:26 2007 -0600
     7.3 @@ -1697,7 +1697,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6
     7.4  		} else {
     7.5  			*pteval = (address & _PAGE_PPN_MASK) |
     7.6  				__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RWX;
     7.7 -			*itir = PAGE_SHIFT << 2;
     7.8 +			*itir = vcpu->arch.vhpt_pg_shift << 2;
     7.9  			perfc_incr(phys_translate);
    7.10  			return IA64_NO_FAULT;
    7.11  		}
    7.12 @@ -2292,13 +2292,29 @@ IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64 
    7.13   VCPU translation cache access routines
    7.14  **************************************************************************/
    7.15  
    7.16 +static void
    7.17 +vcpu_rebuild_vhpt(VCPU * vcpu, u64 ps)
    7.18 +{
    7.19 +#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
    7.20 +	printk("vhpt rebuild: using page_shift %d\n", (int)ps);
    7.21 +	vcpu->arch.vhpt_pg_shift = ps;
    7.22 +	vcpu_purge_tr_entry(&PSCBX(vcpu, dtlb));
    7.23 +	vcpu_purge_tr_entry(&PSCBX(vcpu, itlb));
    7.24 +	local_vhpt_flush();
    7.25 +	load_region_regs(vcpu);
    7.26 +#else
    7.27 +	panic_domain(NULL, "domain trying to use smaller page size!\n");
    7.28 +#endif
    7.29 +}
    7.30 +
    7.31  void
    7.32  vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
    7.33                   u64 mp_pte, u64 itir, struct p2m_entry *entry)
    7.34  {
    7.35  	ia64_itir_t _itir = {.itir = itir};
    7.36  	unsigned long psr;
    7.37 -	unsigned long ps = (vcpu->domain == dom0) ? _itir.ps : PAGE_SHIFT;
    7.38 +	unsigned long ps = (vcpu->domain == dom0) ? _itir.ps :
    7.39 +						    vcpu->arch.vhpt_pg_shift;
    7.40  
    7.41  	check_xen_space_overlap("itc", vaddr, 1UL << _itir.ps);
    7.42  
    7.43 @@ -2307,7 +2323,7 @@ vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, 
    7.44  		panic_domain(NULL, "vcpu_itc_no_srlz: domain trying to use "
    7.45  		             "smaller page size!\n");
    7.46  
    7.47 -	BUG_ON(_itir.ps > PAGE_SHIFT);
    7.48 +	BUG_ON(_itir.ps > vcpu->arch.vhpt_pg_shift);
    7.49  	vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry);
    7.50  	psr = ia64_clear_ic();
    7.51  	pte &= ~(_PAGE_RV2 | _PAGE_RV1);	// Mask out the reserved bits.
    7.52 @@ -2320,7 +2336,7 @@ vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, 
    7.53  		// addresses never get flushed.  More work needed if this
    7.54  		// ever happens.
    7.55  //printk("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
    7.56 -		if (_itir.ps > PAGE_SHIFT)
    7.57 +		if (_itir.ps > vcpu->arch.vhpt_pg_shift)
    7.58  			vhpt_multiple_insert(vaddr, pte, _itir.itir);
    7.59  		else
    7.60  			vhpt_insert(vaddr, pte, _itir.itir);
    7.61 @@ -2328,7 +2344,7 @@ vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, 
    7.62  	// even if domain pagesize is larger than PAGE_SIZE, just put
    7.63  	// PAGE_SIZE mapping in the vhpt for now, else purging is complicated
    7.64  	else {
    7.65 -		_itir.ps = PAGE_SHIFT;
    7.66 +		_itir.ps = vcpu->arch.vhpt_pg_shift;
    7.67  		vhpt_insert(vaddr, pte, _itir.itir);
    7.68  	}
    7.69  }
    7.70 @@ -2340,12 +2356,11 @@ IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pt
    7.71  	struct p2m_entry entry;
    7.72  	ia64_itir_t _itir = {.itir = itir};
    7.73  
    7.74 -	if (_itir.ps < PAGE_SHIFT)
    7.75 -		panic_domain(NULL, "vcpu_itc_d: domain trying to use "
    7.76 -		             "smaller page size!\n");
    7.77 +	if (_itir.ps < vcpu->arch.vhpt_pg_shift)
    7.78 +		vcpu_rebuild_vhpt(vcpu, _itir.ps);
    7.79  
    7.80   again:
    7.81 -	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
    7.82 +	//itir = (itir & ~0xfc) | (vcpu->arch.vhpt_pg_shift<<2); // ign dom pgsz
    7.83  	pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
    7.84  	if (!pteval)
    7.85  		return IA64_ILLOP_FAULT;
    7.86 @@ -2369,11 +2384,11 @@ IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pt
    7.87  	struct p2m_entry entry;
    7.88  	ia64_itir_t _itir = {.itir = itir};
    7.89  
    7.90 -	if (_itir.ps < PAGE_SHIFT)
    7.91 -		panic_domain(NULL, "vcpu_itc_i: domain trying to use "
    7.92 -		             "smaller page size!\n");
    7.93 +	if (_itir.ps < vcpu->arch.vhpt_pg_shift)
    7.94 +		vcpu_rebuild_vhpt(vcpu, _itir.ps);
    7.95 +
    7.96        again:
    7.97 -	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
    7.98 +	//itir = (itir & ~0xfc) | (vcpu->arch.vhpt_pg_shift<<2); // ign dom pgsz
    7.99  	pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
   7.100  	if (!pteval)
   7.101  		return IA64_ILLOP_FAULT;
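
Where vcpu_itc_i/vcpu_itc_d used to panic on a smaller guest page size, they now call the new vcpu_rebuild_vhpt, which is safe only because everything cached at the old granularity is discarded first. The sequence, with the helpers stubbed out (the real ones appear in the hunk above):

    struct vcpu { struct { unsigned char vhpt_pg_shift; } arch; };

    /* stand-ins for vcpu_purge_tr_entry/local_vhpt_flush/load_region_regs */
    static void purge_tr_shadows(struct vcpu *v)   { (void)v; }
    static void flush_local_vhpt(void)             { }
    static void reload_region_regs(struct vcpu *v) { (void)v; }

    static void vcpu_rebuild_vhpt_sketch(struct vcpu *v, unsigned long ps)
    {
            v->arch.vhpt_pg_shift = (unsigned char)ps; /* one-way downgrade */
            purge_tr_shadows(v);     /* drop the dtlb/itlb TR shadows      */
            flush_local_vhpt();      /* invalidate every local VHPT entry  */
            reload_region_regs(v);   /* rr.ps now reflects the new shift   */
    }

Note the CONFIG_XEN_IA64_PERVCPU_VHPT guard: with a shared VHPT a per-vcpu rebuild is not possible, so that configuration keeps the old panic.
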
     8.1 --- a/xen/arch/ia64/xen/vhpt.c	Thu Aug 16 09:37:54 2007 -0600
     8.2 +++ b/xen/arch/ia64/xen/vhpt.c	Thu Aug 16 10:03:26 2007 -0600
     8.3 @@ -88,15 +88,16 @@ void vhpt_insert (unsigned long vadr, un
     8.4  void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
     8.5  			   unsigned long itir)
     8.6  {
     8.7 +	unsigned char ps = current->arch.vhpt_pg_shift;
     8.8  	ia64_itir_t _itir = {.itir = itir};
     8.9  	unsigned long mask = (1L << _itir.ps) - 1;
    8.10  	int i;
    8.11  
    8.12 -	if (_itir.ps-PAGE_SHIFT > 10 && !running_on_sim) {
    8.13 +	if (_itir.ps - ps > 10 && !running_on_sim) {
    8.14  		// if this happens, we may want to revisit this algorithm
    8.15  		panic("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
    8.16  	}
    8.17 -	if (_itir.ps-PAGE_SHIFT > 2) {
    8.18 +	if (_itir.ps - ps > 2) {
    8.19  		// FIXME: Should add counter here to see how often this
    8.20  		//  happens (e.g. for 16MB pages!) and determine if it
    8.21  		//  is a performance problem.  On a quick look, it takes
    8.22 @@ -111,9 +112,9 @@ void vhpt_multiple_insert(unsigned long 
    8.23  	}
    8.24  	vaddr &= ~mask;
    8.25  	pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
    8.26 -	for (i = 1L << (_itir.ps-PAGE_SHIFT); i > 0; i--) {
    8.27 +	for (i = 1L << (_itir.ps - ps); i > 0; i--) {
    8.28  		vhpt_insert(vaddr, pte, _itir.itir);
    8.29 -		vaddr += PAGE_SIZE;
    8.30 +		vaddr += (1L << ps);
    8.31  	}
    8.32  }
    8.33  
    8.34 @@ -291,6 +292,7 @@ static void
    8.35  __flush_vhpt_range(unsigned long vhpt_maddr, u64 vadr, u64 addr_range)
    8.36  {
    8.37  	void *vhpt_base = __va(vhpt_maddr);
    8.38 +	u64 pgsz = 1L << current->arch.vhpt_pg_shift;
    8.39  
    8.40  	while ((long)addr_range > 0) {
    8.41  		/* Get the VHPT entry.  */
    8.42 @@ -298,8 +300,8 @@ static void
    8.43  			__va_ul(vcpu_vhpt_maddr(current));
    8.44  		struct vhpt_lf_entry *v = vhpt_base + off;
    8.45  		v->ti_tag = INVALID_TI_TAG;
    8.46 -		addr_range -= PAGE_SIZE;
    8.47 -		vadr += PAGE_SIZE;
    8.48 +		addr_range -= pgsz;
    8.49 +		vadr += pgsz;
    8.50  	}
    8.51  }
    8.52  
    8.53 @@ -362,7 +364,8 @@ void domain_flush_vtlb_range (struct dom
    8.54  	// ptc.ga has release semantics.
    8.55  
    8.56  	/* ptc.ga  */
    8.57 -	platform_global_tlb_purge(vadr, vadr + addr_range, PAGE_SHIFT);
    8.58 +	platform_global_tlb_purge(vadr, vadr + addr_range,
    8.59 +				  current->arch.vhpt_pg_shift);
    8.60  	perfc_incr(domain_flush_vtlb_range);
    8.61  }
    8.62  
    8.63 @@ -381,6 +384,7 @@ void
    8.64  	int cpu;
    8.65  	int vcpu;
    8.66  	int local_purge = 1;
    8.67 +	unsigned char ps = current->arch.vhpt_pg_shift;
    8.68  	
    8.69  	BUG_ON((vaddr >> VRN_SHIFT) != VRN7);
    8.70  	/*
    8.71 @@ -413,7 +417,7 @@ void
    8.72  				continue;
    8.73  
    8.74  			/* Invalidate VHPT entries.  */
    8.75 -			vcpu_flush_vhpt_range(v, vaddr, PAGE_SIZE);
    8.76 +			vcpu_flush_vhpt_range(v, vaddr, 1L << ps);
    8.77  
    8.78  			/*
    8.79  			 * current->processor == v->processor
    8.80 @@ -427,7 +431,7 @@ void
    8.81  	} else {
    8.82  		for_each_cpu_mask(cpu, entry->pcpu_dirty_mask) {
    8.83  			/* Invalidate VHPT entries.  */
    8.84 -			cpu_flush_vhpt_range(cpu, vaddr, PAGE_SIZE);
    8.85 +			cpu_flush_vhpt_range(cpu, vaddr, 1L << ps);
    8.86  
    8.87  			if (d->vcpu[cpu] != current)
    8.88  				local_purge = 0;
    8.89 @@ -436,12 +440,11 @@ void
    8.90  
    8.91  	/* ptc.ga  */
    8.92  	if (local_purge) {
    8.93 -		ia64_ptcl(vaddr, PAGE_SHIFT << 2);
    8.94 +		ia64_ptcl(vaddr, ps << 2);
    8.95  		perfc_incr(domain_flush_vtlb_local);
    8.96  	} else {
    8.97  		/* ptc.ga has release semantics. */
    8.98 -		platform_global_tlb_purge(vaddr, vaddr + PAGE_SIZE,
    8.99 -		                          PAGE_SHIFT);
   8.100 +		platform_global_tlb_purge(vaddr, vaddr + (1L << ps), ps);
   8.101  		perfc_incr(domain_flush_vtlb_global);
   8.102  	}
   8.103  
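
vhpt_multiple_insert now steps by the per-vcpu page size when breaking a large translation into individual VHPT entries, and the flush paths purge at the same granularity. Isolated, the new iteration looks like this (insert helper stubbed out):

    #include <stdint.h>

    static void vhpt_insert_one(uint64_t vaddr, uint64_t pte, uint64_t itir)
    {
            (void)vaddr; (void)pte; (void)itir;    /* real work elided */
    }

    static void multiple_insert_sketch(uint64_t vaddr, uint64_t pte,
                                       uint64_t itir, unsigned int itir_ps,
                                       unsigned int vhpt_pg_shift)
    {
            uint64_t mask = (1UL << itir_ps) - 1;
            long i;

            vaddr &= ~mask;          /* align to the full mapping size */
            /* assumes itir_ps >= vhpt_pg_shift, as in the caller */
            for (i = 1L << (itir_ps - vhpt_pg_shift); i > 0; i--) {
                    vhpt_insert_one(vaddr, pte, itir);
                    vaddr += 1UL << vhpt_pg_shift;   /* was: PAGE_SIZE */
            }
    }
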
     9.1 --- a/xen/include/asm-ia64/domain.h	Thu Aug 16 09:37:54 2007 -0600
     9.2 +++ b/xen/include/asm-ia64/domain.h	Thu Aug 16 10:03:26 2007 -0600
     9.3 @@ -246,6 +246,7 @@ struct arch_vcpu {
     9.4  #define XEN_IA64_PKR_IN_USE	0x1		/* If psr.pk = 1 was set. */
     9.5      unsigned char pkr_flags;
     9.6  
     9.7 +    unsigned char       vhpt_pg_shift;		/* PAGE_SHIFT or less */
     9.8  #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
     9.9      PTA                 pta;
    9.10      unsigned long       vhpt_maddr;