ia64/xen-unstable

changeset 15766:3cd445aecf59

[IA64] Fixes for 4k page support.

Some code depends on PAGE_SIZE and shouldn't be switched to the per-vcpu VHPT page size (vhpt_pg_shift).

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Tue Aug 28 12:30:31 2007 -0600 (2007-08-28)
parents 9341dd055619
children 12be90e2f831
files xen/arch/ia64/xen/faults.c xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/faults.c	Tue Aug 28 12:27:39 2007 -0600
+++ b/xen/arch/ia64/xen/faults.c	Tue Aug 28 12:30:31 2007 -0600
@@ -729,6 +729,17 @@ ia64_shadow_fault(unsigned long ifa, uns
 	unsigned long pte = 0;
 	struct vhpt_lf_entry *vlfe;
 
+	/*
+	 * v->arch.vhpt_pg_shift shouldn't be used here.
+	 * Currently the dirty page logging bitmap is allocated based
+	 * on PAGE_SIZE.  This is part of the xen_domctl_shadow_op ABI.
+	 * If we want to log dirty pages at a finer granularity when
+	 * v->arch.vhpt_pg_shift < PAGE_SHIFT, we have to revise the
+	 * ABI and update this function and the related tool stack
+	 * (live relocation).
+	 */
+	unsigned long vhpt_pg_shift = PAGE_SHIFT;
+
 	/* There are 2 jobs to do:
 	   -  marking the page as dirty (the metaphysical address must be
 	      extracted to do that).
@@ -744,7 +755,7 @@ ia64_shadow_fault(unsigned long ifa, uns
 	if (vlfe->ti_tag == ia64_ttag(ifa)) {
 		/* The VHPT entry is valid.  */
 		gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >>
-					 v->arch.vhpt_pg_shift);
+					 vhpt_pg_shift);
 		BUG_ON(gpfn == INVALID_M2P_ENTRY);
 	} else {
 		unsigned long itir, iha;
@@ -760,10 +771,10 @@ ia64_shadow_fault(unsigned long ifa, uns
 		/* Try again!  */
 		if (fault != IA64_NO_FAULT) {
 			/* This will trigger a dtlb miss.  */
-			ia64_ptcl(ifa, v->arch.vhpt_pg_shift << 2);
+			ia64_ptcl(ifa, vhpt_pg_shift << 2);
 			return;
 		}
-		gpfn = ((pte & _PAGE_PPN_MASK) >> v->arch.vhpt_pg_shift);
+		gpfn = ((pte & _PAGE_PPN_MASK) >> vhpt_pg_shift);
 		if (pte & _PAGE_D)
 			pte |= _PAGE_VIRT_D;
 	}
@@ -791,7 +802,7 @@ ia64_shadow_fault(unsigned long ifa, uns
 			/* Purge the TC locally.
 			   It will be reloaded from the VHPT iff the
 			   VHPT entry is still valid.  */
-			ia64_ptcl(ifa, v->arch.vhpt_pg_shift << 2);
+			ia64_ptcl(ifa, vhpt_pg_shift << 2);
 
 			atomic64_inc(&d->arch.shadow_fault_count);
 		} else {
@@ -803,6 +814,6 @@ ia64_shadow_fault(unsigned long ifa, uns
 		/* We don't know whether or not the fault must be
 		   reflected.  The VHPT entry is not valid.  */
 		/* FIXME: in metaphysical mode, we could do an ITC now.  */
-		ia64_ptcl(ifa, v->arch.vhpt_pg_shift << 2);
+		ia64_ptcl(ifa, vhpt_pg_shift << 2);
 	}
 }
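
Why the shadow-fault path is pinned to PAGE_SHIFT: the dirty bitmap
exposed through xen_domctl_shadow_op is indexed by frame numbers in
PAGE_SIZE units, so gpfn must be derived with PAGE_SHIFT even when the
vcpu's VHPT uses a different page size.  A minimal sketch of that
indexing, assuming 16K hypervisor pages; mark_dirty() is a hypothetical
helper for illustration, not Xen source:

    /* Sketch only: a dirty-log bitmap indexed by gpfn in PAGE_SIZE
     * units.  PAGE_SHIFT of 14 (16K pages) is an assumption. */
    #include <limits.h>

    #define PAGE_SHIFT    14
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static inline void mark_dirty(unsigned long *bitmap, unsigned long paddr)
    {
            unsigned long gpfn = paddr >> PAGE_SHIFT; /* PAGE_SIZE frames */

            bitmap[gpfn / BITS_PER_LONG] |= 1UL << (gpfn % BITS_PER_LONG);
    }

Had gpfn been computed with a smaller vhpt_pg_shift, the index could
run past a bitmap sized for PAGE_SIZE frames.  The << 2 in the
ia64_ptcl() calls, in turn, places the page-size order in the bit
position the ptc.l instruction expects for its page-size operand.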
--- a/xen/arch/ia64/xen/vhpt.c	Tue Aug 28 12:27:39 2007 -0600
+++ b/xen/arch/ia64/xen/vhpt.c	Tue Aug 28 12:30:31 2007 -0600
@@ -384,7 +384,12 @@ void
 	int cpu;
 	int vcpu;
 	int local_purge = 1;
-	unsigned char ps = current->arch.vhpt_pg_shift;
+
+	/* TLB insert tracking is done in PAGE_SIZE units. */
+	unsigned char ps = max_t(unsigned char,
+				 current->arch.vhpt_pg_shift, PAGE_SHIFT);
+	/* This case isn't supported (yet). */
+	BUG_ON(current->arch.vhpt_pg_shift > PAGE_SHIFT);
 
 	BUG_ON((vaddr >> VRN_SHIFT) != VRN7);
 	/*
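
The max_t() clamp keeps the purge order at PAGE_SHIFT or above because
TLB insert tracking records translations in PAGE_SIZE units; purging
with a smaller order could leave part of a tracked page mapped.  A
minimal sketch of the idiom, assuming 16K pages; max_t() below follows
the usual Linux/Xen statement-expression style and purge_page_shift()
is a hypothetical wrapper, not Xen source:

    /* Sketch only: max_t() casts both arguments to the given type and
     * yields the larger one, evaluating each argument once. */
    #define max_t(type, x, y) ({            \
            type __x = (x);                 \
            type __y = (y);                 \
            __x > __y ? __x : __y; })

    #define PAGE_SHIFT 14   /* assumption: 16K hypervisor pages */

    /* Clamp the page-size order used for a local purge so one purge
     * covers at least a full PAGE_SIZE frame.  The patch BUG_ONs on
     * vhpt_pg_shift > PAGE_SHIFT, so only the smaller case is clamped. */
    static unsigned char purge_page_shift(unsigned char vhpt_pg_shift)
    {
            return max_t(unsigned char, vhpt_pg_shift, PAGE_SHIFT);
    }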