ia64/xen-unstable

changeset 15663:255abff9d1f7

[IA64] Extend interfaces to use itir instead of logps

Changed some interfaces to pass the full cr.itir value instead of only its
logps part when handling itc_i/itc_d and vhpt_insert (see the itir layout
sketch below the header).

Signed-off-by: Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
author Alex Williamson <alex.williamson@hp.com>
date Mon Jul 30 16:38:47 2007 -0600 (2007-07-30)
parents 85c2f2d754ef
children 57f519c41534
files xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vtlb.c xen/arch/ia64/xen/faults.c xen/arch/ia64/xen/mm.c xen/arch/ia64/xen/vcpu.c xen/arch/ia64/xen/vhpt.c xen/include/asm-ia64/linux-xen/asm/processor.h xen/include/asm-ia64/mm.h xen/include/asm-ia64/vhpt.h
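
For context on the diff below: the call sites switch from passing a bare
logps (log2 page size) to an itir-formatted value, using the ia64_itir_t
union and the IA64_ITIR_PS_KEY helper. The following is a minimal,
self-contained sketch of what those presumably look like, with the field
layout taken from the Itanium architecture's ITIR register format (bits 1:0
reserved, 7:2 ps, 31:8 key, 63:32 reserved); the exact Xen definitions may
differ in naming and types.

/* Hedged sketch, not the verbatim Xen definitions. Compile with -std=c11
 * (anonymous struct members in a union). */
#include <stdint.h>
#include <stdio.h>

typedef union {
	uint64_t itir;              /* the whole cr.itir value */
	struct {
		uint64_t rv1 :  2;  /* reserved */
		uint64_t ps  :  6;  /* log2 of the page size (the old "logps") */
		uint64_t key : 24;  /* protection key */
		uint64_t rv2 : 32;  /* reserved */
	};
} ia64_itir_t;

/* Assumed shape of the helper used in this changeset: pack ps and key
 * into an itir-formatted word (the itc_i/itc_d callers pass key 0). */
#define IA64_ITIR_PS_KEY(ps, key) \
	(((uint64_t)(ps) << 2) | ((uint64_t)(key) << 8))

int main(void)
{
	/* e.g. a 16KB page (ps = 14) with key 0, as in the vmmu.c hunks */
	ia64_itir_t t = { .itir = IA64_ITIR_PS_KEY(14, 0) };
	printf("itir=%#llx ps=%u key=%u\n",
	       (unsigned long long)t.itir, (unsigned)t.ps, (unsigned)t.key);
	return 0;
}

The point of the change is visible in the union: code that previously
received only ps can now read _itir.ps out of the same 64-bit value while
the key (and any future fields) travel along for free.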
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmmu.c	Mon Jul 30 16:10:17 2007 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Mon Jul 30 16:38:47 2007 -0600
     1.3 @@ -232,10 +232,10 @@ void machine_tlb_insert(struct vcpu *v, 
     1.4  
     1.5      psr = ia64_clear_ic();
     1.6      if ( cl == ISIDE_TLB ) {
     1.7 -        ia64_itc(1, mtlb.ifa, mtlb.page_flags, mtlb.ps);
     1.8 +        ia64_itc(1, mtlb.ifa, mtlb.page_flags, IA64_ITIR_PS_KEY(mtlb.ps, 0));
     1.9      }
    1.10      else {
    1.11 -        ia64_itc(2, mtlb.ifa, mtlb.page_flags, mtlb.ps);
    1.12 +        ia64_itc(2, mtlb.ifa, mtlb.page_flags, IA64_ITIR_PS_KEY(mtlb.ps, 0));
    1.13      }
    1.14      ia64_set_psr(psr);
    1.15      ia64_srlz_i();
     2.1 --- a/xen/arch/ia64/vmx/vtlb.c	Mon Jul 30 16:10:17 2007 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Mon Jul 30 16:38:47 2007 -0600
     2.3 @@ -199,7 +199,7 @@ void thash_vhpt_insert(VCPU *v, u64 pte,
     2.4      } else {
     2.5          phy_pte  &= ~PAGE_FLAGS_RV_MASK;
     2.6          psr = ia64_clear_ic();
     2.7 -        ia64_itc(type + 1, va, phy_pte, itir_ps(itir));
     2.8 +        ia64_itc(type + 1, va, phy_pte, itir);
     2.9          ia64_set_psr(psr);
    2.10          ia64_srlz_i();
    2.11      }
    2.12 @@ -562,7 +562,7 @@ int thash_purge_and_insert(VCPU *v, u64 
    2.13              u64 psr;
    2.14              phy_pte  &= ~PAGE_FLAGS_RV_MASK;
    2.15              psr = ia64_clear_ic();
    2.16 -            ia64_itc(type + 1, ifa, phy_pte, ps);
    2.17 +            ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
    2.18              ia64_set_psr(psr);
    2.19              ia64_srlz_i();
    2.20              // ps < mrr.ps, this is not supported
     3.1 --- a/xen/arch/ia64/xen/faults.c	Mon Jul 30 16:10:17 2007 -0600
     3.2 +++ b/xen/arch/ia64/xen/faults.c	Mon Jul 30 16:38:47 2007 -0600
     3.3 @@ -168,7 +168,7 @@ void ia64_do_page_fault(unsigned long ad
     3.4  	unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
     3.5  	IA64FAULT fault;
     3.6  	int is_ptc_l_needed = 0;
     3.7 -	u64 logps;
     3.8 +	ia64_itir_t _itir = {.itir = itir};
     3.9  
    3.10  	if ((isr & IA64_ISR_SP)
    3.11  	    || ((isr & IA64_ISR_NA)
    3.12 @@ -190,14 +190,14 @@ void ia64_do_page_fault(unsigned long ad
    3.13  		struct p2m_entry entry;
    3.14  		unsigned long m_pteval;
    3.15  		m_pteval = translate_domain_pte(pteval, address, itir,
    3.16 -		                                &logps, &entry);
    3.17 +		                                &(_itir.itir), &entry);
    3.18  		vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
    3.19 -		                 m_pteval, pteval, logps, &entry);
    3.20 +		                 m_pteval, pteval, _itir.itir, &entry);
    3.21  		if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
    3.22  		    p2m_entry_retry(&entry)) {
    3.23  			/* dtlb has been purged in-between.  This dtlb was
    3.24  			   matching.  Undo the work.  */
    3.25 -			vcpu_flush_tlb_vhpt_range(address, logps);
    3.26 +			vcpu_flush_tlb_vhpt_range(address, _itir.ps);
    3.27  
    3.28  			// the stale entry which we inserted above
    3.29  			// may remains in tlb cache.
    3.30 @@ -209,7 +209,7 @@ void ia64_do_page_fault(unsigned long ad
    3.31  	}
    3.32  
    3.33  	if (is_ptc_l_needed)
    3.34 -		vcpu_ptc_l(current, address, logps);
    3.35 +		vcpu_ptc_l(current, address, _itir.ps);
    3.36  	if (!guest_mode(regs)) {
    3.37  		/* The fault occurs inside Xen.  */
    3.38  		if (!ia64_done_with_exception(regs)) {
     4.1 --- a/xen/arch/ia64/xen/mm.c	Mon Jul 30 16:10:17 2007 -0600
     4.2 +++ b/xen/arch/ia64/xen/mm.c	Mon Jul 30 16:38:47 2007 -0600
     4.3 @@ -448,11 +448,11 @@ gmfn_to_mfn_foreign(struct domain *d, un
     4.4  // address, convert the pte for a physical address for (possibly different)
     4.5  // Xen PAGE_SIZE and return modified pte.  (NOTE: TLB insert should use
     4.6  // PAGE_SIZE!)
     4.7 -u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps,
     4.8 +u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* itir,
     4.9                           struct p2m_entry* entry)
    4.10  {
    4.11  	struct domain *d = current->domain;
    4.12 -	ia64_itir_t itir = {.itir = itir__};
    4.13 +	ia64_itir_t _itir = {.itir = itir__};
    4.14  	u64 mask, mpaddr, pteval2;
    4.15  	u64 arflags;
    4.16  	u64 arflags2;
    4.17 @@ -461,13 +461,14 @@ u64 translate_domain_pte(u64 pteval, u64
    4.18  	pteval &= ((1UL << 53) - 1);// ignore [63:53] bits
    4.19  
    4.20  	// FIXME address had better be pre-validated on insert
    4.21 -	mask = ~itir_mask(itir.itir);
    4.22 +	mask = ~itir_mask(_itir.itir);
    4.23  	mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
    4.24  
    4.25 -	if (itir.ps > PAGE_SHIFT)
    4.26 -		itir.ps = PAGE_SHIFT;
    4.27 +	if (_itir.ps > PAGE_SHIFT)
    4.28 +		_itir.ps = PAGE_SHIFT;
    4.29  
    4.30 -	*logps = itir.ps;
    4.31 +	((ia64_itir_t*)itir)->itir = _itir.itir;/* Copy the whole register. */
    4.32 +	((ia64_itir_t*)itir)->ps = _itir.ps;	/* Overwrite ps part! */
    4.33  
    4.34  	pteval2 = lookup_domain_mpa(d, mpaddr, entry);
    4.35  
     5.1 --- a/xen/arch/ia64/xen/vcpu.c	Mon Jul 30 16:10:17 2007 -0600
     5.2 +++ b/xen/arch/ia64/xen/vcpu.c	Mon Jul 30 16:38:47 2007 -0600
     5.3 @@ -2200,23 +2200,25 @@ IA64FAULT vcpu_set_dtr(VCPU * vcpu, u64 
     5.4  
     5.5  void
     5.6  vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
     5.7 -                 u64 mp_pte, u64 logps, struct p2m_entry *entry)
     5.8 +                 u64 mp_pte, u64 itir, struct p2m_entry *entry)
     5.9  {
    5.10 +	ia64_itir_t _itir = {.itir = itir};
    5.11  	unsigned long psr;
    5.12 -	unsigned long ps = (vcpu->domain == dom0) ? logps : PAGE_SHIFT;
    5.13 +	unsigned long ps = (vcpu->domain == dom0) ? _itir.ps : PAGE_SHIFT;
    5.14  
    5.15 -	check_xen_space_overlap("itc", vaddr, 1UL << logps);
    5.16 +	check_xen_space_overlap("itc", vaddr, 1UL << _itir.ps);
    5.17  
    5.18  	// FIXME, must be inlined or potential for nested fault here!
    5.19 -	if ((vcpu->domain == dom0) && (logps < PAGE_SHIFT))
    5.20 +	if ((vcpu->domain == dom0) && (_itir.ps < PAGE_SHIFT))
    5.21  		panic_domain(NULL, "vcpu_itc_no_srlz: domain trying to use "
    5.22  		             "smaller page size!\n");
    5.23  
    5.24 -	BUG_ON(logps > PAGE_SHIFT);
    5.25 +	BUG_ON(_itir.ps > PAGE_SHIFT);
    5.26  	vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry);
    5.27  	psr = ia64_clear_ic();
    5.28  	pte &= ~(_PAGE_RV2 | _PAGE_RV1);	// Mask out the reserved bits.
    5.29 -	ia64_itc(IorD, vaddr, pte, ps);	// FIXME: look for bigger mappings
    5.30 +					// FIXME: look for bigger mappings
    5.31 +	ia64_itc(IorD, vaddr, pte, IA64_ITIR_PS_KEY(ps, _itir.key));
    5.32  	ia64_set_psr(psr);
    5.33  	// ia64_srlz_i(); // no srls req'd, will rfi later
    5.34  	if (vcpu->domain == dom0 && ((vaddr >> 61) == 7)) {
    5.35 @@ -2224,39 +2226,42 @@ vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, 
    5.36  		// addresses never get flushed.  More work needed if this
    5.37  		// ever happens.
    5.38  //printk("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
    5.39 -		if (logps > PAGE_SHIFT)
    5.40 -			vhpt_multiple_insert(vaddr, pte, logps);
    5.41 +		if (_itir.ps > PAGE_SHIFT)
    5.42 +			vhpt_multiple_insert(vaddr, pte, _itir.itir);
    5.43  		else
    5.44 -			vhpt_insert(vaddr, pte, logps << 2);
    5.45 +			vhpt_insert(vaddr, pte, _itir.itir);
    5.46  	}
    5.47  	// even if domain pagesize is larger than PAGE_SIZE, just put
    5.48  	// PAGE_SIZE mapping in the vhpt for now, else purging is complicated
    5.49 -	else
    5.50 -		vhpt_insert(vaddr, pte, PAGE_SHIFT << 2);
    5.51 +	else {
    5.52 +		_itir.ps = PAGE_SHIFT;
    5.53 +		vhpt_insert(vaddr, pte, _itir.itir);
    5.54 +	}
    5.55  }
    5.56  
    5.57  IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
    5.58  {
    5.59 -	unsigned long pteval, logps = itir_ps(itir);
    5.60 +	unsigned long pteval;
    5.61  	BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
    5.62  	struct p2m_entry entry;
    5.63 +	ia64_itir_t _itir = {.itir = itir};
    5.64  
    5.65 -	if (logps < PAGE_SHIFT)
    5.66 +	if (_itir.ps < PAGE_SHIFT)
    5.67  		panic_domain(NULL, "vcpu_itc_d: domain trying to use "
    5.68  		             "smaller page size!\n");
    5.69  
    5.70   again:
    5.71  	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
    5.72 -	pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
    5.73 +	pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
    5.74  	if (!pteval)
    5.75  		return IA64_ILLOP_FAULT;
    5.76  	if (swap_rr0)
    5.77  		set_one_rr(0x0, PSCB(vcpu, rrs[0]));
    5.78 -	vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, logps, &entry);
    5.79 +	vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, _itir.itir, &entry);
    5.80  	if (swap_rr0)
    5.81  		set_metaphysical_rr0();
    5.82  	if (p2m_entry_retry(&entry)) {
    5.83 -		vcpu_flush_tlb_vhpt_range(ifa, logps);
    5.84 +		vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
    5.85  		goto again;
    5.86  	}
    5.87  	vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), pte, itir, ifa);
    5.88 @@ -2265,25 +2270,26 @@ IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pt
    5.89  
    5.90  IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
    5.91  {
    5.92 -	unsigned long pteval, logps = itir_ps(itir);
    5.93 +	unsigned long pteval;
    5.94  	BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
    5.95  	struct p2m_entry entry;
    5.96 +	ia64_itir_t _itir = {.itir = itir};
    5.97  
    5.98 -	if (logps < PAGE_SHIFT)
    5.99 +	if (_itir.ps < PAGE_SHIFT)
   5.100  		panic_domain(NULL, "vcpu_itc_i: domain trying to use "
   5.101  		             "smaller page size!\n");
   5.102        again:
   5.103  	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
   5.104 -	pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
   5.105 +	pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
   5.106  	if (!pteval)
   5.107  		return IA64_ILLOP_FAULT;
   5.108  	if (swap_rr0)
   5.109  		set_one_rr(0x0, PSCB(vcpu, rrs[0]));
   5.110 -	vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, logps, &entry);
   5.111 +	vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, _itir.itir, &entry);
   5.112  	if (swap_rr0)
   5.113  		set_metaphysical_rr0();
   5.114  	if (p2m_entry_retry(&entry)) {
   5.115 -		vcpu_flush_tlb_vhpt_range(ifa, logps);
   5.116 +		vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
   5.117  		goto again;
   5.118  	}
   5.119  	vcpu_set_tr_entry(&PSCBX(vcpu, itlb), pte, itir, ifa);
     6.1 --- a/xen/arch/ia64/xen/vhpt.c	Mon Jul 30 16:10:17 2007 -0600
     6.2 +++ b/xen/arch/ia64/xen/vhpt.c	Mon Jul 30 16:38:47 2007 -0600
     6.3 @@ -71,7 +71,7 @@ vhpt_erase(unsigned long vhpt_maddr)
     6.4  	// initialize cache too???
     6.5  }
     6.6  
     6.7 -void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long logps)
     6.8 +void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long itir)
     6.9  {
    6.10  	struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
    6.11  	unsigned long tag = ia64_ttag (vadr);
    6.12 @@ -80,21 +80,23 @@ void vhpt_insert (unsigned long vadr, un
    6.13  	 * because the processor may support speculative VHPT walk.  */
    6.14  	vlfe->ti_tag = INVALID_TI_TAG;
    6.15  	wmb();
    6.16 -	vlfe->itir = logps;
    6.17 +	vlfe->itir = itir;
    6.18  	vlfe->page_flags = pte | _PAGE_P;
    6.19  	*(volatile unsigned long*)&vlfe->ti_tag = tag;
    6.20  }
    6.21  
    6.22 -void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)
    6.23 +void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
    6.24 +			   unsigned long itir)
    6.25  {
    6.26 -	unsigned long mask = (1L << logps) - 1;
    6.27 +	ia64_itir_t _itir = {.itir = itir};
    6.28 +	unsigned long mask = (1L << _itir.ps) - 1;
    6.29  	int i;
    6.30  
    6.31 -	if (logps-PAGE_SHIFT > 10 && !running_on_sim) {
    6.32 +	if (_itir.ps-PAGE_SHIFT > 10 && !running_on_sim) {
    6.33  		// if this happens, we may want to revisit this algorithm
    6.34  		panic("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
    6.35  	}
    6.36 -	if (logps-PAGE_SHIFT > 2) {
    6.37 +	if (_itir.ps-PAGE_SHIFT > 2) {
    6.38  		// FIXME: Should add counter here to see how often this
    6.39  		//  happens (e.g. for 16MB pages!) and determine if it
    6.40  		//  is a performance problem.  On a quick look, it takes
    6.41 @@ -109,8 +111,8 @@ void vhpt_multiple_insert(unsigned long 
    6.42  	}
    6.43  	vaddr &= ~mask;
    6.44  	pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
    6.45 -	for (i = 1L << (logps-PAGE_SHIFT); i > 0; i--) {
    6.46 -		vhpt_insert(vaddr,pte,logps<<2);
    6.47 +	for (i = 1L << (_itir.ps-PAGE_SHIFT); i > 0; i--) {
    6.48 +		vhpt_insert(vaddr, pte, _itir.itir);
    6.49  		vaddr += PAGE_SIZE;
    6.50  	}
    6.51  }
     7.1 --- a/xen/include/asm-ia64/linux-xen/asm/processor.h	Mon Jul 30 16:10:17 2007 -0600
     7.2 +++ b/xen/include/asm-ia64/linux-xen/asm/processor.h	Mon Jul 30 16:38:47 2007 -0600
     7.3 @@ -533,6 +533,20 @@ ia64_itr (__u64 target_mask, __u64 tr_nu
     7.4   * Insert a translation into the instruction and/or data translation
     7.5   * cache.
     7.6   */
     7.7 +#ifdef XEN
     7.8 +static inline void
     7.9 +ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte, __u64 itir)
    7.10 +{
    7.11 +	ia64_setreg(_IA64_REG_CR_ITIR, itir);
    7.12 +	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
    7.13 +	ia64_stop();
    7.14 +	/* as per EAS2.6, itc must be the last instruction in an instruction group */
    7.15 +	if (target_mask & 0x1)
    7.16 +		ia64_itci(pte);
    7.17 +	if (target_mask & 0x2)
    7.18 +		ia64_itcd(pte);
    7.19 +}
    7.20 +#else
    7.21  static inline void
    7.22  ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
    7.23  	  __u64 log_page_size)
    7.24 @@ -546,6 +560,7 @@ ia64_itc (__u64 target_mask, __u64 vmadd
    7.25  	if (target_mask & 0x2)
    7.26  		ia64_itcd(pte);
    7.27  }
    7.28 +#endif
    7.29  
    7.30  /*
    7.31   * Purge a range of addresses from instruction and/or data translation
     8.1 --- a/xen/include/asm-ia64/mm.h	Mon Jul 30 16:10:17 2007 -0600
     8.2 +++ b/xen/include/asm-ia64/mm.h	Mon Jul 30 16:38:47 2007 -0600
     8.3 @@ -447,7 +447,8 @@ extern unsigned long dom0vp_expose_p2m(s
     8.4  
     8.5  extern volatile unsigned long *mpt_table;
     8.6  extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
     8.7 -extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps, struct p2m_entry* entry);
     8.8 +extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__,
     8.9 +				u64* itir, struct p2m_entry* entry);
    8.10  #define machine_to_phys_mapping	mpt_table
    8.11  
    8.12  #define INVALID_M2P_ENTRY        (~0UL)
     9.1 --- a/xen/include/asm-ia64/vhpt.h	Mon Jul 30 16:10:17 2007 -0600
     9.2 +++ b/xen/include/asm-ia64/vhpt.h	Mon Jul 30 16:38:47 2007 -0600
     9.3 @@ -38,9 +38,9 @@ struct vhpt_lf_entry {
     9.4  extern void vhpt_init (void);
     9.5  extern void gather_vhpt_stats(void);
     9.6  extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
     9.7 -				 unsigned long logps);
     9.8 +				 unsigned long itir);
     9.9  extern void vhpt_insert (unsigned long vadr, unsigned long pte,
    9.10 -			 unsigned long logps);
    9.11 +			 unsigned long itir);
    9.12  void local_vhpt_flush(void);
    9.13  extern void vcpu_vhpt_flush(struct vcpu* v);
    9.14