ia64/xen-unstable

changeset 5795:0e7741276468

Clean up virtual translation code
Add some additional statistics
Signed-off-by: Matt Chapman <matthewc@hp.com>
Signed-off-by: Dan Magenheimer <dan.magenheimer@hp.com>
author djm@kirby.fc.hp.com
date Sat Jul 09 07:36:13 2005 -0700 (2005-07-09)
parents 40be48f67a33
children 89d92ce10924
files xen/arch/ia64/hypercall.c xen/arch/ia64/privop.c xen/arch/ia64/process.c xen/arch/ia64/vcpu.c xen/include/asm-ia64/ia64_int.h xen/include/asm-ia64/vcpu.h
line diff
     1.1 --- a/xen/arch/ia64/hypercall.c	Sat Jul 09 07:25:29 2005 -0700
     1.2 +++ b/xen/arch/ia64/hypercall.c	Sat Jul 09 07:36:13 2005 -0700
     1.3 @@ -19,12 +19,16 @@ extern unsigned long translate_domain_mp
     1.4  extern struct ia64_sal_retval pal_emulator_static(UINT64);
     1.5  extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
     1.6  
     1.7 +unsigned long idle_when_pending = 0;
     1.8 +unsigned long pal_halt_light_count = 0;
     1.9 +
    1.10  int
    1.11  ia64_hypercall (struct pt_regs *regs)
    1.12  {
     1.13  	struct vcpu *v = (struct vcpu *) current;
    1.14  	struct ia64_sal_retval x;
    1.15  	unsigned long *tv, *tc;
    1.16 +	int pi;
    1.17  
    1.18  	switch (regs->r2) {
    1.19  	    case FW_HYPERCALL_PAL_CALL:
    1.20 @@ -40,19 +44,21 @@ ia64_hypercall (struct pt_regs *regs)
    1.21  #endif
    1.22  		x = pal_emulator_static(regs->r28);
    1.23  		if (regs->r28 == PAL_HALT_LIGHT) {
    1.24 -#if 1
    1.25  #define SPURIOUS_VECTOR 15
    1.26 -			if (vcpu_check_pending_interrupts(v)!=SPURIOUS_VECTOR) {
    1.27 -//				int pi = vcpu_check_pending_interrupts(v);
    1.28 +			pi = vcpu_check_pending_interrupts(v);
    1.29 +			if (pi != SPURIOUS_VECTOR) {
    1.30 +				idle_when_pending++;
    1.31 +				vcpu_pend_unspecified_interrupt(v);
    1.32  //printf("idle w/int#%d pending!\n",pi);
    1.33  //this shouldn't happen, but it apparently does quite a bit!  so don't
    1.34  //allow it to happen... i.e. if a domain has an interrupt pending and
    1.35  //it tries to halt itself because it thinks it is idle, just return here
    1.36  //as deliver_pending_interrupt is called on the way out and will deliver it
    1.37  			}
    1.38 -			else
    1.39 -#endif
    1.40 -			do_sched_op(SCHEDOP_yield);
    1.41 +			else {
    1.42 +				pal_halt_light_count++;
    1.43 +				do_sched_op(SCHEDOP_yield);
    1.44 +			}
    1.45  			//break;
    1.46  		}
    1.47  		regs->r8 = x.status; regs->r9 = x.v0;
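
The hypercall.c hunk above changes the PAL_HALT_LIGHT policy: if a guest asks to halt while an interrupt is already pending, the hypervisor no longer yields the physical CPU; it counts the event and returns, letting deliver_pending_interrupt() fire on the exit path. A minimal standalone sketch of that policy, assuming check_pending() as a stand-in for vcpu_check_pending_interrupts() and with the scheduler call stubbed out:

    #include <stdio.h>

    #define SPURIOUS_VECTOR 15  /* "nothing pending" */

    /* the two statistics introduced in this hunk */
    static unsigned long idle_when_pending;
    static unsigned long pal_halt_light_count;

    /* stub: pretend no interrupt is pending */
    static int check_pending(void) { return SPURIOUS_VECTOR; }

    static void halt_light(void)
    {
        if (check_pending() != SPURIOUS_VECTOR) {
            /* halting with work pending: just return and let the
             * pending interrupt be delivered on the way out */
            idle_when_pending++;
        } else {
            pal_halt_light_count++;
            /* do_sched_op(SCHEDOP_yield) would run here */
        }
    }

    int main(void)
    {
        halt_light();
        printf("idle_when_pending=%lu pal_halt_light_count=%lu\n",
               idle_when_pending, pal_halt_light_count);
        return 0;
    }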
     2.1 --- a/xen/arch/ia64/privop.c	Sat Jul 09 07:25:29 2005 -0700
     2.2 +++ b/xen/arch/ia64/privop.c	Sat Jul 09 07:36:13 2005 -0700
     2.3 @@ -1033,6 +1033,36 @@ void zero_privop_addrs(void)
     2.4  }
     2.5  #endif
     2.6  
     2.7 +extern unsigned long dtlb_translate_count;
     2.8 +extern unsigned long tr_translate_count;
     2.9 +extern unsigned long phys_translate_count;
    2.10 +extern unsigned long vhpt_translate_count;
    2.11 +extern unsigned long lazy_cover_count;
    2.12 +extern unsigned long idle_when_pending;
    2.13 +extern unsigned long pal_halt_light_count;
    2.14 +
    2.15 +int dump_misc_stats(char *buf)
    2.16 +{
    2.17 +	char *s = buf;
    2.18 +	s += sprintf(s,"Virtual TR translations: %lu\n",tr_translate_count);
    2.19 +	s += sprintf(s,"Virtual VHPT translations: %lu\n",vhpt_translate_count);
    2.20 +	s += sprintf(s,"Virtual DTLB translations: %lu\n",dtlb_translate_count);
    2.21 +	s += sprintf(s,"Physical translations: %lu\n",phys_translate_count);
    2.22 +	s += sprintf(s,"Idle when pending: %lu\n",idle_when_pending);
    2.23 +	s += sprintf(s,"PAL_HALT_LIGHT (no pending): %lu\n",pal_halt_light_count);
    2.24 +	s += sprintf(s,"Lazy covers: %lu\n",lazy_cover_count);
    2.25 +	return s - buf;
    2.26 +}
    2.27 +
    2.28 +void zero_misc_stats(void)
    2.29 +{
    2.30 +	dtlb_translate_count = 0;
    2.31 +	tr_translate_count = 0;
    2.32 +	phys_translate_count = 0;
    2.33 +	vhpt_translate_count = 0;
    2.34 +	lazy_cover_count = 0;
    2.35 +}
    2.36 +
    2.37  int dump_hyperprivop_counts(char *buf)
    2.38  {
    2.39  	int i;
    2.40 @@ -1072,6 +1102,7 @@ int dump_privop_counts_to_user(char __us
    2.41  #ifdef PRIVOP_ADDR_COUNT
    2.42  	n += dump_privop_addrs(buf + n);
    2.43  #endif
    2.44 +	n += dump_misc_stats(buf + n);
    2.45  	if (len < TMPBUFLEN) return -1;
    2.46  	if (__copy_to_user(ubuf,buf,n)) return -1;
    2.47  	return n;
    2.48 @@ -1086,6 +1117,7 @@ int zero_privop_counts_to_user(char __us
    2.49  #ifdef PRIVOP_ADDR_COUNT
    2.50  	zero_privop_addrs();
    2.51  #endif
    2.52 +	zero_misc_stats();
    2.53  	zero_reflect_counts();
    2.54  	if (len < TMPBUFLEN) return -1;
    2.55  	if (__copy_to_user(ubuf,buf,n)) return -1;
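
dump_misc_stats() above follows the counter-dump idiom used throughout privop.c: sprintf() returns the number of characters written, so advancing s after each call and returning s - buf gives the total length that dump_privop_counts_to_user() later hands to __copy_to_user(). A self-contained illustration of the idiom, with placeholder counters:

    #include <stdio.h>

    /* placeholder counters, not the Xen globals */
    static unsigned long foo_count = 3, bar_count = 7;

    static int dump_stats(char *buf)
    {
        char *s = buf;

        s += sprintf(s, "Foo events: %lu\n", foo_count);
        s += sprintf(s, "Bar events: %lu\n", bar_count);
        return s - buf;  /* total bytes written */
    }

    int main(void)
    {
        char buf[128];
        int n = dump_stats(buf);

        printf("%.*s(%d bytes)\n", n, buf, n);
        return 0;
    }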
     3.1 --- a/xen/arch/ia64/process.c	Sat Jul 09 07:25:29 2005 -0700
     3.2 +++ b/xen/arch/ia64/process.c	Sat Jul 09 07:36:13 2005 -0700
     3.3 @@ -76,8 +76,6 @@ void schedule_tail(struct vcpu *next)
     3.4  #endif // CONFIG_VTI
     3.5  }
     3.6  
     3.7 -extern TR_ENTRY *match_tr(struct vcpu *v, unsigned long ifa);
     3.8 -
     3.9  void tdpfoo(void) { }
    3.10  
    3.11  // given a domain virtual address, pte and pagesize, extract the metaphysical
    3.12 @@ -260,140 +258,29 @@ printf("*#*#*#* about to deliver early t
    3.13  			++pending_false_positive;
    3.14  	}
    3.15  }
    3.16 +unsigned long lazy_cover_count = 0;
    3.17  
    3.18  int handle_lazy_cover(struct vcpu *v, unsigned long isr, struct pt_regs *regs)
    3.19  {
    3.20  	if (!PSCB(v,interrupt_collection_enabled)) {
    3.21 -		if (isr & IA64_ISR_IR) {
    3.22 -//			printf("Handling lazy cover\n");
    3.23 -			PSCB(v,ifs) = regs->cr_ifs;
    3.24 -			PSCB(v,incomplete_regframe) = 1;
    3.25 -			regs->cr_ifs = 0;
    3.26 -			return(1); // retry same instruction with cr.ifs off
    3.27 -		}
    3.28 +		PSCB(v,ifs) = regs->cr_ifs;
    3.29 +		PSCB(v,incomplete_regframe) = 1;
    3.30 +		regs->cr_ifs = 0;
    3.31 +		lazy_cover_count++;
    3.32 +		return(1); // retry same instruction with cr.ifs off
    3.33  	}
    3.34  	return(0);
    3.35  }
    3.36  
    3.37 -#define IS_XEN_ADDRESS(d,a) ((a >= d->xen_vastart) && (a <= d->xen_vaend))
    3.38 -
    3.39 -void xen_handle_domain_access(unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
    3.40 +void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
    3.41  {
    3.42 -	struct domain *d = (struct domain *) current->domain;
    3.43 -	struct domain *ed = (struct vcpu *) current;
    3.44 -	TR_ENTRY *trp;
    3.45 -	unsigned long psr = regs->cr_ipsr, mask, flags;
    3.46  	unsigned long iip = regs->cr_iip;
    3.47  	// FIXME should validate address here
    3.48 -	unsigned long pteval, mpaddr, ps;
    3.49 -	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
    3.50 -	unsigned long match_dtlb(struct vcpu *,unsigned long, unsigned long *, unsigned long *);
    3.51 +	unsigned long pteval;
    3.52 +	unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
    3.53  	IA64FAULT fault;
    3.54  
    3.55 -// NEED TO HANDLE THREE CASES:
    3.56 -// 1) domain is in metaphysical mode
    3.57 -// 2) domain address is in TR
    3.58 -// 3) domain address is not in TR (reflect data miss)
    3.59 -
    3.60 -		// got here trying to read a privop bundle
    3.61 -	     	//if (d->metaphysical_mode) {
    3.62 -     	if (PSCB(current,metaphysical_mode) && !(address>>61)) {  //FIXME
    3.63 -		if (d == dom0) {
    3.64 -			if (address < dom0_start || address >= dom0_start + dom0_size) {
    3.65 -				printk("xen_handle_domain_access: out-of-bounds"
    3.66 -				   "dom0 mpaddr %p! continuing...\n",mpaddr);
    3.67 -				tdpfoo();
    3.68 -			}
    3.69 -		}
    3.70 -		pteval = lookup_domain_mpa(d,address);
    3.71 -		//FIXME: check return value?
    3.72 -		// would be nice to have a counter here
    3.73 -		vcpu_itc_no_srlz(ed,2,address,pteval,-1UL,PAGE_SHIFT);
    3.74 -		return;
    3.75 -	}
    3.76 -if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n",address,iip);
    3.77 -		
    3.78 -	if (trp = match_tr(current,address)) {
    3.79 -		// FIXME address had better be pre-validated on insert
    3.80 -		pteval = translate_domain_pte(trp->page_flags,address,trp->itir);
    3.81 -		vcpu_itc_no_srlz(current,6,address,pteval,-1UL,(trp->itir>>2)&0x3f);
    3.82 -		return;
    3.83 -	}
    3.84 -	// if we are fortunate enough to have it in the 1-entry TLB...
    3.85 -	if (pteval = match_dtlb(ed,address,&ps,NULL)) {
    3.86 -		vcpu_itc_no_srlz(ed,6,address,pteval,-1UL,ps);
    3.87 -		return;
    3.88 -	}
    3.89 -	if (ia64_done_with_exception(regs)) {
    3.90 -//if (!(uacnt++ & 0x3ff)) printk("*** xen_handle_domain_access: successfully handled cnt=%d iip=%p, addr=%p...\n",uacnt,iip,address);
    3.91 -			return;
    3.92 -	}
    3.93 -	else {
    3.94 -		// should never happen.  If it does, region 0 addr may
    3.95 -		// indicate a bad xen pointer
    3.96 -		printk("*** xen_handle_domain_access: exception table"
    3.97 -                       " lookup failed, iip=%p, addr=%p, spinning...\n",
    3.98 -			iip,address);
    3.99 -		panic_domain(regs,"*** xen_handle_domain_access: exception table"
   3.100 -                       " lookup failed, iip=%p, addr=%p, spinning...\n",
   3.101 -			iip,address);
   3.102 -	}
   3.103 -}
   3.104 -
   3.105 -void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
   3.106 -{
   3.107 -	struct domain *d = (struct domain *) current->domain;
   3.108 -	TR_ENTRY *trp;
   3.109 -	unsigned long psr = regs->cr_ipsr, mask, flags;
   3.110 -	unsigned long iip = regs->cr_iip;
   3.111 -	// FIXME should validate address here
   3.112 -	unsigned long iha, pteval, mpaddr;
   3.113 -	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
   3.114 -	unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
   3.115 -	unsigned long vector;
   3.116 -	IA64FAULT fault;
   3.117 -
   3.118 -
   3.119 -	//The right way is put in VHPT and take another miss!
   3.120 -
   3.121 -	// weak attempt to avoid doing both I/D tlb insert to avoid
   3.122 -	// problems for privop bundle fetch, doesn't work, deal with later
   3.123 -	if (IS_XEN_ADDRESS(d,iip) && !IS_XEN_ADDRESS(d,address)) {
   3.124 -		xen_handle_domain_access(address, isr, regs, itir);
   3.125 -
   3.126 -		return;
   3.127 -	}
   3.128 -
   3.129 -	// FIXME: no need to pass itir in to this routine as we need to
   3.130 -	// compute the virtual itir anyway (based on domain's RR.ps)
   3.131 -	// AND ACTUALLY reflect_interruption doesn't use it anyway!
   3.132 -	itir = vcpu_get_itir_on_fault(current,address);
   3.133 -
   3.134 -	if (PSCB(current,metaphysical_mode) && (is_data || !(address>>61))) {  //FIXME
   3.135 -		// FIXME should validate mpaddr here
   3.136 -		if (d == dom0) {
   3.137 -			if (address < dom0_start || address >= dom0_start + dom0_size) {
   3.138 -				printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, iip=%p! continuing...\n",address,iip);
   3.139 -				printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n",address,current->vcpu_info->arch.iip);
   3.140 -				tdpfoo();
   3.141 -			}
   3.142 -		}
   3.143 -		pteval = lookup_domain_mpa(d,address);
   3.144 -		// FIXME, must be inlined or potential for nested fault here!
   3.145 -		vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,PAGE_SHIFT);
   3.146 -		return;
   3.147 -	}
   3.148 -	if (trp = match_tr(current,address)) {
   3.149 -		// FIXME address had better be pre-validated on insert
   3.150 -		pteval = translate_domain_pte(trp->page_flags,address,trp->itir);
   3.151 -		vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(trp->itir>>2)&0x3f);
   3.152 -		return;
   3.153 -	}
   3.154 -
   3.155 -	if (handle_lazy_cover(current, isr, regs)) return;
   3.156 -if (!(address>>61)) {
   3.157 -panic_domain(0,"ia64_do_page_fault: @%p???, iip=%p, b0=%p, itc=%p (spinning...)\n",address,iip,regs->b0,ia64_get_itc());
   3.158 -}
   3.159 +	if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, isr, regs)) return;
   3.160  	if ((isr & IA64_ISR_SP)
   3.161  	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
   3.162  	{
   3.163 @@ -406,37 +293,29 @@ panic_domain(0,"ia64_do_page_fault: @%p?
   3.164  		return;
   3.165  	}
   3.166  
   3.167 -	if (vcpu_get_rr_ve(current, address) && (PSCB(current,pta) & IA64_PTA_VE))
   3.168 +	fault = vcpu_translate(current,address,is_data,&pteval,&itir);
   3.169 +	if (fault == IA64_NO_FAULT)
   3.170 +	{
   3.171 +		pteval = translate_domain_pte(pteval,address,itir);
   3.172 +		vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(itir>>2)&0x3f);
   3.173 +		return;
   3.174 +	}
   3.175 +	else if (IS_VMM_ADDRESS(iip))
   3.176  	{
   3.177 -		if (PSCB(current,pta) & IA64_PTA_VF)
   3.178 -		{
   3.179 -			/* long format VHPT - not implemented */
   3.180 -			vector = is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
   3.181 +		if (!ia64_done_with_exception(regs)) {
   3.182 +			// should never happen.  If it does, region 0 addr may
   3.183 +			// indicate a bad xen pointer
   3.184 +			printk("*** xen_handle_domain_access: exception table"
   3.185 +			       " lookup failed, iip=%p, addr=%p, spinning...\n",
   3.186 +				iip,address);
   3.187 +			panic_domain(regs,"*** xen_handle_domain_access: exception table"
   3.188 +			       " lookup failed, iip=%p, addr=%p, spinning...\n",
   3.189 +				iip,address);
   3.190  		}
   3.191 -		else
   3.192 -		{
   3.193 -			/* short format VHPT */
   3.194 -			vcpu_thash(current, address, &iha);
   3.195 -			if (__copy_from_user(&pteval, iha, sizeof(pteval)) == 0)
   3.196 -			{
   3.197 -				/* 
   3.198 -				 * Optimisation: this VHPT walker aborts on not-present pages
   3.199 -				 * instead of inserting a not-present translation, this allows
   3.200 -				 * vectoring directly to the miss handler.
    3.201 -				 */
   3.202 -				if (pteval & _PAGE_P)
   3.203 -				{
   3.204 -					pteval = translate_domain_pte(pteval,address,itir);
   3.205 -					vcpu_itc_no_srlz(current,is_data?6:1,address,pteval,-1UL,(itir>>2)&0x3f);
   3.206 -					return;
   3.207 -				}
   3.208 -				else vector = is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
   3.209 -			}
   3.210 -			else vector = IA64_VHPT_TRANS_VECTOR;
   3.211 -		}
   3.212 +		return;
   3.213  	}
   3.214 -	else vector = is_data ? IA64_ALT_DATA_TLB_VECTOR : IA64_ALT_INST_TLB_VECTOR;
   3.215 -	reflect_interruption(address, isr, itir, regs, vector);
   3.216 +
   3.217 +	reflect_interruption(address, isr, 0, regs, fault);
   3.218  }
   3.219  
   3.220  void
   3.221 @@ -865,6 +744,6 @@ printf("*** Handled privop masquerading 
   3.222  		while(vector);
   3.223  		return;
   3.224  	}
   3.225 -	if (check_lazy_cover && handle_lazy_cover(v, isr, regs)) return;
   3.226 +	if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, isr, regs)) return;
   3.227  	reflect_interruption(ifa,isr,itir,regs,vector);
   3.228  }
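
In the rewritten ia64_do_page_fault() above, the separate instruction- and data-side cases collapse into one path keyed off is_data, decoded from cr.isr: ISR.x is set on execute faults, so its absence marks a data access. A small demonstration of that decoding; ISR.x is taken as bit 32, the value in the Linux/ia64 headers, which should be treated as an assumption of this sketch:

    #include <stdio.h>

    #define IA64_ISR_X_BIT 32  /* assumed: ISR.x, set on execute faults */

    int main(void)
    {
        unsigned long isr_exec = 1UL << IA64_ISR_X_BIT; /* execute fault */
        unsigned long isr_load = 0UL;                   /* data fault */
        unsigned long is_data;

        is_data = !((isr_exec >> IA64_ISR_X_BIT) & 1UL);
        printf("execute fault: is_data=%lu\n", is_data); /* 0 */

        is_data = !((isr_load >> IA64_ISR_X_BIT) & 1UL);
        printf("data fault:    is_data=%lu\n", is_data); /* 1 */
        return 0;
    }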
     4.1 --- a/xen/arch/ia64/vcpu.c	Sat Jul 09 07:25:29 2005 -0700
     4.2 +++ b/xen/arch/ia64/vcpu.c	Sat Jul 09 07:36:13 2005 -0700
     4.3 @@ -53,9 +53,16 @@ extern void privop_count_addr(unsigned l
     4.4  #define	PRIVOP_COUNT_ADDR(x,y) do {} while (0)
     4.5  #endif
     4.6  
     4.7 +unsigned long dtlb_translate_count = 0;
     4.8 +unsigned long tr_translate_count = 0;
     4.9 +unsigned long phys_translate_count = 0;
    4.10 +
    4.11  unsigned long vcpu_verbose = 0;
    4.12  #define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
    4.13  
    4.14 +extern TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa);
    4.15 +extern TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa);
    4.16 +
    4.17  /**************************************************************************
    4.18   VCPU general register access routines
    4.19  **************************************************************************/
    4.20 @@ -224,6 +231,9 @@ IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UI
    4.21  //else printf("but nothing pending\n");
    4.22  	}
    4.23  #endif
    4.24 +	if (enabling_interrupts &&
    4.25 +		vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
    4.26 +			PSCB(vcpu,pending_interruption) = 1;
    4.27  	return IA64_NO_FAULT;
    4.28  }
    4.29  
    4.30 @@ -267,6 +277,9 @@ IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UIN
    4.31  			return IA64_EXTINT_VECTOR;
    4.32  	}
    4.33  #endif
    4.34 +	if (enabling_interrupts &&
    4.35 +		vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
    4.36 +			PSCB(vcpu,pending_interruption) = 1;
    4.37  	return IA64_NO_FAULT;
    4.38  }
    4.39  
    4.40 @@ -532,6 +545,11 @@ IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT6
    4.41   VCPU interrupt control register access routines
    4.42  **************************************************************************/
    4.43  
    4.44 +void vcpu_pend_unspecified_interrupt(VCPU *vcpu)
    4.45 +{
    4.46 +	PSCB(vcpu,pending_interruption) = 1;
    4.47 +}
    4.48 +
    4.49  void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
    4.50  {
    4.51  	if (vector & ~0xff) {
    4.52 @@ -1241,28 +1259,101 @@ IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 v
    4.53  	return (IA64_ILLOP_FAULT);
    4.54  }
    4.55  
    4.56 +#define itir_ps(itir)	(((itir) >> 2) & 0x3f)
    4.57 +#define itir_mask(itir) (~((1UL << itir_ps(itir)) - 1))
    4.58 +
    4.59 +unsigned long vhpt_translate_count = 0;
    4.60 +
    4.61 +IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir)
    4.62 +{
    4.63 +	unsigned long pta, pta_mask, iha, pte, ps;
    4.64 +	TR_ENTRY *trp;
    4.65 +	ia64_rr rr;
    4.66 +
    4.67 +	if (!(address >> 61)) {
    4.68 +		if (!PSCB(vcpu,metaphysical_mode))
    4.69 +			panic_domain(0,"vcpu_translate: bad address %p\n", address);
    4.70 +
    4.71 +		*pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
    4.72 +		*itir = PAGE_SHIFT << 2;
    4.73 +		phys_translate_count++;
    4.74 +		return IA64_NO_FAULT;
    4.75 +	}
    4.76 +
    4.77 +	/* check translation registers */
    4.78 +	if ((trp = match_tr(vcpu,address))) {
    4.79 +		tr_translate_count++;
    4.80 +		*pteval = trp->page_flags;
    4.81 +		*itir = trp->itir;
    4.82 +		return IA64_NO_FAULT;
    4.83 +	}
    4.84 +
    4.85 +	/* check 1-entry TLB */
    4.86 +	if ((trp = match_dtlb(vcpu,address))) {
    4.87 +		dtlb_translate_count++;
    4.88 +		*pteval = trp->page_flags;
    4.89 +		*itir = trp->itir;
    4.90 +		return IA64_NO_FAULT;
    4.91 +	}
    4.92 +
    4.93 +	/* check guest VHPT */
    4.94 +	pta = PSCB(vcpu,pta);
    4.95 +	rr.rrval = PSCB(vcpu,rrs)[address>>61];
    4.96 +	if (rr.ve && (pta & IA64_PTA_VE))
    4.97 +	{
    4.98 +		if (pta & IA64_PTA_VF)
    4.99 +		{
   4.100 +			/* long format VHPT - not implemented */
   4.101 +			return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
   4.102 +		}
   4.103 +		else
   4.104 +		{
   4.105 +			/* short format VHPT */
   4.106 +
   4.107 +			/* avoid recursively walking VHPT */
   4.108 +			pta_mask = (itir_mask(pta) << 3) >> 3;
   4.109 +			if (((address ^ pta) & pta_mask) == 0)
   4.110 +				return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
   4.111 +
   4.112 +			vcpu_thash(vcpu, address, &iha);
   4.113 +			if (__copy_from_user(&pte, (void *)iha, sizeof(pte)) != 0)
   4.114 +				return IA64_VHPT_TRANS_VECTOR;
   4.115 +
   4.116 +			/* 
   4.117 +			 * Optimisation: this VHPT walker aborts on not-present pages
   4.118 +			 * instead of inserting a not-present translation, this allows
   4.119 +			 * vectoring directly to the miss handler.
    4.120 +			 */
   4.121 +			if (pte & _PAGE_P)
   4.122 +			{
   4.123 +				*pteval = pte;
   4.124 +				*itir = vcpu_get_itir_on_fault(vcpu,address);
   4.125 +				vhpt_translate_count++;
   4.126 +				return IA64_NO_FAULT;
   4.127 +			}
   4.128 +			return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
   4.129 +		}
   4.130 +	}
   4.131 +	return (is_data ? IA64_ALT_DATA_TLB_VECTOR : IA64_ALT_INST_TLB_VECTOR);
   4.132 +}
   4.133 +
   4.134  IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
   4.135  {
   4.136 -	extern TR_ENTRY *match_tr(VCPU *,UINT64);
   4.137 -	unsigned long match_dtlb(VCPU *, unsigned long, unsigned long *, unsigned long *);
   4.138 -	TR_ENTRY *trp;
   4.139 -	UINT64 mask, pteval, mp_pte, ps;
   4.140 +	UINT64 pteval, itir, mask;
   4.141 +	IA64FAULT fault;
   4.142  
   4.143 -extern unsigned long privop_trace;
   4.144 -	if (pteval = match_dtlb(vcpu, vadr, &ps, &mp_pte) && (mp_pte != -1UL)) {
   4.145 -		mask = (1L << ps) - 1;
   4.146 -		*padr = ((mp_pte & _PAGE_PPN_MASK) & ~mask) | (vadr & mask);
   4.147 -		verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",vadr,PSCB(vcpu,iip),*padr);
   4.148 +	fault = vcpu_translate(vcpu, vadr, 1, &pteval, &itir);
   4.149 +	if (fault == IA64_NO_FAULT)
   4.150 +	{
   4.151 +		mask = itir_mask(itir);
   4.152 +		*padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
   4.153  		return (IA64_NO_FAULT);
   4.154  	}
   4.155 -	if (trp=match_tr(current,vadr)) {
   4.156 -		mask = (1L << trp->ps) - 1;
   4.157 -		*padr = ((trp->ppn << 12) & ~mask) | (vadr & mask);
   4.158 -		verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",vadr,PSCB(vcpu,iip),*padr);
   4.159 -		return (IA64_NO_FAULT);
   4.160 +	else
   4.161 +	{
   4.162 +		PSCB(vcpu,tmp[0]) = vadr;       // save ifa in vcpu structure, then specify IA64_FORCED_IFA
   4.163 +		return (fault | IA64_FORCED_IFA);
   4.164  	}
   4.165 -	verbose("vcpu_tpa addr=%p, @%p, forcing data miss\n",vadr,PSCB(vcpu,iip));
   4.166 -	return vcpu_force_data_miss(vcpu, vadr);
   4.167  }
   4.168  
   4.169  IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
   4.170 @@ -1614,15 +1705,12 @@ void vcpu_itc_no_srlz(VCPU *vcpu, UINT64
   4.171  
   4.172  // NOTE: returns a physical pte, NOT a "metaphysical" pte, so do not check
   4.173  // the physical address contained for correctness
   4.174 -unsigned long match_dtlb(VCPU *vcpu, unsigned long ifa, unsigned long *ps, unsigned long *mp_pte)
   4.175 +TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa)
   4.176  {
   4.177  	TR_ENTRY *trp;
   4.178  
   4.179 -	if (trp = vcpu_match_tr_entry(vcpu,&vcpu->arch.dtlb,ifa,1)) {
   4.180 -		if (ps) *ps = trp->ps;
   4.181 -		if (mp_pte) *mp_pte = vcpu->arch.dtlb_pte;
   4.182 -		return (trp->page_flags);
   4.183 -	}
    4.184 +	if ((trp = vcpu_match_tr_entry(vcpu,&vcpu->arch.dtlb,ifa,1)))
   4.185 +		return (&vcpu->arch.dtlb);
   4.186  	return 0UL;
   4.187  }
   4.188  
   4.189 @@ -1679,11 +1767,12 @@ IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vad
   4.190  	// TODO: Only allowed for current vcpu
   4.191  	UINT64 mpaddr, ps;
   4.192  	IA64FAULT fault;
   4.193 -	unsigned long match_dtlb(VCPU *, unsigned long, unsigned long *, unsigned long *);
   4.194 +	TR_ENTRY *trp;
   4.195  	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
   4.196  	unsigned long pteval, dom_imva;
   4.197  
   4.198 -	if (pteval = match_dtlb(vcpu, vadr, NULL, NULL)) {
   4.199 +	if ((trp = match_dtlb(vcpu,vadr))) {
   4.200 +		pteval = trp->page_flags;
   4.201  		dom_imva = __va(pteval & _PFN_MASK);
   4.202  		ia64_fc(dom_imva);
   4.203  		return IA64_NO_FAULT;
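
The itir_ps()/itir_mask() helpers added in the vcpu.c hunk drive the new vcpu_tpa(): the page-size field sits in itir bits 2..7, and the physical address is composed by taking the page-frame bits from the pte and the within-page offset from the virtual address. A worked example with made-up values; _PAGE_PPN_MASK is assumed to match the Linux/ia64 definition:

    #include <stdio.h>

    #define itir_ps(itir)   (((itir) >> 2) & 0x3f)
    #define itir_mask(itir) (~((1UL << itir_ps(itir)) - 1))

    /* assumed value; the real definition lives in asm/pgtable.h */
    #define _PAGE_PPN_MASK  0x0003fffffffff000UL

    int main(void)
    {
        unsigned long itir = 14UL << 2;  /* ps=14 -> 16KB page */
        unsigned long mask = itir_mask(itir);
        unsigned long pteval = 0x0000000012345000UL; /* invented pte */
        unsigned long vadr = 0x2000000000003abcUL;   /* invented vadr */

        /* same composition as the new vcpu_tpa() */
        unsigned long padr = (pteval & _PAGE_PPN_MASK & mask)
                           | (vadr & ~mask);

        printf("ps=%lu mask=%016lx padr=%016lx\n",
               itir_ps(itir), mask, padr);
        return 0;
    }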
     5.1 --- a/xen/include/asm-ia64/ia64_int.h	Sat Jul 09 07:25:29 2005 -0700
     5.2 +++ b/xen/include/asm-ia64/ia64_int.h	Sat Jul 09 07:36:13 2005 -0700
     5.3 @@ -3,11 +3,11 @@
     5.4  
     5.5  //#include "ia64.h"
     5.6  
     5.7 -#define	IA64_VHPT_TRANS_VECTOR			0x0000	/* UNUSED */
     5.8 +#define	IA64_VHPT_TRANS_VECTOR			0x0000
     5.9  #define IA64_INST_TLB_VECTOR			0x0400
    5.10  #define IA64_DATA_TLB_VECTOR			0x0800
    5.11 -#define IA64_ALT_INST_TLB_VECTOR		0x0c00	/* UNUSED */
    5.12 -#define IA64_ALT_DATA_TLB_VECTOR		0x1000	/* UNUSED */
    5.13 +#define IA64_ALT_INST_TLB_VECTOR		0x0c00
    5.14 +#define IA64_ALT_DATA_TLB_VECTOR		0x1000
    5.15  #define IA64_DATA_NESTED_TLB_VECTOR		0x1400
    5.16  #define IA64_INST_KEY_MISS_VECTOR		0x1800
    5.17  #define IA64_DATA_KEY_MISS_VECTOR		0x1c00
    5.18 @@ -33,12 +33,11 @@
    5.19  #define IA64_TAKEN_BRANCH_TRAP_VECTOR		0x5f00
    5.20  #define IA64_SINGLE_STEP_TRAP_VECTOR		0x6000
    5.21  
    5.22 -#define	IA64_NO_FAULT		0x0000
    5.23 -#define	IA64_RFI_IN_PROGRESS	0x0001
    5.24 -#define IA64_RETRY              0x0002
    5.25 +#define	IA64_NO_FAULT		0x0001
    5.26 +#define	IA64_RFI_IN_PROGRESS	0x0002
    5.27 +#define IA64_RETRY              0x0003
    5.28  #ifdef  CONFIG_VTI
    5.29 -#define IA64_FAULT		0x0001
    5.30 -#define IA64_INJ_FAULT		0x0005
    5.31 +#define IA64_FAULT		0x0002
    5.32  #endif      //CONFIG_VTI
    5.33  #define IA64_FORCED_IFA         0x0004
    5.34  #define	IA64_ILLOP_FAULT	(IA64_GENEX_VECTOR | 0x00)
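
The renumbering above is what makes the vcpu_translate() interface work: fault codes and interruption-vector offsets now share one namespace, and because IA64_VHPT_TRANS_VECTOR is 0x0000 (no longer unused), IA64_NO_FAULT has to move off zero. ia64_do_page_fault() can then pass any non-NO_FAULT return value straight to reflect_interruption() as the vector. A small demonstration of the convention; translate() is a made-up stand-in for vcpu_translate(), with the constants copied from the header:

    #include <stdio.h>

    #define IA64_VHPT_TRANS_VECTOR   0x0000
    #define IA64_ALT_DATA_TLB_VECTOR 0x1000
    #define IA64_NO_FAULT            0x0001

    /* One return value signals both "hit" and "which miss vector":
     * vector offsets are multiples of 0x100, never 0x0001. */
    static unsigned long translate(int hit)
    {
        return hit ? IA64_NO_FAULT : IA64_ALT_DATA_TLB_VECTOR;
    }

    int main(void)
    {
        unsigned long fault = translate(0);

        if (fault == IA64_NO_FAULT)
            printf("insert translation and retry\n");
        else
            printf("reflect vector 0x%lx to the guest\n", fault);
        return 0;
    }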
     6.1 --- a/xen/include/asm-ia64/vcpu.h	Sat Jul 09 07:25:29 2005 -0700
     6.2 +++ b/xen/include/asm-ia64/vcpu.h	Sat Jul 09 07:36:13 2005 -0700
     6.3 @@ -135,6 +135,7 @@ extern IA64FAULT vcpu_ptc_g(VCPU *vcpu, 
     6.4  extern IA64FAULT vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
     6.5  extern IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
     6.6  extern IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
     6.7 +extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir);
     6.8  extern IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
     6.9  /* misc */
    6.10  extern IA64FAULT vcpu_rfi(VCPU *vcpu);
    6.11 @@ -150,5 +151,4 @@ extern void vcpu_itc_no_srlz(VCPU *vcpu,
    6.12  extern UINT64 vcpu_get_tmp(VCPU *, UINT64);
    6.13  extern void vcpu_set_tmp(VCPU *, UINT64, UINT64);
    6.14  
    6.15 -
    6.16  #endif