direct-io.hg

changeset 10364:b8711fcb1e95

[IA64] rewrite priv_handle_op()

Introduce vcpu_get_domain_bundle() and replace the __get_domain_bundle()
call in priv_handle_op() with it.
priv_handle_op() used __get_domain_bundle() to fetch the domain's bundle,
which accesses the guest ip directly through a guest virtual address; that
may result in a data TLB miss and cause trouble.
This patch also cleans up vcpu_translate().
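
For illustration only (not part of the changeset): a minimal sketch of the
guest-ip translation that vcpu_get_domain_bundle() performs once a matching
TR entry is found. The standalone helper name tr_to_gpip is hypothetical;
the arithmetic mirrors the "found:" path in the vcpu.c hunk below, where
tr.pte.ppn is a physical page number in 4KB units and tr.ps is the log2
page size of the translation.

	#include <stdint.h>

	/* Combine a TR entry's physical page number (in 4KB units) with the
	 * page offset of the guest ip: (ppn >> (ps - 12)) << ps recovers the
	 * base of the 2^ps-sized page, and the low ps bits of gip supply the
	 * offset within that page. */
	static uint64_t tr_to_gpip(uint64_t ppn, unsigned int ps, uint64_t gip)
	{
		uint64_t page_base = (ppn >> (ps - 12)) << ps;
		uint64_t offset    = gip & ((1UL << ps) - 1);
		return page_base | offset;
	}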

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Sat Jun 03 11:16:34 2006 -0600 (2006-06-03)
parents 25483d9b55d4
children f8ab23b4f704
files xen/arch/ia64/xen/privop.c xen/arch/ia64/xen/vcpu.c xen/include/asm-ia64/vcpu.h
line diff
     1.1 --- a/xen/arch/ia64/xen/privop.c	Wed May 31 16:07:47 2006 -0600
     1.2 +++ b/xen/arch/ia64/xen/privop.c	Sat Jun 03 11:16:34 2006 -0600
     1.3 @@ -615,16 +615,11 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
     1.4  	int x6;
     1.5  	
     1.6  	// make a local copy of the bundle containing the privop
     1.7 -#if 1
     1.8 -	bundle = __get_domain_bundle(iip);
     1.9 -	if (!bundle.i64[0] && !bundle.i64[1])
    1.10 -#else
    1.11 -	if (__copy_from_user(&bundle,iip,sizeof(bundle)))
    1.12 -#endif
    1.13 -	{
    1.14 -//printf("*** priv_handle_op: privop bundle at 0x%lx not mapped, retrying\n",iip);
    1.15 -		return vcpu_force_data_miss(vcpu,regs->cr_iip);
    1.16 +	if (!vcpu_get_domain_bundle(vcpu, regs, iip, &bundle)) {
    1.17 +		//return vcpu_force_data_miss(vcpu, regs->cr_iip);
    1.18 +		return vcpu_force_inst_miss(vcpu, regs->cr_iip);
    1.19  	}
    1.20 +
    1.21  #if 0
    1.22  	if (iip==0xa000000100001820) {
    1.23  		static int firstpagefault = 1;
     2.1 --- a/xen/arch/ia64/xen/vcpu.c	Wed May 31 16:07:47 2006 -0600
     2.2 +++ b/xen/arch/ia64/xen/vcpu.c	Sat Jun 03 11:16:34 2006 -0600
     2.3 @@ -17,6 +17,7 @@
     2.4  #include <asm/vmx_vcpu.h>
     2.5  #include <asm/vhpt.h>
     2.6  #include <asm/tlbflush.h>
     2.7 +#include <asm/privop.h>
     2.8  #include <xen/event.h>
     2.9  #include <asm/vmx_phy_mode.h>
    2.10  
    2.11 @@ -29,6 +30,7 @@ extern void setfpreg (unsigned long regn
    2.12  
    2.13  extern void panic_domain(struct pt_regs *, const char *, ...);
    2.14  extern unsigned long translate_domain_mpaddr(unsigned long);
    2.15 +extern IA64_BUNDLE __get_domain_bundle(UINT64);
    2.16  
    2.17  typedef	union {
    2.18  	struct ia64_psr ia64_psr;
    2.19 @@ -1184,15 +1186,26 @@ UINT64 vcpu_timer_pending_early(VCPU *vc
    2.20  Privileged operation emulation routines
    2.21  **************************************************************************/
    2.22  
    2.23 +static void
    2.24 +vcpu_force_tlb_miss(VCPU* vcpu, UINT64 ifa)
    2.25 +{
    2.26 +	PSCB(vcpu, ifa) = ifa;
    2.27 +	PSCB(vcpu, itir) = vcpu_get_itir_on_fault(vcpu, ifa);
    2.28 +	vcpu_thash(current, ifa, &PSCB(current, iha));
    2.29 +}
    2.30 +
    2.31 +IA64FAULT vcpu_force_inst_miss(VCPU *vcpu, UINT64 ifa)
    2.32 +{
    2.33 +	vcpu_force_tlb_miss(vcpu, ifa);
    2.34 +	return (vcpu_get_rr_ve(vcpu, ifa) ? IA64_INST_TLB_VECTOR : IA64_ALT_INST_TLB_VECTOR);
    2.35 +}
    2.36 +
    2.37  IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
    2.38  {
    2.39 -	PSCB(vcpu,ifa) = ifa;
    2.40 -	PSCB(vcpu,itir) = vcpu_get_itir_on_fault(vcpu,ifa);
    2.41 -	vcpu_thash(current, ifa, &PSCB(current,iha));
    2.42 -	return (vcpu_get_rr_ve(vcpu,ifa) ? IA64_DATA_TLB_VECTOR : IA64_ALT_DATA_TLB_VECTOR);
    2.43 +	vcpu_force_tlb_miss(vcpu, ifa);
    2.44 +	return (vcpu_get_rr_ve(vcpu, ifa) ? IA64_DATA_TLB_VECTOR : IA64_ALT_DATA_TLB_VECTOR);
    2.45  }
    2.46  
    2.47 -
    2.48  IA64FAULT vcpu_rfi(VCPU *vcpu)
    2.49  {
    2.50  	// TODO: Only allowed for current vcpu
    2.51 @@ -1303,12 +1316,117 @@ static inline int vcpu_match_tr_entry(TR
    2.52  	return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
    2.53  }
    2.54  
    2.55 +static TR_ENTRY*
    2.56 +vcpu_tr_lookup(VCPU* vcpu, unsigned long va, UINT64 rid, BOOLEAN is_data)
    2.57 +{
    2.58 +	unsigned int* regions;
    2.59 +	TR_ENTRY *trp;
    2.60 +	int tr_max;
    2.61 +	int i;
    2.62 +
    2.63 +	if (is_data) {
    2.64 +		// data
    2.65 +		regions = &vcpu->arch.dtr_regions;
    2.66 +		trp = vcpu->arch.dtrs;
    2.67 +		tr_max = sizeof(vcpu->arch.dtrs)/sizeof(vcpu->arch.dtrs[0]);
    2.68 +	} else {
    2.69 +		// instruction
    2.70 +		regions = &vcpu->arch.itr_regions;
    2.71 +		trp = vcpu->arch.itrs;
    2.72 +		tr_max = sizeof(vcpu->arch.itrs)/sizeof(vcpu->arch.itrs[0]);
    2.73 +	}
    2.74 +
    2.75 +	if (!vcpu_quick_region_check(*regions, va)) {
    2.76 +		return NULL;
    2.77 +	}
    2.78 +	for (i = 0; i < tr_max; i++, trp++) {
    2.79 +		if (vcpu_match_tr_entry(trp, va, rid)) {
    2.80 +			return trp;
    2.81 +		}
    2.82 +	}
    2.83 +	return NULL;
    2.84 +}
    2.85 +
    2.86 +// return value
    2.87 +// 0: failure
    2.88 +// 1: success
    2.89 +int
    2.90 +vcpu_get_domain_bundle(VCPU* vcpu, REGS* regs, UINT64 gip, IA64_BUNDLE* bundle)
    2.91 +{
    2.92 +	UINT64 gpip;	// guest pseudo-physical ip
    2.93 +
    2.94 +#if 0
    2.95 +	// Currently xen doesn't track the psr.it bit,
    2.96 +	// so it assumes psr.it is always 1.
    2.97 +	if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
    2.98 +		gpip = gip;
    2.99 +	} else
   2.100 +#endif
   2.101 +	{
   2.102 +		unsigned long region = REGION_NUMBER(gip);
   2.103 +		unsigned long rr = PSCB(vcpu, rrs)[region];
   2.104 +		unsigned long rid = rr & RR_RID_MASK;
   2.105 +		BOOLEAN swap_rr0;
   2.106 +		TR_ENTRY* trp;
   2.107 +
   2.108 +	// vcpu->arch.{i,d}tlb are volatile, so copy the value
   2.109 +	// into the local variable tr before use.
   2.110 +		TR_ENTRY tr;
   2.111 +
   2.112 +		trp = vcpu_tr_lookup(vcpu, gip, rid, 0);
   2.113 +		if (trp != NULL) {
   2.114 +			tr = *trp;
   2.115 +			goto found;
   2.116 +		}
   2.117 +	// When a bundle fetch previously failed, an itlb miss was reflected.
   2.118 +	// The last itc.i value is cached in PSCBX(vcpu, itlb).
   2.119 +		tr = PSCBX(vcpu, itlb);
   2.120 +		if (vcpu_match_tr_entry(&tr, gip, rid)) {
   2.121 +			//DPRINTK("%s gip 0x%lx gpip 0x%lx\n", __func__, gip, gpip);
   2.122 +			goto found;
   2.123 +		}
   2.124 +		trp = vcpu_tr_lookup(vcpu, gip, rid, 1);
   2.125 +		if (trp != NULL) {
   2.126 +			tr = *trp;
   2.127 +			goto found;
   2.128 +		}
   2.129 +#if 0
   2.130 +		tr = PSCBX(vcpu, dtlb);
   2.131 +		if (vcpu_match_tr_entry(&tr, gip, rid)) {
   2.132 +			goto found;
   2.133 +		}
   2.134 +#endif
   2.135 +
   2.136 +		// Try to access gip through the guest virtual address.
   2.137 +		// This may cause a tlb miss. See vcpu_translate(). Be careful!
   2.138 +		swap_rr0 = (!region && PSCB(vcpu, metaphysical_mode));
   2.139 +		if (swap_rr0) {
   2.140 +			set_one_rr(0x0, PSCB(vcpu, rrs[0]));
   2.141 +		}
   2.142 +		*bundle = __get_domain_bundle(gip);
   2.143 +		if (swap_rr0) {
   2.144 +			set_metaphysical_rr0();
   2.145 +		}
   2.146 +		if (bundle->i64[0] == 0 && bundle->i64[1] == 0) {
   2.147 +			DPRINTK("%s gip 0x%lx\n", __func__, gip);
   2.148 +			return 0;
   2.149 +		}
   2.150 +		return 1;
   2.151 +
   2.152 +	found:
   2.153 +		gpip = ((tr.pte.ppn >> (tr.ps - 12)) << tr.ps) |
   2.154 +			(gip & ((1UL << tr.ps) - 1));
   2.155 +	}
   2.156 +
   2.157 +	*bundle = *((IA64_BUNDLE*)__va(__gpa_to_mpa(vcpu->domain, gpip)));
   2.158 +	return 1;
   2.159 +}
   2.160 +
   2.161  IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
   2.162  {
   2.163  	unsigned long region = address >> 61;
   2.164  	unsigned long pta, rid, rr;
   2.165  	union pte_flags pte;
   2.166 -	int i;
   2.167  	TR_ENTRY *trp;
   2.168  
   2.169  	if (PSCB(vcpu,metaphysical_mode) && !(!is_data && region)) {
   2.170 @@ -1349,28 +1467,22 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
   2.171  	rr = PSCB(vcpu,rrs)[region];
   2.172  	rid = rr & RR_RID_MASK;
   2.173  	if (is_data) {
   2.174 -		if (vcpu_quick_region_check(vcpu->arch.dtr_regions,address)) {
   2.175 -			for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++) {
   2.176 -				if (vcpu_match_tr_entry(trp,address,rid)) {
   2.177 -					*pteval = trp->pte.val;
   2.178 -					*itir = trp->itir;
   2.179 -					tr_translate_count++;
   2.180 -					return IA64_NO_FAULT;
   2.181 -				}
   2.182 -			}
   2.183 +		trp = vcpu_tr_lookup(vcpu, address, rid, 1);
   2.184 +		if (trp != NULL) {
   2.185 +			*pteval = trp->pte.val;
   2.186 +			*itir = trp->itir;
   2.187 +			tr_translate_count++;
   2.188 +			return IA64_NO_FAULT;
   2.189  		}
   2.190  	}
   2.191  	// FIXME?: check itr's for data accesses too, else bad things happen?
   2.192  	/* else */ {
   2.193 -		if (vcpu_quick_region_check(vcpu->arch.itr_regions,address)) {
   2.194 -			for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++) {
   2.195 -				if (vcpu_match_tr_entry(trp,address,rid)) {
   2.196 -					*pteval = trp->pte.val;
   2.197 -					*itir = trp->itir;
   2.198 -					tr_translate_count++;
   2.199 -					return IA64_NO_FAULT;
   2.200 -				}
   2.201 -			}
   2.202 +		trp = vcpu_tr_lookup(vcpu, address, rid, 0);
   2.203 +		if (trp != NULL) {
   2.204 +			*pteval = trp->pte.val;
   2.205 +			*itir = trp->itir;
   2.206 +			tr_translate_count++;
   2.207 +			return IA64_NO_FAULT;
   2.208  		}
   2.209  	}
   2.210  
     3.1 --- a/xen/include/asm-ia64/vcpu.h	Wed May 31 16:07:47 2006 -0600
     3.2 +++ b/xen/include/asm-ia64/vcpu.h	Sat Jun 03 11:16:34 2006 -0600
     3.3 @@ -151,9 +151,12 @@ extern IA64FAULT vcpu_ptc_g(VCPU *vcpu, 
     3.4  extern IA64FAULT vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
     3.5  extern IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
     3.6  extern IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
     3.7 +union U_IA64_BUNDLE;
     3.8 +extern int vcpu_get_domain_bundle(VCPU *vcpu, REGS *regs, UINT64 gip, union U_IA64_BUNDLE *bundle);
     3.9  extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data,
    3.10  				UINT64 *pteval, UINT64 *itir, UINT64 *iha);
    3.11  extern IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
    3.12 +extern IA64FAULT vcpu_force_inst_miss(VCPU *vcpu, UINT64 ifa);
    3.13  extern IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa);
    3.14  extern IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr);
    3.15  /* misc */