ia64/xen-unstable

changeset 12392:d6e40274f923

[IA64] faults.c indentation

Run arch/ia64/xen/faults.c through Lindent to make it manageable.

Signed-off-by: Jes Sorensen <jes@sgi.com>
author awilliam@xenbuild.aw
date Fri Nov 10 12:37:34 2006 -0700 (2006-11-10)
parents 881f5b951553
children dbfd94442e46
files xen/arch/ia64/xen/faults.c
line diff
     1.1 --- a/xen/arch/ia64/xen/faults.c	Fri Nov 10 12:04:55 2006 -0700
     1.2 +++ b/xen/arch/ia64/xen/faults.c	Fri Nov 10 12:37:34 2006 -0700
     1.3 @@ -44,54 +44,58 @@ extern IA64FAULT ia64_hypercall(struct p
     1.4  			IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
     1.5  			IA64_PSR_IT | IA64_PSR_BN)
     1.6  
     1.7 -#define	DELIVER_PSR_CLR	(IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
     1.8 -			IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI |	\
     1.9 -			IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
    1.10 -			IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
    1.11 -			IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
    1.12 -			IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
    1.13 -
    1.14 +#define	DELIVER_PSR_CLR	(IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH |	\
    1.15 +			 IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI |	\
    1.16 +			 IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB |	\
    1.17 +			 IA64_PSR_CPL| IA64_PSR_MC | IA64_PSR_IS |	\
    1.18 +			 IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |	\
    1.19 +			 IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
    1.20  
    1.21  extern void do_ssc(unsigned long ssc, struct pt_regs *regs);
    1.22  
    1.23  // should never panic domain... if it does, stack may have been overrun
    1.24 -void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
    1.25 +void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs,
    1.26 +                                   unsigned long vector)
    1.27  {
    1.28  	struct vcpu *v = current;
    1.29  
    1.30 -	if (!(PSCB(v,ipsr) & IA64_PSR_DT)) {
    1.31 -		panic_domain(regs,"psr.dt off, trying to deliver nested dtlb!\n");
    1.32 +	if (!(PSCB(v, ipsr) & IA64_PSR_DT)) {
    1.33 +		panic_domain(regs,
    1.34 +		             "psr.dt off, trying to deliver nested dtlb!\n");
    1.35  	}
    1.36  	vector &= ~0xf;
    1.37  	if (vector != IA64_DATA_TLB_VECTOR &&
    1.38  	    vector != IA64_ALT_DATA_TLB_VECTOR &&
    1.39  	    vector != IA64_VHPT_TRANS_VECTOR) {
    1.40 -		panic_domain(regs,"psr.ic off, delivering fault=%lx,ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
    1.41 -		             vector,regs->cr_ipsr,regs->cr_iip,PSCB(v,ifa),isr,PSCB(v,iip));
    1.42 +		panic_domain(regs, "psr.ic off, delivering fault=%lx,"
    1.43 +		             "ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
    1.44 +		             vector, regs->cr_ipsr, regs->cr_iip, PSCB(v, ifa),
    1.45 +		             isr, PSCB(v, iip));
    1.46  	}
    1.47  }
    1.48  
    1.49 -void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
    1.50 +void reflect_interruption(unsigned long isr, struct pt_regs *regs,
    1.51 +                          unsigned long vector)
    1.52  {
    1.53  	struct vcpu *v = current;
    1.54  
    1.55 -	if (!PSCB(v,interrupt_collection_enabled))
    1.56 -		check_bad_nested_interruption(isr,regs,vector);
    1.57 -	PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
    1.58 -	PSCB(v,precover_ifs) = regs->cr_ifs;
    1.59 +	if (!PSCB(v, interrupt_collection_enabled))
    1.60 +		check_bad_nested_interruption(isr, regs, vector);
    1.61 +	PSCB(v, unat) = regs->ar_unat;	// not sure if this is really needed?
    1.62 +	PSCB(v, precover_ifs) = regs->cr_ifs;
    1.63  	vcpu_bsw0(v);
    1.64 -	PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
    1.65 -	PSCB(v,isr) = isr;
    1.66 -	PSCB(v,iip) = regs->cr_iip;
    1.67 -	PSCB(v,ifs) = 0;
    1.68 -	PSCB(v,incomplete_regframe) = 0;
    1.69 +	PSCB(v, ipsr) = vcpu_get_ipsr_int_state(v, regs->cr_ipsr);
    1.70 +	PSCB(v, isr) = isr;
    1.71 +	PSCB(v, iip) = regs->cr_iip;
    1.72 +	PSCB(v, ifs) = 0;
    1.73 +	PSCB(v, incomplete_regframe) = 0;
    1.74  
    1.75 -	regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
    1.76 +	regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
    1.77  	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    1.78  	regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;
    1.79  
    1.80  	v->vcpu_info->evtchn_upcall_mask = 1;
    1.81 -	PSCB(v,interrupt_collection_enabled) = 0;
    1.82 +	PSCB(v, interrupt_collection_enabled) = 0;
    1.83  
    1.84  	perfc_incra(slow_reflect, vector >> 8);
    1.85  }
    1.86 @@ -105,13 +109,15 @@ void reflect_extint(struct pt_regs *regs
    1.87  	static int first_extint = 1;
    1.88  
    1.89  	if (first_extint) {
    1.90 -		printk("Delivering first extint to domain: isr=0x%lx, iip=0x%lx\n", isr, regs->cr_iip);
    1.91 +		printk("Delivering first extint to domain: isr=0x%lx, "
    1.92 +		       "iip=0x%lx\n", isr, regs->cr_iip);
    1.93  		first_extint = 0;
    1.94  	}
    1.95  	if (vcpu_timer_pending_early(v))
    1.96 -printk("*#*#*#* about to deliver early timer to domain %d!!!\n",v->domain->domain_id);
    1.97 -	PSCB(current,itir) = 0;
    1.98 -	reflect_interruption(isr,regs,IA64_EXTINT_VECTOR);
    1.99 +		printk("*#*#*#* about to deliver early timer to domain %d!!\n",
   1.100 +		       v->domain->domain_id);
   1.101 +	PSCB(current, itir) = 0;
   1.102 +	reflect_interruption(isr, regs, IA64_EXTINT_VECTOR);
   1.103  }
   1.104  
   1.105  void reflect_event(struct pt_regs *regs)
   1.106 @@ -128,24 +134,25 @@ void reflect_event(struct pt_regs *regs)
   1.107  	if (!event_pending(v))
   1.108  		return;
   1.109  
   1.110 -	if (!PSCB(v,interrupt_collection_enabled))
   1.111 -		printk("psr.ic off, delivering event, ipsr=%lx,iip=%lx,isr=%lx,viip=0x%lx\n",
   1.112 +	if (!PSCB(v, interrupt_collection_enabled))
   1.113 +		printk("psr.ic off, delivering event, ipsr=%lx,iip=%lx,"
   1.114 +		       "isr=%lx,viip=0x%lx\n",
   1.115  		       regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
   1.116 -	PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
   1.117 -	PSCB(v,precover_ifs) = regs->cr_ifs;
   1.118 +	PSCB(v, unat) = regs->ar_unat;	// not sure if this is really needed?
   1.119 +	PSCB(v, precover_ifs) = regs->cr_ifs;
   1.120  	vcpu_bsw0(v);
   1.121 -	PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
   1.122 -	PSCB(v,isr) = isr;
   1.123 -	PSCB(v,iip) = regs->cr_iip;
   1.124 -	PSCB(v,ifs) = 0;
   1.125 -	PSCB(v,incomplete_regframe) = 0;
   1.126 +	PSCB(v, ipsr) = vcpu_get_ipsr_int_state(v, regs->cr_ipsr);
   1.127 +	PSCB(v, isr) = isr;
   1.128 +	PSCB(v, iip) = regs->cr_iip;
   1.129 +	PSCB(v, ifs) = 0;
   1.130 +	PSCB(v, incomplete_regframe) = 0;
   1.131  
   1.132  	regs->cr_iip = v->arch.event_callback_ip;
   1.133  	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   1.134  	regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;
   1.135  
   1.136  	v->vcpu_info->evtchn_upcall_mask = 1;
   1.137 -	PSCB(v,interrupt_collection_enabled) = 0;
   1.138 +	PSCB(v, interrupt_collection_enabled) = 0;
   1.139  }
   1.140  
   1.141  // ONLY gets called from ia64_leave_kernel
   1.142 @@ -159,25 +166,25 @@ void deliver_pending_interrupt(struct pt
   1.143  	if (!is_idle_domain(d) && user_mode(regs)) {
   1.144  		if (vcpu_deliverable_interrupts(v))
   1.145  			reflect_extint(regs);
   1.146 -		else if (PSCB(v,pending_interruption))
   1.147 +		else if (PSCB(v, pending_interruption))
   1.148  			++pending_false_positive;
   1.149  	}
   1.150  }
   1.151  
   1.152 -static int
   1.153 -handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
   1.154 +static int handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
   1.155  {
   1.156 -	if (!PSCB(v,interrupt_collection_enabled)) {
   1.157 -		PSCB(v,ifs) = regs->cr_ifs;
   1.158 -		PSCB(v,incomplete_regframe) = 1;
   1.159 +	if (!PSCB(v, interrupt_collection_enabled)) {
   1.160 +		PSCB(v, ifs) = regs->cr_ifs;
   1.161 +		PSCB(v, incomplete_regframe) = 1;
   1.162  		regs->cr_ifs = 0;
   1.163  		perfc_incrc(lazy_cover);
   1.164 -		return(1); // retry same instruction with cr.ifs off
   1.165 +		return 1;	// retry same instruction with cr.ifs off
   1.166  	}
   1.167 -	return(0);
   1.168 +	return 0;
   1.169  }
   1.170  
   1.171 -void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
   1.172 +void ia64_do_page_fault(unsigned long address, unsigned long isr,
   1.173 +                        struct pt_regs *regs, unsigned long itir)
   1.174  {
   1.175  	unsigned long iip = regs->cr_iip, iha;
   1.176  	// FIXME should validate address here
   1.177 @@ -188,25 +195,27 @@ void ia64_do_page_fault (unsigned long a
   1.178  	u64 logps;
   1.179  
   1.180  	if ((isr & IA64_ISR_SP)
   1.181 -	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
   1.182 -	{
   1.183 +	    || ((isr & IA64_ISR_NA)
   1.184 +		&& (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
   1.185  		/*
   1.186 -		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
   1.187 -		 * bit in the psr to ensure forward progress.  (Target register will get a
   1.188 -		 * NaT for ld.s, lfetch will be canceled.)
   1.189 +		 * This fault was due to a speculative load or lfetch.fault,
   1.190 +		 * set the "ed" bit in the psr to ensure forward progress.
   1.191 +		 * (Target register will get a NaT for ld.s, lfetch will be
   1.192 +		 * canceled.)
   1.193  		 */
   1.194  		ia64_psr(regs)->ed = 1;
   1.195  		return;
   1.196  	}
   1.197  
   1.198   again:
   1.199 -	fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha);
   1.200 +	fault = vcpu_translate(current, address, is_data, &pteval,
   1.201 +	                       &itir, &iha);
   1.202  	if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
   1.203  		struct p2m_entry entry;
   1.204  		unsigned long m_pteval;
   1.205  		m_pteval = translate_domain_pte(pteval, address, itir,
   1.206  		                                &logps, &entry);
   1.207 -		vcpu_itc_no_srlz(current, (is_data? 2: 1) | 4, 
   1.208 +		vcpu_itc_no_srlz(current, (is_data ? 2 : 1) | 4,
   1.209  		                 address, m_pteval, pteval, logps, &entry);
   1.210  		if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
   1.211  		    p2m_entry_retry(&entry)) {
   1.212 @@ -225,17 +234,18 @@ void ia64_do_page_fault (unsigned long a
   1.213  
   1.214  	if (is_ptc_l_needed)
   1.215  		vcpu_ptc_l(current, address, logps);
   1.216 -	if (!user_mode (regs)) {
   1.217 +	if (!user_mode(regs)) {
   1.218  		/* The fault occurs inside Xen.  */
   1.219  		if (!ia64_done_with_exception(regs)) {
   1.220  			// should never happen.  If it does, region 0 addr may
   1.221  			// indicate a bad xen pointer
   1.222  			printk("*** xen_handle_domain_access: exception table"
   1.223 -			       " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
   1.224 -			       iip, address);
   1.225 -			panic_domain(regs,"*** xen_handle_domain_access: exception table"
   1.226 -				     " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
   1.227 -				     iip, address);
   1.228 +			       " lookup failed, iip=0x%lx, addr=0x%lx, "
   1.229 +			       "spinning...\n", iip, address);
   1.230 +			panic_domain(regs, "*** xen_handle_domain_access: "
   1.231 +			             "exception table lookup failed, "
   1.232 +			             "iip=0x%lx, addr=0x%lx, spinning...\n",
   1.233 +			             iip, address);
   1.234  		}
   1.235  		return;
   1.236  	}
   1.237 @@ -243,45 +253,47 @@ void ia64_do_page_fault (unsigned long a
   1.238  	if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs))
   1.239  		return;
   1.240  
   1.241 -	if (!PSCB(current,interrupt_collection_enabled)) {
   1.242 -		check_bad_nested_interruption(isr,regs,fault);
   1.243 +	if (!PSCB(current, interrupt_collection_enabled)) {
   1.244 +		check_bad_nested_interruption(isr, regs, fault);
   1.245  		//printk("Delivering NESTED DATA TLB fault\n");
   1.246  		fault = IA64_DATA_NESTED_TLB_VECTOR;
   1.247 -		regs->cr_iip = ((unsigned long) PSCBX(current,iva) + fault) & ~0xffUL;
   1.248 -		regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   1.249 +		regs->cr_iip =
   1.250 +		    ((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
   1.251 +		regs->cr_ipsr =
   1.252 +		    (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   1.253  		// NOTE: nested trap must NOT pass PSCB address
   1.254  		//regs->r31 = (unsigned long) &PSCB(current);
   1.255  		perfc_incra(slow_reflect, fault >> 8);
   1.256  		return;
   1.257  	}
   1.258  
   1.259 -	PSCB(current,itir) = itir;
   1.260 -	PSCB(current,iha) = iha;
   1.261 -	PSCB(current,ifa) = address;
   1.262 +	PSCB(current, itir) = itir;
   1.263 +	PSCB(current, iha) = iha;
   1.264 +	PSCB(current, ifa) = address;
   1.265  	reflect_interruption(isr, regs, fault);
   1.266  }
   1.267  
   1.268  fpswa_interface_t *fpswa_interface = 0;
   1.269  
   1.270 -void trap_init (void)
   1.271 +void trap_init(void)
   1.272  {
   1.273  	if (ia64_boot_param->fpswa)
   1.274 -		/* FPSWA fixup: make the interface pointer a virtual address: */
   1.275 +		/* FPSWA fixup: make the interface pointer a virtual address */
   1.276  		fpswa_interface = __va(ia64_boot_param->fpswa);
   1.277  	else
   1.278  		printk("No FPSWA supported.\n");
   1.279  }
   1.280  
   1.281  static fpswa_ret_t
   1.282 -fp_emulate (int fp_fault, void *bundle, unsigned long *ipsr,
   1.283 -	    unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
   1.284 -	    unsigned long *ifs, struct pt_regs *regs)
   1.285 +fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
   1.286 +           unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
   1.287 +           unsigned long *ifs, struct pt_regs *regs)
   1.288  {
   1.289  	fp_state_t fp_state;
   1.290  	fpswa_ret_t ret;
   1.291  
   1.292  	if (!fpswa_interface)
   1.293 -		return ((fpswa_ret_t) {-1, 0, 0, 0});
   1.294 +		return (fpswa_ret_t) {-1, 0, 0, 0};
   1.295  
   1.296  	memset(&fp_state, 0, sizeof(fp_state_t));
   1.297  
   1.298 @@ -290,7 +302,7 @@ fp_emulate (int fp_fault, void *bundle, 
   1.299  	 * kernel, so set those bits in the mask and set the low volatile
   1.300  	 * pointer to point to these registers.
   1.301  	 */
   1.302 -	fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */
   1.303 +	fp_state.bitmask_low64 = 0xfc0;	/* bit6..bit11 */
   1.304  
   1.305  	fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
   1.306  	/*
   1.307 @@ -304,8 +316,8 @@ fp_emulate (int fp_fault, void *bundle, 
   1.308  	 *      unsigned long    *pifs,
   1.309  	 *      void             *fp_state);
   1.310  	 */
   1.311 -	ret = (*fpswa_interface->fpswa)(fp_fault, bundle,
   1.312 -					ipsr, fpsr, isr, pr, ifs, &fp_state);
   1.313 +	ret = (*fpswa_interface->fpswa) (fp_fault, bundle,
   1.314 +	                                 ipsr, fpsr, isr, pr, ifs, &fp_state);
   1.315  
   1.316  	return ret;
   1.317  }
   1.318 @@ -314,7 +326,7 @@ fp_emulate (int fp_fault, void *bundle, 
   1.319   * Handle floating-point assist faults and traps for domain.
   1.320   */
   1.321  unsigned long
   1.322 -handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
   1.323 +handle_fpu_swa(int fp_fault, struct pt_regs *regs, unsigned long isr)
   1.324  {
   1.325  	struct vcpu *v = current;
   1.326  	IA64_BUNDLE bundle;
   1.327 @@ -324,7 +336,8 @@ handle_fpu_swa (int fp_fault, struct pt_
   1.328  	fault_ip = regs->cr_iip;
   1.329  	/*
   1.330  	 * When the FP trap occurs, the trapping instruction is completed.
   1.331 -	 * If ipsr.ri == 0, there is the trapping instruction in previous bundle.
   1.332 +	 * If ipsr.ri == 0, there is the trapping instruction in previous
   1.333 +	 * bundle.
   1.334  	 */
   1.335  	if (!fp_fault && (ia64_psr(regs)->ri == 0))
   1.336  		fault_ip -= 16;
   1.337 @@ -332,8 +345,7 @@ handle_fpu_swa (int fp_fault, struct pt_
   1.338  	if (VMX_DOMAIN(current)) {
   1.339  		if (IA64_RETRY == __vmx_get_domain_bundle(fault_ip, &bundle))
   1.340  			return IA64_RETRY;
   1.341 -	}
   1.342 -	else 
   1.343 +	} else
   1.344  		bundle = __get_domain_bundle(fault_ip);
   1.345  
   1.346  	if (!bundle.i64[0] && !bundle.i64[1]) {
   1.347 @@ -348,20 +360,20 @@ handle_fpu_swa (int fp_fault, struct pt_
   1.348  	if (ret.status) {
   1.349  		PSCBX(v, fpswa_ret) = ret;
   1.350  		printk("%s(%s): fp_emulate() returned %ld\n",
   1.351 -		       __FUNCTION__, fp_fault?"fault":"trap", ret.status);
   1.352 +		       __FUNCTION__, fp_fault ? "fault" : "trap", ret.status);
   1.353  	}
   1.354  
   1.355  	return ret.status;
   1.356  }
   1.357  
   1.358  void
   1.359 -ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
   1.360 -	    unsigned long iim, unsigned long itir, unsigned long arg5,
   1.361 -	    unsigned long arg6, unsigned long arg7, unsigned long stack)
   1.362 +ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
   1.363 +           unsigned long iim, unsigned long itir, unsigned long arg5,
   1.364 +           unsigned long arg6, unsigned long arg7, unsigned long stack)
   1.365  {
   1.366 -	struct pt_regs *regs = (struct pt_regs *) &stack;
   1.367 +	struct pt_regs *regs = (struct pt_regs *)&stack;
   1.368  	unsigned long code;
   1.369 -	static const char * const reason[] = {
   1.370 +	static const char *const reason[] = {
   1.371  		"IA-64 Illegal Operation fault",
   1.372  		"IA-64 Privileged Operation fault",
   1.373  		"IA-64 Privileged Register fault",
   1.374 @@ -374,14 +386,15 @@ ia64_fault (unsigned long vector, unsign
   1.375  		"Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
   1.376  	};
   1.377  
   1.378 -	printk("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, ipsr=0x%016lx, isr=0x%016lx\n",
   1.379 -	       vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);
   1.380 -
   1.381 +	printk("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, "
   1.382 +	       "ipsr=0x%016lx, isr=0x%016lx\n", vector, ifa,
   1.383 +	       regs->cr_iip, regs->cr_ipsr, isr);
   1.384  
   1.385 -	if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
   1.386 +	if ((isr & IA64_ISR_NA) &&
   1.387 +	    ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
   1.388  		/*
   1.389 -		 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
   1.390 -		 * the lfetch.
   1.391 +		 * This fault was due to lfetch.fault, set "ed" bit in the
   1.392 +		 * psr to cancel the lfetch.
   1.393  		 */
   1.394  		ia64_psr(regs)->ed = 1;
   1.395  		printk("ia64_fault: handled lfetch.fault\n");
   1.396 @@ -389,108 +402,107 @@ ia64_fault (unsigned long vector, unsign
   1.397  	}
   1.398  
   1.399  	switch (vector) {
   1.400 -	    case 0:
   1.401 +	case 0:
   1.402  		printk("VHPT Translation.\n");
   1.403  		break;
   1.404 -	  
   1.405 -	    case 4:
   1.406 +
   1.407 +	case 4:
   1.408  		printk("Alt DTLB.\n");
   1.409  		break;
   1.410 -	  
   1.411 -	    case 6:
   1.412 +
   1.413 +	case 6:
   1.414  		printk("Instruction Key Miss.\n");
   1.415  		break;
   1.416  
   1.417 -	    case 7: 
   1.418 +	case 7:
   1.419  		printk("Data Key Miss.\n");
   1.420  		break;
   1.421  
   1.422 -	    case 8: 
   1.423 +	case 8:
   1.424  		printk("Dirty-bit.\n");
   1.425  		break;
   1.426  
   1.427 -	    case 20:
   1.428 +	case 20:
   1.429  		printk("Page Not Found.\n");
   1.430  		break;
   1.431  
   1.432 -	    case 21:
   1.433 +	case 21:
   1.434  		printk("Key Permission.\n");
   1.435  		break;
   1.436  
   1.437 -	    case 22:
   1.438 +	case 22:
   1.439  		printk("Instruction Access Rights.\n");
   1.440  		break;
   1.441  
   1.442 -	    case 24: /* General Exception */
   1.443 +	case 24:	/* General Exception */
   1.444  		code = (isr >> 4) & 0xf;
   1.445  		printk("General Exception: %s%s.\n", reason[code],
   1.446 -		        (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
   1.447 +		       (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
   1.448  		                       " (data access)") : "");
   1.449  		if (code == 8) {
   1.450 -# ifdef CONFIG_IA64_PRINT_HAZARDS
   1.451 -			printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
   1.452 -			       current->comm, current->pid,
   1.453 -			       regs->cr_iip + ia64_psr(regs)->ri,
   1.454 -			       regs->pr);
   1.455 -# endif
   1.456 +#ifdef CONFIG_IA64_PRINT_HAZARDS
   1.457 +			printk("%s[%d]: possible hazard @ ip=%016lx "
   1.458 +			       "(pr = %016lx)\n", current->comm, current->pid,
   1.459 +			       regs->cr_iip + ia64_psr(regs)->ri, regs->pr);
   1.460 +#endif
   1.461  			printk("ia64_fault: returning on hazard\n");
   1.462  			return;
   1.463  		}
   1.464  		break;
   1.465  
   1.466 -	    case 25:
   1.467 +	case 25:
   1.468  		printk("Disabled FP-Register.\n");
   1.469  		break;
   1.470  
   1.471 -	    case 26:
   1.472 +	case 26:
   1.473  		printk("NaT consumption.\n");
   1.474  		break;
   1.475  
   1.476 -	    case 29:
   1.477 +	case 29:
   1.478  		printk("Debug.\n");
   1.479  		break;
   1.480  
   1.481 -	    case 30:
   1.482 +	case 30:
   1.483  		printk("Unaligned Reference.\n");
   1.484  		break;
   1.485  
   1.486 -	    case 31:
   1.487 +	case 31:
   1.488  		printk("Unsupported data reference.\n");
   1.489  		break;
   1.490  
   1.491 -	    case 32:
   1.492 +	case 32:
   1.493  		printk("Floating-Point Fault.\n");
   1.494  		break;
   1.495  
   1.496 -	    case 33:
   1.497 +	case 33:
   1.498  		printk("Floating-Point Trap.\n");
   1.499  		break;
   1.500  
   1.501 -	    case 34:
   1.502 +	case 34:
   1.503  		printk("Lower Privilege Transfer Trap.\n");
   1.504  		break;
   1.505  
   1.506 -	    case 35:
   1.507 +	case 35:
   1.508  		printk("Taken Branch Trap.\n");
   1.509  		break;
   1.510  
   1.511 -	    case 36:
   1.512 +	case 36:
   1.513  		printk("Single Step Trap.\n");
   1.514  		break;
   1.515 -    
   1.516 -	    case 45:
   1.517 +
   1.518 +	case 45:
   1.519  		printk("IA-32 Exception.\n");
   1.520  		break;
   1.521  
   1.522 -	    case 46:
   1.523 +	case 46:
   1.524  		printk("IA-32 Intercept.\n");
   1.525  		break;
   1.526  
   1.527 -	    case 47:
   1.528 +	case 47:
   1.529  		printk("IA-32 Interrupt.\n");
   1.530  		break;
   1.531  
   1.532 -	    default:
   1.533 +	default:
   1.534  		printk("Fault %lu\n", vector);
   1.535  		break;
   1.536  	}
   1.537 @@ -501,65 +513,65 @@ ia64_fault (unsigned long vector, unsign
   1.538  
   1.539  unsigned long running_on_sim = 0;
   1.540  
   1.541 -
   1.542  /* Also read in hyperprivop.S  */
   1.543  int first_break = 0;
   1.544  
   1.545  void
   1.546 -ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
   1.547 +ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
   1.548 +                  unsigned long iim)
   1.549  {
   1.550  	struct domain *d = current->domain;
   1.551  	struct vcpu *v = current;
   1.552  	IA64FAULT vector;
   1.553  
   1.554 -	if (iim == 0x80001 || iim == 0x80002) {	//FIXME: don't hardcode constant
   1.555 -		do_ssc(vcpu_get_gr(current,36), regs);
   1.556 -	} 
   1.557 +	if (iim == 0x80001 || iim == 0x80002) { //FIXME: don't hardcode constant
   1.558 +		do_ssc(vcpu_get_gr(current, 36), regs);
   1.559 +	}
   1.560  #ifdef CRASH_DEBUG
   1.561  	else if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs)) {
   1.562  		if (iim == 0)
   1.563  			show_registers(regs);
   1.564 -		debugger_trap_fatal(0 /* don't care */, regs);
   1.565 -	} 
   1.566 +		debugger_trap_fatal(0 /* don't care */ , regs);
   1.567 +	}
   1.568  #endif
   1.569 -	else if (iim == d->arch.breakimm &&
   1.570 -	         ia64_get_cpl(regs->cr_ipsr) == 2) {
   1.571 +	else if (iim == d->arch.breakimm && ia64_get_cpl(regs->cr_ipsr) == 2) {
   1.572  		/* by default, do not continue */
   1.573  		v->arch.hypercall_continuation = 0;
   1.574  
   1.575  		if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
   1.576  			if (!PSCBX(v, hypercall_continuation))
   1.577  				vcpu_increment_iip(current);
   1.578 -		}
   1.579 -		else reflect_interruption(isr, regs, vector);
   1.580 -	}
   1.581 -	else if (!PSCB(v,interrupt_collection_enabled)) {
   1.582 -		if (ia64_hyperprivop(iim,regs))
   1.583 +		} else
   1.584 +			reflect_interruption(isr, regs, vector);
   1.585 +	} else if (!PSCB(v, interrupt_collection_enabled)) {
   1.586 +		if (ia64_hyperprivop(iim, regs))
   1.587  			vcpu_increment_iip(current);
   1.588 -	}
   1.589 -	else {
   1.590 -		if (iim == 0) 
   1.591 +	} else {
   1.592 +		if (iim == 0)
   1.593  			die_if_kernel("bug check", regs, iim);
   1.594 -		PSCB(v,iim) = iim;
   1.595 -		reflect_interruption(isr,regs,IA64_BREAK_VECTOR);
   1.596 +		PSCB(v, iim) = iim;
   1.597 +		reflect_interruption(isr, regs, IA64_BREAK_VECTOR);
   1.598  	}
   1.599  }
   1.600  
   1.601  void
   1.602 -ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
   1.603 +ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
   1.604 +                   unsigned long itir)
   1.605  {
   1.606  	IA64FAULT vector;
   1.607  
   1.608 -	vector = priv_emulate(current,regs,isr);
   1.609 +	vector = priv_emulate(current, regs, isr);
   1.610  	if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
   1.611  		// Note: if a path results in a vector to reflect that requires
   1.612  		// iha/itir (e.g. vcpu_force_data_miss), they must be set there
   1.613 -		reflect_interruption(isr,regs,vector);
   1.614 +		reflect_interruption(isr, regs, vector);
   1.615  	}
   1.616  }
   1.617  
   1.618  void
   1.619 -ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
   1.620 +ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs,
   1.621 +                       unsigned long isr, unsigned long iim,
   1.622 +                       unsigned long vector)
   1.623  {
   1.624  	struct vcpu *v = current;
   1.625  	unsigned long check_lazy_cover = 0;
   1.626 @@ -567,39 +579,48 @@ ia64_handle_reflection (unsigned long if
   1.627  	unsigned long status;
   1.628  
   1.629  	/* Following faults shouldn'g be seen from Xen itself */
   1.630 -	BUG_ON (!(psr & IA64_PSR_CPL));
   1.631 +	BUG_ON(!(psr & IA64_PSR_CPL));
   1.632  
   1.633 -	switch(vector) {
   1.634 -	    case 8:
   1.635 -		vector = IA64_DIRTY_BIT_VECTOR; break;
   1.636 -	    case 9:
   1.637 -		vector = IA64_INST_ACCESS_BIT_VECTOR; break;
   1.638 -	    case 10:
   1.639 +	switch (vector) {
   1.640 +	case 8:
   1.641 +		vector = IA64_DIRTY_BIT_VECTOR;
   1.642 +		break;
   1.643 +	case 9:
   1.644 +		vector = IA64_INST_ACCESS_BIT_VECTOR;
   1.645 +		break;
   1.646 +	case 10:
   1.647  		check_lazy_cover = 1;
   1.648 -		vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
   1.649 -	    case 20:
   1.650 +		vector = IA64_DATA_ACCESS_BIT_VECTOR;
   1.651 +		break;
   1.652 +	case 20:
   1.653  		check_lazy_cover = 1;
   1.654 -		vector = IA64_PAGE_NOT_PRESENT_VECTOR; break;
   1.655 -	    case 22:
   1.656 -		vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
   1.657 -	    case 23:
   1.658 +		vector = IA64_PAGE_NOT_PRESENT_VECTOR;
   1.659 +		break;
   1.660 +	case 22:
   1.661 +		vector = IA64_INST_ACCESS_RIGHTS_VECTOR;
   1.662 +		break;
   1.663 +	case 23:
   1.664  		check_lazy_cover = 1;
   1.665 -		vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
   1.666 -	    case 25:
   1.667 +		vector = IA64_DATA_ACCESS_RIGHTS_VECTOR;
   1.668 +		break;
   1.669 +	case 25:
   1.670  		vector = IA64_DISABLED_FPREG_VECTOR;
   1.671  		break;
   1.672 -	    case 26:
   1.673 +	case 26:
   1.674  		if (((isr >> 4L) & 0xfL) == 1) {
   1.675  			/* Fault is due to a register NaT consumption fault. */
   1.676  			//regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
   1.677 -			printk("ia64_handle_reflection: handling regNaT fault\n");
   1.678 -			vector = IA64_NAT_CONSUMPTION_VECTOR; break;
   1.679 +			printk("ia64_handle_reflection: handling regNaT "
   1.680 +			       "fault\n");
   1.681 +			vector = IA64_NAT_CONSUMPTION_VECTOR;
   1.682 +			break;
   1.683  		}
   1.684  #if 1
   1.685  		// pass null pointer dereferences through with no error
   1.686  		// but retain debug output for non-zero ifa
   1.687  		if (!ifa) {
   1.688 -			vector = IA64_NAT_CONSUMPTION_VECTOR; break;
   1.689 +			vector = IA64_NAT_CONSUMPTION_VECTOR;
   1.690 +			break;
   1.691  		}
   1.692  #endif
   1.693  #ifdef CONFIG_PRIVIFY
   1.694 @@ -610,32 +631,38 @@ ia64_handle_reflection (unsigned long if
   1.695  		       isr, ifa, regs->cr_iip, psr);
   1.696  		//regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
   1.697  		// certain NaT faults are higher priority than privop faults
   1.698 -		vector = priv_emulate(v,regs,isr);
   1.699 +		vector = priv_emulate(v, regs, isr);
   1.700  		if (vector == IA64_NO_FAULT) {
   1.701 -			printk("*** Handled privop masquerading as NaT fault\n");
   1.702 +			printk("*** Handled privop masquerading as NaT "
   1.703 +			       "fault\n");
   1.704  			return;
   1.705  		}
   1.706  #endif
   1.707 -		vector = IA64_NAT_CONSUMPTION_VECTOR; break;
   1.708 -	    case 27:
   1.709 -		//printk("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
   1.710 -		PSCB(current,iim) = iim;
   1.711 -		vector = IA64_SPECULATION_VECTOR; break;
   1.712 -	    case 30:
   1.713 +		vector = IA64_NAT_CONSUMPTION_VECTOR;
   1.714 +		break;
   1.715 +	case 27:
   1.716 +		//printk("*** Handled speculation vector, itc=%lx!\n",
   1.717 +		//       ia64_get_itc());
   1.718 +		PSCB(current, iim) = iim;
   1.719 +		vector = IA64_SPECULATION_VECTOR;
   1.720 +		break;
   1.721 +	case 30:
   1.722  		// FIXME: Should we handle unaligned refs in Xen??
   1.723 -		vector = IA64_UNALIGNED_REF_VECTOR; break;
   1.724 -	    case 32:
   1.725 +		vector = IA64_UNALIGNED_REF_VECTOR;
   1.726 +		break;
   1.727 +	case 32:
   1.728  		status = handle_fpu_swa(1, regs, isr);
   1.729  		if (!status) {
   1.730 -		    vcpu_increment_iip(v);
   1.731 -		    return;
   1.732 +			vcpu_increment_iip(v);
   1.733 +			return;
   1.734  		}
   1.735  		// fetch code fail
   1.736  		if (IA64_RETRY == status)
   1.737  			return;
   1.738  		printk("ia64_handle_reflection: handling FP fault\n");
   1.739 -		vector = IA64_FP_FAULT_VECTOR; break;
   1.740 -	    case 33:
   1.741 +		vector = IA64_FP_FAULT_VECTOR;
   1.742 +		break;
   1.743 +	case 33:
   1.744  		status = handle_fpu_swa(0, regs, isr);
   1.745  		if (!status)
   1.746  			return;
   1.747 @@ -643,26 +670,34 @@ ia64_handle_reflection (unsigned long if
   1.748  		if (IA64_RETRY == status)
   1.749  			return;
   1.750  		printk("ia64_handle_reflection: handling FP trap\n");
   1.751 -		vector = IA64_FP_TRAP_VECTOR; break;
   1.752 -	    case 34:
   1.753 +		vector = IA64_FP_TRAP_VECTOR;
   1.754 +		break;
   1.755 +	case 34:
   1.756  		printk("ia64_handle_reflection: handling lowerpriv trap\n");
   1.757 -		vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR; break;
   1.758 -	    case 35:
   1.759 +		vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR;
   1.760 +		break;
   1.761 +	case 35:
   1.762  		printk("ia64_handle_reflection: handling taken branch trap\n");
   1.763 -		vector = IA64_TAKEN_BRANCH_TRAP_VECTOR; break;
   1.764 -	    case 36:
   1.765 +		vector = IA64_TAKEN_BRANCH_TRAP_VECTOR;
   1.766 +		break;
   1.767 +	case 36:
   1.768  		printk("ia64_handle_reflection: handling single step trap\n");
   1.769 -		vector = IA64_SINGLE_STEP_TRAP_VECTOR; break;
   1.770 +		vector = IA64_SINGLE_STEP_TRAP_VECTOR;
   1.771 +		break;
   1.772  
   1.773 -	    default:
   1.774 -		printk("ia64_handle_reflection: unhandled vector=0x%lx\n",vector);
   1.775 -		while(vector);
   1.776 +	default:
   1.777 +		printk("ia64_handle_reflection: unhandled vector=0x%lx\n",
   1.778 +		       vector);
   1.779 +		while (vector)
   1.780 +			/* spin */;
   1.781  		return;
   1.782  	}
   1.783 -	if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, regs)) return;
   1.784 -	PSCB(current,ifa) = ifa;
   1.785 -	PSCB(current,itir) = vcpu_get_itir_on_fault(v,ifa);
   1.786 -	reflect_interruption(isr,regs,vector);
   1.787 +	if (check_lazy_cover && (isr & IA64_ISR_IR) &&
   1.788 +	    handle_lazy_cover(v, regs))
   1.789 +		return;
   1.790 +	PSCB(current, ifa) = ifa;
   1.791 +	PSCB(current, itir) = vcpu_get_itir_on_fault(v, ifa);
   1.792 +	reflect_interruption(isr, regs, vector);
   1.793  }
   1.794  
   1.795  void
   1.796 @@ -681,7 +716,7 @@ ia64_shadow_fault(unsigned long ifa, uns
   1.797  	   -  reflecting or not the fault (the virtual Dirty bit must be
   1.798  	      extracted to decide).
   1.799  	   Unfortunatly these informations are not immediatly available!
   1.800 -	*/
   1.801 +	 */
   1.802  
   1.803  	/* Extract the metaphysical address.
   1.804  	   Try to get it from VHPT and M2P as we need the flags.  */
   1.805 @@ -691,8 +726,7 @@ ia64_shadow_fault(unsigned long ifa, uns
   1.806  		/* The VHPT entry is valid.  */
   1.807  		gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
   1.808  		BUG_ON(gpfn == INVALID_M2P_ENTRY);
   1.809 -	}
   1.810 -	else {
   1.811 +	} else {
   1.812  		unsigned long itir, iha;
   1.813  		IA64FAULT fault;
   1.814  
   1.815 @@ -715,14 +749,14 @@ ia64_shadow_fault(unsigned long ifa, uns
   1.816  	}
   1.817  
   1.818  	/* Set the dirty bit in the bitmap.  */
   1.819 -	shadow_mark_page_dirty (d, gpfn);
   1.820 +	shadow_mark_page_dirty(d, gpfn);
   1.821  
   1.822  	/* Update the local TC/VHPT and decides wether or not the fault should
   1.823  	   be reflected.
   1.824  	   SMP note: we almost ignore the other processors.  The shadow_bitmap
   1.825  	   has been atomically updated.  If the dirty fault happen on another
   1.826  	   processor, it will do its job.
   1.827 -	*/
   1.828 +	 */
   1.829  
   1.830  	if (pte != 0) {
   1.831  		/* We will know how to handle the fault.  */
   1.832 @@ -733,21 +767,19 @@ ia64_shadow_fault(unsigned long ifa, uns
   1.833  			   cpu VHPT owner can write page_flags.  */
   1.834  			if (vlfe)
   1.835  				vlfe->page_flags = pte | _PAGE_D;
   1.836 -			
   1.837 +
   1.838  			/* Purge the TC locally.
   1.839  			   It will be reloaded from the VHPT iff the
   1.840  			   VHPT entry is still valid.  */
   1.841  			ia64_ptcl(ifa, PAGE_SHIFT << 2);
   1.842  
   1.843  			atomic64_inc(&d->arch.shadow_fault_count);
   1.844 -		}
   1.845 -		else {
   1.846 +		} else {
   1.847  			/* Reflect.
   1.848  			   In this case there is no need to purge.  */
   1.849  			ia64_handle_reflection(ifa, regs, isr, 0, 8);
   1.850  		}
   1.851 -	}
   1.852 -	else {
   1.853 +	} else {
   1.854  		/* We don't know wether or not the fault must be
   1.855  		   reflected.  The VHPT entry is not valid.  */
   1.856  		/* FIXME: in metaphysical mode, we could do an ITC now.  */