ia64/xen-unstable

changeset 3086:b7cbbc4c7a3e

bitkeeper revision 1.1159.187.1 (41a26ec1W-iw8iKd-EuzGrvNLX-08g)

Initial ia64 checkin.
author iap10@labyrinth.cl.cam.ac.uk
date Mon Nov 22 22:57:05 2004 +0000 (2004-11-22)
parents 5281b60ddd27
children f0d6fa2867c5
files .rootkeys xen/arch/ia64/privop.c xen/arch/ia64/process.c xen/arch/ia64/regionreg.c xen/arch/ia64/vcpu.c xen/arch/ia64/xenasm.S
line diff
     1.1 --- a/.rootkeys	Mon Nov 22 22:40:14 2004 +0000
     1.2 +++ b/.rootkeys	Mon Nov 22 22:57:05 2004 +0000
     1.3 @@ -634,6 +634,11 @@ 40e9808eHXvs_5eggj9McD_J90mhNw tools/xfr
     1.4  3f72f1bdJPsV3JCnBqs9ddL9tr6D2g xen/COPYING
     1.5  3ddb79bcbOVHh38VJzc97-JEGD4dJQ xen/Makefile
     1.6  3ddb79bcWnTwYsQRWl_PaneJfa6p0w xen/Rules.mk
     1.7 +41a26ebcqaSGVQ8qTMwpPwOJSJ7qSw xen/arch/ia64/privop.c
     1.8 +41a26ebc4BOHDUsT0TSnryPeV2xfRA xen/arch/ia64/process.c
     1.9 +41a26ebcJ30TFl1v2kR8rqpEBvOtVw xen/arch/ia64/regionreg.c
    1.10 +41a26ebc--sjlYZQxmIxyCx3jw70qA xen/arch/ia64/vcpu.c
    1.11 +41a26ebc4jSBGQOuyNIPDST58mNbBw xen/arch/ia64/xenasm.S
    1.12  3ddb79bcZbRBzT3elFWSX7u6NtMagQ xen/arch/x86/Makefile
    1.13  3ddb79bcBQF85CfLS4i1WGZ4oLLaCA xen/arch/x86/Rules.mk
    1.14  3e5636e5FAYZ5_vQnmgwFJfSdmO5Mw xen/arch/x86/acpi.c
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/xen/arch/ia64/privop.c	Mon Nov 22 22:57:05 2004 +0000
     2.3 @@ -0,0 +1,863 @@
     2.4 +/*
     2.5 + * Privileged operation "API" handling functions.
     2.6 + * 
     2.7 + * Copyright (C) 2004 Hewlett-Packard Co.
     2.8 + *	Dan Magenheimer (dan.magenheimer@hp.com)
     2.9 + *
    2.10 + */
    2.11 +
    2.12 +#include <asm/privop.h>
    2.13 +#include <asm/privify.h>
    2.14 +#include <asm/vcpu.h>
    2.15 +#include <asm/processor.h>
    2.16 +#include <asm/delay.h>	// Debug only
    2.17 +//#include <debug.h>
    2.18 +
    2.19 +long priv_verbose=0;
    2.20 +
    2.21 +/**************************************************************************
    2.22 +Hypercall bundle creation
    2.23 +**************************************************************************/
    2.24 +
    2.25 +
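          +/*
          + * Build the two 64-bit halves of a hypercall stub bundle in place:
          + * slot0 loads the hypercall number into r2 (A5 add-immediate form),
          + * slot1 is the break that traps into Xen, and slot2 either returns
          + * via rp or branches through it.  Template 0x11 is an M/I/B bundle,
          + * matching those three instruction types.
          + */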
    2.26 +void build_hypercall_bundle(UINT64 *imva, UINT64 breakimm, UINT64 hypnum, UINT64 ret)
    2.27 +{
    2.28 +	INST64_A5 slot0;
    2.29 +	INST64_I19 slot1;
    2.30 +	INST64_B4 slot2;
    2.31 +	IA64_BUNDLE bundle;
    2.32 +
     2.33 +	// slot0: mov r2 = hypnum (low 21 bits)
    2.34 +	slot0.inst = 0;
    2.35 +	slot0.qp = 0; slot0.r1 = 2; slot0.r3 = 0; slot0.major = 0x9;
    2.36 +	slot0.imm7b = hypnum; slot0.imm9d = hypnum >> 7;
    2.37 +	slot0.imm5c = hypnum >> 16; slot0.s = 0;
    2.38 +	// slot1: break breakimm
    2.39 +	slot1.inst = 0;
    2.40 +	slot1.qp = 0; slot1.x6 = 0; slot1.x3 = 0; slot1.major = 0x0;
    2.41 +	slot1.imm20 = breakimm; slot1.i = breakimm >> 20;
    2.42 +	// if ret slot2: br.ret.sptk.many rp
    2.43 +	// else slot2: br.cond.sptk.many rp
    2.44 +	slot2.inst = 0; slot2.qp = 0; slot2.p = 1; slot2.b2 = 0;
    2.45 +	slot2.wh = 0; slot2.d = 0; slot2.major = 0x0;
    2.46 +	if (ret) {
    2.47 +		slot2.btype = 4; slot2.x6 = 0x21;
    2.48 +	}
    2.49 +	else {
    2.50 +		slot2.btype = 0; slot2.x6 = 0x20;
    2.51 +	}
    2.52 +	
    2.53 +	bundle.i64[0] = 0; bundle.i64[1] = 0;
    2.54 +	bundle.template = 0x11;
    2.55 +	bundle.slot0 = slot0.inst; bundle.slot2 = slot2.inst;
    2.56 +	bundle.slot1a = slot1.inst; bundle.slot1b = slot1.inst >> 18;
    2.57 +	
    2.58 +	*imva++ = bundle.i64[0]; *imva = bundle.i64[1];
    2.59 +}
    2.60 +
    2.61 +/**************************************************************************
    2.62 +Privileged operation emulation routines
    2.63 +**************************************************************************/
    2.64 +
    2.65 +IA64FAULT priv_rfi(VCPU *vcpu, INST64 inst)
    2.66 +{
    2.67 +	return vcpu_rfi(vcpu);
    2.68 +}
    2.69 +
    2.70 +IA64FAULT priv_bsw0(VCPU *vcpu, INST64 inst)
    2.71 +{
    2.72 +	return vcpu_bsw0(vcpu);
    2.73 +}
    2.74 +
    2.75 +IA64FAULT priv_bsw1(VCPU *vcpu, INST64 inst)
    2.76 +{
    2.77 +	return vcpu_bsw1(vcpu);
    2.78 +}
    2.79 +
    2.80 +IA64FAULT priv_cover(VCPU *vcpu, INST64 inst)
    2.81 +{
    2.82 +	return vcpu_cover(vcpu);
    2.83 +}
    2.84 +
    2.85 +IA64FAULT priv_ptc_l(VCPU *vcpu, INST64 inst)
    2.86 +{
    2.87 +	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    2.88 +	UINT64 addr_range;
    2.89 +
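          +	// r2{7:2} holds log2 of the purge address range; convert to bytes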
    2.90 +	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
    2.91 +	return vcpu_ptc_l(vcpu,vadr,addr_range);
    2.92 +}
    2.93 +
    2.94 +IA64FAULT priv_ptc_e(VCPU *vcpu, INST64 inst)
    2.95 +{
    2.96 +	UINT src = inst.M28.r3;
    2.97 +
    2.98 +	// NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
    2.99 +	if (src > 63) return(vcpu_fc(vcpu,vcpu_get_gr(vcpu,src - 64)));
   2.100 +	return vcpu_ptc_e(vcpu,vcpu_get_gr(vcpu,src));
   2.101 +}
   2.102 +
   2.103 +IA64FAULT priv_ptc_g(VCPU *vcpu, INST64 inst)
   2.104 +{
   2.105 +	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
   2.106 +	UINT64 addr_range;
   2.107 +
   2.108 +	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
   2.109 +	return vcpu_ptc_g(vcpu,vadr,addr_range);
   2.110 +}
   2.111 +
   2.112 +IA64FAULT priv_ptc_ga(VCPU *vcpu, INST64 inst)
   2.113 +{
   2.114 +	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
   2.115 +	UINT64 addr_range;
   2.116 +
   2.117 +	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
   2.118 +	return vcpu_ptc_ga(vcpu,vadr,addr_range);
   2.119 +}
   2.120 +
   2.121 +IA64FAULT priv_ptr_d(VCPU *vcpu, INST64 inst)
   2.122 +{
   2.123 +	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
   2.124 +	UINT64 addr_range;
   2.125 +
   2.126 +	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
   2.127 +	return vcpu_ptr_d(vcpu,vadr,addr_range);
   2.128 +}
   2.129 +
   2.130 +IA64FAULT priv_ptr_i(VCPU *vcpu, INST64 inst)
   2.131 +{
   2.132 +	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
   2.133 +	UINT64 addr_range;
   2.134 +
   2.135 +	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
   2.136 +	return vcpu_ptr_i(vcpu,vadr,addr_range);
   2.137 +}
   2.138 +
   2.139 +IA64FAULT priv_tpa(VCPU *vcpu, INST64 inst)
   2.140 +{
   2.141 +	UINT64 padr;
   2.142 +	UINT fault;
   2.143 +	UINT src = inst.M46.r3;
   2.144 +
   2.145 +	// NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
   2.146 +	if (src > 63)
   2.147 +		fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
   2.148 +	else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
   2.149 +	if (fault == IA64_NO_FAULT)
   2.150 +		return vcpu_set_gr(vcpu, inst.M46.r1, padr);
   2.151 +	else return fault;
   2.152 +}
   2.153 +
   2.154 +IA64FAULT priv_tak(VCPU *vcpu, INST64 inst)
   2.155 +{
   2.156 +	UINT64 key;
   2.157 +	UINT fault;
   2.158 +	UINT src = inst.M46.r3;
   2.159 +
   2.160 +	// NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
   2.161 +	if (src > 63)
   2.162 +		fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
   2.163 +	else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
   2.164 +	if (fault == IA64_NO_FAULT)
   2.165 +		return vcpu_set_gr(vcpu, inst.M46.r1, key);
   2.166 +	else return fault;
   2.167 +}
   2.168 +
   2.169 +/************************************
   2.170 + * Insert translation register/cache
   2.171 +************************************/
   2.172 +
   2.173 +IA64FAULT priv_itr_d(VCPU *vcpu, INST64 inst)
   2.174 +{
   2.175 +	UINT64 fault, itir, ifa, pte, slot;
   2.176 +
   2.177 +	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
   2.178 +	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
   2.179 +		return(IA64_ILLOP_FAULT);
   2.180 +	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
   2.181 +		return(IA64_ILLOP_FAULT);
   2.182 +	pte = vcpu_get_gr(vcpu,inst.M42.r2);
   2.183 +	slot = vcpu_get_gr(vcpu,inst.M42.r3);
   2.184 +
   2.185 +	return (vcpu_itr_d(vcpu,slot,pte,itir,ifa));
   2.186 +}
   2.187 +
   2.188 +IA64FAULT priv_itr_i(VCPU *vcpu, INST64 inst)
   2.189 +{
   2.190 +	UINT64 fault, itir, ifa, pte, slot;
   2.191 +
   2.192 +	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
   2.193 +	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
   2.194 +		return(IA64_ILLOP_FAULT);
   2.195 +	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
   2.196 +		return(IA64_ILLOP_FAULT);
   2.197 +	pte = vcpu_get_gr(vcpu,inst.M42.r2);
   2.198 +	slot = vcpu_get_gr(vcpu,inst.M42.r3);
   2.199 +
   2.200 +	return (vcpu_itr_i(vcpu,slot,pte,itir,ifa));
   2.201 +}
   2.202 +
   2.203 +IA64FAULT priv_itc_d(VCPU *vcpu, INST64 inst)
   2.204 +{
   2.205 +	UINT64 fault, itir, ifa, pte;
   2.206 +
   2.207 +	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
   2.208 +	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
   2.209 +		return(IA64_ILLOP_FAULT);
   2.210 +	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
   2.211 +		return(IA64_ILLOP_FAULT);
   2.212 +	pte = vcpu_get_gr(vcpu,inst.M41.r2);
   2.213 +
   2.214 +	return (vcpu_itc_d(vcpu,pte,itir,ifa));
   2.215 +}
   2.216 +
   2.217 +IA64FAULT priv_itc_i(VCPU *vcpu, INST64 inst)
   2.218 +{
   2.219 +	UINT64 fault, itir, ifa, pte;
   2.220 +
   2.221 +	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
   2.222 +	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
   2.223 +		return(IA64_ILLOP_FAULT);
   2.224 +	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
   2.225 +		return(IA64_ILLOP_FAULT);
   2.226 +	pte = vcpu_get_gr(vcpu,inst.M41.r2);
   2.227 +
   2.228 +	return (vcpu_itc_i(vcpu,pte,itir,ifa));
   2.229 +}
   2.230 +
   2.231 +/*************************************
   2.232 + * Moves to semi-privileged registers
   2.233 +*************************************/
   2.234 +
   2.235 +IA64FAULT priv_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
   2.236 +{
   2.237 +	// I27 and M30 are identical for these fields
   2.238 +	UINT64 ar3 = inst.M30.ar3;
   2.239 +	UINT64 imm = vcpu_get_gr(vcpu,inst.M30.imm);
   2.240 +	return (vcpu_set_ar(vcpu,ar3,imm));
   2.241 +}
   2.242 +
   2.243 +IA64FAULT priv_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
   2.244 +{
   2.245 +	// I26 and M29 are identical for these fields
   2.246 +	UINT64 ar3 = inst.M29.ar3;
   2.247 +
   2.248 +	if (inst.M29.r2 > 63 && inst.M29.ar3 < 8) { // privified mov from kr
   2.249 +		UINT64 val;
   2.250 +		if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
   2.251 +			return vcpu_set_gr(vcpu, inst.M29.r2-64, val);
   2.252 +		else return IA64_ILLOP_FAULT;
   2.253 +	}
   2.254 +	else {
   2.255 +		UINT64 r2 = vcpu_get_gr(vcpu,inst.M29.r2);
   2.256 +		return (vcpu_set_ar(vcpu,ar3,r2));
   2.257 +	}
   2.258 +}
   2.259 +
   2.260 +/********************************
   2.261 + * Moves to privileged registers
   2.262 +********************************/
   2.263 +
   2.264 +IA64FAULT priv_mov_to_pkr(VCPU *vcpu, INST64 inst)
   2.265 +{
   2.266 +	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   2.267 +	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   2.268 +	return (vcpu_set_pkr(vcpu,r3,r2));
   2.269 +}
   2.270 +
   2.271 +IA64FAULT priv_mov_to_rr(VCPU *vcpu, INST64 inst)
   2.272 +{
   2.273 +	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   2.274 +	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   2.275 +	return (vcpu_set_rr(vcpu,r3,r2));
   2.276 +}
   2.277 +
   2.278 +IA64FAULT priv_mov_to_dbr(VCPU *vcpu, INST64 inst)
   2.279 +{
   2.280 +	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   2.281 +	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   2.282 +	return (vcpu_set_dbr(vcpu,r3,r2));
   2.283 +}
   2.284 +
   2.285 +IA64FAULT priv_mov_to_ibr(VCPU *vcpu, INST64 inst)
   2.286 +{
   2.287 +	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   2.288 +	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   2.289 +	return (vcpu_set_ibr(vcpu,r3,r2));
   2.290 +}
   2.291 +
   2.292 +IA64FAULT priv_mov_to_pmc(VCPU *vcpu, INST64 inst)
   2.293 +{
   2.294 +	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   2.295 +	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   2.296 +	return (vcpu_set_pmc(vcpu,r3,r2));
   2.297 +}
   2.298 +
   2.299 +IA64FAULT priv_mov_to_pmd(VCPU *vcpu, INST64 inst)
   2.300 +{
   2.301 +	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   2.302 +	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   2.303 +	return (vcpu_set_pmd(vcpu,r3,r2));
   2.304 +}
   2.305 +
   2.306 +unsigned long to_cr_cnt[128] = { 0 };
   2.307 +
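          +// cr3 is the architected control register number (cr0=dcr .. cr81=lrr1);
          +// writes to reserved or read-only registers raise an illegal op fault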
   2.308 +IA64FAULT priv_mov_to_cr(VCPU *vcpu, INST64 inst)
   2.309 +{
   2.310 +	UINT64 val = vcpu_get_gr(vcpu, inst.M32.r2);
   2.311 +	to_cr_cnt[inst.M32.cr3]++;
   2.312 +	switch (inst.M32.cr3) {
   2.313 +	    case 0: return vcpu_set_dcr(vcpu,val);
   2.314 +	    case 1: return vcpu_set_itm(vcpu,val);
   2.315 +	    case 2: return vcpu_set_iva(vcpu,val);
   2.316 +	    case 8: return vcpu_set_pta(vcpu,val);
   2.317 +	    case 16:return vcpu_set_ipsr(vcpu,val);
   2.318 +	    case 17:return vcpu_set_isr(vcpu,val);
   2.319 +	    case 19:return vcpu_set_iip(vcpu,val);
   2.320 +	    case 20:return vcpu_set_ifa(vcpu,val);
   2.321 +	    case 21:return vcpu_set_itir(vcpu,val);
   2.322 +	    case 22:return vcpu_set_iipa(vcpu,val);
   2.323 +	    case 23:return vcpu_set_ifs(vcpu,val);
   2.324 +	    case 24:return vcpu_set_iim(vcpu,val);
   2.325 +	    case 25:return vcpu_set_iha(vcpu,val);
   2.326 +	    case 64:return vcpu_set_lid(vcpu,val);
   2.327 +	    case 65:return IA64_ILLOP_FAULT;
   2.328 +	    case 66:return vcpu_set_tpr(vcpu,val);
   2.329 +	    case 67:return vcpu_set_eoi(vcpu,val);
   2.330 +	    case 68:return IA64_ILLOP_FAULT;
   2.331 +	    case 69:return IA64_ILLOP_FAULT;
   2.332 +	    case 70:return IA64_ILLOP_FAULT;
   2.333 +	    case 71:return IA64_ILLOP_FAULT;
   2.334 +	    case 72:return vcpu_set_itv(vcpu,val);
   2.335 +	    case 73:return vcpu_set_pmv(vcpu,val);
   2.336 +	    case 74:return vcpu_set_cmcv(vcpu,val);
   2.337 +	    case 80:return vcpu_set_lrr0(vcpu,val);
   2.338 +	    case 81:return vcpu_set_lrr1(vcpu,val);
   2.339 +	    default: return IA64_ILLOP_FAULT;
   2.340 +	}
   2.341 +}
   2.342 +
   2.343 +IA64FAULT priv_rsm(VCPU *vcpu, INST64 inst)
   2.344 +{
   2.345 +	UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
   2.346 +	return vcpu_reset_psr_sm(vcpu,imm24);
   2.347 +}
   2.348 +
   2.349 +IA64FAULT priv_ssm(VCPU *vcpu, INST64 inst)
   2.350 +{
   2.351 +	UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
   2.352 +	return vcpu_set_psr_sm(vcpu,imm24);
   2.353 +}
   2.354 +
   2.355 +/**
   2.356 + * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
   2.357 + */
   2.358 +IA64FAULT priv_mov_to_psr(VCPU *vcpu, INST64 inst)
   2.359 +{
   2.360 +	UINT64 val = vcpu_get_gr(vcpu, inst.M35.r2);
   2.361 +	return vcpu_set_psr_l(vcpu,val);
   2.362 +}
   2.363 +
   2.364 +/**********************************
   2.365 + * Moves from privileged registers
   2.366 + **********************************/
   2.367 +
   2.368 +IA64FAULT priv_mov_from_rr(VCPU *vcpu, INST64 inst)
   2.369 +{
   2.370 +	UINT64 val;
   2.371 +	IA64FAULT fault;
   2.372 +	
   2.373 +	if (inst.M43.r1 > 63) { // privified mov from cpuid
   2.374 +		fault = vcpu_get_cpuid(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   2.375 +		if (fault == IA64_NO_FAULT)
   2.376 +			return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
   2.377 +	}
   2.378 +	else {
   2.379 +		fault = vcpu_get_rr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   2.380 +		if (fault == IA64_NO_FAULT)
   2.381 +			return vcpu_set_gr(vcpu, inst.M43.r1, val);
   2.382 +	}
   2.383 +	return fault;
   2.384 +}
   2.385 +
   2.386 +IA64FAULT priv_mov_from_pkr(VCPU *vcpu, INST64 inst)
   2.387 +{
   2.388 +	UINT64 val;
   2.389 +	IA64FAULT fault;
   2.390 +	
   2.391 +	fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   2.392 +	if (fault == IA64_NO_FAULT)
   2.393 +		return vcpu_set_gr(vcpu, inst.M43.r1, val);
   2.394 +	else return fault;
   2.395 +}
   2.396 +
   2.397 +IA64FAULT priv_mov_from_dbr(VCPU *vcpu, INST64 inst)
   2.398 +{
   2.399 +	UINT64 val;
   2.400 +	IA64FAULT fault;
   2.401 +	
   2.402 +	fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   2.403 +	if (fault == IA64_NO_FAULT)
   2.404 +		return vcpu_set_gr(vcpu, inst.M43.r1, val);
   2.405 +	else return fault;
   2.406 +}
   2.407 +
   2.408 +IA64FAULT priv_mov_from_ibr(VCPU *vcpu, INST64 inst)
   2.409 +{
   2.410 +	UINT64 val;
   2.411 +	IA64FAULT fault;
   2.412 +	
   2.413 +	fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   2.414 +	if (fault == IA64_NO_FAULT)
   2.415 +		return vcpu_set_gr(vcpu, inst.M43.r1, val);
   2.416 +	else return fault;
   2.417 +}
   2.418 +
   2.419 +IA64FAULT priv_mov_from_pmc(VCPU *vcpu, INST64 inst)
   2.420 +{
   2.421 +	UINT64 val;
   2.422 +	IA64FAULT fault;
   2.423 +	
   2.424 +	fault = vcpu_get_pmc(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   2.425 +	if (fault == IA64_NO_FAULT)
   2.426 +		return vcpu_set_gr(vcpu, inst.M43.r1, val);
   2.427 +	else return fault;
   2.428 +}
   2.429 +
   2.430 +unsigned long from_cr_cnt[128] = { 0 };
   2.431 +
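          +// cr_get(cr): read control register "cr" from the vcpu; on success copy
          +// the value to the target general register, else propagate the fault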
   2.432 +#define cr_get(cr) \
   2.433 +	((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
    2.434 +		vcpu_set_gr(vcpu, tgt, val) : fault
   2.435 +	
   2.436 +IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
   2.437 +{
   2.438 +	UINT64 tgt = inst.M33.r1;
   2.439 +	UINT64 val;
   2.440 +	IA64FAULT fault;
   2.441 +
   2.442 +	from_cr_cnt[inst.M33.cr3]++;
   2.443 +	switch (inst.M33.cr3) {
   2.444 +	    case 0: return cr_get(dcr);
   2.445 +	    case 1: return cr_get(itm);
   2.446 +	    case 2: return cr_get(iva);
   2.447 +	    case 8: return cr_get(pta);
   2.448 +	    case 16:return cr_get(ipsr);
   2.449 +	    case 17:return cr_get(isr);
   2.450 +	    case 19:return cr_get(iip);
   2.451 +	    case 20:return cr_get(ifa);
   2.452 +	    case 21:return cr_get(itir);
   2.453 +	    case 22:return cr_get(iipa);
   2.454 +	    case 23:return cr_get(ifs);
   2.455 +	    case 24:return cr_get(iim);
   2.456 +	    case 25:return cr_get(iha);
   2.457 +	    case 64:return cr_get(lid);
   2.458 +	    case 65:return cr_get(ivr);
   2.459 +	    case 66:return cr_get(tpr);
   2.460 +	    case 67:return vcpu_set_gr(vcpu,tgt,0L);
   2.461 +	    case 68:return cr_get(irr0);
   2.462 +	    case 69:return cr_get(irr1);
   2.463 +	    case 70:return cr_get(irr2);
   2.464 +	    case 71:return cr_get(irr3);
   2.465 +	    case 72:return cr_get(itv);
   2.466 +	    case 73:return cr_get(pmv);
   2.467 +	    case 74:return cr_get(cmcv);
   2.468 +	    case 80:return cr_get(lrr0);
   2.469 +	    case 81:return cr_get(lrr1);
   2.470 +	    default: return IA64_ILLOP_FAULT;
   2.471 +	}
   2.472 +	return IA64_ILLOP_FAULT;
   2.473 +}
   2.474 +
   2.475 +IA64FAULT priv_mov_from_psr(VCPU *vcpu, INST64 inst)
   2.476 +{
   2.477 +	UINT64 tgt = inst.M33.r1;
   2.478 +	UINT64 val;
   2.479 +	IA64FAULT fault;
   2.480 +
   2.481 +	if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
   2.482 +		return vcpu_set_gr(vcpu, tgt, val);
   2.483 +	else return fault;
   2.484 +}
   2.485 +
   2.486 +/**************************************************************************
   2.487 +Privileged operation decode and dispatch routines
   2.488 +**************************************************************************/
   2.489 +
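          +// Map each of the 32 bundle templates to the execution-unit type of its
          +// three slots, so the decoder knows how to interpret the faulting slot.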
   2.490 +IA64_SLOT_TYPE slot_types[0x20][3] = {
   2.491 +	{M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
   2.492 +	{M, I, ILLEGAL}, {M, I, ILLEGAL},
   2.493 +	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
   2.494 +	{M, M, I}, {M, M, I}, {M, M, I}, {M, M, I},
   2.495 +	{M, F, I}, {M, F, I},
   2.496 +	{M, M, F}, {M, M, F},
   2.497 +	{M, I, B}, {M, I, B},
   2.498 +	{M, B, B}, {M, B, B},
   2.499 +	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
   2.500 +	{B, B, B}, {B, B, B},
   2.501 +	{M, M, B}, {M, M, B},
   2.502 +	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
   2.503 +	{M, F, B}, {M, F, B},
   2.504 +	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL}
   2.505 +};
   2.506 +
   2.507 +// pointer to privileged emulation function
   2.508 +typedef IA64FAULT (*PPEFCN)(VCPU *vcpu, INST64 inst);
   2.509 +
   2.510 +PPEFCN Mpriv_funcs[64] = {
   2.511 +  priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
   2.512 +  priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
   2.513 +  0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
   2.514 +  priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
   2.515 +  priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr, priv_mov_from_pkr,
   2.516 +  priv_mov_from_pmc, 0, 0, 0,
   2.517 +  0, 0, 0, 0,
   2.518 +  0, 0, priv_tpa, priv_tak,
   2.519 +  0, 0, 0, 0,
   2.520 +  priv_mov_from_cr, priv_mov_from_psr, 0, 0,
   2.521 +  0, 0, 0, 0,
   2.522 +  priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
   2.523 +  0, 0, 0, 0,
   2.524 +  priv_ptc_e, 0, 0, 0,
   2.525 +  0, 0, 0, 0, 0, 0, 0, 0
   2.526 +};
   2.527 +
   2.528 +struct {
   2.529 +	unsigned long mov_to_ar_imm;
   2.530 +	unsigned long mov_to_ar_reg;
   2.531 +	unsigned long ssm;
   2.532 +	unsigned long rsm;
   2.533 +	unsigned long rfi;
   2.534 +	unsigned long bsw0;
   2.535 +	unsigned long bsw1;
   2.536 +	unsigned long cover;
   2.537 +	unsigned long Mpriv_cnt[64];
   2.538 +} privcnt = { 0 };
   2.539 +
   2.540 +unsigned long privop_trace = 0;
   2.541 +
   2.542 +IA64FAULT
   2.543 +priv_handle_op(VCPU *vcpu, REGS *regs, int privlvl)
   2.544 +{
   2.545 +	IA64_BUNDLE bundle, __get_domain_bundle(UINT64);
   2.546 +	int slot;
   2.547 +	IA64_SLOT_TYPE slot_type;
   2.548 +	INST64 inst;
   2.549 +	PPEFCN pfunc;
   2.550 +	unsigned long ipsr = regs->cr_ipsr;
   2.551 +	UINT64 iip = regs->cr_iip;
   2.552 +	int x6;
   2.553 +	
   2.554 +	// make a local copy of the bundle containing the privop
   2.555 +#if 1
   2.556 +	bundle = __get_domain_bundle(iip);
   2.557 +	if (!bundle.i64[0] && !bundle.i64[1]) return IA64_RETRY;
   2.558 +#else
   2.559 +#ifdef AVOIDING_POSSIBLE_DOMAIN_TLB_MISS
   2.560 +	//TODO: this needs to check for faults and behave accordingly
   2.561 +	if (!vcpu_get_iip_bundle(&bundle)) return IA64_DTLB_FAULT;
   2.562 +#else
    2.563 +	if (iip < 0x10000) {
    2.564 +		printf("priv_handle_op: unlikely iip=%p,b0=%p\n",iip,regs->b0);
    2.565 +		dummy();
    2.566 +	}
   2.567 +        bundle = *(IA64_BUNDLE *)iip;
   2.568 +#endif
   2.569 +#endif
   2.570 +#if 0
   2.571 +	if (iip==0xa000000100001820) {
   2.572 +		static int firstpagefault = 1;
   2.573 +		if (firstpagefault) {
   2.574 +			printf("*** First time to domain page fault!\n");				firstpagefault=0;
   2.575 +		}
   2.576 +	}
   2.577 +#endif
   2.578 +	if (privop_trace) {
   2.579 +		static long i = 400;
   2.580 +		//if (i > 0) printf("privop @%p\n",iip);
   2.581 +		if (i > 0) printf("priv_handle_op: @%p, itc=%lx, itm=%lx\n",
   2.582 +			iip,ia64_get_itc(),ia64_get_itm());
   2.583 +		i--;
   2.584 +	}
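          +	// each slot holds a 41-bit instruction; slot 1 straddles the two
          +	// 64-bit halves of the bundle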
   2.585 +	slot = ((struct ia64_psr *)&ipsr)->ri;
   2.586 +	if (!slot) inst.inst = (bundle.i64[0]>>5) & MASK_41;
   2.587 +	else if (slot == 1)
   2.588 +		inst.inst = ((bundle.i64[0]>>46) | bundle.i64[1]<<18) & MASK_41;
   2.589 +	else if (slot == 2) inst.inst = (bundle.i64[1]>>23) & MASK_41; 
   2.590 +	else printf("priv_handle_op: illegal slot: %d\n", slot);
   2.591 +
   2.592 +	slot_type = slot_types[bundle.template][slot];
   2.593 +	if (priv_verbose) {
   2.594 +		printf("priv_handle_op: checking bundle at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
   2.595 +		 iip, (UINT64)inst.inst, slot, slot_type);
   2.596 +	}
   2.597 +	if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
   2.598 +		// break instr for privified cover
   2.599 +	}
   2.600 +	else if (privlvl != 2) return (IA64_ILLOP_FAULT);
   2.601 +	switch (slot_type) {
   2.602 +	    case M:
   2.603 +		if (inst.generic.major == 0) {
   2.604 +#if 0
   2.605 +			if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
   2.606 +				privcnt.cover++;
   2.607 +				return priv_cover(vcpu,inst);
   2.608 +			}
   2.609 +#endif
   2.610 +			if (inst.M29.x3 != 0) break;
   2.611 +			if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
   2.612 +				privcnt.mov_to_ar_imm++;
   2.613 +				return priv_mov_to_ar_imm(vcpu,inst);
   2.614 +			}
   2.615 +			if (inst.M44.x4 == 6) {
   2.616 +				privcnt.ssm++;
   2.617 +				return priv_ssm(vcpu,inst);
   2.618 +			}
   2.619 +			if (inst.M44.x4 == 7) {
   2.620 +				privcnt.rsm++;
   2.621 +				return priv_rsm(vcpu,inst);
   2.622 +			}
   2.623 +			break;
   2.624 +		}
   2.625 +		else if (inst.generic.major != 1) break;
   2.626 +		x6 = inst.M29.x6;
   2.627 +		if (x6 == 0x2a) {
   2.628 +			privcnt.mov_to_ar_reg++;
   2.629 +			return priv_mov_to_ar_reg(vcpu,inst);
   2.630 +		}
   2.631 +		if (inst.M29.x3 != 0) break;
   2.632 +		if (!(pfunc = Mpriv_funcs[x6])) break;
   2.633 +		if (x6 == 0x1e || x6 == 0x1f)  { // tpa or tak are "special"
   2.634 +			if (inst.M46.r3 > 63) {
   2.635 +				if (x6 == 0x1e) x6 = 0x1b;
   2.636 +				else x6 = 0x1a;
   2.637 +			}
   2.638 +		}
   2.639 +		privcnt.Mpriv_cnt[x6]++;
   2.640 +		return (*pfunc)(vcpu,inst);
   2.641 +		break;
   2.642 +	    case B:
   2.643 +		if (inst.generic.major != 0) break;
   2.644 +		if (inst.B8.x6 == 0x08) {
   2.645 +			IA64FAULT fault;
   2.646 +			privcnt.rfi++;
   2.647 +			fault = priv_rfi(vcpu,inst);
   2.648 +			if (fault == IA64_NO_FAULT) fault = IA64_RFI_IN_PROGRESS;
   2.649 +			return fault;
   2.650 +		}
   2.651 +		if (inst.B8.x6 == 0x0c) {
   2.652 +			privcnt.bsw0++;
   2.653 +			return priv_bsw0(vcpu,inst);
   2.654 +		}
   2.655 +		if (inst.B8.x6 == 0x0d) {
   2.656 +			privcnt.bsw1++;
   2.657 +			return priv_bsw1(vcpu,inst);
   2.658 +		}
   2.659 +		if (inst.B8.x6 == 0x0) { // break instr for privified cover
   2.660 +			privcnt.cover++;
   2.661 +			return priv_cover(vcpu,inst);
   2.662 +		}
   2.663 +		break;
   2.664 +	    case I:
   2.665 +		if (inst.generic.major != 0) break;
   2.666 +#if 0
   2.667 +		if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
   2.668 +			privcnt.cover++;
   2.669 +			return priv_cover(vcpu,inst);
   2.670 +		}
   2.671 +#endif
   2.672 +		if (inst.I26.x3 != 0) break;  // I26.x3 == I27.x3
   2.673 +		if (inst.I26.x6 == 0x2a) {
   2.674 +			privcnt.mov_to_ar_reg++;
   2.675 +			return priv_mov_to_ar_reg(vcpu,inst);
   2.676 +		}
   2.677 +		if (inst.I27.x6 == 0x0a) {
   2.678 +			privcnt.mov_to_ar_imm++;
   2.679 +			return priv_mov_to_ar_imm(vcpu,inst);
   2.680 +		}
   2.681 +		break;
   2.682 +	    default:
   2.683 +		break;
   2.684 +	}
   2.685 +        //printf("We who are about do die salute you\n");
   2.686 +	printf("handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
   2.687 +		 iip, (UINT64)inst.inst, slot, slot_type);
   2.688 +        //printf("vtop(0x%lx)==0x%lx\r\n", iip, tr_vtop(iip));
   2.689 +        //thread_mozambique("privop fault\n");
   2.690 +	return (IA64_ILLOP_FAULT);
   2.691 +}
   2.692 +
   2.693 +/** Emulate a privileged operation.
   2.694 + *
   2.695 + * This should probably return 0 on success and the "trap number"
   2.696 + * (e.g. illegal operation for bad register, priv op for an
   2.697 + * instruction that isn't allowed, etc.) on "failure"
   2.698 + *
   2.699 + * @param vcpu virtual cpu
   2.700 + * @param isrcode interrupt service routine code
   2.701 + * @return fault
   2.702 + */
   2.703 +IA64FAULT
   2.704 +priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
   2.705 +{
   2.706 +	IA64FAULT fault;
   2.707 +	UINT64 ipsr = regs->cr_ipsr;
   2.708 +	UINT64 isrcode = (isr >> 4) & 0xf;
   2.709 +	int privlvl;
   2.710 +
   2.711 +	// handle privops masked as illops? and breaks (6)
   2.712 +	if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
   2.713 +        	printf("priv_emulate: isrcode != 0 or 1 or 2\n");
   2.714 +		printf("priv_emulate: returning ILLOP, not implemented!\n");
   2.715 +		while (1);
   2.716 +		return IA64_ILLOP_FAULT;
   2.717 +	}
   2.718 +	//if (isrcode != 1 && isrcode != 2) return 0;
   2.719 +	vcpu_set_regs(vcpu,regs);
   2.720 +	privlvl = (ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
    2.721 +	// it's OK for a privified-cover to be executed in user-land
   2.722 +	fault = priv_handle_op(vcpu,regs,privlvl);
   2.723 +	if (fault == IA64_NO_FAULT) { // success!!
   2.724 +		// update iip/ipsr to point to the next instruction
   2.725 +		(void)vcpu_increment_iip(vcpu);
   2.726 +	}
   2.727 +	else if (fault == IA64_EXTINT_VECTOR) {
   2.728 +		// update iip/ipsr before delivering interrupt
   2.729 +		(void)vcpu_increment_iip(vcpu);
   2.730 +	}
   2.731 +	else if (fault == IA64_RFI_IN_PROGRESS) return fault;
   2.732 +		// success but don't update to next instruction
   2.733 +        else if (fault == IA64_RETRY) {
   2.734 +            //printf("Priv emulate gets IA64_RETRY\n");
   2.735 +	    //printf("priv_emulate: returning RETRY, not implemented!\n");
   2.736 +	    //while (1);
   2.737 +	    // don't update iip/ipsr, deliver 
   2.738 +	
   2.739 +            vcpu_force_data_miss(vcpu,regs->cr_iip);
   2.740 +	    return IA64_RETRY;
   2.741 +        }
   2.742 +	else if (priv_verbose) printf("unhandled operation from handle_op\n");
   2.743 +//	if (fault == IA64_ILLOP_FAULT) {
   2.744 +//		printf("priv_emulate: returning ILLOP, not implemented!\n");
   2.745 +//		while (1);
   2.746 +//	}
   2.747 +	return fault;
   2.748 +}
   2.749 +
   2.750 +
   2.751 +/**************************************************************************
   2.752 +Privileged operation instrumentation routines
   2.753 +**************************************************************************/
   2.754 +
   2.755 +char *Mpriv_str[64] = {
   2.756 +  "mov_to_rr", "mov_to_dbr", "mov_to_ibr", "mov_to_pkr",
   2.757 +  "mov_to_pmc", "mov_to_pmd", "<0x06>", "<0x07>",
   2.758 +  "<0x08>", "ptc_l", "ptc_g", "ptc_ga",
   2.759 +  "ptr_d", "ptr_i", "itr_d", "itr_i",
   2.760 +  "mov_from_rr", "mov_from_dbr", "mov_from_ibr", "mov_from_pkr",
   2.761 +  "mov_from_pmc", "<0x15>", "<0x16>", "<0x17>",
   2.762 +  "<0x18>", "<0x19>", "privified-thash", "privified-ttag",
   2.763 +  "<0x1c>", "<0x1d>", "tpa", "tak",
   2.764 +  "<0x20>", "<0x21>", "<0x22>", "<0x23>",
   2.765 +  "mov_from_cr", "mov_from_psr", "<0x26>", "<0x27>",
   2.766 +  "<0x28>", "<0x29>", "<0x2a>", "<0x2b>",
   2.767 +  "mov_to_cr", "mov_to_psr", "itc_d", "itc_i",
   2.768 +  "<0x30>", "<0x31>", "<0x32>", "<0x33>",
   2.769 +  "ptc_e", "<0x35>", "<0x36>", "<0x37>",
   2.770 +  "<0x38>", "<0x39>", "<0x3a>", "<0x3b>",
   2.771 +  "<0x3c>", "<0x3d>", "<0x3e>", "<0x3f>"
   2.772 +};
   2.773 +
   2.774 +#define RS "Rsvd"
   2.775 +char *cr_str[128] = {
   2.776 +  "dcr","itm","iva",RS,RS,RS,RS,RS,
   2.777 +  "pta",RS,RS,RS,RS,RS,RS,RS,
   2.778 +  "ipsr","isr",RS,"iip","ifa","itir","iipa","ifs",
   2.779 +  "iim","iha",RS,RS,RS,RS,RS,RS,
   2.780 +  RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
   2.781 +  RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
   2.782 +  "lid","ivr","tpr","eoi","irr0","irr1","irr2","irr3",
   2.783 +  "itv","pmv","cmcv",RS,RS,RS,RS,RS,
   2.784 +  "lrr0","lrr1",RS,RS,RS,RS,RS,RS,
   2.785 +  RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
   2.786 +  RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
   2.787 +  RS,RS,RS,RS,RS,RS,RS,RS
   2.788 +};
   2.789 +
   2.790 +void dump_privop_counts(void)
   2.791 +{
   2.792 +	int i, j;
   2.793 +	UINT64 sum = 0;
   2.794 +
   2.795 +	// this is ugly and should probably produce sorted output
   2.796 +	// but it will have to do for now
   2.797 +	sum += privcnt.mov_to_ar_imm; sum += privcnt.mov_to_ar_reg;
   2.798 +	sum += privcnt.ssm; sum += privcnt.rsm;
   2.799 +	sum += privcnt.rfi; sum += privcnt.bsw0;
   2.800 +	sum += privcnt.bsw1; sum += privcnt.cover;
   2.801 +	for (i=0; i < 64; i++) sum += privcnt.Mpriv_cnt[i];
   2.802 +	printf("Privop statistics: (Total privops: %ld)\r\n",sum);
   2.803 +	if (privcnt.mov_to_ar_imm)
   2.804 +		printf("%10d  %s [%d%%]\r\n", privcnt.mov_to_ar_imm,
   2.805 +			"mov_to_ar_imm", (privcnt.mov_to_ar_imm*100L)/sum);
   2.806 +	if (privcnt.mov_to_ar_reg)
   2.807 +		printf("%10d  %s [%d%%]\r\n", privcnt.mov_to_ar_reg,
   2.808 +			"mov_to_ar_reg", (privcnt.mov_to_ar_reg*100L)/sum);
   2.809 +	if (privcnt.ssm)
   2.810 +		printf("%10d  %s [%d%%]\r\n", privcnt.ssm,
   2.811 +			"ssm", (privcnt.ssm*100L)/sum);
   2.812 +	if (privcnt.rsm)
   2.813 +		printf("%10d  %s [%d%%]\r\n", privcnt.rsm,
   2.814 +			"rsm", (privcnt.rsm*100L)/sum);
   2.815 +	if (privcnt.rfi)
   2.816 +		printf("%10d  %s [%d%%]\r\n", privcnt.rfi,
   2.817 +			"rfi", (privcnt.rfi*100L)/sum);
   2.818 +	if (privcnt.bsw0)
   2.819 +		printf("%10d  %s [%d%%]\r\n", privcnt.bsw0,
   2.820 +			"bsw0", (privcnt.bsw0*100L)/sum);
   2.821 +	if (privcnt.bsw1)
   2.822 +		printf("%10d  %s [%d%%]\r\n", privcnt.bsw1,
   2.823 +			"bsw1", (privcnt.bsw1*100L)/sum);
   2.824 +	if (privcnt.cover)
   2.825 +		printf("%10d  %s [%d%%]\r\n", privcnt.cover,
   2.826 +			"cover", (privcnt.cover*100L)/sum);
   2.827 +	for (i=0; i < 64; i++) if (privcnt.Mpriv_cnt[i]) {
   2.828 +		if (!Mpriv_str[i]) printf("PRIVSTRING NULL!!\r\n");
   2.829 +		else printf("%10d  %s [%d%%]\r\n", privcnt.Mpriv_cnt[i],
   2.830 +			Mpriv_str[i], (privcnt.Mpriv_cnt[i]*100L)/sum);
   2.831 +		if (i == 0x24) { // mov from CR
   2.832 +			printf("            [");
   2.833 +			for (j=0; j < 128; j++) if (from_cr_cnt[j]) {
   2.834 +				if (!cr_str[j])
   2.835 +					printf("PRIVSTRING NULL!!\r\n");
   2.836 +				printf("%s(%d),",cr_str[j],from_cr_cnt[j]);
   2.837 +			}
   2.838 +			printf("]\r\n");
   2.839 +		}
   2.840 +		else if (i == 0x2c) { // mov to CR
   2.841 +			printf("            [");
   2.842 +			for (j=0; j < 128; j++) if (to_cr_cnt[j]) {
   2.843 +				if (!cr_str[j])
   2.844 +					printf("PRIVSTRING NULL!!\r\n");
   2.845 +				printf("%s(%d),",cr_str[j],to_cr_cnt[j]);
   2.846 +			}
   2.847 +			printf("]\r\n");
   2.848 +		}
   2.849 +	}
   2.850 +}
   2.851 +
   2.852 +void zero_privop_counts(void)
   2.853 +{
   2.854 +	int i, j;
   2.855 +
   2.856 +	// this is ugly and should probably produce sorted output
   2.857 +	// but it will have to do for now
   2.858 +	printf("Zeroing privop statistics\r\n");
   2.859 +	privcnt.mov_to_ar_imm = 0; privcnt.mov_to_ar_reg = 0;
   2.860 +	privcnt.ssm = 0; privcnt.rsm = 0;
   2.861 +	privcnt.rfi = 0; privcnt.bsw0 = 0;
   2.862 +	privcnt.bsw1 = 0; privcnt.cover = 0;
   2.863 +	for (i=0; i < 64; i++) privcnt.Mpriv_cnt[i] = 0;
   2.864 +	for (j=0; j < 128; j++) from_cr_cnt[j] = 0;
   2.865 +	for (j=0; j < 128; j++) to_cr_cnt[j] = 0;
   2.866 +}
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/arch/ia64/process.c	Mon Nov 22 22:57:05 2004 +0000
     3.3 @@ -0,0 +1,836 @@
     3.4 +/*
     3.5 + * Miscellaneous process/domain related routines
     3.6 + * 
     3.7 + * Copyright (C) 2004 Hewlett-Packard Co.
     3.8 + *	Dan Magenheimer (dan.magenheimer@hp.com)
     3.9 + *
    3.10 + */
    3.11 +
    3.12 +#include <xen/config.h>
    3.13 +#include <xen/lib.h>
    3.14 +#include <xen/errno.h>
    3.15 +#include <xen/sched.h>
    3.16 +#include <xen/smp.h>
    3.17 +#include <asm/ptrace.h>
    3.18 +#include <xen/delay.h>
    3.19 +
    3.20 +#include <linux/efi.h>	/* FOR EFI_UNIMPLEMENTED */
    3.21 +#include <asm/sal.h>	/* FOR struct ia64_sal_retval */
    3.22 +
    3.23 +#include <asm/system.h>
    3.24 +#include <asm/io.h>
    3.25 +#include <asm/processor.h>
    3.26 +#include <asm/desc.h>
    3.27 +#include <asm/ldt.h>
    3.28 +#include <xen/irq.h>
    3.29 +#include <xen/event.h>
    3.30 +#include <asm/regionreg.h>
    3.31 +#include <asm/privop.h>
    3.32 +#include <asm/vcpu.h>
    3.33 +#include <asm/ia64_int.h>
    3.34 +#include <asm/hpsim_ssc.h>
    3.35 +#include <asm/dom_fw.h>
    3.36 +
    3.37 +extern struct ia64_sal_retval pal_emulator_static(UINT64);
    3.38 +extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
    3.39 +
    3.40 +extern unsigned long dom0_start, dom0_size;
    3.41 +
    3.42 +#define IA64_PSR_CPL1	(__IA64_UL(1) << IA64_PSR_CPL1_BIT)
     3.43 +// note: IA64_PSR_PK removed from the following; why is this necessary?
    3.44 +#define	DELIVER_PSR_SET	(IA64_PSR_IC | IA64_PSR_I | \
    3.45 +			IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
    3.46 +			IA64_PSR_IT | IA64_PSR_BN)
    3.47 +
    3.48 +#define	DELIVER_PSR_CLR	(IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
    3.49 +			IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI |	\
    3.50 +			IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
    3.51 +			IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
    3.52 +			IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
    3.53 +			IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
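          +// These set/clear masks are applied to the guest's ipsr when a fault or
          +// interrupt is reflected, mimicking the psr transformation the hardware
          +// performs on interruption delivery.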
    3.54 +
    3.55 +#define PSCB(x)	x->shared_info->arch
    3.56 +
    3.57 +extern unsigned long vcpu_verbose;
    3.58 +
    3.59 +long do_iopl(domid_t domain, unsigned int new_io_pl)
    3.60 +{
    3.61 +	dummy();
    3.62 +	return 0;
    3.63 +}
    3.64 +
    3.65 +void schedule_tail(struct domain *next)
    3.66 +{
    3.67 +	unsigned long rr7;
    3.68 +	printk("current=%lx,shared_info=%lx\n",current,current->shared_info);
    3.69 +	printk("next=%lx,shared_info=%lx\n",next,next->shared_info);
     3.70 +	if ((rr7 = load_region_regs(current))) {
    3.71 +		printk("schedule_tail: change to rr7 not yet implemented\n");
    3.72 +	}
    3.73 +}
    3.74 +
    3.75 +extern TR_ENTRY *match_tr(struct domain *d, unsigned long ifa);
    3.76 +
    3.77 +void tdpfoo(void) { }
    3.78 +
    3.79 +// given a domain virtual address, pte and pagesize, extract the metaphysical
    3.80 +// address, convert the pte for a physical address for (possibly different)
    3.81 +// Xen PAGE_SIZE and return modified pte.  (NOTE: TLB insert should use
    3.82 +// PAGE_SIZE!)
    3.83 +unsigned long translate_domain_pte(unsigned long pteval,
    3.84 +	unsigned long address, unsigned long itir)
    3.85 +{
    3.86 +	struct domain *d = (struct domain *) current;
    3.87 +	unsigned long mask, pteval2, mpaddr;
    3.88 +	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
    3.89 +	extern struct domain *dom0;
    3.90 +	extern unsigned long dom0_start, dom0_size;
    3.91 +
    3.92 +	// FIXME address had better be pre-validated on insert
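          +	// itir{7:2} is the translation's page size; the mask selects the
          +	// offset bits within that (domain-sized) page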
    3.93 +	mask = (1L << ((itir >> 2) & 0x3f)) - 1;
    3.94 +	mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
    3.95 +	if (d == dom0) {
    3.96 +		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
    3.97 +			//printk("translate_domain_pte: out-of-bounds dom0 mpaddr %p! itc=%lx...\n",mpaddr,ia64_get_itc());
    3.98 +			tdpfoo();
    3.99 +		}
   3.100 +	}
   3.101 +	else if ((mpaddr >> PAGE_SHIFT) > d->max_pages) {
   3.102 +		printf("translate_domain_pte: bad mpa=%p (> %p),vadr=%p,pteval=%p,itir=%p\n",
   3.103 +			mpaddr,d->max_pages<<PAGE_SHIFT,address,pteval,itir);
   3.104 +		tdpfoo();
   3.105 +	}
   3.106 +	pteval2 = lookup_domain_mpa(d,mpaddr);
   3.107 +	pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
   3.108 +	pteval2 |= _PAGE_PL_2; // force PL0->2 (PL3 is unaffected)
   3.109 +	pteval2 = (pteval & ~_PAGE_PPN_MASK) | pteval2;
   3.110 +	return pteval2;
   3.111 +}
   3.112 +
   3.113 +// given a current domain metaphysical address, return the physical address
   3.114 +unsigned long translate_domain_mpaddr(unsigned long mpaddr)
   3.115 +{
   3.116 +	extern unsigned long lookup_domain_mpa(struct domain *,unsigned long);
   3.117 +	unsigned long pteval;
   3.118 +
   3.119 +	if (current == dom0) {
   3.120 +		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   3.121 +			printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr %p! continuing...\n",mpaddr);
   3.122 +			tdpfoo();
   3.123 +		}
   3.124 +	}
   3.125 +	pteval = lookup_domain_mpa(current,mpaddr);
   3.126 +	return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
   3.127 +}
   3.128 +
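          +// Reflect an interruption into the domain: save the interruption state
          +// (ifa, isr, iim/itir, iip, ipsr, ifs) into the shared PSCB, switch to
          +// register bank 0, and vector through the guest's iva.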
   3.129 +void reflect_interruption(unsigned long ifa, unsigned long isr, unsigned long itiriim, struct pt_regs *regs, unsigned long vector)
   3.130 +{
   3.131 +	unsigned long vcpu_get_ipsr_int_state(struct domain *,unsigned long);
   3.132 +	unsigned long vcpu_get_rr_ve(struct domain *,unsigned long);
   3.133 +	unsigned long vcpu_get_itir_on_fault(struct domain *,unsigned long);
   3.134 +	struct domain *d = (struct domain *) current;
   3.135 +
   3.136 +	if (vector == IA64_EXTINT_VECTOR) {
   3.137 +		
   3.138 +		extern unsigned long vcpu_verbose, privop_trace;
    3.139 +		static int first_extint = 1;
   3.140 +		if (first_extint) {
   3.141 +			printf("Delivering first extint to domain: ifa=%p, isr=%p, itir=%p, iip=%p\n",ifa,isr,itiriim,regs->cr_iip);
   3.142 +			//privop_trace = 1; vcpu_verbose = 1;
   3.143 +			first_extint = 0;
   3.144 +		}
   3.145 +	}
   3.146 +	if (!PSCB(d).interrupt_collection_enabled) {
   3.147 +		if (!(PSCB(d).ipsr & IA64_PSR_DT)) {
   3.148 +			printf("psr.dt off, trying to deliver nested dtlb!\n");
   3.149 +			while(1);
   3.150 +		}
   3.151 +		vector &= ~0xf;
   3.152 +		if (vector != IA64_DATA_TLB_VECTOR &&
    3.153 +		    vector != IA64_INST_TLB_VECTOR) {
   3.154 +printf("psr.ic off, delivering fault=%lx,iip=%p,isr=%p,PSCB.iip=%p\n",
   3.155 +	vector,regs->cr_iip,isr,PSCB(d).iip);
   3.156 +			while(1);
   3.157 +			
   3.158 +		}
   3.159 +//printf("Delivering NESTED DATA TLB fault\n");
   3.160 +		vector = IA64_DATA_NESTED_TLB_VECTOR;
   3.161 +		regs->cr_iip = ((unsigned long) PSCB(d).iva + vector) & ~0xffUL;
   3.162 +		regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   3.163 +// NOTE: nested trap must NOT pass PSCB address
   3.164 +		//regs->r31 = (unsigned long) &PSCB(d);
   3.165 +		return;
   3.166 +
   3.167 +	}
   3.168 +	if ((vector & 0xf) != IA64_FORCED_IFA) PSCB(d).ifa = ifa;
   3.169 +	else ifa = PSCB(d).ifa;
   3.170 +	vector &= ~0xf;
   3.171 +//	always deliver on ALT vector (for now?) because no VHPT
   3.172 +//	if (!vcpu_get_rr_ve(d,ifa)) {
   3.173 +		if (vector == IA64_DATA_TLB_VECTOR)
   3.174 +			vector = IA64_ALT_DATA_TLB_VECTOR;
   3.175 +		else if (vector == IA64_INST_TLB_VECTOR)
   3.176 +			vector = IA64_ALT_INST_TLB_VECTOR;
   3.177 +//	}
   3.178 +	PSCB(d).unat = regs->ar_unat;  // not sure if this is really needed?
   3.179 +	PSCB(d).precover_ifs = regs->cr_ifs;
   3.180 +	vcpu_bsw0(d);
   3.181 +	PSCB(d).ipsr = vcpu_get_ipsr_int_state(d,regs->cr_ipsr);
   3.182 +	if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
   3.183 +		PSCB(d).iim = itiriim;
   3.184 +	else PSCB(d).itir = vcpu_get_itir_on_fault(d,ifa);
   3.185 +	PSCB(d).isr = isr; // this is unnecessary except for interrupts!
   3.186 +	PSCB(d).iip = regs->cr_iip;
   3.187 +	PSCB(d).ifs = 0;
   3.188 +	PSCB(d).incomplete_regframe = 0;
   3.189 +
   3.190 +	regs->cr_iip = ((unsigned long) PSCB(d).iva + vector) & ~0xffUL;
   3.191 +	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   3.192 +// FIXME: NEED TO PASS PSCB, BUT **NOT** IN R31 WHICH IS BEING USED FOR ar.pr
   3.193 +// IN ANY CASE, PASS PINNED ADDRESS, NOT THIS ONE
   3.194 +	//regs->r31 = (unsigned long) &PSCB(d);
   3.195 +
   3.196 +	PSCB(d).interrupt_delivery_enabled = 0;
   3.197 +	PSCB(d).interrupt_collection_enabled = 0;
   3.198 +}
   3.199 +
   3.200 +void foodpi(void) {}
   3.201 +
   3.202 +// ONLY gets called from ia64_leave_kernel
   3.203 +// ONLY call with interrupts disabled?? (else might miss one?)
   3.204 +// NEVER successful if already reflecting a trap/fault because psr.i==0
   3.205 +void deliver_pending_interrupt(struct pt_regs *regs)
   3.206 +{
   3.207 +	struct domain *d = (struct domain *) current;
   3.208 +	// FIXME: Will this work properly if doing an RFI???
   3.209 +	if (!is_idle_task(d) && user_mode(regs)) {
   3.210 +		vcpu_poke_timer(d);
   3.211 +		if (vcpu_deliverable_interrupts(d)) {
   3.212 +			unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
   3.213 +			foodpi();
   3.214 +			reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
   3.215 +		}
   3.216 +	}
   3.217 +}
   3.218 +
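          +// "Lazy cover": if the guest faulted with interrupt collection off and an
          +// incomplete register frame, stash cr.ifs in the PSCB and retry the
          +// instruction with cr.ifs cleared instead of reflecting the fault.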
   3.219 +int handle_lazy_cover(struct domain *d, unsigned long isr, struct pt_regs *regs)
   3.220 +{
   3.221 +	if (!PSCB(d).interrupt_collection_enabled) {
   3.222 +		if (isr & IA64_ISR_IR) {
   3.223 +//			printf("Handling lazy cover\n");
   3.224 +			PSCB(d).ifs = regs->cr_ifs;
   3.225 +			PSCB(d).incomplete_regframe = 1;
   3.226 +			regs->cr_ifs = 0;
   3.227 +			return(1); // retry same instruction with cr.ifs off
   3.228 +		}
   3.229 +	}
   3.230 +	return(0);
   3.231 +}
   3.232 +
   3.233 +#define IS_XEN_ADDRESS(d,a) ((a >= d->xen_vastart) && (a <= d->xen_vaend))
   3.234 +
   3.235 +void xen_handle_domain_access(unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
   3.236 +{
   3.237 +	struct domain *d = (struct domain *) current;
   3.238 +	TR_ENTRY *trp;
   3.239 +	unsigned long psr = regs->cr_ipsr, mask, flags;
   3.240 +	unsigned long iip = regs->cr_iip;
   3.241 +	// FIXME should validate address here
   3.242 +	unsigned long pteval, mpaddr;
   3.243 +	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
   3.244 +	IA64FAULT fault;
   3.245 +	extern void __get_domain_bundle(void);
   3.246 +
   3.247 +// NEED TO HANDLE THREE CASES:
   3.248 +// 1) domain is in metaphysical mode
   3.249 +// 2) domain address is in TR
   3.250 +// 3) domain address is not in TR (reflect data miss)
   3.251 +
    3.252 +	// got here trying to read a privop bundle
    3.253 +	//if (d->metaphysical_mode) {
    3.254 +	if (d->metaphysical_mode && !(address>>61)) {  //FIXME
   3.255 +		if (d == dom0) {
   3.256 +			if (address < dom0_start || address >= dom0_start + dom0_size) {
   3.257 +				printk("xen_handle_domain_access: out-of-bounds"
   3.258 +				   "dom0 mpaddr %p! continuing...\n",mpaddr);
   3.259 +				tdpfoo();
   3.260 +			}
   3.261 +		}
   3.262 +		pteval = lookup_domain_mpa(d,address);
   3.263 +		//FIXME: check return value?
   3.264 +		// would be nice to have a counter here
   3.265 +		vcpu_itc_no_srlz(d,2,address,pteval,PAGE_SHIFT);
   3.266 +		return;
   3.267 +	}
    3.268 +	if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n",address,iip);
   3.269 +	if (*(unsigned long *)__get_domain_bundle != iip) {
   3.270 +		printf("Bad user space access @%p ",address);
   3.271 +		printf("iip=%p, ipsr=%p, b0=%p\n",iip,psr,regs->b0);
   3.272 +		while(1);
   3.273 +	}
   3.274 +		
   3.275 +	fault = vcpu_tpa(d,address,&mpaddr);
   3.276 +	if (fault != IA64_NO_FAULT) {
   3.277 +		// this is hardcoded to handle __get_domain_bundle only
   3.278 +		regs->r8 = 0; regs->r9 = 0;
   3.279 +		regs->cr_iip += 0x20;
   3.280 +		//regs->cr_iip |= (2UL << IA64_PSR_RI_BIT);
   3.281 +		return;
   3.282 +	}
   3.283 +	if (d == dom0) {
   3.284 +		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   3.285 +			printk("xen_handle_domain_access: vcpu_tpa returned out-of-bounds dom0 mpaddr %p! continuing...\n",mpaddr);
   3.286 +			tdpfoo();
   3.287 +		}
   3.288 +	}
   3.289 +	pteval = lookup_domain_mpa(d,mpaddr);
   3.290 +	// would be nice to have a counter here
   3.291 +	//printf("Handling privop data TLB miss\n");
   3.292 +	// FIXME, must be inlined or potential for nested fault here!
   3.293 +	vcpu_itc_no_srlz(d,2,address,pteval,PAGE_SHIFT);
   3.294 +}
   3.295 +
   3.296 +void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
   3.297 +{
   3.298 +	struct domain *d = (struct domain *) current;
   3.299 +	TR_ENTRY *trp;
   3.300 +	unsigned long psr = regs->cr_ipsr, mask, flags;
   3.301 +	unsigned long iip = regs->cr_iip;
   3.302 +	// FIXME should validate address here
   3.303 +	unsigned long pteval, mpaddr;
   3.304 +	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
   3.305 +	unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
   3.306 +	unsigned long vector;
   3.307 +	IA64FAULT fault;
   3.308 +
   3.309 +
    3.310 +	// The right way is to put it in the VHPT and take another miss!
   3.311 +
   3.312 +	// weak attempt to avoid doing both I/D tlb insert to avoid
   3.313 +	// problems for privop bundle fetch, doesn't work, deal with later
   3.314 +	if (IS_XEN_ADDRESS(d,iip) && !IS_XEN_ADDRESS(d,address)) {
   3.315 +		xen_handle_domain_access(address, isr, regs, itir);
   3.316 +
   3.317 +		return;
   3.318 +	}
   3.319 +
   3.320 +	// FIXME: no need to pass itir in to this routine as we need to
   3.321 +	// compute the virtual itir anyway (based on domain's RR.ps)
   3.322 +	// AND ACTUALLY reflect_interruption doesn't use it anyway!
   3.323 +	itir = vcpu_get_itir_on_fault(d,address);
   3.324 +
   3.325 +	if (d->metaphysical_mode && (is_data || !(address>>61))) {  //FIXME
   3.326 +		// FIXME should validate mpaddr here
   3.327 +		if (d == dom0) {
   3.328 +			if (address < dom0_start || address >= dom0_start + dom0_size) {
   3.329 +				printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, iip=%p! continuing...\n",address,iip);
   3.330 +				printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n",address,d->shared_info->arch.iip);
   3.331 +				tdpfoo();
   3.332 +			}
   3.333 +		}
   3.334 +		pteval = lookup_domain_mpa(d,address);
   3.335 +		// FIXME, must be inlined or potential for nested fault here!
   3.336 +		vcpu_itc_no_srlz(d,is_data?2:1,address,pteval,PAGE_SHIFT);
   3.337 +		return;
   3.338 +	}
    3.339 +	if ((trp = match_tr(d,address))) {
   3.340 +		// FIXME address had better be pre-validated on insert
   3.341 +		pteval = translate_domain_pte(trp->page_flags,address,trp->itir);
   3.342 +		vcpu_itc_no_srlz(d,is_data?2:1,address,pteval,(trp->itir>>2)&0x3f);
   3.343 +		return;
   3.344 +	}
   3.345 +	vector = is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
   3.346 +	if (handle_lazy_cover(d, isr, regs)) return;
    3.347 +	if (!(address>>61)) { printf("ia64_do_page_fault: @%p???, iip=%p, itc=%p (spinning...)\n",address,iip,ia64_get_itc()); while(1); }
   3.348 +	if ((isr & IA64_ISR_SP)
   3.349 +	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
   3.350 +	{
   3.351 +		/*
   3.352 +		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
   3.353 +		 * bit in the psr to ensure forward progress.  (Target register will get a
   3.354 +		 * NaT for ld.s, lfetch will be canceled.)
   3.355 +		 */
   3.356 +		ia64_psr(regs)->ed = 1;
   3.357 +		return;
   3.358 +	}
   3.359 +	reflect_interruption(address, isr, itir, regs, vector);
   3.360 +}
   3.361 +
   3.362 +void
   3.363 +ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
   3.364 +	    unsigned long iim, unsigned long itir, unsigned long arg5,
   3.365 +	    unsigned long arg6, unsigned long arg7, unsigned long stack)
   3.366 +{
   3.367 +	struct pt_regs *regs = (struct pt_regs *) &stack;
   3.368 +	unsigned long code, error = isr;
   3.369 +	char buf[128];
    3.370 +	int result = 0, sig;	// only the (disabled) fpu-swa call sets result
   3.371 +	static const char *reason[] = {
   3.372 +		"IA-64 Illegal Operation fault",
   3.373 +		"IA-64 Privileged Operation fault",
   3.374 +		"IA-64 Privileged Register fault",
   3.375 +		"IA-64 Reserved Register/Field fault",
   3.376 +		"Disabled Instruction Set Transition fault",
   3.377 +		"Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
   3.378 +		"Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
   3.379 +		"Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
   3.380 +	};
   3.381 +#if 0
   3.382 +printf("ia64_fault, vector=0x%p, ifa=%p, iip=%p, ipsr=%p, isr=%p\n",
   3.383 + vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);
   3.384 +#endif
   3.385 +
   3.386 +	if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
   3.387 +		/*
   3.388 +		 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
   3.389 +		 * the lfetch.
   3.390 +		 */
   3.391 +		ia64_psr(regs)->ed = 1;
   3.392 +		printf("ia64_fault: handled lfetch.fault\n");
   3.393 +		return;
   3.394 +	}
   3.395 +
   3.396 +	switch (vector) {
   3.397 +	      case 24: /* General Exception */
   3.398 +		code = (isr >> 4) & 0xf;
   3.399 +		sprintf(buf, "General Exception: %s%s", reason[code],
   3.400 +			(code == 3) ? ((isr & (1UL << 37))
   3.401 +				       ? " (RSE access)" : " (data access)") : "");
   3.402 +		if (code == 8) {
   3.403 +# ifdef CONFIG_IA64_PRINT_HAZARDS
   3.404 +			printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
   3.405 +			       current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri,
   3.406 +			       regs->pr);
   3.407 +# endif
   3.408 +			printf("ia64_fault: returning on hazard\n");
   3.409 +			return;
   3.410 +		}
   3.411 +		break;
   3.412 +
   3.413 +	      case 25: /* Disabled FP-Register */
   3.414 +		if (isr & 2) {
   3.415 +			//disabled_fph_fault(regs);
   3.416 +			//return;
   3.417 +		}
   3.418 +		sprintf(buf, "Disabled FPL fault---not supposed to happen!");
   3.419 +		break;
   3.420 +
   3.421 +	      case 26: /* NaT Consumption */
   3.422 +		if (user_mode(regs)) {
   3.423 +			void *addr;
   3.424 +
   3.425 +			if (((isr >> 4) & 0xf) == 2) {
   3.426 +				/* NaT page consumption */
   3.427 +				//sig = SIGSEGV;
   3.428 +				//code = SEGV_ACCERR;
   3.429 +				addr = (void *) ifa;
   3.430 +			} else {
   3.431 +				/* register NaT consumption */
   3.432 +				//sig = SIGILL;
   3.433 +				//code = ILL_ILLOPN;
   3.434 +				addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
   3.435 +			}
   3.436 +			//siginfo.si_signo = sig;
   3.437 +			//siginfo.si_code = code;
   3.438 +			//siginfo.si_errno = 0;
   3.439 +			//siginfo.si_addr = addr;
   3.440 +			//siginfo.si_imm = vector;
   3.441 +			//siginfo.si_flags = __ISR_VALID;
   3.442 +			//siginfo.si_isr = isr;
   3.443 +			//force_sig_info(sig, &siginfo, current);
   3.444 +			//return;
   3.445 +		} //else if (ia64_done_with_exception(regs))
   3.446 +			//return;
   3.447 +		sprintf(buf, "NaT consumption");
   3.448 +		break;
   3.449 +
   3.450 +	      case 31: /* Unsupported Data Reference */
   3.451 +		if (user_mode(regs)) {
   3.452 +			//siginfo.si_signo = SIGILL;
   3.453 +			//siginfo.si_code = ILL_ILLOPN;
   3.454 +			//siginfo.si_errno = 0;
   3.455 +			//siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
   3.456 +			//siginfo.si_imm = vector;
   3.457 +			//siginfo.si_flags = __ISR_VALID;
   3.458 +			//siginfo.si_isr = isr;
   3.459 +			//force_sig_info(SIGILL, &siginfo, current);
   3.460 +			//return;
   3.461 +		}
   3.462 +		sprintf(buf, "Unsupported data reference");
   3.463 +		break;
   3.464 +
   3.465 +	      case 29: /* Debug */
   3.466 +	      case 35: /* Taken Branch Trap */
   3.467 +	      case 36: /* Single Step Trap */
   3.468 +		//if (fsys_mode(current, regs)) {}
   3.469 +		switch (vector) {
   3.470 +		      case 29:
   3.471 +			//siginfo.si_code = TRAP_HWBKPT;
   3.472 +#ifdef CONFIG_ITANIUM
   3.473 +			/*
   3.474 +			 * Erratum 10 (IFA may contain incorrect address) now has
   3.475 +			 * "NoFix" status.  There are no plans for fixing this.
   3.476 +			 */
   3.477 +			if (ia64_psr(regs)->is == 0)
   3.478 +			  ifa = regs->cr_iip;
   3.479 +#endif
   3.480 +			break;
   3.481 +		      case 35: ifa = 0; break;
   3.482 +		      case 36: ifa = 0; break;
   3.483 +		      //case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
   3.484 +		      //case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
   3.485 +		}
   3.486 +		//siginfo.si_signo = SIGTRAP;
   3.487 +		//siginfo.si_errno = 0;
   3.488 +		//siginfo.si_addr  = (void *) ifa;
   3.489 +		//siginfo.si_imm   = 0;
   3.490 +		//siginfo.si_flags = __ISR_VALID;
   3.491 +		//siginfo.si_isr   = isr;
   3.492 +		//force_sig_info(SIGTRAP, &siginfo, current);
   3.493 +		//return;
   3.494 +
   3.495 +	      case 32: /* fp fault */
   3.496 +	      case 33: /* fp trap */
   3.497 +		//result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
   3.498 +		if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
   3.499 +			//siginfo.si_signo = SIGFPE;
   3.500 +			//siginfo.si_errno = 0;
   3.501 +			//siginfo.si_code = FPE_FLTINV;
   3.502 +			//siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
   3.503 +			//siginfo.si_flags = __ISR_VALID;
   3.504 +			//siginfo.si_isr = isr;
   3.505 +			//siginfo.si_imm = 0;
   3.506 +			//force_sig_info(SIGFPE, &siginfo, current);
   3.507 +		}
   3.508 +		//return;
   3.509 +		sprintf(buf, "FP fault/trap");
   3.510 +		break;
   3.511 +
   3.512 +	      case 34:
   3.513 +		if (isr & 0x2) {
   3.514 +			/* Lower-Privilege Transfer Trap */
   3.515 +			/*
   3.516 +			 * Just clear PSR.lp and then return immediately: all the
    3.517 +			 * interesting work (e.g., signal delivery) is done in the
    3.518 +			 * kernel exit path.
   3.519 +			 */
   3.520 +			//ia64_psr(regs)->lp = 0;
   3.521 +			//return;
   3.522 +			sprintf(buf, "Lower-Privilege Transfer trap");
   3.523 +		} else {
   3.524 +			/* Unimplemented Instr. Address Trap */
   3.525 +			if (user_mode(regs)) {
   3.526 +				//siginfo.si_signo = SIGILL;
   3.527 +				//siginfo.si_code = ILL_BADIADDR;
   3.528 +				//siginfo.si_errno = 0;
   3.529 +				//siginfo.si_flags = 0;
   3.530 +				//siginfo.si_isr = 0;
   3.531 +				//siginfo.si_imm = 0;
   3.532 +				//siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
   3.533 +				//force_sig_info(SIGILL, &siginfo, current);
   3.534 +				//return;
   3.535 +			}
   3.536 +			sprintf(buf, "Unimplemented Instruction Address fault");
   3.537 +		}
   3.538 +		break;
   3.539 +
   3.540 +	      case 45:
   3.541 +		printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
   3.542 +		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
   3.543 +		       regs->cr_iip, ifa, isr);
   3.544 +		//force_sig(SIGSEGV, current);
   3.545 +		break;
   3.546 +
   3.547 +	      case 46:
   3.548 +		printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
   3.549 +		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
   3.550 +		       regs->cr_iip, ifa, isr, iim);
   3.551 +		//force_sig(SIGSEGV, current);
   3.552 +		return;
   3.553 +
   3.554 +	      case 47:
   3.555 +		sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
   3.556 +		break;
   3.557 +
   3.558 +	      default:
   3.559 +		sprintf(buf, "Fault %lu", vector);
   3.560 +		break;
   3.561 +	}
   3.562 +	//die_if_kernel(buf, regs, error);
    3.563 +	printk("ia64_fault: %s: reflecting\n", buf);
    3.564 +	reflect_interruption(ifa, isr, iim, regs, IA64_GENEX_VECTOR);
   3.565 +//while(1);
   3.566 +	//force_sig(SIGILL, current);
   3.567 +}
   3.568 +
   3.569 +unsigned long running_on_sim = 0;
   3.570 +
   3.571 +void
   3.572 +do_ssc(unsigned long ssc, struct pt_regs *regs)
   3.573 +{
   3.574 +	extern unsigned long lookup_domain_mpa(struct domain *,unsigned long);
   3.575 +	unsigned long arg0, arg1, arg2, arg3, retval;
   3.576 +	char buf[2];
   3.577 +/**/	static int last_fd, last_count;	// FIXME FIXME FIXME
   3.578 +/**/					// BROKEN FOR MULTIPLE DOMAINS & SMP
   3.579 +/**/	struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
   3.580 +	extern unsigned long vcpu_verbose, privop_trace;
   3.581 +
   3.582 +	arg0 = vcpu_get_gr(current,32);
   3.583 +	switch(ssc) {
   3.584 +	    case SSC_PUTCHAR:
   3.585 +		buf[0] = arg0;
   3.586 +		buf[1] = '\0';
    3.587 +		printf("%s", buf);	// buf holds a guest-supplied byte; never use it as a format string
   3.588 +		break;
   3.589 +	    case SSC_GETCHAR:
   3.590 +		retval = ia64_ssc(0,0,0,0,ssc);
   3.591 +		vcpu_set_gr(current,8,retval);
   3.592 +		break;
   3.593 +	    case SSC_WAIT_COMPLETION:
   3.594 +		if (arg0) {	// metaphysical address
   3.595 +
   3.596 +			arg0 = translate_domain_mpaddr(arg0);
   3.597 +/**/			stat = (struct ssc_disk_stat *)__va(arg0);
   3.598 +///**/			if (stat->fd == last_fd) stat->count = last_count;
   3.599 +/**/			stat->count = last_count;
   3.600 +//if (last_count >= PAGE_SIZE) printf("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
   3.601 +///**/			retval = ia64_ssc(arg0,0,0,0,ssc);
   3.602 +/**/			retval = 0;
   3.603 +		}
   3.604 +		else retval = -1L;
   3.605 +		vcpu_set_gr(current,8,retval);
   3.606 +		break;
   3.607 +	    case SSC_OPEN:
   3.608 +		arg1 = vcpu_get_gr(current,33);	// access rights
    3.609 +		if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware.  (ignoring...)\n"); arg0 = 0; }
   3.610 +		if (arg0) {	// metaphysical address
   3.611 +			arg0 = translate_domain_mpaddr(arg0);
   3.612 +			retval = ia64_ssc(arg0,arg1,0,0,ssc);
   3.613 +		}
   3.614 +		else retval = -1L;
   3.615 +		vcpu_set_gr(current,8,retval);
   3.616 +		break;
   3.617 +	    case SSC_WRITE:
   3.618 +	    case SSC_READ:
   3.619 +//if (ssc == SSC_WRITE) printf("DOING AN SSC_WRITE\n");
   3.620 +		arg1 = vcpu_get_gr(current,33);
   3.621 +		arg2 = vcpu_get_gr(current,34);
   3.622 +		arg3 = vcpu_get_gr(current,35);
   3.623 +		if (arg2) {	// metaphysical address of descriptor
   3.624 +			struct ssc_disk_req *req;
   3.625 +			unsigned long mpaddr, paddr;
   3.626 +			long len;
   3.627 +
   3.628 +			arg2 = translate_domain_mpaddr(arg2);
    3.629 +			req = (struct ssc_disk_req *)__va(arg2);
   3.630 +			req->len &= 0xffffffffL;	// avoid strange bug
   3.631 +			len = req->len;
   3.632 +/**/			last_fd = arg1;
   3.633 +/**/			last_count = len;
   3.634 +			mpaddr = req->addr;
   3.635 +//if (last_count >= PAGE_SIZE) printf("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
   3.636 +			retval = 0;
   3.637 +			if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
   3.638 +				// do partial page first
   3.639 +				req->addr = translate_domain_mpaddr(mpaddr);
   3.640 +				req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
   3.641 +				len -= req->len; mpaddr += req->len;
   3.642 +				retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
   3.643 +				arg3 += req->len; // file offset
   3.644 +/**/				last_stat.fd = last_fd;
   3.645 +/**/				(void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
   3.646 +//if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
   3.647 +			}
   3.648 +			if (retval >= 0) while (len > 0) {
   3.649 +				req->addr = translate_domain_mpaddr(mpaddr);
   3.650 +				req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
   3.651 +				len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
   3.652 +				retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
   3.653 +				arg3 += req->len; // file offset
   3.654 +// TEMP REMOVED AGAIN				arg3 += req->len; // file offset
   3.655 +/**/				last_stat.fd = last_fd;
   3.656 +/**/				(void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
   3.657 +//if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
   3.658 +			}
   3.659 +			// set it back to the original value
   3.660 +			req->len = last_count;
   3.661 +		}
   3.662 +		else retval = -1L;
   3.663 +		vcpu_set_gr(current,8,retval);
   3.664 +//if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
   3.665 +		break;
   3.666 +	    case SSC_CONNECT_INTERRUPT:
   3.667 +		arg1 = vcpu_get_gr(current,33);
   3.668 +		arg2 = vcpu_get_gr(current,34);
   3.669 +		arg3 = vcpu_get_gr(current,35);
   3.670 +		if (!running_on_sim) { printf("SSC_CONNECT_INTERRUPT, not implemented on hardware.  (ignoring...)\n"); break; }
   3.671 +		(void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
   3.672 +		break;
   3.673 +	    case SSC_NETDEV_PROBE:
   3.674 +		vcpu_set_gr(current,8,-1L);
   3.675 +		break;
   3.676 +	    default:
   3.677 +		printf("ia64_handle_break: bad ssc code %lx\n",ssc);
   3.678 +		break;
   3.679 +	}
   3.680 +	vcpu_increment_iip(current);
   3.681 +}
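
/* Illustrative stand-alone sketch of the SSC_READ/SSC_WRITE chunking in
 * do_ssc() above: a request crossing page boundaries is split into a
 * partial leading chunk, then whole pages, because each metaphysical page
 * must be translated separately.  The 16KB PAGE_SIZE and the name
 * split_request are assumptions for illustration, not Xen definitions. */
#include <stdio.h>

#define PAGE_SIZE 16384UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static void split_request(unsigned long mpaddr, long len)
{
	/* partial page first, if the range crosses a page boundary */
	if ((mpaddr & PAGE_MASK) != ((mpaddr + len - 1) & PAGE_MASK)) {
		unsigned long chunk = PAGE_SIZE - (mpaddr & ~PAGE_MASK);
		printf("partial: addr=%#lx len=%#lx\n", mpaddr, chunk);
		len -= chunk; mpaddr += chunk;
	}
	while (len > 0) {	/* then one chunk per page */
		unsigned long chunk = (len > (long)PAGE_SIZE) ? PAGE_SIZE : (unsigned long)len;
		printf("page:    addr=%#lx len=%#lx\n", mpaddr, chunk);
		len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
	}
}

int main(void)
{
	split_request(0x103f00, 0x9000);	/* spans several 16KB pages */
	return 0;
}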
   3.682 +
   3.683 +void fooefi(void) {}
   3.684 +
   3.685 +void
   3.686 +ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
   3.687 +{
   3.688 +	static int first_time = 1;
   3.689 +	struct domain *d = (struct domain *) current;
   3.690 +	extern unsigned long running_on_sim;
   3.691 +
   3.692 +	if (first_time) {
    3.693 +		running_on_sim = platform_is_hp_ski() ? 1 : 0;
   3.695 +		first_time = 0;
   3.696 +	}
   3.697 +	if (iim == 0x80001 || iim == 0x80002) {	//FIXME: don't hardcode constant
    3.698 +		do_ssc(vcpu_get_gr(current,36), regs);	// same path on sim and hardware, for now
   3.700 +	}
   3.701 +	else if (iim == d->breakimm) {
   3.702 +		struct ia64_sal_retval x;
   3.703 +		switch (regs->r2) {
   3.704 +		    case FW_HYPERCALL_PAL_CALL:
   3.705 +			//printf("*** PAL hypercall: index=%d\n",regs->r28);
   3.706 +			//FIXME: This should call a C routine
   3.707 +			x = pal_emulator_static(regs->r28);
   3.708 +			regs->r8 = x.status; regs->r9 = x.v0;
   3.709 +			regs->r10 = x.v1; regs->r11 = x.v2;
   3.710 +			break;
   3.711 +		    case FW_HYPERCALL_SAL_CALL:
   3.712 +			x = sal_emulator(vcpu_get_gr(d,32),vcpu_get_gr(d,33),
   3.713 +				vcpu_get_gr(d,34),vcpu_get_gr(d,35),
   3.714 +				vcpu_get_gr(d,36),vcpu_get_gr(d,37),
   3.715 +				vcpu_get_gr(d,38),vcpu_get_gr(d,39));
   3.716 +			regs->r8 = x.status; regs->r9 = x.v0;
   3.717 +			regs->r10 = x.v1; regs->r11 = x.v2;
   3.718 +			break;
   3.719 +		    case FW_HYPERCALL_EFI_RESET_SYSTEM:
   3.720 +			printf("efi.reset_system called ");
   3.721 +			if (current == dom0) {
   3.722 +				printf("(by dom0)\n ");
   3.723 +				(*efi.reset_system)(EFI_RESET_WARM,0,0,NULL);
    3.724 +			}
    3.725 +			else printf("(not supported for non-0 domain)\n");
   3.726 +			regs->r8 = EFI_UNSUPPORTED;
   3.727 +			break;
   3.728 +		    case FW_HYPERCALL_EFI_GET_TIME:
   3.729 +			{
   3.730 +			unsigned long *tv, *tc;
   3.731 +			fooefi();
    3.732 +			tv = (unsigned long *) vcpu_get_gr(d,32);
    3.733 +			tc = (unsigned long *) vcpu_get_gr(d,33);
    3.734 +			//printf("efi_get_time(%p,%p) called...",tv,tc);
    3.735 +			tv = __va(translate_domain_mpaddr((unsigned long) tv));
    3.736 +			if (tc) tc = __va(translate_domain_mpaddr((unsigned long) tc));
   3.737 +			regs->r8 = (*efi.get_time)(tv,tc);
   3.738 +			//printf("and returns %lx\n",regs->r8);
   3.739 +			}
   3.740 +			break;
   3.741 +		    case FW_HYPERCALL_EFI_SET_TIME:
   3.742 +		    case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
   3.743 +		    case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
   3.744 +			// FIXME: need fixes in efi.h from 2.6.9
   3.745 +		    case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
   3.746 +			// FIXME: WARNING!! IF THIS EVER GETS IMPLEMENTED
   3.747 +			// SOME OF THE OTHER EFI EMULATIONS WILL CHANGE AS 
   3.748 +			// POINTER ARGUMENTS WILL BE VIRTUAL!!
   3.749 +		    case FW_HYPERCALL_EFI_GET_VARIABLE:
   3.750 +			// FIXME: need fixes in efi.h from 2.6.9
   3.751 +		    case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
   3.752 +		    case FW_HYPERCALL_EFI_SET_VARIABLE:
   3.753 +		    case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
   3.754 +			// FIXME: need fixes in efi.h from 2.6.9
   3.755 +			regs->r8 = EFI_UNSUPPORTED;
   3.756 +			break;
   3.757 +		}
   3.758 +		vcpu_increment_iip(current);
   3.759 +	}
   3.760 +	else reflect_interruption(ifa,isr,iim,regs,IA64_BREAK_VECTOR);
   3.761 +}
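
/* Illustrative stand-alone sketch of the two-level dispatch in
 * ia64_handle_break() above: the break immediate distinguishes simulator
 * SSC immediates from the per-domain breakimm, and r2 then selects the
 * emulated firmware entry.  Every constant and name below is a made-up
 * stand-in, not a Xen definition. */
#include <stdio.h>

#define BREAKIMM	0x1000	/* hypothetical per-domain hypercall immediate */
#define SSC_IMM		0x80001	/* hypothetical simulator system call immediate */

enum fw_call { FW_PAL = 1, FW_SAL = 2, FW_EFI_GET_TIME = 3 };

static void handle_break(unsigned long iim, unsigned long r2)
{
	if (iim == SSC_IMM)
		printf("SSC request (simulator services)\n");
	else if (iim == BREAKIMM)
		switch (r2) {	/* r2 selects the emulated firmware call */
		case FW_PAL:		printf("PAL call\n"); break;
		case FW_SAL:		printf("SAL call\n"); break;
		case FW_EFI_GET_TIME:	printf("EFI get_time\n"); break;
		default:		printf("unknown fw call %lu\n", r2); break;
		}
	else
		printf("guest's own break: reflect to the domain\n");
}

int main(void)
{
	handle_break(BREAKIMM, FW_SAL);
	handle_break(0x42, 0);
	return 0;
}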
   3.762 +
   3.763 +void
   3.764 +ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
   3.765 +{
   3.766 +	IA64FAULT vector;
   3.767 +	struct domain *d = (struct domain *) current;
   3.768 +	// FIXME: no need to pass itir in to this routine as we need to
   3.769 +	// compute the virtual itir anyway (based on domain's RR.ps)
   3.770 +	// AND ACTUALLY reflect_interruption doesn't use it anyway!
   3.771 +	itir = vcpu_get_itir_on_fault(d,ifa);
    3.772 +	vector = priv_emulate(d,regs,isr);
   3.773 +	if (vector == IA64_RETRY) {
   3.774 +		reflect_interruption(ifa,isr,itir,regs,
   3.775 +			IA64_ALT_DATA_TLB_VECTOR | IA64_FORCED_IFA);
   3.776 +	}
   3.777 +	else if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
   3.778 +		reflect_interruption(ifa,isr,itir,regs,vector);
   3.779 +	}
   3.780 +}
   3.781 +
   3.782 +#define INTR_TYPE_MAX	10
   3.783 +UINT64 int_counts[INTR_TYPE_MAX];
   3.784 +
   3.785 +void
   3.786 +ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
   3.787 +{
   3.788 +	extern unsigned long vcpu_get_itir_on_fault(struct domain *vcpu, UINT64 ifa);
   3.789 +	struct domain *d = (struct domain *) current;
   3.790 +	unsigned long check_lazy_cover = 0;
   3.791 +	unsigned long psr = regs->cr_ipsr;
   3.792 +	unsigned long itir = vcpu_get_itir_on_fault(d,ifa);
   3.793 +
   3.794 +	if (!(psr & IA64_PSR_CPL)) {
   3.795 +		printf("ia64_handle_reflection: reflecting with priv=0!!\n");
   3.796 +		while(1);
   3.797 +	}
   3.802 +	switch(vector) {
   3.803 +	    case 8:
   3.804 +		vector = IA64_DIRTY_BIT_VECTOR; break;
   3.805 +	    case 9:
   3.806 +		vector = IA64_INST_ACCESS_BIT_VECTOR; break;
   3.807 +	    case 10:
   3.808 +		check_lazy_cover = 1;
   3.809 +		vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
   3.810 +	    case 22:
   3.811 +		vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
   3.812 +	    case 23:
   3.813 +		check_lazy_cover = 1;
   3.814 +		vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
   3.815 +	    case 25:
   3.816 +		vector = IA64_DISABLED_FPREG_VECTOR; break;
   3.817 +	    case 26:
   3.818 +printf("*** NaT fault... attempting to handle as privop\n");
   3.819 +		vector = priv_emulate(d,regs,isr);
   3.820 +		if (vector == IA64_NO_FAULT) {
   3.821 +printf("*** Handled privop masquerading as NaT fault\n");
   3.822 +			return;
   3.823 +		}
   3.824 +		vector = IA64_NAT_CONSUMPTION_VECTOR; break;
   3.825 +	    case 27:
   3.826 +//printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
   3.827 +		itir = iim;
   3.828 +		vector = IA64_SPECULATION_VECTOR; break;
   3.829 +	    case 30:
   3.830 +		// FIXME: Should we handle unaligned refs in Xen??
   3.831 +		vector = IA64_UNALIGNED_REF_VECTOR; break;
   3.832 +	    default:
   3.833 +		printf("ia64_handle_reflection: unhandled vector=0x%lx\n",vector);
   3.834 +		while(vector);
   3.835 +		return;
   3.836 +	}
   3.837 +	if (check_lazy_cover && handle_lazy_cover(d, isr, regs)) return;
   3.838 +	reflect_interruption(ifa,isr,itir,regs,vector);
   3.839 +}
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/arch/ia64/regionreg.c	Mon Nov 22 22:57:05 2004 +0000
     4.3 @@ -0,0 +1,399 @@
     4.4 +/*
     4.5 + * Region register and region id management
     4.6 + *
     4.7 + * Copyright (C) 2001-2004 Hewlett-Packard Co.
      4.8 +	 *	Dan Magenheimer (dan.magenheimer@hp.com)
     4.9 + *	Bret Mckee (bret.mckee@hp.com)
    4.10 + *
    4.11 + */
    4.12 +
    4.13 +
    4.14 +#include <linux/config.h>
    4.15 +#include <linux/types.h>
    4.16 +#include <linux/sched.h>
    4.17 +#include <asm/page.h>
    4.18 +#include <asm/regionreg.h>
    4.19 +#include <asm/vhpt.h>
    4.20 +
    4.21 +
    4.22 +#define	IA64_MIN_IMPL_RID_BITS	(IA64_MIN_IMPL_RID_MSB+1)
    4.23 +#define	IA64_MAX_IMPL_RID_BITS	24
    4.24 +
    4.25 +#define MIN_RIDS	(1 << IA64_MIN_IMPL_RID_BITS)
    4.26 +#define	MIN_RID_MAX	(MIN_RIDS - 1)
    4.27 +#define	MIN_RID_MASK	(MIN_RIDS - 1)
    4.28 +#define	MAX_RIDS	(1 << (IA64_MAX_IMPL_RID_BITS))
    4.29 +#define	MAX_RID		(MAX_RIDS - 1)
    4.30 +#define	MAX_RID_BLOCKS	(1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))
    4.31 +#define RIDS_PER_RIDBLOCK MIN_RIDS
    4.32 +
    4.33 +// This is the one global memory representation of the default Xen region reg
    4.34 +ia64_rr xen_rr;
    4.35 +
    4.36 +#if 0
    4.37 +// following already defined in include/asm-ia64/gcc_intrin.h
    4.38 +// it should probably be ifdef'd out from there to ensure all region
    4.39 +// register usage is encapsulated in this file
    4.40 +static inline unsigned long
    4.41 +ia64_get_rr (unsigned long rr)
    4.42 +{
    4.43 +	    unsigned long r;
    4.44 +	    __asm__ __volatile__ (";;mov %0=rr[%1];;":"=r"(r):"r"(rr):"memory");
    4.45 +	    return r;
    4.46 +}
    4.47 +
    4.48 +static inline void
    4.49 +ia64_set_rr (unsigned long rr, unsigned long rrv)
    4.50 +{
    4.51 +	    __asm__ __volatile__ (";;mov rr[%0]=%1;;"::"r"(rr),"r"(rrv):"memory");
    4.52 +}
    4.53 +#endif
    4.54 +
    4.55 +// use this to allocate a rid out of the "Xen reserved rid block"
    4.56 +unsigned long allocate_reserved_rid(void)
    4.57 +{
    4.58 +	static unsigned long currentrid = XEN_DEFAULT_RID;
    4.59 +	unsigned long t = currentrid;
    4.60 +
    4.61 +	unsigned long max = RIDS_PER_RIDBLOCK;
    4.62 +
    4.63 +	if (++currentrid >= max) return(-1UL);
    4.64 +	return t;
    4.65 +}
    4.66 +
    4.67 +
    4.68 +// returns -1 if none available
    4.69 +unsigned long allocate_metaphysical_rid(void)
    4.70 +{
     4.71 +	return allocate_reserved_rid();	// -1UL if none available
    4.72 +}
    4.73 +
    4.74 +int deallocate_metaphysical_rid(unsigned long rid)
    4.75 +{
    4.76 +	// fix this when the increment allocation mechanism is fixed.
    4.77 +	return 1;
    4.78 +}
    4.79 +
    4.80 +
    4.81 +void init_rr(void)
    4.82 +{
    4.83 +	xen_rr.rrval = 0;
    4.84 +	xen_rr.ve = 0;
    4.85 +	xen_rr.rid = allocate_reserved_rid();
    4.86 +	xen_rr.ps = PAGE_SHIFT;
    4.87 +
    4.88 +	printf("initialized xen_rr.rid=0x%lx\n", xen_rr.rid);
    4.89 +}
    4.90 +
    4.91 +/*************************************
    4.92 +  Region Block setup/management
    4.93 +*************************************/
    4.94 +
    4.95 +static int implemented_rid_bits = 0;
    4.96 +static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };
    4.97 +
    4.98 +void get_impl_rid_bits(void)
    4.99 +{
   4.100 +	// FIXME (call PAL)
   4.101 +//#ifdef CONFIG_MCKINLEY
   4.102 +	implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;
   4.103 +//#else
   4.104 +//#error "rid ranges won't work on Merced"
   4.105 +//#endif
   4.106 +	if (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS ||
   4.107 +	    implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
   4.108 +		BUG();
   4.109 +}
   4.110 +
   4.111 +
   4.112 +/*
   4.113 + * Allocate a power-of-two-sized chunk of region id space -- one or more
   4.114 + *  "rid blocks"
   4.115 + */
   4.116 +int allocate_rid_range(struct domain *d, unsigned long ridbits)
   4.117 +{
   4.118 +	int i, j, n_rid_blocks;
   4.119 +
   4.120 +	if (implemented_rid_bits == 0) get_impl_rid_bits();
   4.121 +	
    4.122 +	if (ridbits >= IA64_MAX_IMPL_RID_BITS)
    4.123 +		ridbits = IA64_MAX_IMPL_RID_BITS - 1;
    4.124 +	
    4.125 +	if (ridbits < IA64_MIN_IMPL_RID_BITS)
    4.126 +		ridbits = IA64_MIN_IMPL_RID_BITS;
   4.127 +
   4.128 +	// convert to rid_blocks and find one
   4.129 +	n_rid_blocks = ridbits - IA64_MIN_IMPL_RID_BITS + 1;
   4.130 +	
   4.131 +	// skip over block 0, reserved for "meta-physical mappings (and Xen)"
   4.132 +	for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
   4.133 +		if (ridblock_owner[i] == NULL) {
   4.134 +			for (j = i; j < i + n_rid_blocks; ++j) {
   4.135 +				if (ridblock_owner[j]) break;
   4.136 +			}
    4.137 +			if (j == i + n_rid_blocks) break;	// whole group is free
   4.138 +		}
   4.139 +	}
   4.140 +	
   4.141 +	if (i >= MAX_RID_BLOCKS) return 0;
   4.142 +	
   4.143 +	// found an unused block:
   4.144 +	//   (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
   4.145 +	// mark this block as owned
   4.146 +	for (j = i; j < i + n_rid_blocks; ++j) ridblock_owner[j] = d;
   4.147 +	
   4.148 +	// setup domain struct
   4.149 +	d->rid_bits = ridbits;
   4.150 +	d->starting_rid = i << IA64_MIN_IMPL_RID_BITS;
   4.151 +	d->ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
   4.152 +	
   4.153 +	return 1;
   4.154 +}
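
/* Illustrative stand-alone sketch of allocate_rid_range() above: a
 * first-fit scan for a naturally aligned group of free rid blocks,
 * skipping group 0 (reserved for metaphysical mappings and Xen).  The
 * small table below is a hypothetical stand-in for
 * ridblock_owner[MAX_RID_BLOCKS], using the same group-complete test as
 * the adjusted loop above. */
#include <stdio.h>

#define NBLOCKS 16

static void *owner[NBLOCKS];	/* NULL == block is free */

/* first group of n consecutive free blocks, aligned to n, skipping group 0;
 * returns the first block index, or -1 if none */
static int find_block_group(int n)
{
	int i, j;

	for (i = n; i + n <= NBLOCKS; i += n) {
		for (j = i; j < i + n; ++j)
			if (owner[j]) break;
		if (j == i + n) return i;	/* all n blocks were free */
	}
	return -1;
}

int main(void)
{
	owner[4] = (void *)1;	/* pretend block 4 is already owned */
	printf("group of 4 starts at block %d\n", find_block_group(4));
	return 0;
}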
   4.155 +
   4.156 +
   4.157 +int deallocate_rid_range(struct domain *d)
   4.158 +{
   4.159 +	int i;
   4.160 +	int rid_block_end = d->ending_rid >> IA64_MIN_IMPL_RID_BITS;
   4.161 +	int rid_block_start = d->starting_rid >> IA64_MIN_IMPL_RID_BITS;
   4.162 +
   4.163 +	return 1;  // KLUDGE ALERT
   4.164 +	//
   4.165 +	// not all domains will have allocated RIDs (physical mode loaders for instance)
   4.166 +	//
   4.167 +	if (d->rid_bits == 0) return 1;
   4.168 +
   4.169 +#ifdef DEBUG
    4.170 +	for (i = rid_block_start; i < rid_block_end; ++i)
    4.171 +		ASSERT(ridblock_owner[i] == d);
   4.173 +#endif
   4.174 +	
   4.175 +	for (i = rid_block_start; i < rid_block_end; ++i)
    4.176 +		ridblock_owner[i] = NULL;
   4.177 +	
   4.178 +	d->rid_bits = 0;
   4.179 +	d->starting_rid = 0;
   4.180 +	d->ending_rid = 0;
   4.181 +	return 1;
   4.182 +}
   4.183 +
   4.184 +
   4.185 +// This function is purely for performance... apparently scrambling
   4.186 +//  bits in the region id makes for better hashing, which means better
   4.187 +//  use of the VHPT, which means better performance
   4.188 +// Note that the only time a RID should be mangled is when it is stored in
   4.189 +//  a region register; anytime it is "viewable" outside of this module,
   4.190 +//  it should be unmangled
   4.191 +
   4.192 +//This appears to work in Xen... turn it on later so no complications yet
   4.193 +//#define CONFIG_MANGLE_RIDS
   4.194 +#ifdef CONFIG_MANGLE_RIDS
   4.195 +static inline unsigned long
   4.196 +vmMangleRID(unsigned long RIDVal)
   4.197 +{
   4.198 +	union bits64 { unsigned char bytes[4]; unsigned long uint; };
   4.199 +
   4.200 +	union bits64 t;
   4.201 +	unsigned char tmp;
   4.202 +
   4.203 +	t.uint = RIDVal;
   4.204 +	tmp = t.bytes[1];
   4.205 +	t.bytes[1] = t.bytes[3];
   4.206 +	t.bytes[3] = tmp;
   4.207 +
   4.208 +	return t.uint;
   4.209 +}
   4.210 +
   4.211 +// since vmMangleRID is symmetric, use it for unmangling also
   4.212 +#define vmUnmangleRID(x)	vmMangleRID(x)
   4.213 +#else
   4.214 +// no mangling/unmangling
   4.215 +#define vmMangleRID(x)	(x)
   4.216 +#define vmUnmangleRID(x) (x)
   4.217 +#endif
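
/* Illustrative stand-alone sketch of the mangling above: swapping bytes 1
 * and 3 of the region register value scrambles the rid field (bits 8-31)
 * for better VHPT hashing, and the swap is its own inverse, which is why
 * vmUnmangleRID can simply reuse vmMangleRID.  A quick round-trip check: */
#include <assert.h>
#include <stdio.h>

static unsigned long mangle(unsigned long v)
{
	union { unsigned char bytes[sizeof(unsigned long)]; unsigned long uint; } t;
	unsigned char tmp;

	t.uint = v;
	tmp = t.bytes[1];
	t.bytes[1] = t.bytes[3];
	t.bytes[3] = tmp;
	return t.uint;
}

int main(void)
{
	unsigned long v = 0x12345600UL;

	assert(mangle(mangle(v)) == v);	/* the swap is an involution */
	printf("%#lx <-> %#lx\n", v, mangle(v));
	return 0;
}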
   4.218 +
   4.219 +static inline void
   4.220 +set_rr_no_srlz(unsigned long rr, unsigned long rrval)
   4.221 +{
   4.222 +	ia64_set_rr(rr, vmMangleRID(rrval));
   4.223 +}
   4.224 +
   4.225 +void
   4.226 +set_rr(unsigned long rr, unsigned long rrval)
   4.227 +{
   4.228 +	ia64_set_rr(rr, vmMangleRID(rrval));
   4.229 +	ia64_srlz_d();
   4.230 +}
   4.231 +
   4.232 +unsigned long
   4.233 +get_rr(unsigned long rr)
   4.234 +{
   4.235 +	return vmUnmangleRID(ia64_get_rr(rr));
   4.236 +}
   4.237 +
   4.238 +static inline int validate_page_size(unsigned long ps)
   4.239 +{
   4.240 +	switch(ps) {
   4.241 +	    case 12: case 13: case 14: case 16: case 18:
   4.242 +	    case 20: case 22: case 24: case 26: case 28:
   4.243 +		return 1;
   4.244 +	    default:
   4.245 +		return 0;
   4.246 +	}
   4.247 +}
   4.248 +
   4.249 +// validates and changes a single region register
   4.250 +// in the currently executing domain
   4.251 +// Passing a value of -1 is a (successful) no-op
   4.252 +// NOTE: DOES NOT SET VCPU's rrs[x] value!!
   4.253 +int set_one_rr(unsigned long rr, unsigned long val)
   4.254 +{
   4.255 +	struct domain *d = current;
   4.256 +	unsigned long rreg = REGION_NUMBER(rr);
   4.257 +	ia64_rr rrv, newrrv, memrrv;
   4.258 +	unsigned long newrid;
   4.259 +
   4.260 +	if (val == -1) return 1;
   4.261 +
   4.262 +	rrv.rrval = val;
   4.263 +	newrrv.rrval = 0;
   4.264 +	newrid = d->starting_rid + rrv.rid;
   4.265 +
   4.266 +	if (newrid > d->ending_rid) return 0;
   4.267 +
   4.268 +	memrrv.rrval = rrv.rrval;
   4.269 +	if (rreg == 7) {
   4.270 +		newrrv.rid = newrid;
   4.271 +		newrrv.ve = VHPT_ENABLED_REGION_7;
   4.272 +		newrrv.ps = IA64_GRANULE_SHIFT;
   4.273 +		ia64_new_rr7(vmMangleRID(newrrv.rrval));
   4.274 +	}
   4.275 +	else {
   4.276 +		newrrv.rid = newrid;
   4.277 +		// FIXME? region 6 needs to be uncached for EFI to work
   4.278 +		if (rreg == 6) newrrv.ve = VHPT_ENABLED_REGION_7;
   4.279 +		else newrrv.ve = VHPT_ENABLED_REGION_0_TO_6;
   4.280 +		newrrv.ps = PAGE_SHIFT;
   4.281 +		set_rr(rr,newrrv.rrval);
   4.282 +	}
   4.283 +	return 1;
   4.284 +}
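
/* Illustrative stand-alone sketch of set_one_rr() above: the domain's
 * virtual rid becomes a physical one by adding the domain's starting_rid,
 * bounds-checked against ending_rid, with the top three bits of the
 * virtual address picking the region register.  The starting_rid and
 * ending_rid values below are made up for the example. */
#include <stdio.h>

#define REGION_NUMBER(va)	((unsigned long)(va) >> 61)	/* top 3 bits */

static int translate_rid(unsigned long starting_rid, unsigned long ending_rid,
			 unsigned long vrid, unsigned long *prid)
{
	unsigned long p = starting_rid + vrid;

	if (p > ending_rid) return 0;	/* outside this domain's range */
	*prid = p;
	return 1;
}

int main(void)
{
	unsigned long prid;

	printf("region of 0xe000000000000000 is %lu\n",
	       REGION_NUMBER(0xe000000000000000UL));
	if (translate_rid(0x40000, 0x50000, 0x123, &prid))
		printf("vrid 0x123 -> prid %#lx\n", prid);
	return 0;
}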
   4.285 +
    4.286 +// set rr0 to the passed rid (metaphysical mode, so the domain offset is not applied)
   4.287 +int set_metaphysical_rr(unsigned long rr, unsigned long rid)
   4.288 +{
   4.289 +	ia64_rr rrv;
   4.290 +	
   4.291 +	rrv.rrval = 0;
   4.292 +	rrv.rid = rid;
   4.293 +	rrv.ps = PAGE_SHIFT;
   4.294 +//	rrv.ve = 1; 	FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
   4.295 +	rrv.ve = 0;
    4.296 +	set_rr(rr,rrv.rrval);
    4.297 +	return 1;
    4.298 +}
   4.298 +
    4.299 +// validates/changes region registers 0-7 in the currently executing domain
   4.300 +// Note that this is the one and only SP API (other than executing a privop)
   4.301 +// for a domain to use to change region registers
   4.302 +int set_all_rr( u64 rr0, u64 rr1, u64 rr2, u64 rr3,
   4.303 +		     u64 rr4, u64 rr5, u64 rr6, u64 rr7)
   4.304 +{
   4.305 +	if (!set_one_rr(0x0000000000000000L, rr0)) return 0;
   4.306 +	if (!set_one_rr(0x2000000000000000L, rr1)) return 0;
   4.307 +	if (!set_one_rr(0x4000000000000000L, rr2)) return 0;
   4.308 +	if (!set_one_rr(0x6000000000000000L, rr3)) return 0;
   4.309 +	if (!set_one_rr(0x8000000000000000L, rr4)) return 0;
   4.310 +	if (!set_one_rr(0xa000000000000000L, rr5)) return 0;
   4.311 +	if (!set_one_rr(0xc000000000000000L, rr6)) return 0;
   4.312 +	if (!set_one_rr(0xe000000000000000L, rr7)) return 0;
   4.313 +	return 1;
   4.314 +}
   4.315 +
   4.316 +void init_all_rr(struct domain *d)
   4.317 +{
   4.318 +	ia64_rr rrv;
   4.319 +
   4.320 +	rrv.rrval = 0;
   4.321 +	rrv.rid = d->metaphysical_rid;
   4.322 +	rrv.ps = PAGE_SHIFT;
   4.323 +	rrv.ve = 1;
   4.324 +	d->shared_info->arch.rrs[0] = -1;
   4.325 +	d->shared_info->arch.rrs[1] = rrv.rrval;
   4.326 +	d->shared_info->arch.rrs[2] = rrv.rrval;
   4.327 +	d->shared_info->arch.rrs[3] = rrv.rrval;
   4.328 +	d->shared_info->arch.rrs[4] = rrv.rrval;
   4.329 +	d->shared_info->arch.rrs[5] = rrv.rrval;
   4.330 +	d->shared_info->arch.rrs[6] = rrv.rrval;
   4.331 +//	d->shared_info->arch.rrs[7] = rrv.rrval;
   4.332 +}
   4.333 +
   4.334 +
   4.335 +/* XEN/ia64 INTERNAL ROUTINES */
   4.336 +
   4.337 +unsigned long physicalize_rid(struct domain *d, unsigned long rid)
   4.338 +{
   4.339 +	ia64_rr rrv;
   4.340 +	    
   4.341 +	rrv.rrval = rid;
   4.342 +	rrv.rid += d->starting_rid;
   4.343 +	return rrv.rrval;
   4.344 +}
   4.345 +
   4.346 +unsigned long
   4.347 +virtualize_rid(struct domain *d, unsigned long rid)
   4.348 +{
   4.349 +	ia64_rr rrv;
   4.350 +	    
   4.351 +	rrv.rrval = rid;
   4.352 +	rrv.rid -= d->starting_rid;
   4.353 +	return rrv.rrval;
   4.354 +}
   4.355 +
   4.356 +// loads a thread's region register (0-6) state into
   4.357 +// the real physical region registers.  Returns the
   4.358 +// (possibly mangled) bits to store into rr7
    4.359 +// iff it differs from what is currently in physical
    4.360 +// rr7 (because we have to go to assembly and physical mode
    4.361 +// to change rr7).  If no change to rr7 is required, returns 0.
   4.362 +//
   4.363 +unsigned long load_region_regs(struct domain *d)
   4.364 +{
   4.365 +	unsigned long rr0, rr1,rr2, rr3, rr4, rr5, rr6;
   4.366 +	unsigned long oldrr7, newrr7;
   4.367 +	// TODO: These probably should be validated
   4.368 +
   4.369 +	if (d->metaphysical_mode) {
   4.370 +		ia64_rr rrv;
    4.371 +		rrv.rrval = 0;	// clear reserved bits before filling in fields
    4.372 +		rrv.rid = d->metaphysical_rid;
   4.373 +		rrv.ps = PAGE_SHIFT;
   4.374 +		rrv.ve = 1;
   4.375 +		rr0 = rr1 = rr2 = rr3 = rr4 = rr5 = rr6 = newrr7 = rrv.rrval;
   4.376 +	}
   4.377 +	else {
   4.378 +		rr0 = physicalize_rid(d, d->shared_info->arch.rrs[0]);
   4.379 +		rr1 = physicalize_rid(d, d->shared_info->arch.rrs[1]);
   4.380 +		rr2 = physicalize_rid(d, d->shared_info->arch.rrs[2]);
   4.381 +		rr3 = physicalize_rid(d, d->shared_info->arch.rrs[3]);
   4.382 +		rr4 = physicalize_rid(d, d->shared_info->arch.rrs[4]);
   4.383 +		rr5 = physicalize_rid(d, d->shared_info->arch.rrs[5]);
   4.384 +		rr6 = physicalize_rid(d, d->shared_info->arch.rrs[6]);
   4.385 +		newrr7 = physicalize_rid(d, d->shared_info->arch.rrs[7]);
   4.386 +	}
   4.387 +
   4.388 +	set_rr_no_srlz(0x0000000000000000L, rr0);
   4.389 +	set_rr_no_srlz(0x2000000000000000L, rr1);
   4.390 +	set_rr_no_srlz(0x4000000000000000L, rr2);
   4.391 +	set_rr_no_srlz(0x6000000000000000L, rr3);
   4.392 +	set_rr_no_srlz(0x8000000000000000L, rr4);
   4.393 +	set_rr_no_srlz(0xa000000000000000L, rr5);
   4.394 +	set_rr_no_srlz(0xc000000000000000L, rr6);
   4.395 +	ia64_srlz_d();
   4.396 +	oldrr7 = get_rr(0xe000000000000000L);
   4.397 +	if (oldrr7 != newrr7) {
   4.398 +		newrr7 = (newrr7 & ~0xff) | (PAGE_SHIFT << 2) | 1;
   4.399 +		return vmMangleRID(newrr7);
   4.400 +	}
   4.401 +	else return 0;
   4.402 +}
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/arch/ia64/vcpu.c	Mon Nov 22 22:57:05 2004 +0000
     5.3 @@ -0,0 +1,1559 @@
     5.4 +/*
     5.5 + * Virtualized CPU functions
     5.6 + * 
     5.7 + * Copyright (C) 2004 Hewlett-Packard Co.
     5.8 + *	Dan Magenheimer (dan.magenheimer@hp.com)
     5.9 + *
    5.10 + */
    5.11 +
    5.12 +#include <linux/sched.h>
    5.13 +#include <asm/ia64_int.h>
    5.14 +#include <asm/vcpu.h>
    5.15 +#include <asm/regionreg.h>
    5.16 +#include <asm/tlb.h>
    5.17 +#include <asm/processor.h>
    5.18 +#include <asm/delay.h>
    5.19 +
    5.20 +typedef	union {
    5.21 +	struct ia64_psr;
    5.22 +	unsigned long i64;
    5.23 +} PSR;
    5.24 +
    5.25 +//typedef	struct pt_regs	REGS;
    5.26 +//typedef struct domain VCPU;
    5.27 +
    5.28 +// this def for vcpu_regs won't work if kernel stack is present
     5.29 +#define	vcpu_regs(vcpu) ((struct pt_regs *) (vcpu)->regs)
     5.30 +#define	PSCB(x)	((x)->shared_info->arch)
    5.31 +
    5.32 +#define	TRUE	1
    5.33 +#define	FALSE	0
    5.34 +#define	IA64_PTA_SZ_BIT		2
    5.35 +#define	IA64_PTA_VF_BIT		8
    5.36 +#define	IA64_PTA_BASE_BIT	15
    5.37 +#define	IA64_PTA_LFMT		(1UL << IA64_PTA_VF_BIT)
    5.38 +#define	IA64_PTA_SZ(x)	(x##UL << IA64_PTA_SZ_BIT)
    5.39 +
    5.40 +#define STATIC
    5.41 +
    5.42 +unsigned long vcpu_verbose = 0;
    5.43 +#define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
    5.44 +
    5.45 +/**************************************************************************
    5.46 + VCPU general register access routines
    5.47 +**************************************************************************/
    5.48 +
    5.49 +UINT64
    5.50 +vcpu_get_gr(VCPU *vcpu, unsigned reg)
    5.51 +{
    5.52 +	REGS *regs = vcpu_regs(vcpu);
    5.53 +	UINT64 val;
    5.54 +
    5.55 +	if (!reg) return 0;
    5.56 +	getreg(reg,&val,0,regs);	// FIXME: handle NATs later
    5.57 +	return val;
    5.58 +}
    5.59 +
    5.60 +// returns:
    5.61 +//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
    5.62 +//   IA64_NO_FAULT otherwise
    5.63 +IA64FAULT
    5.64 +vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value)
    5.65 +{
    5.66 +	REGS *regs = vcpu_regs(vcpu);
    5.67 +	long sof = (regs->cr_ifs) & 0x7f;
    5.68 +
    5.69 +	if (!reg) return IA64_ILLOP_FAULT;
    5.70 +	if (reg >= sof + 32) return IA64_ILLOP_FAULT;
    5.71 +	setreg(reg,value,0,regs);	// FIXME: handle NATs later
    5.72 +	return IA64_NO_FAULT;
    5.73 +}
    5.74 +
    5.75 +/**************************************************************************
    5.76 + VCPU privileged application register access routines
    5.77 +**************************************************************************/
    5.78 +
    5.79 +IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
    5.80 +{
    5.81 +	if (reg == 44) return (vcpu_set_itc(vcpu,val));
    5.82 +	if (reg == 27) return (IA64_ILLOP_FAULT);
    5.83 +	if (reg > 7) return (IA64_ILLOP_FAULT);
    5.84 +	PSCB(vcpu).krs[reg] = val;
    5.85 +#if 0
    5.86 +// for now, privify kr read's so all kr accesses are privileged
    5.87 +	switch (reg) {
    5.88 +	      case 0: asm volatile ("mov ar.k0=%0" :: "r"(val)); break;
    5.89 +	      case 1: asm volatile ("mov ar.k1=%0" :: "r"(val)); break;
    5.90 +	      case 2: asm volatile ("mov ar.k2=%0" :: "r"(val)); break;
    5.91 +	      case 3: asm volatile ("mov ar.k3=%0" :: "r"(val)); break;
    5.92 +	      case 4: asm volatile ("mov ar.k4=%0" :: "r"(val)); break;
    5.93 +	      case 5: asm volatile ("mov ar.k5=%0" :: "r"(val)); break;
    5.94 +	      case 6: asm volatile ("mov ar.k6=%0" :: "r"(val)); break;
    5.95 +	      case 7: asm volatile ("mov ar.k7=%0" :: "r"(val)); break;
    5.96 +	      case 27: asm volatile ("mov ar.cflg=%0" :: "r"(val)); break;
    5.97 +	}
    5.98 +#endif
    5.99 +	return IA64_NO_FAULT;
   5.100 +}
   5.101 +
   5.102 +IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
   5.103 +{
   5.104 +	if (reg > 7) return (IA64_ILLOP_FAULT);
   5.105 +	*val = PSCB(vcpu).krs[reg];
   5.106 +	return IA64_NO_FAULT;
   5.107 +}
   5.108 +
   5.109 +/**************************************************************************
   5.110 + VCPU processor status register access routines
   5.111 +**************************************************************************/
   5.112 +
   5.113 +void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
   5.114 +{
   5.115 +	/* only do something if mode changes */
   5.116 +	if (!!newmode ^ !!vcpu->metaphysical_mode) {
   5.117 +		if (newmode) set_metaphysical_rr(0,vcpu->metaphysical_rid);
   5.118 +		else if (PSCB(vcpu).rrs[0] != -1)
   5.119 +			set_one_rr(0, PSCB(vcpu).rrs[0]);
   5.120 +		vcpu->metaphysical_mode = newmode;
   5.121 +	}
   5.122 +}
   5.123 +
   5.124 +IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
   5.125 +{
   5.126 +	struct ia64_psr psr, imm, *ipsr;
   5.127 +	REGS *regs = vcpu_regs(vcpu);
   5.128 +
   5.129 +	// TODO: All of these bits need to be virtualized
   5.130 +	// TODO: Only allowed for current vcpu
   5.131 +	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
   5.132 +	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
   5.133 +	imm = *(struct ia64_psr *)&imm24;
    5.134 +	// validate before modifying any state
    5.135 +	if (imm24 & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
    5.136 +		| IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
    5.137 +		| IA64_PSR_DFL | IA64_PSR_DFH))
    5.138 +			return (IA64_ILLOP_FAULT);
    5.139 +	// interrupt delivery flag
    5.140 +	if (imm.i) PSCB(vcpu).interrupt_delivery_enabled = 0;
    5.141 +	// interrupt collection flag
    5.142 +	if (imm.ic) PSCB(vcpu).interrupt_collection_enabled = 0;
   5.144 +	if (imm.dfh) ipsr->dfh = 0;
   5.145 +	if (imm.dfl) ipsr->dfl = 0;
   5.146 +	if (imm.pp) { ipsr->pp = 0; psr.pp = 0; }
   5.147 +	if (imm.up) { ipsr->up = 0; psr.up = 0; }
   5.148 +	if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
   5.149 +	if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
   5.150 +	__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
   5.151 +	return IA64_NO_FAULT;
   5.152 +}
   5.153 +
   5.154 +extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
   5.155 +#define SPURIOUS_VECTOR 0xf
   5.156 +
   5.157 +IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
   5.158 +{
   5.159 +	struct ia64_psr psr, imm, *ipsr;
   5.160 +	REGS *regs = vcpu_regs(vcpu);
   5.161 +	UINT64 mask, enabling_interrupts = 0;
   5.162 +
   5.163 +	// TODO: All of these bits need to be virtualized
   5.164 +	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
   5.165 +	imm = *(struct ia64_psr *)&imm24;
   5.166 +	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
    5.167 +	// handle psr.sp,pp, psr.i,ic, the user mask, and psr.dt/dfl/dfh for now
   5.168 +	mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM |
   5.169 +		IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH;
   5.170 +	if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
   5.171 +	if (imm.dfh) ipsr->dfh = 1;
   5.172 +	if (imm.dfl) ipsr->dfl = 1;
   5.173 +	if (imm.pp) { ipsr->pp = 1; psr.pp = 1; }
   5.174 +	if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
   5.175 +	if (imm.i) {
   5.176 +		if (!PSCB(vcpu).interrupt_delivery_enabled) {
   5.177 +//printf("vcpu_set_psr_sm: psr.ic 0->1 ");
   5.178 +			enabling_interrupts = 1;
   5.179 +		}
   5.180 +		PSCB(vcpu).interrupt_delivery_enabled = 1;
   5.181 +	}
   5.182 +	if (imm.ic)  PSCB(vcpu).interrupt_collection_enabled = 1;
   5.183 +	// TODO: do this faster
   5.184 +	if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
   5.185 +	if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
   5.186 +	if (imm.up) { ipsr->up = 1; psr.up = 1; }
   5.187 +	if (imm.be) {
   5.188 +		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
   5.189 +		return (IA64_ILLOP_FAULT);
   5.190 +	}
   5.191 +	if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE);
   5.192 +	__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
   5.193 +#if 0 // now done with deliver_pending_interrupts
   5.194 +	if (enabling_interrupts) {
   5.195 +		if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR) {
   5.196 +//printf("with interrupts pending\n");
   5.197 +			return IA64_EXTINT_VECTOR;
   5.198 +		}
   5.199 +//else printf("but nothing pending\n");
   5.200 +	}
   5.201 +#endif
   5.202 +	return IA64_NO_FAULT;
   5.203 +}
   5.204 +
   5.205 +IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
   5.206 +{
   5.207 +	struct ia64_psr psr, newpsr, *ipsr;
   5.208 +	REGS *regs = vcpu_regs(vcpu);
   5.209 +	UINT64 enabling_interrupts = 0;
   5.210 +
   5.211 +	// TODO: All of these bits need to be virtualized
   5.212 +	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
   5.213 +	newpsr = *(struct ia64_psr *)&val;
   5.214 +	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
    5.215 +	// unlike ssm, a full mov-to-psr cannot reasonably fault on
    5.216 +	// unsupported bits, so they are ignored rather than rejected
    5.217 +	//if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
   5.218 +	if (newpsr.dfh) ipsr->dfh = 1;
   5.219 +	if (newpsr.dfl) ipsr->dfl = 1;
   5.220 +	if (newpsr.pp) { ipsr->pp = 1; psr.pp = 1; }
   5.221 +	if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
   5.222 +	if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
   5.223 +	if (newpsr.i) {
   5.224 +		if (!PSCB(vcpu).interrupt_delivery_enabled)
   5.225 +			enabling_interrupts = 1;
   5.226 +		PSCB(vcpu).interrupt_delivery_enabled = 1;
   5.227 +	}
   5.228 +	if (newpsr.ic)  PSCB(vcpu).interrupt_collection_enabled = 1;
   5.229 +	if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
   5.230 +	if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
   5.232 +	if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
   5.233 +	else vcpu_set_metaphysical_mode(vcpu,TRUE);
   5.234 +	if (newpsr.be) {
   5.235 +		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
   5.236 +		return (IA64_ILLOP_FAULT);
   5.237 +	}
   5.238 +	//__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
   5.239 +#if 0 // now done with deliver_pending_interrupts
   5.240 +	if (enabling_interrupts) {
   5.241 +		if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
   5.242 +			return IA64_EXTINT_VECTOR;
   5.243 +	}
   5.244 +#endif
   5.245 +	return IA64_NO_FAULT;
   5.246 +}
   5.247 +
   5.248 +IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
   5.249 +{
   5.250 +	UINT64 psr;
   5.251 +	struct ia64_psr newpsr;
   5.252 +
   5.253 +	// TODO: This needs to return a "filtered" view of
   5.254 +	// the psr, not the actual psr.  Probably the psr needs
   5.255 +	// to be a field in regs (in addition to ipsr).
   5.256 +	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
   5.257 +	newpsr = *(struct ia64_psr *)&psr;
   5.258 +	if (newpsr.cpl == 2) newpsr.cpl = 0;
   5.259 +	if (PSCB(vcpu).interrupt_delivery_enabled) newpsr.i = 1;
   5.260 +	else newpsr.i = 0;
   5.261 +	if (PSCB(vcpu).interrupt_collection_enabled) newpsr.ic = 1;
   5.262 +	else newpsr.ic = 0;
   5.263 +	*pval = *(unsigned long *)&newpsr;
   5.264 +	return IA64_NO_FAULT;
   5.265 +}
   5.266 +
   5.267 +BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
   5.268 +{
   5.269 +	return !!PSCB(vcpu).interrupt_collection_enabled;
   5.270 +}
   5.271 +
   5.272 +BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
   5.273 +{
   5.274 +	return !!PSCB(vcpu).interrupt_delivery_enabled;
   5.275 +}
   5.276 +
   5.277 +UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
   5.278 +{
   5.279 +	UINT64 dcr = PSCB(vcpu).dcr;
   5.280 +	PSR psr = {0};
   5.281 +	
   5.282 +	//printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
   5.283 +	psr.i64 = prevpsr;
   5.284 +	psr.be = 0; if (dcr & IA64_DCR_BE) psr.be = 1;
   5.285 +	psr.pp = 0; if (dcr & IA64_DCR_PP) psr.pp = 1;
   5.286 +	psr.ic = PSCB(vcpu).interrupt_collection_enabled;
   5.287 +	psr.i = PSCB(vcpu).interrupt_delivery_enabled;
   5.288 +	psr.bn = PSCB(vcpu).banknum;
   5.289 +	psr.dt = 1; psr.it = 1; psr.rt = 1;
   5.290 +	if (psr.cpl == 2) psr.cpl = 0; // !!!! fool domain
   5.291 +	// psr.pk = 1;
   5.292 +	//printf("returns 0x%016lx...",psr.i64);
   5.293 +	return psr.i64;
   5.294 +}
   5.295 +
   5.296 +/**************************************************************************
   5.297 + VCPU control register access routines
   5.298 +**************************************************************************/
   5.299 +
   5.300 +IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
   5.301 +{
   5.302 +extern unsigned long privop_trace;
   5.303 +//privop_trace=0;
   5.304 +//verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu).iip);
   5.305 +	// Reads of cr.dcr on Xen always have the sign bit set, so
   5.306 +	// a domain can differentiate whether it is running on SP or not
   5.307 +	*pval = PSCB(vcpu).dcr | 0x8000000000000000L;
   5.308 +	return (IA64_NO_FAULT);
   5.309 +}
   5.310 +
   5.311 +IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
   5.312 +{
   5.313 +	*pval = PSCB(vcpu).iva & ~0x7fffL;
   5.314 +	return (IA64_NO_FAULT);
   5.315 +}
   5.316 +
   5.317 +IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
   5.318 +{
   5.319 +	*pval = PSCB(vcpu).pta;
   5.320 +	return (IA64_NO_FAULT);
   5.321 +}
   5.322 +
   5.323 +IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
   5.324 +{
   5.325 +	//REGS *regs = vcpu_regs(vcpu);
   5.326 +	//*pval = regs->cr_ipsr;
   5.327 +	*pval = PSCB(vcpu).ipsr;
   5.328 +	return (IA64_NO_FAULT);
   5.329 +}
   5.330 +
   5.331 +IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
   5.332 +{
   5.333 +	*pval = PSCB(vcpu).isr;
   5.334 +	return (IA64_NO_FAULT);
   5.335 +}
   5.336 +
   5.337 +IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
   5.338 +{
   5.339 +	//REGS *regs = vcpu_regs(vcpu);
   5.340 +	//*pval = regs->cr_iip;
   5.341 +	*pval = PSCB(vcpu).iip;
   5.342 +	return (IA64_NO_FAULT);
   5.343 +}
   5.344 +
   5.345 +IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
   5.346 +{
   5.347 +	UINT64 val = PSCB(vcpu).ifa;
   5.348 +	*pval = val;
   5.349 +	return (IA64_NO_FAULT);
   5.350 +}
   5.351 +
   5.352 +
   5.353 +unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
   5.354 +{
   5.355 +	ia64_rr rr;
   5.356 +
   5.357 +	rr.rrval = 0;
   5.358 +	rr.ps = vcpu_get_rr_ps(vcpu,ifa);
   5.359 +	rr.rid = vcpu_get_rr_rid(vcpu,ifa);
   5.360 +	return (rr.rrval);
   5.361 +}
   5.362 +
   5.363 +
   5.364 +IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
   5.365 +{
   5.366 +	UINT64 val = PSCB(vcpu).itir;
   5.367 +	*pval = val;
   5.368 +	return (IA64_NO_FAULT);
   5.369 +}
   5.370 +
   5.371 +IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
   5.372 +{
   5.373 +	UINT64 val = PSCB(vcpu).iipa;
   5.374 +	// SP entry code does not save iipa yet nor does it get
   5.375 +	//  properly delivered in the pscb
   5.376 +	printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
   5.377 +	*pval = val;
   5.378 +	return (IA64_NO_FAULT);
   5.379 +}
   5.380 +
   5.381 +IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
   5.382 +{
   5.383 +	//PSCB(vcpu).ifs = PSCB(vcpu)->regs.cr_ifs;
   5.384 +	//*pval = PSCB(vcpu).regs.cr_ifs;
   5.385 +	*pval = PSCB(vcpu).ifs;
   5.386 +	PSCB(vcpu).incomplete_regframe = 0;
   5.387 +	return (IA64_NO_FAULT);
   5.388 +}
   5.389 +
   5.390 +IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
   5.391 +{
   5.392 +	UINT64 val = PSCB(vcpu).iim;
   5.393 +	*pval = val;
   5.394 +	return (IA64_NO_FAULT);
   5.395 +}
   5.396 +
   5.397 +IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
   5.398 +{
   5.399 +	return vcpu_thash(vcpu,PSCB(vcpu).ifa,pval);
   5.400 +}
   5.401 +
   5.402 +IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
   5.403 +{
   5.404 +extern unsigned long privop_trace;
   5.405 +//privop_trace=1;
   5.406 +	// Reads of cr.dcr on SP always have the sign bit set, so
   5.407 +	// a domain can differentiate whether it is running on SP or not
   5.408 +	// Thus, writes of DCR should ignore the sign bit
   5.409 +//verbose("vcpu_set_dcr: called\n");
   5.410 +	PSCB(vcpu).dcr = val & ~0x8000000000000000L;
   5.411 +	return (IA64_NO_FAULT);
   5.412 +}
   5.413 +
   5.414 +IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
   5.415 +{
   5.416 +	PSCB(vcpu).iva = val & ~0x7fffL;
   5.417 +	return (IA64_NO_FAULT);
   5.418 +}
   5.419 +
   5.420 +IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
   5.421 +{
   5.422 +	if (val & IA64_PTA_LFMT) {
   5.423 +		printf("*** No support for VHPT long format yet!!\n");
   5.424 +		return (IA64_ILLOP_FAULT);
   5.425 +	}
   5.426 +	if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
   5.427 +	if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
   5.428 +	PSCB(vcpu).pta = val;
   5.429 +	return IA64_NO_FAULT;
   5.430 +}
   5.431 +
   5.432 +IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
   5.433 +{
   5.434 +	PSCB(vcpu).ipsr = val;
   5.435 +	return IA64_NO_FAULT;
   5.436 +}
   5.437 +
   5.438 +IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
   5.439 +{
   5.440 +	PSCB(vcpu).isr = val;
   5.441 +	return IA64_NO_FAULT;
   5.442 +}
   5.443 +
   5.444 +IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
   5.445 +{
   5.446 +	PSCB(vcpu).iip = val;
   5.447 +	return IA64_NO_FAULT;
   5.448 +}
   5.449 +
   5.450 +IA64FAULT vcpu_increment_iip(VCPU *vcpu)
   5.451 +{
   5.452 +	REGS *regs = vcpu_regs(vcpu);
   5.453 +	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
   5.454 +	if (ipsr->ri == 2) { ipsr->ri=0; regs->cr_iip += 16; }
   5.455 +	else ipsr->ri++;
   5.456 +	return (IA64_NO_FAULT);
   5.457 +}
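
/* Illustrative stand-alone sketch of the advance in vcpu_increment_iip()
 * above: IA-64 packs three instruction slots into each 16-byte bundle;
 * cr.iip addresses the bundle and ipsr.ri selects the slot, so stepping
 * past slot 2 advances iip by 16 and wraps ri to 0. */
#include <stdio.h>

struct ipstate { unsigned long iip; unsigned ri; };	/* bundle + slot */

static void increment_ip(struct ipstate *s)
{
	if (s->ri == 2) { s->ri = 0; s->iip += 16; }	/* next bundle */
	else s->ri++;
}

int main(void)
{
	struct ipstate s = { 0x8000, 1 };
	int i;

	for (i = 0; i < 4; i++) {
		increment_ip(&s);
		printf("iip=%#lx ri=%u\n", s.iip, s.ri);
	}
	return 0;
}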
   5.458 +
   5.459 +IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
   5.460 +{
   5.461 +	PSCB(vcpu).ifa = val;
   5.462 +	return IA64_NO_FAULT;
   5.463 +}
   5.464 +
   5.465 +IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
   5.466 +{
   5.467 +	PSCB(vcpu).itir = val;
   5.468 +	return IA64_NO_FAULT;
   5.469 +}
   5.470 +
   5.471 +IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
   5.472 +{
   5.473 +	// SP entry code does not save iipa yet nor does it get
   5.474 +	//  properly delivered in the pscb
   5.475 +	printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
   5.476 +	PSCB(vcpu).iipa = val;
   5.477 +	return IA64_NO_FAULT;
   5.478 +}
   5.479 +
   5.480 +IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
   5.481 +{
   5.482 +	//REGS *regs = vcpu_regs(vcpu);
   5.483 +	PSCB(vcpu).ifs = val;
   5.484 +	return IA64_NO_FAULT;
   5.485 +}
   5.486 +
   5.487 +IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
   5.488 +{
   5.489 +	PSCB(vcpu).iim = val;
   5.490 +	return IA64_NO_FAULT;
   5.491 +}
   5.492 +
   5.493 +IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
   5.494 +{
   5.495 +	PSCB(vcpu).iha = val;
   5.496 +	return IA64_NO_FAULT;
   5.497 +}
   5.498 +
   5.499 +/**************************************************************************
   5.500 + VCPU interrupt control register access routines
   5.501 +**************************************************************************/
   5.502 +
   5.503 +void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
   5.504 +{
   5.505 +	if (vector & ~0xff) {
   5.506 +		printf("vcpu_pend_interrupt: bad vector\n");
   5.507 +		return;
   5.508 +	}
   5.509 +	if (!test_bit(vector,PSCB(vcpu).delivery_mask)) return;
   5.510 +	if (test_bit(vector,PSCB(vcpu).irr)) {
   5.511 +//printf("vcpu_pend_interrupt: overrun\n");
   5.512 +	}
   5.513 +	set_bit(vector,PSCB(vcpu).irr);
   5.514 +}
   5.515 +
   5.516 +#define	IA64_TPR_MMI	0x10000
   5.517 +#define	IA64_TPR_MIC	0x000f0
   5.518 +
   5.519 +/* checks to see if a VCPU has any unmasked pending interrupts
   5.520 + * if so, returns the highest, else returns SPURIOUS_VECTOR */
   5.521 +/* NOTE: Since this gets called from vcpu_get_ivr() and the
   5.522 + * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
   5.523 + * this routine also ignores pscb.interrupt_delivery_enabled
    5.524 + * and this must be checked independently; see vcpu_deliverable_interrupts() */
   5.525 +UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
   5.526 +{
   5.527 +	UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;
   5.528 +
   5.529 +	p = &PSCB(vcpu).irr[3];
   5.530 +	q = &PSCB(vcpu).delivery_mask[3];
   5.531 +	r = &PSCB(vcpu).insvc[3];
   5.532 +	for (i = 3; ; p--, q--, r--, i--) {
   5.533 +		bits = *p & *q;
   5.534 +		if (bits) break; // got a potential interrupt
   5.535 +		if (*r) {
   5.536 +			// nothing in this word which is pending+inservice
   5.537 +			// but there is one inservice which masks lower
   5.538 +			return SPURIOUS_VECTOR;
   5.539 +		}
   5.540 +		if (i == 0) {
   5.541 +		// checked all bits... nothing pending+inservice
   5.542 +			return SPURIOUS_VECTOR;
   5.543 +		}
   5.544 +	}
   5.545 +	// have a pending,deliverable interrupt... see if it is masked
   5.546 +	bitnum = ia64_fls(bits);
   5.547 +//printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...",bitnum);
   5.548 +	vector = bitnum+(i*64);
   5.549 +	mask = 1L << bitnum;
   5.550 +//printf("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...",vector);
   5.551 +	if (*r >= mask) {
   5.552 +		// masked by equal inservice
   5.553 +//printf("but masked by equal inservice\n");
   5.554 +		return SPURIOUS_VECTOR;
   5.555 +	}
   5.556 +	if (PSCB(vcpu).tpr & IA64_TPR_MMI) {
   5.557 +		// tpr.mmi is set
   5.558 +//printf("but masked by tpr.mmi\n");
   5.559 +		return SPURIOUS_VECTOR;
   5.560 +	}
   5.561 +	if (((PSCB(vcpu).tpr & IA64_TPR_MIC) + 15) >= vector) {
   5.562 +		//tpr.mic masks class
   5.563 +//printf("but masked by tpr.mic\n");
   5.564 +		return SPURIOUS_VECTOR;
   5.565 +	}
   5.566 +
   5.567 +//printf("returned to caller\n");
   5.568 +	return vector;
   5.569 +}
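
/* Illustrative stand-alone sketch of the scan above: the 256 vectors are
 * walked as four 64-bit words from high to low, the highest
 * pending+unmasked bit is taken, and it is rejected if an equal-or-higher
 * vector is already in service.  The version below reproduces that core
 * (tpr.mmi/tpr.mic masking omitted for brevity); GCC's __builtin_clzl is
 * an assumed stand-in for ia64_fls. */
#include <stdio.h>

#define SPURIOUS_VECTOR 0xf

static unsigned long fls64(unsigned long x) { return 63 - __builtin_clzl(x); }

static unsigned long highest_pending(unsigned long irr[4], unsigned long dm[4],
				     unsigned long insvc[4])
{
	int i;

	for (i = 3; i >= 0; i--) {
		unsigned long bits = irr[i] & dm[i];
		if (bits) {
			unsigned long bitnum = fls64(bits);
			/* an equal-or-higher in-service bit masks it */
			if (insvc[i] >= (1UL << bitnum)) return SPURIOUS_VECTOR;
			return bitnum + i * 64;
		}
		if (insvc[i]) return SPURIOUS_VECTOR;	/* in-service masks all lower */
	}
	return SPURIOUS_VECTOR;
}

int main(void)
{
	unsigned long irr[4] = { 0, 1UL << 3, 0, 0 };
	unsigned long dm[4] = { ~0UL, ~0UL, ~0UL, ~0UL };
	unsigned long insvc[4] = { 0, 0, 0, 0 };

	printf("vector = %lu\n", highest_pending(irr, dm, insvc));	/* 67 */
	return 0;
}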
   5.570 +
   5.571 +UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
   5.572 +{
   5.573 +	return (vcpu_get_psr_i(vcpu) &&
   5.574 +		vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
   5.575 +}
   5.576 +
   5.577 +IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
   5.578 +{
   5.579 +extern unsigned long privop_trace;
   5.580 +//privop_trace=1;
   5.581 +	//TODO: Implement this
   5.582 +	printf("vcpu_get_lid: WARNING: Getting cr.lid always returns zero\n");
   5.583 +	*pval = 0;
   5.584 +	return IA64_NO_FAULT;
   5.585 +}
   5.586 +
   5.587 +IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
   5.588 +{
   5.589 +	int i;
   5.590 +	UINT64 vector, mask;
   5.591 +#if 1
   5.592 +	static char firstivr = 1;
   5.593 +	static char firsttime[256];
   5.594 +	if (firstivr) {
   5.595 +		int i;
   5.596 +		for (i=0;i<256;i++) firsttime[i]=1;
   5.597 +		firstivr=0;
   5.598 +	}
   5.599 +#endif
   5.600 +
   5.601 +	vector = vcpu_check_pending_interrupts(vcpu);
   5.602 +	if (vector == SPURIOUS_VECTOR) {
   5.603 +		PSCB(vcpu).pending_interruption = 0;
   5.604 +		*pval = vector;
   5.605 +		return IA64_NO_FAULT;
   5.606 +	}
   5.607 +	// now have an unmasked, pending, deliverable vector!
   5.608 +	// getting ivr has "side effects"
   5.609 +#if 0
   5.610 +	if (firsttime[vector]) {
   5.611 +		printf("*** First get_ivr on vector=%d,itc=%lx\n",
   5.612 +			vector,ia64_get_itc());
   5.613 +		firsttime[vector]=0;
   5.614 +	}
   5.615 +#endif
   5.616 +	i = vector >> 6;
   5.617 +	mask = 1L << (vector & 0x3f);
   5.618 +//printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n",vector);
   5.619 +	PSCB(vcpu).insvc[i] |= mask;
   5.620 +	PSCB(vcpu).irr[i] &= ~mask;
   5.621 +	PSCB(vcpu).pending_interruption--;
   5.622 +	*pval = vector;
   5.623 +	return IA64_NO_FAULT;
   5.624 +}
   5.625 +
   5.626 +IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
   5.627 +{
   5.628 +	*pval = PSCB(vcpu).tpr;
   5.629 +	return (IA64_NO_FAULT);
   5.630 +}
   5.631 +
   5.632 +IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
   5.633 +{
   5.634 +	*pval = 0L;  // reads of eoi always return 0
   5.635 +	return (IA64_NO_FAULT);
   5.636 +}
   5.637 +
   5.638 +IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
   5.639 +{
   5.640 +#ifndef IRR_USE_FIXED
   5.641 +	printk("vcpu_get_irr: called, not implemented yet\n");
   5.642 +	return IA64_ILLOP_FAULT;
   5.643 +#else
   5.644 +	*pval = vcpu->irr[0];
   5.645 +	return (IA64_NO_FAULT);
   5.646 +#endif
   5.647 +}
   5.648 +
   5.649 +IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
   5.650 +{
   5.651 +#ifndef IRR_USE_FIXED
   5.652 +	printk("vcpu_get_irr: called, not implemented yet\n");
   5.653 +	return IA64_ILLOP_FAULT;
   5.654 +#else
   5.655 +	*pval = vcpu->irr[1];
   5.656 +	return (IA64_NO_FAULT);
   5.657 +#endif
   5.658 +}
   5.659 +
   5.660 +IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
   5.661 +{
   5.662 +#ifndef IRR_USE_FIXED
   5.663 +	printk("vcpu_get_irr: called, not implemented yet\n");
   5.664 +	return IA64_ILLOP_FAULT;
   5.665 +#else
   5.666 +	*pval = vcpu->irr[2];
   5.667 +	return (IA64_NO_FAULT);
   5.668 +#endif
   5.669 +}
   5.670 +
   5.671 +IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
   5.672 +{
   5.673 +#ifndef IRR_USE_FIXED
   5.674 +	printk("vcpu_get_irr: called, not implemented yet\n");
   5.675 +	return IA64_ILLOP_FAULT;
   5.676 +#else
   5.677 +	*pval = vcpu->irr[3];
   5.678 +	return (IA64_NO_FAULT);
   5.679 +#endif
   5.680 +}
   5.681 +
   5.682 +IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
   5.683 +{
   5.684 +	*pval = PSCB(vcpu).itv;
   5.685 +	return (IA64_NO_FAULT);
   5.686 +}
   5.687 +
   5.688 +IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
   5.689 +{
   5.690 +	*pval = PSCB(vcpu).pmv;
   5.691 +	return (IA64_NO_FAULT);
   5.692 +}
   5.693 +
   5.694 +IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
   5.695 +{
   5.696 +	*pval = PSCB(vcpu).cmcv;
   5.697 +	return (IA64_NO_FAULT);
   5.698 +}
   5.699 +
   5.700 +IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
   5.701 +{
   5.702 +	// fix this when setting values other than m-bit is supported
   5.703 +	printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
   5.704 +	*pval = (1L << 16);
   5.705 +	return (IA64_NO_FAULT);
   5.706 +}
   5.707 +
   5.708 +IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
   5.709 +{
   5.710 +	// fix this when setting values other than m-bit is supported
   5.711 +	printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
   5.712 +	*pval = (1L << 16);
   5.713 +	return (IA64_NO_FAULT);
   5.714 +}
   5.715 +
   5.716 +IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
   5.717 +{
   5.718 +	printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
   5.719 +	return (IA64_ILLOP_FAULT);
   5.720 +}
   5.721 +
   5.722 +IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
   5.723 +{
   5.724 +	if (val & 0xff00) return IA64_RSVDREG_FAULT;
   5.725 +	PSCB(vcpu).tpr = val;
   5.726 +	return (IA64_NO_FAULT);
   5.727 +}
   5.728 +
   5.729 +IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
   5.730 +{
   5.731 +	UINT64 *p, bits, vec, bitnum;
   5.732 +	int i;
   5.733 +
   5.734 +	p = &PSCB(vcpu).insvc[3];
   5.735 +	for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
   5.736 +	if (i < 0) {
   5.737 +		printf("Trying to EOI interrupt when none are in-service.\r\n");
    5.738 +		return IA64_NO_FAULT;	// treat a spurious EOI as a no-op
   5.739 +	}
   5.740 +	bitnum = ia64_fls(bits);
   5.741 +	vec = bitnum + (i*64);
   5.742 +	/* clear the correct bit */
   5.743 +	bits &= ~(1L << bitnum);
   5.744 +	*p = bits;
   5.745 +	/* clearing an eoi bit may unmask another pending interrupt... */
   5.746 +	if (PSCB(vcpu).interrupt_delivery_enabled) { // but only if enabled...
   5.747 +		// worry about this later... Linux only calls eoi
   5.748 +		// with interrupts disabled
   5.749 +		printf("Trying to EOI interrupt with interrupts enabled\r\n");
   5.750 +	}
   5.751 +//printf("YYYYY vcpu_set_eoi: Successful\n");
   5.752 +	return (IA64_NO_FAULT);
   5.753 +}
   5.754 +
   5.755 +IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
   5.756 +{
   5.757 +	if (!(val & (1L << 16))) {
   5.758 +		printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
   5.759 +		return (IA64_ILLOP_FAULT);
   5.760 +	}
   5.761 +	// no place to save this state but nothing to do anyway
   5.762 +	return (IA64_NO_FAULT);
   5.763 +}
   5.764 +
   5.765 +IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
   5.766 +{
   5.767 +	if (!(val & (1L << 16))) {
    5.768 +		printf("vcpu_set_lrr1: Unmasked interrupts unsupported\n");
   5.769 +		return (IA64_ILLOP_FAULT);
   5.770 +	}
   5.771 +	// no place to save this state but nothing to do anyway
   5.772 +	return (IA64_NO_FAULT);
   5.773 +}
   5.774 +
   5.775 +
   5.776 +IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
   5.777 +{
   5.778 +extern unsigned long privop_trace;
   5.779 +//privop_trace=1;
   5.780 +	if (val & 0xef00) return (IA64_ILLOP_FAULT);
   5.781 +	PSCB(vcpu).itv = val;
   5.782 +	if (val & 0x10000) {
   5.783 +printf("**** vcpu_set_itv(%d): vitm=%lx, setting to 0\n",val,PSCB(vcpu).domain_itm);
   5.784 +		PSCB(vcpu).domain_itm = 0;
   5.785 +	}
   5.786 +	else vcpu_enable_timer(vcpu,1000000L);
   5.787 +	return (IA64_NO_FAULT);
   5.788 +}
   5.789 +
   5.790 +IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
   5.791 +{
   5.792 +	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
   5.793 +	PSCB(vcpu).pmv = val;
   5.794 +	return (IA64_NO_FAULT);
   5.795 +}
   5.796 +
   5.797 +IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
   5.798 +{
   5.799 +	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
   5.800 +	PSCB(vcpu).cmcv = val;
   5.801 +	return (IA64_NO_FAULT);
   5.802 +}
   5.803 +
   5.804 +/**************************************************************************
   5.805 +Interval timer routines
   5.806 +**************************************************************************/
   5.807 +
   5.808 +BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
   5.809 +{
   5.810 +	UINT64 itv = PSCB(vcpu).itv;
   5.811 +	return(!itv || !!(itv & 0x10000));
   5.812 +}
   5.813 +
   5.814 +BOOLEAN vcpu_timer_expired(VCPU *vcpu)
   5.815 +{
   5.816 +	unsigned long domain_itm = PSCB(vcpu).domain_itm;
   5.817 +	unsigned long now = ia64_get_itc();
   5.818 + 
   5.819 +	if (domain_itm && (now > domain_itm) &&
   5.820 +		!vcpu_timer_disabled(vcpu)) return TRUE;
   5.821 +	return FALSE;
   5.822 +}
   5.823 +
   5.824 +void vcpu_safe_set_itm(unsigned long val)
   5.825 +{
   5.826 +	unsigned long epsilon = 100;
   5.827 +	UINT64 now = ia64_get_itc();
   5.828 +
   5.829 +	local_irq_disable();
   5.830 +	while (1) {
   5.831 +//printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
   5.832 +		ia64_set_itm(val);
   5.833 +		if (val > (now = ia64_get_itc())) break;
   5.834 +		val = now + epsilon;
   5.835 +		epsilon <<= 1;
   5.836 +	}
   5.837 +	local_irq_enable();
   5.838 +}
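          +
          +// The retry loop above guards against a lost timer tick: if itm is
          +// written with a value itc has already passed, the match never fires
          +// (until wraparound), so the target is pushed out by epsilon cycles,
          +// doubling each attempt (100, 200, 400, ...) until the write lands
          +// in the future.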
   5.839 +
   5.840 +void vcpu_set_next_timer(VCPU *vcpu)
   5.841 +{
   5.842 +	UINT64 d = PSCB(vcpu).domain_itm;
   5.843 +	//UINT64 s = PSCB(vcpu).xen_itm;
   5.844 +	UINT64 s = local_cpu_data->itm_next;
   5.845 +	UINT64 now = ia64_get_itc();
   5.846 +	//UINT64 interval = PSCB(vcpu).xen_timer_interval;
   5.847 +
   5.848 +	/* gloss over the wraparound problem for now... we know it exists
   5.849 +	 * but it doesn't matter right now */
   5.850 +
   5.851 +#if 0
   5.852 +	/* ensure at least next SP tick is in the future */
   5.853 +	if (!interval) PSCB(vcpu).xen_itm = now +
   5.854 +#if 0
   5.855 +		(running_on_sim() ? SIM_DEFAULT_CLOCK_RATE :
   5.856 +		 			DEFAULT_CLOCK_RATE);
   5.857 +#else
   5.858 +	3000000;
   5.859 +//printf("vcpu_set_next_timer: HACK!\n");
   5.860 +#endif
   5.861 +#if 0
   5.862 +	if (PSCB(vcpu).xen_itm < now)
   5.863 +		while (PSCB(vcpu).xen_itm < now + (interval>>1))
   5.864 +			PSCB(vcpu).xen_itm += interval;
   5.865 +#endif
   5.866 +#endif
   5.867 +
   5.868 +	if (is_idle_task(vcpu)) {
   5.869 +		printf("****** vcpu_set_next_timer called during idle!!\n");
   5.870 +	}
   5.871 +	//s = PSCB(vcpu).xen_itm;
   5.872 +	if (d && (d > now) && (d < s)) {
   5.873 +		vcpu_safe_set_itm(d);
   5.874 +		//using_domain_as_itm++;
   5.875 +	}
   5.876 +	else {
   5.877 +		vcpu_safe_set_itm(s);
   5.878 +		//using_xen_as_itm++;
   5.879 +	}
   5.880 +}
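          +
          +// In effect this programs the earlier of the two deadlines: the
          +// domain's virtual itm is used only if it is still pending (d > now)
          +// and sooner than Xen's own next tick (d < s); otherwise Xen's
          +// next tick is programmed.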
   5.881 +
   5.882 +// parameter is a time interval specified in cycles
   5.883 +void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
   5.884 +{
   5.885 +    PSCB(vcpu).xen_timer_interval = cycles;
   5.886 +    vcpu_set_next_timer(vcpu);
   5.887 +    printf("vcpu_enable_timer(%d): interval set to %d cycles\n",
   5.888 +             PSCB(vcpu).xen_timer_interval);
   5.889 +    __set_bit(PSCB(vcpu).itv, PSCB(vcpu).delivery_mask);
   5.890 +}
   5.891 +
   5.892 +IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
   5.893 +{
   5.894 +	UINT64 now = ia64_get_itc();
   5.895 +
   5.896 +	//if (val < now) val = now + 1000;
   5.897 +//printf("*** vcpu_set_itm: called with %lx\n",val);
   5.898 +	PSCB(vcpu).domain_itm = val;
   5.899 +	vcpu_set_next_timer(vcpu);
   5.900 +	return (IA64_NO_FAULT);
   5.901 +}
   5.902 +
   5.903 +IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
   5.904 +{
   5.905 +	
   5.906 +	UINT64 oldnow = ia64_get_itc();
   5.907 +	UINT64 olditm = PSCB(vcpu).domain_itm;
   5.908 +	unsigned long d = olditm - oldnow;
   5.909 +	unsigned long x = local_cpu_data->itm_next - oldnow;
   5.910 +	
   5.911 +	UINT64 newnow = val, min_delta;
   5.912 +
   5.913 +	local_irq_disable();
   5.914 +	if (olditm) {
   5.915 +printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
   5.916 +		PSCB(vcpu).domain_itm = newnow + d;
   5.917 +	}
   5.918 +	local_cpu_data->itm_next = newnow + x;
   5.919 +	d = PSCB(vcpu).domain_itm;
   5.920 +	x = local_cpu_data->itm_next;
   5.921 +	
   5.922 +	ia64_set_itc(newnow);
   5.923 +	if (d && (d > newnow) && (d < x)) {
   5.924 +		vcpu_safe_set_itm(d);
   5.925 +		//using_domain_as_itm++;
   5.926 +	}
   5.927 +	else {
   5.928 +		vcpu_safe_set_itm(x);
   5.929 +		//using_xen_as_itm++;
   5.930 +	}
   5.931 +	local_irq_enable();
   5.932 +	return (IA64_NO_FAULT);
   5.933 +}
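          +
          +// Illustration of the delta preservation above: with oldnow = 1000,
          +// olditm = 1500 (d = 500) and val = 9000, the domain's itm is
          +// rewritten to 9500, so a pending expiration stays the same number
          +// of cycles in the future after itc is warped.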
   5.934 +
   5.935 +IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
   5.936 +{
   5.937 +	//FIXME: Implement this
   5.938 +	printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
   5.939 +	return (IA64_NO_FAULT);
   5.940 +	//return (IA64_ILLOP_FAULT);
   5.941 +}
   5.942 +
   5.943 +IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
   5.944 +{
   5.945 +	//TODO: Implement this
   5.946 +	printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
   5.947 +	return (IA64_ILLOP_FAULT);
   5.948 +}
   5.949 +
   5.950 +void vcpu_pend_timer(VCPU *vcpu)
   5.951 +{
   5.952 +	UINT64 itv = PSCB(vcpu).itv & 0xff;
   5.953 +
   5.954 +	if (vcpu_timer_disabled(vcpu)) return;
   5.955 +	vcpu_pend_interrupt(vcpu, itv);
   5.956 +}
   5.957 +
   5.958 +//FIXME: This is a hack because everything dies if a timer tick is lost
   5.959 +void vcpu_poke_timer(VCPU *vcpu)
   5.960 +{
   5.961 +	UINT64 itv = PSCB(vcpu).itv & 0xff;
   5.962 +	UINT64 now = ia64_get_itc();
   5.963 +	UINT64 itm = PSCB(vcpu).domain_itm;
   5.964 +	UINT64 irr;
   5.965 +
   5.966 +	if (vcpu_timer_disabled(vcpu)) return;
   5.967 +	if (!itm) return;
   5.968 +	if (itv != 0xefL) {
   5.969 +		printf("vcpu_poke_timer: unimplemented itv=%lx!\n",itv);
   5.970 +		while(1);
   5.971 +	}
   5.972 +	// using 0xef instead of itv so can get real irr
   5.973 +	if (now > itm && !test_bit(0xefL, PSCB(vcpu).insvc)) {
   5.974 +		if (!test_bit(0xefL,PSCB(vcpu).irr)) {
   5.975 +			irr = ia64_getreg(_IA64_REG_CR_IRR3);
   5.976 +			if (irr & (1L<<(0xef-0xc0))) return;
   5.977 +if (now-itm>0x800000)
   5.978 +printf("*** poking timer: now=%lx,vitm=%lx,xitm=%lx,itm=%lx\n",now,itm,local_cpu_data->itm_next,ia64_get_itm());
   5.979 +			vcpu_pend_interrupt(vcpu, 0xefL);
   5.980 +		}
   5.981 +	}
   5.982 +}
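          +
          +// The 0xef-0xc0 bit arithmetic above relies on CR.IRR3 covering
          +// vectors 0xc0-0xff: vector 0xef is bit 0x2f of IRR3, so the check
          +// avoids re-pending a tick the hardware already has in flight.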
   5.983 +
   5.984 +
   5.985 +/**************************************************************************
   5.986 +Privileged operation emulation routines
   5.987 +**************************************************************************/
   5.988 +
   5.989 +IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
   5.990 +{
   5.991 +	PSCB(vcpu).ifa = ifa;	// privop traps don't set ifa so do it here
   5.992 +	return (IA64_DATA_TLB_VECTOR | IA64_FORCED_IFA);
   5.993 +}
   5.994 +
   5.995 +
   5.996 +IA64FAULT vcpu_rfi(VCPU *vcpu)
   5.997 +{
   5.998 +	// TODO: Only allowed for current vcpu
   5.999 +	PSR psr;
  5.1000 +	UINT64 int_enable, regspsr = 0;
  5.1001 +	UINT64 ifs;
  5.1002 +	REGS *regs = vcpu_regs(vcpu);
  5.1003 +	extern void dorfirfi(void);
  5.1004 +
  5.1005 +	psr.i64 = PSCB(vcpu).ipsr;
  5.1006 +	if (psr.cpl < 3) psr.cpl = 2;
  5.1007 +	if (psr.i) PSCB(vcpu).interrupt_delivery_enabled = 1;
  5.1008 +	int_enable = psr.i;
  5.1009 +	if (psr.ic)  PSCB(vcpu).interrupt_collection_enabled = 1;
  5.1010 +	if (psr.dt && psr.rt && psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
  5.1011 +	else vcpu_set_metaphysical_mode(vcpu,TRUE);
  5.1012 +	psr.ic = 1; psr.i = 1;
  5.1013 +	psr.dt = 1; psr.rt = 1; psr.it = 1;
  5.1014 +	psr.bn = 1;
  5.1015 +	//psr.pk = 1;  // checking pkeys shouldn't be a problem but seems broken
  5.1016 +	if (psr.be) {
  5.1017 +		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
  5.1018 +		return (IA64_ILLOP_FAULT);
  5.1019 +	}
  5.1020 +	PSCB(vcpu).incomplete_regframe = 0; // is this necessary?
  5.1021 +	ifs = PSCB(vcpu).ifs;
  5.1022 +	//if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
  5.1023 +	//if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
  5.1024 +	if (ifs & regs->cr_ifs & 0x8000000000000000L) {
  5.1025 +#define SI_OFS(x)	((char *)(&PSCB(vcpu).x) - (char *)(vcpu->shared_info))
  5.1026 +if (SI_OFS(iip)!=0x150 || SI_OFS(ipsr)!=0x148 || SI_OFS(ifs)!=0x158) {
  5.1027 +printf("SI_CR_IIP/IPSR/IFS_OFFSET CHANGED, SEE dorfirfi\n");
  5.1028 +while(1);
  5.1029 +}
  5.1030 +		// TODO: validate PSCB(vcpu).iip 
  5.1031 +		// TODO: PSCB(vcpu).ipsr = psr;
  5.1032 +		PSCB(vcpu).ipsr = psr.i64;
  5.1033 +		// now set up the trampoline
  5.1034 +		regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
  5.1035 +		__asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
  5.1036 +		regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
  5.1037 +	}
  5.1038 +	else {
  5.1039 +		regs->cr_ipsr = psr.i64;
  5.1040 +		regs->cr_iip = PSCB(vcpu).iip;
  5.1041 +	}
  5.1042 +	PSCB(vcpu).interrupt_collection_enabled = 1;
  5.1043 +	vcpu_bsw1(vcpu);
  5.1044 +	PSCB(vcpu).interrupt_delivery_enabled = int_enable;
  5.1045 +	return (IA64_NO_FAULT);
  5.1046 +}
  5.1047 +
  5.1048 +IA64FAULT vcpu_cover(VCPU *vcpu)
  5.1049 +{
  5.1050 +	REGS *regs = vcpu_regs(vcpu);
  5.1051 +
  5.1052 +	if (!PSCB(vcpu).interrupt_collection_enabled) {
  5.1053 +		if (!PSCB(vcpu).incomplete_regframe)
  5.1054 +			PSCB(vcpu).ifs = regs->cr_ifs;
  5.1055 +		else PSCB(vcpu).incomplete_regframe = 0;
  5.1056 +	}
  5.1057 +	regs->cr_ifs = 0;
  5.1058 +	return (IA64_NO_FAULT);
  5.1059 +}
  5.1060 +
  5.1061 +IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
  5.1062 +{
  5.1063 +	extern unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr);
  5.1064 +	UINT64 pta = PSCB(vcpu).pta;
  5.1065 +	UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
  5.1066 +	UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
  5.1067 +	UINT64 Mask = (1L << pta_sz) - 1;
  5.1068 +	UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
  5.1069 +	UINT64 compMask_60_15 = ~Mask_60_15;
  5.1070 +	//UINT64 rr_ps = RR_TO_PS(get_rr(vadr));
  5.1071 +	UINT64 rr_ps = vcpu_get_rr_ps(vcpu,vadr);
  5.1072 +	UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
  5.1073 +	UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
  5.1074 +	UINT64 VHPT_addr2a =
  5.1075 +		((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
  5.1076 +	UINT64 VHPT_addr2b =
  5.1077 +		((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
  5.1078 +	UINT64 VHPT_addr3 = VHPT_offset & 0x7fff;
  5.1079 +	UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
  5.1080 +			VHPT_addr3;
  5.1081 +
  5.1082 +	if (VHPT_addr1 == 0xe000000000000000L) {
  5.1083 +	    printf("vcpu_thash: thash unsupported with rr7 @%lx\n",
  5.1084 +		PSCB(vcpu).iip);
  5.1085 +	    return (IA64_ILLOP_FAULT);
  5.1086 +	}
  5.1087 +//verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
  5.1088 +	*pval = VHPT_addr;
  5.1089 +	return (IA64_NO_FAULT);
  5.1090 +}
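          +
          +// Sketch of the short-format VHPT hash computed above, assuming the
          +// minimum pta.size of 15 (a 32KB table) and 4KB pages (rr_ps = 12):
          +// Mask_60_15 is then 0, so bits 60:15 of the hash come entirely from
          +// pta.base, the low bits come from VHPT_offset = (vadr >> 12) << 3,
          +// and the region bits 63:61 of vadr are preserved.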
  5.1091 +
  5.1092 +IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
  5.1093 +{
  5.1094 +	printf("vcpu_ttag: ttag instruction unsupported\n");
  5.1095 +	return (IA64_ILLOP_FAULT);
  5.1096 +}
  5.1097 +
  5.1098 +IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
  5.1099 +{
  5.1100 +	extern TR_ENTRY *match_tr(VCPU *,UINT64);
  5.1101 +	extern TR_ENTRY *match_dtlb(VCPU *,UINT64);
  5.1102 +	TR_ENTRY *trp;
  5.1103 +	UINT64 mask;
  5.1104 +
  5.1105 +extern unsigned long privop_trace;
  5.1106 +	if ((trp=match_tr(current,vadr)) || (trp=match_dtlb(current,vadr))) {
  5.1107 +		mask = (1L << trp->ps) - 1;
  5.1108 +		*padr = ((trp->ppn << 12) & ~mask) | (vadr & mask);
  5.1109 +		verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",vadr,PSCB(vcpu).iip,*padr);
  5.1110 +		return (IA64_NO_FAULT);
  5.1111 +	}
  5.1112 +	verbose("vcpu_tpa addr=%p, @%p, forcing data miss\n",vadr,PSCB(vcpu).iip);
  5.1113 +	return vcpu_force_data_miss(vcpu, vadr);
  5.1114 +}
  5.1115 +
  5.1116 +IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
  5.1117 +{
  5.1118 +	printf("vcpu_tak: tak instruction unsupported\n");
  5.1119 +	return (IA64_ILLOP_FAULT);
  5.1120 +	// HACK ALERT: tak does a thash for now
  5.1121 +	//return vcpu_thash(vcpu,vadr,key);
  5.1122 +}
  5.1123 +
  5.1124 +/**************************************************************************
  5.1125 + VCPU debug breakpoint register access routines
  5.1126 +**************************************************************************/
  5.1127 +
  5.1128 +IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
  5.1129 +{
  5.1130 +	// TODO: unimplemented DBRs return a reserved register fault
  5.1131 +	// TODO: Should set Logical CPU state, not just physical
  5.1132 +	ia64_set_dbr(reg,val);
  5.1133 +	return (IA64_NO_FAULT);
  5.1134 +}
  5.1135 +
  5.1136 +IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
  5.1137 +{
  5.1138 +	// TODO: unimplemented IBRs return a reserved register fault
  5.1139 +	// TODO: Should set Logical CPU state, not just physical
  5.1140 +	ia64_set_ibr(reg,val);
  5.1141 +	return (IA64_NO_FAULT);
  5.1142 +}
  5.1143 +
  5.1144 +IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  5.1145 +{
  5.1146 +	// TODO: unimplemented DBRs return a reserved register fault
  5.1147 +	UINT64 val = ia64_get_dbr(reg);
  5.1148 +	*pval = val;
  5.1149 +	return (IA64_NO_FAULT);
  5.1150 +}
  5.1151 +
  5.1152 +IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  5.1153 +{
  5.1154 +	// TODO: unimplemented IBRs return a reserved register fault
  5.1155 +	UINT64 val = ia64_get_ibr(reg);
  5.1156 +	*pval = val;
  5.1157 +	return (IA64_NO_FAULT);
  5.1158 +}
  5.1159 +
  5.1160 +/**************************************************************************
  5.1161 + VCPU performance monitor register access routines
  5.1162 +**************************************************************************/
  5.1163 +
  5.1164 +IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
  5.1165 +{
  5.1166 +	// TODO: Should set Logical CPU state, not just physical
  5.1167 +	// NOTE: Writes to unimplemented PMC registers are discarded
  5.1168 +	ia64_set_pmc(reg,val);
  5.1169 +	return (IA64_NO_FAULT);
  5.1170 +}
  5.1171 +
  5.1172 +IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
  5.1173 +{
  5.1174 +	// TODO: Should set Logical CPU state, not just physical
  5.1175 +	// NOTE: Writes to unimplemented PMD registers are discarded
  5.1176 +	ia64_set_pmd(reg,val);
  5.1177 +	return (IA64_NO_FAULT);
  5.1178 +}
  5.1179 +
  5.1180 +IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  5.1181 +{
  5.1182 +	// NOTE: Reads from unimplemented PMC registers return zero
  5.1183 +	UINT64 val = (UINT64)ia64_get_pmc(reg);
  5.1184 +	*pval = val;
  5.1185 +	return (IA64_NO_FAULT);
  5.1186 +}
  5.1187 +
  5.1188 +IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  5.1189 +{
  5.1190 +	// NOTE: Reads from unimplemented PMD registers return zero
  5.1191 +	UINT64 val = (UINT64)ia64_get_pmd(reg);
  5.1192 +	*pval = val;
  5.1193 +	return (IA64_NO_FAULT);
  5.1194 +}
  5.1195 +
  5.1196 +/**************************************************************************
  5.1197 + VCPU banked general register access routines
  5.1198 +**************************************************************************/
  5.1199 +
  5.1200 +IA64FAULT vcpu_bsw0(VCPU *vcpu)
  5.1201 +{
  5.1202 +	REGS *regs = vcpu_regs(vcpu);
  5.1203 +	unsigned long *r = &regs->r16;
  5.1204 +	unsigned long *b0 = &PSCB(vcpu).bank0_regs[0];
  5.1205 +	unsigned long *b1 = &PSCB(vcpu).bank1_regs[0];
  5.1206 +	int i;
  5.1207 +
  5.1208 +	if (PSCB(vcpu).banknum) {
  5.1209 +		for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
  5.1210 +		PSCB(vcpu).banknum = 0;
  5.1211 +	}
  5.1212 +	return (IA64_NO_FAULT);
  5.1213 +}
  5.1214 +
  5.1215 +IA64FAULT vcpu_bsw1(VCPU *vcpu)
  5.1216 +{
  5.1217 +	REGS *regs = vcpu_regs(vcpu);
  5.1218 +	unsigned long *r = &regs->r16;
  5.1219 +	unsigned long *b0 = &PSCB(vcpu).bank0_regs[0];
  5.1220 +	unsigned long *b1 = &PSCB(vcpu).bank1_regs[0];
  5.1221 +	int i;
  5.1222 +
  5.1223 +	if (!PSCB(vcpu).banknum) {
  5.1224 +		for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
  5.1225 +		PSCB(vcpu).banknum = 1;
  5.1226 +	}
  5.1227 +	return (IA64_NO_FAULT);
  5.1228 +}
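          +
          +// bsw0/bsw1 above emulate the bank-switch instruction by swapping
          +// r16-r31 between the trapped register frame and the inactive bank's
          +// save area; each is a no-op if the vcpu is already in that bank.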
  5.1229 +
  5.1230 +/**************************************************************************
  5.1231 + VCPU cpuid access routines
  5.1232 +**************************************************************************/
  5.1233 +
  5.1234 +
  5.1235 +IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  5.1236 +{
  5.1237 +	// FIXME: This could get called as a result of a rsvd-reg fault
  5.1238 +	// if reg > 3
  5.1239 +	switch(reg) {
  5.1240 +	    case 0:
  5.1241 +	    case 1:
  5.1242 +		memcpy(pval,"Xen/ia64",8);
  5.1243 +		break;
  5.1244 +	    case 2:
  5.1245 +		*pval = 0;
  5.1246 +		break;
  5.1247 +	    case 3:
  5.1248 +		*pval = 0;  //FIXME: See vol1, 3.1.11
  5.1249 +		break;
  5.1250 +	    case 4:
  5.1251 +		*pval = 1;  //FIXME: See vol1, 3.1.11
  5.1252 +		break;
  5.1253 +	    default:
  5.1254 +		*pval = 0;  //FIXME: See vol1, 3.1.11
  5.1255 +		break;
  5.1256 +	}
  5.1257 +	return (IA64_NO_FAULT);
  5.1258 +}
  5.1259 +
  5.1260 +/**************************************************************************
  5.1261 + VCPU region register access routines
  5.1262 +**************************************************************************/
  5.1263 +
  5.1264 +unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
  5.1265 +{
  5.1266 +	
  5.1267 +	ia64_rr rr;
  5.1268 +
  5.1269 +	rr.rrval = PSCB(vcpu).rrs[vadr>>61];
  5.1270 +	return(rr.ve);
  5.1271 +}
  5.1272 +
  5.1273 +
  5.1274 +unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
  5.1275 +{
  5.1276 +	
  5.1277 +	ia64_rr rr;
  5.1278 +
  5.1279 +	rr.rrval = PSCB(vcpu).rrs[vadr>>61];
  5.1280 +	return(rr.ps);
  5.1281 +}
  5.1282 +
  5.1283 +
  5.1284 +unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
  5.1285 +{
  5.1286 +	
  5.1287 +	ia64_rr rr;
  5.1288 +
  5.1289 +	rr.rrval = PSCB(vcpu).rrs[vadr>>61];
  5.1290 +	return(rr.rid);
  5.1291 +}
  5.1292 +
  5.1293 +
  5.1294 +IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
  5.1295 +{
  5.1296 +	extern void set_one_rr(UINT64, UINT64);
  5.1297 +	PSCB(vcpu).rrs[reg>>61] = val;
  5.1298 +	// warning: set_one_rr() does it "live"
  5.1299 +	set_one_rr(reg,val);
  5.1300 +	return (IA64_NO_FAULT);
  5.1301 +}
  5.1302 +
  5.1303 +IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  5.1304 +{
  5.1305 +	UINT64 val = PSCB(vcpu).rrs[reg>>61];
  5.1306 +	*pval = val;
  5.1307 +	return (IA64_NO_FAULT);
  5.1308 +}
  5.1309 +
  5.1310 +/**************************************************************************
  5.1311 + VCPU protection key register access routines
  5.1312 +**************************************************************************/
  5.1313 +
  5.1314 +IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  5.1315 +{
  5.1316 +#ifndef PKR_USE_FIXED
  5.1317 +	printk("vcpu_get_pkr: called, not implemented yet\n");
  5.1318 +	return IA64_ILLOP_FAULT;
  5.1319 +#else
  5.1320 +	UINT64 val = (UINT64)ia64_get_pkr(reg);
  5.1321 +	*pval = val;
  5.1322 +	return (IA64_NO_FAULT);
  5.1323 +#endif
  5.1324 +}
  5.1325 +
  5.1326 +IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
  5.1327 +{
  5.1328 +#ifndef PKR_USE_FIXED
  5.1329 +	printk("vcpu_set_pkr: called, not implemented yet\n");
  5.1330 +	return IA64_ILLOP_FAULT;
  5.1331 +#else
  5.1332 +//	if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
  5.1333 +	vcpu->pkrs[reg] = val;
  5.1334 +	ia64_set_pkr(reg,val);
  5.1335 +	return (IA64_NO_FAULT);
  5.1336 +#endif
  5.1337 +}
  5.1338 +
  5.1339 +/**************************************************************************
  5.1340 + VCPU translation register access routines
  5.1341 +**************************************************************************/
  5.1342 +
  5.1343 +static void vcpu_purge_tr_entry(TR_ENTRY *trp)
  5.1344 +{
  5.1345 +	trp->p = 0;
  5.1346 +}
  5.1347 +
  5.1348 +static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
  5.1349 +{
  5.1350 +	UINT64 ps;
  5.1351 +
  5.1352 +	trp->itir = itir;
  5.1353 +	trp->rid = virtualize_rid(current, get_rr(ifa) & RR_RID_MASK);
  5.1354 +	trp->p = 1;
  5.1355 +	ps = trp->ps;
  5.1356 +	trp->page_flags = pte;
  5.1357 +	if (trp->pl < 2) trp->pl = 2;
  5.1358 +	trp->vadr = ifa & ~0xfff;
  5.1359 +	if (ps > 12) { // "ignore" relevant low-order bits
  5.1360 +		trp->ppn &= ~((1UL<<(ps-12))-1);
  5.1361 +		trp->vadr &= ~((1UL<<ps)-1);
  5.1362 +	}
  5.1363 +}
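          +
          +// Example of the alignment step above: for a 64KB translation
          +// (ps = 16), ppn (a 4KB frame number) is masked by ~0xf and vadr by
          +// ~0xffff, so both are aligned to the mapping's page size.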
  5.1364 +
  5.1365 +TR_ENTRY *vcpu_match_tr_entry(VCPU *vcpu, TR_ENTRY *trp, UINT64 ifa, int count)
  5.1366 +{
  5.1367 +	unsigned long rid = (get_rr(ifa) & RR_RID_MASK);
  5.1368 +	int i;
  5.1369 +
  5.1370 +	for (i = 0; i < count; i++, trp++) {
  5.1371 +		if (!trp->p) continue;
  5.1372 +		if (physicalize_rid(vcpu,trp->rid) != rid) continue;
  5.1373 +		if (ifa < trp->vadr) continue;
  5.1374 +		if (ifa > (trp->vadr + (1L << trp->ps)) - 1) continue;
  5.1375 +		//if (trp->key && !match_pkr(vcpu,trp->key)) continue;
  5.1376 +		return trp;
  5.1377 +	}
  5.1378 +	return 0;
  5.1379 +}
  5.1380 +
  5.1381 +TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa)
  5.1382 +{
  5.1383 +	TR_ENTRY *trp;
  5.1384 +
  5.1385 +	trp = vcpu_match_tr_entry(vcpu,vcpu->shared_info->arch.dtrs,ifa,NDTRS);
  5.1386 +	if (trp) return trp;
  5.1387 +	trp = vcpu_match_tr_entry(vcpu,vcpu->shared_info->arch.itrs,ifa,NITRS);
  5.1388 +	if (trp) return trp;
  5.1389 +	return 0;
  5.1390 +}
  5.1391 +
  5.1392 +IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
  5.1393 +		UINT64 itir, UINT64 ifa)
  5.1394 +{
  5.1395 +	TR_ENTRY *trp;
  5.1396 +
  5.1397 +	if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
  5.1398 +	trp = &PSCB(vcpu).dtrs[slot];
  5.1399 +	vcpu_set_tr_entry(trp,pte,itir,ifa);
  5.1400 +	return IA64_NO_FAULT;
  5.1401 +}
  5.1402 +
  5.1403 +IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
  5.1404 +		UINT64 itir, UINT64 ifa)
  5.1405 +{
  5.1406 +	TR_ENTRY *trp;
  5.1407 +
  5.1408 +	if (slot >= NITRS) return IA64_RSVDREG_FAULT;
  5.1409 +	trp = &PSCB(vcpu).itrs[slot];
  5.1410 +	vcpu_set_tr_entry(trp,pte,itir,ifa);
  5.1411 +	return IA64_NO_FAULT;
  5.1412 +}
  5.1413 +
  5.1414 +/**************************************************************************
  5.1415 + VCPU translation cache access routines
  5.1416 +**************************************************************************/
  5.1417 +
  5.1418 +void foobar(void) { /*vcpu_verbose = 1;*/ }
  5.1419 +
  5.1420 +extern VCPU *dom0;
  5.1421 +
  5.1422 +void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 logps)
  5.1423 +{
  5.1424 +	unsigned long psr;
  5.1425 +	unsigned long ps = (vcpu==dom0) ? logps : PAGE_SHIFT;
  5.1426 +
  5.1427 +	// FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
  5.1428 +	// FIXME, must be inlined or potential for nested fault here!
  5.1429 +	psr = ia64_clear_ic();
  5.1430 +	ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
  5.1431 +	ia64_set_psr(psr);
  5.1432 +	// ia64_srlz_i(); // no srls req'd, will rfi later
  5.1433 +	if (IorD & 0x1) vcpu_set_tr_entry(&PSCB(vcpu).itlb,pte,logps<<2,vaddr);
  5.1434 +	if (IorD & 0x2) vcpu_set_tr_entry(&PSCB(vcpu).dtlb,pte,logps<<2,vaddr);
  5.1435 +}
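          +
          +// The logps<<2 above builds a minimal itir for the shadow entry:
          +// itir.ps occupies bits 7:2, so shifting the log2 page size left by
          +// two places it in the right field with key and reserved bits zero.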
  5.1436 +
  5.1437 +TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa)
  5.1438 +{
  5.1439 +	return vcpu_match_tr_entry(vcpu,&vcpu->shared_info->arch.dtlb,ifa,1);
  5.1440 +}
  5.1441 +
  5.1442 +IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
  5.1443 +{
  5.1444 +	unsigned long pteval, logps = (itir >> 2) & 0x3f;
  5.1445 +	unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
  5.1446 +
  5.1447 +	if (((itir & ~0xfc) >> 2) < PAGE_SHIFT) {
  5.1448 +		printf("vcpu_itc_d: domain trying to use smaller page size!\n");
  5.1449 +		//FIXME: kill domain here
  5.1450 +		while(1);
  5.1451 +	}
  5.1452 +	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
  5.1453 +	pteval = translate_domain_pte(pte,ifa,itir);
  5.1454 +	if (!pteval) return IA64_ILLOP_FAULT;
  5.1455 +	vcpu_itc_no_srlz(vcpu,2,ifa,pteval,logps);
  5.1456 +	return IA64_NO_FAULT;
  5.1457 +}
  5.1458 +
  5.1459 +IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
  5.1460 +{
  5.1461 +	unsigned long pteval, logps = (itir >> 2) & 0x3f;
  5.1462 +	unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
  5.1463 +
  5.1464 +	// FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
  5.1465 +	if (((itir & ~0xfc) >> 2) < PAGE_SHIFT) {
  5.1466 +		printf("vcpu_itc_i: domain trying to use smaller page size!\n");
  5.1467 +		//FIXME: kill domain here
  5.1468 +		while(1);
  5.1469 +	}
  5.1470 +	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
  5.1471 +	pteval = translate_domain_pte(pte,ifa,itir);
  5.1472 +	// FIXME: what to do if bad physical address? (machine check?)
  5.1473 +	if (!pteval) return IA64_ILLOP_FAULT;
  5.1474 +	vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,logps);
  5.1475 +	return IA64_NO_FAULT;
  5.1476 +}
  5.1477 +
  5.1478 +IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
  5.1479 +{
  5.1480 +	printk("vcpu_ptc_l: called, not implemented yet\n");
  5.1481 +	return IA64_ILLOP_FAULT;
  5.1482 +}
  5.1483 +
  5.1484 +IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
  5.1485 +{
  5.1486 +	UINT64 mpaddr;
  5.1487 +	IA64FAULT fault;
  5.1488 +	unsigned long lookup_domain_mpa(VCPU *,unsigned long);
  5.1489 +	unsigned long pteval, dom_imva;
  5.1490 +
  5.1491 +	fault = vcpu_tpa(vcpu, vadr, &mpaddr);
  5.1492 +	if (fault == IA64_NO_FAULT) {
  5.1493 +		extern unsigned long dom0_start, dom0_size;
  5.1495 +		if (vcpu == dom0) {
  5.1496 +			if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
  5.1497 +				printk("vcpu_fc: bad dom0 mpaddr %p!\n",mpaddr);
  5.1498 +			}
  5.1499 +		}
  5.1500 +		pteval = lookup_domain_mpa(vcpu,mpaddr);
  5.1501 +		if (pteval) {
  5.1502 +			dom_imva = __va(pteval & _PFN_MASK);
  5.1503 +			ia64_fc(dom_imva);
  5.1504 +		}
  5.1505 +		else {
  5.1506 +			REGS *regs = vcpu_regs(vcpu);
  5.1507 +			printk("vcpu_fc: can't flush vadr=%p, iip=%p\n",
  5.1508 +					vadr,regs->cr_iip);
  5.1509 +		}
  5.1510 +	}
  5.1511 +	return fault;
  5.1512 +}
  5.1513 +
  5.1514 +IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
  5.1515 +{
  5.1516 +
  5.1517 +	// Note that this only needs to be called once, i.e. the
  5.1518 +	// architected loop to purge the entire TLB, should use
  5.1519 +	//  base = stride1 = stride2 = 0, count0 = count 1 = 1
  5.1520 +
  5.1521 +	// FIXME: When VHPT is in place, flush that too!
  5.1522 +	local_flush_tlb_all();
  5.1523 +	// just invalidate the "whole" tlb
  5.1524 +	vcpu_purge_tr_entry(&PSCB(vcpu).dtlb);
  5.1525 +	vcpu_purge_tr_entry(&PSCB(vcpu).itlb);
  5.1526 +	return IA64_NO_FAULT;
  5.1527 +}
  5.1528 +
  5.1529 +IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
  5.1530 +{
  5.1531 +	printk("vcpu_ptc_g: called, not implemented yet\n");
  5.1532 +	return IA64_ILLOP_FAULT;
  5.1533 +}
  5.1534 +
  5.1535 +IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
  5.1536 +{
  5.1537 +	extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
  5.1538 +	// FIXME: validate not flushing Xen addresses
  5.1539 +	// if (Xen address) return(IA64_ILLOP_FAULT);
  5.1540 +	// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
  5.1541 +	ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
  5.1542 +	vcpu_purge_tr_entry(&PSCB(vcpu).dtlb);
  5.1543 +	vcpu_purge_tr_entry(&PSCB(vcpu).itlb);
  5.1544 +	return IA64_NO_FAULT;
  5.1545 +}
  5.1546 +
  5.1547 +IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
  5.1548 +{
  5.1549 +	printf("vcpu_ptr_d: Purging TLB is unsupported\n");
  5.1550 +	return (IA64_ILLOP_FAULT);
  5.1551 +}
  5.1552 +
  5.1553 +IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
  5.1554 +{
  5.1555 +	printf("vcpu_ptr_i: Purging TLB is unsupported\n");
  5.1556 +	return (IA64_ILLOP_FAULT);
  5.1557 +}
  5.1558 +
  5.1559 +void vcpu_set_regs(VCPU *vcpu, REGS *regs)
  5.1560 +{
  5.1561 +	vcpu->regs = regs;
  5.1562 +}
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/arch/ia64/xenasm.S	Mon Nov 22 22:57:05 2004 +0000
     6.3 @@ -0,0 +1,461 @@
     6.4 +/*
     6.5 + * Assembly support routines for Xen/ia64
     6.6 + *
     6.7 + * Copyright (C) 2004 Hewlett-Packard Co
     6.8 + *	Dan Magenheimer <dan.magenheimer@hp.com>
     6.9 + */
    6.10 +
    6.11 +#include <linux/config.h>
    6.12 +#include <asm/asmmacro.h>
    6.13 +#include <asm/processor.h>
    6.14 +#include <asm/pgtable.h>
    6.15 +#include <asm/vhpt.h>
    6.16 +
    6.17 +#define RunningOnHpSki(rx,ry,pn) 			\
    6.18 +	addl rx = 2, r0; 				\
    6.19 +	addl ry = 3, r0; 				\
    6.20 +	;; 						\
    6.21 +	mov rx = cpuid[rx]; 				\
    6.22 +	mov ry = cpuid[ry]; 				\
    6.23 +	;; 						\
    6.24 +	cmp.eq pn,p0 = 0, rx; 				\
    6.25 +	;; 						\
    6.26 +	(pn) movl rx = 0x7000004 ; 			\
    6.27 +	;; 						\
    6.28 +	(pn) cmp.eq pn,p0 = ry, rx; 			\
    6.29 +	;;
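          +
          +// The macro above probes CPUID[2] and CPUID[3]: on the HP Ski
          +// simulator CPUID[2] reads as 0 and CPUID[3] as 0x7000004
          +// (presumably Ski's version/model encoding), so pn is set only
          +// when both match.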
    6.30 +
    6.31 +//int platform_is_hp_ski(void)
    6.32 +GLOBAL_ENTRY(platform_is_hp_ski)
    6.33 +	mov r8 = 0
    6.34 +	RunningOnHpSki(r3,r9,p8)
    6.35 +(p8)	mov r8 = 1
    6.36 +	br.ret.sptk.many b0
    6.37 +END(platform_is_hp_ski)
    6.38 +
    6.39 +// Change rr7 to the passed value while ensuring
    6.40 +// Xen is mapped into the new region
    6.41 +#define PSR_BITS_TO_CLEAR						\
    6.42 +	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT |		\
    6.43 +	 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |	\
    6.44 +	 IA64_PSR_DFL | IA64_PSR_DFH)
    6.45 +// FIXME? Note that this turns off the DB bit (debug)
    6.46 +#define PSR_BITS_TO_SET	IA64_PSR_BN
    6.47 +
    6.48 +GLOBAL_ENTRY(ia64_new_rr7)
    6.49 +	// not sure this unwind statement is correct...
    6.50 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
    6.51 +	alloc loc1 = ar.pfs, 1, 7, 0, 0
    6.52 +1:	{
    6.53 +	  mov r28  = in0		// copy procedure index
    6.54 +	  mov r8   = ip			// save ip to compute branch
    6.55 +	  mov loc0 = rp			// save rp
    6.56 +	};;
    6.57 +	.body
    6.58 +	movl loc2=PERCPU_ADDR
    6.59 +	;;
    6.60 +	tpa loc2=loc2			// grab this BEFORE changing rr7
    6.61 +	;;
    6.62 +#if VHPT_ENABLED
    6.63 +	movl loc6=VHPT_ADDR
    6.64 +	;;
    6.65 +	tpa loc6=loc6			// grab this BEFORE changing rr7
    6.66 +	;;
    6.67 +#endif
    6.68 +	movl loc5=SHAREDINFO_ADDR
    6.69 +	;;
    6.70 +	tpa loc5=loc5			// grab this BEFORE changing rr7
    6.71 +	;;
    6.72 +	mov loc3 = psr			// save psr
    6.73 +	adds r8  = 1f-1b,r8		// calculate return address for call
    6.74 +	;;
    6.75 +	tpa r8=r8			// convert rp to physical
    6.76 +	;;
    6.77 +	mov loc4=ar.rsc			// save RSE configuration
    6.78 +	;;
    6.79 +	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
    6.80 +	movl r16=PSR_BITS_TO_CLEAR
    6.81 +	movl r17=PSR_BITS_TO_SET
    6.82 +	;;
    6.83 +	or loc3=loc3,r17		// add in psr the bits to set
    6.84 +	;;
    6.85 +	andcm r16=loc3,r16		// removes bits to clear from psr
    6.86 +	br.call.sptk.many rp=ia64_switch_mode_phys
    6.87 +1:
    6.88 +	// now in physical mode with psr.i/ic off so do rr7 switch
    6.89 +	dep	r16=-1,r0,61,3
    6.90 +	;;
    6.91 +	mov	rr[r16]=in0
    6.92 +	srlz.d
    6.93 +	;;
    6.94 +
    6.95 +	// re-pin mappings for kernel text and data
    6.96 +	mov r18=KERNEL_TR_PAGE_SHIFT<<2
    6.97 +	movl r17=KERNEL_START
    6.98 +	;;
    6.99 +	rsm psr.i | psr.ic
   6.100 +	;;
   6.101 +	srlz.i
   6.102 +	;;
   6.103 +	ptr.i	r17,r18
   6.104 +	ptr.d	r17,r18
   6.105 +	;;
   6.106 +	mov cr.itir=r18
   6.107 +	mov cr.ifa=r17
   6.108 +	mov r16=IA64_TR_KERNEL
   6.109 +	//mov r3=ip
   6.110 +	movl r18=PAGE_KERNEL
   6.111 +	;;
   6.112 +	dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
   6.113 +	;;
   6.114 +	or r18=r2,r18
   6.115 +	;;
   6.116 +	srlz.i
   6.117 +	;;
   6.118 +	itr.i itr[r16]=r18
   6.119 +	;;
   6.120 +	itr.d dtr[r16]=r18
   6.121 +	;;
   6.122 +
   6.123 +	// re-pin mappings for stack (current), per-cpu, vhpt, and shared info
   6.124 +
   6.125 +	// unless overlaps with KERNEL_TR
   6.126 +	dep r18=0,r13,0,KERNEL_TR_PAGE_SHIFT
   6.127 +	;;
   6.128 +	cmp.eq p7,p0=r17,r18
   6.129 +(p7)	br.cond.sptk	.stack_overlaps
   6.130 +	;;
   6.131 +	movl r25=PAGE_KERNEL
   6.132 +	dep r20=0,r13,50,14		// physical address of "current"
   6.133 +	;;
   6.134 +	or r23=r25,r20			// construct PA | page properties
   6.135 +	mov r25=IA64_GRANULE_SHIFT<<2
   6.136 +	;;
   6.137 +	ptr.d	r13,r25
   6.138 +	;;
   6.139 +	mov cr.itir=r25
   6.140 +	mov cr.ifa=r13			// VA of next task...
   6.141 +	;;
   6.142 +	mov r25=IA64_TR_CURRENT_STACK
   6.143 +	;;
   6.144 +	itr.d dtr[r25]=r23		// wire in new mapping...
   6.145 +	;;
   6.146 +.stack_overlaps:
   6.147 +
   6.148 +	movl r22=PERCPU_ADDR
   6.149 +	;;
   6.150 +	movl r25=PAGE_KERNEL
   6.151 +	;;
   6.152 +	mov r20=loc2			// saved percpu physical address
   6.153 +	;;
   6.154 +	or r23=r25,r20			// construct PA | page properties
   6.155 +	mov r24=PERCPU_PAGE_SHIFT<<2
   6.156 +	;;
   6.157 +	ptr.d	r22,r24
   6.158 +	;;
   6.159 +	mov cr.itir=r24
   6.160 +	mov cr.ifa=r22
   6.161 +	;;
   6.162 +	mov r25=IA64_TR_PERCPU_DATA
   6.163 +	;;
   6.164 +	itr.d dtr[r25]=r23		// wire in new mapping...
   6.165 +	;;
   6.166 +
   6.167 +#if VHPT_ENABLED
   6.168 +	movl r22=VHPT_ADDR
   6.169 +	;;
   6.170 +	movl r25=PAGE_KERNEL
   6.171 +	;;
   6.172 +	mov r20=loc6			// saved vhpt physical address
   6.173 +	;;
   6.174 +	or r23=r25,r20			// construct PA | page properties
   6.175 +	mov r24=VHPT_PAGE_SHIFT<<2
   6.176 +	;;
   6.177 +	ptr.d	r22,r24
   6.178 +	;;
   6.179 +	mov cr.itir=r24
   6.180 +	mov cr.ifa=r22
   6.181 +	;;
   6.182 +	mov r25=IA64_TR_VHPT
   6.183 +	;;
   6.184 +	itr.d dtr[r25]=r23		// wire in new mapping...
   6.185 +	;;
   6.186 +#endif
   6.187 +
   6.188 +	movl r22=SHAREDINFO_ADDR
   6.189 +	;;
   6.190 +	movl r25=PAGE_KERNEL
   6.191 +	;;
   6.192 +	mov r20=loc5			// saved sharedinfo physical address
   6.193 +	;;
   6.194 +	or r23=r25,r20			// construct PA | page properties
   6.195 +	mov r24=PAGE_SHIFT<<2
   6.196 +	;;
   6.197 +	ptr.d	r22,r24
   6.198 +	;;
   6.199 +	mov cr.itir=r24
   6.200 +	mov cr.ifa=r22
   6.201 +	;;
   6.202 +	mov r25=IA64_TR_SHARED_INFO
   6.203 +	;;
   6.204 +	itr.d dtr[r25]=r23		// wire in new mapping...
   6.205 +	;;
   6.206 +
   6.207 +	// done, switch back to virtual and return
   6.208 +	mov r16=loc3			// r16= original psr
   6.209 +	br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
    6.210 +	mov psr.l = loc3		// restore original psr
   6.211 +
   6.212 +	mov ar.pfs = loc1
   6.213 +	mov rp = loc0
   6.214 +	;;
   6.215 +	mov ar.rsc=loc4			// restore RSE configuration
    6.216 +	srlz.d				// serialize restoration of psr.l
   6.217 +	br.ret.sptk.many rp
   6.218 +END(ia64_new_rr7)
   6.219 +
   6.220 +#include "minstate.h"
   6.221 +
   6.222 +GLOBAL_ENTRY(ia64_prepare_handle_privop)
   6.223 +	.prologue
   6.224 +	/*
   6.225 +	 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
   6.226 +	 */
   6.227 +	mov r16=r0
   6.228 +	DO_SAVE_SWITCH_STACK
   6.229 +	br.call.sptk.many rp=ia64_handle_privop		// stack frame setup in ivt
   6.230 +.ret22:	.body
   6.231 +	DO_LOAD_SWITCH_STACK
   6.232 +	br.cond.sptk.many rp				// goes to ia64_leave_kernel
   6.233 +END(ia64_prepare_handle_privop)
   6.234 +
   6.235 +GLOBAL_ENTRY(ia64_prepare_handle_break)
   6.236 +	.prologue
   6.237 +	/*
   6.238 +	 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
   6.239 +	 */
   6.240 +	mov r16=r0
   6.241 +	DO_SAVE_SWITCH_STACK
   6.242 +	br.call.sptk.many rp=ia64_handle_break	// stack frame setup in ivt
   6.243 +.ret23:	.body
   6.244 +	DO_LOAD_SWITCH_STACK
   6.245 +	br.cond.sptk.many rp			// goes to ia64_leave_kernel
   6.246 +END(ia64_prepare_handle_break)
   6.247 +
   6.248 +GLOBAL_ENTRY(ia64_prepare_handle_reflection)
   6.249 +	.prologue
   6.250 +	/*
   6.251 +	 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
   6.252 +	 */
   6.253 +	mov r16=r0
   6.254 +	DO_SAVE_SWITCH_STACK
   6.255 +	br.call.sptk.many rp=ia64_handle_reflection	// stack frame setup in ivt
   6.256 +.ret24:	.body
   6.257 +	DO_LOAD_SWITCH_STACK
   6.258 +	br.cond.sptk.many rp			// goes to ia64_leave_kernel
   6.259 +END(ia64_prepare_handle_reflection)
   6.260 +
   6.261 +// NOTE: instruction spacing must be explicit for recovery on miss
   6.262 +GLOBAL_ENTRY(__get_domain_bundle)
   6.263 +	ld8 r8=[r32],8
   6.264 +	nop 0
   6.265 +	nop 0
   6.266 +	;;
   6.267 +	ld8 r9=[r32]
   6.268 +	nop 0
   6.269 +	nop 0
   6.270 +	;;
   6.271 +	br.ret.sptk.many rp
   6.272 +	nop 0
   6.273 +	nop 0
   6.274 +	;;
   6.275 +END(__get_domain_bundle)
   6.276 +
   6.277 +GLOBAL_ENTRY(dorfirfi)
   6.278 +#define SI_CR_IIP_OFFSET 0x150
   6.279 +#define SI_CR_IPSR_OFFSET 0x148
   6.280 +#define SI_CR_IFS_OFFSET 0x158
   6.281 +        movl r16 = SHAREDINFO_ADDR+SI_CR_IIP_OFFSET
   6.282 +        movl r17 = SHAREDINFO_ADDR+SI_CR_IPSR_OFFSET
   6.283 +        movl r18 = SHAREDINFO_ADDR+SI_CR_IFS_OFFSET
   6.284 +	;;
   6.285 +	ld8 r16 = [r16]
   6.286 +	ld8 r17 = [r17]
   6.287 +	ld8 r18 = [r18]
   6.288 +	;;
   6.289 +        mov cr.iip=r16
   6.290 +        mov cr.ipsr=r17
   6.291 +        mov cr.ifs=r18
   6.292 +	;;
   6.293 +        // fall through
   6.294 +END(dorfirfi)
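          +
          +// dorfirfi is the trampoline targeted by vcpu_rfi() in vcpu.c: it
          +// reloads cr.iip/cr.ipsr/cr.ifs from fixed shared_info offsets
          +// (cross-checked there via SI_OFS) and falls through into the rfi
          +// below, so the offsets here must track the shared_info layout.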
   6.295 +
   6.296 +GLOBAL_ENTRY(dorfi)
   6.297 +        rfi
   6.298 +	;;
    6.299 +END(dorfi)
   6.300 +
   6.301 +//
   6.302 +// Long's Peak UART Offsets
   6.303 +//
   6.304 +#define COM_TOP 0xff5e0000
   6.305 +#define COM_BOT 0xff5e2000
   6.306 +
   6.307 +// UART offsets	
   6.308 +#define UART_TX		0	/* Out: Transmit buffer (DLAB=0) */
   6.309 +#define UART_INT_ENB	1	/* interrupt enable (DLAB=0) */	
   6.310 +#define UART_INT_ID	2	/* Interrupt ID register */
   6.311 +#define UART_LINE_CTL	3	/* Line control register */
   6.312 +#define UART_MODEM_CTL	4	/* Modem Control Register */
   6.313 +#define UART_LSR	5	/* In:  Line Status Register */
   6.314 +#define UART_MSR	6	/* Modem status register */	
   6.315 +#define UART_DLATCH_LOW UART_TX
   6.316 +#define UART_DLATCH_HIGH UART_INT_ENB
   6.317 +#define COM1   0x3f8
   6.318 +#define COM2   0x2F8
   6.319 +#define COM3   0x3E8
   6.320 +
   6.321 +/* interrupt enable bits (offset 1) */
   6.322 +#define DATA_AVAIL_INT 1
   6.323 +#define XMIT_HOLD_EMPTY_INT 2
   6.324 +#define LINE_STAT_INT 4
   6.325 +#define MODEM_STAT_INT 8
   6.326 +
   6.327 +/* line status bits (offset 5) */
   6.328 +#define REC_DATA_READY 1
   6.329 +#define OVERRUN 2
   6.330 +#define PARITY_ERROR 4
   6.331 +#define FRAMING_ERROR 8
   6.332 +#define BREAK_INTERRUPT 0x10
   6.333 +#define XMIT_HOLD_EMPTY 0x20
   6.334 +#define XMIT_SHIFT_EMPTY 0x40
   6.335 +
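          +// longs_peak_putc below turns data translation off (psr.dt) and uses
          +// bit 63 to form an uncacheable physical address for the UART: it
          +// polls XMIT_HOLD_EMPTY in the LSR until the holding register
          +// drains, then writes the byte to UART_TX and restores psr.dt.
          +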
   6.336 +// Write a single character
   6.337 +// input: r32 = character to be written
   6.338 +// output: none
   6.339 +GLOBAL_ENTRY(longs_peak_putc)	
   6.340 +	rsm psr.dt
   6.341 +        movl r16 = 0x8000000000000000 + COM_TOP + UART_LSR
   6.342 +	;;
   6.343 +	srlz.i
   6.344 +	;;
   6.345 +
   6.346 +.Chk_THRE_p:
   6.347 +        ld1.acq r18=[r16]
   6.348 +        ;;
   6.349 +	
   6.350 +	and r18 = XMIT_HOLD_EMPTY, r18
   6.351 +	;;
   6.352 +	cmp4.eq p6,p0=0,r18
   6.353 +	;;
   6.354 +	
   6.355 +(p6)    br .Chk_THRE_p
   6.356 +	;;
   6.357 +        movl r16 = 0x8000000000000000 + COM_TOP + UART_TX
   6.358 +	;;
   6.359 +	st1.rel [r16]=r32
   6.360 +	;;
   6.361 +	ssm psr.dt
   6.362 +	;;
   6.363 +	srlz.i
   6.364 +	;;
   6.365 +	br.ret.sptk.many b0
   6.366 +END(longs_peak_putc)	
   6.367 +
   6.368 +/* derived from linux/arch/ia64/hp/sim/boot/boot_head.S */
   6.369 +GLOBAL_ENTRY(pal_emulator_static)
   6.370 +	mov r8=-1
   6.371 +	mov r9=256
   6.372 +	;;
   6.373 +	cmp.gtu p7,p8=r9,r32		/* r32 <= 255? */
   6.374 +(p7)	br.cond.sptk.few static
   6.375 +	;;
   6.376 +	mov r9=512
   6.377 +	;;
   6.378 +	cmp.gtu p7,p8=r9,r32
   6.379 +(p7)	br.cond.sptk.few stacked
   6.380 +	;;
   6.381 +static:	cmp.eq p7,p8=6,r32		/* PAL_PTCE_INFO */
   6.382 +(p8)	br.cond.sptk.few 1f
   6.383 +	;;
   6.384 +	mov r8=0			/* status = 0 */
   6.385 +	movl r9=0x100000000		/* tc.base */
   6.386 +	movl r10=0x0000000200000003	/* count[0], count[1] */
   6.387 +	movl r11=0x1000000000002000	/* stride[0], stride[1] */
   6.388 +	br.ret.sptk.few rp
   6.389 +1:	cmp.eq p7,p8=14,r32		/* PAL_FREQ_RATIOS */
   6.390 +(p8)	br.cond.sptk.few 1f
   6.391 +	mov r8=0			/* status = 0 */
   6.392 +	movl r9 =0x900000002		/* proc_ratio (1/100) */
   6.393 +	movl r10=0x100000100		/* bus_ratio<<32 (1/256) */
   6.394 +	movl r11=0x900000002		/* itc_ratio<<32 (1/100) */
   6.395 +	;;
   6.396 +1:	cmp.eq p7,p8=19,r32		/* PAL_RSE_INFO */
   6.397 +(p8)	br.cond.sptk.few 1f
   6.398 +	mov r8=0			/* status = 0 */
   6.399 +	mov r9=96			/* num phys stacked */
   6.400 +	mov r10=0			/* hints */
   6.401 +	mov r11=0
   6.402 +	br.ret.sptk.few rp
   6.403 +1:	cmp.eq p7,p8=1,r32		/* PAL_CACHE_FLUSH */
   6.404 +(p8)	br.cond.sptk.few 1f
   6.405 +#if 0
   6.406 +	mov r9=ar.lc
    6.407 +	movl r8=524288			/* flush 512K cache lines (16MB) */
   6.408 +	;;
   6.409 +	mov ar.lc=r8
   6.410 +	movl r8=0xe000000000000000
   6.411 +	;;
   6.412 +.loop:	fc r8
   6.413 +	add r8=32,r8
   6.414 +	br.cloop.sptk.few .loop
   6.415 +	sync.i
   6.416 +	;;
   6.417 +	srlz.i
   6.418 +	;;
   6.419 +	mov ar.lc=r9
   6.420 +	mov r8=r0
   6.421 +	;;
   6.422 +1:	cmp.eq p7,p8=15,r32		/* PAL_PERF_MON_INFO */
   6.423 +(p8)	br.cond.sptk.few 1f
   6.424 +	mov r8=0			/* status = 0 */
   6.425 +	movl r9 =0x08122f04		/* generic=4 width=47 retired=8 cycles=18 */
   6.426 +	mov r10=0			/* reserved */
   6.427 +	mov r11=0			/* reserved */
   6.428 +	mov r16=0xffff			/* implemented PMC */
   6.429 +	mov r17=0x3ffff			/* implemented PMD */
   6.430 +	add r18=8,r29			/* second index */
   6.431 +	;;
   6.432 +	st8 [r29]=r16,16		/* store implemented PMC */
   6.433 +	st8 [r18]=r0,16			/* clear remaining bits  */
   6.434 +	;;
   6.435 +	st8 [r29]=r0,16			/* clear remaining bits  */
   6.436 +	st8 [r18]=r0,16			/* clear remaining bits  */
   6.437 +	;;
   6.438 +	st8 [r29]=r17,16		/* store implemented PMD */
   6.439 +	st8 [r18]=r0,16			/* clear remaining bits  */
   6.440 +	mov r16=0xf0			/* cycles count capable PMC */
   6.441 +	;;
   6.442 +	st8 [r29]=r0,16			/* clear remaining bits  */
   6.443 +	st8 [r18]=r0,16			/* clear remaining bits  */
   6.444 +	mov r17=0xf0			/* retired bundles capable PMC */
   6.445 +	;;
   6.446 +	st8 [r29]=r16,16		/* store cycles capable */
   6.447 +	st8 [r18]=r0,16			/* clear remaining bits  */
   6.448 +	;;
   6.449 +	st8 [r29]=r0,16			/* clear remaining bits  */
   6.450 +	st8 [r18]=r0,16			/* clear remaining bits  */
   6.451 +	;;
   6.452 +	st8 [r29]=r17,16		/* store retired bundle capable */
   6.453 +	st8 [r18]=r0,16			/* clear remaining bits  */
   6.454 +	;;
   6.455 +	st8 [r29]=r0,16			/* clear remaining bits  */
   6.456 +	st8 [r18]=r0,16			/* clear remaining bits  */
   6.457 +	;;
   6.458 +1:	br.cond.sptk.few rp
   6.459 +#else
   6.460 +1:
   6.461 +#endif
   6.462 +stacked:
   6.463 +	br.ret.sptk.few rp
   6.464 +END(pal_emulator_static)