ia64/xen-unstable

view xen/arch/ia64/xen/privop.c @ 9768:63af1c14fa18

[IA64] missed chunk of Kevin's hypercall cleanup patch

Missed this chunk of Kevin's patch when merging with dom0vp changes

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Tue Apr 25 22:30:07 2006 -0600 (2006-04-25)
parents 96bc87dd7ca9
children ee97d247a3b7
line source
1 /*
2 * Privileged operation "API" handling functions.
3 *
4 * Copyright (C) 2004 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <asm/privop.h>
10 #include <asm/vcpu.h>
11 #include <asm/processor.h>
12 #include <asm/delay.h> // Debug only
13 #include <asm/dom_fw.h>
14 #include <asm/vhpt.h>
15 //#include <debug.h>
17 /* FIXME: where should these declarations live? */
18 extern int dump_reflect_counts(char *);
19 extern void zero_reflect_counts(void);
21 long priv_verbose=0;
23 /* Set to 1 to handle privified instructions from the privify tool. */
24 static const int privify_en = 0;
26 /**************************************************************************
27 Hypercall bundle creation
28 **************************************************************************/
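/* Assemble a single MIB bundle at imva that acts as a hypercall stub:
 *   slot 0: addl r2 = hypnum, r0   -- hypercall number into r2
 *   slot 1: break brkimm           -- trap into Xen
 *   slot 2: br.ret.sptk.many rp (if ret) or br.cond.sptk.many rp
 * Both halves of the bundle are then flushed with fc so instruction
 * fetch sees the newly written stub. */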
31 void build_hypercall_bundle(UINT64 *imva, UINT64 brkimm, UINT64 hypnum, UINT64 ret)
32 {
33 INST64_A5 slot0;
34 INST64_I19 slot1;
35 INST64_B4 slot2;
36 IA64_BUNDLE bundle;
38 // slot0: mov r2 = hypnum (low 21 bits)
39 slot0.inst = 0;
40 slot0.qp = 0; slot0.r1 = 2; slot0.r3 = 0; slot0.major = 0x9;
41 slot0.imm7b = hypnum; slot0.imm9d = hypnum >> 7;
42 slot0.imm5c = hypnum >> 16; slot0.s = 0;
43 // slot1: break brkimm
44 slot1.inst = 0;
45 slot1.qp = 0; slot1.x6 = 0; slot1.x3 = 0; slot1.major = 0x0;
46 slot1.imm20 = brkimm; slot1.i = brkimm >> 20;
47 // if ret slot2: br.ret.sptk.many rp
48 // else slot2: br.cond.sptk.many rp
49 slot2.inst = 0; slot2.qp = 0; slot2.p = 1; slot2.b2 = 0;
50 slot2.wh = 0; slot2.d = 0; slot2.major = 0x0;
51 if (ret) {
52 slot2.btype = 4; slot2.x6 = 0x21;
53 }
54 else {
55 slot2.btype = 0; slot2.x6 = 0x20;
56 }
58 bundle.i64[0] = 0; bundle.i64[1] = 0;
59 bundle.template = 0x11;
60 bundle.slot0 = slot0.inst; bundle.slot2 = slot2.inst;
61 bundle.slot1a = slot1.inst; bundle.slot1b = slot1.inst >> 18;
63 imva[0] = bundle.i64[0]; imva[1] = bundle.i64[1];
64 ia64_fc (imva);
65 ia64_fc (imva + 1);
66 }
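/* Patch a private copy of the two-bundle pal_call_stub (defined in xenasm.S)
 * into imva: the A5 slot of the first bundle receives the hypercall number
 * and the break.m slot of the second bundle receives the break immediate. */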
68 void build_pal_hypercall_bundles(UINT64 *imva, UINT64 brkimm, UINT64 hypnum)
69 {
70 extern unsigned long pal_call_stub[];
71 IA64_BUNDLE bundle;
72 INST64_A5 slot_a5;
73 INST64_M37 slot_m37;
75 /* The source of the hypercall stub is the pal_call_stub function
76 defined in xenasm.S. */
78 /* Copy the first bundle and patch the hypercall number. */
79 bundle.i64[0] = pal_call_stub[0];
80 bundle.i64[1] = pal_call_stub[1];
81 slot_a5.inst = bundle.slot0;
82 slot_a5.imm7b = hypnum;
83 slot_a5.imm9d = hypnum >> 7;
84 slot_a5.imm5c = hypnum >> 16;
85 bundle.slot0 = slot_a5.inst;
86 imva[0] = bundle.i64[0];
87 imva[1] = bundle.i64[1];
88 ia64_fc (imva);
89 ia64_fc (imva + 1);
91 /* Copy the second bundle and patch the hypercall vector. */
92 bundle.i64[0] = pal_call_stub[2];
93 bundle.i64[1] = pal_call_stub[3];
94 slot_m37.inst = bundle.slot0;
95 slot_m37.imm20a = brkimm;
96 slot_m37.i = brkimm >> 20;
97 bundle.slot0 = slot_m37.inst;
98 imva[2] = bundle.i64[0];
99 imva[3] = bundle.i64[1];
100 ia64_fc (imva + 2);
101 ia64_fc (imva + 3);
102 }
105 /**************************************************************************
106 Privileged operation emulation routines
107 **************************************************************************/
109 IA64FAULT priv_rfi(VCPU *vcpu, INST64 inst)
110 {
111 return vcpu_rfi(vcpu);
112 }
114 IA64FAULT priv_bsw0(VCPU *vcpu, INST64 inst)
115 {
116 return vcpu_bsw0(vcpu);
117 }
119 IA64FAULT priv_bsw1(VCPU *vcpu, INST64 inst)
120 {
121 return vcpu_bsw1(vcpu);
122 }
124 IA64FAULT priv_cover(VCPU *vcpu, INST64 inst)
125 {
126 return vcpu_cover(vcpu);
127 }
129 IA64FAULT priv_ptc_l(VCPU *vcpu, INST64 inst)
130 {
131 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
132 UINT64 addr_range;
134 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
135 return vcpu_ptc_l(vcpu,vadr,addr_range);
136 }
138 IA64FAULT priv_ptc_e(VCPU *vcpu, INST64 inst)
139 {
140 UINT src = inst.M28.r3;
142 // NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
143 if (privify_en && src > 63)
144 return(vcpu_fc(vcpu,vcpu_get_gr(vcpu,src - 64)));
145 return vcpu_ptc_e(vcpu,vcpu_get_gr(vcpu,src));
146 }
148 IA64FAULT priv_ptc_g(VCPU *vcpu, INST64 inst)
149 {
150 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
151 UINT64 addr_range;
153 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
154 return vcpu_ptc_g(vcpu,vadr,addr_range);
155 }
157 IA64FAULT priv_ptc_ga(VCPU *vcpu, INST64 inst)
158 {
159 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
160 UINT64 addr_range;
162 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
163 return vcpu_ptc_ga(vcpu,vadr,addr_range);
164 }
166 IA64FAULT priv_ptr_d(VCPU *vcpu, INST64 inst)
167 {
168 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
169 UINT64 addr_range;
171 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
172 return vcpu_ptr_d(vcpu,vadr,addr_range);
173 }
175 IA64FAULT priv_ptr_i(VCPU *vcpu, INST64 inst)
176 {
177 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
178 UINT64 addr_range;
180 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
181 return vcpu_ptr_i(vcpu,vadr,addr_range);
182 }
184 IA64FAULT priv_tpa(VCPU *vcpu, INST64 inst)
185 {
186 UINT64 padr;
187 UINT fault;
188 UINT src = inst.M46.r3;
190 // NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
191 if (privify_en && src > 63)
192 fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
193 else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
194 if (fault == IA64_NO_FAULT)
195 return vcpu_set_gr(vcpu, inst.M46.r1, padr, 0);
196 else return fault;
197 }
199 IA64FAULT priv_tak(VCPU *vcpu, INST64 inst)
200 {
201 UINT64 key;
202 UINT fault;
203 UINT src = inst.M46.r3;
205 // NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
206 if (privify_en && src > 63)
207 fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
208 else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
209 if (fault == IA64_NO_FAULT)
210 return vcpu_set_gr(vcpu, inst.M46.r1, key,0);
211 else return fault;
212 }
214 /************************************
215 * Insert translation register/cache
216 ************************************/
218 IA64FAULT priv_itr_d(VCPU *vcpu, INST64 inst)
219 {
220 UINT64 fault, itir, ifa, pte, slot;
222 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
223 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
224 return(IA64_ILLOP_FAULT);
225 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
226 return(IA64_ILLOP_FAULT);
227 pte = vcpu_get_gr(vcpu,inst.M42.r2);
228 slot = vcpu_get_gr(vcpu,inst.M42.r3);
230 return (vcpu_itr_d(vcpu,slot,pte,itir,ifa));
231 }
233 IA64FAULT priv_itr_i(VCPU *vcpu, INST64 inst)
234 {
235 UINT64 fault, itir, ifa, pte, slot;
237 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
238 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
239 return(IA64_ILLOP_FAULT);
240 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
241 return(IA64_ILLOP_FAULT);
242 pte = vcpu_get_gr(vcpu,inst.M42.r2);
243 slot = vcpu_get_gr(vcpu,inst.M42.r3);
245 return (vcpu_itr_i(vcpu,slot,pte,itir,ifa));
246 }
248 IA64FAULT priv_itc_d(VCPU *vcpu, INST64 inst)
249 {
250 UINT64 fault, itir, ifa, pte;
252 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
253 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
254 return(IA64_ILLOP_FAULT);
255 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
256 return(IA64_ILLOP_FAULT);
257 pte = vcpu_get_gr(vcpu,inst.M41.r2);
259 return (vcpu_itc_d(vcpu,pte,itir,ifa));
260 }
262 IA64FAULT priv_itc_i(VCPU *vcpu, INST64 inst)
263 {
264 UINT64 fault, itir, ifa, pte;
266 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
267 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
268 return(IA64_ILLOP_FAULT);
269 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
270 return(IA64_ILLOP_FAULT);
271 pte = vcpu_get_gr(vcpu,inst.M41.r2);
273 return (vcpu_itc_i(vcpu,pte,itir,ifa));
274 }
276 /*************************************
277 * Moves to semi-privileged registers
278 *************************************/
280 IA64FAULT priv_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
281 {
282 // I27 and M30 are identical for these fields
283 UINT64 ar3 = inst.M30.ar3;
284 UINT64 imm = vcpu_get_gr(vcpu,inst.M30.imm);
285 return (vcpu_set_ar(vcpu,ar3,imm));
286 }
288 IA64FAULT priv_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
289 {
290 // I26 and M29 are identical for these fields
291 UINT64 ar3 = inst.M29.ar3;
293 if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8) {
294 // privified mov from kr
295 UINT64 val;
296 if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
297 return vcpu_set_gr(vcpu, inst.M29.r2-64, val,0);
298 else return IA64_ILLOP_FAULT;
299 }
300 else {
301 UINT64 r2 = vcpu_get_gr(vcpu,inst.M29.r2);
302 return (vcpu_set_ar(vcpu,ar3,r2));
303 }
304 }
306 /********************************
307 * Moves to privileged registers
308 ********************************/
310 IA64FAULT priv_mov_to_pkr(VCPU *vcpu, INST64 inst)
311 {
312 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
313 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
314 return (vcpu_set_pkr(vcpu,r3,r2));
315 }
317 IA64FAULT priv_mov_to_rr(VCPU *vcpu, INST64 inst)
318 {
319 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
320 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
321 return (vcpu_set_rr(vcpu,r3,r2));
322 }
324 IA64FAULT priv_mov_to_dbr(VCPU *vcpu, INST64 inst)
325 {
326 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
327 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
328 return (vcpu_set_dbr(vcpu,r3,r2));
329 }
331 IA64FAULT priv_mov_to_ibr(VCPU *vcpu, INST64 inst)
332 {
333 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
334 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
335 return (vcpu_set_ibr(vcpu,r3,r2));
336 }
338 IA64FAULT priv_mov_to_pmc(VCPU *vcpu, INST64 inst)
339 {
340 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
341 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
342 return (vcpu_set_pmc(vcpu,r3,r2));
343 }
345 IA64FAULT priv_mov_to_pmd(VCPU *vcpu, INST64 inst)
346 {
347 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
348 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
349 return (vcpu_set_pmd(vcpu,r3,r2));
350 }
352 unsigned long to_cr_cnt[128] = { 0 };
354 IA64FAULT priv_mov_to_cr(VCPU *vcpu, INST64 inst)
355 {
356 UINT64 val = vcpu_get_gr(vcpu, inst.M32.r2);
357 to_cr_cnt[inst.M32.cr3]++;
358 switch (inst.M32.cr3) {
359 case 0: return vcpu_set_dcr(vcpu,val);
360 case 1: return vcpu_set_itm(vcpu,val);
361 case 2: return vcpu_set_iva(vcpu,val);
362 case 8: return vcpu_set_pta(vcpu,val);
363 case 16:return vcpu_set_ipsr(vcpu,val);
364 case 17:return vcpu_set_isr(vcpu,val);
365 case 19:return vcpu_set_iip(vcpu,val);
366 case 20:return vcpu_set_ifa(vcpu,val);
367 case 21:return vcpu_set_itir(vcpu,val);
368 case 22:return vcpu_set_iipa(vcpu,val);
369 case 23:return vcpu_set_ifs(vcpu,val);
370 case 24:return vcpu_set_iim(vcpu,val);
371 case 25:return vcpu_set_iha(vcpu,val);
372 case 64:return vcpu_set_lid(vcpu,val);
373 case 65:return IA64_ILLOP_FAULT;
374 case 66:return vcpu_set_tpr(vcpu,val);
375 case 67:return vcpu_set_eoi(vcpu,val);
376 case 68:return IA64_ILLOP_FAULT;
377 case 69:return IA64_ILLOP_FAULT;
378 case 70:return IA64_ILLOP_FAULT;
379 case 71:return IA64_ILLOP_FAULT;
380 case 72:return vcpu_set_itv(vcpu,val);
381 case 73:return vcpu_set_pmv(vcpu,val);
382 case 74:return vcpu_set_cmcv(vcpu,val);
383 case 80:return vcpu_set_lrr0(vcpu,val);
384 case 81:return vcpu_set_lrr1(vcpu,val);
385 default: return IA64_ILLOP_FAULT;
386 }
387 }
389 IA64FAULT priv_rsm(VCPU *vcpu, INST64 inst)
390 {
391 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
392 return vcpu_reset_psr_sm(vcpu,imm24);
393 }
395 IA64FAULT priv_ssm(VCPU *vcpu, INST64 inst)
396 {
397 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
398 return vcpu_set_psr_sm(vcpu,imm24);
399 }
401 /**
402 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
403 */
404 IA64FAULT priv_mov_to_psr(VCPU *vcpu, INST64 inst)
405 {
406 UINT64 val = vcpu_get_gr(vcpu, inst.M35.r2);
407 return vcpu_set_psr_l(vcpu,val);
408 }
410 /**********************************
411 * Moves from privileged registers
412 **********************************/
414 IA64FAULT priv_mov_from_rr(VCPU *vcpu, INST64 inst)
415 {
416 UINT64 val;
417 IA64FAULT fault;
418 UINT64 reg;
420 reg = vcpu_get_gr(vcpu,inst.M43.r3);
421 if (privify_en && inst.M43.r1 > 63) {
422 // privified mov from cpuid
423 fault = vcpu_get_cpuid(vcpu,reg,&val);
424 if (fault == IA64_NO_FAULT)
425 return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
426 }
427 else {
428 fault = vcpu_get_rr(vcpu,reg,&val);
429 if (fault == IA64_NO_FAULT)
430 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
431 }
432 return fault;
433 }
435 IA64FAULT priv_mov_from_pkr(VCPU *vcpu, INST64 inst)
436 {
437 UINT64 val;
438 IA64FAULT fault;
440 fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
441 if (fault == IA64_NO_FAULT)
442 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
443 else return fault;
444 }
446 IA64FAULT priv_mov_from_dbr(VCPU *vcpu, INST64 inst)
447 {
448 UINT64 val;
449 IA64FAULT fault;
451 fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
452 if (fault == IA64_NO_FAULT)
453 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
454 else return fault;
455 }
457 IA64FAULT priv_mov_from_ibr(VCPU *vcpu, INST64 inst)
458 {
459 UINT64 val;
460 IA64FAULT fault;
462 fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
463 if (fault == IA64_NO_FAULT)
464 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
465 else return fault;
466 }
468 IA64FAULT priv_mov_from_pmc(VCPU *vcpu, INST64 inst)
469 {
470 UINT64 val;
471 IA64FAULT fault;
472 UINT64 reg;
474 reg = vcpu_get_gr(vcpu,inst.M43.r3);
475 if (privify_en && inst.M43.r1 > 63) {
476 // privified mov from pmd
477 fault = vcpu_get_pmd(vcpu,reg,&val);
478 if (fault == IA64_NO_FAULT)
479 return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
480 }
481 else {
482 fault = vcpu_get_pmc(vcpu,reg,&val);
483 if (fault == IA64_NO_FAULT)
484 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
485 }
486 return fault;
487 }
489 unsigned long from_cr_cnt[128] = { 0 };
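/* Read control register "cr" via vcpu_get_<cr>() and, on success, store the
 * value into general register "tgt"; otherwise return the fault code. */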
491 #define cr_get(cr) \
492 ((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
493 vcpu_set_gr(vcpu, tgt, val, 0) : fault;
495 IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
496 {
497 UINT64 tgt = inst.M33.r1;
498 UINT64 val;
499 IA64FAULT fault;
501 from_cr_cnt[inst.M33.cr3]++;
502 switch (inst.M33.cr3) {
503 case 0: return cr_get(dcr);
504 case 1: return cr_get(itm);
505 case 2: return cr_get(iva);
506 case 8: return cr_get(pta);
507 case 16:return cr_get(ipsr);
508 case 17:return cr_get(isr);
509 case 19:return cr_get(iip);
510 case 20:return cr_get(ifa);
511 case 21:return cr_get(itir);
512 case 22:return cr_get(iipa);
513 case 23:return cr_get(ifs);
514 case 24:return cr_get(iim);
515 case 25:return cr_get(iha);
516 case 64:return cr_get(lid);
517 case 65:return cr_get(ivr);
518 case 66:return cr_get(tpr);
519 case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
520 case 68:return cr_get(irr0);
521 case 69:return cr_get(irr1);
522 case 70:return cr_get(irr2);
523 case 71:return cr_get(irr3);
524 case 72:return cr_get(itv);
525 case 73:return cr_get(pmv);
526 case 74:return cr_get(cmcv);
527 case 80:return cr_get(lrr0);
528 case 81:return cr_get(lrr1);
529 default: return IA64_ILLOP_FAULT;
530 }
531 return IA64_ILLOP_FAULT;
532 }
534 IA64FAULT priv_mov_from_psr(VCPU *vcpu, INST64 inst)
535 {
536 UINT64 tgt = inst.M33.r1;
537 UINT64 val;
538 IA64FAULT fault;
540 if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
541 return vcpu_set_gr(vcpu, tgt, val, 0);
542 else return fault;
543 }
545 /**************************************************************************
546 Privileged operation decode and dispatch routines
547 **************************************************************************/
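// Unit type (M/I/F/B) of each of the three slots for all 32 bundle template
// encodings; reserved templates are marked ILLEGAL.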
549 static const IA64_SLOT_TYPE slot_types[0x20][3] = {
550 {M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
551 {M, I, ILLEGAL}, {M, I, ILLEGAL},
552 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
553 {M, M, I}, {M, M, I}, {M, M, I}, {M, M, I},
554 {M, F, I}, {M, F, I},
555 {M, M, F}, {M, M, F},
556 {M, I, B}, {M, I, B},
557 {M, B, B}, {M, B, B},
558 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
559 {B, B, B}, {B, B, B},
560 {M, M, B}, {M, M, B},
561 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
562 {M, F, B}, {M, F, B},
563 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL}
564 };
566 // pointer to privileged emulation function
567 typedef IA64FAULT (*PPEFCN)(VCPU *vcpu, INST64 inst);
569 static const PPEFCN Mpriv_funcs[64] = {
570 priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
571 priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
572 0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
573 priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
574 priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr, priv_mov_from_pkr,
575 priv_mov_from_pmc, 0, 0, 0,
576 0, 0, 0, 0,
577 0, 0, priv_tpa, priv_tak,
578 0, 0, 0, 0,
579 priv_mov_from_cr, priv_mov_from_psr, 0, 0,
580 0, 0, 0, 0,
581 priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
582 0, 0, 0, 0,
583 priv_ptc_e, 0, 0, 0,
584 0, 0, 0, 0, 0, 0, 0, 0
585 };
587 struct {
588 unsigned long mov_to_ar_imm;
589 unsigned long mov_to_ar_reg;
590 unsigned long mov_from_ar;
591 unsigned long ssm;
592 unsigned long rsm;
593 unsigned long rfi;
594 unsigned long bsw0;
595 unsigned long bsw1;
596 unsigned long cover;
597 unsigned long fc;
598 unsigned long cpuid;
599 unsigned long Mpriv_cnt[64];
600 } privcnt = { 0 };
602 unsigned long privop_trace = 0;
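/* Fetch the bundle at the faulting iip, pick the slot named by ipsr.ri,
 * decode the major opcode and x6/x3 fields, and dispatch to the matching
 * priv_* emulation routine above. Apart from the break used for a privified
 * cover, emulation is only permitted when the guest was executing at
 * privilege level 2 (the ring used by the paravirtualized guest kernel). */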
604 IA64FAULT
605 priv_handle_op(VCPU *vcpu, REGS *regs, int privlvl)
606 {
607 IA64_BUNDLE bundle;
608 IA64_BUNDLE __get_domain_bundle(UINT64);
609 int slot;
610 IA64_SLOT_TYPE slot_type;
611 INST64 inst;
612 PPEFCN pfunc;
613 unsigned long ipsr = regs->cr_ipsr;
614 UINT64 iip = regs->cr_iip;
615 int x6;
617 // make a local copy of the bundle containing the privop
618 #if 1
619 bundle = __get_domain_bundle(iip);
620 if (!bundle.i64[0] && !bundle.i64[1])
621 #else
622 if (__copy_from_user(&bundle,iip,sizeof(bundle)))
623 #endif
624 {
625 //printf("*** priv_handle_op: privop bundle at 0x%lx not mapped, retrying\n",iip);
626 return vcpu_force_data_miss(vcpu,regs->cr_iip);
627 }
628 #if 0
629 if (iip==0xa000000100001820) {
630 static int firstpagefault = 1;
631 if (firstpagefault) {
632 printf("*** First time to domain page fault!\n"); firstpagefault=0;
633 }
634 }
635 #endif
636 if (privop_trace) {
637 static long i = 400;
638 //if (i > 0) printf("priv_handle_op: at 0x%lx\n",iip);
639 if (i > 0) printf("priv_handle_op: privop trace at 0x%lx, itc=%lx, itm=%lx\n",
640 iip,ia64_get_itc(),ia64_get_itm());
641 i--;
642 }
643 slot = ((struct ia64_psr *)&ipsr)->ri;
644 if (!slot) inst.inst = (bundle.i64[0]>>5) & MASK_41;
645 else if (slot == 1)
646 inst.inst = ((bundle.i64[0]>>46) | bundle.i64[1]<<18) & MASK_41;
647 else if (slot == 2) inst.inst = (bundle.i64[1]>>23) & MASK_41;
648 else printf("priv_handle_op: illegal slot: %d\n", slot);
650 slot_type = slot_types[bundle.template][slot];
651 if (priv_verbose) {
652 printf("priv_handle_op: checking bundle at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
653 iip, (UINT64)inst.inst, slot, slot_type);
654 }
655 if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
656 // break instr for privified cover
657 }
658 else if (privlvl != 2) return (IA64_ILLOP_FAULT);
659 switch (slot_type) {
660 case M:
661 if (inst.generic.major == 0) {
662 #if 0
663 if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
664 privcnt.cover++;
665 return priv_cover(vcpu,inst);
666 }
667 #endif
668 if (inst.M29.x3 != 0) break;
669 if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
670 privcnt.mov_to_ar_imm++;
671 return priv_mov_to_ar_imm(vcpu,inst);
672 }
673 if (inst.M44.x4 == 6) {
674 privcnt.ssm++;
675 return priv_ssm(vcpu,inst);
676 }
677 if (inst.M44.x4 == 7) {
678 privcnt.rsm++;
679 return priv_rsm(vcpu,inst);
680 }
681 break;
682 }
683 else if (inst.generic.major != 1) break;
684 x6 = inst.M29.x6;
685 if (x6 == 0x2a) {
686 if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8)
687 privcnt.mov_from_ar++; // privified mov from kr
688 else privcnt.mov_to_ar_reg++;
689 return priv_mov_to_ar_reg(vcpu,inst);
690 }
691 if (inst.M29.x3 != 0) break;
692 if (!(pfunc = Mpriv_funcs[x6])) break;
693 if (x6 == 0x1e || x6 == 0x1f) { // tpa or tak are "special"
694 if (privify_en && inst.M46.r3 > 63) {
695 if (x6 == 0x1e) x6 = 0x1b;
696 else x6 = 0x1a;
697 }
698 }
699 if (privify_en && x6 == 52 && inst.M28.r3 > 63)
700 privcnt.fc++;
701 else if (privify_en && x6 == 16 && inst.M43.r3 > 63)
702 privcnt.cpuid++;
703 else privcnt.Mpriv_cnt[x6]++;
704 return (*pfunc)(vcpu,inst);
705 break;
706 case B:
707 if (inst.generic.major != 0) break;
708 if (inst.B8.x6 == 0x08) {
709 IA64FAULT fault;
710 privcnt.rfi++;
711 fault = priv_rfi(vcpu,inst);
712 if (fault == IA64_NO_FAULT) fault = IA64_RFI_IN_PROGRESS;
713 return fault;
714 }
715 if (inst.B8.x6 == 0x0c) {
716 privcnt.bsw0++;
717 return priv_bsw0(vcpu,inst);
718 }
719 if (inst.B8.x6 == 0x0d) {
720 privcnt.bsw1++;
721 return priv_bsw1(vcpu,inst);
722 }
723 if (inst.B8.x6 == 0x0) { // break instr for privified cover
724 privcnt.cover++;
725 return priv_cover(vcpu,inst);
726 }
727 break;
728 case I:
729 if (inst.generic.major != 0) break;
730 #if 0
731 if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
732 privcnt.cover++;
733 return priv_cover(vcpu,inst);
734 }
735 #endif
736 if (inst.I26.x3 != 0) break; // I26.x3 == I27.x3
737 if (inst.I26.x6 == 0x2a) {
738 if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8)
739 privcnt.mov_from_ar++; // privified mov from kr
740 else privcnt.mov_to_ar_reg++;
741 return priv_mov_to_ar_reg(vcpu,inst);
742 }
743 if (inst.I27.x6 == 0x0a) {
744 privcnt.mov_to_ar_imm++;
745 return priv_mov_to_ar_imm(vcpu,inst);
746 }
747 break;
748 default:
749 break;
750 }
751 //printf("We who are about do die salute you\n");
752 printf("priv_handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d), ipsr=0x%lx\n",
753 iip, (UINT64)inst.inst, slot, slot_type, ipsr);
754 //printf("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
755 //thread_mozambique("privop fault\n");
756 return (IA64_ILLOP_FAULT);
757 }
759 /** Emulate a privileged operation.
760 *
761 * This should probably return 0 on success and the "trap number"
762 * (e.g. illegal operation for bad register, priv op for an
763 * instruction that isn't allowed, etc.) on "failure"
764 *
765 * @param vcpu virtual cpu
766 * @param isr interruption status register value
767 * @return fault
768 */
769 IA64FAULT
770 priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
771 {
772 IA64FAULT fault;
773 UINT64 ipsr = regs->cr_ipsr;
774 UINT64 isrcode = (isr >> 4) & 0xf;
775 int privlvl;
777 // handle privops masked as illops? and breaks (6)
778 if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
779 printf("priv_emulate: isrcode != 0 or 1 or 2\n");
780 printf("priv_emulate: returning ILLOP, not implemented!\n");
781 while (1);
782 return IA64_ILLOP_FAULT;
783 }
784 //if (isrcode != 1 && isrcode != 2) return 0;
785 privlvl = (ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
786 // it's OK for a privified-cover to be executed in user-land
787 fault = priv_handle_op(vcpu,regs,privlvl);
788 if ((fault == IA64_NO_FAULT) || (fault == IA64_EXTINT_VECTOR)) { // success!!
789 // update iip/ipsr to point to the next instruction
790 (void)vcpu_increment_iip(vcpu);
791 }
792 if (fault == IA64_ILLOP_FAULT)
793 printf("priv_emulate: priv_handle_op fails, isr=0x%lx\n",isr);
794 return fault;
795 }
798 // FIXME: Move these to include/public/arch-ia64?
799 #define HYPERPRIVOP_RFI 0x1
800 #define HYPERPRIVOP_RSM_DT 0x2
801 #define HYPERPRIVOP_SSM_DT 0x3
802 #define HYPERPRIVOP_COVER 0x4
803 #define HYPERPRIVOP_ITC_D 0x5
804 #define HYPERPRIVOP_ITC_I 0x6
805 #define HYPERPRIVOP_SSM_I 0x7
806 #define HYPERPRIVOP_GET_IVR 0x8
807 #define HYPERPRIVOP_GET_TPR 0x9
808 #define HYPERPRIVOP_SET_TPR 0xa
809 #define HYPERPRIVOP_EOI 0xb
810 #define HYPERPRIVOP_SET_ITM 0xc
811 #define HYPERPRIVOP_THASH 0xd
812 #define HYPERPRIVOP_PTC_GA 0xe
813 #define HYPERPRIVOP_ITR_D 0xf
814 #define HYPERPRIVOP_GET_RR 0x10
815 #define HYPERPRIVOP_SET_RR 0x11
816 #define HYPERPRIVOP_SET_KR 0x12
817 #define HYPERPRIVOP_FC 0x13
818 #define HYPERPRIVOP_GET_CPUID 0x14
819 #define HYPERPRIVOP_GET_PMD 0x15
820 #define HYPERPRIVOP_GET_EFLAG 0x16
821 #define HYPERPRIVOP_SET_EFLAG 0x17
822 #define HYPERPRIVOP_MAX 0x17
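/* A guest issues one of these by loading the hyperprivop number into r2 and
 * executing break with the hypercall immediate -- the same mov/break sequence
 * that build_hypercall_bundle() above assembles. */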
824 static const char * const hyperpriv_str[HYPERPRIVOP_MAX+1] = {
825 0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i", "ssm.i",
826 "=ivr", "=tpr", "tpr=", "eoi", "itm=", "thash", "ptc.ga", "itr.d",
827 "=rr", "rr=", "kr=", "fc", "=cpuid", "=pmd", "=ar.eflg", "ar.eflg="
828 };
830 unsigned long slow_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
831 unsigned long fast_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
833 /* hyperprivops are generally executed in assembly (with physical psr.ic off)
834 * so this code is primarily used for debugging them */
835 int
836 ia64_hyperprivop(unsigned long iim, REGS *regs)
837 {
838 struct vcpu *v = current;
839 UINT64 val;
840 UINT64 itir, ifa;
842 // FIXME: Handle faults appropriately for these
843 if (!iim || iim > HYPERPRIVOP_MAX) {
844 printf("bad hyperprivop; ignored\n");
845 printf("iim=%lx, iip=0x%lx\n", iim, regs->cr_iip);
846 return 1;
847 }
848 slow_hyperpriv_cnt[iim]++;
849 switch(iim) {
850 case HYPERPRIVOP_RFI:
851 (void)vcpu_rfi(v);
852 return 0; // don't update iip
853 case HYPERPRIVOP_RSM_DT:
854 (void)vcpu_reset_psr_dt(v);
855 return 1;
856 case HYPERPRIVOP_SSM_DT:
857 (void)vcpu_set_psr_dt(v);
858 return 1;
859 case HYPERPRIVOP_COVER:
860 (void)vcpu_cover(v);
861 return 1;
862 case HYPERPRIVOP_ITC_D:
863 (void)vcpu_get_itir(v,&itir);
864 (void)vcpu_get_ifa(v,&ifa);
865 (void)vcpu_itc_d(v,regs->r8,itir,ifa);
866 return 1;
867 case HYPERPRIVOP_ITC_I:
868 (void)vcpu_get_itir(v,&itir);
869 (void)vcpu_get_ifa(v,&ifa);
870 (void)vcpu_itc_i(v,regs->r8,itir,ifa);
871 return 1;
872 case HYPERPRIVOP_SSM_I:
873 (void)vcpu_set_psr_i(v);
874 return 1;
875 case HYPERPRIVOP_GET_IVR:
876 (void)vcpu_get_ivr(v,&val);
877 regs->r8 = val;
878 return 1;
879 case HYPERPRIVOP_GET_TPR:
880 (void)vcpu_get_tpr(v,&val);
881 regs->r8 = val;
882 return 1;
883 case HYPERPRIVOP_SET_TPR:
884 (void)vcpu_set_tpr(v,regs->r8);
885 return 1;
886 case HYPERPRIVOP_EOI:
887 (void)vcpu_set_eoi(v,0L);
888 return 1;
889 case HYPERPRIVOP_SET_ITM:
890 (void)vcpu_set_itm(v,regs->r8);
891 return 1;
892 case HYPERPRIVOP_THASH:
893 (void)vcpu_thash(v,regs->r8,&val);
894 regs->r8 = val;
895 return 1;
896 case HYPERPRIVOP_PTC_GA:
897 (void)vcpu_ptc_ga(v,regs->r8,(1L << ((regs->r9 & 0xfc) >> 2)));
898 return 1;
899 case HYPERPRIVOP_ITR_D:
900 (void)vcpu_get_itir(v,&itir);
901 (void)vcpu_get_ifa(v,&ifa);
902 (void)vcpu_itr_d(v,regs->r8,regs->r9,itir,ifa);
903 return 1;
904 case HYPERPRIVOP_GET_RR:
905 (void)vcpu_get_rr(v,regs->r8,&val);
906 regs->r8 = val;
907 return 1;
908 case HYPERPRIVOP_SET_RR:
909 (void)vcpu_set_rr(v,regs->r8,regs->r9);
910 return 1;
911 case HYPERPRIVOP_SET_KR:
912 (void)vcpu_set_ar(v,regs->r8,regs->r9);
913 return 1;
914 case HYPERPRIVOP_FC:
915 (void)vcpu_fc(v,regs->r8);
916 return 1;
917 case HYPERPRIVOP_GET_CPUID:
918 (void)vcpu_get_cpuid(v,regs->r8,&val);
919 regs->r8 = val;
920 return 1;
921 case HYPERPRIVOP_GET_PMD:
922 (void)vcpu_get_pmd(v,regs->r8,&val);
923 regs->r8 = val;
924 return 1;
925 case HYPERPRIVOP_GET_EFLAG:
926 (void)vcpu_get_ar(v,24,&val);
927 regs->r8 = val;
928 return 1;
929 case HYPERPRIVOP_SET_EFLAG:
930 (void)vcpu_set_ar(v,24,regs->r8);
931 return 1;
932 }
933 return 0;
934 }
937 /**************************************************************************
938 Privileged operation instrumentation routines
939 **************************************************************************/
941 static const char * const Mpriv_str[64] = {
942 "mov_to_rr", "mov_to_dbr", "mov_to_ibr", "mov_to_pkr",
943 "mov_to_pmc", "mov_to_pmd", "<0x06>", "<0x07>",
944 "<0x08>", "ptc_l", "ptc_g", "ptc_ga",
945 "ptr_d", "ptr_i", "itr_d", "itr_i",
946 "mov_from_rr", "mov_from_dbr", "mov_from_ibr", "mov_from_pkr",
947 "mov_from_pmc", "<0x15>", "<0x16>", "<0x17>",
948 "<0x18>", "<0x19>", "privified-thash", "privified-ttag",
949 "<0x1c>", "<0x1d>", "tpa", "tak",
950 "<0x20>", "<0x21>", "<0x22>", "<0x23>",
951 "mov_from_cr", "mov_from_psr", "<0x26>", "<0x27>",
952 "<0x28>", "<0x29>", "<0x2a>", "<0x2b>",
953 "mov_to_cr", "mov_to_psr", "itc_d", "itc_i",
954 "<0x30>", "<0x31>", "<0x32>", "<0x33>",
955 "ptc_e", "<0x35>", "<0x36>", "<0x37>",
956 "<0x38>", "<0x39>", "<0x3a>", "<0x3b>",
957 "<0x3c>", "<0x3d>", "<0x3e>", "<0x3f>"
958 };
960 #define RS "Rsvd"
961 static const char * const cr_str[128] = {
962 "dcr","itm","iva",RS,RS,RS,RS,RS,
963 "pta",RS,RS,RS,RS,RS,RS,RS,
964 "ipsr","isr",RS,"iip","ifa","itir","iipa","ifs",
965 "iim","iha",RS,RS,RS,RS,RS,RS,
966 RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
967 RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
968 "lid","ivr","tpr","eoi","irr0","irr1","irr2","irr3",
969 "itv","pmv","cmcv",RS,RS,RS,RS,RS,
970 "lrr0","lrr1",RS,RS,RS,RS,RS,RS,
971 RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
972 RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
973 RS,RS,RS,RS,RS,RS,RS,RS
974 };
976 // FIXME: should use snprintf to ensure no buffer overflow
977 static int dump_privop_counts(char *buf)
978 {
979 int i, j;
980 UINT64 sum = 0;
981 char *s = buf;
983 // this is ugly and should probably produce sorted output
984 // but it will have to do for now
985 sum += privcnt.mov_to_ar_imm; sum += privcnt.mov_to_ar_reg;
986 sum += privcnt.ssm; sum += privcnt.rsm;
987 sum += privcnt.rfi; sum += privcnt.bsw0;
988 sum += privcnt.bsw1; sum += privcnt.cover;
989 for (i=0; i < 64; i++) sum += privcnt.Mpriv_cnt[i];
990 s += sprintf(s,"Privop statistics: (Total privops: %ld)\n",sum);
991 if (privcnt.mov_to_ar_imm)
992 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.mov_to_ar_imm,
993 "mov_to_ar_imm", (privcnt.mov_to_ar_imm*100L)/sum);
994 if (privcnt.mov_to_ar_reg)
995 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.mov_to_ar_reg,
996 "mov_to_ar_reg", (privcnt.mov_to_ar_reg*100L)/sum);
997 if (privcnt.mov_from_ar)
998 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.mov_from_ar,
999 "privified-mov_from_ar", (privcnt.mov_from_ar*100L)/sum);
1000 if (privcnt.ssm)
1001 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.ssm,
1002 "ssm", (privcnt.ssm*100L)/sum);
1003 if (privcnt.rsm)
1004 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.rsm,
1005 "rsm", (privcnt.rsm*100L)/sum);
1006 if (privcnt.rfi)
1007 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.rfi,
1008 "rfi", (privcnt.rfi*100L)/sum);
1009 if (privcnt.bsw0)
1010 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.bsw0,
1011 "bsw0", (privcnt.bsw0*100L)/sum);
1012 if (privcnt.bsw1)
1013 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.bsw1,
1014 "bsw1", (privcnt.bsw1*100L)/sum);
1015 if (privcnt.cover)
1016 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.cover,
1017 "cover", (privcnt.cover*100L)/sum);
1018 if (privcnt.fc)
1019 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.fc,
1020 "privified-fc", (privcnt.fc*100L)/sum);
1021 if (privcnt.cpuid)
1022 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.cpuid,
1023 "privified-getcpuid", (privcnt.cpuid*100L)/sum);
1024 for (i=0; i < 64; i++) if (privcnt.Mpriv_cnt[i]) {
1025 if (!Mpriv_str[i]) s += sprintf(s,"PRIVSTRING NULL!!\n");
1026 else s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.Mpriv_cnt[i],
1027 Mpriv_str[i], (privcnt.Mpriv_cnt[i]*100L)/sum);
1028 if (i == 0x24) { // mov from CR
1029 s += sprintf(s," [");
1030 for (j=0; j < 128; j++) if (from_cr_cnt[j]) {
1031 if (!cr_str[j])
1032 s += sprintf(s,"PRIVSTRING NULL!!\n");
1033 s += sprintf(s,"%s(%ld),",cr_str[j],from_cr_cnt[j]);
1034 }
1035 s += sprintf(s,"]\n");
1036 }
1037 else if (i == 0x2c) { // mov to CR
1038 s += sprintf(s," [");
1039 for (j=0; j < 128; j++) if (to_cr_cnt[j]) {
1040 if (!cr_str[j])
1041 s += sprintf(s,"PRIVSTRING NULL!!\n");
1042 s += sprintf(s,"%s(%ld),",cr_str[j],to_cr_cnt[j]);
1043 }
1044 s += sprintf(s,"]\n");
1045 }
1046 }
1047 return s - buf;
1048 }
1050 static int zero_privop_counts(char *buf)
1051 {
1052 int i, j;
1053 char *s = buf;
1055 // this is ugly and should probably produce sorted output
1056 // but it will have to do for now
1057 privcnt.mov_to_ar_imm = 0; privcnt.mov_to_ar_reg = 0;
1058 privcnt.mov_from_ar = 0;
1059 privcnt.ssm = 0; privcnt.rsm = 0;
1060 privcnt.rfi = 0; privcnt.bsw0 = 0;
1061 privcnt.bsw1 = 0; privcnt.cover = 0;
1062 privcnt.fc = 0; privcnt.cpuid = 0;
1063 for (i=0; i < 64; i++) privcnt.Mpriv_cnt[i] = 0;
1064 for (j=0; j < 128; j++) from_cr_cnt[j] = 0;
1065 for (j=0; j < 128; j++) to_cr_cnt[j] = 0;
1066 s += sprintf(s,"All privop statistics zeroed\n");
1067 return s - buf;
1068 }
1070 #ifdef PRIVOP_ADDR_COUNT
1072 extern struct privop_addr_count privop_addr_counter[];
1074 void privop_count_addr(unsigned long iip, int inst)
1075 {
1076 struct privop_addr_count *v = &privop_addr_counter[inst];
1077 int i;
1079 for (i = 0; i < PRIVOP_COUNT_NADDRS; i++) {
1080 if (!v->addr[i]) { v->addr[i] = iip; v->count[i]++; return; }
1081 else if (v->addr[i] == iip) { v->count[i]++; return; }
1082 }
1083 v->overflow++;
1084 }
1086 static int dump_privop_addrs(char *buf)
1087 {
1088 int i,j;
1089 char *s = buf;
1090 s += sprintf(s,"Privop addresses:\n");
1091 for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
1092 struct privop_addr_count *v = &privop_addr_counter[i];
1093 s += sprintf(s,"%s:\n",v->instname);
1094 for (j = 0; j < PRIVOP_COUNT_NADDRS; j++) {
1095 if (!v->addr[j]) break;
1096 s += sprintf(s," at 0x%lx #%ld\n",v->addr[j],v->count[j]);
1097 }
1098 if (v->overflow)
1099 s += sprintf(s," other #%ld\n",v->overflow);
1100 }
1101 return s - buf;
1102 }
1104 static void zero_privop_addrs(void)
1105 {
1106 int i,j;
1107 for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
1108 struct privop_addr_count *v = &privop_addr_counter[i];
1109 for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
1110 v->addr[j] = v->count[j] = 0;
1111 v->overflow = 0;
1112 }
1113 }
1114 #endif
1116 extern unsigned long dtlb_translate_count;
1117 extern unsigned long tr_translate_count;
1118 extern unsigned long phys_translate_count;
1119 extern unsigned long vhpt_translate_count;
1120 extern unsigned long fast_vhpt_translate_count;
1121 extern unsigned long recover_to_page_fault_count;
1122 extern unsigned long recover_to_break_fault_count;
1123 extern unsigned long lazy_cover_count;
1124 extern unsigned long idle_when_pending;
1125 extern unsigned long pal_halt_light_count;
1126 extern unsigned long context_switch_count;
1128 static int dump_misc_stats(char *buf)
1129 {
1130 char *s = buf;
1131 s += sprintf(s,"Virtual TR translations: %ld\n",tr_translate_count);
1132 s += sprintf(s,"Virtual VHPT slow translations: %ld\n",vhpt_translate_count);
1133 s += sprintf(s,"Virtual VHPT fast translations: %ld\n",fast_vhpt_translate_count);
1134 s += sprintf(s,"Virtual DTLB translations: %ld\n",dtlb_translate_count);
1135 s += sprintf(s,"Physical translations: %ld\n",phys_translate_count);
1136 s += sprintf(s,"Recoveries to page fault: %ld\n",recover_to_page_fault_count);
1137 s += sprintf(s,"Recoveries to break fault: %ld\n",recover_to_break_fault_count);
1138 s += sprintf(s,"Idle when pending: %ld\n",idle_when_pending);
1139 s += sprintf(s,"PAL_HALT_LIGHT (no pending): %ld\n",pal_halt_light_count);
1140 s += sprintf(s,"context switches: %ld\n",context_switch_count);
1141 s += sprintf(s,"Lazy covers: %ld\n",lazy_cover_count);
1142 return s - buf;
1143 }
1145 static void zero_misc_stats(void)
1146 {
1147 dtlb_translate_count = 0;
1148 tr_translate_count = 0;
1149 phys_translate_count = 0;
1150 vhpt_translate_count = 0;
1151 fast_vhpt_translate_count = 0;
1152 recover_to_page_fault_count = 0;
1153 recover_to_break_fault_count = 0;
1154 lazy_cover_count = 0;
1155 pal_halt_light_count = 0;
1156 idle_when_pending = 0;
1157 context_switch_count = 0;
1158 }
1160 static int dump_hyperprivop_counts(char *buf)
1161 {
1162 int i;
1163 char *s = buf;
1164 unsigned long total = 0;
1165 for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += slow_hyperpriv_cnt[i];
1166 s += sprintf(s,"Slow hyperprivops (total %ld):\n",total);
1167 for (i = 1; i <= HYPERPRIVOP_MAX; i++)
1168 if (slow_hyperpriv_cnt[i])
1169 s += sprintf(s,"%10ld %s\n",
1170 slow_hyperpriv_cnt[i], hyperpriv_str[i]);
1171 total = 0;
1172 for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += fast_hyperpriv_cnt[i];
1173 s += sprintf(s,"Fast hyperprivops (total %ld):\n",total);
1174 for (i = 1; i <= HYPERPRIVOP_MAX; i++)
1175 if (fast_hyperpriv_cnt[i])
1176 s += sprintf(s,"%10ld %s\n",
1177 fast_hyperpriv_cnt[i], hyperpriv_str[i]);
1178 return s - buf;
1179 }
1181 static void zero_hyperprivop_counts(void)
1182 {
1183 int i;
1184 for (i = 0; i <= HYPERPRIVOP_MAX; i++) slow_hyperpriv_cnt[i] = 0;
1185 for (i = 0; i <= HYPERPRIVOP_MAX; i++) fast_hyperpriv_cnt[i] = 0;
1186 }
1188 #define TMPBUFLEN 8*1024
1189 int dump_privop_counts_to_user(char __user *ubuf, int len)
1190 {
1191 char buf[TMPBUFLEN];
1192 int n = dump_privop_counts(buf);
1194 n += dump_hyperprivop_counts(buf + n);
1195 n += dump_reflect_counts(buf + n);
1196 #ifdef PRIVOP_ADDR_COUNT
1197 n += dump_privop_addrs(buf + n);
1198 #endif
1199 n += dump_vhpt_stats(buf + n);
1200 n += dump_misc_stats(buf + n);
1201 if (len < TMPBUFLEN) return -1;
1202 if (__copy_to_user(ubuf,buf,n)) return -1;
1203 return n;
1204 }
1206 int zero_privop_counts_to_user(char __user *ubuf, int len)
1207 {
1208 char buf[TMPBUFLEN];
1209 int n = zero_privop_counts(buf);
1211 zero_hyperprivop_counts();
1212 #ifdef PRIVOP_ADDR_COUNT
1213 zero_privop_addrs();
1214 #endif
1215 zero_vhpt_stats();
1216 zero_misc_stats();
1217 zero_reflect_counts();
1218 if (len < TMPBUFLEN) return -1;
1219 if (__copy_to_user(ubuf,buf,n)) return -1;
1220 return n;
1221 }