ia64/xen-unstable

view xen/arch/ia64/xen/privop.c @ 9405:29dfadcc5029

[IA64] Followup to xen time cleanup

Clean up the xen time handler. Tristan had #if 0'd some code because it
seemed redundant; that logic, however, was actually problematic and the
cause of an intermittent timer oops in dom0, so delete it now.

Also remove vcpu_wake, since waking the current vcpu does nothing
meaningful and simply wastes cpu cycles.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Mon Mar 27 15:32:08 2006 -0700 (2006-03-27)
parents 218591a0cb7e
children 96bc87dd7ca9
line source
1 /*
2 * Privileged operation "API" handling functions.
3 *
4 * Copyright (C) 2004 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <asm/privop.h>
10 #include <asm/vcpu.h>
11 #include <asm/processor.h>
12 #include <asm/delay.h> // Debug only
13 #include <asm/dom_fw.h>
14 #include <asm/vhpt.h>
15 //#include <debug.h>
17 /* FIXME: where should these declarations live? */
18 extern int dump_reflect_counts(char *);
19 extern void zero_reflect_counts(void);
21 long priv_verbose=0;
23 /* Set to 1 to handle privified instructions from the privify tool. */
24 static const int privify_en = 0;
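/*
 * Editor's note (assumption, inferred from the decode paths below): the
 * privify tool rewrites privileged instructions into otherwise-unused
 * encodings -- most visibly general register numbers above 63 -- so that
 * they trap and can be recognized here; the NOTE comments in priv_ptc_e,
 * priv_tpa, priv_tak etc. show how each privified form is mapped back to
 * the operation it stands for.
 */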
26 /**************************************************************************
27 Hypercall bundle creation
28 **************************************************************************/
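/*
 * Editor's summary of the routine below: an IA-64 bundle is 128 bits wide
 * and holds a 5-bit template plus three 41-bit instruction slots.
 * build_hypercall_bundle() assembles an M/I/B bundle (template 0x11) of
 * the form
 *
 *	mov r2 = hypnum		// A5 form, slot 0
 *	break brkimm		// I19 form, slot 1
 *	br.ret or br.cond rp	// B4 form, slot 2
 *
 * and stores its two 64-bit words at *imva, presumably so that hypercall
 * stubs visible to the guest can be patched in place.
 */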
31 void build_hypercall_bundle(UINT64 *imva, UINT64 brkimm, UINT64 hypnum, UINT64 ret)
32 {
33 INST64_A5 slot0;
34 INST64_I19 slot1;
35 INST64_B4 slot2;
36 IA64_BUNDLE bundle;
38 // slot0: mov r2 = hypnum (low 20 bits)
39 slot0.inst = 0;
40 slot0.qp = 0; slot0.r1 = 2; slot0.r3 = 0; slot0.major = 0x9;
41 slot0.imm7b = hypnum; slot0.imm9d = hypnum >> 7;
42 slot0.imm5c = hypnum >> 16; slot0.s = 0;
43 // slot1: break brkimm
44 slot1.inst = 0;
45 slot1.qp = 0; slot1.x6 = 0; slot1.x3 = 0; slot1.major = 0x0;
46 slot1.imm20 = brkimm; slot1.i = brkimm >> 20;
47 // if ret slot2: br.ret.sptk.many rp
48 // else slot2: br.cond.sptk.many rp
49 slot2.inst = 0; slot2.qp = 0; slot2.p = 1; slot2.b2 = 0;
50 slot2.wh = 0; slot2.d = 0; slot2.major = 0x0;
51 if (ret) {
52 slot2.btype = 4; slot2.x6 = 0x21;
53 }
54 else {
55 slot2.btype = 0; slot2.x6 = 0x20;
56 }
58 bundle.i64[0] = 0; bundle.i64[1] = 0;
59 bundle.template = 0x11;
60 bundle.slot0 = slot0.inst; bundle.slot2 = slot2.inst;
61 bundle.slot1a = slot1.inst; bundle.slot1b = slot1.inst >> 18;
63 *imva++ = bundle.i64[0]; *imva = bundle.i64[1];
64 }
66 void build_pal_hypercall_bundles(UINT64 *imva, UINT64 brkimm, UINT64 hypnum)
67 {
68 extern unsigned long pal_call_stub[];
69 IA64_BUNDLE bundle;
70 INST64_A5 slot_a5;
71 INST64_M37 slot_m37;
73 /* The source of the hypercall stub is the pal_call_stub function
74 defined in xenasm.S. */
76 /* Copy the first bundle and patch the hypercall number. */
77 bundle.i64[0] = pal_call_stub[0];
78 bundle.i64[1] = pal_call_stub[1];
79 slot_a5.inst = bundle.slot0;
80 slot_a5.imm7b = hypnum;
81 slot_a5.imm9d = hypnum >> 7;
82 slot_a5.imm5c = hypnum >> 16;
83 bundle.slot0 = slot_a5.inst;
84 imva[0] = bundle.i64[0];
85 imva[1] = bundle.i64[1];
87 /* Copy the second bundle and patch the hypercall vector. */
88 bundle.i64[0] = pal_call_stub[2];
89 bundle.i64[1] = pal_call_stub[3];
90 slot_m37.inst = bundle.slot0;
91 slot_m37.imm20a = brkimm;
92 slot_m37.i = brkimm >> 20;
93 bundle.slot0 = slot_m37.inst;
94 imva[2] = bundle.i64[0];
95 imva[3] = bundle.i64[1];
96 }
99 /**************************************************************************
100 Privileged operation emulation routines
101 **************************************************************************/
103 IA64FAULT priv_rfi(VCPU *vcpu, INST64 inst)
104 {
105 return vcpu_rfi(vcpu);
106 }
108 IA64FAULT priv_bsw0(VCPU *vcpu, INST64 inst)
109 {
110 return vcpu_bsw0(vcpu);
111 }
113 IA64FAULT priv_bsw1(VCPU *vcpu, INST64 inst)
114 {
115 return vcpu_bsw1(vcpu);
116 }
118 IA64FAULT priv_cover(VCPU *vcpu, INST64 inst)
119 {
120 return vcpu_cover(vcpu);
121 }
123 IA64FAULT priv_ptc_l(VCPU *vcpu, INST64 inst)
124 {
125 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
126 UINT64 addr_range;
128 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
129 return vcpu_ptc_l(vcpu,vadr,addr_range);
130 }
132 IA64FAULT priv_ptc_e(VCPU *vcpu, INST64 inst)
133 {
134 UINT src = inst.M28.r3;
136 // NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
137 if (privify_en && src > 63)
138 return(vcpu_fc(vcpu,vcpu_get_gr(vcpu,src - 64)));
139 return vcpu_ptc_e(vcpu,vcpu_get_gr(vcpu,src));
140 }
142 IA64FAULT priv_ptc_g(VCPU *vcpu, INST64 inst)
143 {
144 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
145 UINT64 addr_range;
147 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
148 return vcpu_ptc_g(vcpu,vadr,addr_range);
149 }
151 IA64FAULT priv_ptc_ga(VCPU *vcpu, INST64 inst)
152 {
153 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
154 UINT64 addr_range;
156 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
157 return vcpu_ptc_ga(vcpu,vadr,addr_range);
158 }
160 IA64FAULT priv_ptr_d(VCPU *vcpu, INST64 inst)
161 {
162 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
163 UINT64 addr_range;
165 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
166 return vcpu_ptr_d(vcpu,vadr,addr_range);
167 }
169 IA64FAULT priv_ptr_i(VCPU *vcpu, INST64 inst)
170 {
171 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
172 UINT64 addr_range;
174 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
175 return vcpu_ptr_i(vcpu,vadr,addr_range);
176 }
178 IA64FAULT priv_tpa(VCPU *vcpu, INST64 inst)
179 {
180 UINT64 padr;
181 UINT fault;
182 UINT src = inst.M46.r3;
184 // NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
185 if (privify_en && src > 63)
186 fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
187 else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
188 if (fault == IA64_NO_FAULT)
189 return vcpu_set_gr(vcpu, inst.M46.r1, padr, 0);
190 else return fault;
191 }
193 IA64FAULT priv_tak(VCPU *vcpu, INST64 inst)
194 {
195 UINT64 key;
196 UINT fault;
197 UINT src = inst.M46.r3;
199 // NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
200 if (privify_en && src > 63)
201 fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
202 else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
203 if (fault == IA64_NO_FAULT)
204 return vcpu_set_gr(vcpu, inst.M46.r1, key,0);
205 else return fault;
206 }
208 /************************************
209 * Insert translation register/cache
210 ************************************/
212 IA64FAULT priv_itr_d(VCPU *vcpu, INST64 inst)
213 {
214 UINT64 fault, itir, ifa, pte, slot;
216 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
217 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
218 return(IA64_ILLOP_FAULT);
219 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
220 return(IA64_ILLOP_FAULT);
221 pte = vcpu_get_gr(vcpu,inst.M42.r2);
222 slot = vcpu_get_gr(vcpu,inst.M42.r3);
224 return (vcpu_itr_d(vcpu,slot,pte,itir,ifa));
225 }
227 IA64FAULT priv_itr_i(VCPU *vcpu, INST64 inst)
228 {
229 UINT64 fault, itir, ifa, pte, slot;
231 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
232 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
233 return(IA64_ILLOP_FAULT);
234 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
235 return(IA64_ILLOP_FAULT);
236 pte = vcpu_get_gr(vcpu,inst.M42.r2);
237 slot = vcpu_get_gr(vcpu,inst.M42.r3);
239 return (vcpu_itr_i(vcpu,slot,pte,itir,ifa));
240 }
242 IA64FAULT priv_itc_d(VCPU *vcpu, INST64 inst)
243 {
244 UINT64 fault, itir, ifa, pte;
246 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
247 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
248 return(IA64_ILLOP_FAULT);
249 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
250 return(IA64_ILLOP_FAULT);
251 pte = vcpu_get_gr(vcpu,inst.M41.r2);
253 return (vcpu_itc_d(vcpu,pte,itir,ifa));
254 }
256 IA64FAULT priv_itc_i(VCPU *vcpu, INST64 inst)
257 {
258 UINT64 fault, itir, ifa, pte;
260 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
261 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
262 return(IA64_ILLOP_FAULT);
263 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
264 return(IA64_ILLOP_FAULT);
265 pte = vcpu_get_gr(vcpu,inst.M41.r2);
267 return (vcpu_itc_i(vcpu,pte,itir,ifa));
268 }
270 /*************************************
271 * Moves to semi-privileged registers
272 *************************************/
274 IA64FAULT priv_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
275 {
276 // I27 and M30 are identical for these fields
277 UINT64 ar3 = inst.M30.ar3;
278 UINT64 imm = vcpu_get_gr(vcpu,inst.M30.imm);
279 return (vcpu_set_ar(vcpu,ar3,imm));
280 }
282 IA64FAULT priv_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
283 {
284 // I26 and M29 are identical for these fields
285 UINT64 ar3 = inst.M29.ar3;
287 if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8) {
288 // privified mov from kr
289 UINT64 val;
290 if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
291 return vcpu_set_gr(vcpu, inst.M29.r2-64, val,0);
292 else return IA64_ILLOP_FAULT;
293 }
294 else {
295 UINT64 r2 = vcpu_get_gr(vcpu,inst.M29.r2);
296 return (vcpu_set_ar(vcpu,ar3,r2));
297 }
298 }
300 /********************************
301 * Moves to privileged registers
302 ********************************/
304 IA64FAULT priv_mov_to_pkr(VCPU *vcpu, INST64 inst)
305 {
306 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
307 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
308 return (vcpu_set_pkr(vcpu,r3,r2));
309 }
311 IA64FAULT priv_mov_to_rr(VCPU *vcpu, INST64 inst)
312 {
313 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
314 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
315 return (vcpu_set_rr(vcpu,r3,r2));
316 }
318 IA64FAULT priv_mov_to_dbr(VCPU *vcpu, INST64 inst)
319 {
320 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
321 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
322 return (vcpu_set_dbr(vcpu,r3,r2));
323 }
325 IA64FAULT priv_mov_to_ibr(VCPU *vcpu, INST64 inst)
326 {
327 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
328 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
329 return (vcpu_set_ibr(vcpu,r3,r2));
330 }
332 IA64FAULT priv_mov_to_pmc(VCPU *vcpu, INST64 inst)
333 {
334 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
335 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
336 return (vcpu_set_pmc(vcpu,r3,r2));
337 }
339 IA64FAULT priv_mov_to_pmd(VCPU *vcpu, INST64 inst)
340 {
341 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
342 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
343 return (vcpu_set_pmd(vcpu,r3,r2));
344 }
346 unsigned long to_cr_cnt[128] = { 0 };
348 IA64FAULT priv_mov_to_cr(VCPU *vcpu, INST64 inst)
349 {
350 UINT64 val = vcpu_get_gr(vcpu, inst.M32.r2);
351 to_cr_cnt[inst.M32.cr3]++;
352 switch (inst.M32.cr3) {
353 case 0: return vcpu_set_dcr(vcpu,val);
354 case 1: return vcpu_set_itm(vcpu,val);
355 case 2: return vcpu_set_iva(vcpu,val);
356 case 8: return vcpu_set_pta(vcpu,val);
357 case 16:return vcpu_set_ipsr(vcpu,val);
358 case 17:return vcpu_set_isr(vcpu,val);
359 case 19:return vcpu_set_iip(vcpu,val);
360 case 20:return vcpu_set_ifa(vcpu,val);
361 case 21:return vcpu_set_itir(vcpu,val);
362 case 22:return vcpu_set_iipa(vcpu,val);
363 case 23:return vcpu_set_ifs(vcpu,val);
364 case 24:return vcpu_set_iim(vcpu,val);
365 case 25:return vcpu_set_iha(vcpu,val);
366 case 64:return vcpu_set_lid(vcpu,val);
367 case 65:return IA64_ILLOP_FAULT;
368 case 66:return vcpu_set_tpr(vcpu,val);
369 case 67:return vcpu_set_eoi(vcpu,val);
370 case 68:return IA64_ILLOP_FAULT;
371 case 69:return IA64_ILLOP_FAULT;
372 case 70:return IA64_ILLOP_FAULT;
373 case 71:return IA64_ILLOP_FAULT;
374 case 72:return vcpu_set_itv(vcpu,val);
375 case 73:return vcpu_set_pmv(vcpu,val);
376 case 74:return vcpu_set_cmcv(vcpu,val);
377 case 80:return vcpu_set_lrr0(vcpu,val);
378 case 81:return vcpu_set_lrr1(vcpu,val);
379 default: return IA64_ILLOP_FAULT;
380 }
381 }
383 IA64FAULT priv_rsm(VCPU *vcpu, INST64 inst)
384 {
385 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
386 return vcpu_reset_psr_sm(vcpu,imm24);
387 }
389 IA64FAULT priv_ssm(VCPU *vcpu, INST64 inst)
390 {
391 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
392 return vcpu_set_psr_sm(vcpu,imm24);
393 }
395 /**
396 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
397 */
398 IA64FAULT priv_mov_to_psr(VCPU *vcpu, INST64 inst)
399 {
400 UINT64 val = vcpu_get_gr(vcpu, inst.M35.r2);
401 return vcpu_set_psr_l(vcpu,val);
402 }
404 /**********************************
405 * Moves from privileged registers
406 **********************************/
408 IA64FAULT priv_mov_from_rr(VCPU *vcpu, INST64 inst)
409 {
410 UINT64 val;
411 IA64FAULT fault;
412 UINT64 reg;
414 reg = vcpu_get_gr(vcpu,inst.M43.r3);
415 if (privify_en && inst.M43.r1 > 63) {
416 // privified mov from cpuid
417 fault = vcpu_get_cpuid(vcpu,reg,&val);
418 if (fault == IA64_NO_FAULT)
419 return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
420 }
421 else {
422 fault = vcpu_get_rr(vcpu,reg,&val);
423 if (fault == IA64_NO_FAULT)
424 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
425 }
426 return fault;
427 }
429 IA64FAULT priv_mov_from_pkr(VCPU *vcpu, INST64 inst)
430 {
431 UINT64 val;
432 IA64FAULT fault;
434 fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
435 if (fault == IA64_NO_FAULT)
436 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
437 else return fault;
438 }
440 IA64FAULT priv_mov_from_dbr(VCPU *vcpu, INST64 inst)
441 {
442 UINT64 val;
443 IA64FAULT fault;
445 fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
446 if (fault == IA64_NO_FAULT)
447 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
448 else return fault;
449 }
451 IA64FAULT priv_mov_from_ibr(VCPU *vcpu, INST64 inst)
452 {
453 UINT64 val;
454 IA64FAULT fault;
456 fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
457 if (fault == IA64_NO_FAULT)
458 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
459 else return fault;
460 }
462 IA64FAULT priv_mov_from_pmc(VCPU *vcpu, INST64 inst)
463 {
464 UINT64 val;
465 IA64FAULT fault;
466 UINT64 reg;
468 reg = vcpu_get_gr(vcpu,inst.M43.r3);
469 if (privify_en && inst.M43.r1 > 63) {
470 // privified mov from pmd
471 fault = vcpu_get_pmd(vcpu,reg,&val);
472 if (fault == IA64_NO_FAULT)
473 return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
474 }
475 else {
476 fault = vcpu_get_pmc(vcpu,reg,&val);
477 if (fault == IA64_NO_FAULT)
478 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
479 }
480 return fault;
481 }
483 unsigned long from_cr_cnt[128] = { 0 };
485 #define cr_get(cr) \
486 ((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
487 vcpu_set_gr(vcpu, tgt, val, 0) : fault;
489 IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
490 {
491 UINT64 tgt = inst.M33.r1;
492 UINT64 val;
493 IA64FAULT fault;
495 from_cr_cnt[inst.M33.cr3]++;
496 switch (inst.M33.cr3) {
497 case 0: return cr_get(dcr);
498 case 1: return cr_get(itm);
499 case 2: return cr_get(iva);
500 case 8: return cr_get(pta);
501 case 16:return cr_get(ipsr);
502 case 17:return cr_get(isr);
503 case 19:return cr_get(iip);
504 case 20:return cr_get(ifa);
505 case 21:return cr_get(itir);
506 case 22:return cr_get(iipa);
507 case 23:return cr_get(ifs);
508 case 24:return cr_get(iim);
509 case 25:return cr_get(iha);
510 case 64:return cr_get(lid);
511 case 65:return cr_get(ivr);
512 case 66:return cr_get(tpr);
513 case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
514 case 68:return cr_get(irr0);
515 case 69:return cr_get(irr1);
516 case 70:return cr_get(irr2);
517 case 71:return cr_get(irr3);
518 case 72:return cr_get(itv);
519 case 73:return cr_get(pmv);
520 case 74:return cr_get(cmcv);
521 case 80:return cr_get(lrr0);
522 case 81:return cr_get(lrr1);
523 default: return IA64_ILLOP_FAULT;
524 }
525 return IA64_ILLOP_FAULT;
526 }
528 IA64FAULT priv_mov_from_psr(VCPU *vcpu, INST64 inst)
529 {
530 UINT64 tgt = inst.M33.r1;
531 UINT64 val;
532 IA64FAULT fault;
534 if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
535 return vcpu_set_gr(vcpu, tgt, val, 0);
536 else return fault;
537 }
539 /**************************************************************************
540 Privileged operation decode and dispatch routines
541 **************************************************************************/
543 static const IA64_SLOT_TYPE slot_types[0x20][3] = {
544 {M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
545 {M, I, ILLEGAL}, {M, I, ILLEGAL},
546 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
547 {M, M, I}, {M, M, I}, {M, M, I}, {M, M, I},
548 {M, F, I}, {M, F, I},
549 {M, M, F}, {M, M, F},
550 {M, I, B}, {M, I, B},
551 {M, B, B}, {M, B, B},
552 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
553 {B, B, B}, {B, B, B},
554 {M, M, B}, {M, M, B},
555 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
556 {M, F, B}, {M, F, B},
557 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL}
558 };
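/*
 * Editor's note: slot_types[] maps the 5-bit bundle template to the unit
 * type (M, I, B, F or ILLEGAL) of each of the three slots; priv_handle_op()
 * below uses it to pick the decode path for the faulting slot.
 */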
560 // pointer to privileged emulation function
561 typedef IA64FAULT (*PPEFCN)(VCPU *vcpu, INST64 inst);
563 static const PPEFCN Mpriv_funcs[64] = {
564 priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
565 priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
566 0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
567 priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
568 priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr, priv_mov_from_pkr,
569 priv_mov_from_pmc, 0, 0, 0,
570 0, 0, 0, 0,
571 0, 0, priv_tpa, priv_tak,
572 0, 0, 0, 0,
573 priv_mov_from_cr, priv_mov_from_psr, 0, 0,
574 0, 0, 0, 0,
575 priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
576 0, 0, 0, 0,
577 priv_ptc_e, 0, 0, 0,
578 0, 0, 0, 0, 0, 0, 0, 0
579 };
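/*
 * Editor's note: Mpriv_funcs[] is indexed by the x6 opcode extension of
 * major-opcode-1 M-unit instructions; a zero entry means the opcode is not
 * emulated and decoding falls through to the "can't handle privop" path at
 * the end of priv_handle_op().
 */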
581 struct {
582 unsigned long mov_to_ar_imm;
583 unsigned long mov_to_ar_reg;
584 unsigned long mov_from_ar;
585 unsigned long ssm;
586 unsigned long rsm;
587 unsigned long rfi;
588 unsigned long bsw0;
589 unsigned long bsw1;
590 unsigned long cover;
591 unsigned long fc;
592 unsigned long cpuid;
593 unsigned long Mpriv_cnt[64];
594 } privcnt = { 0 };
596 unsigned long privop_trace = 0;
598 IA64FAULT
599 priv_handle_op(VCPU *vcpu, REGS *regs, int privlvl)
600 {
601 IA64_BUNDLE bundle;
602 IA64_BUNDLE __get_domain_bundle(UINT64);
603 int slot;
604 IA64_SLOT_TYPE slot_type;
605 INST64 inst;
606 PPEFCN pfunc;
607 unsigned long ipsr = regs->cr_ipsr;
608 UINT64 iip = regs->cr_iip;
609 int x6;
611 // make a local copy of the bundle containing the privop
612 #if 1
613 bundle = __get_domain_bundle(iip);
614 if (!bundle.i64[0] && !bundle.i64[1])
615 #else
616 if (__copy_from_user(&bundle,iip,sizeof(bundle)))
617 #endif
618 {
619 //printf("*** priv_handle_op: privop bundle at 0x%lx not mapped, retrying\n",iip);
620 return vcpu_force_data_miss(vcpu,regs->cr_iip);
621 }
622 #if 0
623 if (iip==0xa000000100001820) {
624 static int firstpagefault = 1;
625 if (firstpagefault) {
626 printf("*** First time to domain page fault!\n"); firstpagefault=0;
627 }
628 }
629 #endif
630 if (privop_trace) {
631 static long i = 400;
632 //if (i > 0) printf("priv_handle_op: at 0x%lx\n",iip);
633 if (i > 0) printf("priv_handle_op: privop trace at 0x%lx, itc=%lx, itm=%lx\n",
634 iip,ia64_get_itc(),ia64_get_itm());
635 i--;
636 }
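/* Editor's note: psr.ri in the interrupted ipsr selects which of the three
 * 41-bit slots faulted; the shifts below pull that slot out of the two
 * 64-bit bundle words (slot 1 straddles the word boundary, hence the
 * combination of both words). */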
637 slot = ((struct ia64_psr *)&ipsr)->ri;
638 if (!slot) inst.inst = (bundle.i64[0]>>5) & MASK_41;
639 else if (slot == 1)
640 inst.inst = ((bundle.i64[0]>>46) | bundle.i64[1]<<18) & MASK_41;
641 else if (slot == 2) inst.inst = (bundle.i64[1]>>23) & MASK_41;
642 else printf("priv_handle_op: illegal slot: %d\n", slot);
644 slot_type = slot_types[bundle.template][slot];
645 if (priv_verbose) {
646 printf("priv_handle_op: checking bundle at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
647 iip, (UINT64)inst.inst, slot, slot_type);
648 }
649 if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
650 // break instr for privified cover
651 }
652 else if (privlvl != 2) return (IA64_ILLOP_FAULT);
653 switch (slot_type) {
654 case M:
655 if (inst.generic.major == 0) {
656 #if 0
657 if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
658 privcnt.cover++;
659 return priv_cover(vcpu,inst);
660 }
661 #endif
662 if (inst.M29.x3 != 0) break;
663 if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
664 privcnt.mov_to_ar_imm++;
665 return priv_mov_to_ar_imm(vcpu,inst);
666 }
667 if (inst.M44.x4 == 6) {
668 privcnt.ssm++;
669 return priv_ssm(vcpu,inst);
670 }
671 if (inst.M44.x4 == 7) {
672 privcnt.rsm++;
673 return priv_rsm(vcpu,inst);
674 }
675 break;
676 }
677 else if (inst.generic.major != 1) break;
678 x6 = inst.M29.x6;
679 if (x6 == 0x2a) {
680 if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8)
681 privcnt.mov_from_ar++; // privified mov from kr
682 else privcnt.mov_to_ar_reg++;
683 return priv_mov_to_ar_reg(vcpu,inst);
684 }
685 if (inst.M29.x3 != 0) break;
686 if (!(pfunc = Mpriv_funcs[x6])) break;
687 if (x6 == 0x1e || x6 == 0x1f) { // tpa or tak are "special"
688 if (privify_en && inst.M46.r3 > 63) {
689 if (x6 == 0x1e) x6 = 0x1b;
690 else x6 = 0x1a;
691 }
692 }
693 if (privify_en && x6 == 52 && inst.M28.r3 > 63)
694 privcnt.fc++;
695 else if (privify_en && x6 == 16 && inst.M43.r3 > 63)
696 privcnt.cpuid++;
697 else privcnt.Mpriv_cnt[x6]++;
698 return (*pfunc)(vcpu,inst);
699 break;
700 case B:
701 if (inst.generic.major != 0) break;
702 if (inst.B8.x6 == 0x08) {
703 IA64FAULT fault;
704 privcnt.rfi++;
705 fault = priv_rfi(vcpu,inst);
706 if (fault == IA64_NO_FAULT) fault = IA64_RFI_IN_PROGRESS;
707 return fault;
708 }
709 if (inst.B8.x6 == 0x0c) {
710 privcnt.bsw0++;
711 return priv_bsw0(vcpu,inst);
712 }
713 if (inst.B8.x6 == 0x0d) {
714 privcnt.bsw1++;
715 return priv_bsw1(vcpu,inst);
716 }
717 if (inst.B8.x6 == 0x0) { // break instr for privified cover
718 privcnt.cover++;
719 return priv_cover(vcpu,inst);
720 }
721 break;
722 case I:
723 if (inst.generic.major != 0) break;
724 #if 0
725 if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
726 privcnt.cover++;
727 return priv_cover(vcpu,inst);
728 }
729 #endif
730 if (inst.I26.x3 != 0) break; // I26.x3 == I27.x3
731 if (inst.I26.x6 == 0x2a) {
732 if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8)
733 privcnt.mov_from_ar++; // privified mov from kr
734 else privcnt.mov_to_ar_reg++;
735 return priv_mov_to_ar_reg(vcpu,inst);
736 }
737 if (inst.I27.x6 == 0x0a) {
738 privcnt.mov_to_ar_imm++;
739 return priv_mov_to_ar_imm(vcpu,inst);
740 }
741 break;
742 default:
743 break;
744 }
745 //printf("We who are about do die salute you\n");
746 printf("priv_handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d), ipsr=0x%lx\n",
747 iip, (UINT64)inst.inst, slot, slot_type, ipsr);
748 //printf("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
749 //thread_mozambique("privop fault\n");
750 return (IA64_ILLOP_FAULT);
751 }
753 /** Emulate a privileged operation.
754 *
755 * This should probably return 0 on success and the "trap number"
756 * (e.g. illegal operation for bad register, priv op for an
757 * instruction that isn't allowed, etc.) on "failure"
758 *
759 * @param vcpu virtual cpu
760 * @param isr interruption status register (cr.isr) value for the fault
761 * @return fault
762 */
763 IA64FAULT
764 priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
765 {
766 IA64FAULT fault;
767 UINT64 ipsr = regs->cr_ipsr;
768 UINT64 isrcode = (isr >> 4) & 0xf;
769 int privlvl;
771 // handle privops masked as illops? and breaks (6)
772 if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
773 printf("priv_emulate: isrcode != 0 or 1 or 2\n");
774 printf("priv_emulate: returning ILLOP, not implemented!\n");
775 while (1);
776 return IA64_ILLOP_FAULT;
777 }
778 //if (isrcode != 1 && isrcode != 2) return 0;
779 privlvl = (ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
780 // it's OK for a privified-cover to be executed in user-land
781 fault = priv_handle_op(vcpu,regs,privlvl);
782 if ((fault == IA64_NO_FAULT) || (fault == IA64_EXTINT_VECTOR)) { // success!!
783 // update iip/ipsr to point to the next instruction
784 (void)vcpu_increment_iip(vcpu);
785 }
786 if (fault == IA64_ILLOP_FAULT)
787 printf("priv_emulate: priv_handle_op fails, isr=0x%lx\n",isr);
788 return fault;
789 }
792 // FIXME: Move these to include/public/arch-ia64?
793 #define HYPERPRIVOP_RFI 0x1
794 #define HYPERPRIVOP_RSM_DT 0x2
795 #define HYPERPRIVOP_SSM_DT 0x3
796 #define HYPERPRIVOP_COVER 0x4
797 #define HYPERPRIVOP_ITC_D 0x5
798 #define HYPERPRIVOP_ITC_I 0x6
799 #define HYPERPRIVOP_SSM_I 0x7
800 #define HYPERPRIVOP_GET_IVR 0x8
801 #define HYPERPRIVOP_GET_TPR 0x9
802 #define HYPERPRIVOP_SET_TPR 0xa
803 #define HYPERPRIVOP_EOI 0xb
804 #define HYPERPRIVOP_SET_ITM 0xc
805 #define HYPERPRIVOP_THASH 0xd
806 #define HYPERPRIVOP_PTC_GA 0xe
807 #define HYPERPRIVOP_ITR_D 0xf
808 #define HYPERPRIVOP_GET_RR 0x10
809 #define HYPERPRIVOP_SET_RR 0x11
810 #define HYPERPRIVOP_SET_KR 0x12
811 #define HYPERPRIVOP_FC 0x13
812 #define HYPERPRIVOP_GET_CPUID 0x14
813 #define HYPERPRIVOP_GET_PMD 0x15
814 #define HYPERPRIVOP_GET_EFLAG 0x16
815 #define HYPERPRIVOP_SET_EFLAG 0x17
816 #define HYPERPRIVOP_MAX 0x17
818 static const char * const hyperpriv_str[HYPERPRIVOP_MAX+1] = {
819 0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i", "ssm.i",
820 "=ivr", "=tpr", "tpr=", "eoi", "itm=", "thash", "ptc.ga", "itr.d",
821 "=rr", "rr=", "kr=", "fc", "=cpuid", "=pmd", "=ar.eflg", "ar.eflg="
822 };
824 unsigned long slow_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
825 unsigned long fast_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
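/* Editor's note (assumption): the slow_ counters are incremented by
 * ia64_hyperprivop() below; the fast_ counters are presumably incremented by
 * the assembly fast paths mentioned in the comment that follows. */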
827 /* hyperprivops are generally executed in assembly (with physical psr.ic off)
828 * so this code is primarily used for debugging them */
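/*
 * Illustrative sketch only (editor's addition, not part of this file): a
 * paravirtualized guest requests one of the operations defined above by
 * placing any operands in r8/r9 and executing a break whose immediate is
 * the hyperprivop number, e.g. for HYPERPRIVOP_SET_RR roughly:
 *
 *	mov r8 = <rr index>
 *	mov r9 = <new rr value>
 *	break HYPERPRIVOP_SET_RR
 *
 * ia64_hyperprivop() receives that immediate as "iim"; returning 1 tells
 * the caller to step past the break (advance iip), returning 0 (the rfi
 * case) leaves iip alone.
 */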
829 int
830 ia64_hyperprivop(unsigned long iim, REGS *regs)
831 {
832 struct vcpu *v = current;
833 UINT64 val;
834 UINT64 itir, ifa;
836 // FIXME: Handle faults appropriately for these
837 if (!iim || iim > HYPERPRIVOP_MAX) {
838 printf("bad hyperprivop; ignored\n");
839 printf("iim=%lx, iip=0x%lx\n", iim, regs->cr_iip);
840 return 1;
841 }
842 slow_hyperpriv_cnt[iim]++;
843 switch(iim) {
844 case HYPERPRIVOP_RFI:
845 (void)vcpu_rfi(v);
846 return 0; // don't update iip
847 case HYPERPRIVOP_RSM_DT:
848 (void)vcpu_reset_psr_dt(v);
849 return 1;
850 case HYPERPRIVOP_SSM_DT:
851 (void)vcpu_set_psr_dt(v);
852 return 1;
853 case HYPERPRIVOP_COVER:
854 (void)vcpu_cover(v);
855 return 1;
856 case HYPERPRIVOP_ITC_D:
857 (void)vcpu_get_itir(v,&itir);
858 (void)vcpu_get_ifa(v,&ifa);
859 (void)vcpu_itc_d(v,regs->r8,itir,ifa);
860 return 1;
861 case HYPERPRIVOP_ITC_I:
862 (void)vcpu_get_itir(v,&itir);
863 (void)vcpu_get_ifa(v,&ifa);
864 (void)vcpu_itc_i(v,regs->r8,itir,ifa);
865 return 1;
866 case HYPERPRIVOP_SSM_I:
867 (void)vcpu_set_psr_i(v);
868 return 1;
869 case HYPERPRIVOP_GET_IVR:
870 (void)vcpu_get_ivr(v,&val);
871 regs->r8 = val;
872 return 1;
873 case HYPERPRIVOP_GET_TPR:
874 (void)vcpu_get_tpr(v,&val);
875 regs->r8 = val;
876 return 1;
877 case HYPERPRIVOP_SET_TPR:
878 (void)vcpu_set_tpr(v,regs->r8);
879 return 1;
880 case HYPERPRIVOP_EOI:
881 (void)vcpu_set_eoi(v,0L);
882 return 1;
883 case HYPERPRIVOP_SET_ITM:
884 (void)vcpu_set_itm(v,regs->r8);
885 return 1;
886 case HYPERPRIVOP_THASH:
887 (void)vcpu_thash(v,regs->r8,&val);
888 regs->r8 = val;
889 return 1;
890 case HYPERPRIVOP_PTC_GA:
891 (void)vcpu_ptc_ga(v,regs->r8,(1L << ((regs->r9 & 0xfc) >> 2)));
892 return 1;
893 case HYPERPRIVOP_ITR_D:
894 (void)vcpu_get_itir(v,&itir);
895 (void)vcpu_get_ifa(v,&ifa);
896 (void)vcpu_itr_d(v,regs->r8,regs->r9,itir,ifa);
897 return 1;
898 case HYPERPRIVOP_GET_RR:
899 (void)vcpu_get_rr(v,regs->r8,&val);
900 regs->r8 = val;
901 return 1;
902 case HYPERPRIVOP_SET_RR:
903 (void)vcpu_set_rr(v,regs->r8,regs->r9);
904 return 1;
905 case HYPERPRIVOP_SET_KR:
906 (void)vcpu_set_ar(v,regs->r8,regs->r9);
907 return 1;
908 case HYPERPRIVOP_FC:
909 (void)vcpu_fc(v,regs->r8);
910 return 1;
911 case HYPERPRIVOP_GET_CPUID:
912 (void)vcpu_get_cpuid(v,regs->r8,&val);
913 regs->r8 = val;
914 return 1;
915 case HYPERPRIVOP_GET_PMD:
916 (void)vcpu_get_pmd(v,regs->r8,&val);
917 regs->r8 = val;
918 return 1;
919 case HYPERPRIVOP_GET_EFLAG:
920 (void)vcpu_get_ar(v,24,&val);
921 regs->r8 = val;
922 return 1;
923 case HYPERPRIVOP_SET_EFLAG:
924 (void)vcpu_set_ar(v,24,regs->r8);
925 return 1;
926 }
927 return 0;
928 }
931 /**************************************************************************
932 Privileged operation instrumentation routines
933 **************************************************************************/
935 static const char * const Mpriv_str[64] = {
936 "mov_to_rr", "mov_to_dbr", "mov_to_ibr", "mov_to_pkr",
937 "mov_to_pmc", "mov_to_pmd", "<0x06>", "<0x07>",
938 "<0x08>", "ptc_l", "ptc_g", "ptc_ga",
939 "ptr_d", "ptr_i", "itr_d", "itr_i",
940 "mov_from_rr", "mov_from_dbr", "mov_from_ibr", "mov_from_pkr",
941 "mov_from_pmc", "<0x15>", "<0x16>", "<0x17>",
942 "<0x18>", "<0x19>", "privified-thash", "privified-ttag",
943 "<0x1c>", "<0x1d>", "tpa", "tak",
944 "<0x20>", "<0x21>", "<0x22>", "<0x23>",
945 "mov_from_cr", "mov_from_psr", "<0x26>", "<0x27>",
946 "<0x28>", "<0x29>", "<0x2a>", "<0x2b>",
947 "mov_to_cr", "mov_to_psr", "itc_d", "itc_i",
948 "<0x30>", "<0x31>", "<0x32>", "<0x33>",
949 "ptc_e", "<0x35>", "<0x36>", "<0x37>",
950 "<0x38>", "<0x39>", "<0x3a>", "<0x3b>",
951 "<0x3c>", "<0x3d>", "<0x3e>", "<0x3f>"
952 };
954 #define RS "Rsvd"
955 static const char * const cr_str[128] = {
956 "dcr","itm","iva",RS,RS,RS,RS,RS,
957 "pta",RS,RS,RS,RS,RS,RS,RS,
958 "ipsr","isr",RS,"iip","ifa","itir","iipa","ifs",
959 "iim","iha",RS,RS,RS,RS,RS,RS,
960 RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
961 RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
962 "lid","ivr","tpr","eoi","irr0","irr1","irr2","irr3",
963 "itv","pmv","cmcv",RS,RS,RS,RS,RS,
964 "lrr0","lrr1",RS,RS,RS,RS,RS,RS,
965 RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
966 RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
967 RS,RS,RS,RS,RS,RS,RS,RS
968 };
970 // FIXME: should use snprintf to ensure no buffer overflow
971 static int dump_privop_counts(char *buf)
972 {
973 int i, j;
974 UINT64 sum = 0;
975 char *s = buf;
977 // this is ugly and should probably produce sorted output
978 // but it will have to do for now
979 sum += privcnt.mov_to_ar_imm; sum += privcnt.mov_to_ar_reg;
980 sum += privcnt.ssm; sum += privcnt.rsm;
981 sum += privcnt.rfi; sum += privcnt.bsw0;
982 sum += privcnt.bsw1; sum += privcnt.cover;
983 for (i=0; i < 64; i++) sum += privcnt.Mpriv_cnt[i];
984 s += sprintf(s,"Privop statistics: (Total privops: %ld)\n",sum);
985 if (privcnt.mov_to_ar_imm)
986 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.mov_to_ar_imm,
987 "mov_to_ar_imm", (privcnt.mov_to_ar_imm*100L)/sum);
988 if (privcnt.mov_to_ar_reg)
989 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.mov_to_ar_reg,
990 "mov_to_ar_reg", (privcnt.mov_to_ar_reg*100L)/sum);
991 if (privcnt.mov_from_ar)
992 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.mov_from_ar,
993 "privified-mov_from_ar", (privcnt.mov_from_ar*100L)/sum);
994 if (privcnt.ssm)
995 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.ssm,
996 "ssm", (privcnt.ssm*100L)/sum);
997 if (privcnt.rsm)
998 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.rsm,
999 "rsm", (privcnt.rsm*100L)/sum);
1000 if (privcnt.rfi)
1001 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.rfi,
1002 "rfi", (privcnt.rfi*100L)/sum);
1003 if (privcnt.bsw0)
1004 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.bsw0,
1005 "bsw0", (privcnt.bsw0*100L)/sum);
1006 if (privcnt.bsw1)
1007 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.bsw1,
1008 "bsw1", (privcnt.bsw1*100L)/sum);
1009 if (privcnt.cover)
1010 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.cover,
1011 "cover", (privcnt.cover*100L)/sum);
1012 if (privcnt.fc)
1013 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.fc,
1014 "privified-fc", (privcnt.fc*100L)/sum);
1015 if (privcnt.cpuid)
1016 s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.cpuid,
1017 "privified-getcpuid", (privcnt.cpuid*100L)/sum);
1018 for (i=0; i < 64; i++) if (privcnt.Mpriv_cnt[i]) {
1019 if (!Mpriv_str[i]) s += sprintf(s,"PRIVSTRING NULL!!\n");
1020 else s += sprintf(s,"%10ld %s [%ld%%]\n", privcnt.Mpriv_cnt[i],
1021 Mpriv_str[i], (privcnt.Mpriv_cnt[i]*100L)/sum);
1022 if (i == 0x24) { // mov from CR
1023 s += sprintf(s," [");
1024 for (j=0; j < 128; j++) if (from_cr_cnt[j]) {
1025 if (!cr_str[j])
1026 s += sprintf(s,"PRIVSTRING NULL!!\n");
1027 s += sprintf(s,"%s(%ld),",cr_str[j],from_cr_cnt[j]);
1028 }
1029 s += sprintf(s,"]\n");
1030 }
1031 else if (i == 0x2c) { // mov to CR
1032 s += sprintf(s," [");
1033 for (j=0; j < 128; j++) if (to_cr_cnt[j]) {
1034 if (!cr_str[j])
1035 s += sprintf(s,"PRIVSTRING NULL!!\n");
1036 s += sprintf(s,"%s(%ld),",cr_str[j],to_cr_cnt[j]);
1037 }
1038 s += sprintf(s,"]\n");
1039 }
1040 }
1041 return s - buf;
1042 }
1044 static int zero_privop_counts(char *buf)
1045 {
1046 int i, j;
1047 char *s = buf;
1049 // this is ugly and should probably produce sorted output
1050 // but it will have to do for now
1051 privcnt.mov_to_ar_imm = 0; privcnt.mov_to_ar_reg = 0;
1052 privcnt.mov_from_ar = 0;
1053 privcnt.ssm = 0; privcnt.rsm = 0;
1054 privcnt.rfi = 0; privcnt.bsw0 = 0;
1055 privcnt.bsw1 = 0; privcnt.cover = 0;
1056 privcnt.fc = 0; privcnt.cpuid = 0;
1057 for (i=0; i < 64; i++) privcnt.Mpriv_cnt[i] = 0;
1058 for (j=0; j < 128; j++) from_cr_cnt[j] = 0;
1059 for (j=0; j < 128; j++) to_cr_cnt[j] = 0;
1060 s += sprintf(s,"All privop statistics zeroed\n");
1061 return s - buf;
1062 }
1064 #ifdef PRIVOP_ADDR_COUNT
1066 extern struct privop_addr_count privop_addr_counter[];
1068 void privop_count_addr(unsigned long iip, int inst)
1069 {
1070 struct privop_addr_count *v = &privop_addr_counter[inst];
1071 int i;
1073 for (i = 0; i < PRIVOP_COUNT_NADDRS; i++) {
1074 if (!v->addr[i]) { v->addr[i] = iip; v->count[i]++; return; }
1075 else if (v->addr[i] == iip) { v->count[i]++; return; }
1076 }
1077 v->overflow++;
1078 }
1080 static int dump_privop_addrs(char *buf)
1081 {
1082 int i,j;
1083 char *s = buf;
1084 s += sprintf(s,"Privop addresses:\n");
1085 for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
1086 struct privop_addr_count *v = &privop_addr_counter[i];
1087 s += sprintf(s,"%s:\n",v->instname);
1088 for (j = 0; j < PRIVOP_COUNT_NADDRS; j++) {
1089 if (!v->addr[j]) break;
1090 s += sprintf(s," at 0x%lx #%ld\n",v->addr[j],v->count[j]);
1091 }
1092 if (v->overflow)
1093 s += sprintf(s," other #%ld\n",v->overflow);
1094 }
1095 return s - buf;
1096 }
1098 static void zero_privop_addrs(void)
1099 {
1100 int i,j;
1101 for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
1102 struct privop_addr_count *v = &privop_addr_counter[i];
1103 for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
1104 v->addr[j] = v->count[j] = 0;
1105 v->overflow = 0;
1106 }
1107 }
1108 #endif
1110 extern unsigned long dtlb_translate_count;
1111 extern unsigned long tr_translate_count;
1112 extern unsigned long phys_translate_count;
1113 extern unsigned long vhpt_translate_count;
1114 extern unsigned long fast_vhpt_translate_count;
1115 extern unsigned long recover_to_page_fault_count;
1116 extern unsigned long recover_to_break_fault_count;
1117 extern unsigned long lazy_cover_count;
1118 extern unsigned long idle_when_pending;
1119 extern unsigned long pal_halt_light_count;
1120 extern unsigned long context_switch_count;
1122 static int dump_misc_stats(char *buf)
1123 {
1124 char *s = buf;
1125 s += sprintf(s,"Virtual TR translations: %ld\n",tr_translate_count);
1126 s += sprintf(s,"Virtual VHPT slow translations: %ld\n",vhpt_translate_count);
1127 s += sprintf(s,"Virtual VHPT fast translations: %ld\n",fast_vhpt_translate_count);
1128 s += sprintf(s,"Virtual DTLB translations: %ld\n",dtlb_translate_count);
1129 s += sprintf(s,"Physical translations: %ld\n",phys_translate_count);
1130 s += sprintf(s,"Recoveries to page fault: %ld\n",recover_to_page_fault_count);
1131 s += sprintf(s,"Recoveries to break fault: %ld\n",recover_to_break_fault_count);
1132 s += sprintf(s,"Idle when pending: %ld\n",idle_when_pending);
1133 s += sprintf(s,"PAL_HALT_LIGHT (no pending): %ld\n",pal_halt_light_count);
1134 s += sprintf(s,"context switches: %ld\n",context_switch_count);
1135 s += sprintf(s,"Lazy covers: %ld\n",lazy_cover_count);
1136 return s - buf;
1137 }
1139 static void zero_misc_stats(void)
1140 {
1141 dtlb_translate_count = 0;
1142 tr_translate_count = 0;
1143 phys_translate_count = 0;
1144 vhpt_translate_count = 0;
1145 fast_vhpt_translate_count = 0;
1146 recover_to_page_fault_count = 0;
1147 recover_to_break_fault_count = 0;
1148 lazy_cover_count = 0;
1149 pal_halt_light_count = 0;
1150 idle_when_pending = 0;
1151 context_switch_count = 0;
1152 }
1154 static int dump_hyperprivop_counts(char *buf)
1155 {
1156 int i;
1157 char *s = buf;
1158 unsigned long total = 0;
1159 for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += slow_hyperpriv_cnt[i];
1160 s += sprintf(s,"Slow hyperprivops (total %ld):\n",total);
1161 for (i = 1; i <= HYPERPRIVOP_MAX; i++)
1162 if (slow_hyperpriv_cnt[i])
1163 s += sprintf(s,"%10ld %s\n",
1164 slow_hyperpriv_cnt[i], hyperpriv_str[i]);
1165 total = 0;
1166 for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += fast_hyperpriv_cnt[i];
1167 s += sprintf(s,"Fast hyperprivops (total %ld):\n",total);
1168 for (i = 1; i <= HYPERPRIVOP_MAX; i++)
1169 if (fast_hyperpriv_cnt[i])
1170 s += sprintf(s,"%10ld %s\n",
1171 fast_hyperpriv_cnt[i], hyperpriv_str[i]);
1172 return s - buf;
1173 }
1175 static void zero_hyperprivop_counts(void)
1176 {
1177 int i;
1178 for (i = 0; i <= HYPERPRIVOP_MAX; i++) slow_hyperpriv_cnt[i] = 0;
1179 for (i = 0; i <= HYPERPRIVOP_MAX; i++) fast_hyperpriv_cnt[i] = 0;
1180 }
1182 #define TMPBUFLEN 8*1024
1183 int dump_privop_counts_to_user(char __user *ubuf, int len)
1184 {
1185 char buf[TMPBUFLEN];
1186 int n = dump_privop_counts(buf);
1188 n += dump_hyperprivop_counts(buf + n);
1189 n += dump_reflect_counts(buf + n);
1190 #ifdef PRIVOP_ADDR_COUNT
1191 n += dump_privop_addrs(buf + n);
1192 #endif
1193 n += dump_vhpt_stats(buf + n);
1194 n += dump_misc_stats(buf + n);
1195 if (len < TMPBUFLEN) return -1;
1196 if (__copy_to_user(ubuf,buf,n)) return -1;
1197 return n;
1198 }
1200 int zero_privop_counts_to_user(char __user *ubuf, int len)
1201 {
1202 char buf[TMPBUFLEN];
1203 int n = zero_privop_counts(buf);
1205 zero_hyperprivop_counts();
1206 #ifdef PRIVOP_ADDR_COUNT
1207 zero_privop_addrs();
1208 #endif
1209 zero_vhpt_stats();
1210 zero_misc_stats();
1211 zero_reflect_counts();
1212 if (len < TMPBUFLEN) return -1;
1213 if (__copy_to_user(ubuf,buf,n)) return -1;
1214 return n;
1215 }