ia64/xen-unstable

view xen/arch/ia64/xen/privop.c @ 6864:aecdccb1a350

Add additional stats to track VHPT saturation
author djm@kirby.fc.hp.com
date Fri Sep 16 16:54:53 2005 -0600 (2005-09-16)
parents 3ca4ca7a9cc2
children 7f9acc83ffcd
/*
 * Privileged operation "API" handling functions.
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/processor.h>
#include <asm/delay.h> // Debug only
//#include <debug.h>

long priv_verbose=0;
/**************************************************************************
Hypercall bundle creation
**************************************************************************/
void build_hypercall_bundle(UINT64 *imva, UINT64 brkimm, UINT64 hypnum, UINT64 ret)
{
    INST64_A5 slot0;
    INST64_I19 slot1;
    INST64_B4 slot2;
    IA64_BUNDLE bundle;

    // slot0: mov r2 = hypnum (low 20 bits)
    slot0.inst = 0;
    slot0.qp = 0; slot0.r1 = 2; slot0.r3 = 0; slot0.major = 0x9;
    slot0.imm7b = hypnum; slot0.imm9d = hypnum >> 7;
    slot0.imm5c = hypnum >> 16; slot0.s = 0;
    // slot1: break brkimm
    slot1.inst = 0;
    slot1.qp = 0; slot1.x6 = 0; slot1.x3 = 0; slot1.major = 0x0;
    slot1.imm20 = brkimm; slot1.i = brkimm >> 20;
    // if ret slot2: br.ret.sptk.many rp
    // else slot2: br.cond.sptk.many rp
    slot2.inst = 0; slot2.qp = 0; slot2.p = 1; slot2.b2 = 0;
    slot2.wh = 0; slot2.d = 0; slot2.major = 0x0;
    if (ret) {
        slot2.btype = 4; slot2.x6 = 0x21;
    }
    else {
        slot2.btype = 0; slot2.x6 = 0x20;
    }

    bundle.i64[0] = 0; bundle.i64[1] = 0;
    bundle.template = 0x11;
    bundle.slot0 = slot0.inst; bundle.slot2 = slot2.inst;
    bundle.slot1a = slot1.inst; bundle.slot1b = slot1.inst >> 18;

    *imva++ = bundle.i64[0]; *imva = bundle.i64[1];
}
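/*
 * Illustrative use (hypothetical caller, constants made up for the example):
 * emit a two-word hypercall stub that loads the hypercall number into r2,
 * traps into the hypervisor via "break", and returns to its caller:
 *
 *     UINT64 stub[2];                               // one 16-byte bundle
 *     build_hypercall_bundle(stub, 0x1000, 42, 1);  // brkimm, hypnum, ret
 *     // stub now encodes: mov r2=42 ; break 0x1000 ; br.ret.sptk.many rp
 */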
/**************************************************************************
Privileged operation emulation routines
**************************************************************************/
IA64FAULT priv_rfi(VCPU *vcpu, INST64 inst)
{
    return vcpu_rfi(vcpu);
}

IA64FAULT priv_bsw0(VCPU *vcpu, INST64 inst)
{
    return vcpu_bsw0(vcpu);
}

IA64FAULT priv_bsw1(VCPU *vcpu, INST64 inst)
{
    return vcpu_bsw1(vcpu);
}

IA64FAULT priv_cover(VCPU *vcpu, INST64 inst)
{
    return vcpu_cover(vcpu);
}
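/*
 * Note on the purge emulations below: in the ptc.l/ptc.g/ptc.ga/ptr.d/ptr.i
 * encodings, r3 carries the virtual address and bits 7:2 of r2 carry the
 * log2 of the purge size, so the byte range is 1 << ((r2 & 0xfc) >> 2).
 * Worked example (illustrative): r2 = 0x40 encodes ps = 16, i.e. a 64KB range.
 */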
IA64FAULT priv_ptc_l(VCPU *vcpu, INST64 inst)
{
    UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    UINT64 addr_range;

    addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
    return vcpu_ptc_l(vcpu,vadr,addr_range);
}
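/*
 * Several emulations below accept a "privified" register number: the guest
 * paravirtualization rewrites some unprivileged instructions (fc, ttag,
 * thash, mov from kr/cpuid/pmd) into privileged lookalikes whose source or
 * target register field is offset by 64.  A register field > 63 therefore
 * selects the alternate emulation on r(N-64); see the NOTE comments in the
 * individual routines.
 */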
IA64FAULT priv_ptc_e(VCPU *vcpu, INST64 inst)
{
    UINT src = inst.M28.r3;

    // NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
    if (src > 63) return(vcpu_fc(vcpu,vcpu_get_gr(vcpu,src - 64)));
    return vcpu_ptc_e(vcpu,vcpu_get_gr(vcpu,src));
}

IA64FAULT priv_ptc_g(VCPU *vcpu, INST64 inst)
{
    UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    UINT64 addr_range;

    addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
    return vcpu_ptc_g(vcpu,vadr,addr_range);
}

IA64FAULT priv_ptc_ga(VCPU *vcpu, INST64 inst)
{
    UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    UINT64 addr_range;

    addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
    return vcpu_ptc_ga(vcpu,vadr,addr_range);
}

IA64FAULT priv_ptr_d(VCPU *vcpu, INST64 inst)
{
    UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    UINT64 addr_range;

    addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
    return vcpu_ptr_d(vcpu,vadr,addr_range);
}

IA64FAULT priv_ptr_i(VCPU *vcpu, INST64 inst)
{
    UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    UINT64 addr_range;

    addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
    return vcpu_ptr_i(vcpu,vadr,addr_range);
}

IA64FAULT priv_tpa(VCPU *vcpu, INST64 inst)
{
    UINT64 padr;
    UINT fault;
    UINT src = inst.M46.r3;

    // NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
    if (src > 63)
        fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
    else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
    if (fault == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, inst.M46.r1, padr);
    else return fault;
}

IA64FAULT priv_tak(VCPU *vcpu, INST64 inst)
{
    UINT64 key;
    UINT fault;
    UINT src = inst.M46.r3;

    // NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
    if (src > 63)
        fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
    else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
    if (fault == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, inst.M46.r1, key);
    else return fault;
}
/************************************
 * Insert translation register/cache
 ************************************/
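/*
 * For itc/itr the guest first loads cr.itir (page size, key) and cr.ifa
 * (virtual address) and then issues the insert, so the emulations below read
 * both registers back from the vcpu and refuse the insert if either read
 * faults.  Guest-side sequence (illustrative only):
 *     mov cr.itir=r16 ;; mov cr.ifa=r17 ;; itc.d r18
 */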
IA64FAULT priv_itr_d(VCPU *vcpu, INST64 inst)
{
    UINT64 fault, itir, ifa, pte, slot;

    //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    pte = vcpu_get_gr(vcpu,inst.M42.r2);
    slot = vcpu_get_gr(vcpu,inst.M42.r3);

    return (vcpu_itr_d(vcpu,slot,pte,itir,ifa));
}

IA64FAULT priv_itr_i(VCPU *vcpu, INST64 inst)
{
    UINT64 fault, itir, ifa, pte, slot;

    //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    pte = vcpu_get_gr(vcpu,inst.M42.r2);
    slot = vcpu_get_gr(vcpu,inst.M42.r3);

    return (vcpu_itr_i(vcpu,slot,pte,itir,ifa));
}

IA64FAULT priv_itc_d(VCPU *vcpu, INST64 inst)
{
    UINT64 fault, itir, ifa, pte;

    //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    pte = vcpu_get_gr(vcpu,inst.M41.r2);

    return (vcpu_itc_d(vcpu,pte,itir,ifa));
}

IA64FAULT priv_itc_i(VCPU *vcpu, INST64 inst)
{
    UINT64 fault, itir, ifa, pte;

    //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    pte = vcpu_get_gr(vcpu,inst.M41.r2);

    return (vcpu_itc_i(vcpu,pte,itir,ifa));
}
/*************************************
 * Moves to semi-privileged registers
 *************************************/
IA64FAULT priv_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
{
    // I27 and M30 are identical for these fields
    UINT64 ar3 = inst.M30.ar3;
    UINT64 imm = vcpu_get_gr(vcpu,inst.M30.imm);
    return (vcpu_set_ar(vcpu,ar3,imm));
}

IA64FAULT priv_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
{
    // I26 and M29 are identical for these fields
    UINT64 ar3 = inst.M29.ar3;

    if (inst.M29.r2 > 63 && inst.M29.ar3 < 8) { // privified mov from kr
        UINT64 val;
        if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
            return vcpu_set_gr(vcpu, inst.M29.r2-64, val);
        else return IA64_ILLOP_FAULT;
    }
    else {
        UINT64 r2 = vcpu_get_gr(vcpu,inst.M29.r2);
        return (vcpu_set_ar(vcpu,ar3,r2));
    }
}
/********************************
 * Moves to privileged registers
 ********************************/
IA64FAULT priv_mov_to_pkr(VCPU *vcpu, INST64 inst)
{
    UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
    UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
    return (vcpu_set_pkr(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_rr(VCPU *vcpu, INST64 inst)
{
    UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
    UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
    return (vcpu_set_rr(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_dbr(VCPU *vcpu, INST64 inst)
{
    UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
    UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
    return (vcpu_set_dbr(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_ibr(VCPU *vcpu, INST64 inst)
{
    UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
    UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
    return (vcpu_set_ibr(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_pmc(VCPU *vcpu, INST64 inst)
{
    UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
    UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
    return (vcpu_set_pmc(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_pmd(VCPU *vcpu, INST64 inst)
{
    UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
    UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
    return (vcpu_set_pmd(vcpu,r3,r2));
}

unsigned long to_cr_cnt[128] = { 0 };
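/*
 * The cr3 field of the M32/M33 mov-to/from-CR forms is the architectural
 * control-register number, so the switches below and the to_cr_cnt[]/
 * from_cr_cnt[] arrays are indexed by CR number: cr.dcr is CR0, cr.pta is
 * CR8, cr.ipsr is CR16, cr.lid is CR64, cr.lrr1 is CR81, and so on (this
 * matches the cr_str[] table in the instrumentation section below).
 */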
IA64FAULT priv_mov_to_cr(VCPU *vcpu, INST64 inst)
{
    UINT64 val = vcpu_get_gr(vcpu, inst.M32.r2);
    to_cr_cnt[inst.M32.cr3]++;
    switch (inst.M32.cr3) {
        case 0: return vcpu_set_dcr(vcpu,val);
        case 1: return vcpu_set_itm(vcpu,val);
        case 2: return vcpu_set_iva(vcpu,val);
        case 8: return vcpu_set_pta(vcpu,val);
        case 16:return vcpu_set_ipsr(vcpu,val);
        case 17:return vcpu_set_isr(vcpu,val);
        case 19:return vcpu_set_iip(vcpu,val);
        case 20:return vcpu_set_ifa(vcpu,val);
        case 21:return vcpu_set_itir(vcpu,val);
        case 22:return vcpu_set_iipa(vcpu,val);
        case 23:return vcpu_set_ifs(vcpu,val);
        case 24:return vcpu_set_iim(vcpu,val);
        case 25:return vcpu_set_iha(vcpu,val);
        case 64:return vcpu_set_lid(vcpu,val);
        case 65:return IA64_ILLOP_FAULT;
        case 66:return vcpu_set_tpr(vcpu,val);
        case 67:return vcpu_set_eoi(vcpu,val);
        case 68:return IA64_ILLOP_FAULT;
        case 69:return IA64_ILLOP_FAULT;
        case 70:return IA64_ILLOP_FAULT;
        case 71:return IA64_ILLOP_FAULT;
        case 72:return vcpu_set_itv(vcpu,val);
        case 73:return vcpu_set_pmv(vcpu,val);
        case 74:return vcpu_set_cmcv(vcpu,val);
        case 80:return vcpu_set_lrr0(vcpu,val);
        case 81:return vcpu_set_lrr1(vcpu,val);
        default: return IA64_ILLOP_FAULT;
    }
}
IA64FAULT priv_rsm(VCPU *vcpu, INST64 inst)
{
    UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
    return vcpu_reset_psr_sm(vcpu,imm24);
}

IA64FAULT priv_ssm(VCPU *vcpu, INST64 inst)
{
    UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
    return vcpu_set_psr_sm(vcpu,imm24);
}
/**
 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
 */
IA64FAULT priv_mov_to_psr(VCPU *vcpu, INST64 inst)
{
    UINT64 val = vcpu_get_gr(vcpu, inst.M35.r2);
    return vcpu_set_psr_l(vcpu,val);
}
/**********************************
 * Moves from privileged registers
 **********************************/
IA64FAULT priv_mov_from_rr(VCPU *vcpu, INST64 inst)
{
    UINT64 val;
    IA64FAULT fault;

    if (inst.M43.r1 > 63) { // privified mov from cpuid
        fault = vcpu_get_cpuid(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
        if (fault == IA64_NO_FAULT)
            return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
    }
    else {
        fault = vcpu_get_rr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
        if (fault == IA64_NO_FAULT)
            return vcpu_set_gr(vcpu, inst.M43.r1, val);
    }
    return fault;
}

IA64FAULT priv_mov_from_pkr(VCPU *vcpu, INST64 inst)
{
    UINT64 val;
    IA64FAULT fault;

    fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
    if (fault == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, inst.M43.r1, val);
    else return fault;
}

IA64FAULT priv_mov_from_dbr(VCPU *vcpu, INST64 inst)
{
    UINT64 val;
    IA64FAULT fault;

    fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
    if (fault == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, inst.M43.r1, val);
    else return fault;
}

IA64FAULT priv_mov_from_ibr(VCPU *vcpu, INST64 inst)
{
    UINT64 val;
    IA64FAULT fault;

    fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
    if (fault == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, inst.M43.r1, val);
    else return fault;
}

IA64FAULT priv_mov_from_pmc(VCPU *vcpu, INST64 inst)
{
    UINT64 val;
    IA64FAULT fault;

    if (inst.M43.r1 > 63) { // privified mov from pmd
        fault = vcpu_get_pmd(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
        if (fault == IA64_NO_FAULT)
            return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
    }
    else {
        fault = vcpu_get_pmc(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
        if (fault == IA64_NO_FAULT)
            return vcpu_set_gr(vcpu, inst.M43.r1, val);
    }
    return fault;
}

unsigned long from_cr_cnt[128] = { 0 };
#define cr_get(cr) \
    ((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
        vcpu_set_gr(vcpu, tgt, val) : fault;
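/*
 * Example expansion (illustrative): "return cr_get(ifa);" becomes
 *     return ((fault = vcpu_get_ifa(vcpu,&val)) == IA64_NO_FAULT) ?
 *         vcpu_set_gr(vcpu, tgt, val) : fault;
 * i.e. read the CR into val and, on success, copy it into the target GR.
 */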
IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
{
    UINT64 tgt = inst.M33.r1;
    UINT64 val;
    IA64FAULT fault;

    from_cr_cnt[inst.M33.cr3]++;
    switch (inst.M33.cr3) {
        case 0: return cr_get(dcr);
        case 1: return cr_get(itm);
        case 2: return cr_get(iva);
        case 8: return cr_get(pta);
        case 16:return cr_get(ipsr);
        case 17:return cr_get(isr);
        case 19:return cr_get(iip);
        case 20:return cr_get(ifa);
        case 21:return cr_get(itir);
        case 22:return cr_get(iipa);
        case 23:return cr_get(ifs);
        case 24:return cr_get(iim);
        case 25:return cr_get(iha);
        case 64:return cr_get(lid);
        case 65:return cr_get(ivr);
        case 66:return cr_get(tpr);
        case 67:return vcpu_set_gr(vcpu,tgt,0L);
        case 68:return cr_get(irr0);
        case 69:return cr_get(irr1);
        case 70:return cr_get(irr2);
        case 71:return cr_get(irr3);
        case 72:return cr_get(itv);
        case 73:return cr_get(pmv);
        case 74:return cr_get(cmcv);
        case 80:return cr_get(lrr0);
        case 81:return cr_get(lrr1);
        default: return IA64_ILLOP_FAULT;
    }
    return IA64_ILLOP_FAULT;
}
IA64FAULT priv_mov_from_psr(VCPU *vcpu, INST64 inst)
{
    UINT64 tgt = inst.M33.r1;
    UINT64 val;
    IA64FAULT fault;

    if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, tgt, val);
    else return fault;
}
/**************************************************************************
Privileged operation decode and dispatch routines
**************************************************************************/
IA64_SLOT_TYPE slot_types[0x20][3] = {
    {M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
    {M, I, ILLEGAL}, {M, I, ILLEGAL},
    {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
    {M, M, I}, {M, M, I}, {M, M, I}, {M, M, I},
    {M, F, I}, {M, F, I},
    {M, M, F}, {M, M, F},
    {M, I, B}, {M, I, B},
    {M, B, B}, {M, B, B},
    {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
    {B, B, B}, {B, B, B},
    {M, M, B}, {M, M, B},
    {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
    {M, F, B}, {M, F, B},
    {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL}
};
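/*
 * slot_types is indexed by the 5-bit bundle template field and then by slot
 * number.  For example, templates 0x10 and 0x11 (the MIB forms used by
 * build_hypercall_bundle above) map to {M, I, B}, so slot 2 of such a bundle
 * is decoded as a branch-unit instruction.
 */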
// pointer to privileged emulation function
typedef IA64FAULT (*PPEFCN)(VCPU *vcpu, INST64 inst);

PPEFCN Mpriv_funcs[64] = {
    priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
    priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
    0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
    priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
    priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr, priv_mov_from_pkr,
    priv_mov_from_pmc, 0, 0, 0,
    0, 0, 0, 0,
    0, 0, priv_tpa, priv_tak,
    0, 0, 0, 0,
    priv_mov_from_cr, priv_mov_from_psr, 0, 0,
    0, 0, 0, 0,
    priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
    0, 0, 0, 0,
    priv_ptc_e, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0
};
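/*
 * Mpriv_funcs is indexed by the x6 opcode extension of M-unit system
 * instructions (major opcode 1); priv_handle_op() below looks up x6 here and
 * bumps the matching privcnt.Mpriv_cnt[] slot, which is what the Mpriv_str[]
 * names in the instrumentation section line up with.
 */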
struct {
    unsigned long mov_to_ar_imm;
    unsigned long mov_to_ar_reg;
    unsigned long mov_from_ar;
    unsigned long ssm;
    unsigned long rsm;
    unsigned long rfi;
    unsigned long bsw0;
    unsigned long bsw1;
    unsigned long cover;
    unsigned long fc;
    unsigned long cpuid;
    unsigned long Mpriv_cnt[64];
} privcnt = { 0 };

unsigned long privop_trace = 0;
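/*
 * priv_handle_op() below re-fetches the faulting bundle and extracts the
 * 41-bit instruction slot selected by ipsr.ri (0, 1 or 2).  Slot 1 straddles
 * the two 64-bit bundle words, hence the combined shift:
 *     slot 0 = bits  45:5   of i64[0]
 *     slot 1 = bits  86:46  (high bits of i64[0] | low bits of i64[1] << 18)
 *     slot 2 = bits 127:87  of i64[1]
 */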
IA64FAULT
priv_handle_op(VCPU *vcpu, REGS *regs, int privlvl)
{
    IA64_BUNDLE bundle;
    IA64_BUNDLE __get_domain_bundle(UINT64);
    int slot;
    IA64_SLOT_TYPE slot_type;
    INST64 inst;
    PPEFCN pfunc;
    unsigned long ipsr = regs->cr_ipsr;
    UINT64 iip = regs->cr_iip;
    int x6;

    // make a local copy of the bundle containing the privop
#if 1
    bundle = __get_domain_bundle(iip);
    if (!bundle.i64[0] && !bundle.i64[1])
#else
    if (__copy_from_user(&bundle,iip,sizeof(bundle)))
#endif
    {
        //printf("*** priv_handle_op: privop bundle @%p not mapped, retrying\n",iip);
        return vcpu_force_data_miss(vcpu,regs->cr_iip);
    }
#if 0
    if (iip==0xa000000100001820) {
        static int firstpagefault = 1;
        if (firstpagefault) {
            printf("*** First time to domain page fault!\n"); firstpagefault=0;
        }
    }
#endif
    if (privop_trace) {
        static long i = 400;
        //if (i > 0) printf("privop @%p\n",iip);
        if (i > 0) printf("priv_handle_op: @%p, itc=%lx, itm=%lx\n",
            iip,ia64_get_itc(),ia64_get_itm());
        i--;
    }
    slot = ((struct ia64_psr *)&ipsr)->ri;
    if (!slot) inst.inst = (bundle.i64[0]>>5) & MASK_41;
    else if (slot == 1)
        inst.inst = ((bundle.i64[0]>>46) | bundle.i64[1]<<18) & MASK_41;
    else if (slot == 2) inst.inst = (bundle.i64[1]>>23) & MASK_41;
    else printf("priv_handle_op: illegal slot: %d\n", slot);

    slot_type = slot_types[bundle.template][slot];
    if (priv_verbose) {
        printf("priv_handle_op: checking bundle at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
            iip, (UINT64)inst.inst, slot, slot_type);
    }
    if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
        // break instr for privified cover
    }
    else if (privlvl != 2) return (IA64_ILLOP_FAULT);
    switch (slot_type) {
        case M:
            if (inst.generic.major == 0) {
#if 0
                if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
                    privcnt.cover++;
                    return priv_cover(vcpu,inst);
                }
#endif
                if (inst.M29.x3 != 0) break;
                if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
                    privcnt.mov_to_ar_imm++;
                    return priv_mov_to_ar_imm(vcpu,inst);
                }
                if (inst.M44.x4 == 6) {
                    privcnt.ssm++;
                    return priv_ssm(vcpu,inst);
                }
                if (inst.M44.x4 == 7) {
                    privcnt.rsm++;
                    return priv_rsm(vcpu,inst);
                }
                break;
            }
            else if (inst.generic.major != 1) break;
            x6 = inst.M29.x6;
            if (x6 == 0x2a) {
                if (inst.M29.r2 > 63 && inst.M29.ar3 < 8)
                    privcnt.mov_from_ar++; // privified mov from kr
                else privcnt.mov_to_ar_reg++;
                return priv_mov_to_ar_reg(vcpu,inst);
            }
            if (inst.M29.x3 != 0) break;
            if (!(pfunc = Mpriv_funcs[x6])) break;
            if (x6 == 0x1e || x6 == 0x1f) { // tpa or tak are "special"
                if (inst.M46.r3 > 63) {
                    if (x6 == 0x1e) x6 = 0x1b;
                    else x6 = 0x1a;
                }
            }
            if (x6 == 52 && inst.M28.r3 > 63)
                privcnt.fc++;
            else if (x6 == 16 && inst.M43.r3 > 63)
                privcnt.cpuid++;
            else privcnt.Mpriv_cnt[x6]++;
            return (*pfunc)(vcpu,inst);
            break;
        case B:
            if (inst.generic.major != 0) break;
            if (inst.B8.x6 == 0x08) {
                IA64FAULT fault;
                privcnt.rfi++;
                fault = priv_rfi(vcpu,inst);
                if (fault == IA64_NO_FAULT) fault = IA64_RFI_IN_PROGRESS;
                return fault;
            }
            if (inst.B8.x6 == 0x0c) {
                privcnt.bsw0++;
                return priv_bsw0(vcpu,inst);
            }
            if (inst.B8.x6 == 0x0d) {
                privcnt.bsw1++;
                return priv_bsw1(vcpu,inst);
            }
            if (inst.B8.x6 == 0x0) { // break instr for privified cover
                privcnt.cover++;
                return priv_cover(vcpu,inst);
            }
            break;
        case I:
            if (inst.generic.major != 0) break;
#if 0
            if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
                privcnt.cover++;
                return priv_cover(vcpu,inst);
            }
#endif
            if (inst.I26.x3 != 0) break; // I26.x3 == I27.x3
            if (inst.I26.x6 == 0x2a) {
                if (inst.I26.r2 > 63 && inst.I26.ar3 < 8)
                    privcnt.mov_from_ar++; // privified mov from kr
                else privcnt.mov_to_ar_reg++;
                return priv_mov_to_ar_reg(vcpu,inst);
            }
            if (inst.I27.x6 == 0x0a) {
                privcnt.mov_to_ar_imm++;
                return priv_mov_to_ar_imm(vcpu,inst);
            }
            break;
        default:
            break;
    }
695 //printf("We who are about do die salute you\n");
696 printf("handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d), ipsr=%p\n",
697 iip, (UINT64)inst.inst, slot, slot_type, ipsr);
698 //printf("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
699 //thread_mozambique("privop fault\n");
700 return (IA64_ILLOP_FAULT);
701 }
/** Emulate a privileged operation.
 *
 * This should probably return 0 on success and the "trap number"
 * (e.g. illegal operation for bad register, priv op for an
 * instruction that isn't allowed, etc.) on "failure".
 *
 * @param vcpu virtual cpu
 * @param regs guest register frame at the time of the fault
 * @param isr interruption status register value (isr.code selects the cause)
 * @return fault
 */
IA64FAULT
priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
{
    IA64FAULT fault;
    UINT64 ipsr = regs->cr_ipsr;
    UINT64 isrcode = (isr >> 4) & 0xf;
    int privlvl;

    // handle privops masked as illops? and breaks (6)
    if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
        printf("priv_emulate: isrcode != 0, 1, 2 or 6\n");
        printf("priv_emulate: returning ILLOP, not implemented!\n");
        while (1);
        return IA64_ILLOP_FAULT;
    }
    //if (isrcode != 1 && isrcode != 2) return 0;
    vcpu_set_regs(vcpu,regs);
    privlvl = (ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
    // it's OK for a privified-cover to be executed in user-land
    fault = priv_handle_op(vcpu,regs,privlvl);
    if ((fault == IA64_NO_FAULT) || (fault == IA64_EXTINT_VECTOR)) { // success!!
        // update iip/ipsr to point to the next instruction
        (void)vcpu_increment_iip(vcpu);
    }
    if (fault == IA64_ILLOP_FAULT)
        printf("priv_emulate: priv_handle_op fails, isr=%p\n",isr);
    return fault;
}
// FIXME: Move these to include/public/arch-ia64?
#define HYPERPRIVOP_RFI     0x1
#define HYPERPRIVOP_RSM_DT  0x2
#define HYPERPRIVOP_SSM_DT  0x3
#define HYPERPRIVOP_COVER   0x4
#define HYPERPRIVOP_ITC_D   0x5
#define HYPERPRIVOP_ITC_I   0x6
#define HYPERPRIVOP_SSM_I   0x7
#define HYPERPRIVOP_GET_IVR 0x8
#define HYPERPRIVOP_GET_TPR 0x9
#define HYPERPRIVOP_SET_TPR 0xa
#define HYPERPRIVOP_EOI     0xb
#define HYPERPRIVOP_SET_ITM 0xc
#define HYPERPRIVOP_THASH   0xd
#define HYPERPRIVOP_PTC_GA  0xe
#define HYPERPRIVOP_ITR_D   0xf
#define HYPERPRIVOP_GET_RR  0x10
#define HYPERPRIVOP_SET_RR  0x11
#define HYPERPRIVOP_MAX     0x11
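/*
 * A guest invokes one of these hyperprivops with a "break" instruction whose
 * immediate is the hyperprivop number; ia64_hyperprivop() below receives that
 * immediate in iim together with the register frame, and most operands travel
 * in r8/r9.  Illustrative guest-side stub (assumes the break-based calling
 * convention described above):
 *     break 0x4      // HYPERPRIVOP_COVER
 */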
char *hyperpriv_str[HYPERPRIVOP_MAX+1] = {
    0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i", "ssm.i",
    "=ivr", "=tpr", "tpr=", "eoi", "itm=", "thash", "ptc.ga", "itr.d",
    "=rr", "rr=",
    0
};
unsigned long slow_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
unsigned long fast_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
/* hyperprivops are generally executed in assembly (with physical psr.ic off)
 * so this code is primarily used for debugging them */
int
ia64_hyperprivop(unsigned long iim, REGS *regs)
{
    struct vcpu *v = (struct vcpu *) current;
    INST64 inst;
    UINT64 val;
    UINT64 itir, ifa;

    // FIXME: Handle faults appropriately for these
    if (!iim || iim > HYPERPRIVOP_MAX) {
        printf("bad hyperprivop; ignored\n");
        printf("iim=%ld, iip=%p\n",iim,regs->cr_iip);
        return 1;
    }
    slow_hyperpriv_cnt[iim]++;
    switch(iim) {
        case HYPERPRIVOP_RFI:
            (void)vcpu_rfi(v);
            return 0; // don't update iip
        case HYPERPRIVOP_RSM_DT:
            (void)vcpu_reset_psr_dt(v);
            return 1;
        case HYPERPRIVOP_SSM_DT:
            (void)vcpu_set_psr_dt(v);
            return 1;
        case HYPERPRIVOP_COVER:
            (void)vcpu_cover(v);
            return 1;
        case HYPERPRIVOP_ITC_D:
            (void)vcpu_get_itir(v,&itir);
            (void)vcpu_get_ifa(v,&ifa);
            (void)vcpu_itc_d(v,regs->r8,itir,ifa);
            return 1;
        case HYPERPRIVOP_ITC_I:
            (void)vcpu_get_itir(v,&itir);
            (void)vcpu_get_ifa(v,&ifa);
            (void)vcpu_itc_i(v,regs->r8,itir,ifa);
            return 1;
        case HYPERPRIVOP_SSM_I:
            (void)vcpu_set_psr_i(v);
            return 1;
        case HYPERPRIVOP_GET_IVR:
            (void)vcpu_get_ivr(v,&val);
            regs->r8 = val;
            return 1;
        case HYPERPRIVOP_GET_TPR:
            (void)vcpu_get_tpr(v,&val);
            regs->r8 = val;
            return 1;
        case HYPERPRIVOP_SET_TPR:
            (void)vcpu_set_tpr(v,regs->r8);
            return 1;
        case HYPERPRIVOP_EOI:
            (void)vcpu_set_eoi(v,0L);
            return 1;
        case HYPERPRIVOP_SET_ITM:
            (void)vcpu_set_itm(v,regs->r8);
            return 1;
        case HYPERPRIVOP_THASH:
            (void)vcpu_thash(v,regs->r8,&val);
            regs->r8 = val;
            return 1;
        case HYPERPRIVOP_PTC_GA:
            (void)vcpu_ptc_ga(v,regs->r8,(1L << ((regs->r9 & 0xfc) >> 2)));
            return 1;
        case HYPERPRIVOP_ITR_D:
            (void)vcpu_get_itir(v,&itir);
            (void)vcpu_get_ifa(v,&ifa);
            (void)vcpu_itr_d(v,regs->r8,regs->r9,itir,ifa);
            return 1;
        case HYPERPRIVOP_GET_RR:
            (void)vcpu_get_rr(v,regs->r8,&val);
            regs->r8 = val;
            return 1;
        case HYPERPRIVOP_SET_RR:
            (void)vcpu_set_rr(v,regs->r8,regs->r9);
            return 1;
    }
    return 0;
}
/**************************************************************************
Privileged operation instrumentation routines
**************************************************************************/
char *Mpriv_str[64] = {
    "mov_to_rr", "mov_to_dbr", "mov_to_ibr", "mov_to_pkr",
    "mov_to_pmc", "mov_to_pmd", "<0x06>", "<0x07>",
    "<0x08>", "ptc_l", "ptc_g", "ptc_ga",
    "ptr_d", "ptr_i", "itr_d", "itr_i",
    "mov_from_rr", "mov_from_dbr", "mov_from_ibr", "mov_from_pkr",
    "mov_from_pmc", "<0x15>", "<0x16>", "<0x17>",
    "<0x18>", "<0x19>", "privified-thash", "privified-ttag",
    "<0x1c>", "<0x1d>", "tpa", "tak",
    "<0x20>", "<0x21>", "<0x22>", "<0x23>",
    "mov_from_cr", "mov_from_psr", "<0x26>", "<0x27>",
    "<0x28>", "<0x29>", "<0x2a>", "<0x2b>",
    "mov_to_cr", "mov_to_psr", "itc_d", "itc_i",
    "<0x30>", "<0x31>", "<0x32>", "<0x33>",
    "ptc_e", "<0x35>", "<0x36>", "<0x37>",
    "<0x38>", "<0x39>", "<0x3a>", "<0x3b>",
    "<0x3c>", "<0x3d>", "<0x3e>", "<0x3f>"
};
#define RS "Rsvd"
char *cr_str[128] = {
    "dcr","itm","iva",RS,RS,RS,RS,RS,
    "pta",RS,RS,RS,RS,RS,RS,RS,
    "ipsr","isr",RS,"iip","ifa","itir","iipa","ifs",
    "iim","iha",RS,RS,RS,RS,RS,RS,
    RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
    RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
    "lid","ivr","tpr","eoi","irr0","irr1","irr2","irr3",
    "itv","pmv","cmcv",RS,RS,RS,RS,RS,
    "lrr0","lrr1",RS,RS,RS,RS,RS,RS,
    RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
    RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
    RS,RS,RS,RS,RS,RS,RS,RS
};
// FIXME: should use snprintf to ensure no buffer overflow
int dump_privop_counts(char *buf)
{
    int i, j;
    UINT64 sum = 0;
    char *s = buf;

    // this is ugly and should probably produce sorted output
    // but it will have to do for now
    sum += privcnt.mov_to_ar_imm; sum += privcnt.mov_to_ar_reg;
    sum += privcnt.ssm; sum += privcnt.rsm;
    sum += privcnt.rfi; sum += privcnt.bsw0;
    sum += privcnt.bsw1; sum += privcnt.cover;
    for (i=0; i < 64; i++) sum += privcnt.Mpriv_cnt[i];
    s += sprintf(s,"Privop statistics: (Total privops: %ld)\n",sum);
    if (privcnt.mov_to_ar_imm)
        s += sprintf(s,"%10d %s [%d%%]\n", privcnt.mov_to_ar_imm,
            "mov_to_ar_imm", (privcnt.mov_to_ar_imm*100L)/sum);
    if (privcnt.mov_to_ar_reg)
        s += sprintf(s,"%10d %s [%d%%]\n", privcnt.mov_to_ar_reg,
            "mov_to_ar_reg", (privcnt.mov_to_ar_reg*100L)/sum);
    if (privcnt.mov_from_ar)
        s += sprintf(s,"%10d %s [%d%%]\n", privcnt.mov_from_ar,
            "privified-mov_from_ar", (privcnt.mov_from_ar*100L)/sum);
    if (privcnt.ssm)
        s += sprintf(s,"%10d %s [%d%%]\n", privcnt.ssm,
            "ssm", (privcnt.ssm*100L)/sum);
    if (privcnt.rsm)
        s += sprintf(s,"%10d %s [%d%%]\n", privcnt.rsm,
            "rsm", (privcnt.rsm*100L)/sum);
    if (privcnt.rfi)
        s += sprintf(s,"%10d %s [%d%%]\n", privcnt.rfi,
            "rfi", (privcnt.rfi*100L)/sum);
    if (privcnt.bsw0)
        s += sprintf(s,"%10d %s [%d%%]\n", privcnt.bsw0,
            "bsw0", (privcnt.bsw0*100L)/sum);
    if (privcnt.bsw1)
        s += sprintf(s,"%10d %s [%d%%]\n", privcnt.bsw1,
            "bsw1", (privcnt.bsw1*100L)/sum);
    if (privcnt.cover)
        s += sprintf(s,"%10d %s [%d%%]\n", privcnt.cover,
            "cover", (privcnt.cover*100L)/sum);
    if (privcnt.fc)
        s += sprintf(s,"%10d %s [%d%%]\n", privcnt.fc,
            "privified-fc", (privcnt.fc*100L)/sum);
    if (privcnt.cpuid)
        s += sprintf(s,"%10d %s [%d%%]\n", privcnt.cpuid,
            "privified-getcpuid", (privcnt.cpuid*100L)/sum);
    for (i=0; i < 64; i++) if (privcnt.Mpriv_cnt[i]) {
        if (!Mpriv_str[i]) s += sprintf(s,"PRIVSTRING NULL!!\n");
        else s += sprintf(s,"%10d %s [%d%%]\n", privcnt.Mpriv_cnt[i],
            Mpriv_str[i], (privcnt.Mpriv_cnt[i]*100L)/sum);
        if (i == 0x24) { // mov from CR
            s += sprintf(s," [");
            for (j=0; j < 128; j++) if (from_cr_cnt[j]) {
                if (!cr_str[j])
                    s += sprintf(s,"PRIVSTRING NULL!!\n");
                s += sprintf(s,"%s(%d),",cr_str[j],from_cr_cnt[j]);
            }
            s += sprintf(s,"]\n");
        }
        else if (i == 0x2c) { // mov to CR
            s += sprintf(s," [");
            for (j=0; j < 128; j++) if (to_cr_cnt[j]) {
                if (!cr_str[j])
                    s += sprintf(s,"PRIVSTRING NULL!!\n");
                s += sprintf(s,"%s(%d),",cr_str[j],to_cr_cnt[j]);
            }
            s += sprintf(s,"]\n");
        }
    }
    return s - buf;
}
int zero_privop_counts(char *buf)
{
    int i, j;
    char *s = buf;

    // this is ugly and should probably produce sorted output
    // but it will have to do for now
    privcnt.mov_to_ar_imm = 0; privcnt.mov_to_ar_reg = 0;
    privcnt.mov_from_ar = 0;
    privcnt.ssm = 0; privcnt.rsm = 0;
    privcnt.rfi = 0; privcnt.bsw0 = 0;
    privcnt.bsw1 = 0; privcnt.cover = 0;
    privcnt.fc = 0; privcnt.cpuid = 0;
    for (i=0; i < 64; i++) privcnt.Mpriv_cnt[i] = 0;
    for (j=0; j < 128; j++) from_cr_cnt[j] = 0;
    for (j=0; j < 128; j++) to_cr_cnt[j] = 0;
    s += sprintf(s,"All privop statistics zeroed\n");
    return s - buf;
}
#ifdef PRIVOP_ADDR_COUNT

extern struct privop_addr_count privop_addr_counter[];

void privop_count_addr(unsigned long iip, int inst)
{
    struct privop_addr_count *v = &privop_addr_counter[inst];
    int i;

    for (i = 0; i < PRIVOP_COUNT_NADDRS; i++) {
        if (!v->addr[i]) { v->addr[i] = iip; v->count[i]++; return; }
        else if (v->addr[i] == iip) { v->count[i]++; return; }
    }
    v->overflow++;
}

int dump_privop_addrs(char *buf)
{
    int i,j;
    char *s = buf;
    s += sprintf(s,"Privop addresses:\n");
    for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
        struct privop_addr_count *v = &privop_addr_counter[i];
        s += sprintf(s,"%s:\n",v->instname);
        for (j = 0; j < PRIVOP_COUNT_NADDRS; j++) {
            if (!v->addr[j]) break;
            s += sprintf(s," @%p #%ld\n",v->addr[j],v->count[j]);
        }
        if (v->overflow)
            s += sprintf(s," other #%ld\n",v->overflow);
    }
    return s - buf;
}

void zero_privop_addrs(void)
{
    int i,j;
    for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
        struct privop_addr_count *v = &privop_addr_counter[i];
        for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
            v->addr[j] = v->count[j] = 0;
        v->overflow = 0;
    }
}
#endif
extern unsigned long dtlb_translate_count;
extern unsigned long tr_translate_count;
extern unsigned long phys_translate_count;
extern unsigned long vhpt_translate_count;
extern unsigned long lazy_cover_count;
extern unsigned long idle_when_pending;
extern unsigned long pal_halt_light_count;
extern unsigned long context_switch_count;
int dump_misc_stats(char *buf)
{
    char *s = buf;
    s += sprintf(s,"Virtual TR translations: %d\n",tr_translate_count);
    s += sprintf(s,"Virtual VHPT translations: %d\n",vhpt_translate_count);
    s += sprintf(s,"Virtual DTLB translations: %d\n",dtlb_translate_count);
    s += sprintf(s,"Physical translations: %d\n",phys_translate_count);
    s += sprintf(s,"Idle when pending: %d\n",idle_when_pending);
    s += sprintf(s,"PAL_HALT_LIGHT (no pending): %d\n",pal_halt_light_count);
    s += sprintf(s,"context switches: %d\n",context_switch_count);
    s += sprintf(s,"Lazy covers: %d\n",lazy_cover_count);
    return s - buf;
}

void zero_misc_stats(void)
{
    dtlb_translate_count = 0;
    tr_translate_count = 0;
    phys_translate_count = 0;
    vhpt_translate_count = 0;
    lazy_cover_count = 0;
    pal_halt_light_count = 0;
    idle_when_pending = 0;
    context_switch_count = 0;
}
int dump_hyperprivop_counts(char *buf)
{
    int i;
    char *s = buf;
    unsigned long total = 0;
    for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += slow_hyperpriv_cnt[i];
    s += sprintf(s,"Slow hyperprivops (total %d):\n",total);
    for (i = 1; i <= HYPERPRIVOP_MAX; i++)
        if (slow_hyperpriv_cnt[i])
            s += sprintf(s,"%10d %s\n",
                slow_hyperpriv_cnt[i], hyperpriv_str[i]);
    total = 0;
    for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += fast_hyperpriv_cnt[i];
    s += sprintf(s,"Fast hyperprivops (total %d):\n",total);
    for (i = 1; i <= HYPERPRIVOP_MAX; i++)
        if (fast_hyperpriv_cnt[i])
            s += sprintf(s,"%10d %s\n",
                fast_hyperpriv_cnt[i], hyperpriv_str[i]);
    return s - buf;
}

void zero_hyperprivop_counts(void)
{
    int i;
    for (i = 0; i <= HYPERPRIVOP_MAX; i++) slow_hyperpriv_cnt[i] = 0;
    for (i = 0; i <= HYPERPRIVOP_MAX; i++) fast_hyperpriv_cnt[i] = 0;
}
#define TMPBUFLEN 8*1024
int dump_privop_counts_to_user(char __user *ubuf, int len)
{
    char buf[TMPBUFLEN];
    int n = dump_privop_counts(buf);

    n += dump_hyperprivop_counts(buf + n);
    n += dump_reflect_counts(buf + n);
#ifdef PRIVOP_ADDR_COUNT
    n += dump_privop_addrs(buf + n);
#endif
    n += dump_vhpt_stats(buf + n);
    n += dump_misc_stats(buf + n);
    if (len < TMPBUFLEN) return -1;
    if (__copy_to_user(ubuf,buf,n)) return -1;
    return n;
}

int zero_privop_counts_to_user(char __user *ubuf, int len)
{
    char buf[TMPBUFLEN];
    int n = zero_privop_counts(buf);

    zero_hyperprivop_counts();
#ifdef PRIVOP_ADDR_COUNT
    zero_privop_addrs();
#endif
    zero_vhpt_stats();
    zero_misc_stats();
    zero_reflect_counts();
    if (len < TMPBUFLEN) return -1;
    if (__copy_to_user(ubuf,buf,n)) return -1;
    return n;
}