ia64/xen-unstable

xen/arch/ia64/privop.c @ 4836:a22acf8b8127

bitkeeper revision 1.1389.9.5 (4280ce45ySNP-9TH6XPIhkIZcGy1LA)

Fix bugs in hyperprivop counting
author:   djm@kirby.fc.hp.com
date:     Tue May 10 15:07:49 2005 +0000
parents:  1bc0400523f0
children: f71cef640151
/*
 * Privileged operation "API" handling functions.
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/processor.h>
#include <asm/delay.h>	// Debug only
//#include <debug.h>

long priv_verbose=0;

/**************************************************************************
Hypercall bundle creation
**************************************************************************/

void build_hypercall_bundle(UINT64 *imva, UINT64 brkimm, UINT64 hypnum, UINT64 ret)
{
	INST64_A5 slot0;
	INST64_I19 slot1;
	INST64_B4 slot2;
	IA64_BUNDLE bundle;

	// slot0: mov r2 = hypnum (low 20 bits)
	slot0.inst = 0;
	slot0.qp = 0; slot0.r1 = 2; slot0.r3 = 0; slot0.major = 0x9;
	slot0.imm7b = hypnum; slot0.imm9d = hypnum >> 7;
	slot0.imm5c = hypnum >> 16; slot0.s = 0;
	// slot1: break brkimm
	slot1.inst = 0;
	slot1.qp = 0; slot1.x6 = 0; slot1.x3 = 0; slot1.major = 0x0;
	slot1.imm20 = brkimm; slot1.i = brkimm >> 20;
	// if ret slot2: br.ret.sptk.many rp
	// else  slot2: br.cond.sptk.many rp
	slot2.inst = 0; slot2.qp = 0; slot2.p = 1; slot2.b2 = 0;
	slot2.wh = 0; slot2.d = 0; slot2.major = 0x0;
	if (ret) {
		slot2.btype = 4; slot2.x6 = 0x21;
	}
	else {
		slot2.btype = 0; slot2.x6 = 0x20;
	}

	bundle.i64[0] = 0; bundle.i64[1] = 0;
	bundle.template = 0x11;
	bundle.slot0 = slot0.inst; bundle.slot2 = slot2.inst;
	bundle.slot1a = slot1.inst; bundle.slot1b = slot1.inst >> 18;

	*imva++ = bundle.i64[0]; *imva = bundle.i64[1];
}
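
/*
 * For reference, the bundle built above uses template 0x11 (an M, I, B
 * bundle; see slot_types[] below) and corresponds roughly to:
 *
 *	mov r2 = hypnum		// A5 form: add r2 = imm, r0
 *	break brkimm		// trap to the hypervisor
 *	br.ret.sptk.many rp	// or br.cond.sptk.many rp when !ret
 *
 * i.e. the sequence a paravirtualized guest executes in place of the
 * original privileged instruction.
 */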
/**************************************************************************
Privileged operation emulation routines
**************************************************************************/

IA64FAULT priv_rfi(VCPU *vcpu, INST64 inst)
{
	return vcpu_rfi(vcpu);
}

IA64FAULT priv_bsw0(VCPU *vcpu, INST64 inst)
{
	return vcpu_bsw0(vcpu);
}

IA64FAULT priv_bsw1(VCPU *vcpu, INST64 inst)
{
	return vcpu_bsw1(vcpu);
}

IA64FAULT priv_cover(VCPU *vcpu, INST64 inst)
{
	return vcpu_cover(vcpu);
}

IA64FAULT priv_ptc_l(VCPU *vcpu, INST64 inst)
{
	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
	UINT64 addr_range;

	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
	return vcpu_ptc_l(vcpu,vadr,addr_range);
}
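
/*
 * For ptc.l/ptc.g/ptc.ga and ptr.d/ptr.i, r3 holds the virtual address
 * and bits {7:2} of r2 hold log2 of the address range to purge, hence
 * the (r2 & 0xfc) >> 2 extraction above and in the routines below.
 */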
IA64FAULT priv_ptc_e(VCPU *vcpu, INST64 inst)
{
	UINT src = inst.M28.r3;

	// NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
	if (src > 63) return(vcpu_fc(vcpu,vcpu_get_gr(vcpu,src - 64)));
	return vcpu_ptc_e(vcpu,vcpu_get_gr(vcpu,src));
}
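
/*
 * Several routines in this file treat a register field greater than 63 as
 * a "privified" encoding: the trapping instruction is really a related,
 * normally non-privileged operation (fc, ttag, thash, mov from cpuid/pmd,
 * mov from kr) applied to r(n-64).  Presumably the paravirtualizing
 * translator rewrites those instructions into privileged forms so that
 * Xen gets a chance to intercept them; see the "privified" notes on
 * priv_tpa, priv_tak, priv_mov_to_ar_reg, priv_mov_from_rr and
 * priv_mov_from_pmc below.
 */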
IA64FAULT priv_ptc_g(VCPU *vcpu, INST64 inst)
{
	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
	UINT64 addr_range;

	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
	return vcpu_ptc_g(vcpu,vadr,addr_range);
}

IA64FAULT priv_ptc_ga(VCPU *vcpu, INST64 inst)
{
	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
	UINT64 addr_range;

	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
	return vcpu_ptc_ga(vcpu,vadr,addr_range);
}

IA64FAULT priv_ptr_d(VCPU *vcpu, INST64 inst)
{
	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
	UINT64 addr_range;

	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
	return vcpu_ptr_d(vcpu,vadr,addr_range);
}

IA64FAULT priv_ptr_i(VCPU *vcpu, INST64 inst)
{
	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
	UINT64 addr_range;

	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
	return vcpu_ptr_i(vcpu,vadr,addr_range);
}

IA64FAULT priv_tpa(VCPU *vcpu, INST64 inst)
{
	UINT64 padr;
	UINT fault;
	UINT src = inst.M46.r3;

	// NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
	if (src > 63)
		fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
	else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M46.r1, padr);
	else return fault;
}

IA64FAULT priv_tak(VCPU *vcpu, INST64 inst)
{
	UINT64 key;
	UINT fault;
	UINT src = inst.M46.r3;

	// NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
	if (src > 63)
		fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
	else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M46.r1, key);
	else return fault;
}

/************************************
 * Insert translation register/cache
 ************************************/

IA64FAULT priv_itr_d(VCPU *vcpu, INST64 inst)
{
	UINT64 fault, itir, ifa, pte, slot;

	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
		return(IA64_ILLOP_FAULT);
	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
		return(IA64_ILLOP_FAULT);
	pte = vcpu_get_gr(vcpu,inst.M42.r2);
	slot = vcpu_get_gr(vcpu,inst.M42.r3);

	return (vcpu_itr_d(vcpu,slot,pte,itir,ifa));
}

IA64FAULT priv_itr_i(VCPU *vcpu, INST64 inst)
{
	UINT64 fault, itir, ifa, pte, slot;

	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
		return(IA64_ILLOP_FAULT);
	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
		return(IA64_ILLOP_FAULT);
	pte = vcpu_get_gr(vcpu,inst.M42.r2);
	slot = vcpu_get_gr(vcpu,inst.M42.r3);

	return (vcpu_itr_i(vcpu,slot,pte,itir,ifa));
}

IA64FAULT priv_itc_d(VCPU *vcpu, INST64 inst)
{
	UINT64 fault, itir, ifa, pte;

	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
		return(IA64_ILLOP_FAULT);
	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
		return(IA64_ILLOP_FAULT);
	if (!inst.inst) pte = vcpu_get_tmp(vcpu,0);
	else pte = vcpu_get_gr(vcpu,inst.M41.r2);

	return (vcpu_itc_d(vcpu,pte,itir,ifa));
}

IA64FAULT priv_itc_i(VCPU *vcpu, INST64 inst)
{
	UINT64 fault, itir, ifa, pte;

	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
		return(IA64_ILLOP_FAULT);
	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
		return(IA64_ILLOP_FAULT);
	if (!inst.inst) pte = vcpu_get_tmp(vcpu,0);
	else pte = vcpu_get_gr(vcpu,inst.M41.r2);

	return (vcpu_itc_i(vcpu,pte,itir,ifa));
}
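
/*
 * As with the hardware itc/itr instructions, the page attributes and the
 * virtual address of the insert come from the (virtual) ITIR and IFA
 * control registers; only the PTE itself comes from a general register.
 * When inst.inst is zero the call came from ia64_hyperprivop() below, and
 * the PTE is fetched from a per-vcpu scratch value via vcpu_get_tmp()
 * instead.
 */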
/*************************************
 * Moves to semi-privileged registers
 *************************************/

IA64FAULT priv_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
{
	// I27 and M30 are identical for these fields
	UINT64 ar3 = inst.M30.ar3;
	UINT64 imm = vcpu_get_gr(vcpu,inst.M30.imm);
	return (vcpu_set_ar(vcpu,ar3,imm));
}

IA64FAULT priv_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
{
	// I26 and M29 are identical for these fields
	UINT64 ar3 = inst.M29.ar3;

	if (inst.M29.r2 > 63 && inst.M29.ar3 < 8) { // privified mov from kr
		UINT64 val;
		if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
			return vcpu_set_gr(vcpu, inst.M29.r2-64, val);
		else return IA64_ILLOP_FAULT;
	}
	else {
		UINT64 r2 = vcpu_get_gr(vcpu,inst.M29.r2);
		return (vcpu_set_ar(vcpu,ar3,r2));
	}
}
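
/*
 * In the privified mov-from-kr case above, ar3 < 8 selects one of the
 * kernel registers ar.k0-ar.k7, and the "destination" register number has
 * 64 added to it to mark the instruction; reads of other application
 * registers are not intercepted this way.
 */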
/********************************
 * Moves to privileged registers
 ********************************/

IA64FAULT priv_mov_to_pkr(VCPU *vcpu, INST64 inst)
{
	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
	return (vcpu_set_pkr(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_rr(VCPU *vcpu, INST64 inst)
{
	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
	return (vcpu_set_rr(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_dbr(VCPU *vcpu, INST64 inst)
{
	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
	return (vcpu_set_dbr(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_ibr(VCPU *vcpu, INST64 inst)
{
	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
	return (vcpu_set_ibr(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_pmc(VCPU *vcpu, INST64 inst)
{
	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
	return (vcpu_set_pmc(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_pmd(VCPU *vcpu, INST64 inst)
{
	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
	return (vcpu_set_pmd(vcpu,r3,r2));
}

unsigned long to_cr_cnt[128] = { 0 };

IA64FAULT priv_mov_to_cr(VCPU *vcpu, INST64 inst)
{
	UINT64 val = vcpu_get_gr(vcpu, inst.M32.r2);
	to_cr_cnt[inst.M32.cr3]++;
	switch (inst.M32.cr3) {
	    case 0: return vcpu_set_dcr(vcpu,val);
	    case 1: return vcpu_set_itm(vcpu,val);
	    case 2: return vcpu_set_iva(vcpu,val);
	    case 8: return vcpu_set_pta(vcpu,val);
	    case 16:return vcpu_set_ipsr(vcpu,val);
	    case 17:return vcpu_set_isr(vcpu,val);
	    case 19:return vcpu_set_iip(vcpu,val);
	    case 20:return vcpu_set_ifa(vcpu,val);
	    case 21:return vcpu_set_itir(vcpu,val);
	    case 22:return vcpu_set_iipa(vcpu,val);
	    case 23:return vcpu_set_ifs(vcpu,val);
	    case 24:return vcpu_set_iim(vcpu,val);
	    case 25:return vcpu_set_iha(vcpu,val);
	    case 64:return vcpu_set_lid(vcpu,val);
	    case 65:return IA64_ILLOP_FAULT;
	    case 66:return vcpu_set_tpr(vcpu,val);
	    case 67:return vcpu_set_eoi(vcpu,val);
	    case 68:return IA64_ILLOP_FAULT;
	    case 69:return IA64_ILLOP_FAULT;
	    case 70:return IA64_ILLOP_FAULT;
	    case 71:return IA64_ILLOP_FAULT;
	    case 72:return vcpu_set_itv(vcpu,val);
	    case 73:return vcpu_set_pmv(vcpu,val);
	    case 74:return vcpu_set_cmcv(vcpu,val);
	    case 80:return vcpu_set_lrr0(vcpu,val);
	    case 81:return vcpu_set_lrr1(vcpu,val);
	    default: return IA64_ILLOP_FAULT;
	}
}
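
/*
 * The cr3 field is the control-register index, so the case numbers above
 * (and in priv_mov_from_cr below) follow the architected cr numbering:
 * cr0=dcr, cr1=itm, cr2=iva, cr8=pta, cr16=ipsr, ... cr80/81=lrr0/lrr1.
 * cr_str[] near the end of this file gives the full index-to-name table.
 */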
IA64FAULT priv_rsm(VCPU *vcpu, INST64 inst)
{
	UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
	return vcpu_reset_psr_sm(vcpu,imm24);
}

IA64FAULT priv_ssm(VCPU *vcpu, INST64 inst)
{
	UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
	return vcpu_set_psr_sm(vcpu,imm24);
}

/**
 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
 */
IA64FAULT priv_mov_to_psr(VCPU *vcpu, INST64 inst)
{
	UINT64 val = vcpu_get_gr(vcpu, inst.M35.r2);
	return vcpu_set_psr_l(vcpu,val);
}

/**********************************
 * Moves from privileged registers
 **********************************/

IA64FAULT priv_mov_from_rr(VCPU *vcpu, INST64 inst)
{
	UINT64 val;
	IA64FAULT fault;

	if (inst.M43.r1 > 63) { // privified mov from cpuid
		fault = vcpu_get_cpuid(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
		if (fault == IA64_NO_FAULT)
			return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
	}
	else {
		fault = vcpu_get_rr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
		if (fault == IA64_NO_FAULT)
			return vcpu_set_gr(vcpu, inst.M43.r1, val);
	}
	return fault;
}

IA64FAULT priv_mov_from_pkr(VCPU *vcpu, INST64 inst)
{
	UINT64 val;
	IA64FAULT fault;

	fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M43.r1, val);
	else return fault;
}

IA64FAULT priv_mov_from_dbr(VCPU *vcpu, INST64 inst)
{
	UINT64 val;
	IA64FAULT fault;

	fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M43.r1, val);
	else return fault;
}

IA64FAULT priv_mov_from_ibr(VCPU *vcpu, INST64 inst)
{
	UINT64 val;
	IA64FAULT fault;

	fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M43.r1, val);
	else return fault;
}

IA64FAULT priv_mov_from_pmc(VCPU *vcpu, INST64 inst)
{
	UINT64 val;
	IA64FAULT fault;

	if (inst.M43.r1 > 63) { // privified mov from pmd
		fault = vcpu_get_pmd(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
		if (fault == IA64_NO_FAULT)
			return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
	}
	else {
		fault = vcpu_get_pmc(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
		if (fault == IA64_NO_FAULT)
			return vcpu_set_gr(vcpu, inst.M43.r1, val);
	}
	return fault;
}

unsigned long from_cr_cnt[128] = { 0 };

#define cr_get(cr) \
	((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
		vcpu_set_gr(vcpu, tgt, val) : fault;

IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
{
	UINT64 tgt = inst.M33.r1;
	UINT64 val;
	IA64FAULT fault;

	from_cr_cnt[inst.M33.cr3]++;
	switch (inst.M33.cr3) {
	    case 0: return cr_get(dcr);
	    case 1: return cr_get(itm);
	    case 2: return cr_get(iva);
	    case 8: return cr_get(pta);
	    case 16:return cr_get(ipsr);
	    case 17:return cr_get(isr);
	    case 19:return cr_get(iip);
	    case 20:return cr_get(ifa);
	    case 21:return cr_get(itir);
	    case 22:return cr_get(iipa);
	    case 23:return cr_get(ifs);
	    case 24:return cr_get(iim);
	    case 25:return cr_get(iha);
	    case 64:return cr_get(lid);
	    case 65:return cr_get(ivr);
	    case 66:return cr_get(tpr);
	    case 67:return vcpu_set_gr(vcpu,tgt,0L);
	    case 68:return cr_get(irr0);
	    case 69:return cr_get(irr1);
	    case 70:return cr_get(irr2);
	    case 71:return cr_get(irr3);
	    case 72:return cr_get(itv);
	    case 73:return cr_get(pmv);
	    case 74:return cr_get(cmcv);
	    case 80:return cr_get(lrr0);
	    case 81:return cr_get(lrr1);
	    default: return IA64_ILLOP_FAULT;
	}
	return IA64_ILLOP_FAULT;
}

IA64FAULT priv_mov_from_psr(VCPU *vcpu, INST64 inst)
{
	UINT64 tgt = inst.M33.r1;
	UINT64 val;
	IA64FAULT fault;

	if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, tgt, val);
	else return fault;
}

/**************************************************************************
Privileged operation decode and dispatch routines
**************************************************************************/

IA64_SLOT_TYPE slot_types[0x20][3] = {
	{M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
	{M, I, ILLEGAL}, {M, I, ILLEGAL},
	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
	{M, M, I}, {M, M, I}, {M, M, I}, {M, M, I},
	{M, F, I}, {M, F, I},
	{M, M, F}, {M, M, F},
	{M, I, B}, {M, I, B},
	{M, B, B}, {M, B, B},
	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
	{B, B, B}, {B, B, B},
	{M, M, B}, {M, M, B},
	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
	{M, F, B}, {M, F, B},
	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL}
};
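
/*
 * slot_types[] is indexed by the 5-bit template field of a bundle and
 * gives the execution-unit type (M, I, F or B) of each of the three
 * 41-bit instruction slots, which is what priv_handle_op() needs in order
 * to know which decode rules apply to the faulting slot.  Entries marked
 * ILLEGAL are templates (or slots) that this code does not decode.
 */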
// pointer to privileged emulation function
typedef IA64FAULT (*PPEFCN)(VCPU *vcpu, INST64 inst);

PPEFCN Mpriv_funcs[64] = {
  priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
  priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
  0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
  priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
  priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr, priv_mov_from_pkr,
  priv_mov_from_pmc, 0, 0, 0,
  0, 0, 0, 0,
  0, 0, priv_tpa, priv_tak,
  0, 0, 0, 0,
  priv_mov_from_cr, priv_mov_from_psr, 0, 0,
  0, 0, 0, 0,
  priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
  0, 0, 0, 0,
  priv_ptc_e, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0
};
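
/*
 * Mpriv_funcs[] is indexed by the x6 field of an M-unit instruction with
 * major opcode 1; null entries fall through to the "can't handle privop"
 * path in priv_handle_op().  Mpriv_str[] further down gives the
 * corresponding mnemonic for each index when dumping statistics.
 */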
struct {
	unsigned long mov_to_ar_imm;
	unsigned long mov_to_ar_reg;
	unsigned long mov_from_ar;
	unsigned long ssm;
	unsigned long rsm;
	unsigned long rfi;
	unsigned long bsw0;
	unsigned long bsw1;
	unsigned long cover;
	unsigned long fc;
	unsigned long cpuid;
	unsigned long Mpriv_cnt[64];
} privcnt = { 0 };

unsigned long privop_trace = 0;

IA64FAULT
priv_handle_op(VCPU *vcpu, REGS *regs, int privlvl)
{
	IA64_BUNDLE bundle;
	IA64_BUNDLE __get_domain_bundle(UINT64);
	int slot;
	IA64_SLOT_TYPE slot_type;
	INST64 inst;
	PPEFCN pfunc;
	unsigned long ipsr = regs->cr_ipsr;
	UINT64 iip = regs->cr_iip;
	int x6;

	// make a local copy of the bundle containing the privop
#if 1
	bundle = __get_domain_bundle(iip);
	if (!bundle.i64[0] && !bundle.i64[1])
#else
	if (__copy_from_user(&bundle,iip,sizeof(bundle)))
#endif
	{
		//printf("*** priv_handle_op: privop bundle @%p not mapped, retrying\n",iip);
		return IA64_RETRY;
	}
#if 0
	if (iip==0xa000000100001820) {
		static int firstpagefault = 1;
		if (firstpagefault) {
			printf("*** First time to domain page fault!\n"); firstpagefault=0;
		}
	}
#endif
	if (privop_trace) {
		static long i = 400;
		//if (i > 0) printf("privop @%p\n",iip);
		if (i > 0) printf("priv_handle_op: @%p, itc=%lx, itm=%lx\n",
			iip,ia64_get_itc(),ia64_get_itm());
		i--;
	}
	slot = ((struct ia64_psr *)&ipsr)->ri;
	if (!slot) inst.inst = (bundle.i64[0]>>5) & MASK_41;
	else if (slot == 1)
		inst.inst = ((bundle.i64[0]>>46) | bundle.i64[1]<<18) & MASK_41;
	else if (slot == 2) inst.inst = (bundle.i64[1]>>23) & MASK_41;
	else printf("priv_handle_op: illegal slot: %d\n", slot);
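	/*
	 * The slot extraction above follows the bundle layout: bits 0-4 are
	 * the template, and the three 41-bit instruction slots occupy bits
	 * 5-45, 46-86 (straddling the two 64-bit words, hence the shift/or)
	 * and 87-127.  psr.ri says which slot the faulting instruction was in.
	 */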
	slot_type = slot_types[bundle.template][slot];
	if (priv_verbose) {
		printf("priv_handle_op: checking bundle at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
			iip, (UINT64)inst.inst, slot, slot_type);
	}
	if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
		// break instr for privified cover
	}
	else if (privlvl != 2) return (IA64_ILLOP_FAULT);
	switch (slot_type) {
	    case M:
		if (inst.generic.major == 0) {
#if 0
			if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
				privcnt.cover++;
				return priv_cover(vcpu,inst);
			}
#endif
			if (inst.M29.x3 != 0) break;
			if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
				privcnt.mov_to_ar_imm++;
				return priv_mov_to_ar_imm(vcpu,inst);
			}
			if (inst.M44.x4 == 6) {
				privcnt.ssm++;
				return priv_ssm(vcpu,inst);
			}
			if (inst.M44.x4 == 7) {
				privcnt.rsm++;
				return priv_rsm(vcpu,inst);
			}
			break;
		}
		else if (inst.generic.major != 1) break;
		x6 = inst.M29.x6;
		if (x6 == 0x2a) {
			if (inst.M29.r2 > 63 && inst.M29.ar3 < 8)
				privcnt.mov_from_ar++; // privified mov from kr
			else privcnt.mov_to_ar_reg++;
			return priv_mov_to_ar_reg(vcpu,inst);
		}
		if (inst.M29.x3 != 0) break;
		if (!(pfunc = Mpriv_funcs[x6])) break;
		if (x6 == 0x1e || x6 == 0x1f) { // tpa or tak are "special"
			if (inst.M46.r3 > 63) {
				if (x6 == 0x1e) x6 = 0x1b;
				else x6 = 0x1a;
			}
		}
		if (x6 == 52 && inst.M28.r3 > 63)
			privcnt.fc++;
		else if (x6 == 16 && inst.M43.r3 > 63)
			privcnt.cpuid++;
		else privcnt.Mpriv_cnt[x6]++;
		return (*pfunc)(vcpu,inst);
		break;
	    case B:
		if (inst.generic.major != 0) break;
		if (inst.B8.x6 == 0x08) {
			IA64FAULT fault;
			privcnt.rfi++;
			fault = priv_rfi(vcpu,inst);
			if (fault == IA64_NO_FAULT) fault = IA64_RFI_IN_PROGRESS;
			return fault;
		}
		if (inst.B8.x6 == 0x0c) {
			privcnt.bsw0++;
			return priv_bsw0(vcpu,inst);
		}
		if (inst.B8.x6 == 0x0d) {
			privcnt.bsw1++;
			return priv_bsw1(vcpu,inst);
		}
		if (inst.B8.x6 == 0x0) { // break instr for privified cover
			privcnt.cover++;
			return priv_cover(vcpu,inst);
		}
		break;
	    case I:
		if (inst.generic.major != 0) break;
#if 0
		if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
			privcnt.cover++;
			return priv_cover(vcpu,inst);
		}
#endif
		if (inst.I26.x3 != 0) break;  // I26.x3 == I27.x3
		if (inst.I26.x6 == 0x2a) {
			if (inst.I26.r2 > 63 && inst.I26.ar3 < 8)
				privcnt.mov_from_ar++; // privified mov from kr
			else privcnt.mov_to_ar_reg++;
			return priv_mov_to_ar_reg(vcpu,inst);
		}
		if (inst.I27.x6 == 0x0a) {
			privcnt.mov_to_ar_imm++;
			return priv_mov_to_ar_imm(vcpu,inst);
		}
		break;
	    default:
		break;
	}
	//printf("We who are about do die salute you\n");
	printf("handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
		iip, (UINT64)inst.inst, slot, slot_type);
	//printf("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
	//thread_mozambique("privop fault\n");
	return (IA64_ILLOP_FAULT);
}
/** Emulate a privileged operation.
 *
 * This should probably return 0 on success and the "trap number"
 * (e.g. illegal operation for bad register, priv op for an
 * instruction that isn't allowed, etc.) on "failure"
 *
 * @param vcpu virtual cpu
 * @param isrcode interrupt service routine code
 * @return fault
 */
IA64FAULT
priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
{
	IA64FAULT fault;
	UINT64 ipsr = regs->cr_ipsr;
	UINT64 isrcode = (isr >> 4) & 0xf;
	int privlvl;

	// handle privops masked as illops? and breaks (6)
	if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
		printf("priv_emulate: isrcode != 0 or 1 or 2\n");
		printf("priv_emulate: returning ILLOP, not implemented!\n");
		while (1);
		return IA64_ILLOP_FAULT;
	}
	//if (isrcode != 1 && isrcode != 2) return 0;
	vcpu_set_regs(vcpu,regs);
	privlvl = (ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
	// it's OK for a privified-cover to be executed in user-land
	fault = priv_handle_op(vcpu,regs,privlvl);
	if (fault == IA64_NO_FAULT) { // success!!
		// update iip/ipsr to point to the next instruction
		(void)vcpu_increment_iip(vcpu);
	}
	else if (fault == IA64_EXTINT_VECTOR) {
		// update iip/ipsr before delivering interrupt
		(void)vcpu_increment_iip(vcpu);
	}
	else if (fault == IA64_RFI_IN_PROGRESS) return fault;
		// success but don't update to next instruction
	else if (fault == IA64_RETRY) {
		//printf("Priv emulate gets IA64_RETRY\n");
		//printf("priv_emulate: returning RETRY, not implemented!\n");
		//while (1);
		// don't update iip/ipsr, deliver

		vcpu_force_data_miss(vcpu,regs->cr_iip);
		return IA64_RETRY;
	}
	else if (priv_verbose) printf("unhandled operation from handle_op\n");
//	if (fault == IA64_ILLOP_FAULT) {
//		printf("priv_emulate: returning ILLOP, not implemented!\n");
//		while (1);
//	}
	return fault;
}
// FIXME: Move these to include/public/arch-ia64?
#define HYPERPRIVOP_RFI		0x1
#define HYPERPRIVOP_RSM_DT	0x2
#define HYPERPRIVOP_SSM_DT	0x3
#define HYPERPRIVOP_COVER	0x4
#define HYPERPRIVOP_ITC_D	0x5
#define HYPERPRIVOP_ITC_I	0x6
#define HYPERPRIVOP_MAX		0x6

char *hyperpriv_str[HYPERPRIVOP_MAX+1] = {
	0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i",
	0
};

unsigned long hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };

/* hyperprivops are generally executed in assembly (with physical psr.ic off)
 * so this code is primarily used for debugging them */
int
ia64_hyperprivop(unsigned long iim, REGS *regs)
{
	struct exec_domain *ed = (struct exec_domain *) current;
	INST64 inst;
	UINT64 val;

	// FIXME: Add instrumentation for these
	// FIXME: Handle faults appropriately for these
	if (!iim || iim > HYPERPRIVOP_MAX) {
		printf("bad hyperprivop; ignored\n");
		return 1;
	}
	hyperpriv_cnt[iim]++;
	switch(iim) {
	    case HYPERPRIVOP_RFI:
		(void)vcpu_rfi(ed);
		return 0;	// don't update iip
	    case HYPERPRIVOP_RSM_DT:
		(void)vcpu_reset_psr_dt(ed);
		return 1;
	    case HYPERPRIVOP_SSM_DT:
		(void)vcpu_set_psr_dt(ed);
		return 1;
	    case HYPERPRIVOP_COVER:
		(void)vcpu_cover(ed);
		return 1;
	    case HYPERPRIVOP_ITC_D:
		inst.inst = 0;
		(void)priv_itc_d(ed,inst);
		return 1;
	    case HYPERPRIVOP_ITC_I:
		inst.inst = 0;
		(void)priv_itc_i(ed,inst);
		return 1;
	}
	return 0;
}
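
/*
 * The hyperprivop number arrives in the immediate of the break instruction
 * (iim).  A nonzero return value presumably tells the caller to advance
 * iip past the break, which is why HYPERPRIVOP_RFI returns 0 (rfi itself
 * supplies the new iip) while the other handled cases return 1.
 */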
/**************************************************************************
Privileged operation instrumentation routines
**************************************************************************/

char *Mpriv_str[64] = {
  "mov_to_rr", "mov_to_dbr", "mov_to_ibr", "mov_to_pkr",
  "mov_to_pmc", "mov_to_pmd", "<0x06>", "<0x07>",
  "<0x08>", "ptc_l", "ptc_g", "ptc_ga",
  "ptr_d", "ptr_i", "itr_d", "itr_i",
  "mov_from_rr", "mov_from_dbr", "mov_from_ibr", "mov_from_pkr",
  "mov_from_pmc", "<0x15>", "<0x16>", "<0x17>",
  "<0x18>", "<0x19>", "privified-thash", "privified-ttag",
  "<0x1c>", "<0x1d>", "tpa", "tak",
  "<0x20>", "<0x21>", "<0x22>", "<0x23>",
  "mov_from_cr", "mov_from_psr", "<0x26>", "<0x27>",
  "<0x28>", "<0x29>", "<0x2a>", "<0x2b>",
  "mov_to_cr", "mov_to_psr", "itc_d", "itc_i",
  "<0x30>", "<0x31>", "<0x32>", "<0x33>",
  "ptc_e", "<0x35>", "<0x36>", "<0x37>",
  "<0x38>", "<0x39>", "<0x3a>", "<0x3b>",
  "<0x3c>", "<0x3d>", "<0x3e>", "<0x3f>"
};

#define RS "Rsvd"
char *cr_str[128] = {
  "dcr","itm","iva",RS,RS,RS,RS,RS,
  "pta",RS,RS,RS,RS,RS,RS,RS,
  "ipsr","isr",RS,"iip","ifa","itir","iipa","ifs",
  "iim","iha",RS,RS,RS,RS,RS,RS,
  RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
  RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
  "lid","ivr","tpr","eoi","irr0","irr1","irr2","irr3",
  "itv","pmv","cmcv",RS,RS,RS,RS,RS,
  "lrr0","lrr1",RS,RS,RS,RS,RS,RS,
  RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
  RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
  RS,RS,RS,RS,RS,RS,RS,RS
};

// FIXME: should use snprintf to ensure no buffer overflow
int dump_privop_counts(char *buf)
{
	int i, j;
	UINT64 sum = 0;
	char *s = buf;

	// this is ugly and should probably produce sorted output
	// but it will have to do for now
	sum += privcnt.mov_to_ar_imm; sum += privcnt.mov_to_ar_reg;
	sum += privcnt.ssm; sum += privcnt.rsm;
	sum += privcnt.rfi; sum += privcnt.bsw0;
	sum += privcnt.bsw1; sum += privcnt.cover;
	for (i=0; i < 64; i++) sum += privcnt.Mpriv_cnt[i];
	s += sprintf(s,"Privop statistics: (Total privops: %ld)\n",sum);
	if (privcnt.mov_to_ar_imm)
		s += sprintf(s,"%10d %s [%d%%]\n", privcnt.mov_to_ar_imm,
			"mov_to_ar_imm", (privcnt.mov_to_ar_imm*100L)/sum);
	if (privcnt.mov_to_ar_reg)
		s += sprintf(s,"%10d %s [%d%%]\n", privcnt.mov_to_ar_reg,
			"mov_to_ar_reg", (privcnt.mov_to_ar_reg*100L)/sum);
	if (privcnt.mov_from_ar)
		s += sprintf(s,"%10d %s [%d%%]\n", privcnt.mov_from_ar,
			"privified-mov_from_ar", (privcnt.mov_from_ar*100L)/sum);
	if (privcnt.ssm)
		s += sprintf(s,"%10d %s [%d%%]\n", privcnt.ssm,
			"ssm", (privcnt.ssm*100L)/sum);
	if (privcnt.rsm)
		s += sprintf(s,"%10d %s [%d%%]\n", privcnt.rsm,
			"rsm", (privcnt.rsm*100L)/sum);
	if (privcnt.rfi)
		s += sprintf(s,"%10d %s [%d%%]\n", privcnt.rfi,
			"rfi", (privcnt.rfi*100L)/sum);
	if (privcnt.bsw0)
		s += sprintf(s,"%10d %s [%d%%]\n", privcnt.bsw0,
			"bsw0", (privcnt.bsw0*100L)/sum);
	if (privcnt.bsw1)
		s += sprintf(s,"%10d %s [%d%%]\n", privcnt.bsw1,
			"bsw1", (privcnt.bsw1*100L)/sum);
	if (privcnt.cover)
		s += sprintf(s,"%10d %s [%d%%]\n", privcnt.cover,
			"cover", (privcnt.cover*100L)/sum);
	if (privcnt.fc)
		s += sprintf(s,"%10d %s [%d%%]\n", privcnt.fc,
			"privified-fc", (privcnt.fc*100L)/sum);
	if (privcnt.cpuid)
		s += sprintf(s,"%10d %s [%d%%]\n", privcnt.cpuid,
			"privified-getcpuid", (privcnt.cpuid*100L)/sum);
	for (i=0; i < 64; i++) if (privcnt.Mpriv_cnt[i]) {
		if (!Mpriv_str[i]) s += sprintf(s,"PRIVSTRING NULL!!\n");
		else s += sprintf(s,"%10d %s [%d%%]\n", privcnt.Mpriv_cnt[i],
			Mpriv_str[i], (privcnt.Mpriv_cnt[i]*100L)/sum);
		if (i == 0x24) {	// mov from CR
			s += sprintf(s," [");
			for (j=0; j < 128; j++) if (from_cr_cnt[j]) {
				if (!cr_str[j])
					s += sprintf(s,"PRIVSTRING NULL!!\n");
				s += sprintf(s,"%s(%d),",cr_str[j],from_cr_cnt[j]);
			}
			s += sprintf(s,"]\n");
		}
		else if (i == 0x2c) {	// mov to CR
			s += sprintf(s," [");
			for (j=0; j < 128; j++) if (to_cr_cnt[j]) {
				if (!cr_str[j])
					s += sprintf(s,"PRIVSTRING NULL!!\n");
				s += sprintf(s,"%s(%d),",cr_str[j],to_cr_cnt[j]);
			}
			s += sprintf(s,"]\n");
		}
	}
	return s - buf;
}
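
/*
 * Note that the privified fc/getcpuid/mov_from_ar counters are printed
 * above but are not added into "sum", so the percentages are relative to
 * the total of the remaining privop counters only.  With hypothetical
 * counter values, the output looks something like:
 *
 *	Privop statistics: (Total privops: 1000)
 *	       400 mov_to_ar_reg [40%]
 *	       250 mov_from_cr [25%]
 *	 [ipsr(100),iip(80),ifa(70),]
 *	       ...
 */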
int zero_privop_counts(char *buf)
{
	int i, j;
	char *s = buf;

	// this is ugly and should probably produce sorted output
	// but it will have to do for now
	privcnt.mov_to_ar_imm = 0; privcnt.mov_to_ar_reg = 0;
	privcnt.mov_from_ar = 0;
	privcnt.ssm = 0; privcnt.rsm = 0;
	privcnt.rfi = 0; privcnt.bsw0 = 0;
	privcnt.bsw1 = 0; privcnt.cover = 0;
	privcnt.fc = 0; privcnt.cpuid = 0;
	for (i=0; i < 64; i++) privcnt.Mpriv_cnt[i] = 0;
	for (j=0; j < 128; j++) from_cr_cnt[j] = 0;
	for (j=0; j < 128; j++) to_cr_cnt[j] = 0;
	s += sprintf(s,"All privop statistics zeroed\n");
	return s - buf;
}
#ifdef PRIVOP_ADDR_COUNT

extern struct privop_addr_count privop_addr_counter[];

void privop_count_addr(unsigned long iip, int inst)
{
	struct privop_addr_count *v = &privop_addr_counter[inst];
	int i;

	for (i = 0; i < PRIVOP_COUNT_NADDRS; i++) {
		if (!v->addr[i]) { v->addr[i] = iip; v->count[i]++; return; }
		else if (v->addr[i] == iip) { v->count[i]++; return; }
	}
	v->overflow++;
}

int dump_privop_addrs(char *buf)
{
	int i,j;
	char *s = buf;
	s += sprintf(s,"Privop addresses:\n");
	for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
		struct privop_addr_count *v = &privop_addr_counter[i];
		s += sprintf(s,"%s:\n",v->instname);
		for (j = 0; j < PRIVOP_COUNT_NADDRS; j++) {
			if (!v->addr[j]) break;
			s += sprintf(s," @%p #%ld\n",v->addr[j],v->count[j]);
		}
		if (v->overflow)
			s += sprintf(s," other #%ld\n",v->overflow);
	}
	return s - buf;
}

void zero_privop_addrs(void)
{
	int i,j;
	for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
		struct privop_addr_count *v = &privop_addr_counter[i];
		for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
			v->addr[j] = v->count[j] = 0;
		v->overflow = 0;
	}
}
#endif
int dump_hyperprivop_counts(char *buf)
{
	int i;
	char *s = buf;
	s += sprintf(s,"Hyperprivops:\n");
	for (i = 1; i <= HYPERPRIVOP_MAX; i++)
		if (hyperpriv_cnt[i])
			s += sprintf(s,"%10d %s\n",
				hyperpriv_cnt[i], hyperpriv_str[i]);
	return s - buf;
}

void zero_hyperprivop_counts(void)
{
	int i;
	for (i = 0; i <= HYPERPRIVOP_MAX; i++) hyperpriv_cnt[i] = 0;
}

#define TMPBUFLEN 8*1024
int dump_privop_counts_to_user(char __user *ubuf, int len)
{
	char buf[TMPBUFLEN];
	int n = dump_privop_counts(buf);

	n += dump_hyperprivop_counts(buf + n);
#ifdef PRIVOP_ADDR_COUNT
	n += dump_privop_addrs(buf + n);
#endif
	if (len < TMPBUFLEN) return -1;
	if (__copy_to_user(ubuf,buf,n)) return -1;
	return n;
}

int zero_privop_counts_to_user(char __user *ubuf, int len)
{
	char buf[TMPBUFLEN];
	int n = zero_privop_counts(buf);

	zero_hyperprivop_counts();
#ifdef PRIVOP_ADDR_COUNT
	zero_privop_addrs();
#endif
	if (len < TMPBUFLEN) return -1;
	if (__copy_to_user(ubuf,buf,n)) return -1;
	return n;
}