ia64/xen-unstable

xen/arch/ia64/privop.c @ 3108:85d6a1145160

bitkeeper revision 1.1159.187.7 (41a4e12eWWEz6Rwd4YlbRFZKcBjaMQ)

Merge arcadians.cl.cam.ac.uk:/auto/groups/xeno/BK/xen-2.0-testing.bk
into arcadians.cl.cam.ac.uk:/local/scratch-2/cl349/xen-2.0-testing.bk
author cl349@arcadians.cl.cam.ac.uk
date Wed Nov 24 19:29:50 2004 +0000 (2004-11-24)
parents b7cbbc4c7a3e
children 7ef582b6c9c4
/*
 * Privileged operation "API" handling functions.
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 */

#include <asm/privop.h>
#include <asm/privify.h>
#include <asm/vcpu.h>
#include <asm/processor.h>
#include <asm/delay.h> // Debug only
//#include <debug.h>

long priv_verbose = 0;
/**************************************************************************
Hypercall bundle creation
**************************************************************************/
void build_hypercall_bundle(UINT64 *imva, UINT64 breakimm, UINT64 hypnum, UINT64 ret)
{
    INST64_A5 slot0;
    INST64_I19 slot1;
    INST64_B4 slot2;
    IA64_BUNDLE bundle;

    // slot0: mov r2 = hypnum (low 21 bits)
    slot0.inst = 0;
    slot0.qp = 0; slot0.r1 = 2; slot0.r3 = 0; slot0.major = 0x9;
    slot0.imm7b = hypnum; slot0.imm9d = hypnum >> 7;
    slot0.imm5c = hypnum >> 16; slot0.s = 0;
    // slot1: break breakimm
    slot1.inst = 0;
    slot1.qp = 0; slot1.x6 = 0; slot1.x3 = 0; slot1.major = 0x0;
    slot1.imm20 = breakimm; slot1.i = breakimm >> 20;
    // if ret, slot2: br.ret.sptk.many rp
    // else,   slot2: br.cond.sptk.many rp
    slot2.inst = 0; slot2.qp = 0; slot2.p = 1; slot2.b2 = 0;
    slot2.wh = 0; slot2.d = 0; slot2.major = 0x0;
    if (ret) {
        slot2.btype = 4; slot2.x6 = 0x21;
    }
    else {
        slot2.btype = 0; slot2.x6 = 0x20;
    }
    bundle.i64[0] = 0; bundle.i64[1] = 0;
    bundle.template = 0x11;
    bundle.slot0 = slot0.inst; bundle.slot2 = slot2.inst;
    bundle.slot1a = slot1.inst; bundle.slot1b = slot1.inst >> 18;

    *imva++ = bundle.i64[0]; *imva = bundle.i64[1];
}
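/*
 * Illustrative sketch (an assumption, not part of the original source):
 * a caller could emit a ready-to-run hypercall stub into guest-visible
 * memory along these lines, where 0x1000 is an assumed break immediate
 * and 3 an assumed hypercall number:
 *
 *     UINT64 stub[2];                             // one 16-byte M-I-B bundle
 *     build_hypercall_bundle(stub, 0x1000, 3, 1); // ends in br.ret
 *
 * The resulting bundle (template 0x11, i.e. {M, I, B} per slot_types
 * below) is: mov r2 = 3; break 0x1000; br.ret.sptk.many rp.
 */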
/**************************************************************************
Privileged operation emulation routines
**************************************************************************/
IA64FAULT priv_rfi(VCPU *vcpu, INST64 inst)
{
    return vcpu_rfi(vcpu);
}

IA64FAULT priv_bsw0(VCPU *vcpu, INST64 inst)
{
    return vcpu_bsw0(vcpu);
}

IA64FAULT priv_bsw1(VCPU *vcpu, INST64 inst)
{
    return vcpu_bsw1(vcpu);
}

IA64FAULT priv_cover(VCPU *vcpu, INST64 inst)
{
    return vcpu_cover(vcpu);
}
IA64FAULT priv_ptc_l(VCPU *vcpu, INST64 inst)
{
    UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    UINT64 addr_range;
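    // bits 7:2 of r2 hold the log2 of the purge size, so the extracted
    // addr_range is 2^ps bytes (same decoding as the other ptc/ptr ops below)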
    addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
    return vcpu_ptc_l(vcpu,vadr,addr_range);
}
IA64FAULT priv_ptc_e(VCPU *vcpu, INST64 inst)
{
    UINT src = inst.M28.r3;

    // NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
    if (src > 63) return(vcpu_fc(vcpu,vcpu_get_gr(vcpu,src - 64)));
    return vcpu_ptc_e(vcpu,vcpu_get_gr(vcpu,src));
}
IA64FAULT priv_ptc_g(VCPU *vcpu, INST64 inst)
{
    UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    UINT64 addr_range;

    addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
    return vcpu_ptc_g(vcpu,vadr,addr_range);
}

IA64FAULT priv_ptc_ga(VCPU *vcpu, INST64 inst)
{
    UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    UINT64 addr_range;

    addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
    return vcpu_ptc_ga(vcpu,vadr,addr_range);
}

IA64FAULT priv_ptr_d(VCPU *vcpu, INST64 inst)
{
    UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    UINT64 addr_range;

    addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
    return vcpu_ptr_d(vcpu,vadr,addr_range);
}

IA64FAULT priv_ptr_i(VCPU *vcpu, INST64 inst)
{
    UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    UINT64 addr_range;

    addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
    return vcpu_ptr_i(vcpu,vadr,addr_range);
}
IA64FAULT priv_tpa(VCPU *vcpu, INST64 inst)
{
    UINT64 padr;
    UINT fault;
    UINT src = inst.M46.r3;

    // NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
    if (src > 63)
        fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
    else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
    if (fault == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, inst.M46.r1, padr);
    else return fault;
}

IA64FAULT priv_tak(VCPU *vcpu, INST64 inst)
{
    UINT64 key;
    UINT fault;
    UINT src = inst.M46.r3;

    // NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
    if (src > 63)
        fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
    else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
    if (fault == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, inst.M46.r1, key);
    else return fault;
}
/************************************
 * Insert translation register/cache
 ************************************/
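// Before issuing itr/itc, the guest programs cr.itir and cr.ifa; the
// emulation therefore reads both back from the virtual CPU and rejects
// the insert if either read faults.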
IA64FAULT priv_itr_d(VCPU *vcpu, INST64 inst)
{
    UINT64 fault, itir, ifa, pte, slot;

    //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    pte = vcpu_get_gr(vcpu,inst.M42.r2);
    slot = vcpu_get_gr(vcpu,inst.M42.r3);

    return (vcpu_itr_d(vcpu,slot,pte,itir,ifa));
}

IA64FAULT priv_itr_i(VCPU *vcpu, INST64 inst)
{
    UINT64 fault, itir, ifa, pte, slot;

    //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    pte = vcpu_get_gr(vcpu,inst.M42.r2);
    slot = vcpu_get_gr(vcpu,inst.M42.r3);

    return (vcpu_itr_i(vcpu,slot,pte,itir,ifa));
}

IA64FAULT priv_itc_d(VCPU *vcpu, INST64 inst)
{
    UINT64 fault, itir, ifa, pte;

    //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    pte = vcpu_get_gr(vcpu,inst.M41.r2);

    return (vcpu_itc_d(vcpu,pte,itir,ifa));
}

IA64FAULT priv_itc_i(VCPU *vcpu, INST64 inst)
{
    UINT64 fault, itir, ifa, pte;

    //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
        return(IA64_ILLOP_FAULT);
    pte = vcpu_get_gr(vcpu,inst.M41.r2);

    return (vcpu_itc_i(vcpu,pte,itir,ifa));
}
/*************************************
 * Moves to semi-privileged registers
 *************************************/
IA64FAULT priv_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
{
    // I27 and M30 are identical for these fields
    UINT64 ar3 = inst.M30.ar3;
    UINT64 imm = vcpu_get_gr(vcpu,inst.M30.imm);
    return (vcpu_set_ar(vcpu,ar3,imm));
}

IA64FAULT priv_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
{
    // I26 and M29 are identical for these fields
    UINT64 ar3 = inst.M29.ar3;

    if (inst.M29.r2 > 63 && inst.M29.ar3 < 8) { // privified mov from kr
        UINT64 val;
        if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
            return vcpu_set_gr(vcpu, inst.M29.r2-64, val);
        else return IA64_ILLOP_FAULT;
    }
    else {
        UINT64 r2 = vcpu_get_gr(vcpu,inst.M29.r2);
        return (vcpu_set_ar(vcpu,ar3,r2));
    }
}
/********************************
 * Moves to privileged registers
 ********************************/
IA64FAULT priv_mov_to_pkr(VCPU *vcpu, INST64 inst)
{
    UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
    UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
    return (vcpu_set_pkr(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_rr(VCPU *vcpu, INST64 inst)
{
    UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
    UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
    return (vcpu_set_rr(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_dbr(VCPU *vcpu, INST64 inst)
{
    UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
    UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
    return (vcpu_set_dbr(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_ibr(VCPU *vcpu, INST64 inst)
{
    UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
    UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
    return (vcpu_set_ibr(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_pmc(VCPU *vcpu, INST64 inst)
{
    UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
    UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
    return (vcpu_set_pmc(vcpu,r3,r2));
}

IA64FAULT priv_mov_to_pmd(VCPU *vcpu, INST64 inst)
{
    UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
    UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
    return (vcpu_set_pmd(vcpu,r3,r2));
}
unsigned long to_cr_cnt[128] = { 0 };
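// instrumentation: counts writes to each control register, indexed by the
// cr3 field; dumped alongside from_cr_cnt by dump_privop_counts() below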
IA64FAULT priv_mov_to_cr(VCPU *vcpu, INST64 inst)
{
    UINT64 val = vcpu_get_gr(vcpu, inst.M32.r2);
    to_cr_cnt[inst.M32.cr3]++;
    switch (inst.M32.cr3) {
    case 0: return vcpu_set_dcr(vcpu,val);
    case 1: return vcpu_set_itm(vcpu,val);
    case 2: return vcpu_set_iva(vcpu,val);
    case 8: return vcpu_set_pta(vcpu,val);
    case 16: return vcpu_set_ipsr(vcpu,val);
    case 17: return vcpu_set_isr(vcpu,val);
    case 19: return vcpu_set_iip(vcpu,val);
    case 20: return vcpu_set_ifa(vcpu,val);
    case 21: return vcpu_set_itir(vcpu,val);
    case 22: return vcpu_set_iipa(vcpu,val);
    case 23: return vcpu_set_ifs(vcpu,val);
    case 24: return vcpu_set_iim(vcpu,val);
    case 25: return vcpu_set_iha(vcpu,val);
    case 64: return vcpu_set_lid(vcpu,val);
    case 65: return IA64_ILLOP_FAULT;
    case 66: return vcpu_set_tpr(vcpu,val);
    case 67: return vcpu_set_eoi(vcpu,val);
    case 68: return IA64_ILLOP_FAULT;
    case 69: return IA64_ILLOP_FAULT;
    case 70: return IA64_ILLOP_FAULT;
    case 71: return IA64_ILLOP_FAULT;
    case 72: return vcpu_set_itv(vcpu,val);
    case 73: return vcpu_set_pmv(vcpu,val);
    case 74: return vcpu_set_cmcv(vcpu,val);
    case 80: return vcpu_set_lrr0(vcpu,val);
    case 81: return vcpu_set_lrr1(vcpu,val);
    default: return IA64_ILLOP_FAULT;
    }
}
IA64FAULT priv_rsm(VCPU *vcpu, INST64 inst)
{
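    // the 24-bit system-mask immediate is scattered across the M44 fields
    // i (1 bit), i2 (2 bits) and imm (21 bits); reassemble it here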
    UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
    return vcpu_reset_psr_sm(vcpu,imm24);
}
IA64FAULT priv_ssm(VCPU *vcpu, INST64 inst)
{
    UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
    return vcpu_set_psr_sm(vcpu,imm24);
}
/**
 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
 */
IA64FAULT priv_mov_to_psr(VCPU *vcpu, INST64 inst)
{
    UINT64 val = vcpu_get_gr(vcpu, inst.M35.r2);
    return vcpu_set_psr_l(vcpu,val);
}
/**********************************
 * Moves from privileged registers
 **********************************/
IA64FAULT priv_mov_from_rr(VCPU *vcpu, INST64 inst)
{
    UINT64 val;
    IA64FAULT fault;

    if (inst.M43.r1 > 63) { // privified mov from cpuid
        fault = vcpu_get_cpuid(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
        if (fault == IA64_NO_FAULT)
            return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
    }
    else {
        fault = vcpu_get_rr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
        if (fault == IA64_NO_FAULT)
            return vcpu_set_gr(vcpu, inst.M43.r1, val);
    }
    return fault;
}
IA64FAULT priv_mov_from_pkr(VCPU *vcpu, INST64 inst)
{
    UINT64 val;
    IA64FAULT fault;

    fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
    if (fault == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, inst.M43.r1, val);
    else return fault;
}

IA64FAULT priv_mov_from_dbr(VCPU *vcpu, INST64 inst)
{
    UINT64 val;
    IA64FAULT fault;

    fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
    if (fault == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, inst.M43.r1, val);
    else return fault;
}

IA64FAULT priv_mov_from_ibr(VCPU *vcpu, INST64 inst)
{
    UINT64 val;
    IA64FAULT fault;

    fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
    if (fault == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, inst.M43.r1, val);
    else return fault;
}

IA64FAULT priv_mov_from_pmc(VCPU *vcpu, INST64 inst)
{
    UINT64 val;
    IA64FAULT fault;

    fault = vcpu_get_pmc(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
    if (fault == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, inst.M43.r1, val);
    else return fault;
}
unsigned long from_cr_cnt[128] = { 0 };
#define cr_get(cr) \
    ((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
        vcpu_set_gr(vcpu, tgt, val) : fault
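/*
 * For example, `return cr_get(dcr);` expands to:
 *
 *     return ((fault = vcpu_get_dcr(vcpu,&val)) == IA64_NO_FAULT) ?
 *         vcpu_set_gr(vcpu, tgt, val) : fault;
 *
 * i.e. read the control register into val and, on success, deposit it in
 * the target general register (tgt, val and fault are locals of the caller).
 */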
IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
{
    UINT64 tgt = inst.M33.r1;
    UINT64 val;
    IA64FAULT fault;

    from_cr_cnt[inst.M33.cr3]++;
    switch (inst.M33.cr3) {
    case 0: return cr_get(dcr);
    case 1: return cr_get(itm);
    case 2: return cr_get(iva);
    case 8: return cr_get(pta);
    case 16: return cr_get(ipsr);
    case 17: return cr_get(isr);
    case 19: return cr_get(iip);
    case 20: return cr_get(ifa);
    case 21: return cr_get(itir);
    case 22: return cr_get(iipa);
    case 23: return cr_get(ifs);
    case 24: return cr_get(iim);
    case 25: return cr_get(iha);
    case 64: return cr_get(lid);
    case 65: return cr_get(ivr);
    case 66: return cr_get(tpr);
    case 67: return vcpu_set_gr(vcpu,tgt,0L);
    case 68: return cr_get(irr0);
    case 69: return cr_get(irr1);
    case 70: return cr_get(irr2);
    case 71: return cr_get(irr3);
    case 72: return cr_get(itv);
    case 73: return cr_get(pmv);
    case 74: return cr_get(cmcv);
    case 80: return cr_get(lrr0);
    case 81: return cr_get(lrr1);
    default: return IA64_ILLOP_FAULT;
    }
    return IA64_ILLOP_FAULT;
}
IA64FAULT priv_mov_from_psr(VCPU *vcpu, INST64 inst)
{
    UINT64 tgt = inst.M33.r1;
    UINT64 val;
    IA64FAULT fault;

    if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, tgt, val);
    else return fault;
}
/**************************************************************************
Privileged operation decode and dispatch routines
**************************************************************************/
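// Execution-unit type of each of a bundle's three instruction slots,
// indexed by the bundle's 5-bit template field; reserved templates are
// marked ILLEGAL.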
IA64_SLOT_TYPE slot_types[0x20][3] = {
    {M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
    {M, I, ILLEGAL}, {M, I, ILLEGAL},
    {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
    {M, M, I}, {M, M, I}, {M, M, I}, {M, M, I},
    {M, F, I}, {M, F, I},
    {M, M, F}, {M, M, F},
    {M, I, B}, {M, I, B},
    {M, B, B}, {M, B, B},
    {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
    {B, B, B}, {B, B, B},
    {M, M, B}, {M, M, B},
    {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
    {M, F, B}, {M, F, B},
    {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL}
};
// pointer to privileged emulation function
typedef IA64FAULT (*PPEFCN)(VCPU *vcpu, INST64 inst);
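// Dispatch table for M-unit privops, indexed by the x6 opcode extension.
// priv_tpa/priv_tak (0x1e/0x1f) also handle privified ttag/thash; only the
// statistics index is remapped to 0x1b/0x1a in that case (see priv_handle_op).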
PPEFCN Mpriv_funcs[64] = {
    priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
    priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
    0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
    priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
    priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr, priv_mov_from_pkr,
    priv_mov_from_pmc, 0, 0, 0,
    0, 0, 0, 0,
    0, 0, priv_tpa, priv_tak,
    0, 0, 0, 0,
    priv_mov_from_cr, priv_mov_from_psr, 0, 0,
    0, 0, 0, 0,
    priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
    0, 0, 0, 0,
    priv_ptc_e, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0
};
struct {
    unsigned long mov_to_ar_imm;
    unsigned long mov_to_ar_reg;
    unsigned long ssm;
    unsigned long rsm;
    unsigned long rfi;
    unsigned long bsw0;
    unsigned long bsw1;
    unsigned long cover;
    unsigned long Mpriv_cnt[64];
} privcnt = { 0 };

unsigned long privop_trace = 0;
IA64FAULT
priv_handle_op(VCPU *vcpu, REGS *regs, int privlvl)
{
    IA64_BUNDLE bundle, __get_domain_bundle(UINT64);
    int slot;
    IA64_SLOT_TYPE slot_type;
    INST64 inst;
    PPEFCN pfunc;
    unsigned long ipsr = regs->cr_ipsr;
    UINT64 iip = regs->cr_iip;
    int x6;

    // make a local copy of the bundle containing the privop
#if 1
    bundle = __get_domain_bundle(iip);
    if (!bundle.i64[0] && !bundle.i64[1]) return IA64_RETRY;
#else
#ifdef AVOIDING_POSSIBLE_DOMAIN_TLB_MISS
    //TODO: this needs to check for faults and behave accordingly
    if (!vcpu_get_iip_bundle(&bundle)) return IA64_DTLB_FAULT;
#else
    if (iip < 0x10000) {
        printf("priv_handle_op: unlikely iip=%p,b0=%p\n",iip,regs->b0);
        dummy();
    }
    bundle = *(IA64_BUNDLE *)iip;
#endif
#endif
#if 0
    if (iip==0xa000000100001820) {
        static int firstpagefault = 1;
        if (firstpagefault) {
            printf("*** First time to domain page fault!\n"); firstpagefault=0;
        }
    }
#endif
    if (privop_trace) {
        static long i = 400;
        //if (i > 0) printf("privop @%p\n",iip);
        if (i > 0) printf("priv_handle_op: @%p, itc=%lx, itm=%lx\n",
            iip,ia64_get_itc(),ia64_get_itm());
        i--;
    }
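    // cr.ipsr.ri says which of the bundle's three 41-bit slots faulted;
    // slot 0 sits in bits 5..45 of the first doubleword, slot 1 straddles
    // both doublewords, and slot 2 occupies bits 23..63 of the second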
    slot = ((struct ia64_psr *)&ipsr)->ri;
    if (!slot) inst.inst = (bundle.i64[0]>>5) & MASK_41;
    else if (slot == 1)
        inst.inst = ((bundle.i64[0]>>46) | bundle.i64[1]<<18) & MASK_41;
    else if (slot == 2) inst.inst = (bundle.i64[1]>>23) & MASK_41;
    else printf("priv_handle_op: illegal slot: %d\n", slot);

    slot_type = slot_types[bundle.template][slot];
    if (priv_verbose) {
        printf("priv_handle_op: checking bundle at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
            iip, (UINT64)inst.inst, slot, slot_type);
    }
    if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
        // break instr for privified cover
    }
    else if (privlvl != 2) return (IA64_ILLOP_FAULT);
    switch (slot_type) {
    case M:
        if (inst.generic.major == 0) {
#if 0
            if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
                privcnt.cover++;
                return priv_cover(vcpu,inst);
            }
#endif
            if (inst.M29.x3 != 0) break;
            if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
                privcnt.mov_to_ar_imm++;
                return priv_mov_to_ar_imm(vcpu,inst);
            }
            if (inst.M44.x4 == 6) {
                privcnt.ssm++;
                return priv_ssm(vcpu,inst);
            }
            if (inst.M44.x4 == 7) {
                privcnt.rsm++;
                return priv_rsm(vcpu,inst);
            }
            break;
        }
        else if (inst.generic.major != 1) break;
        x6 = inst.M29.x6;
        if (x6 == 0x2a) {
            privcnt.mov_to_ar_reg++;
            return priv_mov_to_ar_reg(vcpu,inst);
        }
        if (inst.M29.x3 != 0) break;
        if (!(pfunc = Mpriv_funcs[x6])) break;
        if (x6 == 0x1e || x6 == 0x1f) { // tpa or tak are "special"
            if (inst.M46.r3 > 63) {
                if (x6 == 0x1e) x6 = 0x1b;
                else x6 = 0x1a;
            }
        }
        privcnt.Mpriv_cnt[x6]++;
        return (*pfunc)(vcpu,inst);
        break;
    case B:
        if (inst.generic.major != 0) break;
        if (inst.B8.x6 == 0x08) {
            IA64FAULT fault;
            privcnt.rfi++;
            fault = priv_rfi(vcpu,inst);
            if (fault == IA64_NO_FAULT) fault = IA64_RFI_IN_PROGRESS;
            return fault;
        }
        if (inst.B8.x6 == 0x0c) {
            privcnt.bsw0++;
            return priv_bsw0(vcpu,inst);
        }
        if (inst.B8.x6 == 0x0d) {
            privcnt.bsw1++;
            return priv_bsw1(vcpu,inst);
        }
        if (inst.B8.x6 == 0x0) { // break instr for privified cover
            privcnt.cover++;
            return priv_cover(vcpu,inst);
        }
        break;
    case I:
        if (inst.generic.major != 0) break;
#if 0
        if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
            privcnt.cover++;
            return priv_cover(vcpu,inst);
        }
#endif
        if (inst.I26.x3 != 0) break; // I26.x3 == I27.x3
        if (inst.I26.x6 == 0x2a) {
            privcnt.mov_to_ar_reg++;
            return priv_mov_to_ar_reg(vcpu,inst);
        }
        if (inst.I27.x6 == 0x0a) {
            privcnt.mov_to_ar_imm++;
            return priv_mov_to_ar_imm(vcpu,inst);
        }
        break;
    default:
        break;
    }
    //printf("We who are about to die salute you\n");
    printf("handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
        iip, (UINT64)inst.inst, slot, slot_type);
    //printf("vtop(0x%lx)==0x%lx\r\n", iip, tr_vtop(iip));
    //thread_mozambique("privop fault\n");
    return (IA64_ILLOP_FAULT);
}
/** Emulate a privileged operation.
 *
 * This should probably return 0 on success and the "trap number"
 * (e.g. illegal operation for a bad register, privileged operation for an
 * instruction that isn't allowed, etc.) on "failure".
 *
 * @param vcpu virtual cpu
 * @param regs saved register state at the time of the fault
 * @param isr contents of the interruption status register (cr.isr)
 * @return fault
 */
IA64FAULT
priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
{
    IA64FAULT fault;
    UINT64 ipsr = regs->cr_ipsr;
    UINT64 isrcode = (isr >> 4) & 0xf;
    int privlvl;
    // handle privops masked as illops? and breaks (6)
    if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
        printf("priv_emulate: isrcode != 0, 1, 2 or 6\n");
        printf("priv_emulate: returning ILLOP, not implemented!\n");
        while (1);
        return IA64_ILLOP_FAULT;
    }
    //if (isrcode != 1 && isrcode != 2) return 0;
    vcpu_set_regs(vcpu,regs);
    privlvl = (ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
    // it's OK for a privified-cover to be executed in user-land
    fault = priv_handle_op(vcpu,regs,privlvl);
    if (fault == IA64_NO_FAULT) { // success!!
        // update iip/ipsr to point to the next instruction
        (void)vcpu_increment_iip(vcpu);
    }
    else if (fault == IA64_EXTINT_VECTOR) {
        // update iip/ipsr before delivering interrupt
        (void)vcpu_increment_iip(vcpu);
    }
    // success, but don't update to the next instruction
    else if (fault == IA64_RFI_IN_PROGRESS) return fault;
    else if (fault == IA64_RETRY) {
        //printf("Priv emulate gets IA64_RETRY\n");
        //printf("priv_emulate: returning RETRY, not implemented!\n");
        //while (1);
        // don't update iip/ipsr, deliver
        vcpu_force_data_miss(vcpu,regs->cr_iip);
        return IA64_RETRY;
    }
    else if (priv_verbose) printf("unhandled operation from handle_op\n");
    // if (fault == IA64_ILLOP_FAULT) {
    //     printf("priv_emulate: returning ILLOP, not implemented!\n");
    //     while (1);
    // }
    return fault;
}
/**************************************************************************
Privileged operation instrumentation routines
**************************************************************************/
char *Mpriv_str[64] = {
    "mov_to_rr", "mov_to_dbr", "mov_to_ibr", "mov_to_pkr",
    "mov_to_pmc", "mov_to_pmd", "<0x06>", "<0x07>",
    "<0x08>", "ptc_l", "ptc_g", "ptc_ga",
    "ptr_d", "ptr_i", "itr_d", "itr_i",
    "mov_from_rr", "mov_from_dbr", "mov_from_ibr", "mov_from_pkr",
    "mov_from_pmc", "<0x15>", "<0x16>", "<0x17>",
    "<0x18>", "<0x19>", "privified-thash", "privified-ttag",
    "<0x1c>", "<0x1d>", "tpa", "tak",
    "<0x20>", "<0x21>", "<0x22>", "<0x23>",
    "mov_from_cr", "mov_from_psr", "<0x26>", "<0x27>",
    "<0x28>", "<0x29>", "<0x2a>", "<0x2b>",
    "mov_to_cr", "mov_to_psr", "itc_d", "itc_i",
    "<0x30>", "<0x31>", "<0x32>", "<0x33>",
    "ptc_e", "<0x35>", "<0x36>", "<0x37>",
    "<0x38>", "<0x39>", "<0x3a>", "<0x3b>",
    "<0x3c>", "<0x3d>", "<0x3e>", "<0x3f>"
};
#define RS "Rsvd"
char *cr_str[128] = {
    "dcr","itm","iva",RS,RS,RS,RS,RS,
    "pta",RS,RS,RS,RS,RS,RS,RS,
    "ipsr","isr",RS,"iip","ifa","itir","iipa","ifs",
    "iim","iha",RS,RS,RS,RS,RS,RS,
    RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
    RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
    "lid","ivr","tpr","eoi","irr0","irr1","irr2","irr3",
    "itv","pmv","cmcv",RS,RS,RS,RS,RS,
    "lrr0","lrr1",RS,RS,RS,RS,RS,RS,
    RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
    RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
    RS,RS,RS,RS,RS,RS,RS,RS
};
void dump_privop_counts(void)
{
    int i, j;
    UINT64 sum = 0;

    // this is ugly and should probably produce sorted output
    // but it will have to do for now
    sum += privcnt.mov_to_ar_imm; sum += privcnt.mov_to_ar_reg;
    sum += privcnt.ssm; sum += privcnt.rsm;
    sum += privcnt.rfi; sum += privcnt.bsw0;
    sum += privcnt.bsw1; sum += privcnt.cover;
    for (i=0; i < 64; i++) sum += privcnt.Mpriv_cnt[i];
    printf("Privop statistics: (Total privops: %ld)\r\n",sum);
    if (privcnt.mov_to_ar_imm)
        printf("%10ld %s [%ld%%]\r\n", privcnt.mov_to_ar_imm,
            "mov_to_ar_imm", (privcnt.mov_to_ar_imm*100L)/sum);
    if (privcnt.mov_to_ar_reg)
        printf("%10ld %s [%ld%%]\r\n", privcnt.mov_to_ar_reg,
            "mov_to_ar_reg", (privcnt.mov_to_ar_reg*100L)/sum);
    if (privcnt.ssm)
        printf("%10ld %s [%ld%%]\r\n", privcnt.ssm,
            "ssm", (privcnt.ssm*100L)/sum);
    if (privcnt.rsm)
        printf("%10ld %s [%ld%%]\r\n", privcnt.rsm,
            "rsm", (privcnt.rsm*100L)/sum);
    if (privcnt.rfi)
        printf("%10ld %s [%ld%%]\r\n", privcnt.rfi,
            "rfi", (privcnt.rfi*100L)/sum);
    if (privcnt.bsw0)
        printf("%10ld %s [%ld%%]\r\n", privcnt.bsw0,
            "bsw0", (privcnt.bsw0*100L)/sum);
    if (privcnt.bsw1)
        printf("%10ld %s [%ld%%]\r\n", privcnt.bsw1,
            "bsw1", (privcnt.bsw1*100L)/sum);
    if (privcnt.cover)
        printf("%10ld %s [%ld%%]\r\n", privcnt.cover,
            "cover", (privcnt.cover*100L)/sum);
    for (i=0; i < 64; i++) if (privcnt.Mpriv_cnt[i]) {
        if (!Mpriv_str[i]) printf("PRIVSTRING NULL!!\r\n");
        else printf("%10ld %s [%ld%%]\r\n", privcnt.Mpriv_cnt[i],
            Mpriv_str[i], (privcnt.Mpriv_cnt[i]*100L)/sum);
        if (i == 0x24) { // mov from CR
            printf(" [");
            for (j=0; j < 128; j++) if (from_cr_cnt[j]) {
                if (!cr_str[j])
                    printf("PRIVSTRING NULL!!\r\n");
                printf("%s(%ld),",cr_str[j],from_cr_cnt[j]);
            }
            printf("]\r\n");
        }
        else if (i == 0x2c) { // mov to CR
            printf(" [");
            for (j=0; j < 128; j++) if (to_cr_cnt[j]) {
                if (!cr_str[j])
                    printf("PRIVSTRING NULL!!\r\n");
                printf("%s(%ld),",cr_str[j],to_cr_cnt[j]);
            }
            printf("]\r\n");
        }
    }
}
void zero_privop_counts(void)
{
    int i, j;

    printf("Zeroing privop statistics\r\n");
    privcnt.mov_to_ar_imm = 0; privcnt.mov_to_ar_reg = 0;
    privcnt.ssm = 0; privcnt.rsm = 0;
    privcnt.rfi = 0; privcnt.bsw0 = 0;
    privcnt.bsw1 = 0; privcnt.cover = 0;
    for (i=0; i < 64; i++) privcnt.Mpriv_cnt[i] = 0;
    for (j=0; j < 128; j++) from_cr_cnt[j] = 0;
    for (j=0; j < 128; j++) to_cr_cnt[j] = 0;
}