ia64/xen-unstable

xen/arch/ia64/xen/privop.c @ 15893:f3173d151e1d

[IA64] debug_op hypercall and debug points

Add debug_op hypercall.
Add many debug points.

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author   Alex Williamson <alex.williamson@hp.com>
date     Fri Sep 21 11:03:54 2007 -0600
parents  b5488dee14af

/*
 * Privileged operation "API" handling functions.
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *      Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/processor.h>
#include <asm/delay.h>	// Debug only
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <asm/bundle.h>
#include <asm/debugger.h>
#include <xen/perfc.h>

static const long priv_verbose = 0;

/* Set to 1 to handle privified instructions from the privify tool. */
#ifndef CONFIG_PRIVIFY
static const int privify_en = 0;
#else
static const int privify_en = 1;
#endif
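
/*
 * When privify_en is set, instructions rewritten by the privify tool are
 * accepted as well.  The tool encodes the original operand in a general
 * register number above 63, so the handlers below treat r(N) with N > 63
 * as r(N-64) of a related, otherwise non-trapping operation (fc, ttag,
 * thash, reads of kr/cpuid/pmd); see the NOTE comments on the individual
 * handlers.
 */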
/**************************************************************************
Privileged operation emulation routines
**************************************************************************/
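
/*
 * rfi emulation bails out when both the guest's virtual ifs and the
 * machine cr.ifs carry a valid current-frame mask ("double uncover");
 * such guests are expected to use the rfi hyperprivop instead.
 */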
static IA64FAULT priv_rfi(VCPU * vcpu, INST64 inst)
{
	REGS *regs = vcpu_regs(vcpu);
	if (PSCB(vcpu, ifs) > 0x8000000000000000UL
	    && regs->cr_ifs > 0x8000000000000000UL) {
		panic_domain(regs,
			     "rfi emulation with double uncover is "
			     "impossible - use hyperprivop\n"
			     " ip=0x%lx vifs=0x%lx ifs=0x%lx\n",
			     regs->cr_iip, PSCB(vcpu, ifs), regs->cr_ifs);
	}
	return vcpu_rfi(vcpu);
}

static IA64FAULT priv_bsw0(VCPU * vcpu, INST64 inst)
{
	return vcpu_bsw0(vcpu);
}

static IA64FAULT priv_bsw1(VCPU * vcpu, INST64 inst)
{
	return vcpu_bsw1(vcpu);
}

static IA64FAULT priv_cover(VCPU * vcpu, INST64 inst)
{
	return vcpu_cover(vcpu);
}
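
/*
 * For the TLB purge instructions the purge size is encoded in bits 7:2
 * of r2 (log2 of the address range), hence the (r2 & 0xfc) >> 2
 * extraction below.  vcpu_ptc_l/vcpu_ptr_* take the log2 value directly,
 * while vcpu_ptc_g/vcpu_ptc_ga expect the range in bytes (1 << log2).
 */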
static IA64FAULT priv_ptc_l(VCPU * vcpu, INST64 inst)
{
	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
	u64 log_range;

	log_range = ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
	return vcpu_ptc_l(vcpu, vadr, log_range);
}

static IA64FAULT priv_ptc_e(VCPU * vcpu, INST64 inst)
{
	unsigned int src = inst.M28.r3;

	// NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
	if (privify_en && src > 63)
		return vcpu_fc(vcpu, vcpu_get_gr(vcpu, src - 64));
	return vcpu_ptc_e(vcpu, vcpu_get_gr(vcpu, src));
}

static IA64FAULT priv_ptc_g(VCPU * vcpu, INST64 inst)
{
	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
	u64 addr_range;

	addr_range = 1 << ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
	return vcpu_ptc_g(vcpu, vadr, addr_range);
}

static IA64FAULT priv_ptc_ga(VCPU * vcpu, INST64 inst)
{
	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
	u64 addr_range;

	addr_range = 1 << ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
	return vcpu_ptc_ga(vcpu, vadr, addr_range);
}

static IA64FAULT priv_ptr_d(VCPU * vcpu, INST64 inst)
{
	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
	u64 log_range;

	log_range = (vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2;
	return vcpu_ptr_d(vcpu, vadr, log_range);
}

static IA64FAULT priv_ptr_i(VCPU * vcpu, INST64 inst)
{
	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
	u64 log_range;

	log_range = (vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2;
	return vcpu_ptr_i(vcpu, vadr, log_range);
}

static IA64FAULT priv_tpa(VCPU * vcpu, INST64 inst)
{
	u64 padr;
	unsigned int fault;
	unsigned int src = inst.M46.r3;

	// NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
	if (privify_en && src > 63)
		fault = vcpu_ttag(vcpu, vcpu_get_gr(vcpu, src - 64), &padr);
	else
		fault = vcpu_tpa(vcpu, vcpu_get_gr(vcpu, src), &padr);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M46.r1, padr, 0);
	else
		return fault;
}

static IA64FAULT priv_tak(VCPU * vcpu, INST64 inst)
{
	u64 key;
	unsigned int fault;
	unsigned int src = inst.M46.r3;

	// NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
	if (privify_en && src > 63)
		fault = vcpu_thash(vcpu, vcpu_get_gr(vcpu, src - 64), &key);
	else
		fault = vcpu_tak(vcpu, vcpu_get_gr(vcpu, src), &key);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M46.r1, key, 0);
	else
		return fault;
}

/************************************
 * Insert translation register/cache
 ************************************/
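
/*
 * itr/itc emulation: the translation's itir and ifa are not instruction
 * operands; like the real instructions, the handlers read them from the
 * (virtualized) cr.itir and cr.ifa that the guest set up beforehand.
 */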
static IA64FAULT priv_itr_d(VCPU * vcpu, INST64 inst)
{
	u64 fault, itir, ifa, pte, slot;

	//if (!vcpu_get_psr_ic(vcpu))
	//	return IA64_ILLOP_FAULT;
	fault = vcpu_get_itir(vcpu, &itir);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	fault = vcpu_get_ifa(vcpu, &ifa);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	pte = vcpu_get_gr(vcpu, inst.M42.r2);
	slot = vcpu_get_gr(vcpu, inst.M42.r3);

	return vcpu_itr_d(vcpu, slot, pte, itir, ifa);
}

static IA64FAULT priv_itr_i(VCPU * vcpu, INST64 inst)
{
	u64 fault, itir, ifa, pte, slot;

	//if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
	fault = vcpu_get_itir(vcpu, &itir);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	fault = vcpu_get_ifa(vcpu, &ifa);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	pte = vcpu_get_gr(vcpu, inst.M42.r2);
	slot = vcpu_get_gr(vcpu, inst.M42.r3);

	return vcpu_itr_i(vcpu, slot, pte, itir, ifa);
}

static IA64FAULT priv_itc_d(VCPU * vcpu, INST64 inst)
{
	u64 fault, itir, ifa, pte;

	//if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
	fault = vcpu_get_itir(vcpu, &itir);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	fault = vcpu_get_ifa(vcpu, &ifa);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	pte = vcpu_get_gr(vcpu, inst.M41.r2);

	return vcpu_itc_d(vcpu, pte, itir, ifa);
}

static IA64FAULT priv_itc_i(VCPU * vcpu, INST64 inst)
{
	u64 fault, itir, ifa, pte;

	//if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
	fault = vcpu_get_itir(vcpu, &itir);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	fault = vcpu_get_ifa(vcpu, &ifa);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	pte = vcpu_get_gr(vcpu, inst.M41.r2);

	return vcpu_itc_i(vcpu, pte, itir, ifa);
}

/*************************************
 * Moves to semi-privileged registers
 *************************************/

static IA64FAULT priv_mov_to_ar_imm(VCPU * vcpu, INST64 inst)
{
	// I27 and M30 are identical for these fields
	u64 ar3 = inst.M30.ar3;
	u64 imm = vcpu_get_gr(vcpu, inst.M30.imm);
	return vcpu_set_ar(vcpu, ar3, imm);
}

static IA64FAULT priv_mov_to_ar_reg(VCPU * vcpu, INST64 inst)
{
	// I26 and M29 are identical for these fields
	u64 ar3 = inst.M29.ar3;

	if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8) {
		// privified mov from kr
		u64 val;
		if (vcpu_get_ar(vcpu, ar3, &val) != IA64_ILLOP_FAULT)
			return vcpu_set_gr(vcpu, inst.M29.r2 - 64, val, 0);
		else
			return IA64_ILLOP_FAULT;
	} else {
		u64 r2 = vcpu_get_gr(vcpu, inst.M29.r2);
		return vcpu_set_ar(vcpu, ar3, r2);
	}
}

/********************************
 * Moves to privileged registers
 ********************************/

static IA64FAULT priv_mov_to_pkr(VCPU * vcpu, INST64 inst)
{
	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	return vcpu_set_pkr(vcpu, r3, r2);
}

static IA64FAULT priv_mov_to_rr(VCPU * vcpu, INST64 inst)
{
	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	return vcpu_set_rr(vcpu, r3, r2);
}

static IA64FAULT priv_mov_to_dbr(VCPU * vcpu, INST64 inst)
{
	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	return vcpu_set_dbr(vcpu, r3, r2);
}

static IA64FAULT priv_mov_to_ibr(VCPU * vcpu, INST64 inst)
{
	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	return vcpu_set_ibr(vcpu, r3, r2);
}

static IA64FAULT priv_mov_to_pmc(VCPU * vcpu, INST64 inst)
{
	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	return vcpu_set_pmc(vcpu, r3, r2);
}

static IA64FAULT priv_mov_to_pmd(VCPU * vcpu, INST64 inst)
{
	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	return vcpu_set_pmd(vcpu, r3, r2);
}

static IA64FAULT priv_mov_to_cr(VCPU * vcpu, INST64 inst)
{
	u64 val = vcpu_get_gr(vcpu, inst.M32.r2);
	perfc_incra(mov_to_cr, inst.M32.cr3);
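	/*
	 * cr3 is the control register number: 0=dcr, 1=itm, 2=iva, 8=pta,
	 * 16-25 = interruption resources (ipsr..iha), 64 and up = external
	 * interrupt control registers.  Read-only registers (ivr, irr0-3)
	 * reject writes with IA64_ILLOP_FAULT.
	 */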
	switch (inst.M32.cr3) {
	case 0:
		return vcpu_set_dcr(vcpu, val);
	case 1:
		return vcpu_set_itm(vcpu, val);
	case 2:
		return vcpu_set_iva(vcpu, val);
	case 8:
		return vcpu_set_pta(vcpu, val);
	case 16:
		return vcpu_set_ipsr(vcpu, val);
	case 17:
		return vcpu_set_isr(vcpu, val);
	case 19:
		return vcpu_set_iip(vcpu, val);
	case 20:
		return vcpu_set_ifa(vcpu, val);
	case 21:
		return vcpu_set_itir(vcpu, val);
	case 22:
		return vcpu_set_iipa(vcpu, val);
	case 23:
		return vcpu_set_ifs(vcpu, val);
	case 24:
		return vcpu_set_iim(vcpu, val);
	case 25:
		return vcpu_set_iha(vcpu, val);
	case 64:
		return vcpu_set_lid(vcpu, val);
	case 65:
		return IA64_ILLOP_FAULT;
	case 66:
		return vcpu_set_tpr(vcpu, val);
	case 67:
		return vcpu_set_eoi(vcpu, val);
	case 68:
		return IA64_ILLOP_FAULT;
	case 69:
		return IA64_ILLOP_FAULT;
	case 70:
		return IA64_ILLOP_FAULT;
	case 71:
		return IA64_ILLOP_FAULT;
	case 72:
		return vcpu_set_itv(vcpu, val);
	case 73:
		return vcpu_set_pmv(vcpu, val);
	case 74:
		return vcpu_set_cmcv(vcpu, val);
	case 80:
		return vcpu_set_lrr0(vcpu, val);
	case 81:
		return vcpu_set_lrr1(vcpu, val);
	default:
		return IA64_ILLOP_FAULT;
	}
}
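
/*
 * rsm/ssm: the 24-bit PSR mask immediate is split across the M44
 * format's i (bit 23), i2 (bits 22:21) and imm (bits 20:0) fields, so it
 * is reassembled here before being handed to the vcpu layer.
 */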
static IA64FAULT priv_rsm(VCPU * vcpu, INST64 inst)
{
	u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
	return vcpu_reset_psr_sm(vcpu, imm24);
}

static IA64FAULT priv_ssm(VCPU * vcpu, INST64 inst)
{
	u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
	return vcpu_set_psr_sm(vcpu, imm24);
}

/**
 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
 */
static IA64FAULT priv_mov_to_psr(VCPU * vcpu, INST64 inst)
{
	u64 val = vcpu_get_gr(vcpu, inst.M35.r2);
	return vcpu_set_psr_l(vcpu, val);
}

/**********************************
 * Moves from privileged registers
 **********************************/

static IA64FAULT priv_mov_from_rr(VCPU * vcpu, INST64 inst)
{
	u64 val;
	IA64FAULT fault;
	u64 reg;

	reg = vcpu_get_gr(vcpu, inst.M43.r3);
	if (privify_en && inst.M43.r1 > 63) {
		// privified mov from cpuid
		fault = vcpu_get_cpuid(vcpu, reg, &val);
		if (fault == IA64_NO_FAULT)
			return vcpu_set_gr(vcpu, inst.M43.r1 - 64, val, 0);
	} else {
		fault = vcpu_get_rr(vcpu, reg, &val);
		if (fault == IA64_NO_FAULT)
			return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
	}
	return fault;
}

static IA64FAULT priv_mov_from_pkr(VCPU * vcpu, INST64 inst)
{
	u64 val;
	IA64FAULT fault;

	fault = vcpu_get_pkr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
	else
		return fault;
}

static IA64FAULT priv_mov_from_dbr(VCPU * vcpu, INST64 inst)
{
	u64 val;
	IA64FAULT fault;

	fault = vcpu_get_dbr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
	else
		return fault;
}

static IA64FAULT priv_mov_from_ibr(VCPU * vcpu, INST64 inst)
{
	u64 val;
	IA64FAULT fault;

	fault = vcpu_get_ibr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
	else
		return fault;
}

static IA64FAULT priv_mov_from_pmc(VCPU * vcpu, INST64 inst)
{
	u64 val;
	IA64FAULT fault;
	u64 reg;

	reg = vcpu_get_gr(vcpu, inst.M43.r3);
	if (privify_en && inst.M43.r1 > 63) {
		// privified mov from pmd
		fault = vcpu_get_pmd(vcpu, reg, &val);
		if (fault == IA64_NO_FAULT)
			return vcpu_set_gr(vcpu, inst.M43.r1 - 64, val, 0);
	} else {
		fault = vcpu_get_pmc(vcpu, reg, &val);
		if (fault == IA64_NO_FAULT)
			return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
	}
	return fault;
}
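
/*
 * Read control register <cr> into 'val' and, on success, deposit it in
 * the target general register 'tgt'; the expression evaluates to the
 * resulting fault code.  'fault', 'val' and 'tgt' are locals of the
 * calling function.
 */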
#define cr_get(cr) \
	((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
		vcpu_set_gr(vcpu, tgt, val, 0) : fault;

static IA64FAULT priv_mov_from_cr(VCPU * vcpu, INST64 inst)
{
	u64 tgt = inst.M33.r1;
	u64 val;
	IA64FAULT fault;

	perfc_incra(mov_from_cr, inst.M33.cr3);
	switch (inst.M33.cr3) {
	case 0:
		return cr_get(dcr);
	case 1:
		return cr_get(itm);
	case 2:
		return cr_get(iva);
	case 8:
		return cr_get(pta);
	case 16:
		return cr_get(ipsr);
	case 17:
		return cr_get(isr);
	case 19:
		return cr_get(iip);
	case 20:
		return cr_get(ifa);
	case 21:
		return cr_get(itir);
	case 22:
		return cr_get(iipa);
	case 23:
		return cr_get(ifs);
	case 24:
		return cr_get(iim);
	case 25:
		return cr_get(iha);
	case 64:
		return cr_get(lid);
	case 65:
		return cr_get(ivr);
	case 66:
		return cr_get(tpr);
	case 67:
		return vcpu_set_gr(vcpu, tgt, 0L, 0);
	case 68:
		return cr_get(irr0);
	case 69:
		return cr_get(irr1);
	case 70:
		return cr_get(irr2);
	case 71:
		return cr_get(irr3);
	case 72:
		return cr_get(itv);
	case 73:
		return cr_get(pmv);
	case 74:
		return cr_get(cmcv);
	case 80:
		return cr_get(lrr0);
	case 81:
		return cr_get(lrr1);
	default:
		return IA64_ILLOP_FAULT;
	}
	return IA64_ILLOP_FAULT;
}

static IA64FAULT priv_mov_from_psr(VCPU * vcpu, INST64 inst)
{
	u64 tgt = inst.M33.r1;
	u64 val;
	IA64FAULT fault;

	fault = vcpu_get_psr_masked(vcpu, &val);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, tgt, val, 0);
	else
		return fault;
}

/**************************************************************************
Privileged operation decode and dispatch routines
**************************************************************************/
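
/*
 * Instruction unit type of each slot for the 32 IA-64 bundle templates
 * (index = template field, 0x00-0x1f); used to pick the decode path for
 * the faulting slot.
 */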
static const IA64_SLOT_TYPE slot_types[0x20][3] = {
	{M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
	{M, I, ILLEGAL}, {M, I, ILLEGAL},
	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
	{M, M, I}, {M, M, I}, {M, M, I}, {M, M, I},
	{M, F, I}, {M, F, I},
	{M, M, F}, {M, M, F},
	{M, I, B}, {M, I, B},
	{M, B, B}, {M, B, B},
	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
	{B, B, B}, {B, B, B},
	{M, M, B}, {M, M, B},
	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
	{M, F, B}, {M, F, B},
	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL}
};

// pointer to privileged emulation function
typedef IA64FAULT(*PPEFCN) (VCPU * vcpu, INST64 inst);
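
/*
 * Emulation handlers for major-opcode-1 M-unit privileged instructions,
 * indexed by the x6 opcode extension.  A zero entry means the opcode is
 * not emulated here.
 */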
static const PPEFCN Mpriv_funcs[64] = {
	priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
	priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
	0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
	priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
	priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr,
	priv_mov_from_pkr,
	priv_mov_from_pmc, 0, 0, 0,
	0, 0, 0, 0,
	0, 0, priv_tpa, priv_tak,
	0, 0, 0, 0,
	priv_mov_from_cr, priv_mov_from_psr, 0, 0,
	0, 0, 0, 0,
	priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
	0, 0, 0, 0,
	priv_ptc_e, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0
};

static IA64FAULT priv_handle_op(VCPU * vcpu, REGS * regs)
{
	IA64_BUNDLE bundle;
	int slot;
	IA64_SLOT_TYPE slot_type;
	INST64 inst;
	PPEFCN pfunc;
	unsigned long ipsr = regs->cr_ipsr;
	u64 iip = regs->cr_iip;
	int x6;

	// make a local copy of the bundle containing the privop
	if (!vcpu_get_domain_bundle(vcpu, regs, iip, &bundle)) {
		//return vcpu_force_data_miss(vcpu, regs->cr_iip);
		return vcpu_force_inst_miss(vcpu, regs->cr_iip);
	}
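	/*
	 * ipsr.ri selects the faulting slot.  The three 41-bit instruction
	 * slots sit above the 5-bit template in the 128-bit bundle: slot 0
	 * at bits 45:5, slot 1 straddling the two 64-bit words, slot 2 at
	 * bits 127:87.
	 */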
	slot = ((struct ia64_psr *)&ipsr)->ri;
	if (slot == 0)
		inst.inst = (bundle.i64[0] >> 5) & MASK_41;
	else if (slot == 1)
		inst.inst =
		    ((bundle.i64[0] >> 46) | bundle.i64[1] << 18) & MASK_41;
	else if (slot == 2)
		inst.inst = (bundle.i64[1] >> 23) & MASK_41;
	else
		panic_domain(regs,
			     "priv_handle_op: illegal slot: %d\n", slot);

	slot_type = slot_types[bundle.template][slot];
	if (priv_verbose) {
		printk("priv_handle_op: checking bundle at 0x%lx "
		       "(op=0x%016lx) slot %d (type=%d)\n",
		       iip, (u64) inst.inst, slot, slot_type);
	}
	if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
		// break instr for privified cover
	} else if (ia64_get_cpl(ipsr) > CONFIG_CPL0_EMUL)
		return IA64_ILLOP_FAULT;

	debugger_event(XEN_IA64_DEBUG_ON_PRIVOP);

	switch (slot_type) {
	case M:
		if (inst.generic.major == 0) {
#if 0
			if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
				privcnt.cover++;
				return priv_cover(vcpu, inst);
			}
#endif
			if (inst.M29.x3 != 0)
				break;
			if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
				perfc_incr(mov_to_ar_imm);
				return priv_mov_to_ar_imm(vcpu, inst);
			}
			if (inst.M44.x4 == 6) {
				perfc_incr(ssm);
				return priv_ssm(vcpu, inst);
			}
			if (inst.M44.x4 == 7) {
				perfc_incr(rsm);
				return priv_rsm(vcpu, inst);
			}
			break;
		} else if (inst.generic.major != 1)
			break;
		x6 = inst.M29.x6;
		if (x6 == 0x2a) {
			if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8)
				perfc_incr(mov_from_ar); // privified mov from kr
			else
				perfc_incr(mov_to_ar_reg);
			return priv_mov_to_ar_reg(vcpu, inst);
		}
		if (inst.M29.x3 != 0)
			break;
		if (!(pfunc = Mpriv_funcs[x6]))
			break;
		if (x6 == 0x1e || x6 == 0x1f) {	// tpa or tak are "special"
			if (privify_en && inst.M46.r3 > 63) {
				if (x6 == 0x1e)
					x6 = 0x1b;
				else
					x6 = 0x1a;
			}
		}
		if (privify_en && x6 == 52 && inst.M28.r3 > 63)
			perfc_incr(fc);
		else if (privify_en && x6 == 16 && inst.M43.r3 > 63)
			perfc_incr(cpuid);
		else
			perfc_incra(misc_privop, x6);
		return (*pfunc) (vcpu, inst);
		break;
	case B:
		if (inst.generic.major != 0)
			break;
		if (inst.B8.x6 == 0x08) {
			IA64FAULT fault;
			perfc_incr(rfi);
			fault = priv_rfi(vcpu, inst);
			if (fault == IA64_NO_FAULT)
				fault = IA64_RFI_IN_PROGRESS;
			return fault;
		}
		if (inst.B8.x6 == 0x0c) {
			perfc_incr(bsw0);
			return priv_bsw0(vcpu, inst);
		}
		if (inst.B8.x6 == 0x0d) {
			perfc_incr(bsw1);
			return priv_bsw1(vcpu, inst);
		}
		if (privify_en && inst.B8.x6 == 0x0) {
			// break instr for privified cover
			perfc_incr(cover);
			return priv_cover(vcpu, inst);
		}
		break;
	case I:
		if (inst.generic.major != 0)
			break;
#if 0
		if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
			perfc_incr(cover);
			return priv_cover(vcpu, inst);
		}
#endif
		if (inst.I26.x3 != 0)
			break;	// I26.x3 == I27.x3
		if (inst.I26.x6 == 0x2a) {
			if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8)
				perfc_incr(mov_from_ar); // privified mov from kr
			else
				perfc_incr(mov_to_ar_reg);
			return priv_mov_to_ar_reg(vcpu, inst);
		}
		if (inst.I27.x6 == 0x0a) {
			perfc_incr(mov_to_ar_imm);
			return priv_mov_to_ar_imm(vcpu, inst);
		}
		break;
	default:
		break;
	}
	//printk("We who are about do die salute you\n");
	printk("priv_handle_op: can't handle privop at 0x%lx (op=0x%016lx) "
	       "slot %d (type=%d), ipsr=0x%lx\n",
	       iip, (u64) inst.inst, slot, slot_type, ipsr);
	//printk("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
	//thread_mozambique("privop fault\n");
	return IA64_ILLOP_FAULT;
}

/** Emulate a privileged operation.
 *
 * This should probably return 0 on success and the "trap number"
 * (e.g. illegal operation for bad register, priv op for an
 * instruction that isn't allowed, etc.) on "failure".
 *
 * @param vcpu virtual cpu
 * @param regs guest register frame
 * @param isr interruption status register (cr.isr) value
 * @return fault
 */
IA64FAULT priv_emulate(VCPU * vcpu, REGS * regs, u64 isr)
{
	IA64FAULT fault;
	u64 isrcode = (isr >> 4) & 0xf;

	// handle privops masked as illops? and breaks (6)
	if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
		printk("priv_emulate: isrcode != 0, 1, 2 or 6\n");
		printk("priv_emulate: returning ILLOP, not implemented!\n");
		while (1) ;
		return IA64_ILLOP_FAULT;
	}
	// it's OK for a privified-cover to be executed in user-land
	fault = priv_handle_op(vcpu, regs);
	if ((fault == IA64_NO_FAULT) || (fault == IA64_EXTINT_VECTOR)) {
		// success!!
		// update iip/ipsr to point to the next instruction
		(void)vcpu_increment_iip(vcpu);
	}
	if (fault == IA64_ILLOP_FAULT)
		printk("priv_emulate: priv_handle_op fails, "
		       "isr=0x%lx iip=%lx\n", isr, regs->cr_iip);
	return fault;
}

/* hyperprivops are generally executed in assembly (with physical psr.ic off)
 * so this code is primarily used for debugging them */
int ia64_hyperprivop(unsigned long iim, REGS * regs)
{
	struct vcpu *v = current;
	u64 val;
	u64 itir, ifa;

	if (!iim || iim > HYPERPRIVOP_MAX) {
		panic_domain(regs, "bad hyperprivop: iim=%lx, iip=0x%lx\n",
			     iim, regs->cr_iip);
		return 1;
	}
	perfc_incra(slow_hyperprivop, iim);

	debugger_event(XEN_IA64_DEBUG_ON_PRIVOP);
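
	/*
	 * Returning 1 lets the caller step iip past the break instruction
	 * that raised the hyperprivop; RFI returns 0 because vcpu_rfi()
	 * has already redirected iip (see the "don't update iip" note).
	 */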
	switch (iim) {
	case HYPERPRIVOP_RFI:
		vcpu_rfi(v);
		return 0;	// don't update iip
	case HYPERPRIVOP_RSM_DT:
		vcpu_reset_psr_dt(v);
		return 1;
	case HYPERPRIVOP_SSM_DT:
		vcpu_set_psr_dt(v);
		return 1;
	case HYPERPRIVOP_COVER:
		vcpu_cover(v);
		return 1;
	case HYPERPRIVOP_ITC_D:
		vcpu_get_itir(v, &itir);
		vcpu_get_ifa(v, &ifa);
		vcpu_itc_d(v, regs->r8, itir, ifa);
		return 1;
	case HYPERPRIVOP_ITC_I:
		vcpu_get_itir(v, &itir);
		vcpu_get_ifa(v, &ifa);
		vcpu_itc_i(v, regs->r8, itir, ifa);
		return 1;
	case HYPERPRIVOP_SSM_I:
		vcpu_set_psr_i(v);
		return 1;
	case HYPERPRIVOP_GET_IVR:
		vcpu_get_ivr(v, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_GET_TPR:
		vcpu_get_tpr(v, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_SET_TPR:
		vcpu_set_tpr(v, regs->r8);
		return 1;
	case HYPERPRIVOP_EOI:
		vcpu_set_eoi(v, 0L);
		return 1;
	case HYPERPRIVOP_SET_ITM:
		vcpu_set_itm(v, regs->r8);
		return 1;
	case HYPERPRIVOP_THASH:
		vcpu_thash(v, regs->r8, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_PTC_GA:
		vcpu_ptc_ga(v, regs->r8, (1L << ((regs->r9 & 0xfc) >> 2)));
		return 1;
	case HYPERPRIVOP_ITR_D:
		vcpu_get_itir(v, &itir);
		vcpu_get_ifa(v, &ifa);
		vcpu_itr_d(v, regs->r8, regs->r9, itir, ifa);
		return 1;
	case HYPERPRIVOP_GET_RR:
		vcpu_get_rr(v, regs->r8, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_SET_RR:
		vcpu_set_rr(v, regs->r8, regs->r9);
		return 1;
	case HYPERPRIVOP_SET_KR:
		vcpu_set_ar(v, regs->r8, regs->r9);
		return 1;
	case HYPERPRIVOP_FC:
		vcpu_fc(v, regs->r8);
		return 1;
	case HYPERPRIVOP_GET_CPUID:
		vcpu_get_cpuid(v, regs->r8, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_GET_PMD:
		vcpu_get_pmd(v, regs->r8, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_GET_EFLAG:
		vcpu_get_ar(v, 24, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_SET_EFLAG:
		vcpu_set_ar(v, 24, regs->r8);
		return 1;
	case HYPERPRIVOP_RSM_BE:
		vcpu_reset_psr_sm(v, IA64_PSR_BE);
		return 1;
	case HYPERPRIVOP_GET_PSR:
		vcpu_get_psr_masked(v, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_SET_RR0_TO_RR4:
		vcpu_set_rr0_to_rr4(v, regs->r8, regs->r9, regs->r10,
				    regs->r11, regs->r14);
		return 1;
	}
	return 0;
}