ia64/xen-unstable

view xen/arch/ia64/xen/privop.c @ 10704:c8bc76d877e0

[IA64] Fix the code-fetch method when an FP fault occurs on the VTi side

This patch uses __vmx_get_domain_bundle to fetch the faulting instruction
bundle when an FP fault occurs on the VTi side.

Signed-off-by: Zhang xiantao <xiantao.zhang@intel.com>
author awilliam@xenbuild.aw
date Mon Jul 24 13:48:12 2006 -0600 (2006-07-24)
parents 8d501f39286c
children 86e5d8458c08
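
For context: the non-VTi path in the listing below already contains the code-fetch pattern this change brings to the VTi side. In priv_handle_op(), the handler copies the bundle at cr.iip out of the guest with vcpu_get_domain_bundle() and then extracts the 41-bit instruction for the slot named by ipsr.ri. The following is a minimal sketch of that pattern in isolation; the VTi-side helper named in the commit message (__vmx_get_domain_bundle) is assumed to behave like vcpu_get_domain_bundle() here, and the wrapper name below is invented for illustration (types come from the headers the file already includes).

/* Hypothetical sketch: fetch and decode the instruction that faulted.
 * Mirrors the pattern used by priv_handle_op() in the listing; the
 * VTi-side helper and this wrapper's name are assumptions. */
static int fetch_faulting_inst(VCPU *vcpu, REGS *regs, INST64 *inst)
{
	IA64_BUNDLE bundle;
	UINT64 iip = regs->cr_iip;
	int slot = ((struct ia64_psr *)&regs->cr_ipsr)->ri;

	// copy the bundle containing the faulting instruction out of the guest
	if (!vcpu_get_domain_bundle(vcpu, regs, iip, &bundle))
		return 0;	// not mapped; caller reflects an instruction miss

	// extract the 41-bit instruction for the slot indicated by ipsr.ri
	if (slot == 0)
		inst->inst = (bundle.i64[0] >> 5) & MASK_41;
	else if (slot == 1)
		inst->inst = ((bundle.i64[0] >> 46) | (bundle.i64[1] << 18)) & MASK_41;
	else
		inst->inst = (bundle.i64[1] >> 23) & MASK_41;
	return 1;
}
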
line source
1 /*
2 * Privileged operation "API" handling functions.
3 *
4 * Copyright (C) 2004 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <asm/privop.h>
10 #include <asm/vcpu.h>
11 #include <asm/processor.h>
12 #include <asm/delay.h> // Debug only
13 #include <asm/dom_fw.h>
14 #include <asm/vhpt.h>
15 #include <asm/bundle.h>
16 #include <asm/privop_stat.h>
18 long priv_verbose=0;
19 unsigned long privop_trace = 0;
21 /* Set to 1 to handle privified instructions from the privify tool. */
22 #ifndef CONFIG_PRIVIFY
23 static const int privify_en = 0;
24 #else
25 static const int privify_en = 1;
26 #endif
28 /**************************************************************************
29 Privileged operation emulation routines
30 **************************************************************************/
32 static IA64FAULT priv_rfi(VCPU *vcpu, INST64 inst)
33 {
34 return vcpu_rfi(vcpu);
35 }
37 static IA64FAULT priv_bsw0(VCPU *vcpu, INST64 inst)
38 {
39 return vcpu_bsw0(vcpu);
40 }
42 static IA64FAULT priv_bsw1(VCPU *vcpu, INST64 inst)
43 {
44 return vcpu_bsw1(vcpu);
45 }
47 static IA64FAULT priv_cover(VCPU *vcpu, INST64 inst)
48 {
49 return vcpu_cover(vcpu);
50 }
52 static IA64FAULT priv_ptc_l(VCPU *vcpu, INST64 inst)
53 {
54 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
55 UINT64 log_range;
57 log_range = ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
58 return vcpu_ptc_l(vcpu,vadr,log_range);
59 }
61 static IA64FAULT priv_ptc_e(VCPU *vcpu, INST64 inst)
62 {
63 UINT src = inst.M28.r3;
65 // NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
66 if (privify_en && src > 63)
67 return(vcpu_fc(vcpu,vcpu_get_gr(vcpu,src - 64)));
68 return vcpu_ptc_e(vcpu,vcpu_get_gr(vcpu,src));
69 }
71 static IA64FAULT priv_ptc_g(VCPU *vcpu, INST64 inst)
72 {
73 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
74 UINT64 addr_range;
76 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
77 return vcpu_ptc_g(vcpu,vadr,addr_range);
78 }
80 static IA64FAULT priv_ptc_ga(VCPU *vcpu, INST64 inst)
81 {
82 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
83 UINT64 addr_range;
85 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
86 return vcpu_ptc_ga(vcpu,vadr,addr_range);
87 }
89 static IA64FAULT priv_ptr_d(VCPU *vcpu, INST64 inst)
90 {
91 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
92 UINT64 log_range;
94 log_range = (vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2;
95 return vcpu_ptr_d(vcpu,vadr,log_range);
96 }
98 static IA64FAULT priv_ptr_i(VCPU *vcpu, INST64 inst)
99 {
100 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
101 UINT64 log_range;
103 log_range = (vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2;
104 return vcpu_ptr_i(vcpu,vadr,log_range);
105 }
107 static IA64FAULT priv_tpa(VCPU *vcpu, INST64 inst)
108 {
109 UINT64 padr;
110 UINT fault;
111 UINT src = inst.M46.r3;
113 // NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
114 if (privify_en && src > 63)
115 fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
116 else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
117 if (fault == IA64_NO_FAULT)
118 return vcpu_set_gr(vcpu, inst.M46.r1, padr, 0);
119 else return fault;
120 }
122 static IA64FAULT priv_tak(VCPU *vcpu, INST64 inst)
123 {
124 UINT64 key;
125 UINT fault;
126 UINT src = inst.M46.r3;
128 // NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
129 if (privify_en && src > 63)
130 fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
131 else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
132 if (fault == IA64_NO_FAULT)
133 return vcpu_set_gr(vcpu, inst.M46.r1, key,0);
134 else return fault;
135 }
137 /************************************
138 * Insert translation register/cache
139 ************************************/
141 static IA64FAULT priv_itr_d(VCPU *vcpu, INST64 inst)
142 {
143 UINT64 fault, itir, ifa, pte, slot;
145 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
146 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
147 return(IA64_ILLOP_FAULT);
148 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
149 return(IA64_ILLOP_FAULT);
150 pte = vcpu_get_gr(vcpu,inst.M42.r2);
151 slot = vcpu_get_gr(vcpu,inst.M42.r3);
153 return (vcpu_itr_d(vcpu,slot,pte,itir,ifa));
154 }
156 static IA64FAULT priv_itr_i(VCPU *vcpu, INST64 inst)
157 {
158 UINT64 fault, itir, ifa, pte, slot;
160 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
161 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
162 return(IA64_ILLOP_FAULT);
163 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
164 return(IA64_ILLOP_FAULT);
165 pte = vcpu_get_gr(vcpu,inst.M42.r2);
166 slot = vcpu_get_gr(vcpu,inst.M42.r3);
168 return (vcpu_itr_i(vcpu,slot,pte,itir,ifa));
169 }
171 static IA64FAULT priv_itc_d(VCPU *vcpu, INST64 inst)
172 {
173 UINT64 fault, itir, ifa, pte;
175 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
176 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
177 return(IA64_ILLOP_FAULT);
178 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
179 return(IA64_ILLOP_FAULT);
180 pte = vcpu_get_gr(vcpu,inst.M41.r2);
182 return (vcpu_itc_d(vcpu,pte,itir,ifa));
183 }
185 static IA64FAULT priv_itc_i(VCPU *vcpu, INST64 inst)
186 {
187 UINT64 fault, itir, ifa, pte;
189 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
190 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
191 return(IA64_ILLOP_FAULT);
192 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
193 return(IA64_ILLOP_FAULT);
194 pte = vcpu_get_gr(vcpu,inst.M41.r2);
196 return (vcpu_itc_i(vcpu,pte,itir,ifa));
197 }
199 /*************************************
200 * Moves to semi-privileged registers
201 *************************************/
203 static IA64FAULT priv_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
204 {
205 // I27 and M30 are identical for these fields
206 UINT64 ar3 = inst.M30.ar3;
207 UINT64 imm = vcpu_get_gr(vcpu,inst.M30.imm);
208 return (vcpu_set_ar(vcpu,ar3,imm));
209 }
211 static IA64FAULT priv_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
212 {
213 // I26 and M29 are identical for these fields
214 UINT64 ar3 = inst.M29.ar3;
216 if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8) {
217 // privified mov from kr
218 UINT64 val;
219 if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
220 return vcpu_set_gr(vcpu, inst.M29.r2-64, val,0);
221 else return IA64_ILLOP_FAULT;
222 }
223 else {
224 UINT64 r2 = vcpu_get_gr(vcpu,inst.M29.r2);
225 return (vcpu_set_ar(vcpu,ar3,r2));
226 }
227 }
229 /********************************
230 * Moves to privileged registers
231 ********************************/
233 static IA64FAULT priv_mov_to_pkr(VCPU *vcpu, INST64 inst)
234 {
235 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
236 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
237 return (vcpu_set_pkr(vcpu,r3,r2));
238 }
240 static IA64FAULT priv_mov_to_rr(VCPU *vcpu, INST64 inst)
241 {
242 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
243 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
244 return (vcpu_set_rr(vcpu,r3,r2));
245 }
247 static IA64FAULT priv_mov_to_dbr(VCPU *vcpu, INST64 inst)
248 {
249 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
250 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
251 return (vcpu_set_dbr(vcpu,r3,r2));
252 }
254 static IA64FAULT priv_mov_to_ibr(VCPU *vcpu, INST64 inst)
255 {
256 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
257 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
258 return (vcpu_set_ibr(vcpu,r3,r2));
259 }
261 static IA64FAULT priv_mov_to_pmc(VCPU *vcpu, INST64 inst)
262 {
263 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
264 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
265 return (vcpu_set_pmc(vcpu,r3,r2));
266 }
268 static IA64FAULT priv_mov_to_pmd(VCPU *vcpu, INST64 inst)
269 {
270 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
271 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
272 return (vcpu_set_pmd(vcpu,r3,r2));
273 }
275 static IA64FAULT priv_mov_to_cr(VCPU *vcpu, INST64 inst)
276 {
277 UINT64 val = vcpu_get_gr(vcpu, inst.M32.r2);
278 privcnt.to_cr_cnt[inst.M32.cr3]++;
279 switch (inst.M32.cr3) {
280 case 0: return vcpu_set_dcr(vcpu,val);
281 case 1: return vcpu_set_itm(vcpu,val);
282 case 2: return vcpu_set_iva(vcpu,val);
283 case 8: return vcpu_set_pta(vcpu,val);
284 case 16:return vcpu_set_ipsr(vcpu,val);
285 case 17:return vcpu_set_isr(vcpu,val);
286 case 19:return vcpu_set_iip(vcpu,val);
287 case 20:return vcpu_set_ifa(vcpu,val);
288 case 21:return vcpu_set_itir(vcpu,val);
289 case 22:return vcpu_set_iipa(vcpu,val);
290 case 23:return vcpu_set_ifs(vcpu,val);
291 case 24:return vcpu_set_iim(vcpu,val);
292 case 25:return vcpu_set_iha(vcpu,val);
293 case 64:return vcpu_set_lid(vcpu,val);
294 case 65:return IA64_ILLOP_FAULT;
295 case 66:return vcpu_set_tpr(vcpu,val);
296 case 67:return vcpu_set_eoi(vcpu,val);
297 case 68:return IA64_ILLOP_FAULT;
298 case 69:return IA64_ILLOP_FAULT;
299 case 70:return IA64_ILLOP_FAULT;
300 case 71:return IA64_ILLOP_FAULT;
301 case 72:return vcpu_set_itv(vcpu,val);
302 case 73:return vcpu_set_pmv(vcpu,val);
303 case 74:return vcpu_set_cmcv(vcpu,val);
304 case 80:return vcpu_set_lrr0(vcpu,val);
305 case 81:return vcpu_set_lrr1(vcpu,val);
306 default: return IA64_ILLOP_FAULT;
307 }
308 }
310 static IA64FAULT priv_rsm(VCPU *vcpu, INST64 inst)
311 {
312 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
313 return vcpu_reset_psr_sm(vcpu,imm24);
314 }
316 static IA64FAULT priv_ssm(VCPU *vcpu, INST64 inst)
317 {
318 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
319 return vcpu_set_psr_sm(vcpu,imm24);
320 }
322 /**
323 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
324 */
325 static IA64FAULT priv_mov_to_psr(VCPU *vcpu, INST64 inst)
326 {
327 UINT64 val = vcpu_get_gr(vcpu, inst.M35.r2);
328 return vcpu_set_psr_l(vcpu,val);
329 }
331 /**********************************
332 * Moves from privileged registers
333 **********************************/
335 static IA64FAULT priv_mov_from_rr(VCPU *vcpu, INST64 inst)
336 {
337 UINT64 val;
338 IA64FAULT fault;
339 UINT64 reg;
341 reg = vcpu_get_gr(vcpu,inst.M43.r3);
342 if (privify_en && inst.M43.r1 > 63) {
343 // privified mov from cpuid
344 fault = vcpu_get_cpuid(vcpu,reg,&val);
345 if (fault == IA64_NO_FAULT)
346 return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
347 }
348 else {
349 fault = vcpu_get_rr(vcpu,reg,&val);
350 if (fault == IA64_NO_FAULT)
351 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
352 }
353 return fault;
354 }
356 static IA64FAULT priv_mov_from_pkr(VCPU *vcpu, INST64 inst)
357 {
358 UINT64 val;
359 IA64FAULT fault;
361 fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
362 if (fault == IA64_NO_FAULT)
363 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
364 else return fault;
365 }
367 static IA64FAULT priv_mov_from_dbr(VCPU *vcpu, INST64 inst)
368 {
369 UINT64 val;
370 IA64FAULT fault;
372 fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
373 if (fault == IA64_NO_FAULT)
374 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
375 else return fault;
376 }
378 static IA64FAULT priv_mov_from_ibr(VCPU *vcpu, INST64 inst)
379 {
380 UINT64 val;
381 IA64FAULT fault;
383 fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
384 if (fault == IA64_NO_FAULT)
385 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
386 else return fault;
387 }
389 static IA64FAULT priv_mov_from_pmc(VCPU *vcpu, INST64 inst)
390 {
391 UINT64 val;
392 IA64FAULT fault;
393 UINT64 reg;
395 reg = vcpu_get_gr(vcpu,inst.M43.r3);
396 if (privify_en && inst.M43.r1 > 63) {
397 // privified mov from pmd
398 fault = vcpu_get_pmd(vcpu,reg,&val);
399 if (fault == IA64_NO_FAULT)
400 return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
401 }
402 else {
403 fault = vcpu_get_pmc(vcpu,reg,&val);
404 if (fault == IA64_NO_FAULT)
405 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
406 }
407 return fault;
408 }
410 #define cr_get(cr) \
411 ((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
412 vcpu_set_gr(vcpu, tgt, val, 0) : fault;
414 static IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
415 {
416 UINT64 tgt = inst.M33.r1;
417 UINT64 val;
418 IA64FAULT fault;
420 privcnt.from_cr_cnt[inst.M33.cr3]++;
421 switch (inst.M33.cr3) {
422 case 0: return cr_get(dcr);
423 case 1: return cr_get(itm);
424 case 2: return cr_get(iva);
425 case 8: return cr_get(pta);
426 case 16:return cr_get(ipsr);
427 case 17:return cr_get(isr);
428 case 19:return cr_get(iip);
429 case 20:return cr_get(ifa);
430 case 21:return cr_get(itir);
431 case 22:return cr_get(iipa);
432 case 23:return cr_get(ifs);
433 case 24:return cr_get(iim);
434 case 25:return cr_get(iha);
435 case 64:return cr_get(lid);
436 case 65:return cr_get(ivr);
437 case 66:return cr_get(tpr);
438 case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
439 case 68:return cr_get(irr0);
440 case 69:return cr_get(irr1);
441 case 70:return cr_get(irr2);
442 case 71:return cr_get(irr3);
443 case 72:return cr_get(itv);
444 case 73:return cr_get(pmv);
445 case 74:return cr_get(cmcv);
446 case 80:return cr_get(lrr0);
447 case 81:return cr_get(lrr1);
448 default: return IA64_ILLOP_FAULT;
449 }
450 return IA64_ILLOP_FAULT;
451 }
453 static IA64FAULT priv_mov_from_psr(VCPU *vcpu, INST64 inst)
454 {
455 UINT64 tgt = inst.M33.r1;
456 UINT64 val;
457 IA64FAULT fault;
459 if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
460 return vcpu_set_gr(vcpu, tgt, val, 0);
461 else return fault;
462 }
464 /**************************************************************************
465 Privileged operation decode and dispatch routines
466 **************************************************************************/
468 static const IA64_SLOT_TYPE slot_types[0x20][3] = {
469 {M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
470 {M, I, ILLEGAL}, {M, I, ILLEGAL},
471 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
472 {M, M, I}, {M, M, I}, {M, M, I}, {M, M, I},
473 {M, F, I}, {M, F, I},
474 {M, M, F}, {M, M, F},
475 {M, I, B}, {M, I, B},
476 {M, B, B}, {M, B, B},
477 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
478 {B, B, B}, {B, B, B},
479 {M, M, B}, {M, M, B},
480 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
481 {M, F, B}, {M, F, B},
482 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL}
483 };
485 // pointer to privileged emulation function
486 typedef IA64FAULT (*PPEFCN)(VCPU *vcpu, INST64 inst);
488 static const PPEFCN Mpriv_funcs[64] = {
489 priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
490 priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
491 0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
492 priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
493 priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr, priv_mov_from_pkr,
494 priv_mov_from_pmc, 0, 0, 0,
495 0, 0, 0, 0,
496 0, 0, priv_tpa, priv_tak,
497 0, 0, 0, 0,
498 priv_mov_from_cr, priv_mov_from_psr, 0, 0,
499 0, 0, 0, 0,
500 priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
501 0, 0, 0, 0,
502 priv_ptc_e, 0, 0, 0,
503 0, 0, 0, 0, 0, 0, 0, 0
504 };
506 static IA64FAULT
507 priv_handle_op(VCPU *vcpu, REGS *regs, int privlvl)
508 {
509 IA64_BUNDLE bundle;
510 int slot;
511 IA64_SLOT_TYPE slot_type;
512 INST64 inst;
513 PPEFCN pfunc;
514 unsigned long ipsr = regs->cr_ipsr;
515 UINT64 iip = regs->cr_iip;
516 int x6;
518 // make a local copy of the bundle containing the privop
519 if (!vcpu_get_domain_bundle(vcpu, regs, iip, &bundle)) {
520 //return vcpu_force_data_miss(vcpu, regs->cr_iip);
521 return vcpu_force_inst_miss(vcpu, regs->cr_iip);
522 }
524 #if 0
525 if (iip==0xa000000100001820) {
526 static int firstpagefault = 1;
527 if (firstpagefault) {
528 printf("*** First time to domain page fault!\n"); firstpagefault=0;
529 }
530 }
531 #endif
532 if (privop_trace) {
533 static long i = 400;
534 //if (i > 0) printf("priv_handle_op: at 0x%lx\n",iip);
535 if (i > 0) printf("priv_handle_op: privop trace at 0x%lx, itc=%lx, itm=%lx\n",
536 iip,ia64_get_itc(),ia64_get_itm());
537 i--;
538 }
539 slot = ((struct ia64_psr *)&ipsr)->ri;
540 if (!slot) inst.inst = (bundle.i64[0]>>5) & MASK_41;
541 else if (slot == 1)
542 inst.inst = ((bundle.i64[0]>>46) | bundle.i64[1]<<18) & MASK_41;
543 else if (slot == 2) inst.inst = (bundle.i64[1]>>23) & MASK_41;
544 else printf("priv_handle_op: illegal slot: %d\n", slot);
546 slot_type = slot_types[bundle.template][slot];
547 if (priv_verbose) {
548 printf("priv_handle_op: checking bundle at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
549 iip, (UINT64)inst.inst, slot, slot_type);
550 }
551 if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
552 // break instr for privified cover
553 }
554 else if (privlvl != 2) return (IA64_ILLOP_FAULT);
555 switch (slot_type) {
556 case M:
557 if (inst.generic.major == 0) {
558 #if 0
559 if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
560 privcnt.cover++;
561 return priv_cover(vcpu,inst);
562 }
563 #endif
564 if (inst.M29.x3 != 0) break;
565 if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
566 privcnt.mov_to_ar_imm++;
567 return priv_mov_to_ar_imm(vcpu,inst);
568 }
569 if (inst.M44.x4 == 6) {
570 privcnt.ssm++;
571 return priv_ssm(vcpu,inst);
572 }
573 if (inst.M44.x4 == 7) {
574 privcnt.rsm++;
575 return priv_rsm(vcpu,inst);
576 }
577 break;
578 }
579 else if (inst.generic.major != 1) break;
580 x6 = inst.M29.x6;
581 if (x6 == 0x2a) {
582 if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8)
583 privcnt.mov_from_ar++; // privified mov from kr
584 else privcnt.mov_to_ar_reg++;
585 return priv_mov_to_ar_reg(vcpu,inst);
586 }
587 if (inst.M29.x3 != 0) break;
588 if (!(pfunc = Mpriv_funcs[x6])) break;
589 if (x6 == 0x1e || x6 == 0x1f) { // tpa or tak are "special"
590 if (privify_en && inst.M46.r3 > 63) {
591 if (x6 == 0x1e) x6 = 0x1b;
592 else x6 = 0x1a;
593 }
594 }
595 if (privify_en && x6 == 52 && inst.M28.r3 > 63)
596 privcnt.fc++;
597 else if (privify_en && x6 == 16 && inst.M43.r3 > 63)
598 privcnt.cpuid++;
599 else privcnt.Mpriv_cnt[x6]++;
600 return (*pfunc)(vcpu,inst);
601 break;
602 case B:
603 if (inst.generic.major != 0) break;
604 if (inst.B8.x6 == 0x08) {
605 IA64FAULT fault;
606 privcnt.rfi++;
607 fault = priv_rfi(vcpu,inst);
608 if (fault == IA64_NO_FAULT) fault = IA64_RFI_IN_PROGRESS;
609 return fault;
610 }
611 if (inst.B8.x6 == 0x0c) {
612 privcnt.bsw0++;
613 return priv_bsw0(vcpu,inst);
614 }
615 if (inst.B8.x6 == 0x0d) {
616 privcnt.bsw1++;
617 return priv_bsw1(vcpu,inst);
618 }
619 if (inst.B8.x6 == 0x0) { // break instr for privified cover
620 privcnt.cover++;
621 return priv_cover(vcpu,inst);
622 }
623 break;
624 case I:
625 if (inst.generic.major != 0) break;
626 #if 0
627 if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
628 privcnt.cover++;
629 return priv_cover(vcpu,inst);
630 }
631 #endif
632 if (inst.I26.x3 != 0) break; // I26.x3 == I27.x3
633 if (inst.I26.x6 == 0x2a) {
634 if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8)
635 privcnt.mov_from_ar++; // privified mov from kr
636 else privcnt.mov_to_ar_reg++;
637 return priv_mov_to_ar_reg(vcpu,inst);
638 }
639 if (inst.I27.x6 == 0x0a) {
640 privcnt.mov_to_ar_imm++;
641 return priv_mov_to_ar_imm(vcpu,inst);
642 }
643 break;
644 default:
645 break;
646 }
647 //printf("We who are about to die salute you\n");
648 printf("priv_handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d), ipsr=0x%lx\n",
649 iip, (UINT64)inst.inst, slot, slot_type, ipsr);
650 //printf("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
651 //thread_mozambique("privop fault\n");
652 return (IA64_ILLOP_FAULT);
653 }
655 /** Emulate a privileged operation.
656 *
657 * This should probably return 0 on success and the "trap number"
658 * (e.g. illegal operation for bad register, priv op for an
659 * instruction that isn't allowed, etc.) on "failure"
660 *
661 * @param vcpu virtual cpu
662 * @param isrcode interrupt service routine code
663 * @return fault
664 */
665 IA64FAULT
666 priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
667 {
668 IA64FAULT fault;
669 UINT64 ipsr = regs->cr_ipsr;
670 UINT64 isrcode = (isr >> 4) & 0xf;
671 int privlvl;
673 // handle privops masked as illops? and breaks (6)
674 if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
675 printf("priv_emulate: isrcode != 0, 1, 2 or 6\n");
676 printf("priv_emulate: returning ILLOP, not implemented!\n");
677 while (1);
678 return IA64_ILLOP_FAULT;
679 }
680 //if (isrcode != 1 && isrcode != 2) return 0;
681 privlvl = (ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
682 // it's OK for a privified-cover to be executed in user-land
683 fault = priv_handle_op(vcpu,regs,privlvl);
684 if ((fault == IA64_NO_FAULT) || (fault == IA64_EXTINT_VECTOR)) { // success!!
685 // update iip/ipsr to point to the next instruction
686 (void)vcpu_increment_iip(vcpu);
687 }
688 if (fault == IA64_ILLOP_FAULT)
689 printf("priv_emulate: priv_handle_op fails, isr=0x%lx\n",isr);
690 return fault;
691 }
693 /* hyperprivops are generally executed in assembly (with physical psr.ic off)
694 * so this code is primarily used for debugging them */
695 int
696 ia64_hyperprivop(unsigned long iim, REGS *regs)
697 {
698 struct vcpu *v = current;
699 UINT64 val;
700 UINT64 itir, ifa;
702 if (!iim || iim > HYPERPRIVOP_MAX) {
703 panic_domain(regs, "bad hyperprivop: iim=%lx, iip=0x%lx\n",
704 iim, regs->cr_iip);
705 return 1;
706 }
707 slow_hyperpriv_cnt[iim]++;
708 switch(iim) {
709 case HYPERPRIVOP_RFI:
710 (void)vcpu_rfi(v);
711 return 0; // don't update iip
712 case HYPERPRIVOP_RSM_DT:
713 (void)vcpu_reset_psr_dt(v);
714 return 1;
715 case HYPERPRIVOP_SSM_DT:
716 (void)vcpu_set_psr_dt(v);
717 return 1;
718 case HYPERPRIVOP_COVER:
719 (void)vcpu_cover(v);
720 return 1;
721 case HYPERPRIVOP_ITC_D:
722 (void)vcpu_get_itir(v,&itir);
723 (void)vcpu_get_ifa(v,&ifa);
724 (void)vcpu_itc_d(v,regs->r8,itir,ifa);
725 return 1;
726 case HYPERPRIVOP_ITC_I:
727 (void)vcpu_get_itir(v,&itir);
728 (void)vcpu_get_ifa(v,&ifa);
729 (void)vcpu_itc_i(v,regs->r8,itir,ifa);
730 return 1;
731 case HYPERPRIVOP_SSM_I:
732 (void)vcpu_set_psr_i(v);
733 return 1;
734 case HYPERPRIVOP_GET_IVR:
735 (void)vcpu_get_ivr(v,&val);
736 regs->r8 = val;
737 return 1;
738 case HYPERPRIVOP_GET_TPR:
739 (void)vcpu_get_tpr(v,&val);
740 regs->r8 = val;
741 return 1;
742 case HYPERPRIVOP_SET_TPR:
743 (void)vcpu_set_tpr(v,regs->r8);
744 return 1;
745 case HYPERPRIVOP_EOI:
746 (void)vcpu_set_eoi(v,0L);
747 return 1;
748 case HYPERPRIVOP_SET_ITM:
749 (void)vcpu_set_itm(v,regs->r8);
750 return 1;
751 case HYPERPRIVOP_THASH:
752 (void)vcpu_thash(v,regs->r8,&val);
753 regs->r8 = val;
754 return 1;
755 case HYPERPRIVOP_PTC_GA:
756 (void)vcpu_ptc_ga(v,regs->r8,(1L << ((regs->r9 & 0xfc) >> 2)));
757 return 1;
758 case HYPERPRIVOP_ITR_D:
759 (void)vcpu_get_itir(v,&itir);
760 (void)vcpu_get_ifa(v,&ifa);
761 (void)vcpu_itr_d(v,regs->r8,regs->r9,itir,ifa);
762 return 1;
763 case HYPERPRIVOP_GET_RR:
764 (void)vcpu_get_rr(v,regs->r8,&val);
765 regs->r8 = val;
766 return 1;
767 case HYPERPRIVOP_SET_RR:
768 (void)vcpu_set_rr(v,regs->r8,regs->r9);
769 return 1;
770 case HYPERPRIVOP_SET_KR:
771 (void)vcpu_set_ar(v,regs->r8,regs->r9);
772 return 1;
773 case HYPERPRIVOP_FC:
774 (void)vcpu_fc(v,regs->r8);
775 return 1;
776 case HYPERPRIVOP_GET_CPUID:
777 (void)vcpu_get_cpuid(v,regs->r8,&val);
778 regs->r8 = val;
779 return 1;
780 case HYPERPRIVOP_GET_PMD:
781 (void)vcpu_get_pmd(v,regs->r8,&val);
782 regs->r8 = val;
783 return 1;
784 case HYPERPRIVOP_GET_EFLAG:
785 (void)vcpu_get_ar(v,24,&val);
786 regs->r8 = val;
787 return 1;
788 case HYPERPRIVOP_SET_EFLAG:
789 (void)vcpu_set_ar(v,24,regs->r8);
790 return 1;
791 case HYPERPRIVOP_RSM_BE:
792 (void)vcpu_reset_psr_sm(v, IA64_PSR_BE);
793 return 1;
794 case HYPERPRIVOP_GET_PSR:
795 (void)vcpu_get_psr(v, &val);
796 regs->r8 = val;
797 return 1;
798 }
799 return 0;
800 }
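
Usage note: ia64_hyperprivop() returns 0 when the operation has already set iip itself (e.g. HYPERPRIVOP_RFI) and non-zero when the caller should step past the break instruction. A hypothetical caller, sketched only to illustrate that convention (the handler name below is invented; the real call site lives elsewhere in the tree):

/* Hypothetical caller sketch, not the actual break handler. */
void handle_break_hyperprivop(unsigned long iim, REGS *regs)
{
	if (ia64_hyperprivop(iim, regs))
		(void)vcpu_increment_iip(current);	// skip past the break instruction
	// a return of 0 means the op (e.g. rfi) already redirected iip
}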