ia64/xen-unstable

view xen/arch/ia64/xen/privop.c @ 10703:8d501f39286c

[IA64] vDSO paravirtualization: paravirtualize vDSO

Paravirtualize the vDSO area, based on Kevin's observation and Dan's idea:
introduce the hyperprivops HYPERPRIVOP_RSM_BE and HYPERPRIVOP_GET_PSR
and use them to paravirtualize the vDSO area.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Mon Jul 24 13:43:35 2006 -0600 (2006-07-24)
parents 63595abd80c5
children c8bc76d877e0
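
As context for the two new hyperprivops, here is a minimal guest-side sketch (illustrative only) of how paravirtualized code such as the vDSO could replace a privileged "mov rX=psr" with HYPERPRIVOP_GET_PSR. It assumes the calling convention visible in ia64_hyperprivop() below: the hyperprivop number is passed as the break immediate and the result comes back in r8. HYPERPRIVOP_GET_PSR is defined in a header outside this file, and the wrapper name is hypothetical.

/* Hypothetical guest-side wrapper, shown for illustration only. */
static inline unsigned long xen_get_psr(void)
{
	register unsigned long r8 asm ("r8");

	/* The break immediate selects the hyperprivop; Xen returns the
	 * virtualized PSR value in the guest's r8 (see the
	 * HYPERPRIVOP_GET_PSR case in ia64_hyperprivop() below). */
	asm volatile ("break %1"
		      : "=r" (r8)
		      : "i" (HYPERPRIVOP_GET_PSR)
		      : "memory");
	return r8;
}

A corresponding HYPERPRIVOP_RSM_BE break would stand in for clearing psr.be, matching the vcpu_reset_psr_sm(v, IA64_PSR_BE) case added below.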
line source
1 /*
2 * Privileged operation "API" handling functions.
3 *
4 * Copyright (C) 2004 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <asm/privop.h>
10 #include <asm/vcpu.h>
11 #include <asm/processor.h>
12 #include <asm/delay.h> // Debug only
13 #include <asm/dom_fw.h>
14 #include <asm/vhpt.h>
15 #include <asm/bundle.h>
16 #include <asm/privop_stat.h>
18 long priv_verbose=0;
19 unsigned long privop_trace = 0;
21 /* Set to 1 to handle privified instructions from the privify tool. */
22 #ifndef CONFIG_PRIVIFY
23 static const int privify_en = 0;
24 #else
25 static const int privify_en = 1;
26 #endif
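/*
 * "Privified" instructions are produced by the privify tool: selected
 * non-privileged operations (ttag, thash, fc, cover, mov from kr/cpuid/pmd)
 * are re-encoded as related privileged ones with a register number offset
 * by 64, so that they trap and can be emulated here.  The handlers below
 * detect the gr > 63 encoding and emulate the original operation, e.g.
 * tpa with r3 > 63 is handled as ttag rx=r(r3-64).
 */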
28 /**************************************************************************
29 Privileged operation emulation routines
30 **************************************************************************/
32 static IA64FAULT priv_rfi(VCPU *vcpu, INST64 inst)
33 {
34 return vcpu_rfi(vcpu);
35 }
37 static IA64FAULT priv_bsw0(VCPU *vcpu, INST64 inst)
38 {
39 return vcpu_bsw0(vcpu);
40 }
42 static IA64FAULT priv_bsw1(VCPU *vcpu, INST64 inst)
43 {
44 return vcpu_bsw1(vcpu);
45 }
47 static IA64FAULT priv_cover(VCPU *vcpu, INST64 inst)
48 {
49 return vcpu_cover(vcpu);
50 }
52 static IA64FAULT priv_ptc_l(VCPU *vcpu, INST64 inst)
53 {
54 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
55 UINT64 log_range;
57 log_range = ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
58 return vcpu_ptc_l(vcpu,vadr,log_range);
59 }
61 static IA64FAULT priv_ptc_e(VCPU *vcpu, INST64 inst)
62 {
63 UINT src = inst.M28.r3;
65 // NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
66 if (privify_en && src > 63)
67 return(vcpu_fc(vcpu,vcpu_get_gr(vcpu,src - 64)));
68 return vcpu_ptc_e(vcpu,vcpu_get_gr(vcpu,src));
69 }
71 static IA64FAULT priv_ptc_g(VCPU *vcpu, INST64 inst)
72 {
73 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
74 UINT64 addr_range;
76 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
77 return vcpu_ptc_g(vcpu,vadr,addr_range);
78 }
80 static IA64FAULT priv_ptc_ga(VCPU *vcpu, INST64 inst)
81 {
82 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
83 UINT64 addr_range;
85 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
86 return vcpu_ptc_ga(vcpu,vadr,addr_range);
87 }
89 static IA64FAULT priv_ptr_d(VCPU *vcpu, INST64 inst)
90 {
91 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
92 UINT64 log_range;
94 log_range = (vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2;
95 return vcpu_ptr_d(vcpu,vadr,log_range);
96 }
98 static IA64FAULT priv_ptr_i(VCPU *vcpu, INST64 inst)
99 {
100 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
101 UINT64 log_range;
103 log_range = (vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2;
104 return vcpu_ptr_i(vcpu,vadr,log_range);
105 }
107 static IA64FAULT priv_tpa(VCPU *vcpu, INST64 inst)
108 {
109 UINT64 padr;
110 UINT fault;
111 UINT src = inst.M46.r3;
113 // NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
114 if (privify_en && src > 63)
115 fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
116 else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
117 if (fault == IA64_NO_FAULT)
118 return vcpu_set_gr(vcpu, inst.M46.r1, padr, 0);
119 else return fault;
120 }
122 static IA64FAULT priv_tak(VCPU *vcpu, INST64 inst)
123 {
124 UINT64 key;
125 UINT fault;
126 UINT src = inst.M46.r3;
128 // NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
129 if (privify_en && src > 63)
130 fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
131 else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
132 if (fault == IA64_NO_FAULT)
133 return vcpu_set_gr(vcpu, inst.M46.r1, key,0);
134 else return fault;
135 }
137 /************************************
138 * Insert translation register/cache
139 ************************************/
141 static IA64FAULT priv_itr_d(VCPU *vcpu, INST64 inst)
142 {
143 UINT64 fault, itir, ifa, pte, slot;
145 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
146 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
147 return(IA64_ILLOP_FAULT);
148 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
149 return(IA64_ILLOP_FAULT);
150 pte = vcpu_get_gr(vcpu,inst.M42.r2);
151 slot = vcpu_get_gr(vcpu,inst.M42.r3);
153 return (vcpu_itr_d(vcpu,slot,pte,itir,ifa));
154 }
156 static IA64FAULT priv_itr_i(VCPU *vcpu, INST64 inst)
157 {
158 UINT64 fault, itir, ifa, pte, slot;
160 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
161 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
162 return(IA64_ILLOP_FAULT);
163 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
164 return(IA64_ILLOP_FAULT);
165 pte = vcpu_get_gr(vcpu,inst.M42.r2);
166 slot = vcpu_get_gr(vcpu,inst.M42.r3);
168 return (vcpu_itr_i(vcpu,slot,pte,itir,ifa));
169 }
171 static IA64FAULT priv_itc_d(VCPU *vcpu, INST64 inst)
172 {
173 UINT64 fault, itir, ifa, pte;
175 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
176 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
177 return(IA64_ILLOP_FAULT);
178 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
179 return(IA64_ILLOP_FAULT);
180 pte = vcpu_get_gr(vcpu,inst.M41.r2);
182 return (vcpu_itc_d(vcpu,pte,itir,ifa));
183 }
185 static IA64FAULT priv_itc_i(VCPU *vcpu, INST64 inst)
186 {
187 UINT64 fault, itir, ifa, pte;
189 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
190 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
191 return(IA64_ILLOP_FAULT);
192 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
193 return(IA64_ILLOP_FAULT);
194 pte = vcpu_get_gr(vcpu,inst.M41.r2);
196 return (vcpu_itc_i(vcpu,pte,itir,ifa));
197 }
199 /*************************************
200 * Moves to semi-privileged registers
201 *************************************/
203 static IA64FAULT priv_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
204 {
205 // I27 and M30 are identical for these fields
206 UINT64 ar3 = inst.M30.ar3;
207 UINT64 imm = vcpu_get_gr(vcpu,inst.M30.imm);
208 return (vcpu_set_ar(vcpu,ar3,imm));
209 }
211 static IA64FAULT priv_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
212 {
213 // I26 and M29 are identical for these fields
214 UINT64 ar3 = inst.M29.ar3;
216 if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8) {
217 // privified mov from kr
218 UINT64 val;
219 if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
220 return vcpu_set_gr(vcpu, inst.M29.r2-64, val,0);
221 else return IA64_ILLOP_FAULT;
222 }
223 else {
224 UINT64 r2 = vcpu_get_gr(vcpu,inst.M29.r2);
225 return (vcpu_set_ar(vcpu,ar3,r2));
226 }
227 }
229 /********************************
230 * Moves to privileged registers
231 ********************************/
233 static IA64FAULT priv_mov_to_pkr(VCPU *vcpu, INST64 inst)
234 {
235 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
236 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
237 return (vcpu_set_pkr(vcpu,r3,r2));
238 }
240 static IA64FAULT priv_mov_to_rr(VCPU *vcpu, INST64 inst)
241 {
242 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
243 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
244 return (vcpu_set_rr(vcpu,r3,r2));
245 }
247 static IA64FAULT priv_mov_to_dbr(VCPU *vcpu, INST64 inst)
248 {
249 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
250 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
251 return (vcpu_set_dbr(vcpu,r3,r2));
252 }
254 static IA64FAULT priv_mov_to_ibr(VCPU *vcpu, INST64 inst)
255 {
256 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
257 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
258 return (vcpu_set_ibr(vcpu,r3,r2));
259 }
261 static IA64FAULT priv_mov_to_pmc(VCPU *vcpu, INST64 inst)
262 {
263 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
264 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
265 return (vcpu_set_pmc(vcpu,r3,r2));
266 }
268 static IA64FAULT priv_mov_to_pmd(VCPU *vcpu, INST64 inst)
269 {
270 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
271 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
272 return (vcpu_set_pmd(vcpu,r3,r2));
273 }
275 static IA64FAULT priv_mov_to_cr(VCPU *vcpu, INST64 inst)
276 {
277 UINT64 val = vcpu_get_gr(vcpu, inst.M32.r2);
278 privcnt.to_cr_cnt[inst.M32.cr3]++;
279 switch (inst.M32.cr3) {
280 case 0: return vcpu_set_dcr(vcpu,val);
281 case 1: return vcpu_set_itm(vcpu,val);
282 case 2: return vcpu_set_iva(vcpu,val);
283 case 8: return vcpu_set_pta(vcpu,val);
284 case 16:return vcpu_set_ipsr(vcpu,val);
285 case 17:return vcpu_set_isr(vcpu,val);
286 case 19:return vcpu_set_iip(vcpu,val);
287 case 20:return vcpu_set_ifa(vcpu,val);
288 case 21:return vcpu_set_itir(vcpu,val);
289 case 22:return vcpu_set_iipa(vcpu,val);
290 case 23:return vcpu_set_ifs(vcpu,val);
291 case 24:return vcpu_set_iim(vcpu,val);
292 case 25:return vcpu_set_iha(vcpu,val);
293 case 64:return vcpu_set_lid(vcpu,val);
294 case 65:return IA64_ILLOP_FAULT;
295 case 66:return vcpu_set_tpr(vcpu,val);
296 case 67:return vcpu_set_eoi(vcpu,val);
297 case 68:return IA64_ILLOP_FAULT;
298 case 69:return IA64_ILLOP_FAULT;
299 case 70:return IA64_ILLOP_FAULT;
300 case 71:return IA64_ILLOP_FAULT;
301 case 72:return vcpu_set_itv(vcpu,val);
302 case 73:return vcpu_set_pmv(vcpu,val);
303 case 74:return vcpu_set_cmcv(vcpu,val);
304 case 80:return vcpu_set_lrr0(vcpu,val);
305 case 81:return vcpu_set_lrr1(vcpu,val);
306 default: return IA64_ILLOP_FAULT;
307 }
308 }
310 static IA64FAULT priv_rsm(VCPU *vcpu, INST64 inst)
311 {
312 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
313 return vcpu_reset_psr_sm(vcpu,imm24);
314 }
316 static IA64FAULT priv_ssm(VCPU *vcpu, INST64 inst)
317 {
318 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
319 return vcpu_set_psr_sm(vcpu,imm24);
320 }
322 /**
323 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
324 */
325 static IA64FAULT priv_mov_to_psr(VCPU *vcpu, INST64 inst)
326 {
327 UINT64 val = vcpu_get_gr(vcpu, inst.M35.r2);
328 return vcpu_set_psr_l(vcpu,val);
329 }
331 /**********************************
332 * Moves from privileged registers
333 **********************************/
335 static IA64FAULT priv_mov_from_rr(VCPU *vcpu, INST64 inst)
336 {
337 UINT64 val;
338 IA64FAULT fault;
339 UINT64 reg;
341 reg = vcpu_get_gr(vcpu,inst.M43.r3);
342 if (privify_en && inst.M43.r1 > 63) {
343 // privified mov from cpuid
344 fault = vcpu_get_cpuid(vcpu,reg,&val);
345 if (fault == IA64_NO_FAULT)
346 return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
347 }
348 else {
349 fault = vcpu_get_rr(vcpu,reg,&val);
350 if (fault == IA64_NO_FAULT)
351 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
352 }
353 return fault;
354 }
356 static IA64FAULT priv_mov_from_pkr(VCPU *vcpu, INST64 inst)
357 {
358 UINT64 val;
359 IA64FAULT fault;
361 fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
362 if (fault == IA64_NO_FAULT)
363 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
364 else return fault;
365 }
367 static IA64FAULT priv_mov_from_dbr(VCPU *vcpu, INST64 inst)
368 {
369 UINT64 val;
370 IA64FAULT fault;
372 fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
373 if (fault == IA64_NO_FAULT)
374 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
375 else return fault;
376 }
378 static IA64FAULT priv_mov_from_ibr(VCPU *vcpu, INST64 inst)
379 {
380 UINT64 val;
381 IA64FAULT fault;
383 fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
384 if (fault == IA64_NO_FAULT)
385 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
386 else return fault;
387 }
389 static IA64FAULT priv_mov_from_pmc(VCPU *vcpu, INST64 inst)
390 {
391 UINT64 val;
392 IA64FAULT fault;
393 UINT64 reg;
395 reg = vcpu_get_gr(vcpu,inst.M43.r3);
396 if (privify_en && inst.M43.r1 > 63) {
397 // privified mov from pmd
398 fault = vcpu_get_pmd(vcpu,reg,&val);
399 if (fault == IA64_NO_FAULT)
400 return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
401 }
402 else {
403 fault = vcpu_get_pmc(vcpu,reg,&val);
404 if (fault == IA64_NO_FAULT)
405 return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
406 }
407 return fault;
408 }
410 #define cr_get(cr) \
411 ((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
412 vcpu_set_gr(vcpu, tgt, val, 0) : fault;
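/* For example, cr_get(ifa) expands to:
 *   ((fault = vcpu_get_ifa(vcpu,&val)) == IA64_NO_FAULT) ?
 *       vcpu_set_gr(vcpu, tgt, val, 0) : fault;
 * relying on the local 'fault', 'val' and 'tgt' declared by the caller.
 */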
414 static IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
415 {
416 UINT64 tgt = inst.M33.r1;
417 UINT64 val;
418 IA64FAULT fault;
420 privcnt.from_cr_cnt[inst.M33.cr3]++;
421 switch (inst.M33.cr3) {
422 case 0: return cr_get(dcr);
423 case 1: return cr_get(itm);
424 case 2: return cr_get(iva);
425 case 8: return cr_get(pta);
426 case 16:return cr_get(ipsr);
427 case 17:return cr_get(isr);
428 case 19:return cr_get(iip);
429 case 20:return cr_get(ifa);
430 case 21:return cr_get(itir);
431 case 22:return cr_get(iipa);
432 case 23:return cr_get(ifs);
433 case 24:return cr_get(iim);
434 case 25:return cr_get(iha);
435 case 64:return cr_get(lid);
436 case 65:return cr_get(ivr);
437 case 66:return cr_get(tpr);
438 case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
439 case 68:return cr_get(irr0);
440 case 69:return cr_get(irr1);
441 case 70:return cr_get(irr2);
442 case 71:return cr_get(irr3);
443 case 72:return cr_get(itv);
444 case 73:return cr_get(pmv);
445 case 74:return cr_get(cmcv);
446 case 80:return cr_get(lrr0);
447 case 81:return cr_get(lrr1);
448 default: return IA64_ILLOP_FAULT;
449 }
450 return IA64_ILLOP_FAULT;
451 }
453 static IA64FAULT priv_mov_from_psr(VCPU *vcpu, INST64 inst)
454 {
455 UINT64 tgt = inst.M33.r1;
456 UINT64 val;
457 IA64FAULT fault;
459 if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
460 return vcpu_set_gr(vcpu, tgt, val, 0);
461 else return fault;
462 }
464 /**************************************************************************
465 Privileged operation decode and dispatch routines
466 **************************************************************************/
468 static const IA64_SLOT_TYPE slot_types[0x20][3] = {
469 {M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
470 {M, I, ILLEGAL}, {M, I, ILLEGAL},
471 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
472 {M, M, I}, {M, M, I}, {M, M, I}, {M, M, I},
473 {M, F, I}, {M, F, I},
474 {M, M, F}, {M, M, F},
475 {M, I, B}, {M, I, B},
476 {M, B, B}, {M, B, B},
477 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
478 {B, B, B}, {B, B, B},
479 {M, M, B}, {M, M, B},
480 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
481 {M, F, B}, {M, F, B},
482 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL}
483 };
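/* slot_types is indexed by the 5-bit bundle template and the slot number;
 * each entry names the execution-unit type (M/I/F/B) that decodes that
 * slot, with ILLEGAL marking slots and templates this decoder does not
 * handle. */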
485 // pointer to privileged emulation function
486 typedef IA64FAULT (*PPEFCN)(VCPU *vcpu, INST64 inst);
488 static const PPEFCN Mpriv_funcs[64] = {
489 priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
490 priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
491 0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
492 priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
493 priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr, priv_mov_from_pkr,
494 priv_mov_from_pmc, 0, 0, 0,
495 0, 0, 0, 0,
496 0, 0, priv_tpa, priv_tak,
497 0, 0, 0, 0,
498 priv_mov_from_cr, priv_mov_from_psr, 0, 0,
499 0, 0, 0, 0,
500 priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
501 0, 0, 0, 0,
502 priv_ptc_e, 0, 0, 0,
503 0, 0, 0, 0, 0, 0, 0, 0
504 };
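/* Dispatch table for M-unit privileged operations with major opcode 1,
 * indexed by the instruction's x6 field; a NULL entry means the encoding
 * is not emulated and priv_handle_op() falls through to its failure path. */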
506 static IA64FAULT
507 priv_handle_op(VCPU *vcpu, REGS *regs, int privlvl)
508 {
509 IA64_BUNDLE bundle;
510 IA64_BUNDLE __get_domain_bundle(UINT64);
511 int slot;
512 IA64_SLOT_TYPE slot_type;
513 INST64 inst;
514 PPEFCN pfunc;
515 unsigned long ipsr = regs->cr_ipsr;
516 UINT64 iip = regs->cr_iip;
517 int x6;
519 // make a local copy of the bundle containing the privop
520 if (!vcpu_get_domain_bundle(vcpu, regs, iip, &bundle)) {
521 //return vcpu_force_data_miss(vcpu, regs->cr_iip);
522 return vcpu_force_inst_miss(vcpu, regs->cr_iip);
523 }
525 #if 0
526 if (iip==0xa000000100001820) {
527 static int firstpagefault = 1;
528 if (firstpagefault) {
529 printf("*** First time to domain page fault!\n"); firstpagefault=0;
530 }
531 }
532 #endif
533 if (privop_trace) {
534 static long i = 400;
535 //if (i > 0) printf("priv_handle_op: at 0x%lx\n",iip);
536 if (i > 0) printf("priv_handle_op: privop trace at 0x%lx, itc=%lx, itm=%lx\n",
537 iip,ia64_get_itc(),ia64_get_itm());
538 i--;
539 }
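/* An IA-64 bundle is 128 bits: a 5-bit template followed by three 41-bit
 * instruction slots.  ipsr.ri identifies the faulting slot, and the shifts
 * below pull that slot out of the two 64-bit words of the bundle. */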
540 slot = ((struct ia64_psr *)&ipsr)->ri;
541 if (!slot) inst.inst = (bundle.i64[0]>>5) & MASK_41;
542 else if (slot == 1)
543 inst.inst = ((bundle.i64[0]>>46) | bundle.i64[1]<<18) & MASK_41;
544 else if (slot == 2) inst.inst = (bundle.i64[1]>>23) & MASK_41;
545 else printf("priv_handle_op: illegal slot: %d\n", slot);
547 slot_type = slot_types[bundle.template][slot];
548 if (priv_verbose) {
549 printf("priv_handle_op: checking bundle at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
550 iip, (UINT64)inst.inst, slot, slot_type);
551 }
552 if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
553 // break instr for privified cover
554 }
555 else if (privlvl != 2) return (IA64_ILLOP_FAULT);
556 switch (slot_type) {
557 case M:
558 if (inst.generic.major == 0) {
559 #if 0
560 if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
561 privcnt.cover++;
562 return priv_cover(vcpu,inst);
563 }
564 #endif
565 if (inst.M29.x3 != 0) break;
566 if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
567 privcnt.mov_to_ar_imm++;
568 return priv_mov_to_ar_imm(vcpu,inst);
569 }
570 if (inst.M44.x4 == 6) {
571 privcnt.ssm++;
572 return priv_ssm(vcpu,inst);
573 }
574 if (inst.M44.x4 == 7) {
575 privcnt.rsm++;
576 return priv_rsm(vcpu,inst);
577 }
578 break;
579 }
580 else if (inst.generic.major != 1) break;
581 x6 = inst.M29.x6;
582 if (x6 == 0x2a) {
583 if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8)
584 privcnt.mov_from_ar++; // privified mov from kr
585 else privcnt.mov_to_ar_reg++;
586 return priv_mov_to_ar_reg(vcpu,inst);
587 }
588 if (inst.M29.x3 != 0) break;
589 if (!(pfunc = Mpriv_funcs[x6])) break;
590 if (x6 == 0x1e || x6 == 0x1f) { // tpa or tak are "special"
591 if (privify_en && inst.M46.r3 > 63) {
592 if (x6 == 0x1e) x6 = 0x1b;
593 else x6 = 0x1a;
594 }
595 }
596 if (privify_en && x6 == 52 && inst.M28.r3 > 63)
597 privcnt.fc++;
598 else if (privify_en && x6 == 16 && inst.M43.r3 > 63)
599 privcnt.cpuid++;
600 else privcnt.Mpriv_cnt[x6]++;
601 return (*pfunc)(vcpu,inst);
602 break;
603 case B:
604 if (inst.generic.major != 0) break;
605 if (inst.B8.x6 == 0x08) {
606 IA64FAULT fault;
607 privcnt.rfi++;
608 fault = priv_rfi(vcpu,inst);
609 if (fault == IA64_NO_FAULT) fault = IA64_RFI_IN_PROGRESS;
610 return fault;
611 }
612 if (inst.B8.x6 == 0x0c) {
613 privcnt.bsw0++;
614 return priv_bsw0(vcpu,inst);
615 }
616 if (inst.B8.x6 == 0x0d) {
617 privcnt.bsw1++;
618 return priv_bsw1(vcpu,inst);
619 }
620 if (inst.B8.x6 == 0x0) { // break instr for privified cover
621 privcnt.cover++;
622 return priv_cover(vcpu,inst);
623 }
624 break;
625 case I:
626 if (inst.generic.major != 0) break;
627 #if 0
628 if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
629 privcnt.cover++;
630 return priv_cover(vcpu,inst);
631 }
632 #endif
633 if (inst.I26.x3 != 0) break; // I26.x3 == I27.x3
634 if (inst.I26.x6 == 0x2a) {
635 if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8)
636 privcnt.mov_from_ar++; // privified mov from kr
637 else privcnt.mov_to_ar_reg++;
638 return priv_mov_to_ar_reg(vcpu,inst);
639 }
640 if (inst.I27.x6 == 0x0a) {
641 privcnt.mov_to_ar_imm++;
642 return priv_mov_to_ar_imm(vcpu,inst);
643 }
644 break;
645 default:
646 break;
647 }
648 //printf("We who are about to die salute you\n");
649 printf("priv_handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d), ipsr=0x%lx\n",
650 iip, (UINT64)inst.inst, slot, slot_type, ipsr);
651 //printf("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
652 //thread_mozambique("privop fault\n");
653 return (IA64_ILLOP_FAULT);
654 }
656 /** Emulate a privileged operation.
657 *
658 * This should probably return 0 on success and the "trap number"
659 * (e.g. illegal operation for bad register, priv op for an
660 * instruction that isn't allowed, etc.) on "failure"
661 *
662 * @param vcpu virtual cpu
663 * @param isrcode interrupt service routine code
664 * @return fault
665 */
666 IA64FAULT
667 priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
668 {
669 IA64FAULT fault;
670 UINT64 ipsr = regs->cr_ipsr;
671 UINT64 isrcode = (isr >> 4) & 0xf;
672 int privlvl;
674 // handle privops masked as illops? and breaks (6)
675 if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
676 printf("priv_emulate: isrcode != 0, 1, 2 or 6\n");
677 printf("priv_emulate: returning ILLOP, not implemented!\n");
678 while (1);
679 return IA64_ILLOP_FAULT;
680 }
681 //if (isrcode != 1 && isrcode != 2) return 0;
682 privlvl = (ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
683 // it's OK for a privified-cover to be executed in user-land
684 fault = priv_handle_op(vcpu,regs,privlvl);
685 if ((fault == IA64_NO_FAULT) || (fault == IA64_EXTINT_VECTOR)) { // success!!
686 // update iip/ipsr to point to the next instruction
687 (void)vcpu_increment_iip(vcpu);
688 }
689 if (fault == IA64_ILLOP_FAULT)
690 printf("priv_emulate: priv_handle_op fails, isr=0x%lx\n",isr);
691 return fault;
692 }
694 /* hyperprivops are generally executed in assembly (with physical psr.ic off)
695 * so this code is primarily used for debugging them */
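/* Return value: 1 means the hyperprivop was handled and the guest iip
 * should be advanced past the break; 0 means iip must be left alone
 * (e.g. rfi installs the new iip itself). */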
696 int
697 ia64_hyperprivop(unsigned long iim, REGS *regs)
698 {
699 struct vcpu *v = current;
700 UINT64 val;
701 UINT64 itir, ifa;
703 if (!iim || iim > HYPERPRIVOP_MAX) {
704 panic_domain(regs, "bad hyperprivop: iim=%lx, iip=0x%lx\n",
705 iim, regs->cr_iip);
706 return 1;
707 }
708 slow_hyperpriv_cnt[iim]++;
709 switch(iim) {
710 case HYPERPRIVOP_RFI:
711 (void)vcpu_rfi(v);
712 return 0; // don't update iip
713 case HYPERPRIVOP_RSM_DT:
714 (void)vcpu_reset_psr_dt(v);
715 return 1;
716 case HYPERPRIVOP_SSM_DT:
717 (void)vcpu_set_psr_dt(v);
718 return 1;
719 case HYPERPRIVOP_COVER:
720 (void)vcpu_cover(v);
721 return 1;
722 case HYPERPRIVOP_ITC_D:
723 (void)vcpu_get_itir(v,&itir);
724 (void)vcpu_get_ifa(v,&ifa);
725 (void)vcpu_itc_d(v,regs->r8,itir,ifa);
726 return 1;
727 case HYPERPRIVOP_ITC_I:
728 (void)vcpu_get_itir(v,&itir);
729 (void)vcpu_get_ifa(v,&ifa);
730 (void)vcpu_itc_i(v,regs->r8,itir,ifa);
731 return 1;
732 case HYPERPRIVOP_SSM_I:
733 (void)vcpu_set_psr_i(v);
734 return 1;
735 case HYPERPRIVOP_GET_IVR:
736 (void)vcpu_get_ivr(v,&val);
737 regs->r8 = val;
738 return 1;
739 case HYPERPRIVOP_GET_TPR:
740 (void)vcpu_get_tpr(v,&val);
741 regs->r8 = val;
742 return 1;
743 case HYPERPRIVOP_SET_TPR:
744 (void)vcpu_set_tpr(v,regs->r8);
745 return 1;
746 case HYPERPRIVOP_EOI:
747 (void)vcpu_set_eoi(v,0L);
748 return 1;
749 case HYPERPRIVOP_SET_ITM:
750 (void)vcpu_set_itm(v,regs->r8);
751 return 1;
752 case HYPERPRIVOP_THASH:
753 (void)vcpu_thash(v,regs->r8,&val);
754 regs->r8 = val;
755 return 1;
756 case HYPERPRIVOP_PTC_GA:
757 (void)vcpu_ptc_ga(v,regs->r8,(1L << ((regs->r9 & 0xfc) >> 2)));
758 return 1;
759 case HYPERPRIVOP_ITR_D:
760 (void)vcpu_get_itir(v,&itir);
761 (void)vcpu_get_ifa(v,&ifa);
762 (void)vcpu_itr_d(v,regs->r8,regs->r9,itir,ifa);
763 return 1;
764 case HYPERPRIVOP_GET_RR:
765 (void)vcpu_get_rr(v,regs->r8,&val);
766 regs->r8 = val;
767 return 1;
768 case HYPERPRIVOP_SET_RR:
769 (void)vcpu_set_rr(v,regs->r8,regs->r9);
770 return 1;
771 case HYPERPRIVOP_SET_KR:
772 (void)vcpu_set_ar(v,regs->r8,regs->r9);
773 return 1;
774 case HYPERPRIVOP_FC:
775 (void)vcpu_fc(v,regs->r8);
776 return 1;
777 case HYPERPRIVOP_GET_CPUID:
778 (void)vcpu_get_cpuid(v,regs->r8,&val);
779 regs->r8 = val;
780 return 1;
781 case HYPERPRIVOP_GET_PMD:
782 (void)vcpu_get_pmd(v,regs->r8,&val);
783 regs->r8 = val;
784 return 1;
785 case HYPERPRIVOP_GET_EFLAG:
786 (void)vcpu_get_ar(v,24,&val);
787 regs->r8 = val;
788 return 1;
789 case HYPERPRIVOP_SET_EFLAG:
790 (void)vcpu_set_ar(v,24,regs->r8);
791 return 1;
792 case HYPERPRIVOP_RSM_BE:
793 (void)vcpu_reset_psr_sm(v, IA64_PSR_BE);
794 return 1;
795 case HYPERPRIVOP_GET_PSR:
796 (void)vcpu_get_psr(v, &val);
797 regs->r8 = val;
798 return 1;
799 }
800 return 0;
801 }