ia64/xen-unstable

view xen/arch/ia64/process.c @ 4146:f2d61710e4d9

bitkeeper revision 1.1236.25.24 (42366e9aQ71LQ8uCB-Y1IwVNqx5eqA)

Merge djm@kirby.fc.hp.com://home/djm/src/xen/xeno-unstable-ia64.bk
into sportsman.spdomain:/home/djm/xeno-unstable-ia64.bk
author djm@sportsman.spdomain
date Tue Mar 15 05:11:54 2005 +0000 (2005-03-15)
parents f8026d38aa87 0c846e77cca4
children a0b28acf0dcd
line source
/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <linux/efi.h>  /* FOR EFI_UNIMPLEMENTED */
#include <asm/sal.h>    /* FOR struct ia64_sal_retval */

#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
//#include <asm/ldt.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/regionreg.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/hpsim_ssc.h>
#include <asm/dom_fw.h>

extern unsigned long vcpu_get_itir_on_fault(struct exec_domain *, UINT64);
extern struct ia64_sal_retval pal_emulator_static(UINT64);
extern struct ia64_sal_retval sal_emulator(UINT64, UINT64, UINT64, UINT64, UINT64, UINT64, UINT64, UINT64);

extern unsigned long dom0_start, dom0_size;

#define IA64_PSR_CPL1   (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
// note IA64_PSR_PK removed from following, why is this necessary?
#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
                         IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
                         IA64_PSR_IT | IA64_PSR_BN)

#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
                         IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
                         IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
                         IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
                         IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
                         IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
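
// DELIVER_PSR_SET/CLR roughly mirror the PSR transformation the processor
// performs when delivering an interruption: turn collection, interrupt and
// translation bits back on for the guest handler, and clear single-step,
// debug, privilege-level and related bits.  (Rough description, not a
// quote from the architecture manual.)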

#define PSCB(x,y)       x->vcpu_info->arch.y
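
// PSCB(ed,field) accesses the privileged state kept in the vcpu_info arch
// area, i.e. the virtualized cr.*/psr state that reflect_interruption()
// below saves and that the guest later reads back via privops.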

extern unsigned long vcpu_verbose;

long do_iopl(domid_t domain, unsigned int new_io_pl)
{
    dummy();
    return 0;
}

void schedule_tail(struct exec_domain *next)
{
    unsigned long rr7;
    printk("current=%lx,shared_info=%lx\n", current, current->vcpu_info);
    printk("next=%lx,shared_info=%lx\n", next, next->vcpu_info);
    if ((rr7 = load_region_regs(current))) {
        printk("schedule_tail: change to rr7 not yet implemented\n");
    }
}

extern TR_ENTRY *match_tr(struct exec_domain *ed, unsigned long ifa);

void tdpfoo(void) { }
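
// tdpfoo() is an empty hook; presumably a convenient breakpoint target for
// the out-of-bounds metaphysical address cases below.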

// Given a domain virtual address, pte and pagesize, extract the metaphysical
// address, convert the pte for a physical address for (possibly different)
// Xen PAGE_SIZE and return modified pte.  (NOTE: TLB insert should use
// PAGE_SIZE!)
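//
// Illustrative example (not from the original source): with itir.ps == 14
// (a 16KB domain page), mask == (1UL << 14) - 1 == 0x3fff; if the pte's ppn
// maps metaphysical 0x400000 and the faulting address is 0x80003456, then
// mpaddr == 0x400000 | 0x3456 == 0x403456.  lookup_domain_mpa() then supplies
// the machine ppn, and the returned pte is meant to be inserted with Xen's
// PAGE_SIZE.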
unsigned long translate_domain_pte(unsigned long pteval,
                                   unsigned long address, unsigned long itir)
{
    struct domain *d = current->domain;
    unsigned long mask, pteval2, mpaddr;
    unsigned long lookup_domain_mpa(struct domain *, unsigned long);
    extern struct domain *dom0;
    extern unsigned long dom0_start, dom0_size;

    // FIXME address had better be pre-validated on insert
    mask = (1L << ((itir >> 2) & 0x3f)) - 1;
    mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
    if (d == dom0) {
        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
            //printk("translate_domain_pte: out-of-bounds dom0 mpaddr %p! itc=%lx...\n",mpaddr,ia64_get_itc());
            tdpfoo();
        }
    }
    else if ((mpaddr >> PAGE_SHIFT) > d->max_pages) {
        printf("translate_domain_pte: bad mpa=%p (> %p),vadr=%p,pteval=%p,itir=%p\n",
               mpaddr, d->max_pages << PAGE_SHIFT, address, pteval, itir);
        tdpfoo();
    }
    pteval2 = lookup_domain_mpa(d, mpaddr);
    pteval2 &= _PAGE_PPN_MASK;  // ignore non-addr bits
    pteval2 |= _PAGE_PL_2;      // force PL0->2 (PL3 is unaffected)
    pteval2 = (pteval & ~_PAGE_PPN_MASK) | pteval2;
    return pteval2;
}

// Given a current domain metaphysical address, return the physical address
unsigned long translate_domain_mpaddr(unsigned long mpaddr)
{
    extern unsigned long lookup_domain_mpa(struct domain *, unsigned long);
    unsigned long pteval;

    if (current->domain == dom0) {
        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
            printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr %p! continuing...\n", mpaddr);
            tdpfoo();
        }
    }
    pteval = lookup_domain_mpa(current->domain, mpaddr);
    return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
}
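
// translate_domain_mpaddr() is used below by the SSC disk and EFI time
// emulation to turn guest metaphysical buffer addresses into machine
// addresses, one Xen page at a time.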

void reflect_interruption(unsigned long ifa, unsigned long isr,
                          unsigned long itiriim, struct pt_regs *regs,
                          unsigned long vector)
{
    unsigned long vcpu_get_ipsr_int_state(struct exec_domain *, unsigned long);
    unsigned long vcpu_get_rr_ve(struct exec_domain *, unsigned long);
    struct domain *d = current->domain;
    struct exec_domain *ed = current;

    if (vector == IA64_EXTINT_VECTOR) {

        extern unsigned long vcpu_verbose, privop_trace;
        static int first_extint = 1;
        if (first_extint) {
            printf("Delivering first extint to domain: ifa=%p, isr=%p, itir=%p, iip=%p\n",
                   ifa, isr, itiriim, regs->cr_iip);
            //privop_trace = 1; vcpu_verbose = 1;
            first_extint = 0;
        }
    }
    if (!PSCB(ed,interrupt_collection_enabled)) {
        if (!(PSCB(ed,ipsr) & IA64_PSR_DT)) {
            panic_domain(regs, "psr.dt off, trying to deliver nested dtlb!\n");
        }
        vector &= ~0xf;
        if (vector != IA64_DATA_TLB_VECTOR &&
            vector != IA64_ALT_DATA_TLB_VECTOR) {  // second compare was a duplicated IA64_DATA_TLB_VECTOR; ALT is presumably what was meant
            panic_domain(regs, "psr.ic off, delivering fault=%lx,iip=%p,ifa=%p,isr=%p,PSCB.iip=%p\n",
                         vector, regs->cr_iip, ifa, isr, PSCB(ed,iip));

        }
155 //printf("Delivering NESTED DATA TLB fault\n");
156 vector = IA64_DATA_NESTED_TLB_VECTOR;
157 regs->cr_iip = ((unsigned long) PSCB(ed,iva) + vector) & ~0xffUL;
158 regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
159 // NOTE: nested trap must NOT pass PSCB address
160 //regs->r31 = (unsigned long) &PSCB(ed);
161 return;
163 }
164 if ((vector & 0xf) != IA64_FORCED_IFA) PSCB(ed,ifa) = ifa;
165 else ifa = PSCB(ed,ifa);
166 vector &= ~0xf;
167 // always deliver on ALT vector (for now?) because no VHPT
168 // if (!vcpu_get_rr_ve(ed,ifa)) {
169 if (vector == IA64_DATA_TLB_VECTOR)
170 vector = IA64_ALT_DATA_TLB_VECTOR;
171 else if (vector == IA64_INST_TLB_VECTOR)
172 vector = IA64_ALT_INST_TLB_VECTOR;
173 // }
174 PSCB(ed,unat) = regs->ar_unat; // not sure if this is really needed?
175 PSCB(ed,precover_ifs) = regs->cr_ifs;
176 vcpu_bsw0(ed);
177 PSCB(ed,ipsr) = vcpu_get_ipsr_int_state(ed,regs->cr_ipsr);
178 if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
179 PSCB(ed,iim) = itiriim;
180 else PSCB(ed,itir) = vcpu_get_itir_on_fault(ed,ifa);
181 PSCB(ed,isr) = isr; // this is unnecessary except for interrupts!
182 PSCB(ed,iip) = regs->cr_iip;
183 PSCB(ed,ifs) = 0;
184 PSCB(ed,incomplete_regframe) = 0;
186 regs->cr_iip = ((unsigned long) PSCB(ed,iva) + vector) & ~0xffUL;
187 regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
188 // FIXME: NEED TO PASS PSCB, BUT **NOT** IN R31 WHICH IS BEING USED FOR ar.pr
189 // IN ANY CASE, PASS PINNED ADDRESS, NOT THIS ONE
190 //regs->r31 = (unsigned long) &PSCB(ed);
192 PSCB(ed,interrupt_delivery_enabled) = 0;
193 PSCB(ed,interrupt_collection_enabled) = 0;
194 }
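
// In short, reflect_interruption() emulates what the hardware would do when
// delivering a fault to the guest: it saves iim/itir, isr, iip and related
// state into the PSCB, switches to virtual register bank 0, records the
// virtualized ipsr, then points cr.iip at the guest's IVA plus the vector
// offset and rewrites cr.ipsr with DELIVER_PSR_SET/CLR, leaving interrupt
// collection and delivery marked disabled in the PSCB.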

void foodpi(void) {}

// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void deliver_pending_interrupt(struct pt_regs *regs)
{
    struct domain *d = current->domain;
    struct exec_domain *ed = current;
    // FIXME: Will this work properly if doing an RFI???
    if (!is_idle_task(d) && user_mode(regs)) {
        vcpu_poke_timer(ed);
        if (vcpu_deliverable_interrupts(ed)) {
            unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
            foodpi();
            reflect_interruption(0, isr, 0, regs, IA64_EXTINT_VECTOR);
        }
    }
}
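
// Lazy cover: if the guest faulted with interrupt collection off and ISR.ir
// says the register frame is incomplete, stash cr.ifs in the PSCB, mark the
// frame incomplete and clear cr.ifs, then retry the instruction; this
// appears to perform the "cover" that the guest's low-level handler would
// otherwise have done itself.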

int handle_lazy_cover(struct exec_domain *ed, unsigned long isr, struct pt_regs *regs)
{
    if (!PSCB(ed,interrupt_collection_enabled)) {
        if (isr & IA64_ISR_IR) {
            //printf("Handling lazy cover\n");
            PSCB(ed,ifs) = regs->cr_ifs;
            PSCB(ed,incomplete_regframe) = 1;
            regs->cr_ifs = 0;
            return(1);  // retry same instruction with cr.ifs off
        }
    }
    return(0);
}

#define IS_XEN_ADDRESS(d,a) ((a >= d->xen_vastart) && (a <= d->xen_vaend))

void xen_handle_domain_access(unsigned long address, unsigned long isr,
                              struct pt_regs *regs, unsigned long itir)
{
    struct domain *d = (struct domain *) current->domain;
    struct exec_domain *ed = (struct exec_domain *) current;
    TR_ENTRY *trp;
    unsigned long psr = regs->cr_ipsr, mask, flags;
    unsigned long iip = regs->cr_iip;
    // FIXME should validate address here
    unsigned long pteval, mpaddr, ps;
    unsigned long lookup_domain_mpa(struct domain *, unsigned long);
    unsigned long match_dtlb(struct exec_domain *, unsigned long, unsigned long *, unsigned long *);
    IA64FAULT fault;

    // NEED TO HANDLE THREE CASES:
    // 1) domain is in metaphysical mode
    // 2) domain address is in TR
    // 3) domain address is not in TR (reflect data miss)
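    //
    // As implemented below: the metaphysical-mode block handles case 1, the
    // 1-entry DTLB / vcpu_tpa() paths handle case 2, and a failed translation
    // falls back to Xen's exception table ("poor man's exception") rather
    // than reflecting here.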

    // got here trying to read a privop bundle
    //if (d->metaphysical_mode) {
    if (PSCB(current,metaphysical_mode) && !(address >> 61)) {  //FIXME
        if (d == dom0) {
            if (address < dom0_start || address >= dom0_start + dom0_size) {
                printk("xen_handle_domain_access: out-of-bounds "
                       "dom0 mpaddr %p! continuing...\n", address);  // was printing the then-uninitialized 'mpaddr'
                tdpfoo();
            }
        }
        pteval = lookup_domain_mpa(d, address);
        //FIXME: check return value?
        // would be nice to have a counter here
        vcpu_itc_no_srlz(ed, 2, address, pteval, -1UL, PAGE_SHIFT);
        return;
    }
    if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n", address, iip);

    // if we are fortunate enough to have it in the 1-entry TLB...
    if ((pteval = match_dtlb(ed, address, &ps, NULL))) {
        vcpu_itc_no_srlz(ed, 6, address, pteval, -1UL, ps);
        return;
    }
    // look in the TRs
    fault = vcpu_tpa(ed, address, &mpaddr);
    if (fault != IA64_NO_FAULT) {
        static int uacnt = 0;
        // can't translate it, just fail (poor man's exception)
        // which results in retrying execution
        //printk("*** xen_handle_domain_access: poor man's exception cnt=%i iip=%p, addr=%p...\n",uacnt++,iip,address);
        if (ia64_done_with_exception(regs)) {
            //if (!(uacnt++ & 0x3ff)) printk("*** xen_handle_domain_access: successfully handled cnt=%d iip=%p, addr=%p...\n",uacnt,iip,address);
            return;
        }
        else {
            // should never happen.  If it does, region 0 addr may
            // indicate a bad xen pointer
            printk("*** xen_handle_domain_access: exception table"
                   " lookup failed, iip=%p, addr=%p, spinning...\n",
                   iip, address);
            panic_domain(regs, "*** xen_handle_domain_access: exception table"
                   " lookup failed, iip=%p, addr=%p, spinning...\n",
                   iip, address);
        }
    }
    if (d == dom0) {
        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
            printk("xen_handle_domain_access: vcpu_tpa returned out-of-bounds dom0 mpaddr %p! continuing...\n", mpaddr);
            tdpfoo();
        }
    }
    //printk("*** xen_handle_domain_access: tpa resolved miss @%p...\n",address);
    pteval = lookup_domain_mpa(d, mpaddr);
    // would be nice to have a counter here
    //printf("Handling privop data TLB miss\n");
    // FIXME, must be inlined or potential for nested fault here!
    vcpu_itc_no_srlz(ed, 2, address, pteval, -1UL, PAGE_SHIFT);
}
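
// ia64_do_page_fault() is the main TLB-miss dispatcher: Xen code touching
// domain memory is sent to xen_handle_domain_access(), metaphysical-mode
// accesses are filled straight from lookup_domain_mpa(), addresses covered
// by a guest TR are re-inserted via translate_domain_pte(), and anything
// else (after the lazy-cover and speculation checks) is reflected to the
// guest as an [alt] instruction/data TLB miss.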

void ia64_do_page_fault(unsigned long address, unsigned long isr,
                        struct pt_regs *regs, unsigned long itir)
{
    struct domain *d = (struct domain *) current->domain;
    TR_ENTRY *trp;
    unsigned long psr = regs->cr_ipsr, mask, flags;
    unsigned long iip = regs->cr_iip;
    // FIXME should validate address here
    unsigned long pteval, mpaddr;
    unsigned long lookup_domain_mpa(struct domain *, unsigned long);
    unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
    unsigned long vector;
    IA64FAULT fault;

    // The right way is to put it in the VHPT and take another miss!

    // weak attempt to avoid doing both I/D tlb inserts, to avoid
    // problems for privop bundle fetch; doesn't work, deal with later
    if (IS_XEN_ADDRESS(d,iip) && !IS_XEN_ADDRESS(d,address)) {
        xen_handle_domain_access(address, isr, regs, itir);

        return;
    }

    // FIXME: no need to pass itir in to this routine as we need to
    // compute the virtual itir anyway (based on domain's RR.ps)
    // AND ACTUALLY reflect_interruption doesn't use it anyway!
    itir = vcpu_get_itir_on_fault(current, address);

    if (PSCB(current,metaphysical_mode) && (is_data || !(address >> 61))) {  //FIXME
        // FIXME should validate mpaddr here
        if (d == dom0) {
            if (address < dom0_start || address >= dom0_start + dom0_size) {
                printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, iip=%p! continuing...\n", address, iip);
                printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n", address, current->vcpu_info->arch.iip);
                tdpfoo();
            }
        }
        pteval = lookup_domain_mpa(d, address);
        // FIXME, must be inlined or potential for nested fault here!
        vcpu_itc_no_srlz(current, is_data ? 2 : 1, address, pteval, -1UL, PAGE_SHIFT);
        return;
    }
    if ((trp = match_tr(current, address))) {
        // FIXME address had better be pre-validated on insert
        pteval = translate_domain_pte(trp->page_flags, address, trp->itir);
        vcpu_itc_no_srlz(current, is_data ? 2 : 1, address, pteval, -1UL, (trp->itir >> 2) & 0x3f);
        return;
    }
    vector = is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
    if (handle_lazy_cover(current, isr, regs)) return;
    if (!(address >> 61)) {
        panic_domain(0, "ia64_do_page_fault: @%p???, iip=%p, itc=%p (spinning...)\n", address, iip, ia64_get_itc());
    }
    if ((isr & IA64_ISR_SP)
        || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
    {
        /*
         * This fault was due to a speculative load or lfetch.fault, set the "ed"
         * bit in the psr to ensure forward progress.  (Target register will get a
         * NaT for ld.s, lfetch will be canceled.)
         */
        ia64_psr(regs)->ed = 1;
        return;
    }
    reflect_interruption(address, isr, itir, regs, vector);
}

void
ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
           unsigned long iim, unsigned long itir, unsigned long arg5,
           unsigned long arg6, unsigned long arg7, unsigned long stack)
{
    struct pt_regs *regs = (struct pt_regs *) &stack;
    unsigned long code, error = isr;
    char buf[128];
    int result, sig;
    static const char *reason[] = {
        "IA-64 Illegal Operation fault",
        "IA-64 Privileged Operation fault",
        "IA-64 Privileged Register fault",
        "IA-64 Reserved Register/Field fault",
        "Disabled Instruction Set Transition fault",
        "Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
        "Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
        "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
    };
#if 0
    printf("ia64_fault, vector=0x%p, ifa=%p, iip=%p, ipsr=%p, isr=%p\n",
           vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);
#endif

    if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
        /*
         * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
         * the lfetch.
         */
        ia64_psr(regs)->ed = 1;
        printf("ia64_fault: handled lfetch.fault\n");
        return;
    }

    switch (vector) {
    case 24: /* General Exception */
        code = (isr >> 4) & 0xf;
        sprintf(buf, "General Exception: %s%s", reason[code],
                (code == 3) ? ((isr & (1UL << 37))
                ? " (RSE access)" : " (data access)") : "");
        if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
            printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
                   current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri,
                   regs->pr);
# endif
            printf("ia64_fault: returning on hazard\n");
            return;
        }
        break;

    case 25: /* Disabled FP-Register */
        if (isr & 2) {
            //disabled_fph_fault(regs);
            //return;
        }
        sprintf(buf, "Disabled FPL fault---not supposed to happen!");
        break;

    case 26: /* NaT Consumption */
        if (user_mode(regs)) {
            void *addr;

            if (((isr >> 4) & 0xf) == 2) {
                /* NaT page consumption */
                //sig = SIGSEGV;
                //code = SEGV_ACCERR;
                addr = (void *) ifa;
            } else {
                /* register NaT consumption */
                //sig = SIGILL;
                //code = ILL_ILLOPN;
                addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
            }
            //siginfo.si_signo = sig;
            //siginfo.si_code = code;
            //siginfo.si_errno = 0;
            //siginfo.si_addr = addr;
            //siginfo.si_imm = vector;
            //siginfo.si_flags = __ISR_VALID;
            //siginfo.si_isr = isr;
            //force_sig_info(sig, &siginfo, current);
            //return;
        } //else if (ia64_done_with_exception(regs))
            //return;
        sprintf(buf, "NaT consumption");
        break;

    case 31: /* Unsupported Data Reference */
        if (user_mode(regs)) {
            //siginfo.si_signo = SIGILL;
            //siginfo.si_code = ILL_ILLOPN;
            //siginfo.si_errno = 0;
            //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
            //siginfo.si_imm = vector;
            //siginfo.si_flags = __ISR_VALID;
            //siginfo.si_isr = isr;
            //force_sig_info(SIGILL, &siginfo, current);
            //return;
        }
        sprintf(buf, "Unsupported data reference");
        break;

    case 29: /* Debug */
    case 35: /* Taken Branch Trap */
    case 36: /* Single Step Trap */
        //if (fsys_mode(current, regs)) {}
        switch (vector) {
        case 29:
            //siginfo.si_code = TRAP_HWBKPT;
#ifdef CONFIG_ITANIUM
            /*
             * Erratum 10 (IFA may contain incorrect address) now has
             * "NoFix" status.  There are no plans for fixing this.
             */
            if (ia64_psr(regs)->is == 0)
                ifa = regs->cr_iip;
#endif
            break;
        case 35: ifa = 0; break;
        case 36: ifa = 0; break;
        //case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
        //case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
        }
        //siginfo.si_signo = SIGTRAP;
        //siginfo.si_errno = 0;
        //siginfo.si_addr = (void *) ifa;
        //siginfo.si_imm = 0;
        //siginfo.si_flags = __ISR_VALID;
        //siginfo.si_isr = isr;
        //force_sig_info(SIGTRAP, &siginfo, current);
        //return;
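
        // NOTE: with the force_sig_info()/return above commented out, these
        // debug/branch/single-step cases appear to fall straight through into
        // the FP fault/trap cases below, where 'result' is never actually set.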

    case 32: /* fp fault */
    case 33: /* fp trap */
        //result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
        if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
            //siginfo.si_signo = SIGFPE;
            //siginfo.si_errno = 0;
            //siginfo.si_code = FPE_FLTINV;
            //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
            //siginfo.si_flags = __ISR_VALID;
            //siginfo.si_isr = isr;
            //siginfo.si_imm = 0;
            //force_sig_info(SIGFPE, &siginfo, current);
        }
        //return;
        sprintf(buf, "FP fault/trap");
        break;

    case 34:
        if (isr & 0x2) {
            /* Lower-Privilege Transfer Trap */
            /*
             * Just clear PSR.lp and then return immediately: all the
             * interesting work (e.g., signal delivery is done in the kernel
             * exit path).
             */
            //ia64_psr(regs)->lp = 0;
            //return;
            sprintf(buf, "Lower-Privilege Transfer trap");
        } else {
            /* Unimplemented Instr. Address Trap */
            if (user_mode(regs)) {
                //siginfo.si_signo = SIGILL;
                //siginfo.si_code = ILL_BADIADDR;
                //siginfo.si_errno = 0;
                //siginfo.si_flags = 0;
                //siginfo.si_isr = 0;
                //siginfo.si_imm = 0;
                //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
                //force_sig_info(SIGILL, &siginfo, current);
                //return;
            }
            sprintf(buf, "Unimplemented Instruction Address fault");
        }
        break;

    case 45:
        printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
        printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
               regs->cr_iip, ifa, isr);
        //force_sig(SIGSEGV, current);
        break;

    case 46:
        printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
        printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
               regs->cr_iip, ifa, isr, iim);
        //force_sig(SIGSEGV, current);
        return;

    case 47:
        sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
        break;

    default:
        sprintf(buf, "Fault %lu", vector);
        break;
    }
    //die_if_kernel(buf, regs, error);
    printk("ia64_fault: %s: reflecting\n", buf);
    reflect_interruption(ifa, isr, iim, regs, IA64_GENEX_VECTOR);
    //while(1);
    //force_sig(SIGILL, current);
}

unsigned long running_on_sim = 0;
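
// SSC = Simulator System Call, the pseudo-syscall interface of the HP SKI
// simulator.  do_ssc() emulates or forwards the guest's SSC requests
// (console, simulated disk, netdev probe), translating metaphysical buffer
// addresses before handing them to the real ia64_ssc().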

void
do_ssc(unsigned long ssc, struct pt_regs *regs)
{
    extern unsigned long lookup_domain_mpa(struct domain *, unsigned long);
    unsigned long arg0, arg1, arg2, arg3, retval;
    char buf[2];
/**/    static int last_fd, last_count;  // FIXME FIXME FIXME
/**/        // BROKEN FOR MULTIPLE DOMAINS & SMP
/**/    struct ssc_disk_stat { int fd; unsigned count; } *stat, last_stat;
    extern unsigned long vcpu_verbose, privop_trace;

    arg0 = vcpu_get_gr(current, 32);
    switch (ssc) {
    case SSC_PUTCHAR:
        buf[0] = arg0;
        buf[1] = '\0';
        printf(buf);
        break;
    case SSC_GETCHAR:
        retval = ia64_ssc(0, 0, 0, 0, ssc);
        vcpu_set_gr(current, 8, retval);
        break;
    case SSC_WAIT_COMPLETION:
        if (arg0) {  // metaphysical address

            arg0 = translate_domain_mpaddr(arg0);
/**/        stat = (struct ssc_disk_stat *) __va(arg0);
///**/      if (stat->fd == last_fd) stat->count = last_count;
/**/        stat->count = last_count;
            //if (last_count >= PAGE_SIZE) printf("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
///**/      retval = ia64_ssc(arg0,0,0,0,ssc);
/**/        retval = 0;
        }
        else retval = -1L;
        vcpu_set_gr(current, 8, retval);
        break;
    case SSC_OPEN:
        arg1 = vcpu_get_gr(current, 33);  // access rights
        if (!running_on_sim) {
            printf("SSC_OPEN, not implemented on hardware. (ignoring...)\n");
            arg0 = 0;
        }
        if (arg0) {  // metaphysical address
            arg0 = translate_domain_mpaddr(arg0);
            retval = ia64_ssc(arg0, arg1, 0, 0, ssc);
        }
        else retval = -1L;
        vcpu_set_gr(current, 8, retval);
        break;
    case SSC_WRITE:
    case SSC_READ:
        //if (ssc == SSC_WRITE) printf("DOING AN SSC_WRITE\n");
        arg1 = vcpu_get_gr(current, 33);
        arg2 = vcpu_get_gr(current, 34);
        arg3 = vcpu_get_gr(current, 35);
        if (arg2) {  // metaphysical address of descriptor
            struct ssc_disk_req *req;
            unsigned long mpaddr, paddr;
            long len;

            arg2 = translate_domain_mpaddr(arg2);
            req = (struct ssc_disk_req *) __va(arg2);  // cast was to a mismatched 'struct disk_req'
            req->len &= 0xffffffffL;  // avoid strange bug
            len = req->len;
/**/        last_fd = arg1;
/**/        last_count = len;
            mpaddr = req->addr;
            //if (last_count >= PAGE_SIZE) printf("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
            retval = 0;
            if ((mpaddr & PAGE_MASK) != ((mpaddr + len - 1) & PAGE_MASK)) {
                // do partial page first
                req->addr = translate_domain_mpaddr(mpaddr);
                req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
                len -= req->len; mpaddr += req->len;
                retval = ia64_ssc(arg0, arg1, arg2, arg3, ssc);
                arg3 += req->len;  // file offset
/**/            last_stat.fd = last_fd;
/**/            (void) ia64_ssc(__pa(&last_stat), 0, 0, 0, SSC_WAIT_COMPLETION);
                //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
            }
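            // The request is issued one Xen page at a time because consecutive
            // metaphysical pages need not be machine-contiguous; each chunk gets
            // its own translate_domain_mpaddr() and a synchronous
            // SSC_WAIT_COMPLETION.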
            if (retval >= 0) while (len > 0) {
                req->addr = translate_domain_mpaddr(mpaddr);
                req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
                len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
                retval = ia64_ssc(arg0, arg1, arg2, arg3, ssc);
                arg3 += req->len;  // file offset
                // TEMP REMOVED AGAIN arg3 += req->len; // file offset
/**/            last_stat.fd = last_fd;
/**/            (void) ia64_ssc(__pa(&last_stat), 0, 0, 0, SSC_WAIT_COMPLETION);
                //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
            }
            // set it back to the original value
            req->len = last_count;
        }
        else retval = -1L;
        vcpu_set_gr(current, 8, retval);
        //if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
        break;
    case SSC_CONNECT_INTERRUPT:
        arg1 = vcpu_get_gr(current, 33);
        arg2 = vcpu_get_gr(current, 34);
        arg3 = vcpu_get_gr(current, 35);
        if (!running_on_sim) {
            printf("SSC_CONNECT_INTERRUPT, not implemented on hardware. (ignoring...)\n");
            break;
        }
        (void) ia64_ssc(arg0, arg1, arg2, arg3, ssc);
        break;
    case SSC_NETDEV_PROBE:
        vcpu_set_gr(current, 8, -1L);
        break;
    default:
        printf("ia64_handle_break: bad ssc code %lx\n", ssc);
        break;
    }
    vcpu_increment_iip(current);
}

void fooefi(void) {}
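
// ia64_handle_break(): break.b faults land here.  SSC break immediates
// (0x80001/0x80002) go to do_ssc(), a break with the domain's breakimm is
// treated as a hypercall whose function code is in r2 (PAL/SAL/EFI firmware
// emulation plus a couple of test hypercalls), and any other break is
// reflected to the guest as a break fault.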

void
ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
    static int first_time = 1;
    struct domain *d = (struct domain *) current->domain;
    struct exec_domain *ed = (struct exec_domain *) current;  // cast was to 'struct domain *'
    extern unsigned long running_on_sim;

    if (first_time) {
        if (platform_is_hp_ski()) running_on_sim = 1;
        else running_on_sim = 0;
        first_time = 0;
    }
    if (iim == 0x80001 || iim == 0x80002) {  //FIXME: don't hardcode constant
        if (running_on_sim) do_ssc(vcpu_get_gr(current, 36), regs);
        else do_ssc(vcpu_get_gr(current, 36), regs);
    }
    else if (iim == d->breakimm) {
        struct ia64_sal_retval x;
        switch (regs->r2) {
        case FW_HYPERCALL_PAL_CALL:
            //printf("*** PAL hypercall: index=%d\n",regs->r28);
            //FIXME: This should call a C routine
            x = pal_emulator_static(regs->r28);
            regs->r8 = x.status; regs->r9 = x.v0;
            regs->r10 = x.v1; regs->r11 = x.v2;
            break;
        case FW_HYPERCALL_SAL_CALL:
            x = sal_emulator(vcpu_get_gr(ed,32), vcpu_get_gr(ed,33),
                             vcpu_get_gr(ed,34), vcpu_get_gr(ed,35),
                             vcpu_get_gr(ed,36), vcpu_get_gr(ed,37),
                             vcpu_get_gr(ed,38), vcpu_get_gr(ed,39));
            regs->r8 = x.status; regs->r9 = x.v0;
            regs->r10 = x.v1; regs->r11 = x.v2;
            break;
        case FW_HYPERCALL_EFI_RESET_SYSTEM:
            printf("efi.reset_system called ");
            if (current->domain == dom0) {
                printf("(by dom0)\n ");
                (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
            }
            printf("(not supported for non-0 domain)\n");
            regs->r8 = EFI_UNSUPPORTED;
            break;
        case FW_HYPERCALL_EFI_GET_TIME:
            {
            unsigned long *tv, *tc;
            fooefi();
            tv = vcpu_get_gr(ed,32);
            tc = vcpu_get_gr(ed,33);
            //printf("efi_get_time(%p,%p) called...",tv,tc);
            tv = __va(translate_domain_mpaddr(tv));
            if (tc) tc = __va(translate_domain_mpaddr(tc));
            regs->r8 = (*efi.get_time)(tv, tc);
            //printf("and returns %lx\n",regs->r8);
            }
            break;
        case FW_HYPERCALL_EFI_SET_TIME:
        case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
        case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
            // FIXME: need fixes in efi.h from 2.6.9
        case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
            // FIXME: WARNING!! IF THIS EVER GETS IMPLEMENTED
            // SOME OF THE OTHER EFI EMULATIONS WILL CHANGE AS
            // POINTER ARGUMENTS WILL BE VIRTUAL!!
        case FW_HYPERCALL_EFI_GET_VARIABLE:
            // FIXME: need fixes in efi.h from 2.6.9
        case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
        case FW_HYPERCALL_EFI_SET_VARIABLE:
        case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
            // FIXME: need fixes in efi.h from 2.6.9
            regs->r8 = EFI_UNSUPPORTED;
            break;
        case 0xffff:  // test dummy hypercall
            regs->r8 = dump_privop_counts_to_user(
                vcpu_get_gr(ed,32),
                vcpu_get_gr(ed,33));
            break;
        case 0xfffe:  // test dummy hypercall
            regs->r8 = zero_privop_counts_to_user(
                vcpu_get_gr(ed,32),
                vcpu_get_gr(ed,33));
            break;
        }
        vcpu_increment_iip(current);
    }
    else reflect_interruption(ifa, isr, iim, regs, IA64_BREAK_VECTOR);
}
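
// Privileged-operation faults are first handed to priv_emulate(); IA64_RETRY
// means the access should instead be reflected to the guest as an alt data
// TLB miss with a forced IFA, and any other fault code (other than
// IA64_NO_FAULT / IA64_RFI_IN_PROGRESS) is reflected back unchanged.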

void
ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
{
    IA64FAULT vector;
    struct domain *d = current->domain;
    struct exec_domain *ed = current;
    // FIXME: no need to pass itir in to this routine as we need to
    // compute the virtual itir anyway (based on domain's RR.ps)
    // AND ACTUALLY reflect_interruption doesn't use it anyway!
    itir = vcpu_get_itir_on_fault(ed, ifa);
    vector = priv_emulate(current, regs, isr);
    if (vector == IA64_RETRY) {
        reflect_interruption(ifa, isr, itir, regs,
                             IA64_ALT_DATA_TLB_VECTOR | IA64_FORCED_IFA);
    }
    else if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
        reflect_interruption(ifa, isr, itir, regs, vector);
    }
}

#define INTR_TYPE_MAX 10
UINT64 int_counts[INTR_TYPE_MAX];
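
// ia64_handle_reflection() maps raw IVT vector numbers (8, 9, 10, 20, ...)
// onto the IA64_*_VECTOR offsets used by reflect_interruption(), applying the
// lazy-cover fixup first where appropriate; a NaT consumption fault gets one
// attempt at privop emulation before being reflected.  int_counts[] above
// appears unused here, presumably left over for instrumentation.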

void
ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
{
    struct domain *d = (struct domain *) current->domain;
    struct exec_domain *ed = (struct exec_domain *) current;  // cast was to 'struct domain *'
    unsigned long check_lazy_cover = 0;
    unsigned long psr = regs->cr_ipsr;
    unsigned long itir = vcpu_get_itir_on_fault(ed, ifa);

    if (!(psr & IA64_PSR_CPL)) {
        printk("ia64_handle_reflection: reflecting with priv=0!!\n");
    }
    // FIXME: no need to pass itir in to this routine as we need to
    // compute the virtual itir anyway (based on domain's RR.ps)
    // AND ACTUALLY reflect_interruption doesn't use it anyway!
    itir = vcpu_get_itir_on_fault(ed, ifa);
    switch (vector) {
    case 8:
        vector = IA64_DIRTY_BIT_VECTOR; break;
    case 9:
        vector = IA64_INST_ACCESS_BIT_VECTOR; break;
    case 10:
        check_lazy_cover = 1;
        vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
    case 20:
        check_lazy_cover = 1;
        vector = IA64_PAGE_NOT_PRESENT_VECTOR; break;
    case 22:
        vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
    case 23:
        check_lazy_cover = 1;
        vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
    case 25:
        vector = IA64_DISABLED_FPREG_VECTOR; break;
    case 26:
        printf("*** NaT fault... attempting to handle as privop\n");
        vector = priv_emulate(ed, regs, isr);
        if (vector == IA64_NO_FAULT) {
            printf("*** Handled privop masquerading as NaT fault\n");
            return;
        }
        vector = IA64_NAT_CONSUMPTION_VECTOR; break;
    case 27:
        //printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
        itir = iim;
        vector = IA64_SPECULATION_VECTOR; break;
    case 30:
        // FIXME: Should we handle unaligned refs in Xen??
        vector = IA64_UNALIGNED_REF_VECTOR; break;
    default:
        printf("ia64_handle_reflection: unhandled vector=0x%lx\n", vector);
        while (vector);
        return;
    }
    if (check_lazy_cover && handle_lazy_cover(ed, isr, regs)) return;
    reflect_interruption(ifa, isr, itir, regs, vector);
}