ia64/xen-unstable

view xen/arch/ia64/process.c @ 3108:85d6a1145160

bitkeeper revision 1.1159.187.7 (41a4e12eWWEz6Rwd4YlbRFZKcBjaMQ)

Merge arcadians.cl.cam.ac.uk:/auto/groups/xeno/BK/xen-2.0-testing.bk
into arcadians.cl.cam.ac.uk:/local/scratch-2/cl349/xen-2.0-testing.bk
author cl349@arcadians.cl.cam.ac.uk
date Wed Nov 24 19:29:50 2004 +0000 (2004-11-24)
parents b7cbbc4c7a3e
children 7ef582b6c9c4
line source
1 /*
2 * Miscellaneous process/domain related routines
3 *
4 * Copyright (C) 2004 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <xen/config.h>
10 #include <xen/lib.h>
11 #include <xen/errno.h>
12 #include <xen/sched.h>
13 #include <xen/smp.h>
14 #include <asm/ptrace.h>
15 #include <xen/delay.h>
17 #include <linux/efi.h> /* FOR EFI_UNIMPLEMENTED */
18 #include <asm/sal.h> /* FOR struct ia64_sal_retval */
20 #include <asm/system.h>
21 #include <asm/io.h>
22 #include <asm/processor.h>
23 #include <asm/desc.h>
24 #include <asm/ldt.h>
25 #include <xen/irq.h>
26 #include <xen/event.h>
27 #include <asm/regionreg.h>
28 #include <asm/privop.h>
29 #include <asm/vcpu.h>
30 #include <asm/ia64_int.h>
31 #include <asm/hpsim_ssc.h>
32 #include <asm/dom_fw.h>
// Firmware emulation entry points (PAL/SAL), defined elsewhere in xen/arch/ia64.
34 extern struct ia64_sal_retval pal_emulator_static(UINT64);
35 extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
// Physical extent of dom0's memory (set at domain construction).
37 extern unsigned long dom0_start, dom0_size;
39 #define IA64_PSR_CPL1 (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
40 // note IA64_PSR_PK removed from following, why is this necessary?
// psr bits forced ON in the guest's ipsr when an interruption is reflected
// (collection/interrupts/translation on, privilege forced to ring 1+).
41 #define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
42 IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
43 IA64_PSR_IT | IA64_PSR_BN)
// psr bits forced OFF on reflection (debug/single-step/speculation state etc.).
45 #define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
46 IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
47 IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
48 IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
49 IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
50 IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
// Shorthand for the domain's privileged-state control block kept in shared_info.
52 #define PSCB(x) x->shared_info->arch
54 extern unsigned long vcpu_verbose
// Stub: changing a domain's I/O privilege level is not implemented on ia64.
// NOTE(review): dummy() appears to be a debug/trap hook — confirm; the call
// always reports success to the caller.
56 long do_iopl(domid_t domain, unsigned int new_io_pl)
57 {
58 dummy();
59 return 0;
60 }
62 void schedule_tail(struct domain *next)
63 {
64 unsigned long rr7;
65 printk("current=%lx,shared_info=%lx\n",current,current->shared_info);
66 printk("next=%lx,shared_info=%lx\n",next,next->shared_info);
67 if (rr7 = load_region_regs(current)) {
68 printk("schedule_tail: change to rr7 not yet implemented\n");
69 }
70 }
// match_tr: defined in vcpu code; returns the domain TR entry covering ifa, if any.
72 extern TR_ENTRY *match_tr(struct domain *d, unsigned long ifa);
// Empty hook: convenient breakpoint target when an out-of-bounds dom0
// metaphysical address is detected (called from the translate paths below).
74 void tdpfoo(void) { }
76 // given a domain virtual address, pte and pagesize, extract the metaphysical
77 // address, convert the pte for a physical address for (possibly different)
78 // Xen PAGE_SIZE and return modified pte. (NOTE: TLB insert should use
79 // PAGE_SIZE!)
80 unsigned long translate_domain_pte(unsigned long pteval,
81 unsigned long address, unsigned long itir)
82 {
83 struct domain *d = (struct domain *) current;
84 unsigned long mask, pteval2, mpaddr;
85 unsigned long lookup_domain_mpa(struct domain *,unsigned long);
86 extern struct domain *dom0;
87 extern unsigned long dom0_start, dom0_size;
89 // FIXME address had better be pre-validated on insert
90 mask = (1L << ((itir >> 2) & 0x3f)) - 1;
91 mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
92 if (d == dom0) {
93 if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
94 //printk("translate_domain_pte: out-of-bounds dom0 mpaddr %p! itc=%lx...\n",mpaddr,ia64_get_itc());
95 tdpfoo();
96 }
97 }
98 else if ((mpaddr >> PAGE_SHIFT) > d->max_pages) {
99 printf("translate_domain_pte: bad mpa=%p (> %p),vadr=%p,pteval=%p,itir=%p\n",
100 mpaddr,d->max_pages<<PAGE_SHIFT,address,pteval,itir);
101 tdpfoo();
102 }
103 pteval2 = lookup_domain_mpa(d,mpaddr);
104 pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
105 pteval2 |= _PAGE_PL_2; // force PL0->2 (PL3 is unaffected)
106 pteval2 = (pteval & ~_PAGE_PPN_MASK) | pteval2;
107 return pteval2;
108 }
110 // given a current domain metaphysical address, return the physical address
111 unsigned long translate_domain_mpaddr(unsigned long mpaddr)
112 {
113 extern unsigned long lookup_domain_mpa(struct domain *,unsigned long);
114 unsigned long pteval;
116 if (current == dom0) {
117 if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
118 printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr %p! continuing...\n",mpaddr);
119 tdpfoo();
120 }
121 }
122 pteval = lookup_domain_mpa(current,mpaddr);
123 return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
124 }
126 void reflect_interruption(unsigned long ifa, unsigned long isr, unsigned long itiriim, struct pt_regs *regs, unsigned long vector)
127 {
128 unsigned long vcpu_get_ipsr_int_state(struct domain *,unsigned long);
129 unsigned long vcpu_get_rr_ve(struct domain *,unsigned long);
130 unsigned long vcpu_get_itir_on_fault(struct domain *,unsigned long);
131 struct domain *d = (struct domain *) current;
133 if (vector == IA64_EXTINT_VECTOR) {
135 extern unsigned long vcpu_verbose, privop_trace;
136 static first_extint = 1;
137 if (first_extint) {
138 printf("Delivering first extint to domain: ifa=%p, isr=%p, itir=%p, iip=%p\n",ifa,isr,itiriim,regs->cr_iip);
139 //privop_trace = 1; vcpu_verbose = 1;
140 first_extint = 0;
141 }
142 }
143 if (!PSCB(d).interrupt_collection_enabled) {
144 if (!(PSCB(d).ipsr & IA64_PSR_DT)) {
145 printf("psr.dt off, trying to deliver nested dtlb!\n");
146 while(1);
147 }
148 vector &= ~0xf;
149 if (vector != IA64_DATA_TLB_VECTOR &&
150 vector != IA64_DATA_TLB_VECTOR) {
151 printf("psr.ic off, delivering fault=%lx,iip=%p,isr=%p,PSCB.iip=%p\n",
152 vector,regs->cr_iip,isr,PSCB(d).iip);
153 while(1);
155 }
156 //printf("Delivering NESTED DATA TLB fault\n");
157 vector = IA64_DATA_NESTED_TLB_VECTOR;
158 regs->cr_iip = ((unsigned long) PSCB(d).iva + vector) & ~0xffUL;
159 regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
160 // NOTE: nested trap must NOT pass PSCB address
161 //regs->r31 = (unsigned long) &PSCB(d);
162 return;
164 }
165 if ((vector & 0xf) != IA64_FORCED_IFA) PSCB(d).ifa = ifa;
166 else ifa = PSCB(d).ifa;
167 vector &= ~0xf;
168 // always deliver on ALT vector (for now?) because no VHPT
169 // if (!vcpu_get_rr_ve(d,ifa)) {
170 if (vector == IA64_DATA_TLB_VECTOR)
171 vector = IA64_ALT_DATA_TLB_VECTOR;
172 else if (vector == IA64_INST_TLB_VECTOR)
173 vector = IA64_ALT_INST_TLB_VECTOR;
174 // }
175 PSCB(d).unat = regs->ar_unat; // not sure if this is really needed?
176 PSCB(d).precover_ifs = regs->cr_ifs;
177 vcpu_bsw0(d);
178 PSCB(d).ipsr = vcpu_get_ipsr_int_state(d,regs->cr_ipsr);
179 if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
180 PSCB(d).iim = itiriim;
181 else PSCB(d).itir = vcpu_get_itir_on_fault(d,ifa);
182 PSCB(d).isr = isr; // this is unnecessary except for interrupts!
183 PSCB(d).iip = regs->cr_iip;
184 PSCB(d).ifs = 0;
185 PSCB(d).incomplete_regframe = 0;
187 regs->cr_iip = ((unsigned long) PSCB(d).iva + vector) & ~0xffUL;
188 regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
189 // FIXME: NEED TO PASS PSCB, BUT **NOT** IN R31 WHICH IS BEING USED FOR ar.pr
190 // IN ANY CASE, PASS PINNED ADDRESS, NOT THIS ONE
191 //regs->r31 = (unsigned long) &PSCB(d);
193 PSCB(d).interrupt_delivery_enabled = 0;
194 PSCB(d).interrupt_collection_enabled = 0;
195 }
197 void foodpi(void) {}
199 // ONLY gets called from ia64_leave_kernel
200 // ONLY call with interrupts disabled?? (else might miss one?)
201 // NEVER successful if already reflecting a trap/fault because psr.i==0
202 void deliver_pending_interrupt(struct pt_regs *regs)
203 {
204 struct domain *d = (struct domain *) current;
205 // FIXME: Will this work properly if doing an RFI???
206 if (!is_idle_task(d) && user_mode(regs)) {
207 vcpu_poke_timer(d);
208 if (vcpu_deliverable_interrupts(d)) {
209 unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
210 foodpi();
211 reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
212 }
213 }
214 }
216 int handle_lazy_cover(struct domain *d, unsigned long isr, struct pt_regs *regs)
217 {
218 if (!PSCB(d).interrupt_collection_enabled) {
219 if (isr & IA64_ISR_IR) {
220 // printf("Handling lazy cover\n");
221 PSCB(d).ifs = regs->cr_ifs;
222 PSCB(d).incomplete_regframe = 1;
223 regs->cr_ifs = 0;
224 return(1); // retry same instruction with cr.ifs off
225 }
226 }
227 return(0);
228 }
230 #define IS_XEN_ADDRESS(d,a) ((a >= d->xen_vastart) && (a <= d->xen_vaend))
232 void xen_handle_domain_access(unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
233 {
234 struct domain *d = (struct domain *) current;
235 TR_ENTRY *trp;
236 unsigned long psr = regs->cr_ipsr, mask, flags;
237 unsigned long iip = regs->cr_iip;
238 // FIXME should validate address here
239 unsigned long pteval, mpaddr;
240 unsigned long lookup_domain_mpa(struct domain *,unsigned long);
241 IA64FAULT fault;
242 extern void __get_domain_bundle(void);
244 // NEED TO HANDLE THREE CASES:
245 // 1) domain is in metaphysical mode
246 // 2) domain address is in TR
247 // 3) domain address is not in TR (reflect data miss)
249 // got here trying to read a privop bundle
250 //if (d->metaphysical_mode) {
251 if (d->metaphysical_mode && !(address>>61)) { //FIXME
252 if (d == dom0) {
253 if (address < dom0_start || address >= dom0_start + dom0_size) {
254 printk("xen_handle_domain_access: out-of-bounds"
255 "dom0 mpaddr %p! continuing...\n",mpaddr);
256 tdpfoo();
257 }
258 }
259 pteval = lookup_domain_mpa(d,address);
260 //FIXME: check return value?
261 // would be nice to have a counter here
262 vcpu_itc_no_srlz(d,2,address,pteval,PAGE_SHIFT);
263 return;
264 }
265 if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n",address,iip);
266 if (*(unsigned long *)__get_domain_bundle != iip) {
267 printf("Bad user space access @%p ",address);
268 printf("iip=%p, ipsr=%p, b0=%p\n",iip,psr,regs->b0);
269 while(1);
270 }
272 fault = vcpu_tpa(d,address,&mpaddr);
273 if (fault != IA64_NO_FAULT) {
274 // this is hardcoded to handle __get_domain_bundle only
275 regs->r8 = 0; regs->r9 = 0;
276 regs->cr_iip += 0x20;
277 //regs->cr_iip |= (2UL << IA64_PSR_RI_BIT);
278 return;
279 }
280 if (d == dom0) {
281 if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
282 printk("xen_handle_domain_access: vcpu_tpa returned out-of-bounds dom0 mpaddr %p! continuing...\n",mpaddr);
283 tdpfoo();
284 }
285 }
286 pteval = lookup_domain_mpa(d,mpaddr);
287 // would be nice to have a counter here
288 //printf("Handling privop data TLB miss\n");
289 // FIXME, must be inlined or potential for nested fault here!
290 vcpu_itc_no_srlz(d,2,address,pteval,PAGE_SHIFT);
291 }
293 void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
294 {
295 struct domain *d = (struct domain *) current;
296 TR_ENTRY *trp;
297 unsigned long psr = regs->cr_ipsr, mask, flags;
298 unsigned long iip = regs->cr_iip;
299 // FIXME should validate address here
300 unsigned long pteval, mpaddr;
301 unsigned long lookup_domain_mpa(struct domain *,unsigned long);
302 unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
303 unsigned long vector;
304 IA64FAULT fault;
307 //The right way is put in VHPT and take another miss!
309 // weak attempt to avoid doing both I/D tlb insert to avoid
310 // problems for privop bundle fetch, doesn't work, deal with later
311 if (IS_XEN_ADDRESS(d,iip) && !IS_XEN_ADDRESS(d,address)) {
312 xen_handle_domain_access(address, isr, regs, itir);
314 return;
315 }
317 // FIXME: no need to pass itir in to this routine as we need to
318 // compute the virtual itir anyway (based on domain's RR.ps)
319 // AND ACTUALLY reflect_interruption doesn't use it anyway!
320 itir = vcpu_get_itir_on_fault(d,address);
322 if (d->metaphysical_mode && (is_data || !(address>>61))) { //FIXME
323 // FIXME should validate mpaddr here
324 if (d == dom0) {
325 if (address < dom0_start || address >= dom0_start + dom0_size) {
326 printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, iip=%p! continuing...\n",address,iip);
327 printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n",address,d->shared_info->arch.iip);
328 tdpfoo();
329 }
330 }
331 pteval = lookup_domain_mpa(d,address);
332 // FIXME, must be inlined or potential for nested fault here!
333 vcpu_itc_no_srlz(d,is_data?2:1,address,pteval,PAGE_SHIFT);
334 return;
335 }
336 if (trp = match_tr(d,address)) {
337 // FIXME address had better be pre-validated on insert
338 pteval = translate_domain_pte(trp->page_flags,address,trp->itir);
339 vcpu_itc_no_srlz(d,is_data?2:1,address,pteval,(trp->itir>>2)&0x3f);
340 return;
341 }
342 vector = is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
343 if (handle_lazy_cover(d, isr, regs)) return;
344 if (!(address>>61)) { printf("ia64_do_page_fault: @%p???, iip=%p, itc=%p (spinning...)\n",address,iip,ia64_get_itc()); while(1); }
345 if ((isr & IA64_ISR_SP)
346 || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
347 {
348 /*
349 * This fault was due to a speculative load or lfetch.fault, set the "ed"
350 * bit in the psr to ensure forward progress. (Target register will get a
351 * NaT for ld.s, lfetch will be canceled.)
352 */
353 ia64_psr(regs)->ed = 1;
354 return;
355 }
356 reflect_interruption(address, isr, itir, regs, vector);
357 }
359 void
360 ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
361 unsigned long iim, unsigned long itir, unsigned long arg5,
362 unsigned long arg6, unsigned long arg7, unsigned long stack)
363 {
364 struct pt_regs *regs = (struct pt_regs *) &stack;
365 unsigned long code, error = isr;
366 char buf[128];
367 int result, sig;
368 static const char *reason[] = {
369 "IA-64 Illegal Operation fault",
370 "IA-64 Privileged Operation fault",
371 "IA-64 Privileged Register fault",
372 "IA-64 Reserved Register/Field fault",
373 "Disabled Instruction Set Transition fault",
374 "Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
375 "Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
376 "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
377 };
378 #if 0
379 printf("ia64_fault, vector=0x%p, ifa=%p, iip=%p, ipsr=%p, isr=%p\n",
380 vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);
381 #endif
383 if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
384 /*
385 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
386 * the lfetch.
387 */
388 ia64_psr(regs)->ed = 1;
389 printf("ia64_fault: handled lfetch.fault\n");
390 return;
391 }
393 switch (vector) {
394 case 24: /* General Exception */
395 code = (isr >> 4) & 0xf;
396 sprintf(buf, "General Exception: %s%s", reason[code],
397 (code == 3) ? ((isr & (1UL << 37))
398 ? " (RSE access)" : " (data access)") : "");
399 if (code == 8) {
400 # ifdef CONFIG_IA64_PRINT_HAZARDS
401 printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
402 current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri,
403 regs->pr);
404 # endif
405 printf("ia64_fault: returning on hazard\n");
406 return;
407 }
408 break;
410 case 25: /* Disabled FP-Register */
411 if (isr & 2) {
412 //disabled_fph_fault(regs);
413 //return;
414 }
415 sprintf(buf, "Disabled FPL fault---not supposed to happen!");
416 break;
418 case 26: /* NaT Consumption */
419 if (user_mode(regs)) {
420 void *addr;
422 if (((isr >> 4) & 0xf) == 2) {
423 /* NaT page consumption */
424 //sig = SIGSEGV;
425 //code = SEGV_ACCERR;
426 addr = (void *) ifa;
427 } else {
428 /* register NaT consumption */
429 //sig = SIGILL;
430 //code = ILL_ILLOPN;
431 addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
432 }
433 //siginfo.si_signo = sig;
434 //siginfo.si_code = code;
435 //siginfo.si_errno = 0;
436 //siginfo.si_addr = addr;
437 //siginfo.si_imm = vector;
438 //siginfo.si_flags = __ISR_VALID;
439 //siginfo.si_isr = isr;
440 //force_sig_info(sig, &siginfo, current);
441 //return;
442 } //else if (ia64_done_with_exception(regs))
443 //return;
444 sprintf(buf, "NaT consumption");
445 break;
447 case 31: /* Unsupported Data Reference */
448 if (user_mode(regs)) {
449 //siginfo.si_signo = SIGILL;
450 //siginfo.si_code = ILL_ILLOPN;
451 //siginfo.si_errno = 0;
452 //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
453 //siginfo.si_imm = vector;
454 //siginfo.si_flags = __ISR_VALID;
455 //siginfo.si_isr = isr;
456 //force_sig_info(SIGILL, &siginfo, current);
457 //return;
458 }
459 sprintf(buf, "Unsupported data reference");
460 break;
462 case 29: /* Debug */
463 case 35: /* Taken Branch Trap */
464 case 36: /* Single Step Trap */
465 //if (fsys_mode(current, regs)) {}
466 switch (vector) {
467 case 29:
468 //siginfo.si_code = TRAP_HWBKPT;
469 #ifdef CONFIG_ITANIUM
470 /*
471 * Erratum 10 (IFA may contain incorrect address) now has
472 * "NoFix" status. There are no plans for fixing this.
473 */
474 if (ia64_psr(regs)->is == 0)
475 ifa = regs->cr_iip;
476 #endif
477 break;
478 case 35: ifa = 0; break;
479 case 36: ifa = 0; break;
480 //case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
481 //case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
482 }
483 //siginfo.si_signo = SIGTRAP;
484 //siginfo.si_errno = 0;
485 //siginfo.si_addr = (void *) ifa;
486 //siginfo.si_imm = 0;
487 //siginfo.si_flags = __ISR_VALID;
488 //siginfo.si_isr = isr;
489 //force_sig_info(SIGTRAP, &siginfo, current);
490 //return;
492 case 32: /* fp fault */
493 case 33: /* fp trap */
494 //result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
495 if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
496 //siginfo.si_signo = SIGFPE;
497 //siginfo.si_errno = 0;
498 //siginfo.si_code = FPE_FLTINV;
499 //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
500 //siginfo.si_flags = __ISR_VALID;
501 //siginfo.si_isr = isr;
502 //siginfo.si_imm = 0;
503 //force_sig_info(SIGFPE, &siginfo, current);
504 }
505 //return;
506 sprintf(buf, "FP fault/trap");
507 break;
509 case 34:
510 if (isr & 0x2) {
511 /* Lower-Privilege Transfer Trap */
512 /*
513 * Just clear PSR.lp and then return immediately: all the
514 * interesting work (e.g., signal delivery is done in the kernel
515 * exit path).
516 */
517 //ia64_psr(regs)->lp = 0;
518 //return;
519 sprintf(buf, "Lower-Privilege Transfer trap");
520 } else {
521 /* Unimplemented Instr. Address Trap */
522 if (user_mode(regs)) {
523 //siginfo.si_signo = SIGILL;
524 //siginfo.si_code = ILL_BADIADDR;
525 //siginfo.si_errno = 0;
526 //siginfo.si_flags = 0;
527 //siginfo.si_isr = 0;
528 //siginfo.si_imm = 0;
529 //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
530 //force_sig_info(SIGILL, &siginfo, current);
531 //return;
532 }
533 sprintf(buf, "Unimplemented Instruction Address fault");
534 }
535 break;
537 case 45:
538 printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
539 printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
540 regs->cr_iip, ifa, isr);
541 //force_sig(SIGSEGV, current);
542 break;
544 case 46:
545 printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
546 printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
547 regs->cr_iip, ifa, isr, iim);
548 //force_sig(SIGSEGV, current);
549 return;
551 case 47:
552 sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
553 break;
555 default:
556 sprintf(buf, "Fault %lu", vector);
557 break;
558 }
559 //die_if_kernel(buf, regs, error);
560 printk("ia64_fault: %s: reflecting\n",buf);
561 reflect_interruption(ifa,isr,iim,regs,IA64_GENEX_VECTOR);
562 //while(1);
563 //force_sig(SIGILL, current);
564 }
566 unsigned long running_on_sim = 0;
568 void
569 do_ssc(unsigned long ssc, struct pt_regs *regs)
570 {
571 extern unsigned long lookup_domain_mpa(struct domain *,unsigned long);
572 unsigned long arg0, arg1, arg2, arg3, retval;
573 char buf[2];
574 /**/ static int last_fd, last_count; // FIXME FIXME FIXME
575 /**/ // BROKEN FOR MULTIPLE DOMAINS & SMP
576 /**/ struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
577 extern unsigned long vcpu_verbose, privop_trace;
579 arg0 = vcpu_get_gr(current,32);
580 switch(ssc) {
581 case SSC_PUTCHAR:
582 buf[0] = arg0;
583 buf[1] = '\0';
584 printf(buf);
585 break;
586 case SSC_GETCHAR:
587 retval = ia64_ssc(0,0,0,0,ssc);
588 vcpu_set_gr(current,8,retval);
589 break;
590 case SSC_WAIT_COMPLETION:
591 if (arg0) { // metaphysical address
593 arg0 = translate_domain_mpaddr(arg0);
594 /**/ stat = (struct ssc_disk_stat *)__va(arg0);
595 ///**/ if (stat->fd == last_fd) stat->count = last_count;
596 /**/ stat->count = last_count;
597 //if (last_count >= PAGE_SIZE) printf("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
598 ///**/ retval = ia64_ssc(arg0,0,0,0,ssc);
599 /**/ retval = 0;
600 }
601 else retval = -1L;
602 vcpu_set_gr(current,8,retval);
603 break;
604 case SSC_OPEN:
605 arg1 = vcpu_get_gr(current,33); // access rights
606 if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware. (ignoring...)\n"); arg0 = 0; }
607 if (arg0) { // metaphysical address
608 arg0 = translate_domain_mpaddr(arg0);
609 retval = ia64_ssc(arg0,arg1,0,0,ssc);
610 }
611 else retval = -1L;
612 vcpu_set_gr(current,8,retval);
613 break;
614 case SSC_WRITE:
615 case SSC_READ:
616 //if (ssc == SSC_WRITE) printf("DOING AN SSC_WRITE\n");
617 arg1 = vcpu_get_gr(current,33);
618 arg2 = vcpu_get_gr(current,34);
619 arg3 = vcpu_get_gr(current,35);
620 if (arg2) { // metaphysical address of descriptor
621 struct ssc_disk_req *req;
622 unsigned long mpaddr, paddr;
623 long len;
625 arg2 = translate_domain_mpaddr(arg2);
626 req = (struct disk_req *)__va(arg2);
627 req->len &= 0xffffffffL; // avoid strange bug
628 len = req->len;
629 /**/ last_fd = arg1;
630 /**/ last_count = len;
631 mpaddr = req->addr;
632 //if (last_count >= PAGE_SIZE) printf("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
633 retval = 0;
634 if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
635 // do partial page first
636 req->addr = translate_domain_mpaddr(mpaddr);
637 req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
638 len -= req->len; mpaddr += req->len;
639 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
640 arg3 += req->len; // file offset
641 /**/ last_stat.fd = last_fd;
642 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
643 //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
644 }
645 if (retval >= 0) while (len > 0) {
646 req->addr = translate_domain_mpaddr(mpaddr);
647 req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
648 len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
649 retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
650 arg3 += req->len; // file offset
651 // TEMP REMOVED AGAIN arg3 += req->len; // file offset
652 /**/ last_stat.fd = last_fd;
653 /**/ (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
654 //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
655 }
656 // set it back to the original value
657 req->len = last_count;
658 }
659 else retval = -1L;
660 vcpu_set_gr(current,8,retval);
661 //if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
662 break;
663 case SSC_CONNECT_INTERRUPT:
664 arg1 = vcpu_get_gr(current,33);
665 arg2 = vcpu_get_gr(current,34);
666 arg3 = vcpu_get_gr(current,35);
667 if (!running_on_sim) { printf("SSC_CONNECT_INTERRUPT, not implemented on hardware. (ignoring...)\n"); break; }
668 (void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
669 break;
670 case SSC_NETDEV_PROBE:
671 vcpu_set_gr(current,8,-1L);
672 break;
673 default:
674 printf("ia64_handle_break: bad ssc code %lx\n",ssc);
675 break;
676 }
677 vcpu_increment_iip(current);
678 }
680 void fooefi(void) {}
682 void
683 ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
684 {
685 static int first_time = 1;
686 struct domain *d = (struct domain *) current;
687 extern unsigned long running_on_sim;
689 if (first_time) {
690 if (platform_is_hp_ski()) running_on_sim = 1;
691 else running_on_sim = 0;
692 first_time = 0;
693 }
694 if (iim == 0x80001 || iim == 0x80002) { //FIXME: don't hardcode constant
695 if (running_on_sim) do_ssc(vcpu_get_gr(current,36), regs);
696 else do_ssc(vcpu_get_gr(current,36), regs);
697 }
698 else if (iim == d->breakimm) {
699 struct ia64_sal_retval x;
700 switch (regs->r2) {
701 case FW_HYPERCALL_PAL_CALL:
702 //printf("*** PAL hypercall: index=%d\n",regs->r28);
703 //FIXME: This should call a C routine
704 x = pal_emulator_static(regs->r28);
705 regs->r8 = x.status; regs->r9 = x.v0;
706 regs->r10 = x.v1; regs->r11 = x.v2;
707 break;
708 case FW_HYPERCALL_SAL_CALL:
709 x = sal_emulator(vcpu_get_gr(d,32),vcpu_get_gr(d,33),
710 vcpu_get_gr(d,34),vcpu_get_gr(d,35),
711 vcpu_get_gr(d,36),vcpu_get_gr(d,37),
712 vcpu_get_gr(d,38),vcpu_get_gr(d,39));
713 regs->r8 = x.status; regs->r9 = x.v0;
714 regs->r10 = x.v1; regs->r11 = x.v2;
715 break;
716 case FW_HYPERCALL_EFI_RESET_SYSTEM:
717 printf("efi.reset_system called ");
718 if (current == dom0) {
719 printf("(by dom0)\n ");
720 (*efi.reset_system)(EFI_RESET_WARM,0,0,NULL);
721 }
722 printf("(not supported for non-0 domain)\n");
723 regs->r8 = EFI_UNSUPPORTED;
724 break;
725 case FW_HYPERCALL_EFI_GET_TIME:
726 {
727 unsigned long *tv, *tc;
728 fooefi();
729 tv = vcpu_get_gr(d,32);
730 tc = vcpu_get_gr(d,33);
731 //printf("efi_get_time(%p,%p) called...",tv,tc);
732 tv = __va(translate_domain_mpaddr(tv));
733 if (tc) tc = __va(translate_domain_mpaddr(tc));
734 regs->r8 = (*efi.get_time)(tv,tc);
735 //printf("and returns %lx\n",regs->r8);
736 }
737 break;
738 case FW_HYPERCALL_EFI_SET_TIME:
739 case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
740 case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
741 // FIXME: need fixes in efi.h from 2.6.9
742 case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
743 // FIXME: WARNING!! IF THIS EVER GETS IMPLEMENTED
744 // SOME OF THE OTHER EFI EMULATIONS WILL CHANGE AS
745 // POINTER ARGUMENTS WILL BE VIRTUAL!!
746 case FW_HYPERCALL_EFI_GET_VARIABLE:
747 // FIXME: need fixes in efi.h from 2.6.9
748 case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
749 case FW_HYPERCALL_EFI_SET_VARIABLE:
750 case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
751 // FIXME: need fixes in efi.h from 2.6.9
752 regs->r8 = EFI_UNSUPPORTED;
753 break;
754 }
755 vcpu_increment_iip(current);
756 }
757 else reflect_interruption(ifa,isr,iim,regs,IA64_BREAK_VECTOR);
758 }
760 void
761 ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
762 {
763 IA64FAULT vector;
764 struct domain *d = (struct domain *) current;
765 // FIXME: no need to pass itir in to this routine as we need to
766 // compute the virtual itir anyway (based on domain's RR.ps)
767 // AND ACTUALLY reflect_interruption doesn't use it anyway!
768 itir = vcpu_get_itir_on_fault(d,ifa);
769 vector = priv_emulate((struct domain *)current,regs,isr);
770 if (vector == IA64_RETRY) {
771 reflect_interruption(ifa,isr,itir,regs,
772 IA64_ALT_DATA_TLB_VECTOR | IA64_FORCED_IFA);
773 }
774 else if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
775 reflect_interruption(ifa,isr,itir,regs,vector);
776 }
777 }
// Per-interruption-type counters; NOTE(review): not referenced in this file —
// presumably bookkeeping updated elsewhere (or dead), confirm before removing.
779 #define INTR_TYPE_MAX 10
780 UINT64 int_counts[INTR_TYPE_MAX];
782 void
783 ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
784 {
785 extern unsigned long vcpu_get_itir_on_fault(struct domain *vcpu, UINT64 ifa);
786 struct domain *d = (struct domain *) current;
787 unsigned long check_lazy_cover = 0;
788 unsigned long psr = regs->cr_ipsr;
789 unsigned long itir = vcpu_get_itir_on_fault(d,ifa);
791 if (!(psr & IA64_PSR_CPL)) {
792 printf("ia64_handle_reflection: reflecting with priv=0!!\n");
793 while(1);
794 }
795 // FIXME: no need to pass itir in to this routine as we need to
796 // compute the virtual itir anyway (based on domain's RR.ps)
797 // AND ACTUALLY reflect_interruption doesn't use it anyway!
798 itir = vcpu_get_itir_on_fault(d,ifa);
799 switch(vector) {
800 case 8:
801 vector = IA64_DIRTY_BIT_VECTOR; break;
802 case 9:
803 vector = IA64_INST_ACCESS_BIT_VECTOR; break;
804 case 10:
805 check_lazy_cover = 1;
806 vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
807 case 22:
808 vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
809 case 23:
810 check_lazy_cover = 1;
811 vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
812 case 25:
813 vector = IA64_DISABLED_FPREG_VECTOR; break;
814 case 26:
815 printf("*** NaT fault... attempting to handle as privop\n");
816 vector = priv_emulate(d,regs,isr);
817 if (vector == IA64_NO_FAULT) {
818 printf("*** Handled privop masquerading as NaT fault\n");
819 return;
820 }
821 vector = IA64_NAT_CONSUMPTION_VECTOR; break;
822 case 27:
823 //printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
824 itir = iim;
825 vector = IA64_SPECULATION_VECTOR; break;
826 case 30:
827 // FIXME: Should we handle unaligned refs in Xen??
828 vector = IA64_UNALIGNED_REF_VECTOR; break;
829 default:
830 printf("ia64_handle_reflection: unhandled vector=0x%lx\n",vector);
831 while(vector);
832 return;
833 }
834 if (check_lazy_cover && handle_lazy_cover(d, isr, regs)) return;
835 reflect_interruption(ifa,isr,itir,regs,vector);
836 }