ia64/xen-unstable: xen/arch/ia64/process.c @ 4186:863434ec7eab

bitkeeper revision 1.1236.31.5 (4238b7f5GsUEUXTi8daFiX6vrYE-Mg)

Cloned domains now fully working

author    djm@kirby.fc.hp.com
date      Wed Mar 16 22:49:25 2005 +0000 (2005-03-16)
parents   5cd82956dca3
children  74080d40b2e9 b786214604fd
/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <linux/efi.h>  /* FOR EFI_UNIMPLEMENTED */
#include <asm/sal.h>    /* FOR struct ia64_sal_retval */

#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
//#include <asm/ldt.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/regionreg.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/hpsim_ssc.h>
#include <asm/dom_fw.h>
extern unsigned long vcpu_get_itir_on_fault(struct exec_domain *, UINT64);
extern struct ia64_sal_retval pal_emulator_static(UINT64);
extern struct ia64_sal_retval sal_emulator(UINT64, UINT64, UINT64, UINT64,
                                           UINT64, UINT64, UINT64, UINT64);

extern unsigned long dom0_start, dom0_size;

#define IA64_PSR_CPL1   (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
// note IA64_PSR_PK removed from following, why is this necessary?
#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
                         IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
                         IA64_PSR_IT | IA64_PSR_BN)

#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
                         IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
                         IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
                         IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
                         IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
                         IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)

#define PSCB(x,y)       x->vcpu_info->arch.y

extern unsigned long vcpu_verbose;
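
/*
 * Faults that Xen cannot (or should not) handle itself are "reflected" into
 * the guest: cr.iip is pointed at the guest's iva plus the vector offset and
 * the interrupted psr is rewritten as (psr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET,
 * i.e. translation, collection and interrupt bits are forced on, the privilege
 * level is forced to 1 (the level at which the guest kernel runs under Xen),
 * and debug/single-step/etc. bits are forced off.  PSCB(x,y) is shorthand for
 * field y of vcpu x's privileged-state area, reached through vcpu_info->arch.
 */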

long do_iopl(domid_t domain, unsigned int new_io_pl)
{
    dummy();
    return 0;
}

void schedule_tail(struct exec_domain *next)
{
    unsigned long rr7;
    printk("current=%lx,shared_info=%lx\n", current, current->vcpu_info);
    printk("next=%lx,shared_info=%lx\n", next, next->vcpu_info);
    if ((rr7 = load_region_regs(current))) {
        printk("schedule_tail: change to rr7 not yet implemented\n");
    }
}

extern TR_ENTRY *match_tr(struct exec_domain *ed, unsigned long ifa);

void tdpfoo(void) { }

// Given a domain virtual address, pte and page size, extract the metaphysical
// address, convert the pte to use a physical address and the (possibly
// different) Xen PAGE_SIZE, and return the modified pte.  (NOTE: the TLB
// insert should use PAGE_SIZE!)
unsigned long translate_domain_pte(unsigned long pteval,
                                   unsigned long address, unsigned long itir)
{
    struct domain *d = current->domain;
    unsigned long mask, pteval2, mpaddr;
    unsigned long lookup_domain_mpa(struct domain *, unsigned long);
    extern struct domain *dom0;
    extern unsigned long dom0_start, dom0_size;

    // FIXME address had better be pre-validated on insert
    mask = (1L << ((itir >> 2) & 0x3f)) - 1;
    mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
    if (d == dom0) {
        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
            //printk("translate_domain_pte: out-of-bounds dom0 mpaddr %p! itc=%lx...\n",mpaddr,ia64_get_itc());
            tdpfoo();
        }
    }
    else if ((mpaddr >> PAGE_SHIFT) > d->max_pages) {
        printf("translate_domain_pte: bad mpa=%p (> %p),vadr=%p,pteval=%p,itir=%p\n",
               mpaddr, d->max_pages << PAGE_SHIFT, address, pteval, itir);
        tdpfoo();
    }
    pteval2 = lookup_domain_mpa(d, mpaddr);
    pteval2 &= _PAGE_PPN_MASK;  // ignore non-addr bits
    pteval2 |= _PAGE_PL_2;      // force PL0->2 (PL3 is unaffected)
    pteval2 = (pteval & ~_PAGE_PPN_MASK) | pteval2;
    return pteval2;
}
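
/*
 * Worked example for translate_domain_pte() above (illustrative): for a guest
 * mapping with a 16KB page, itir.ps == 14, so mask == (1L << 14) - 1 == 0x3fff.
 * The metaphysical address is then the guest-supplied PPN with its low 14 bits
 * replaced by the low 14 bits of the faulting address; lookup_domain_mpa()
 * converts that metaphysical address to a machine frame, and the returned pte
 * keeps every non-PPN attribute of the original except that privilege level 0
 * is raised to 2.  The caller inserts it with Xen's PAGE_SIZE, not the guest's.
 */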

// given a current domain metaphysical address, return the physical address
unsigned long translate_domain_mpaddr(unsigned long mpaddr)
{
    extern unsigned long lookup_domain_mpa(struct domain *, unsigned long);
    unsigned long pteval;

    if (current->domain == dom0) {
        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
            printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr %p! continuing...\n", mpaddr);
            tdpfoo();
        }
    }
    pteval = lookup_domain_mpa(current->domain, mpaddr);
    return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
}

void reflect_interruption(unsigned long ifa, unsigned long isr, unsigned long itiriim, struct pt_regs *regs, unsigned long vector)
{
    unsigned long vcpu_get_ipsr_int_state(struct exec_domain *, unsigned long);
    unsigned long vcpu_get_rr_ve(struct exec_domain *, unsigned long);
    struct domain *d = current->domain;
    struct exec_domain *ed = current;

    if (vector == IA64_EXTINT_VECTOR) {
        extern unsigned long vcpu_verbose, privop_trace;
        static int first_extint = 1;
        if (first_extint) {
            printf("Delivering first extint to domain: ifa=%p, isr=%p, itir=%p, iip=%p\n", ifa, isr, itiriim, regs->cr_iip);
            //privop_trace = 1; vcpu_verbose = 1;
            first_extint = 0;
        }
    }
    if (!PSCB(ed,interrupt_collection_enabled)) {
        if (!(PSCB(ed,ipsr) & IA64_PSR_DT)) {
            panic_domain(regs, "psr.dt off, trying to deliver nested dtlb!\n");
        }
        vector &= ~0xf;
        if (vector != IA64_DATA_TLB_VECTOR &&
            vector != IA64_ALT_DATA_TLB_VECTOR) {
            panic_domain(regs, "psr.ic off, delivering fault=%lx,iip=%p,ifa=%p,isr=%p,PSCB.iip=%p\n",
                         vector, regs->cr_iip, ifa, isr, PSCB(ed,iip));
        }
        //printf("Delivering NESTED DATA TLB fault\n");
        vector = IA64_DATA_NESTED_TLB_VECTOR;
        regs->cr_iip = ((unsigned long)PSCB(ed,iva) + vector) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        // NOTE: nested trap must NOT pass PSCB address
        //regs->r31 = (unsigned long) &PSCB(ed);
        return;
    }
    if ((vector & 0xf) != IA64_FORCED_IFA) PSCB(ed,ifa) = ifa;
    else ifa = PSCB(ed,ifa);
    vector &= ~0xf;
    // always deliver on ALT vector (for now?) because no VHPT
    // if (!vcpu_get_rr_ve(ed,ifa)) {
    if (vector == IA64_DATA_TLB_VECTOR)
        vector = IA64_ALT_DATA_TLB_VECTOR;
    else if (vector == IA64_INST_TLB_VECTOR)
        vector = IA64_ALT_INST_TLB_VECTOR;
    // }
    PSCB(ed,unat) = regs->ar_unat;  // not sure if this is really needed?
    PSCB(ed,precover_ifs) = regs->cr_ifs;
    vcpu_bsw0(ed);
    PSCB(ed,ipsr) = vcpu_get_ipsr_int_state(ed, regs->cr_ipsr);
    if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
        PSCB(ed,iim) = itiriim;
    else PSCB(ed,itir) = vcpu_get_itir_on_fault(ed, ifa);
    PSCB(ed,isr) = isr;  // this is unnecessary except for interrupts!
    PSCB(ed,iip) = regs->cr_iip;
    PSCB(ed,ifs) = 0;
    PSCB(ed,incomplete_regframe) = 0;

    regs->cr_iip = ((unsigned long)PSCB(ed,iva) + vector) & ~0xffUL;
    regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    // FIXME: NEED TO PASS PSCB, BUT **NOT** IN R31 WHICH IS BEING USED FOR ar.pr
    // IN ANY CASE, PASS PINNED ADDRESS, NOT THIS ONE
    //regs->r31 = (unsigned long) &PSCB(ed);

    PSCB(ed,interrupt_delivery_enabled) = 0;
    PSCB(ed,interrupt_collection_enabled) = 0;
}
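
/*
 * In short, reflect_interruption() above hand-crafts what the hardware would
 * do on an interruption taken with psr.ic on: it stashes ifa/itir (or iim),
 * isr, iip and an "interrupted" ipsr into the privileged state block, switches
 * the vcpu to register bank 0, and redirects the trapped context to the
 * guest's iva + vector with collection and delivery disabled in the PSCB.
 * If the guest already had psr.ic off, only a nested data TLB fault can be
 * delivered; anything else is treated as a fatal guest error.
 */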

void foodpi(void) {}

// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void deliver_pending_interrupt(struct pt_regs *regs)
{
    struct domain *d = current->domain;
    struct exec_domain *ed = current;
    // FIXME: Will this work properly if doing an RFI???
    if (!is_idle_task(d) && user_mode(regs)) {
        //vcpu_poke_timer(ed);
        if (vcpu_deliverable_interrupts(ed)) {
            unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
            if (vcpu_timer_pending_early(ed))
                printf("*#*#*#* about to deliver early timer to domain %d!!!\n", ed->domain->id);
            reflect_interruption(0, isr, 0, regs, IA64_EXTINT_VECTOR);
        }
    }
}

int handle_lazy_cover(struct exec_domain *ed, unsigned long isr, struct pt_regs *regs)
{
    if (!PSCB(ed,interrupt_collection_enabled)) {
        if (isr & IA64_ISR_IR) {
            // printf("Handling lazy cover\n");
            PSCB(ed,ifs) = regs->cr_ifs;
            PSCB(ed,incomplete_regframe) = 1;
            regs->cr_ifs = 0;
            return 1;  // retry same instruction with cr.ifs off
        }
    }
    return 0;
}
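
/*
 * "Lazy cover": if a fault arrives while the guest has psr.ic off and the
 * interruption happened with an incomplete register frame (isr.ir set), the
 * current cr.ifs is saved in the PSCB, the frame is marked incomplete, and
 * the faulting instruction is simply retried with cr.ifs cleared instead of
 * being reflected immediately.
 */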

#define IS_XEN_ADDRESS(d,a) ((a >= d->xen_vastart) && (a <= d->xen_vaend))

void xen_handle_domain_access(unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
{
    struct domain *d = (struct domain *) current->domain;
    struct exec_domain *ed = (struct exec_domain *) current;
    TR_ENTRY *trp;
    unsigned long psr = regs->cr_ipsr, mask, flags;
    unsigned long iip = regs->cr_iip;
    // FIXME should validate address here
    unsigned long pteval, mpaddr, ps;
    unsigned long lookup_domain_mpa(struct domain *, unsigned long);
    unsigned long match_dtlb(struct exec_domain *, unsigned long, unsigned long *, unsigned long *);
    IA64FAULT fault;

    // NEED TO HANDLE THREE CASES:
    // 1) domain is in metaphysical mode
    // 2) domain address is in TR
    // 3) domain address is not in TR (reflect data miss)

    // got here trying to read a privop bundle
    //if (d->metaphysical_mode) {
    if (PSCB(current,metaphysical_mode) && !(address >> 61)) {  //FIXME
        if (d == dom0) {
            if (address < dom0_start || address >= dom0_start + dom0_size) {
                printk("xen_handle_domain_access: out-of-bounds "
                       "dom0 mpaddr %p! continuing...\n", address);
                tdpfoo();
            }
        }
        pteval = lookup_domain_mpa(d, address);
        //FIXME: check return value?
        // would be nice to have a counter here
        vcpu_itc_no_srlz(ed, 2, address, pteval, -1UL, PAGE_SHIFT);
        return;
    }
    if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n", address, iip);

    // if we are fortunate enough to have it in the 1-entry TLB...
    if ((pteval = match_dtlb(ed, address, &ps, NULL))) {
        vcpu_itc_no_srlz(ed, 6, address, pteval, -1UL, ps);
        return;
    }
    // look in the TRs
    fault = vcpu_tpa(ed, address, &mpaddr);
    if (fault != IA64_NO_FAULT) {
        static int uacnt = 0;
        // can't translate it, just fail (poor man's exception)
        // which results in retrying execution
        //printk("*** xen_handle_domain_access: poor man's exception cnt=%i iip=%p, addr=%p...\n",uacnt++,iip,address);
        if (ia64_done_with_exception(regs)) {
            //if (!(uacnt++ & 0x3ff)) printk("*** xen_handle_domain_access: successfully handled cnt=%d iip=%p, addr=%p...\n",uacnt,iip,address);
            return;
        }
        else {
            // should never happen.  If it does, region 0 addr may
            // indicate a bad xen pointer
            printk("*** xen_handle_domain_access: exception table"
                   " lookup failed, iip=%p, addr=%p, spinning...\n",
                   iip, address);
            panic_domain(regs, "*** xen_handle_domain_access: exception table"
                         " lookup failed, iip=%p, addr=%p, spinning...\n",
                         iip, address);
        }
    }
    if (d == dom0) {
        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
            printk("xen_handle_domain_access: vcpu_tpa returned out-of-bounds dom0 mpaddr %p! continuing...\n", mpaddr);
            tdpfoo();
        }
    }
    //printk("*** xen_handle_domain_access: tpa resolved miss @%p...\n",address);
    pteval = lookup_domain_mpa(d, mpaddr);
    // would be nice to have a counter here
    //printf("Handling privop data TLB miss\n");
    // FIXME, must be inlined or potential for nested fault here!
    vcpu_itc_no_srlz(ed, 2, address, pteval, -1UL, PAGE_SHIFT);
}

void ia64_do_page_fault(unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
{
    struct domain *d = (struct domain *) current->domain;
    TR_ENTRY *trp;
    unsigned long psr = regs->cr_ipsr, mask, flags;
    unsigned long iip = regs->cr_iip;
    // FIXME should validate address here
    unsigned long pteval, mpaddr;
    unsigned long lookup_domain_mpa(struct domain *, unsigned long);
    unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
    unsigned long vector;
    IA64FAULT fault;

    //The right way is put in VHPT and take another miss!

    // weak attempt to avoid doing both I/D tlb insert to avoid
    // problems for privop bundle fetch, doesn't work, deal with later
    if (IS_XEN_ADDRESS(d,iip) && !IS_XEN_ADDRESS(d,address)) {
        xen_handle_domain_access(address, isr, regs, itir);
        return;
    }

    // FIXME: no need to pass itir in to this routine as we need to
    // compute the virtual itir anyway (based on domain's RR.ps)
    // AND ACTUALLY reflect_interruption doesn't use it anyway!
    itir = vcpu_get_itir_on_fault(current, address);

    if (PSCB(current,metaphysical_mode) && (is_data || !(address >> 61))) {  //FIXME
        // FIXME should validate mpaddr here
        if (d == dom0) {
            if (address < dom0_start || address >= dom0_start + dom0_size) {
                printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, iip=%p! continuing...\n", address, iip);
                printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n", address, current->vcpu_info->arch.iip);
                tdpfoo();
            }
        }
        pteval = lookup_domain_mpa(d, address);
        // FIXME, must be inlined or potential for nested fault here!
        vcpu_itc_no_srlz(current, is_data ? 2 : 1, address, pteval, -1UL, PAGE_SHIFT);
        return;
    }
    if ((trp = match_tr(current, address))) {
        // FIXME address had better be pre-validated on insert
        pteval = translate_domain_pte(trp->page_flags, address, trp->itir);
        vcpu_itc_no_srlz(current, is_data ? 2 : 1, address, pteval, -1UL, (trp->itir >> 2) & 0x3f);
        return;
    }
    vector = is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
    if (handle_lazy_cover(current, isr, regs)) return;
    if (!(address >> 61)) {
        panic_domain(0, "ia64_do_page_fault: @%p???, iip=%p, itc=%p (spinning...)\n", address, iip, ia64_get_itc());
    }
    if ((isr & IA64_ISR_SP)
        || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
    {
        /*
         * This fault was due to a speculative load or lfetch.fault, set the "ed"
         * bit in the psr to ensure forward progress.  (Target register will get a
         * NaT for ld.s, lfetch will be canceled.)
         */
        ia64_psr(regs)->ed = 1;
        return;
    }
    reflect_interruption(address, isr, itir, regs, vector);
}
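
/*
 * Handling order in ia64_do_page_fault() above, in brief: a Xen-internal
 * access from guest context is diverted to xen_handle_domain_access(); a
 * metaphysical-mode miss is resolved directly through lookup_domain_mpa(); a
 * hit in the guest's translation registers is re-inserted via
 * translate_domain_pte(); otherwise the miss is reflected to the guest as an
 * instruction or data TLB fault, after the lazy-cover and speculative
 * load/lfetch special cases have been dealt with.
 */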

void
ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
           unsigned long iim, unsigned long itir, unsigned long arg5,
           unsigned long arg6, unsigned long arg7, unsigned long stack)
{
    struct pt_regs *regs = (struct pt_regs *) &stack;
    unsigned long code, error = isr;
    char buf[128];
    int result, sig;
    static const char *reason[] = {
        "IA-64 Illegal Operation fault",
        "IA-64 Privileged Operation fault",
        "IA-64 Privileged Register fault",
        "IA-64 Reserved Register/Field fault",
        "Disabled Instruction Set Transition fault",
        "Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
        "Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
        "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
    };
#if 0
    printf("ia64_fault, vector=0x%p, ifa=%p, iip=%p, ipsr=%p, isr=%p\n",
           vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);
#endif

    if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
        /*
         * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
         * the lfetch.
         */
        ia64_psr(regs)->ed = 1;
        printf("ia64_fault: handled lfetch.fault\n");
        return;
    }

    switch (vector) {
    case 24: /* General Exception */
        code = (isr >> 4) & 0xf;
        sprintf(buf, "General Exception: %s%s", reason[code],
                (code == 3) ? ((isr & (1UL << 37))
                               ? " (RSE access)" : " (data access)") : "");
        if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
            printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
                   current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri,
                   regs->pr);
# endif
            printf("ia64_fault: returning on hazard\n");
            return;
        }
        break;

    case 25: /* Disabled FP-Register */
        if (isr & 2) {
            //disabled_fph_fault(regs);
            //return;
        }
        sprintf(buf, "Disabled FPL fault---not supposed to happen!");
        break;

    case 26: /* NaT Consumption */
        if (user_mode(regs)) {
            void *addr;

            if (((isr >> 4) & 0xf) == 2) {
                /* NaT page consumption */
                //sig = SIGSEGV;
                //code = SEGV_ACCERR;
                addr = (void *) ifa;
            } else {
                /* register NaT consumption */
                //sig = SIGILL;
                //code = ILL_ILLOPN;
                addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
            }
            //siginfo.si_signo = sig;
            //siginfo.si_code = code;
            //siginfo.si_errno = 0;
            //siginfo.si_addr = addr;
            //siginfo.si_imm = vector;
            //siginfo.si_flags = __ISR_VALID;
            //siginfo.si_isr = isr;
            //force_sig_info(sig, &siginfo, current);
            //return;
        } //else if (ia64_done_with_exception(regs))
            //return;
        sprintf(buf, "NaT consumption");
        break;

    case 31: /* Unsupported Data Reference */
        if (user_mode(regs)) {
            //siginfo.si_signo = SIGILL;
            //siginfo.si_code = ILL_ILLOPN;
            //siginfo.si_errno = 0;
            //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
            //siginfo.si_imm = vector;
            //siginfo.si_flags = __ISR_VALID;
            //siginfo.si_isr = isr;
            //force_sig_info(SIGILL, &siginfo, current);
            //return;
        }
        sprintf(buf, "Unsupported data reference");
        break;

    case 29: /* Debug */
    case 35: /* Taken Branch Trap */
    case 36: /* Single Step Trap */
        //if (fsys_mode(current, regs)) {}
        switch (vector) {
        case 29:
            //siginfo.si_code = TRAP_HWBKPT;
#ifdef CONFIG_ITANIUM
            /*
             * Erratum 10 (IFA may contain incorrect address) now has
             * "NoFix" status.  There are no plans for fixing this.
             */
            if (ia64_psr(regs)->is == 0)
                ifa = regs->cr_iip;
#endif
            break;
        case 35: ifa = 0; break;
        case 36: ifa = 0; break;
        //case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
        //case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
        }
        //siginfo.si_signo = SIGTRAP;
        //siginfo.si_errno = 0;
        //siginfo.si_addr = (void *) ifa;
        //siginfo.si_imm = 0;
        //siginfo.si_flags = __ISR_VALID;
        //siginfo.si_isr = isr;
        //force_sig_info(SIGTRAP, &siginfo, current);
        //return;
        // NB: with the force_sig_info()/return above commented out, these
        // cases currently fall through into the FP fault/trap handling below.

    case 32: /* fp fault */
    case 33: /* fp trap */
        //result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
        if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
            //siginfo.si_signo = SIGFPE;
            //siginfo.si_errno = 0;
            //siginfo.si_code = FPE_FLTINV;
            //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
            //siginfo.si_flags = __ISR_VALID;
            //siginfo.si_isr = isr;
            //siginfo.si_imm = 0;
            //force_sig_info(SIGFPE, &siginfo, current);
        }
        //return;
        sprintf(buf, "FP fault/trap");
        break;

    case 34:
        if (isr & 0x2) {
            /* Lower-Privilege Transfer Trap */
            /*
             * Just clear PSR.lp and then return immediately: all the
             * interesting work (e.g., signal delivery) is done in the kernel
             * exit path.
             */
            //ia64_psr(regs)->lp = 0;
            //return;
            sprintf(buf, "Lower-Privilege Transfer trap");
        } else {
            /* Unimplemented Instr. Address Trap */
            if (user_mode(regs)) {
                //siginfo.si_signo = SIGILL;
                //siginfo.si_code = ILL_BADIADDR;
                //siginfo.si_errno = 0;
                //siginfo.si_flags = 0;
                //siginfo.si_isr = 0;
                //siginfo.si_imm = 0;
                //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
                //force_sig_info(SIGILL, &siginfo, current);
                //return;
            }
            sprintf(buf, "Unimplemented Instruction Address fault");
        }
        break;

    case 45:
        printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
        printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
               regs->cr_iip, ifa, isr);
        //force_sig(SIGSEGV, current);
        break;

    case 46:
        printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
        printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
               regs->cr_iip, ifa, isr, iim);
        //force_sig(SIGSEGV, current);
        return;

    case 47:
        sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
        break;

    default:
        sprintf(buf, "Fault %lu", vector);
        break;
    }
    //die_if_kernel(buf, regs, error);
    printk("ia64_fault: %s: reflecting\n", buf);
    reflect_interruption(ifa, isr, iim, regs, IA64_GENEX_VECTOR);
    //while(1);
    //force_sig(SIGILL, current);
}

unsigned long running_on_sim = 0;

void
do_ssc(unsigned long ssc, struct pt_regs *regs)
{
    extern unsigned long lookup_domain_mpa(struct domain *, unsigned long);
    unsigned long arg0, arg1, arg2, arg3, retval;
    char buf[2];
/**/ static int last_fd, last_count;  // FIXME FIXME FIXME
/**/                                  // BROKEN FOR MULTIPLE DOMAINS & SMP
/**/ struct ssc_disk_stat { int fd; unsigned count; } *stat, last_stat;
    extern unsigned long vcpu_verbose, privop_trace;

    arg0 = vcpu_get_gr(current, 32);
    switch (ssc) {
    case SSC_PUTCHAR:
        buf[0] = arg0;
        buf[1] = '\0';
        printf(buf);
        break;
    case SSC_GETCHAR:
        retval = ia64_ssc(0, 0, 0, 0, ssc);
        vcpu_set_gr(current, 8, retval);
        break;
    case SSC_WAIT_COMPLETION:
        if (arg0) {  // metaphysical address
            arg0 = translate_domain_mpaddr(arg0);
/**/        stat = (struct ssc_disk_stat *)__va(arg0);
///**/      if (stat->fd == last_fd) stat->count = last_count;
/**/        stat->count = last_count;
            //if (last_count >= PAGE_SIZE) printf("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
///**/      retval = ia64_ssc(arg0,0,0,0,ssc);
/**/        retval = 0;
        }
        else retval = -1L;
        vcpu_set_gr(current, 8, retval);
        break;
    case SSC_OPEN:
        arg1 = vcpu_get_gr(current, 33);  // access rights
        if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware.  (ignoring...)\n"); arg0 = 0; }
        if (arg0) {  // metaphysical address
            arg0 = translate_domain_mpaddr(arg0);
            retval = ia64_ssc(arg0, arg1, 0, 0, ssc);
        }
        else retval = -1L;
        vcpu_set_gr(current, 8, retval);
        break;
    case SSC_WRITE:
    case SSC_READ:
        //if (ssc == SSC_WRITE) printf("DOING AN SSC_WRITE\n");
        arg1 = vcpu_get_gr(current, 33);
        arg2 = vcpu_get_gr(current, 34);
        arg3 = vcpu_get_gr(current, 35);
        if (arg2) {  // metaphysical address of descriptor
            struct ssc_disk_req *req;
            unsigned long mpaddr, paddr;
            long len;

            arg2 = translate_domain_mpaddr(arg2);
            req = (struct ssc_disk_req *)__va(arg2);
            req->len &= 0xffffffffL;  // avoid strange bug
            len = req->len;
/**/        last_fd = arg1;
/**/        last_count = len;
            mpaddr = req->addr;
            //if (last_count >= PAGE_SIZE) printf("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
            retval = 0;
            if ((mpaddr & PAGE_MASK) != ((mpaddr + len - 1) & PAGE_MASK)) {
                // do partial page first
                req->addr = translate_domain_mpaddr(mpaddr);
                req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
                len -= req->len; mpaddr += req->len;
                retval = ia64_ssc(arg0, arg1, arg2, arg3, ssc);
                arg3 += req->len;  // file offset
/**/            last_stat.fd = last_fd;
/**/            (void)ia64_ssc(__pa(&last_stat), 0, 0, 0, SSC_WAIT_COMPLETION);
                //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
            }
            if (retval >= 0) while (len > 0) {
                req->addr = translate_domain_mpaddr(mpaddr);
                req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
                len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
                retval = ia64_ssc(arg0, arg1, arg2, arg3, ssc);
                arg3 += req->len;  // file offset
                // TEMP REMOVED AGAIN arg3 += req->len; // file offset
/**/            last_stat.fd = last_fd;
/**/            (void)ia64_ssc(__pa(&last_stat), 0, 0, 0, SSC_WAIT_COMPLETION);
                //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
            }
            // set it back to the original value
            req->len = last_count;
        }
        else retval = -1L;
        vcpu_set_gr(current, 8, retval);
        //if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
        break;
    case SSC_CONNECT_INTERRUPT:
        arg1 = vcpu_get_gr(current, 33);
        arg2 = vcpu_get_gr(current, 34);
        arg3 = vcpu_get_gr(current, 35);
        if (!running_on_sim) { printf("SSC_CONNECT_INTERRUPT, not implemented on hardware.  (ignoring...)\n"); break; }
        (void)ia64_ssc(arg0, arg1, arg2, arg3, ssc);
        break;
    case SSC_NETDEV_PROBE:
        vcpu_set_gr(current, 8, -1L);
        break;
    default:
        printf("ia64_handle_break: bad ssc code %lx\n", ssc);
        break;
    }
    vcpu_increment_iip(current);
}
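
/*
 * Note on SSC_READ/SSC_WRITE above: the simulator's disk request takes a
 * single physical buffer address, but a guest buffer that is contiguous in
 * metaphysical space may span machine pages that are not contiguous.  The
 * loop therefore splits the request at page boundaries, translating each
 * piece with translate_domain_mpaddr() and issuing one ia64_ssc() call per
 * page, waiting for completion after each one.
 */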

void fooefi(void) {}

void
ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
    static int first_time = 1;
    struct domain *d = (struct domain *) current->domain;
    struct exec_domain *ed = (struct exec_domain *) current;
    extern unsigned long running_on_sim;

    if (first_time) {
        if (platform_is_hp_ski()) running_on_sim = 1;
        else running_on_sim = 0;
        first_time = 0;
    }
    if (iim == 0x80001 || iim == 0x80002) {  //FIXME: don't hardcode constant
        if (running_on_sim) do_ssc(vcpu_get_gr(current, 36), regs);
        else do_ssc(vcpu_get_gr(current, 36), regs);
    }
    else if (iim == d->breakimm) {
        struct ia64_sal_retval x;
        switch (regs->r2) {
        case FW_HYPERCALL_PAL_CALL:
            //printf("*** PAL hypercall: index=%d\n",regs->r28);
            //FIXME: This should call a C routine
            x = pal_emulator_static(regs->r28);
            regs->r8 = x.status; regs->r9 = x.v0;
            regs->r10 = x.v1; regs->r11 = x.v2;
            break;
        case FW_HYPERCALL_SAL_CALL:
            x = sal_emulator(vcpu_get_gr(ed,32), vcpu_get_gr(ed,33),
                             vcpu_get_gr(ed,34), vcpu_get_gr(ed,35),
                             vcpu_get_gr(ed,36), vcpu_get_gr(ed,37),
                             vcpu_get_gr(ed,38), vcpu_get_gr(ed,39));
            regs->r8 = x.status; regs->r9 = x.v0;
            regs->r10 = x.v1; regs->r11 = x.v2;
            break;
        case FW_HYPERCALL_EFI_RESET_SYSTEM:
            printf("efi.reset_system called ");
            if (current->domain == dom0) {
                printf("(by dom0)\n ");
                (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
            }
            printf("(not supported for non-0 domain)\n");
            regs->r8 = EFI_UNSUPPORTED;
            break;
        case FW_HYPERCALL_EFI_GET_TIME:
            {
            unsigned long *tv, *tc;
            fooefi();
            tv = vcpu_get_gr(ed,32);
            tc = vcpu_get_gr(ed,33);
            //printf("efi_get_time(%p,%p) called...",tv,tc);
            tv = __va(translate_domain_mpaddr(tv));
            if (tc) tc = __va(translate_domain_mpaddr(tc));
            regs->r8 = (*efi.get_time)(tv, tc);
            //printf("and returns %lx\n",regs->r8);
            }
            break;
        case FW_HYPERCALL_EFI_SET_TIME:
        case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
        case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
            // FIXME: need fixes in efi.h from 2.6.9
        case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
            // FIXME: WARNING!! IF THIS EVER GETS IMPLEMENTED
            // SOME OF THE OTHER EFI EMULATIONS WILL CHANGE AS
            // POINTER ARGUMENTS WILL BE VIRTUAL!!
        case FW_HYPERCALL_EFI_GET_VARIABLE:
            // FIXME: need fixes in efi.h from 2.6.9
        case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
        case FW_HYPERCALL_EFI_SET_VARIABLE:
        case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
            // FIXME: need fixes in efi.h from 2.6.9
            regs->r8 = EFI_UNSUPPORTED;
            break;
        case 0xffff:  // test dummy hypercall
            regs->r8 = dump_privop_counts_to_user(
                vcpu_get_gr(ed,32),
                vcpu_get_gr(ed,33));
            break;
        case 0xfffe:  // test dummy hypercall
            regs->r8 = zero_privop_counts_to_user(
                vcpu_get_gr(ed,32),
                vcpu_get_gr(ed,33));
            break;
        }
        vcpu_increment_iip(current);
    }
    else reflect_interruption(ifa, isr, iim, regs, IA64_BREAK_VECTOR);
}
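
/*
 * To summarize ia64_handle_break() above: break immediates 0x80001/0x80002
 * are the Ski simulator's SSC traps, an immediate equal to the domain's
 * breakimm is a Xen firmware hypercall dispatched on r2 (PAL/SAL/EFI
 * emulation plus two test hypercalls), and any other break is reflected back
 * to the guest as an ordinary break fault.
 */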

void
ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
{
    IA64FAULT vector;
    struct domain *d = current->domain;
    struct exec_domain *ed = current;
    // FIXME: no need to pass itir in to this routine as we need to
    // compute the virtual itir anyway (based on domain's RR.ps)
    // AND ACTUALLY reflect_interruption doesn't use it anyway!
    itir = vcpu_get_itir_on_fault(ed, ifa);
    vector = priv_emulate(current, regs, isr);
    if (vector == IA64_RETRY) {
        reflect_interruption(ifa, isr, itir, regs,
                             IA64_ALT_DATA_TLB_VECTOR | IA64_FORCED_IFA);
    }
    else if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
        reflect_interruption(ifa, isr, itir, regs, vector);
    }
}

#define INTR_TYPE_MAX 10
UINT64 int_counts[INTR_TYPE_MAX];

void
ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
{
    struct domain *d = (struct domain *) current->domain;
    struct exec_domain *ed = (struct exec_domain *) current;
    unsigned long check_lazy_cover = 0;
    unsigned long psr = regs->cr_ipsr;
    unsigned long itir = vcpu_get_itir_on_fault(ed, ifa);

    if (!(psr & IA64_PSR_CPL)) {
        printk("ia64_handle_reflection: reflecting with priv=0!!\n");
    }
    // FIXME: no need to pass itir in to this routine as we need to
    // compute the virtual itir anyway (based on domain's RR.ps)
    // AND ACTUALLY reflect_interruption doesn't use it anyway!
    itir = vcpu_get_itir_on_fault(ed, ifa);
    switch (vector) {
    case 8:
        vector = IA64_DIRTY_BIT_VECTOR; break;
    case 9:
        vector = IA64_INST_ACCESS_BIT_VECTOR; break;
    case 10:
        check_lazy_cover = 1;
        vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
    case 20:
        check_lazy_cover = 1;
        vector = IA64_PAGE_NOT_PRESENT_VECTOR; break;
    case 22:
        vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
    case 23:
        check_lazy_cover = 1;
        vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
    case 25:
        vector = IA64_DISABLED_FPREG_VECTOR; break;
    case 26:
        printf("*** NaT fault... attempting to handle as privop\n");
        vector = priv_emulate(ed, regs, isr);
        if (vector == IA64_NO_FAULT) {
            printf("*** Handled privop masquerading as NaT fault\n");
            return;
        }
        vector = IA64_NAT_CONSUMPTION_VECTOR; break;
    case 27:
        //printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
        itir = iim;
        vector = IA64_SPECULATION_VECTOR; break;
    case 30:
        // FIXME: Should we handle unaligned refs in Xen??
        vector = IA64_UNALIGNED_REF_VECTOR; break;
    default:
        printf("ia64_handle_reflection: unhandled vector=0x%lx\n", vector);
        while (vector);
        return;
    }
    if (check_lazy_cover && handle_lazy_cover(ed, isr, regs)) return;
    reflect_interruption(ifa, isr, itir, regs, vector);
}