ia64/xen-unstable

xen/arch/ia64/process.c @ 6457:d34925e4144b

Still more cleanup and moving to 2.6.13 base
author djm@kirby.fc.hp.com
date Thu Sep 01 11:09:27 2005 -0600 (2005-09-01)
parents f5c4042212b0

/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <linux/efi.h>  /* FOR EFI_UNIMPLEMENTED */
#include <asm/sal.h>    /* FOR struct ia64_sal_retval */

#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
//#include <asm/ldt.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/regionreg.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include "hpsim_ssc.h"

extern unsigned long vcpu_get_itir_on_fault(struct vcpu *, UINT64);
extern struct ia64_sal_retval pal_emulator_static(UINT64);
extern struct ia64_sal_retval sal_emulator(UINT64, UINT64, UINT64, UINT64, UINT64, UINT64, UINT64, UINT64);

extern unsigned long dom0_start, dom0_size;

#define IA64_PSR_CPL1 (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
// note IA64_PSR_PK removed from following, why is this necessary?
#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
            IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
            IA64_PSR_IT | IA64_PSR_BN)

#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
            IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
            IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
            IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
            IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
            IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
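
/*
 * DELIVER_PSR_SET/CLR compute the machine PSR with which the guest's trap
 * handler will actually run when a fault is reflected.  Real translation
 * and interrupt bits stay on (the guest's *virtual* psr.ic/psr.i are
 * tracked separately in the PSCB, see reflect_interruption below), while
 * debug/single-step/etc. bits are cleared and the guest runs at CPL1.
 * The reflection paths apply them as:
 *
 *     regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
 */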

#define PSCB(x,y)   VCPU(x,y)
#define PSCBX(x,y)  x->arch.y
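
/*
 * PSCB accesses the privileged state communication block, i.e. the
 * register state shared with the guest via the VCPU() accessor, while
 * PSCBX reaches per-vcpu state private to Xen.  For example, PSCB(v,iip)
 * expands to VCPU(v,iip), and PSCBX(v,iva) to v->arch.iva.  Note that
 * PSCBX does not parenthesize its arguments, so it is only safe with
 * simple expressions.
 */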

extern unsigned long vcpu_verbose;

long do_iopl(domid_t domain, unsigned int new_io_pl)
{
    dummy();
    return 0;
}

void schedule_tail(struct vcpu *next)
{
    unsigned long rr7;
    //printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
    //printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
#ifdef CONFIG_VTI
    /* rr7 will be postponed to last point when resuming back to guest */
    vmx_load_all_rr(current);
#else // CONFIG_VTI
    if ((rr7 = load_region_regs(current)) != 0) {
        printk("schedule_tail: change to rr7 not yet implemented\n");
    }
#endif // CONFIG_VTI
}

// no-op hook called on the out-of-bounds cases below (e.g. as a
// convenient breakpoint target)
void tdpfoo(void) { }

// given a domain virtual address, pte and pagesize, extract the metaphysical
// address, convert the pte for a physical address for (possibly different)
// Xen PAGE_SIZE and return modified pte.  (NOTE: TLB insert should use
// PAGE_SIZE!)
unsigned long translate_domain_pte(unsigned long pteval,
    unsigned long address, unsigned long itir)
{
    struct domain *d = current->domain;
    unsigned long mask, pteval2, mpaddr;
    unsigned long lookup_domain_mpa(struct domain *,unsigned long);
    extern struct domain *dom0;
    extern unsigned long dom0_start, dom0_size;

    // FIXME address had better be pre-validated on insert
    mask = (1L << ((itir >> 2) & 0x3f)) - 1;
    mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
    if (d == dom0) {
        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
            //printk("translate_domain_pte: out-of-bounds dom0 mpaddr %p! itc=%lx...\n",mpaddr,ia64_get_itc());
            tdpfoo();
        }
    }
    else if ((mpaddr >> PAGE_SHIFT) > d->max_pages) {
        printf("translate_domain_pte: bad mpa=%p (> %p),vadr=%p,pteval=%p,itir=%p\n",
            mpaddr, d->max_pages<<PAGE_SHIFT, address, pteval, itir);
        tdpfoo();
    }
    pteval2 = lookup_domain_mpa(d,mpaddr);
    pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
    pteval2 |= _PAGE_PL_2;     // force PL0->2 (PL3 is unaffected)
    pteval2 = (pteval & ~_PAGE_PPN_MASK) | pteval2;
    return pteval2;
}
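
/*
 * Worked example (illustrative): for a guest 16KB mapping, itir.ps is 14,
 * so mask = (1UL << 14) - 1 = 0x3fff.  The metaphysical address is then
 * the page-frame bits of the guest pte combined with the low 14 bits of
 * the faulting address, and lookup_domain_mpa() swaps the metaphysical
 * frame for the machine frame before the pte is handed to
 * vcpu_itc_no_srlz() (see ia64_do_page_fault below), which inserts it
 * using Xen's PAGE_SIZE.
 */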

// given a current domain metaphysical address, return the physical address
unsigned long translate_domain_mpaddr(unsigned long mpaddr)
{
    extern unsigned long lookup_domain_mpa(struct domain *,unsigned long);
    unsigned long pteval;

    if (current->domain == dom0) {
        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
            printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr %p! continuing...\n",
                mpaddr);
            tdpfoo();
        }
    }
    pteval = lookup_domain_mpa(current->domain,mpaddr);
    return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
}
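
/*
 * Typical use (illustrative): callers such as do_ssc() below convert a
 * guest-supplied metaphysical buffer address to a machine address before
 * touching it from Xen, e.g.
 *
 *     arg0 = translate_domain_mpaddr(arg0);
 *     stat = (struct ssc_disk_stat *)__va(arg0);
 *
 * Only one page's translation is returned, so a buffer must not cross a
 * page boundary; do_ssc splits disk requests at page boundaries for
 * exactly this reason.
 */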

unsigned long slow_reflect_count[0x80] = { 0 };
unsigned long fast_reflect_count[0x80] = { 0 };

#define inc_slow_reflect_count(vec) slow_reflect_count[(vec)>>8]++

void zero_reflect_counts(void)
{
    int i;
    for (i=0; i<0x80; i++) slow_reflect_count[i] = 0;
    for (i=0; i<0x80; i++) fast_reflect_count[i] = 0;
}

int dump_reflect_counts(char *buf)
{
    int i, j;
    unsigned long cnt;
    char *s = buf;

    s += sprintf(s,"Slow reflections by vector:\n");
    for (i = 0, j = 0; i < 0x80; i++) {
        if ((cnt = slow_reflect_count[i]) != 0) {
            s += sprintf(s,"0x%02x00:%10lu, ",i,cnt);
            if ((j++ & 3) == 3) s += sprintf(s,"\n");
        }
    }
    if (j & 3) s += sprintf(s,"\n");
    s += sprintf(s,"Fast reflections by vector:\n");
    for (i = 0, j = 0; i < 0x80; i++) {
        if ((cnt = fast_reflect_count[i]) != 0) {
            s += sprintf(s,"0x%02x00:%10lu, ",i,cnt);
            if ((j++ & 3) == 3) s += sprintf(s,"\n");
        }
    }
    if (j & 3) s += sprintf(s,"\n");
    return s - buf;
}
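
/*
 * "Reflection" hands a fault or interruption that Xen took back to the
 * guest kernel's own handler: the relevant cr.* state (iip, ipsr, ifa,
 * isr, itir/iim, ifs) is saved into the PSCB where the guest's handler
 * expects to find it, the virtual interrupt collection/delivery bits are
 * turned off, and iip/ipsr are rewritten so that returning from Xen lands
 * on the guest's IVA-relative vector with DELIVER_PSR_SET/CLR applied.
 * A rough sketch of the resulting control flow (illustrative):
 *
 *     fault in guest -> Xen handler (e.g. ia64_do_page_fault)
 *       -> reflect_interruption(ifa, isr, itir, regs, vector)
 *       -> guest handler at PSCBX(v,iva) + vector
 *       -> guest eventually does rfi, resuming the faulting context
 */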

void reflect_interruption(unsigned long ifa, unsigned long isr, unsigned long itiriim, struct pt_regs *regs, unsigned long vector)
{
    unsigned long vcpu_get_ipsr_int_state(struct vcpu *,unsigned long);
    unsigned long vcpu_get_rr_ve(struct vcpu *,unsigned long);
    struct domain *d = current->domain;
    struct vcpu *v = current;

    if (vector == IA64_EXTINT_VECTOR) {
        extern unsigned long vcpu_verbose, privop_trace;
        static int first_extint = 1;
        if (first_extint) {
            printf("Delivering first extint to domain: ifa=%p, isr=%p, itir=%p, iip=%p\n",ifa,isr,itiriim,regs->cr_iip);
            //privop_trace = 1; vcpu_verbose = 1;
            first_extint = 0;
        }
    }
    if (!PSCB(v,interrupt_collection_enabled)) {
        if (!(PSCB(v,ipsr) & IA64_PSR_DT)) {
            panic_domain(regs,"psr.dt off, trying to deliver nested dtlb!\n");
        }
        vector &= ~0xf;
        if (vector != IA64_DATA_TLB_VECTOR &&
            vector != IA64_ALT_DATA_TLB_VECTOR &&
            vector != IA64_VHPT_TRANS_VECTOR) {
            panic_domain(regs,"psr.ic off, delivering fault=%lx,ipsr=%p,iip=%p,ifa=%p,isr=%p,PSCB.iip=%p\n",
                vector,regs->cr_ipsr,regs->cr_iip,ifa,isr,PSCB(v,iip));
        }
        //printf("Delivering NESTED DATA TLB fault\n");
        vector = IA64_DATA_NESTED_TLB_VECTOR;
        regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        // NOTE: nested trap must NOT pass PSCB address
        //regs->r31 = (unsigned long) &PSCB(v);
        inc_slow_reflect_count(vector);
        return;
    }
    if ((vector & 0xf) == IA64_FORCED_IFA)
        ifa = PSCB(v,tmp[0]);
    vector &= ~0xf;
    PSCB(v,ifa) = ifa;
    if (vector < IA64_DATA_NESTED_TLB_VECTOR) /* VHPT miss, TLB miss, Alt TLB miss */
        vcpu_thash(v,ifa,&PSCB(current,iha));
    PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
    PSCB(v,precover_ifs) = regs->cr_ifs;
    vcpu_bsw0(v);
    PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
    if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
        PSCB(v,iim) = itiriim;
    else PSCB(v,itir) = vcpu_get_itir_on_fault(v,ifa);
    PSCB(v,isr) = isr;  // this is unnecessary except for interrupts!
    PSCB(v,iip) = regs->cr_iip;
    PSCB(v,ifs) = 0;
    PSCB(v,incomplete_regframe) = 0;

    regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
    regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
#ifdef CONFIG_SMP
#warning "SMP FIXME: sharedinfo doesn't handle smp yet, need page per vcpu"
#endif
    regs->r31 = (unsigned long) &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);

    PSCB(v,interrupt_delivery_enabled) = 0;
    PSCB(v,interrupt_collection_enabled) = 0;

    inc_slow_reflect_count(vector);
}

void foodpi(void) {}

unsigned long pending_false_positive = 0;

// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void deliver_pending_interrupt(struct pt_regs *regs)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;
    // FIXME: Will this work properly if doing an RFI???
    if (!is_idle_task(d) && user_mode(regs)) {
        //vcpu_poke_timer(v);
        if (vcpu_deliverable_interrupts(v)) {
            unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
            if (vcpu_timer_pending_early(v))
                printf("*#*#*#* about to deliver early timer to domain %d!!!\n",v->domain->domain_id);
            reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
        }
        else if (PSCB(v,pending_interruption))
            ++pending_false_positive;
    }
}

unsigned long lazy_cover_count = 0;
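
/*
 * Lazy cover: if a fault arrives while the guest has interrupt collection
 * disabled (virtual psr.ic off), the current register frame has presumably
 * not been covered yet.  Rather than reflecting immediately, stash cr.ifs
 * in the PSCB, mark the frame incomplete, clear cr.ifs and retry the
 * faulting instruction, so the guest's handler later sees a consistent
 * frame.
 */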

int handle_lazy_cover(struct vcpu *v, unsigned long isr, struct pt_regs *regs)
{
    if (!PSCB(v,interrupt_collection_enabled)) {
        PSCB(v,ifs) = regs->cr_ifs;
        PSCB(v,incomplete_regframe) = 1;
        regs->cr_ifs = 0;
        lazy_cover_count++;
        return 1;  // retry same instruction with cr.ifs off
    }
    return 0;
}
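
/*
 * Page fault handling, in outline (see the body below): speculative loads
 * and lfetch.fault are squashed by setting psr.ed; otherwise the guest's
 * TLB/VHPT state is consulted via vcpu_translate().  On a hit the pte is
 * rewritten with translate_domain_pte() and inserted directly; on a miss
 * the fault is reflected so the guest's own miss handler can run.  Faults
 * on Xen's own addresses fall back to the exception tables.
 */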

void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
{
    unsigned long iip = regs->cr_iip;
    // FIXME should validate address here
    unsigned long pteval;
    unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
    IA64FAULT fault;

    if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, isr, regs)) return;
    if ((isr & IA64_ISR_SP)
        || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
    {
        /*
         * This fault was due to a speculative load or lfetch.fault, set the "ed"
         * bit in the psr to ensure forward progress.  (Target register will get a
         * NaT for ld.s, lfetch will be canceled.)
         */
        ia64_psr(regs)->ed = 1;
        return;
    }

    fault = vcpu_translate(current,address,is_data,&pteval,&itir);
    if (fault == IA64_NO_FAULT)
    {
        pteval = translate_domain_pte(pteval,address,itir);
        vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(itir>>2)&0x3f);
        return;
    }
    else if (IS_VMM_ADDRESS(iip))
    {
        if (!ia64_done_with_exception(regs)) {
            // should never happen.  If it does, region 0 addr may
            // indicate a bad xen pointer
            printk("*** xen_handle_domain_access: exception table"
                   " lookup failed, iip=%p, addr=%p, spinning...\n",
                   iip,address);
            panic_domain(regs,"*** xen_handle_domain_access: exception table"
                   " lookup failed, iip=%p, addr=%p, spinning...\n",
                   iip,address);
        }
        return;
    }

    reflect_interruption(address, isr, 0, regs, fault);
}

void
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
    unsigned long iim, unsigned long itir, unsigned long arg5,
    unsigned long arg6, unsigned long arg7, unsigned long stack)
{
    struct pt_regs *regs = (struct pt_regs *) &stack;
    unsigned long code, error = isr;
    char buf[128];
    int result, sig;
    static const char *reason[] = {
        "IA-64 Illegal Operation fault",
        "IA-64 Privileged Operation fault",
        "IA-64 Privileged Register fault",
        "IA-64 Reserved Register/Field fault",
        "Disabled Instruction Set Transition fault",
        "Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
        "Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
        "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
    };
#if 0
    printf("ia64_fault, vector=0x%p, ifa=%p, iip=%p, ipsr=%p, isr=%p\n",
        vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);
#endif

    if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
        /*
         * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
         * the lfetch.
         */
        ia64_psr(regs)->ed = 1;
        printf("ia64_fault: handled lfetch.fault\n");
        return;
    }

    switch (vector) {
    case 24: /* General Exception */
        code = (isr >> 4) & 0xf;
        sprintf(buf, "General Exception: %s%s", reason[code],
            (code == 3) ? ((isr & (1UL << 37))
                ? " (RSE access)" : " (data access)") : "");
        if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
            printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
                current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri,
                regs->pr);
# endif
            printf("ia64_fault: returning on hazard\n");
            return;
        }
        break;

    case 25: /* Disabled FP-Register */
        if (isr & 2) {
            //disabled_fph_fault(regs);
            //return;
        }
        sprintf(buf, "Disabled FPL fault---not supposed to happen!");
        break;

    case 26: /* NaT Consumption */
        if (user_mode(regs)) {
            void *addr;

            if (((isr >> 4) & 0xf) == 2) {
                /* NaT page consumption */
                //sig = SIGSEGV;
                //code = SEGV_ACCERR;
                addr = (void *) ifa;
            } else {
                /* register NaT consumption */
                //sig = SIGILL;
                //code = ILL_ILLOPN;
                addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
            }
            //siginfo.si_signo = sig;
            //siginfo.si_code = code;
            //siginfo.si_errno = 0;
            //siginfo.si_addr = addr;
            //siginfo.si_imm = vector;
            //siginfo.si_flags = __ISR_VALID;
            //siginfo.si_isr = isr;
            //force_sig_info(sig, &siginfo, current);
            //return;
        } //else if (ia64_done_with_exception(regs))
            //return;
        sprintf(buf, "NaT consumption");
        break;

    case 31: /* Unsupported Data Reference */
        if (user_mode(regs)) {
            //siginfo.si_signo = SIGILL;
            //siginfo.si_code = ILL_ILLOPN;
            //siginfo.si_errno = 0;
            //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
            //siginfo.si_imm = vector;
            //siginfo.si_flags = __ISR_VALID;
            //siginfo.si_isr = isr;
            //force_sig_info(SIGILL, &siginfo, current);
            //return;
        }
        sprintf(buf, "Unsupported data reference");
        break;

    case 29: /* Debug */
    case 35: /* Taken Branch Trap */
    case 36: /* Single Step Trap */
        //if (fsys_mode(current, regs)) {}
        switch (vector) {
        case 29:
            //siginfo.si_code = TRAP_HWBKPT;
#ifdef CONFIG_ITANIUM
            /*
             * Erratum 10 (IFA may contain incorrect address) now has
             * "NoFix" status.  There are no plans for fixing this.
             */
            if (ia64_psr(regs)->is == 0)
                ifa = regs->cr_iip;
#endif
            break;
        case 35: ifa = 0; break;
        case 36: ifa = 0; break;
        //case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
        //case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
        }
        //siginfo.si_signo = SIGTRAP;
        //siginfo.si_errno = 0;
        //siginfo.si_addr = (void *) ifa;
        //siginfo.si_imm = 0;
        //siginfo.si_flags = __ISR_VALID;
        //siginfo.si_isr = isr;
        //force_sig_info(SIGTRAP, &siginfo, current);
        //return;
        sprintf(buf, "Debug/Taken-Branch/Single-Step trap");
        break;  // don't fall through into the fp fault/trap cases

    case 32: /* fp fault */
    case 33: /* fp trap */
        //result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
        //if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
            //siginfo.si_signo = SIGFPE;
            //siginfo.si_errno = 0;
            //siginfo.si_code = FPE_FLTINV;
            //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
            //siginfo.si_flags = __ISR_VALID;
            //siginfo.si_isr = isr;
            //siginfo.si_imm = 0;
            //force_sig_info(SIGFPE, &siginfo, current);
        //}
        //return;
        sprintf(buf, "FP fault/trap");
        break;

    case 34:
        if (isr & 0x2) {
            /* Lower-Privilege Transfer Trap */
            /*
             * Just clear PSR.lp and then return immediately: all the
             * interesting work (e.g., signal delivery) is done in the
             * kernel exit path.
             */
            //ia64_psr(regs)->lp = 0;
            //return;
            sprintf(buf, "Lower-Privilege Transfer trap");
        } else {
            /* Unimplemented Instr. Address Trap */
            if (user_mode(regs)) {
                //siginfo.si_signo = SIGILL;
                //siginfo.si_code = ILL_BADIADDR;
                //siginfo.si_errno = 0;
                //siginfo.si_flags = 0;
                //siginfo.si_isr = 0;
                //siginfo.si_imm = 0;
                //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
                //force_sig_info(SIGILL, &siginfo, current);
                //return;
            }
            sprintf(buf, "Unimplemented Instruction Address fault");
        }
        break;

    case 45:
        printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
        printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
            regs->cr_iip, ifa, isr);
        //force_sig(SIGSEGV, current);
        break;

    case 46:
        printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
        printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
            regs->cr_iip, ifa, isr, iim);
        //force_sig(SIGSEGV, current);
        return;

    case 47:
        sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
        break;

    default:
        sprintf(buf, "Fault %lu", vector);
        break;
    }
    //die_if_kernel(buf, regs, error);
    printk("ia64_fault: %s: reflecting\n",buf);
    reflect_interruption(ifa,isr,iim,regs,IA64_GENEX_VECTOR);
    //while(1);
    //force_sig(SIGILL, current);
}

unsigned long running_on_sim = 0;
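
/*
 * SSC ("simulator system call"): the HP Ski simulator intercepts the
 * break immediates used below and services them on behalf of the
 * "hardware" (console I/O, simulated disk, etc.).  do_ssc() forwards a
 * domain's SSC requests, translating any metaphysical buffer addresses
 * first.  A guest console write arrives roughly as (illustrative):
 *
 *     break 0x80001                     // guest issues an SSC break
 *     -> ia64_handle_break()            // iim == 0x80001
 *     -> do_ssc(vcpu_get_gr(current,36), regs)
 *     -> ia64_ssc(...) or printf(), depending on the request
 */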

void
do_ssc(unsigned long ssc, struct pt_regs *regs)
{
    extern unsigned long lookup_domain_mpa(struct domain *,unsigned long);
    unsigned long arg0, arg1, arg2, arg3, retval;
    char buf[2];
/**/    static int last_fd, last_count;  // FIXME FIXME FIXME
/**/        // BROKEN FOR MULTIPLE DOMAINS & SMP
/**/    struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
    extern unsigned long vcpu_verbose, privop_trace;

    arg0 = vcpu_get_gr(current,32);
    switch(ssc) {
        case SSC_PUTCHAR:
        buf[0] = arg0;
        buf[1] = '\0';
        printf("%s", buf);  // don't treat guest data as a format string
        break;
        case SSC_GETCHAR:
        retval = ia64_ssc(0,0,0,0,ssc);
        vcpu_set_gr(current,8,retval);
        break;
        case SSC_WAIT_COMPLETION:
        if (arg0) {  // metaphysical address
            arg0 = translate_domain_mpaddr(arg0);
/**/        stat = (struct ssc_disk_stat *)__va(arg0);
///**/      if (stat->fd == last_fd) stat->count = last_count;
/**/        stat->count = last_count;
            //if (last_count >= PAGE_SIZE) printf("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
///**/      retval = ia64_ssc(arg0,0,0,0,ssc);
/**/        retval = 0;
        }
        else retval = -1L;
        vcpu_set_gr(current,8,retval);
        break;
        case SSC_OPEN:
        arg1 = vcpu_get_gr(current,33);  // access rights
        if (!running_on_sim) {
            printf("SSC_OPEN, not implemented on hardware.  (ignoring...)\n");
            arg0 = 0;
        }
        if (arg0) {  // metaphysical address
            arg0 = translate_domain_mpaddr(arg0);
            retval = ia64_ssc(arg0,arg1,0,0,ssc);
        }
        else retval = -1L;
        vcpu_set_gr(current,8,retval);
        break;
        case SSC_WRITE:
        case SSC_READ:
        //if (ssc == SSC_WRITE) printf("DOING AN SSC_WRITE\n");
        arg1 = vcpu_get_gr(current,33);
        arg2 = vcpu_get_gr(current,34);
        arg3 = vcpu_get_gr(current,35);
        if (arg2) {  // metaphysical address of descriptor
            struct ssc_disk_req *req;
            unsigned long mpaddr, paddr;
            long len;

            arg2 = translate_domain_mpaddr(arg2);
            req = (struct ssc_disk_req *)__va(arg2);
            req->len &= 0xffffffffL;  // avoid strange bug
            len = req->len;
/**/        last_fd = arg1;
/**/        last_count = len;
            mpaddr = req->addr;
            //if (last_count >= PAGE_SIZE) printf("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
            retval = 0;
            if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
                // do partial page first
                req->addr = translate_domain_mpaddr(mpaddr);
                req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
                len -= req->len; mpaddr += req->len;
                retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
                arg3 += req->len;  // file offset
/**/            last_stat.fd = last_fd;
/**/            (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
                //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
            }
            if (retval >= 0) while (len > 0) {
                req->addr = translate_domain_mpaddr(mpaddr);
                req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
                len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
                retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
                arg3 += req->len;  // file offset
                // TEMP REMOVED AGAIN arg3 += req->len;  // file offset
/**/            last_stat.fd = last_fd;
/**/            (void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
                //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
            }
            // set it back to the original value
            req->len = last_count;
        }
        else retval = -1L;
        vcpu_set_gr(current,8,retval);
        //if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
        break;
        case SSC_CONNECT_INTERRUPT:
        arg1 = vcpu_get_gr(current,33);
        arg2 = vcpu_get_gr(current,34);
        arg3 = vcpu_get_gr(current,35);
        if (!running_on_sim) {
            printf("SSC_CONNECT_INTERRUPT, not implemented on hardware.  (ignoring...)\n");
            break;
        }
        (void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
        break;
        case SSC_NETDEV_PROBE:
        vcpu_set_gr(current,8,-1L);
        break;
        default:
        printf("ia64_handle_break: bad ssc code %lx, iip=%p, b0=%p... spinning\n",
            ssc, regs->cr_iip, regs->b0);
        while(1);
        break;
    }
    vcpu_increment_iip(current);
}

int first_break = 1;
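
/*
 * Break instructions reach Xen in three flavors, distinguished by the
 * break immediate (iim): Ski simulator SSC requests (0x80001/0x80002,
 * hardcoded below), the domain's registered hypercall immediate
 * (d->arch.breakimm), and hyperprivops issued while interrupt collection
 * is off.  Anything else is an ordinary guest break.fault and is
 * reflected back via IA64_BREAK_VECTOR.
 */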

void
ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;
    extern unsigned long running_on_sim;

    if (first_break) {
        if (platform_is_hp_ski()) running_on_sim = 1;
        else running_on_sim = 0;
        first_break = 0;
    }
    if (iim == 0x80001 || iim == 0x80002) {  //FIXME: don't hardcode constant
        // SSC requests are forwarded the same way on and off the simulator
        do_ssc(vcpu_get_gr(current,36), regs);
    }
    else if (iim == d->arch.breakimm) {
        if (ia64_hypercall(regs))
            vcpu_increment_iip(current);
    }
    else if (!PSCB(v,interrupt_collection_enabled)) {
        if (ia64_hyperprivop(iim,regs))
            vcpu_increment_iip(current);
    }
    else reflect_interruption(ifa,isr,iim,regs,IA64_BREAK_VECTOR);
}
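
/*
 * Privileged-operation fault: the guest, running demoted at CPL > 0,
 * executed a privileged instruction.  priv_emulate() decodes and emulates
 * it against the vcpu's virtual state; only if emulation itself faults
 * (and no rfi is in progress) is the fault reflected to the guest.
 */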

void
ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
{
    IA64FAULT vector;
    struct domain *d = current->domain;
    struct vcpu *v = current;
    // FIXME: no need to pass itir in to this routine as we need to
    // compute the virtual itir anyway (based on domain's RR.ps)
    // AND ACTUALLY reflect_interruption doesn't use it anyway!
    itir = vcpu_get_itir_on_fault(v,ifa);
    vector = priv_emulate(current,regs,isr);
    if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
        reflect_interruption(ifa,isr,itir,regs,vector);
    }
}

#define INTR_TYPE_MAX 10
UINT64 int_counts[INTR_TYPE_MAX];
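
/*
 * ia64_handle_reflection() maps hardware interruption vector numbers
 * (8 = dirty bit, 9/10 = access bit, 20 = page not present, 22/23 =
 * access rights, 25 = disabled fp-reg, 26 = NaT consumption, 27 =
 * speculation, 30 = unaligned reference) onto the corresponding guest
 * IVT offsets before reflecting.  The data-side cases (10, 20, 23) first
 * get a chance at the lazy cover fixup, since they can arrive with the
 * guest's psr.ic off.
 */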

void
ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;
    unsigned long check_lazy_cover = 0;
    unsigned long psr = regs->cr_ipsr;
    unsigned long itir = vcpu_get_itir_on_fault(v,ifa);

    if (!(psr & IA64_PSR_CPL)) {
        printk("ia64_handle_reflection: reflecting with priv=0!!\n");
    }
    // FIXME: no need to pass itir in to this routine as we need to
    // compute the virtual itir anyway (based on domain's RR.ps)
    // AND ACTUALLY reflect_interruption doesn't use it anyway!
    switch(vector) {
        case 8:
        vector = IA64_DIRTY_BIT_VECTOR; break;
        case 9:
        vector = IA64_INST_ACCESS_BIT_VECTOR; break;
        case 10:
        check_lazy_cover = 1;
        vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
        case 20:
        check_lazy_cover = 1;
        vector = IA64_PAGE_NOT_PRESENT_VECTOR; break;
        case 22:
        vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
        case 23:
        check_lazy_cover = 1;
        vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
        case 25:
        vector = IA64_DISABLED_FPREG_VECTOR;
        break;
        case 26:
        printf("*** NaT fault... attempting to handle as privop\n");
        printf("isr=%p, ifa=%p, iip=%p, ipsr=%p\n",isr,ifa,regs->cr_iip,psr);
        vector = priv_emulate(v,regs,isr);
        if (vector == IA64_NO_FAULT) {
            printf("*** Handled privop masquerading as NaT fault\n");
            return;
        }
        vector = IA64_NAT_CONSUMPTION_VECTOR; break;
        case 27:
        //printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
        itir = iim;
        vector = IA64_SPECULATION_VECTOR; break;
        case 30:
        // FIXME: Should we handle unaligned refs in Xen??
        vector = IA64_UNALIGNED_REF_VECTOR; break;
        default:
        printf("ia64_handle_reflection: unhandled vector=0x%lx\n",vector);
        while(vector);  // spin so the unhandled case gets noticed
        return;
    }
    if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, isr, regs)) return;
    reflect_interruption(ifa,isr,itir,regs,vector);
}