ia64/xen-unstable

xen/arch/ia64/xen/process.c @ 9405:29dfadcc5029

[IA64] Follow-up to Xen time cleanup

Clean up the Xen time handler. Tristan had #if 0'd some code because it
seemed redundant, but that logic was actually problematic and the cause
of an intermittent timer oops in dom0, so delete it now.

Also remove vcpu_wake, since waking the current vcpu accomplishes
nothing and simply wastes CPU cycles.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Mon Mar 27 15:32:08 2006 -0700 (2006-03-27)
parents f517be67eeac
children 2b6e531dab38
/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 * Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <linux/efi.h>	/* FOR EFI_UNIMPLEMENTED */
#include <asm/sal.h>	/* FOR struct ia64_sal_retval */

#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
//#include <asm/ldt.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/regionreg.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include "hpsim_ssc.h"
#include <xen/multicall.h>
#include <asm/debugger.h>

extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* FIXME: where should these declarations live? */
extern void load_region_regs(struct vcpu *);
extern void panic_domain(struct pt_regs *, const char *, ...);
extern long platform_is_hp_ski(void);
extern int ia64_hyperprivop(unsigned long, REGS *);
extern int ia64_hypercall(struct pt_regs *regs);
extern void vmx_do_launch(struct vcpu *);
extern unsigned long lookup_domain_mpa(struct domain *,unsigned long);

extern unsigned long dom0_start, dom0_size;

#define IA64_PSR_CPL1	(__IA64_UL(1) << IA64_PSR_CPL1_BIT)
// note IA64_PSR_PK removed from following, why is this necessary?
#define	DELIVER_PSR_SET	(IA64_PSR_IC | IA64_PSR_I | \
			IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
			IA64_PSR_IT | IA64_PSR_BN)

#define	DELIVER_PSR_CLR	(IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
			IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
			IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
			IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
			IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
			IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)

#include <xen/sched-if.h>
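
/*
 * Finish a context switch: mark the previous vcpu's context as saved,
 * then set up the incoming vcpu, launching VMX state for a fully
 * virtualized domain or reloading region and kernel registers for a
 * paravirtualized one.
 */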
void schedule_tail(struct vcpu *prev)
{
	context_saved(prev);

	if (VMX_DOMAIN(current)) {
		vmx_do_launch(current);
	} else {
		load_region_regs(current);
		vcpu_load_kernel_regs(current);
	}
}
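
/* Empty hook; judging from its call sites in the translation routines
 * below, apparently a convenient spot for a debugger breakpoint when a
 * suspicious metaphysical address is seen. */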
void tdpfoo(void) { }

// given a domain virtual address, pte and pagesize, extract the metaphysical
// address, convert the pte to use the physical address and the (possibly
// different) Xen PAGE_SIZE, and return the modified pte.  (NOTE: TLB insert
// should use PAGE_SIZE!)
unsigned long translate_domain_pte(unsigned long pteval,
	unsigned long address, unsigned long itir)
{
	struct domain *d = current->domain;
	unsigned long mask, pteval2, mpaddr;

	// FIXME address had better be pre-validated on insert
	mask = ~itir_mask(itir);
	mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
	if (d == dom0) {
		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
			/*
			printk("translate_domain_pte: out-of-bounds dom0 mpaddr 0x%lx! itc=%lx...\n",
				mpaddr, ia64_get_itc());
			*/
			tdpfoo();
		}
	}
	else if ((mpaddr >> PAGE_SHIFT) > d->max_pages) {
		/* Address beyond the limit.  However the grant table is
		   also beyond the limit.  Display a message if not in the
		   grant table.  */
		if (mpaddr >= IA64_GRANT_TABLE_PADDR
		    && mpaddr < (IA64_GRANT_TABLE_PADDR
				 + (ORDER_GRANT_FRAMES << PAGE_SHIFT)))
			printf("translate_domain_pte: bad mpa=0x%lx (> 0x%lx),"
				"vadr=0x%lx,pteval=0x%lx,itir=0x%lx\n",
				mpaddr, (unsigned long)d->max_pages<<PAGE_SHIFT,
				address, pteval, itir);
		tdpfoo();
	}
	pteval2 = lookup_domain_mpa(d,mpaddr);
	pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
	pteval2 |= _PAGE_PL_2; // force PL0->2 (PL3 is unaffected)
	pteval2 = (pteval & ~_PAGE_PPN_MASK) | pteval2;
	return pteval2;
}

// given a current domain metaphysical address, return the physical address
unsigned long translate_domain_mpaddr(unsigned long mpaddr)
{
	unsigned long pteval;

	if (current->domain == dom0) {
		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
			printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr 0x%lx! continuing...\n",
				mpaddr);
			tdpfoo();
		}
	}
	pteval = lookup_domain_mpa(current->domain,mpaddr);
	return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
}
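
/*
 * Counters of interruptions reflected to a guest, indexed by the
 * vector's IVT offset divided by 0x100: the slow (C) reflection path
 * counts via inc_slow_reflect_count(); fast_reflect_count[] is
 * maintained by the assembly fast path.
 */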
unsigned long slow_reflect_count[0x80] = { 0 };
unsigned long fast_reflect_count[0x80] = { 0 };

#define inc_slow_reflect_count(vec) slow_reflect_count[vec>>8]++;

void zero_reflect_counts(void)
{
	int i;
	for (i=0; i<0x80; i++) slow_reflect_count[i] = 0;
	for (i=0; i<0x80; i++) fast_reflect_count[i] = 0;
}

int dump_reflect_counts(char *buf)
{
	int i,j,cnt;
	char *s = buf;

	s += sprintf(s,"Slow reflections by vector:\n");
	for (i = 0, j = 0; i < 0x80; i++) {
		if ( (cnt = slow_reflect_count[i]) != 0 ) {
			s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
			if ((j++ & 3) == 3) s += sprintf(s,"\n");
		}
	}
	if (j & 3) s += sprintf(s,"\n");
	s += sprintf(s,"Fast reflections by vector:\n");
	for (i = 0, j = 0; i < 0x80; i++) {
		if ( (cnt = fast_reflect_count[i]) != 0 ) {
			s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
			if ((j++ & 3) == 3) s += sprintf(s,"\n");
		}
	}
	if (j & 3) s += sprintf(s,"\n");
	return s - buf;
}

// should never panic domain... if it does, stack may have been overrun
void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
{
	struct vcpu *v = current;

	if (!(PSCB(v,ipsr) & IA64_PSR_DT)) {
		panic_domain(regs,"psr.dt off, trying to deliver nested dtlb!\n");
	}
	vector &= ~0xf;
	if (vector != IA64_DATA_TLB_VECTOR &&
	    vector != IA64_ALT_DATA_TLB_VECTOR &&
	    vector != IA64_VHPT_TRANS_VECTOR) {
		panic_domain(regs,"psr.ic off, delivering fault=%lx,ipsr=%p,iip=%p,ifa=%p,isr=%p,PSCB.iip=%p\n",
			vector,regs->cr_ipsr,regs->cr_iip,PSCB(v,ifa),isr,PSCB(v,iip));
	}
}
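
/*
 * Deliver an interruption to the current vcpu by emulating what the
 * hardware does on interruption: save the interrupted state (ipsr,
 * iip, isr, ifs) into the PSCB, switch to register bank 0, turn off
 * interrupt collection and delivery, and vector to the guest's IVT
 * entry for this fault.
 */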
void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
{
	struct vcpu *v = current;

	if (!PSCB(v,interrupt_collection_enabled))
		check_bad_nested_interruption(isr,regs,vector);
	PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
	PSCB(v,precover_ifs) = regs->cr_ifs;
	vcpu_bsw0(v);
	PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
	PSCB(v,isr) = isr;
	PSCB(v,iip) = regs->cr_iip;
	PSCB(v,ifs) = 0;
	PSCB(v,incomplete_regframe) = 0;

	regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
#ifdef CONFIG_SMP
#warning "SMP FIXME: sharedinfo doesn't handle smp yet, need page per vcpu"
#endif
	regs->r31 = (unsigned long) &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);

	PSCB(v,interrupt_delivery_enabled) = 0;
	PSCB(v,interrupt_collection_enabled) = 0;

	inc_slow_reflect_count(vector);
}

void foodpi(void) {}

static unsigned long pending_false_positive = 0;
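
/* Reflect an external interrupt to the current vcpu through the
 * guest's external-interrupt IVT entry (IA64_EXTINT_VECTOR). */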
void reflect_extint(struct pt_regs *regs)
{
	unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
	struct vcpu *v = current;
	static int first_extint = 1;

	if (first_extint) {
		printf("Delivering first extint to domain: isr=0x%lx, iip=0x%lx\n", isr, regs->cr_iip);
		first_extint = 0;
	}
	if (vcpu_timer_pending_early(v))
		printf("*#*#*#* about to deliver early timer to domain %d!!!\n",v->domain->domain_id);
	PSCB(current,itir) = 0;
	reflect_interruption(isr,regs,IA64_EXTINT_VECTOR);
}

// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void deliver_pending_interrupt(struct pt_regs *regs)
{
	struct domain *d = current->domain;
	struct vcpu *v = current;
	// FIXME: Will this work properly if doing an RFI???
	if (!is_idle_domain(d) && user_mode(regs)) {
		//vcpu_poke_timer(v);
		if (vcpu_deliverable_interrupts(v))
			reflect_extint(regs);
		else if (PSCB(v,pending_interruption))
			++pending_false_positive;
	}
}
unsigned long lazy_cover_count = 0;
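
/*
 * "Lazy cover": if the guest faulted with interruption collection
 * disabled, its register frame has not been covered yet; stash cr.ifs
 * in the PSCB, flag the frame as incomplete, and have the faulting
 * instruction retried with cr.ifs cleared.
 */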
int handle_lazy_cover(struct vcpu *v, unsigned long isr, struct pt_regs *regs)
{
	if (!PSCB(v,interrupt_collection_enabled)) {
		PSCB(v,ifs) = regs->cr_ifs;
		PSCB(v,incomplete_regframe) = 1;
		regs->cr_ifs = 0;
		lazy_cover_count++;
		return(1); // retry same instruction with cr.ifs off
	}
	return(0);
}
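
/*
 * TLB miss handler: try to resolve the miss from the guest's TLB
 * state via vcpu_translate() and insert the translated mapping;
 * otherwise reflect the fault to the guest.  A miss inside Xen itself
 * must be covered by an exception-table fixup, or the domain is
 * killed.
 */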
void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
{
	unsigned long iip = regs->cr_iip, iha;
	// FIXME should validate address here
	unsigned long pteval;
	unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
	IA64FAULT fault;

	if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, isr, regs)) return;
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	fault = vcpu_translate(current,address,is_data,0,&pteval,&itir,&iha);
	if (fault == IA64_NO_FAULT) {
		pteval = translate_domain_pte(pteval,address,itir);
		vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(itir>>2)&0x3f);
		return;
	}
	if (!user_mode (regs)) {
		/* The fault occurs inside Xen.  */
		if (!ia64_done_with_exception(regs)) {
			// should never happen.  If it does, region 0 addr may
			// indicate a bad xen pointer
			printk("*** xen_handle_domain_access: exception table"
			       " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
			       iip, address);
			panic_domain(regs,"*** xen_handle_domain_access: exception table"
			             " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
			             iip, address);
		}
		return;
	}
	if (!PSCB(current,interrupt_collection_enabled)) {
		check_bad_nested_interruption(isr,regs,fault);
		//printf("Delivering NESTED DATA TLB fault\n");
		fault = IA64_DATA_NESTED_TLB_VECTOR;
		regs->cr_iip = ((unsigned long) PSCBX(current,iva) + fault) & ~0xffUL;
		regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
		// NOTE: nested trap must NOT pass PSCB address
		//regs->r31 = (unsigned long) &PSCB(current);
		inc_slow_reflect_count(fault);
		return;
	}

	PSCB(current,itir) = itir;
	PSCB(current,iha) = iha;
	PSCB(current,ifa) = address;
	reflect_interruption(isr, regs, fault);
}
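
/*
 * Catch-all fault handler, adapted from Linux's ia64_fault(): build a
 * diagnostic message for the trap and, where Linux would deliver a
 * signal, reflect the fault to the current domain as a General
 * Exception instead.
 */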
void
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
	    unsigned long iim, unsigned long itir, unsigned long arg5,
	    unsigned long arg6, unsigned long arg7, unsigned long stack)
{
	struct pt_regs *regs = (struct pt_regs *) &stack;
	unsigned long code;
	char buf[128];
	static const char * const reason[] = {
		"IA-64 Illegal Operation fault",
		"IA-64 Privileged Operation fault",
		"IA-64 Privileged Register fault",
		"IA-64 Reserved Register/Field fault",
		"Disabled Instruction Set Transition fault",
		"Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
		"Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
		"Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
	};
#if 0
	printf("ia64_fault, vector=0x%p, ifa=%p, iip=%p, ipsr=%p, isr=%p\n",
		vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);
#endif

	if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
		/*
		 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
		 * the lfetch.
		 */
		ia64_psr(regs)->ed = 1;
		printf("ia64_fault: handled lfetch.fault\n");
		return;
	}

	switch (vector) {
	case 24: /* General Exception */
		code = (isr >> 4) & 0xf;
		sprintf(buf, "General Exception: %s%s", reason[code],
			(code == 3) ? ((isr & (1UL << 37))
				? " (RSE access)" : " (data access)") : "");
		if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
			printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
				current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri,
				regs->pr);
# endif
			printf("ia64_fault: returning on hazard\n");
			return;
		}
		break;

	case 25: /* Disabled FP-Register */
		if (isr & 2) {
			//disabled_fph_fault(regs);
			//return;
		}
		sprintf(buf, "Disabled FPL fault---not supposed to happen!");
		break;

	case 26: /* NaT Consumption */
		if (user_mode(regs)) {
			void *addr;

			if (((isr >> 4) & 0xf) == 2) {
				/* NaT page consumption */
				//sig = SIGSEGV;
				//code = SEGV_ACCERR;
				addr = (void *) ifa;
			} else {
				/* register NaT consumption */
				//sig = SIGILL;
				//code = ILL_ILLOPN;
				addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
			}
			//siginfo.si_signo = sig;
			//siginfo.si_code = code;
			//siginfo.si_errno = 0;
			//siginfo.si_addr = addr;
			//siginfo.si_imm = vector;
			//siginfo.si_flags = __ISR_VALID;
			//siginfo.si_isr = isr;
			//force_sig_info(sig, &siginfo, current);
			//return;
		} //else if (ia64_done_with_exception(regs))
			//return;
		sprintf(buf, "NaT consumption");
		break;

	case 31: /* Unsupported Data Reference */
		if (user_mode(regs)) {
			//siginfo.si_signo = SIGILL;
			//siginfo.si_code = ILL_ILLOPN;
			//siginfo.si_errno = 0;
			//siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
			//siginfo.si_imm = vector;
			//siginfo.si_flags = __ISR_VALID;
			//siginfo.si_isr = isr;
			//force_sig_info(SIGILL, &siginfo, current);
			//return;
		}
		sprintf(buf, "Unsupported data reference");
		break;

	case 29: /* Debug */
	case 35: /* Taken Branch Trap */
	case 36: /* Single Step Trap */
		//if (fsys_mode(current, regs)) {}
		switch (vector) {
		case 29:
			//siginfo.si_code = TRAP_HWBKPT;
#ifdef CONFIG_ITANIUM
			/*
			 * Erratum 10 (IFA may contain incorrect address) now has
			 * "NoFix" status.  There are no plans for fixing this.
			 */
			if (ia64_psr(regs)->is == 0)
				ifa = regs->cr_iip;
#endif
			break;
		case 35: ifa = 0; break;
		case 36: ifa = 0; break;
		//case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
		//case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
		}
		//siginfo.si_signo = SIGTRAP;
		//siginfo.si_errno = 0;
		//siginfo.si_addr = (void *) ifa;
		//siginfo.si_imm = 0;
		//siginfo.si_flags = __ISR_VALID;
		//siginfo.si_isr = isr;
		//force_sig_info(SIGTRAP, &siginfo, current);
		//return;
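		/* NB: with the return above commented out, control falls
		 * through to the fp fault/trap case below. */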

	case 32: /* fp fault */
	case 33: /* fp trap */
		//result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
		//if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
			//siginfo.si_signo = SIGFPE;
			//siginfo.si_errno = 0;
			//siginfo.si_code = FPE_FLTINV;
			//siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
			//siginfo.si_flags = __ISR_VALID;
			//siginfo.si_isr = isr;
			//siginfo.si_imm = 0;
			//force_sig_info(SIGFPE, &siginfo, current);
		//}
		//return;
		sprintf(buf, "FP fault/trap");
		break;

	case 34:
		if (isr & 0x2) {
			/* Lower-Privilege Transfer Trap */
			/*
			 * Just clear PSR.lp and then return immediately: all the
			 * interesting work (e.g., signal delivery is done in the kernel
			 * exit path).
			 */
			//ia64_psr(regs)->lp = 0;
			//return;
			sprintf(buf, "Lower-Privilege Transfer trap");
		} else {
			/* Unimplemented Instr. Address Trap */
			if (user_mode(regs)) {
				//siginfo.si_signo = SIGILL;
				//siginfo.si_code = ILL_BADIADDR;
				//siginfo.si_errno = 0;
				//siginfo.si_flags = 0;
				//siginfo.si_isr = 0;
				//siginfo.si_imm = 0;
				//siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
				//force_sig_info(SIGILL, &siginfo, current);
				//return;
			}
			sprintf(buf, "Unimplemented Instruction Address fault");
		}
		break;

	case 45:
		printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
			regs->cr_iip, ifa, isr);
		//force_sig(SIGSEGV, current);
		break;

	case 46:
		printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
			regs->cr_iip, ifa, isr, iim);
		//force_sig(SIGSEGV, current);
		return;

	case 47:
		sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
		break;

	default:
		sprintf(buf, "Fault %lu", vector);
		break;
	}
	//die_if_kernel(buf, regs, error);
	printk("ia64_fault: %s: reflecting\n",buf);
	PSCB(current,itir) = vcpu_get_itir_on_fault(current,ifa);
	PSCB(current,ifa) = ifa;
	reflect_interruption(isr,regs,IA64_GENEX_VECTOR);
	//while(1);
	//force_sig(SIGILL, current);
}

unsigned long running_on_sim = 0;
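
/*
 * Simulator System Calls (SSCs): console and simulated disk services
 * available when running under the HP-SKI simulator.  Metaphysical
 * buffer addresses supplied by the domain are translated to physical
 * addresses before being handed to the simulator via ia64_ssc().
 */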
void
do_ssc(unsigned long ssc, struct pt_regs *regs)
{
	unsigned long arg0, arg1, arg2, arg3, retval;
	char buf[2];
/**/	static int last_fd, last_count;	// FIXME FIXME FIXME
/**/					// BROKEN FOR MULTIPLE DOMAINS & SMP
/**/	struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;

	arg0 = vcpu_get_gr(current,32);
	switch(ssc) {
	    case SSC_PUTCHAR:
		buf[0] = arg0;
		buf[1] = '\0';
		printf(buf);
		break;
	    case SSC_GETCHAR:
		retval = ia64_ssc(0,0,0,0,ssc);
		vcpu_set_gr(current,8,retval,0);
		break;
	    case SSC_WAIT_COMPLETION:
		if (arg0) {	// metaphysical address

			arg0 = translate_domain_mpaddr(arg0);
/**/			stat = (struct ssc_disk_stat *)__va(arg0);
///**/			if (stat->fd == last_fd) stat->count = last_count;
/**/			stat->count = last_count;
//if (last_count >= PAGE_SIZE) printf("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
///**/			retval = ia64_ssc(arg0,0,0,0,ssc);
/**/			retval = 0;
		}
		else retval = -1L;
		vcpu_set_gr(current,8,retval,0);
		break;
	    case SSC_OPEN:
		arg1 = vcpu_get_gr(current,33);	// access rights
		if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware.  (ignoring...)\n"); arg0 = 0; }
		if (arg0) {	// metaphysical address
			arg0 = translate_domain_mpaddr(arg0);
			retval = ia64_ssc(arg0,arg1,0,0,ssc);
		}
		else retval = -1L;
		vcpu_set_gr(current,8,retval,0);
		break;
	    case SSC_WRITE:
	    case SSC_READ:
//if (ssc == SSC_WRITE) printf("DOING AN SSC_WRITE\n");
		arg1 = vcpu_get_gr(current,33);
		arg2 = vcpu_get_gr(current,34);
		arg3 = vcpu_get_gr(current,35);
		if (arg2) {	// metaphysical address of descriptor
			struct ssc_disk_req *req;
			unsigned long mpaddr;
			long len;

			arg2 = translate_domain_mpaddr(arg2);
			req = (struct ssc_disk_req *) __va(arg2);
			req->len &= 0xffffffffL;	// avoid strange bug
			len = req->len;
/**/			last_fd = arg1;
/**/			last_count = len;
			mpaddr = req->addr;
//if (last_count >= PAGE_SIZE) printf("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
			retval = 0;
			if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
				// do partial page first
				req->addr = translate_domain_mpaddr(mpaddr);
				req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
				len -= req->len; mpaddr += req->len;
				retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
				arg3 += req->len; // file offset
/**/				last_stat.fd = last_fd;
/**/				(void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
//if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
			}
			if (retval >= 0) while (len > 0) {
				req->addr = translate_domain_mpaddr(mpaddr);
				req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
				len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
				retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
				arg3 += req->len; // file offset
// TEMP REMOVED AGAIN				arg3 += req->len; // file offset
/**/				last_stat.fd = last_fd;
/**/				(void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
//if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
			}
			// set it back to the original value
			req->len = last_count;
		}
		else retval = -1L;
		vcpu_set_gr(current,8,retval,0);
//if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
		break;
	    case SSC_CONNECT_INTERRUPT:
		arg1 = vcpu_get_gr(current,33);
		arg2 = vcpu_get_gr(current,34);
		arg3 = vcpu_get_gr(current,35);
		if (!running_on_sim) { printf("SSC_CONNECT_INTERRUPT, not implemented on hardware.  (ignoring...)\n"); break; }
		(void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
		break;
	    case SSC_NETDEV_PROBE:
		vcpu_set_gr(current,8,-1L,0);
		break;
	    default:
		printf("ia64_handle_break: bad ssc code %lx, iip=0x%lx, b0=0x%lx... spinning\n",
			ssc, regs->cr_iip, regs->b0);
		while(1);
		break;
	}
	vcpu_increment_iip(current);
}

/* Also read in hyperprivop.S  */
int first_break = 1;
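
/*
 * break-instruction dispatch: SSC immediates (0x80001/0x80002) go to
 * do_ssc(), the domain's break immediate is a hypercall, a break with
 * interruption collection disabled is treated as a hyperprivop, and
 * anything else is reflected to the guest's break vector.
 */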
void
ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
	struct domain *d = current->domain;
	struct vcpu *v = current;

	if (first_break) {
		if (platform_is_hp_ski()) running_on_sim = 1;
		else running_on_sim = 0;
		first_break = 0;
	}
	if (iim == 0x80001 || iim == 0x80002) {	//FIXME: don't hardcode constant
		do_ssc(vcpu_get_gr(current,36), regs);
	}
#ifdef CRASH_DEBUG
	else if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs)) {
		if (iim == 0)
			show_registers(regs);
		debugger_trap_fatal(0 /* don't care */, regs);
	}
#endif
	else if (iim == d->arch.breakimm) {
		/* by default, do not continue */
		v->arch.hypercall_continuation = 0;

		if (ia64_hypercall(regs) &&
		    !PSCBX(v, hypercall_continuation))
			vcpu_increment_iip(current);
	}
	else if (!PSCB(v,interrupt_collection_enabled)) {
		if (ia64_hyperprivop(iim,regs))
			vcpu_increment_iip(current);
	}
	else {
		if (iim == 0)
			die_if_kernel("bug check", regs, iim);
		PSCB(v,iim) = iim;
		reflect_interruption(isr,regs,IA64_BREAK_VECTOR);
	}
}
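
/*
 * Privileged-operation fault: attempt to emulate the privileged
 * instruction on behalf of the guest, reflecting any fault the
 * emulation raises.
 */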
void
ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
{
	IA64FAULT vector;

	vector = priv_emulate(current,regs,isr);
	if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
		// Note: if a path results in a vector to reflect that requires
		// iha/itir (e.g. vcpu_force_data_miss), they must be set there
		reflect_interruption(isr,regs,vector);
	}
}

/* Used in vhpt.h.  */
#define INTR_TYPE_MAX	10
UINT64 int_counts[INTR_TYPE_MAX];
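
/*
 * Map a hardware interruption vector number onto the corresponding
 * guest IVT offset and reflect it, with special handling for NaT
 * faults that are really privops and for faults that may need a lazy
 * cover first.
 */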
void
ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
{
	struct vcpu *v = current;
	unsigned long check_lazy_cover = 0;
	unsigned long psr = regs->cr_ipsr;

	/* The following faults should not be seen from Xen itself */
	if (!(psr & IA64_PSR_CPL)) BUG();

	switch(vector) {
	    case 8:
		vector = IA64_DIRTY_BIT_VECTOR; break;
	    case 9:
		vector = IA64_INST_ACCESS_BIT_VECTOR; break;
	    case 10:
		check_lazy_cover = 1;
		vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
	    case 20:
		check_lazy_cover = 1;
		vector = IA64_PAGE_NOT_PRESENT_VECTOR; break;
	    case 22:
		vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
	    case 23:
		check_lazy_cover = 1;
		vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
	    case 25:
		vector = IA64_DISABLED_FPREG_VECTOR;
		break;
	    case 26:
		if (((isr >> 4L) & 0xfL) == 1) {
			//regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
			printf("ia64_handle_reflection: handling regNaT fault");
			vector = IA64_NAT_CONSUMPTION_VECTOR; break;
		}
#if 1
		// pass null pointer dereferences through with no error
		// but retain debug output for non-zero ifa
		if (!ifa) {
			vector = IA64_NAT_CONSUMPTION_VECTOR; break;
		}
#endif
		printf("*** NaT fault... attempting to handle as privop\n");
		printf("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
			isr, ifa, regs->cr_iip, psr);
		//regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
		// certain NaT faults are higher priority than privop faults
		vector = priv_emulate(v,regs,isr);
		if (vector == IA64_NO_FAULT) {
			printf("*** Handled privop masquerading as NaT fault\n");
			return;
		}
		vector = IA64_NAT_CONSUMPTION_VECTOR; break;
	    case 27:
		//printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
		PSCB(current,iim) = iim;
		vector = IA64_SPECULATION_VECTOR; break;
	    case 30:
		// FIXME: Should we handle unaligned refs in Xen??
		vector = IA64_UNALIGNED_REF_VECTOR; break;
	    case 32:
		printf("ia64_handle_reflection: handling FP fault");
		vector = IA64_FP_FAULT_VECTOR; break;
	    case 33:
		printf("ia64_handle_reflection: handling FP trap");
		vector = IA64_FP_TRAP_VECTOR; break;
	    case 34:
		printf("ia64_handle_reflection: handling lowerpriv trap");
		vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR; break;
	    case 35:
		printf("ia64_handle_reflection: handling taken branch trap");
		vector = IA64_TAKEN_BRANCH_TRAP_VECTOR; break;
	    case 36:
		printf("ia64_handle_reflection: handling single step trap");
		vector = IA64_SINGLE_STEP_TRAP_VECTOR; break;

	    default:
		printf("ia64_handle_reflection: unhandled vector=0x%lx\n",vector);
		while(vector);
		return;
	}
	if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, isr, regs)) return;
	PSCB(current,ifa) = ifa;
	PSCB(current,itir) = vcpu_get_itir_on_fault(v,ifa);
	reflect_interruption(isr,regs,vector);
}
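
/*
 * Record a preempted hypercall so it can be restarted: put the
 * hypercall number back in r2 and its arguments in r14-r18, and mark
 * the continuation as pending.  Preemption inside a multicall is not
 * supported yet on ia64.
 */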
unsigned long hypercall_create_continuation(
	unsigned int op, const char *format, ...)
{
	struct mc_state *mcs = &mc_state[smp_processor_id()];
	struct vcpu *v = current;
	const char *p = format;
	unsigned long arg;
	unsigned int i;
	va_list args;

	va_start(args, format);
	if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
		panic("Preemption in a multicall is not supported yet\n");
	} else {
		vcpu_set_gr(v, 2, op, 0);
		for ( i = 0; *p != '\0'; i++) {
			switch ( *p++ )
			{
			case 'i':
				arg = (unsigned long)va_arg(args, unsigned int);
				break;
			case 'l':
				arg = (unsigned long)va_arg(args, unsigned long);
				break;
			case 'h':
				arg = (unsigned long)va_arg(args, void *);
				break;
			default:
				arg = 0;
				BUG();
			}
			switch (i) {
			case 0: vcpu_set_gr(v, 14, arg, 0);
				break;
			case 1: vcpu_set_gr(v, 15, arg, 0);
				break;
			case 2: vcpu_set_gr(v, 16, arg, 0);
				break;
			case 3: vcpu_set_gr(v, 17, arg, 0);
				break;
			case 4: vcpu_set_gr(v, 18, arg, 0);
				break;
			default: panic("Too many args for hypercall continuation\n");
				break;
			}
		}
	}
	v->arch.hypercall_continuation = 1;
	va_end(args);
	return op;
}