ia64/xen-unstable

xen/arch/ia64/xen/process.c @ 9756:14a34d811e81

[IA64] introduce P2M conversion

Introduce the P2M conversion functions necessary for the dom0vp model.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author   awilliam@ldap.hp.com
date     Tue Apr 25 13:06:57 2006 -0600 (2006-04-25)
parents  bbf325d76768
children ced37bea0647

/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <linux/efi.h>  /* FOR EFI_UNIMPLEMENTED */
#include <asm/sal.h>    /* FOR struct ia64_sal_retval */

#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
//#include <asm/ldt.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/regionreg.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include "hpsim_ssc.h"
#include <xen/multicall.h>
#include <asm/debugger.h>

extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* FIXME: where should these declarations live? */
extern void panic_domain(struct pt_regs *, const char *, ...);
extern long platform_is_hp_ski(void);
extern int ia64_hyperprivop(unsigned long, REGS *);
extern int ia64_hypercall(struct pt_regs *regs);
extern void vmx_do_launch(struct vcpu *);
extern unsigned long lookup_domain_mpa(struct domain *, unsigned long);

#define IA64_PSR_CPL1 (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
// note: IA64_PSR_PK removed from the following; why is this necessary?
#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
                         IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
                         IA64_PSR_IT | IA64_PSR_BN)

#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
                         IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
                         IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
                         IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
                         IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
                         IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)

#include <xen/sched-if.h>

void schedule_tail(struct vcpu *prev)
{
    extern char ia64_ivt;
    context_saved(prev);

    if (VMX_DOMAIN(current)) {
        vmx_do_launch(current);
    } else {
        ia64_set_iva(&ia64_ivt);
        ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                     VHPT_ENABLED);
        load_region_regs(current);
        vcpu_load_kernel_regs(current);
    }
}

void tdpfoo(void) { }

// Given a domain virtual address, pte and page size, extract the
// metaphysical address, convert the pte to a physical address using the
// (possibly different) Xen PAGE_SIZE, and return the modified pte.
// (NOTE: the TLB insert should use PAGE_SIZE!)
u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps)
{
    struct domain *d = current->domain;
    ia64_itir_t itir = {.itir = itir__};
    u64 mask, mpaddr, pteval2;

    pteval &= ((1UL << 53) - 1);  // ignore bits [63:53]

    // FIXME: address had better be pre-validated on insert
    mask = ~itir_mask(itir.itir);
    mpaddr = (((pteval & ~_PAGE_ED) & _PAGE_PPN_MASK) & ~mask) |
             (address & mask);
#ifdef CONFIG_XEN_IA64_DOM0_VP
    if (itir.ps > PAGE_SHIFT) {
        itir.ps = PAGE_SHIFT;
    }
#endif
    *logps = itir.ps;
#ifndef CONFIG_XEN_IA64_DOM0_VP
    if (d == dom0) {
        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
            /*
            printk("translate_domain_pte: out-of-bounds dom0 mpaddr 0x%lx! itc=%lx...\n",
                   mpaddr, ia64_get_itc());
            */
            tdpfoo();
        }
    }
    else if ((mpaddr >> PAGE_SHIFT) > d->max_pages) {
        /* Address beyond the limit.  However the grant table is
           also beyond the limit.  Display a message if not in the
           grant table.  */
        if (mpaddr < IA64_GRANT_TABLE_PADDR
            || mpaddr >= (IA64_GRANT_TABLE_PADDR
                          + (ORDER_GRANT_FRAMES << PAGE_SHIFT)))
            printf("translate_domain_pte: bad mpa=0x%lx (> 0x%lx),"
                   " vadr=0x%lx, pteval=0x%lx, itir=0x%lx\n",
                   mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT,
                   address, pteval, itir.itir);
        tdpfoo();
    }
#endif
    pteval2 = lookup_domain_mpa(d, mpaddr);
    pteval2 &= _PAGE_PPN_MASK;  // ignore non-addr bits
    pteval2 |= (pteval & _PAGE_ED);
    pteval2 |= _PAGE_PL_2;  // force PL0->2 (PL3 is unaffected)
    pteval2 = (pteval & ~_PAGE_PPN_MASK) | pteval2;
    return pteval2;
}

// Given a current domain metaphysical address, return the physical address.
unsigned long translate_domain_mpaddr(unsigned long mpaddr)
{
    unsigned long pteval;

#ifndef CONFIG_XEN_IA64_DOM0_VP
    if (current->domain == dom0) {
        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
            printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr 0x%lx! continuing...\n",
                   mpaddr);
            tdpfoo();
        }
    }
#endif
    pteval = lookup_domain_mpa(current->domain, mpaddr);
    return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
}
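
/*
 * Illustrative sketch (not part of the original changeset): how the two
 * P2M conversion routines above are meant to be used together.  Given a
 * guest "metaphysical" address, translate_domain_mpaddr() yields a machine
 * address that Xen can then map with __va() for direct access.  The input
 * value below is a made-up example and the function itself is hypothetical.
 */
#if 0
static void p2m_conversion_example(void)
{
    unsigned long mpaddr = 0x100000UL;  /* hypothetical guest paddr */
    unsigned long maddr = translate_domain_mpaddr(mpaddr);
    void *va = __va(maddr);             /* Xen-visible mapping */

    printf("mpaddr 0x%lx -> maddr 0x%lx (va %p)\n", mpaddr, maddr, va);
}
#endif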

unsigned long slow_reflect_count[0x80] = { 0 };
unsigned long fast_reflect_count[0x80] = { 0 };

#define inc_slow_reflect_count(vec) slow_reflect_count[(vec) >> 8]++

void zero_reflect_counts(void)
{
    int i;
    for (i = 0; i < 0x80; i++) slow_reflect_count[i] = 0;
    for (i = 0; i < 0x80; i++) fast_reflect_count[i] = 0;
}

int dump_reflect_counts(char *buf)
{
    int i, j, cnt;
    char *s = buf;

    s += sprintf(s, "Slow reflections by vector:\n");
    for (i = 0, j = 0; i < 0x80; i++) {
        if ( (cnt = slow_reflect_count[i]) != 0 ) {
            s += sprintf(s, "0x%02x00:%10d, ", i, cnt);
            if ((j++ & 3) == 3) s += sprintf(s, "\n");
        }
    }
    if (j & 3) s += sprintf(s, "\n");
    s += sprintf(s, "Fast reflections by vector:\n");
    for (i = 0, j = 0; i < 0x80; i++) {
        if ( (cnt = fast_reflect_count[i]) != 0 ) {
            s += sprintf(s, "0x%02x00:%10d, ", i, cnt);
            if ((j++ & 3) == 3) s += sprintf(s, "\n");
        }
    }
    if (j & 3) s += sprintf(s, "\n");
    return s - buf;
}
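
/*
 * Illustrative sketch (not part of the original changeset):
 * dump_reflect_counts() fills a caller-supplied buffer and returns the
 * number of bytes written, so a debug path could emit the statistics as
 * below.  The buffer size and the function name are assumptions.
 */
#if 0
static void show_reflect_counts_example(void)
{
    static char buf[2048];              /* assumed large enough */
    int len = dump_reflect_counts(buf);

    if (len > 0)
        printf("%s", buf);
    zero_reflect_counts();              /* restart sampling */
}
#endif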

// Should never panic the domain... if it does, the stack may have been overrun.
void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
{
    struct vcpu *v = current;

    if (!(PSCB(v, ipsr) & IA64_PSR_DT)) {
        panic_domain(regs, "psr.dt off, trying to deliver nested dtlb!\n");
    }
    vector &= ~0xf;
    if (vector != IA64_DATA_TLB_VECTOR &&
        vector != IA64_ALT_DATA_TLB_VECTOR &&
        vector != IA64_VHPT_TRANS_VECTOR) {
        panic_domain(regs, "psr.ic off, delivering fault=%lx,ipsr=%p,iip=%p,ifa=%p,isr=%p,PSCB.iip=%p\n",
                     vector, regs->cr_ipsr, regs->cr_iip, PSCB(v, ifa), isr, PSCB(v, iip));
    }
}

void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
{
    struct vcpu *v = current;

    if (!PSCB(v, interrupt_collection_enabled))
        check_bad_nested_interruption(isr, regs, vector);
    PSCB(v, unat) = regs->ar_unat;  // not sure if this is really needed?
    PSCB(v, precover_ifs) = regs->cr_ifs;
    vcpu_bsw0(v);
    PSCB(v, ipsr) = vcpu_get_ipsr_int_state(v, regs->cr_ipsr);
    PSCB(v, isr) = isr;
    PSCB(v, iip) = regs->cr_iip;
    PSCB(v, ifs) = 0;
    PSCB(v, incomplete_regframe) = 0;

    regs->cr_iip = ((unsigned long) PSCBX(v, iva) + vector) & ~0xffUL;
    regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
#ifdef CONFIG_SMP
#warning "SMP FIXME: sharedinfo doesn't handle smp yet, need page per vcpu"
#endif
    regs->r31 = XSI_IPSR;

    v->vcpu_info->evtchn_upcall_mask = 1;
    PSCB(v, interrupt_collection_enabled) = 0;

    inc_slow_reflect_count(vector);
}

void foodpi(void) {}

static unsigned long pending_false_positive = 0;

void reflect_extint(struct pt_regs *regs)
{
    unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
    struct vcpu *v = current;
    static int first_extint = 1;

    if (first_extint) {
        printf("Delivering first extint to domain: isr=0x%lx, iip=0x%lx\n",
               isr, regs->cr_iip);
        first_extint = 0;
    }
    if (vcpu_timer_pending_early(v))
        printf("*#*#*#* about to deliver early timer to domain %d!!!\n",
               v->domain->domain_id);
    PSCB(current, itir) = 0;
    reflect_interruption(isr, regs, IA64_EXTINT_VECTOR);
}

// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void deliver_pending_interrupt(struct pt_regs *regs)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;
    // FIXME: Will this work properly if doing an RFI???
    if (!is_idle_domain(d) && user_mode(regs)) {
        //vcpu_poke_timer(v);
        if (vcpu_deliverable_interrupts(v))
            reflect_extint(regs);
        else if (PSCB(v, pending_interruption))
            ++pending_false_positive;
    }
}

unsigned long lazy_cover_count = 0;

int handle_lazy_cover(struct vcpu *v, unsigned long isr, struct pt_regs *regs)
{
    if (!PSCB(v, interrupt_collection_enabled)) {
        PSCB(v, ifs) = regs->cr_ifs;
        PSCB(v, incomplete_regframe) = 1;
        regs->cr_ifs = 0;
        lazy_cover_count++;
        return 1;  // retry same instruction with cr.ifs off
    }
    return 0;
}

void ia64_do_page_fault(unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
{
    unsigned long iip = regs->cr_iip, iha;
    // FIXME: should validate address here
    unsigned long pteval;
    unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
    IA64FAULT fault;

    if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, isr, regs)) return;
    if ((isr & IA64_ISR_SP)
        || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
    {
        /*
         * This fault was due to a speculative load or lfetch.fault; set the
         * "ed" bit in the psr to ensure forward progress.  (The target
         * register will get a NaT for ld.s; lfetch will be canceled.)
         */
        ia64_psr(regs)->ed = 1;
        return;
    }

 again:
    fault = vcpu_translate(current, address, is_data, 0, &pteval, &itir, &iha);
    if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
        u64 logps;
        pteval = translate_domain_pte(pteval, address, itir, &logps);
        vcpu_itc_no_srlz(current, is_data ? 2 : 1, address, pteval, -1UL, logps);
        if (fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) {
            /* The dtlb was purged in between; it was the matching entry.
               Undo the work and retry. */
#ifdef VHPT_GLOBAL
            vhpt_flush_address(address, 1);
#endif
            ia64_ptcl(address, 1 << 2);
            ia64_srlz_i();
            goto again;
        }
        return;
    }

    if (!user_mode(regs)) {
        /* The fault occurred inside Xen. */
        if (!ia64_done_with_exception(regs)) {
            // Should never happen.  If it does, a region 0 address may
            // indicate a bad xen pointer.
            printk("*** xen_handle_domain_access: exception table"
                   " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
                   iip, address);
            panic_domain(regs, "*** xen_handle_domain_access: exception table"
                         " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
                         iip, address);
        }
        return;
    }
    if (!PSCB(current, interrupt_collection_enabled)) {
        check_bad_nested_interruption(isr, regs, fault);
        //printf("Delivering NESTED DATA TLB fault\n");
        fault = IA64_DATA_NESTED_TLB_VECTOR;
        regs->cr_iip = ((unsigned long) PSCBX(current, iva) + fault) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        // NOTE: nested trap must NOT pass PSCB address
        //regs->r31 = (unsigned long) &PSCB(current);
        inc_slow_reflect_count(fault);
        return;
    }

    PSCB(current, itir) = itir;
    PSCB(current, iha) = iha;
    PSCB(current, ifa) = address;
    reflect_interruption(isr, regs, fault);
}

void
ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
           unsigned long iim, unsigned long itir, unsigned long arg5,
           unsigned long arg6, unsigned long arg7, unsigned long stack)
{
    struct pt_regs *regs = (struct pt_regs *) &stack;
    unsigned long code;
    char buf[128];
    static const char * const reason[] = {
        "IA-64 Illegal Operation fault",
        "IA-64 Privileged Operation fault",
        "IA-64 Privileged Register fault",
        "IA-64 Reserved Register/Field fault",
        "Disabled Instruction Set Transition fault",
        "Unknown fault 5", "Unknown fault 6", "Unknown fault 7",
        "Illegal Hazard fault",
        "Unknown fault 9", "Unknown fault 10", "Unknown fault 11",
        "Unknown fault 12", "Unknown fault 13", "Unknown fault 14",
        "Unknown fault 15"
    };
#if 0
    printf("ia64_fault, vector=0x%p, ifa=%p, iip=%p, ipsr=%p, isr=%p\n",
           vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);
#endif

    if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
        /*
         * This fault was due to lfetch.fault; set the "ed" bit in the psr
         * to cancel the lfetch.
         */
        ia64_psr(regs)->ed = 1;
        printf("ia64_fault: handled lfetch.fault\n");
        return;
    }

    switch (vector) {
    case 24: /* General Exception */
        code = (isr >> 4) & 0xf;
        sprintf(buf, "General Exception: %s%s", reason[code],
                (code == 3) ? ((isr & (1UL << 37))
                               ? " (RSE access)" : " (data access)") : "");
        if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
            printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
                   current->comm, current->pid,
                   regs->cr_iip + ia64_psr(regs)->ri, regs->pr);
# endif
            printf("ia64_fault: returning on hazard\n");
            return;
        }
        break;

    case 25: /* Disabled FP-Register */
        if (isr & 2) {
            //disabled_fph_fault(regs);
            //return;
        }
        sprintf(buf, "Disabled FPL fault---not supposed to happen!");
        break;

    case 26: /* NaT Consumption */
        if (user_mode(regs)) {
            void *addr;

            if (((isr >> 4) & 0xf) == 2) {
                /* NaT page consumption */
                //sig = SIGSEGV;
                //code = SEGV_ACCERR;
                addr = (void *) ifa;
            } else {
                /* register NaT consumption */
                //sig = SIGILL;
                //code = ILL_ILLOPN;
                addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
            }
            //siginfo.si_signo = sig;
            //siginfo.si_code = code;
            //siginfo.si_errno = 0;
            //siginfo.si_addr = addr;
            //siginfo.si_imm = vector;
            //siginfo.si_flags = __ISR_VALID;
            //siginfo.si_isr = isr;
            //force_sig_info(sig, &siginfo, current);
            //return;
        } //else if (ia64_done_with_exception(regs))
            //return;
        sprintf(buf, "NaT consumption");
        break;

    case 31: /* Unsupported Data Reference */
        if (user_mode(regs)) {
            //siginfo.si_signo = SIGILL;
            //siginfo.si_code = ILL_ILLOPN;
            //siginfo.si_errno = 0;
            //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
            //siginfo.si_imm = vector;
            //siginfo.si_flags = __ISR_VALID;
            //siginfo.si_isr = isr;
            //force_sig_info(SIGILL, &siginfo, current);
            //return;
        }
        sprintf(buf, "Unsupported data reference");
        break;

    case 29: /* Debug */
    case 35: /* Taken Branch Trap */
    case 36: /* Single Step Trap */
        //if (fsys_mode(current, regs)) {}
        switch (vector) {
        case 29:
            //siginfo.si_code = TRAP_HWBKPT;
#ifdef CONFIG_ITANIUM
            /*
             * Erratum 10 (IFA may contain incorrect address) now has
             * "NoFix" status.  There are no plans for fixing this.
             */
            if (ia64_psr(regs)->is == 0)
                ifa = regs->cr_iip;
#endif
            break;
        case 35: ifa = 0; break;
        case 36: ifa = 0; break;
        //case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
        //case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
        }
        //siginfo.si_signo = SIGTRAP;
        //siginfo.si_errno = 0;
        //siginfo.si_addr = (void *) ifa;
        //siginfo.si_imm = 0;
        //siginfo.si_flags = __ISR_VALID;
        //siginfo.si_isr = isr;
        //force_sig_info(SIGTRAP, &siginfo, current);
        //return;
        /* with the signal path commented out, execution falls through
           to the FP case below, which sets buf */

    case 32: /* fp fault */
    case 33: /* fp trap */
        //result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
        //if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
        //siginfo.si_signo = SIGFPE;
        //siginfo.si_errno = 0;
        //siginfo.si_code = FPE_FLTINV;
        //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
        //siginfo.si_flags = __ISR_VALID;
        //siginfo.si_isr = isr;
        //siginfo.si_imm = 0;
        //force_sig_info(SIGFPE, &siginfo, current);
        //}
        //return;
        sprintf(buf, "FP fault/trap");
        break;

    case 34:
        if (isr & 0x2) {
            /* Lower-Privilege Transfer Trap */
            /*
             * Just clear PSR.lp and then return immediately: all the
             * interesting work (e.g., signal delivery) is done in the
             * kernel exit path.
             */
            //ia64_psr(regs)->lp = 0;
            //return;
            sprintf(buf, "Lower-Privilege Transfer trap");
        } else {
            /* Unimplemented Instr. Address Trap */
            if (user_mode(regs)) {
                //siginfo.si_signo = SIGILL;
                //siginfo.si_code = ILL_BADIADDR;
                //siginfo.si_errno = 0;
                //siginfo.si_flags = 0;
                //siginfo.si_isr = 0;
                //siginfo.si_imm = 0;
                //siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
                //force_sig_info(SIGILL, &siginfo, current);
                //return;
            }
            sprintf(buf, "Unimplemented Instruction Address fault");
        }
        break;

    case 45:
        printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
        printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
               regs->cr_iip, ifa, isr);
        //force_sig(SIGSEGV, current);
        break;

    case 46:
        printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
        printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
               regs->cr_iip, ifa, isr, iim);
        //force_sig(SIGSEGV, current);
        return;

    case 47:
        sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
        break;

    default:
        sprintf(buf, "Fault %lu", vector);
        break;
    }
    //die_if_kernel(buf, regs, error);
    printk("ia64_fault: %s: reflecting\n", buf);
    PSCB(current, itir) = vcpu_get_itir_on_fault(current, ifa);
    PSCB(current, ifa) = ifa;
    reflect_interruption(isr, regs, IA64_GENEX_VECTOR);
    //while(1);
    //force_sig(SIGILL, current);
}

unsigned long running_on_sim = 0;

void
do_ssc(unsigned long ssc, struct pt_regs *regs)
{
    unsigned long arg0, arg1, arg2, arg3, retval;
    char buf[2];
/**/ static int last_fd, last_count;  // FIXME FIXME FIXME
/**/ // BROKEN FOR MULTIPLE DOMAINS & SMP
/**/ struct ssc_disk_stat { int fd; unsigned count; } *stat, last_stat;

    arg0 = vcpu_get_gr(current, 32);
    switch (ssc) {
    case SSC_PUTCHAR:
        buf[0] = arg0;
        buf[1] = '\0';
        printf("%s", buf);  // use a format string; buf comes from the guest
        break;
    case SSC_GETCHAR:
        retval = ia64_ssc(0, 0, 0, 0, ssc);
        vcpu_set_gr(current, 8, retval, 0);
        break;
    case SSC_WAIT_COMPLETION:
        if (arg0) {  // metaphysical address

            arg0 = translate_domain_mpaddr(arg0);
/**/        stat = (struct ssc_disk_stat *)__va(arg0);
///**/      if (stat->fd == last_fd) stat->count = last_count;
/**/        stat->count = last_count;
            //if (last_count >= PAGE_SIZE) printf("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
///**/      retval = ia64_ssc(arg0,0,0,0,ssc);
/**/        retval = 0;
        }
        else retval = -1L;
        vcpu_set_gr(current, 8, retval, 0);
        break;
    case SSC_OPEN:
        arg1 = vcpu_get_gr(current, 33);  // access rights
        if (!running_on_sim) {
            printf("SSC_OPEN, not implemented on hardware. (ignoring...)\n");
            arg0 = 0;
        }
        if (arg0) {  // metaphysical address
            arg0 = translate_domain_mpaddr(arg0);
            retval = ia64_ssc(arg0, arg1, 0, 0, ssc);
        }
        else retval = -1L;
        vcpu_set_gr(current, 8, retval, 0);
        break;
    case SSC_WRITE:
    case SSC_READ:
        //if (ssc == SSC_WRITE) printf("DOING AN SSC_WRITE\n");
        arg1 = vcpu_get_gr(current, 33);
        arg2 = vcpu_get_gr(current, 34);
        arg3 = vcpu_get_gr(current, 35);
        if (arg2) {  // metaphysical address of descriptor
            struct ssc_disk_req *req;
            unsigned long mpaddr;
            long len;

            arg2 = translate_domain_mpaddr(arg2);
            req = (struct ssc_disk_req *) __va(arg2);
            req->len &= 0xffffffffL;  // avoid strange bug
            len = req->len;
/**/        last_fd = arg1;
/**/        last_count = len;
            mpaddr = req->addr;
            //if (last_count >= PAGE_SIZE) printf("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
            retval = 0;
            if ((mpaddr & PAGE_MASK) != ((mpaddr + len - 1) & PAGE_MASK)) {
                // do the partial page first
                req->addr = translate_domain_mpaddr(mpaddr);
                req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
                len -= req->len; mpaddr += req->len;
                retval = ia64_ssc(arg0, arg1, arg2, arg3, ssc);
                arg3 += req->len;  // file offset
/**/            last_stat.fd = last_fd;
/**/            (void)ia64_ssc(__pa(&last_stat), 0, 0, 0, SSC_WAIT_COMPLETION);
                //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
            }
            if (retval >= 0) while (len > 0) {
                req->addr = translate_domain_mpaddr(mpaddr);
                req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
                len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
                retval = ia64_ssc(arg0, arg1, arg2, arg3, ssc);
                arg3 += req->len;  // file offset
                // TEMP REMOVED AGAIN arg3 += req->len; // file offset
/**/            last_stat.fd = last_fd;
/**/            (void)ia64_ssc(__pa(&last_stat), 0, 0, 0, SSC_WAIT_COMPLETION);
                //if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
            }
            // set it back to the original value
            req->len = last_count;
        }
        else retval = -1L;
        vcpu_set_gr(current, 8, retval, 0);
        //if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
        break;
    case SSC_CONNECT_INTERRUPT:
        arg1 = vcpu_get_gr(current, 33);
        arg2 = vcpu_get_gr(current, 34);
        arg3 = vcpu_get_gr(current, 35);
        if (!running_on_sim) {
            printf("SSC_CONNECT_INTERRUPT, not implemented on hardware. (ignoring...)\n");
            break;
        }
        (void)ia64_ssc(arg0, arg1, arg2, arg3, ssc);
        break;
    case SSC_NETDEV_PROBE:
        vcpu_set_gr(current, 8, -1L, 0);
        break;
    default:
        printf("ia64_handle_break: bad ssc code %lx, iip=0x%lx, b0=0x%lx... spinning\n",
               ssc, regs->cr_iip, regs->b0);
        while (1);
        break;
    }
    vcpu_increment_iip(current);
}

/* Also read in hyperprivop.S */
int first_break = 1;

void
ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;

    if (first_break) {
        if (platform_is_hp_ski()) running_on_sim = 1;
        else running_on_sim = 0;
        first_break = 0;
    }
    if (iim == 0x80001 || iim == 0x80002) {  // FIXME: don't hardcode constant
        do_ssc(vcpu_get_gr(current, 36), regs);
    }
#ifdef CRASH_DEBUG
    else if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs)) {
        if (iim == 0)
            show_registers(regs);
        debugger_trap_fatal(0 /* don't care */, regs);
    }
#endif
    else if (iim == d->arch.breakimm) {
        /* by default, do not continue */
        v->arch.hypercall_continuation = 0;

        if (ia64_hypercall(regs) &&
            !PSCBX(v, hypercall_continuation))
            vcpu_increment_iip(current);
    }
    else if (!PSCB(v, interrupt_collection_enabled)) {
        if (ia64_hyperprivop(iim, regs))
            vcpu_increment_iip(current);
    }
    else {
        if (iim == 0)
            die_if_kernel("bug check", regs, iim);
        PSCB(v, iim) = iim;
        reflect_interruption(isr, regs, IA64_BREAK_VECTOR);
    }
}
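
/*
 * Editorial summary of the break-immediate dispatch above (not part of
 * the original changeset):
 *   iim == 0x80001 or 0x80002          -> simulator system call (do_ssc)
 *   iim == 0 or CDB_BREAK_NUM, in Xen  -> crash debugger (CRASH_DEBUG only)
 *   iim == d->arch.breakimm            -> paravirtual hypercall (ia64_hypercall)
 *   psr.ic off (collection disabled)   -> hyperprivop fast path
 *   anything else                      -> reflected to the guest as IA64_BREAK_VECTOR
 */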

void
ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
{
    IA64FAULT vector;

    vector = priv_emulate(current, regs, isr);
    if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
        // Note: if a path results in a vector to reflect that requires
        // iha/itir (e.g. vcpu_force_data_miss), they must be set there
        reflect_interruption(isr, regs, vector);
    }
}

/* Used in vhpt.h. */
#define INTR_TYPE_MAX 10
UINT64 int_counts[INTR_TYPE_MAX];

void
ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
{
    struct vcpu *v = current;
    unsigned long check_lazy_cover = 0;
    unsigned long psr = regs->cr_ipsr;

    /* The following faults shouldn't be seen from Xen itself */
    if (!(psr & IA64_PSR_CPL)) BUG();

    switch (vector) {
    case 8:
        vector = IA64_DIRTY_BIT_VECTOR; break;
    case 9:
        vector = IA64_INST_ACCESS_BIT_VECTOR; break;
    case 10:
        check_lazy_cover = 1;
        vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
    case 20:
        check_lazy_cover = 1;
        vector = IA64_PAGE_NOT_PRESENT_VECTOR; break;
    case 22:
        vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
    case 23:
        check_lazy_cover = 1;
        vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
    case 25:
        vector = IA64_DISABLED_FPREG_VECTOR;
        break;
    case 26:
        if (((isr >> 4L) & 0xfL) == 1) {
            //regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
            printf("ia64_handle_reflection: handling regNaT fault\n");
            vector = IA64_NAT_CONSUMPTION_VECTOR; break;
        }
#if 1
        // pass null pointer dereferences through with no error
        // but retain debug output for non-zero ifa
        if (!ifa) {
            vector = IA64_NAT_CONSUMPTION_VECTOR; break;
        }
#endif
        printf("*** NaT fault... attempting to handle as privop\n");
        printf("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
               isr, ifa, regs->cr_iip, psr);
        //regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
        // certain NaT faults are higher priority than privop faults
        vector = priv_emulate(v, regs, isr);
        if (vector == IA64_NO_FAULT) {
            printf("*** Handled privop masquerading as NaT fault\n");
            return;
        }
        vector = IA64_NAT_CONSUMPTION_VECTOR; break;
    case 27:
        //printf("*** Handled speculation vector, itc=%lx!\n", ia64_get_itc());
        PSCB(current, iim) = iim;
        vector = IA64_SPECULATION_VECTOR; break;
    case 30:
        // FIXME: Should we handle unaligned refs in Xen??
        vector = IA64_UNALIGNED_REF_VECTOR; break;
    case 32:
        printf("ia64_handle_reflection: handling FP fault\n");
        vector = IA64_FP_FAULT_VECTOR; break;
    case 33:
        printf("ia64_handle_reflection: handling FP trap\n");
        vector = IA64_FP_TRAP_VECTOR; break;
    case 34:
        printf("ia64_handle_reflection: handling lowerpriv trap\n");
        vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR; break;
    case 35:
        printf("ia64_handle_reflection: handling taken branch trap\n");
        vector = IA64_TAKEN_BRANCH_TRAP_VECTOR; break;
    case 36:
        printf("ia64_handle_reflection: handling single step trap\n");
        vector = IA64_SINGLE_STEP_TRAP_VECTOR; break;

    default:
        printf("ia64_handle_reflection: unhandled vector=0x%lx\n", vector);
        while (vector);
        return;
    }
    if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, isr, regs)) return;
    PSCB(current, ifa) = ifa;
    PSCB(current, itir) = vcpu_get_itir_on_fault(v, ifa);
    reflect_interruption(isr, regs, vector);
}

unsigned long hypercall_create_continuation(
    unsigned int op, const char *format, ...)
{
    struct mc_state *mcs = &mc_state[smp_processor_id()];
    struct vcpu *v = current;
    const char *p = format;
    unsigned long arg;
    unsigned int i;
    va_list args;

    va_start(args, format);
    if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
        panic("PREEMPT happened in multicall\n");  // not supported yet
    } else {
        vcpu_set_gr(v, 2, op, 0);
        for ( i = 0; *p != '\0'; i++) {
            switch ( *p++ )
            {
            case 'i':
                arg = (unsigned long)va_arg(args, unsigned int);
                break;
            case 'l':
                arg = (unsigned long)va_arg(args, unsigned long);
                break;
            case 'h':
                arg = (unsigned long)va_arg(args, void *);
                break;
            default:
                arg = 0;
                BUG();
            }
            switch (i) {
            case 0: vcpu_set_gr(v, 14, arg, 0);
                break;
            case 1: vcpu_set_gr(v, 15, arg, 0);
                break;
            case 2: vcpu_set_gr(v, 16, arg, 0);
                break;
            case 3: vcpu_set_gr(v, 17, arg, 0);
                break;
            case 4: vcpu_set_gr(v, 18, arg, 0);
                break;
            default: panic("Too many args for hypercall continuation\n");
                break;
            }
        }
    }
    v->arch.hypercall_continuation = 1;
    va_end(args);
    return op;
}
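
/*
 * Illustrative sketch (not part of the original changeset): how a
 * preemptible hypercall might re-arm itself with the helper above.  The
 * format string encodes the remaining arguments: 'i' = unsigned int,
 * 'l' = unsigned long, 'h' = handle/pointer.  __HYPERVISOR_example_op,
 * its arguments, and the use of hypercall_preempt_check() here are
 * assumptions made for the sketch.
 */
#if 0
static long example_preemptible_op(unsigned long start, unsigned long nr)
{
    while (nr > 0) {
        if (hypercall_preempt_check())
            /* resume later with the updated (start, nr) */
            return hypercall_create_continuation(
                __HYPERVISOR_example_op, "ll", start, nr);
        /* ... process one unit of work at 'start' ... */
        start++;
        nr--;
    }
    return 0;
}
#endif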