ia64/xen-unstable

xen/arch/ia64/xen/xenmisc.c @ 9770:ced37bea0647

[IA64] FPH enabling + cleanup

Move contents of switch_to macro from xensystem.h to context_switch function.
Initialize FPU on all processors. FPH is always enabled in Xen.
Speed up context-switch (a little bit!) by not enabling/disabling FPH.
Cleanup (unused functions/variables/fields, debug printfs, ...)
vmx_ia64_switch_to removed (was unused).

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Tue Apr 25 22:35:41 2006 -0600 (2006-04-25)
parents 14a34d811e81
children fcfc614d3713
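The heart of the change is visible in context_switch() below: the FPH partition
(f32-f127) is now saved and restored unconditionally rather than lazily
enabled/disabled via psr.dfh. A minimal, self-contained sketch of that eager
save/restore pattern (toy types and names only, not Xen code):

#include <string.h>

/* Stand-in for the high floating-point partition, f32-f127 (96 registers). */
struct fph_state { unsigned long f[96][2]; };

struct toy_vcpu { struct fph_state fph; };

/* Eager approach: always save the outgoing vcpu's FPH and load the incoming
 * one's, so no disabled-FP fault handling is needed afterwards.  The two
 * memcpy() calls play the role of __ia64_save_fpu()/__ia64_load_fpu(). */
void toy_fph_switch(struct toy_vcpu *prev, struct toy_vcpu *next,
                    struct fph_state *cpu_fph)
{
    memcpy(&prev->fph, cpu_fph, sizeof(*cpu_fph));
    memcpy(cpu_fph, &next->fph, sizeof(*cpu_fph));
}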
line source
/*
 * Xen misc
 *
 * Functions/decls that are/may be needed to link with Xen because
 * of x86 dependencies
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 * Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <linux/config.h>
#include <xen/sched.h>
#include <linux/efi.h>
#include <asm/processor.h>
#include <xen/serial.h>
#include <asm/io.h>
#include <xen/softirq.h>
#include <public/sched.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/vmx.h>
#include <asm/vmx_vcpu.h>
#include <asm/vcpu.h>

unsigned long loops_per_jiffy = (1<<12); // from linux/init/main.c
/* FIXME: where should these declarations live? */
extern void show_registers(struct pt_regs *regs);

void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
void ia64_mca_cpu_init(void *x) { }
void hpsim_setup(char **x)
{
#ifdef CONFIG_SMP
    init_smp_config();
#endif
}

// called from mem_init... don't think s/w I/O tlb is needed in Xen
//void swiotlb_init(void) { } ...looks like it IS needed
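/*
 * Detect the HP Ski simulator: check the processor's CPUID vendor and
 * version registers for the simulator's signature.
 */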
long
is_platform_hp_ski(void)
{
    int i;
    long cpuid[6];

    for (i = 0; i < 5; ++i)
        cpuid[i] = ia64_get_cpuid(i);
    if ((cpuid[0] & 0xff) != 'H') return 0;
    if ((cpuid[3] & 0xff) != 0x4) return 0;
    if (((cpuid[3] >> 8) & 0xff) != 0x0) return 0;
    if (((cpuid[3] >> 16) & 0xff) != 0x0) return 0;
    if (((cpuid[3] >> 24) & 0x7) != 0x7) return 0;
    return 1;
}

long
platform_is_hp_ski(void)
{
    extern long running_on_sim;
    return running_on_sim;
}

struct pt_regs *guest_cpu_user_regs(void) { return vcpu_regs(current); }
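/*
 * Translate a guest pfn of a foreign domain into a machine frame number
 * via the domain's physical-to-machine lookup (lookup_domain_mpa).
 */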
unsigned long
gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
{
#ifndef CONFIG_XEN_IA64_DOM0_VP
    if (d == dom0)
        return(gpfn);
    else
#endif
    {
        unsigned long pte = lookup_domain_mpa(d, gpfn << PAGE_SHIFT);
        if (!pte) {
            printk("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
            while(1);
            return 0;
        }
        return ((pte & _PFN_MASK) >> PAGE_SHIFT);
    }
}

#if 0
u32
mfn_to_gmfn(struct domain *d, unsigned long frame)
{
    // FIXME: is this right?
    if ((frame << PAGE_SHIFT) & _PAGE_PPN_MASK) {
        printk("mfn_to_gmfn: bad frame. spinning...\n");
        while(1);
    }
    return frame;
}
#endif
///////////////////////////////
// from arch/x86/flushtlb.c
///////////////////////////////

u32 tlbflush_clock;
u32 tlbflush_time[NR_CPUS];

///////////////////////////////
// from arch/x86/memory.c
///////////////////////////////

static void free_page_type(struct page_info *page, u32 type)
{
}

static int alloc_page_type(struct page_info *page, u32 type)
{
    return 1;
}

///////////////////////////////
//// misc memory stuff
///////////////////////////////

unsigned long __get_free_pages(unsigned int mask, unsigned int order)
{
    void *p = alloc_xenheap_pages(order);

    memset(p, 0, PAGE_SIZE << order);
    return (unsigned long)p;
}

void __free_pages(struct page_info *page, unsigned int order)
{
    if (order) BUG();
    free_xenheap_page(page);
}

void *pgtable_quicklist_alloc(void)
{
    void *p;
    p = alloc_xenheap_pages(0);
    if (p)
        clear_page(p);
    return p;
}

void pgtable_quicklist_free(void *pgtable_entry)
{
    free_xenheap_page(pgtable_entry);
}
///////////////////////////////
// from arch/ia64/traps.c
///////////////////////////////

int is_kernel_text(unsigned long addr)
{
    extern char _stext[], _etext[];
    if (addr >= (unsigned long) _stext &&
        addr <= (unsigned long) _etext)
        return 1;

    return 0;
}

unsigned long kernel_text_end(void)
{
    extern char _etext[];
    return (unsigned long) _etext;
}

///////////////////////////////
// from common/keyhandler.c
///////////////////////////////
void dump_pageframe_info(struct domain *d)
{
    printk("dump_pageframe_info not implemented\n");
}

///////////////////////////////
// called from arch/ia64/head.S
///////////////////////////////

void console_print(char *msg)
{
    printk("console_print called, how did start_kernel return???\n");
}

void kernel_thread_helper(void)
{
    printk("kernel_thread_helper not implemented\n");
    dummy();
}

void sys_exit(void)
{
    printk("sys_exit not implemented\n");
    dummy();
}
////////////////////////////////////
// called from unaligned.c
////////////////////////////////////

void die_if_kernel(char *str, struct pt_regs *regs, long err) /* __attribute__ ((noreturn)) */
{
    if (user_mode(regs))
        return;

    printk("%s: %s %ld\n", __func__, str, err);
    debugtrace_dump();
    show_registers(regs);
    domain_crash_synchronous();
}

long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
           unsigned long user_rbs_end, unsigned long addr, long *val)
{
    printk("ia64_peek: called, not implemented\n");
    return 1;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
           unsigned long user_rbs_end, unsigned long addr, long val)
{
    printk("ia64_poke: called, not implemented\n");
    return 1;
}

void
ia64_sync_fph (struct task_struct *task)
{
    printk("ia64_sync_fph: called, not implemented\n");
}

void
ia64_flush_fph (struct task_struct *task)
{
    printk("ia64_flush_fph: called, not implemented\n");
}

////////////////////////////////////
// called from irq_ia64.c:init_IRQ()
// (because CONFIG_IA64_HP_SIM is specified)
////////////////////////////////////
void hpsim_irq_init(void) { }
// accommodate linux extable.c
//const struct exception_table_entry *
void *search_module_extables(unsigned long addr) { return NULL; }
void *__module_text_address(unsigned long addr) { return NULL; }
void *module_text_address(unsigned long addr) { return NULL; }
unsigned long context_switch_count = 0;

extern struct vcpu *ia64_switch_to (struct vcpu *next_task);

void context_switch(struct vcpu *prev, struct vcpu *next)
{
    uint64_t spsr;
    uint64_t pta;

    local_irq_save(spsr);
    context_switch_count++;
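    /*
     * FPH is always enabled in Xen, so the high floating-point partition
     * (f32-f127) is saved and restored unconditionally on every switch.
     */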
    __ia64_save_fpu(prev->arch._thread.fph);
    __ia64_load_fpu(next->arch._thread.fph);
    if (VMX_DOMAIN(prev))
        vmx_save_state(prev);
    if (VMX_DOMAIN(next))
        vmx_load_state(next);
    /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
    prev = ia64_switch_to(next);
    if (!VMX_DOMAIN(current)) {
        vcpu_set_next_timer(current);
    }

    // leave this debug for now: it acts as a heartbeat when more than
    // one domain is active
    {
        static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50};
        static int i = 100;
        int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
        if (!cnt[id]--) { printk("%x",id); cnt[id] = 500000; }
        if (!i--) { printk("+"); i = 1000000; }
    }

    if (VMX_DOMAIN(current)) {
        vmx_load_all_rr(current);
    } else {
        extern char ia64_ivt;
        ia64_set_iva(&ia64_ivt);
        if (!is_idle_domain(current->domain)) {
            ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                         VHPT_ENABLED);
            load_region_regs(current);
            vcpu_load_kernel_regs(current);
            if (vcpu_timer_expired(current))
                vcpu_pend_timer(current);
        } else {
            /* When switching to the idle domain, we only need to disable the
             * VHPT walker. All accesses that happen within the idle context
             * are then handled by TR mappings and the identity mapping.
             */
            pta = ia64_get_pta();
            ia64_set_pta(pta & ~VHPT_ENABLED);
        }
    }
    local_irq_restore(spsr);
    context_saved(prev);
}
void continue_running(struct vcpu *same)
{
    /* nothing to do */
}

void arch_dump_domain_info(struct domain *d)
{
}

void panic_domain(struct pt_regs *regs, const char *fmt, ...)
{
    va_list args;
    char buf[128];
    struct vcpu *v = current;

    printf("$$$$$ PANIC in domain %d (k6=0x%lx): ",
           v->domain->domain_id,
           __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT]);
    va_start(args, fmt);
    (void)vsnprintf(buf, sizeof(buf), fmt, args);
    va_end(args);
    printf("%s", buf); /* don't treat the formatted message as a format string */
    if (regs) show_registers(regs);
    if (regs) {
        debugger_trap_fatal(0 /* don't care */, regs);
    } else {
        debugger_trap_immediate();
    }
    domain_crash_synchronous ();
}
///////////////////////////////
// from arch/x86/mm.c
///////////////////////////////

#ifdef VERBOSE
#define MEM_LOG(_f, _a...)                                      \
    printk("DOM%u: (file=mm.c, line=%d) " _f "\n",              \
           current->domain->domain_id , __LINE__ , ## _a )
#else
#define MEM_LOG(_f, _a...) ((void)0)
#endif

void cleanup_writable_pagetable(struct domain *d)
{
    return;
}
void put_page_type(struct page_info *page)
{
    u32 nx, x, y = page->u.inuse.type_info;

 again:
    do {
        x  = y;
        nx = x - 1;

        ASSERT((x & PGT_count_mask) != 0);

        /*
         * The page should always be validated while a reference is held. The
         * exception is during domain destruction, when we forcibly invalidate
         * page-table pages if we detect a referential loop.
         * See domain.c:relinquish_list().
         */
        ASSERT((x & PGT_validated) ||
               test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));

        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            /* Record TLB information for flush later. Races are harmless. */
            page->tlbflush_timestamp = tlbflush_current_time();

            if ( unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) &&
                 likely(nx & PGT_validated) )
            {
                /*
                 * Page-table pages must be unvalidated when count is zero. The
                 * 'free' is safe because the refcnt is non-zero and validated
                 * bit is clear => other ops will spin or fail.
                 */
                if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x,
                                           x & ~PGT_validated)) != x) )
                    goto again;
                /* We cleared the 'valid bit' so we do the clean up. */
                free_page_type(page, x);
                /* Carry on, but with the 'valid bit' now clear. */
                x  &= ~PGT_validated;
                nx &= ~PGT_validated;
            }
        }
        else if ( unlikely(((nx & (PGT_pinned | PGT_count_mask)) ==
                            (PGT_pinned | 1)) &&
                           ((nx & PGT_type_mask) != PGT_writable_page)) )
        {
            /* Page is now only pinned. Make the back pointer mutable again. */
            nx |= PGT_va_mutable;
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
}
int get_page_type(struct page_info *page, u32 type)
{
    u32 nx, x, y = page->u.inuse.type_info;

 again:
    do {
        x  = y;
        nx = x + 1;
        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
            return 0;
        }
        else if ( unlikely((x & PGT_count_mask) == 0) )
        {
            if ( (x & (PGT_type_mask|PGT_va_mask)) != type )
            {
                if ( (x & PGT_type_mask) != (type & PGT_type_mask) )
                {
                    /*
                     * On type change we check to flush stale TLB
                     * entries. This may be unnecessary (e.g., page
                     * was GDT/LDT) but those circumstances should be
                     * very rare.
                     */
                    cpumask_t mask =
                        page_get_owner(page)->domain_dirty_cpumask;
                    tlbflush_filter(mask, page->tlbflush_timestamp);

                    if ( unlikely(!cpus_empty(mask)) )
                    {
                        perfc_incrc(need_flush_tlb_flush);
                        flush_tlb_mask(mask);
                    }
                }

                /* We lose existing type, back pointer, and validity. */
                nx &= ~(PGT_type_mask | PGT_va_mask | PGT_validated);
                nx |= type;

                /* No special validation needed for writable pages. */
                /* Page tables and GDT/LDT need to be scanned for validity. */
                if ( type == PGT_writable_page )
                    nx |= PGT_validated;
            }
        }
        else
        {
            if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != type) )
            {
                if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) )
                {
                    if ( current->domain == page_get_owner(page) )
                    {
                        /*
                         * This ensures functions like set_gdt() see up-to-date
                         * type info without needing to clean up writable p.t.
                         * state on the fast path.
                         */
                        LOCK_BIGLOCK(current->domain);
                        cleanup_writable_pagetable(current->domain);
                        y = page->u.inuse.type_info;
                        UNLOCK_BIGLOCK(current->domain);
                        /* Can we make progress now? */
                        if ( ((y & PGT_type_mask) == (type & PGT_type_mask)) ||
                             ((y & PGT_count_mask) == 0) )
                            goto again;
                    }
                    if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
                         ((type & PGT_type_mask) != PGT_l1_page_table) )
                        MEM_LOG("Bad type (saw %08x != exp %08x) "
                                "for mfn %016lx (pfn %016lx)",
                                x, type, page_to_mfn(page),
                                get_gpfn_from_mfn(page_to_mfn(page)));
                    return 0;
                }
                else if ( (x & PGT_va_mask) == PGT_va_mutable )
                {
                    /* The va backpointer is mutable, hence we update it. */
                    nx &= ~PGT_va_mask;
                    nx |= type; /* we know the actual type is correct */
                }
                else if ( ((type & PGT_va_mask) != PGT_va_mutable) &&
                          ((type & PGT_va_mask) != (x & PGT_va_mask)) )
                {
#ifdef CONFIG_X86_PAE
                    /* We use backptr as extra typing. Cannot be unknown. */
                    if ( (type & PGT_type_mask) == PGT_l2_page_table )
                        return 0;
#endif
                    /* This table is possibly mapped at multiple locations. */
                    nx &= ~PGT_va_mask;
                    nx |= PGT_va_unknown;
                }
            }
            if ( unlikely(!(x & PGT_validated)) )
            {
                /* Someone else is updating validation of this page. Wait... */
                while ( (y = page->u.inuse.type_info) == x )
                    cpu_relax();
                goto again;
            }
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );

    if ( unlikely(!(nx & PGT_validated)) )
    {
        /* Try to validate page type; drop the new reference on failure. */
        if ( unlikely(!alloc_page_type(page, type)) )
        {
            MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %08x"
                    ": caf=%08x taf=%" PRtype_info,
                    page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)),
                    type, page->count_info, page->u.inuse.type_info);
            /* No one else can get a reference. We hold the only ref. */
            page->u.inuse.type_info = 0;
            return 0;
        }

        /* No one else is updating simultaneously. */
        __set_bit(_PGT_validated, &page->u.inuse.type_info);
    }

    return 1;
}