ia64/xen-unstable

xen/arch/ia64/xen/xenmisc.c @ 9756:14a34d811e81

[IA64] introduce P2M conversion

Introduce the P2M conversion functions needed for the dom0vp model.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@ldap.hp.com
date Tue Apr 25 13:06:57 2006 -0600 (2006-04-25)
parents 96bc87dd7ca9
children ced37bea0647
/*
 * Xen misc
 *
 * Functions/decls that are/may be needed to link with Xen because
 * of x86 dependencies
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <linux/config.h>
#include <xen/sched.h>
#include <linux/efi.h>
#include <asm/processor.h>
#include <xen/serial.h>
#include <asm/io.h>
#include <xen/softirq.h>
#include <public/sched.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/vmx.h>
#include <asm/vmx_vcpu.h>

efi_memory_desc_t ia64_efi_io_md;
EXPORT_SYMBOL(ia64_efi_io_md);
unsigned long wait_init_idle;
int phys_proc_id[NR_CPUS];
unsigned long loops_per_jiffy = (1<<12);    // from linux/init/main.c
/* FIXME: where should these declarations live? */
extern void show_registers(struct pt_regs *regs);

void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
void ia64_mca_cpu_init(void *x) { }
void ia64_patch_mckinley_e9(unsigned long a, unsigned long b) { }
void ia64_patch_vtop(unsigned long a, unsigned long b) { }
void hpsim_setup(char **x)
{
#ifdef CONFIG_SMP
    init_smp_config();
#endif
}

// called from mem_init... don't think s/w I/O tlb is needed in Xen
//void swiotlb_init(void) { } ...looks like it IS needed
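/*
 * Detect the HP Ski simulator from the CPUID registers: the vendor string
 * in CPUID[0] must begin with 'H', and the number/revision/model/family
 * fields of CPUID[3] must match the values the simulator reports.
 */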
long
is_platform_hp_ski(void)
{
    int i;
    long cpuid[6];

    for (i = 0; i < 5; ++i)
        cpuid[i] = ia64_get_cpuid(i);
    if ((cpuid[0] & 0xff) != 'H') return 0;
    if ((cpuid[3] & 0xff) != 0x4) return 0;
    if (((cpuid[3] >> 8) & 0xff) != 0x0) return 0;
    if (((cpuid[3] >> 16) & 0xff) != 0x0) return 0;
    if (((cpuid[3] >> 24) & 0x7) != 0x7) return 0;
    return 1;
}

long
platform_is_hp_ski(void)
{
    extern long running_on_sim;
    return running_on_sim;
}

/* calls in xen/common code that are unused on ia64 */

void sync_lazy_execstate_cpu(unsigned int cpu) {}

#if 0
int grant_table_create(struct domain *d) { return 0; }
void grant_table_destroy(struct domain *d) { return; }
#endif

struct pt_regs *guest_cpu_user_regs(void) { return vcpu_regs(current); }

void raise_actimer_softirq(void)
{
    raise_softirq(TIMER_SOFTIRQ);
}

unsigned long
gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
{
#ifndef CONFIG_XEN_IA64_DOM0_VP
    if (d == dom0)
        return(gpfn);
    else
#endif
    {
        unsigned long pte = lookup_domain_mpa(d, gpfn << PAGE_SHIFT);
        if (!pte) {
            printk("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
            while(1);
            return 0;
        }
        return ((pte & _PFN_MASK) >> PAGE_SHIFT);
    }
}
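/*
 * Illustrative use only (hypothetical caller, not from this file): code
 * holding a guest frame number gpfn of a foreign domain d would translate
 * it before forming a machine address, e.g.
 *
 *     unsigned long mfn = gmfn_to_mfn_foreign(d, gpfn);
 *     unsigned long maddr = mfn << PAGE_SHIFT;
 */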
#if 0
u32
mfn_to_gmfn(struct domain *d, unsigned long frame)
{
    // FIXME: is this right?
    if ((frame << PAGE_SHIFT) & _PAGE_PPN_MASK) {
        printk("mfn_to_gmfn: bad frame. spinning...\n");
        while(1);
    }
    return frame;
}
#endif

///////////////////////////////
// from arch/x86/flushtlb.c
///////////////////////////////

u32 tlbflush_clock;
u32 tlbflush_time[NR_CPUS];

///////////////////////////////
// from arch/x86/memory.c
///////////////////////////////
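// Page-type validation is effectively unused on ia64: free_page_type()
// does nothing and alloc_page_type() always reports success. These exist
// to satisfy references from the common code further below.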
void free_page_type(struct page_info *page, u32 type)
{
//    dummy();
    return;
}

int alloc_page_type(struct page_info *page, u32 type)
{
//    dummy();
    return 1;
}

///////////////////////////////
//// misc memory stuff
///////////////////////////////

unsigned long __get_free_pages(unsigned int mask, unsigned int order)
{
    void *p = alloc_xenheap_pages(order);

    memset(p, 0, PAGE_SIZE << order);
    return (unsigned long)p;
}

void __free_pages(struct page_info *page, unsigned int order)
{
    if (order) BUG();
    free_xenheap_page(page);
}
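// Back the Linux-style page-table allocation helpers with xenheap pages:
// alloc returns a zeroed page (or NULL), free simply hands the page back.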
void *pgtable_quicklist_alloc(void)
{
    void *p;
    p = alloc_xenheap_pages(0);
    if (p)
        clear_page(p);
    return p;
}

void pgtable_quicklist_free(void *pgtable_entry)
{
    free_xenheap_page(pgtable_entry);
}

///////////////////////////////
// from arch/ia64/traps.c
///////////////////////////////

int is_kernel_text(unsigned long addr)
{
    extern char _stext[], _etext[];
    if (addr >= (unsigned long) _stext &&
        addr <= (unsigned long) _etext)
        return 1;

    return 0;
}

unsigned long kernel_text_end(void)
{
    extern char _etext[];
    return (unsigned long) _etext;
}

///////////////////////////////
// from common/keyhandler.c
///////////////////////////////
void dump_pageframe_info(struct domain *d)
{
    printk("dump_pageframe_info not implemented\n");
}

///////////////////////////////
// called from arch/ia64/head.S
///////////////////////////////

void console_print(char *msg)
{
    printk("console_print called, how did start_kernel return???\n");
}

void kernel_thread_helper(void)
{
    printk("kernel_thread_helper not implemented\n");
    dummy();
}

void sys_exit(void)
{
    printk("sys_exit not implemented\n");
    dummy();
}

////////////////////////////////////
// called from unaligned.c
////////////////////////////////////

void die_if_kernel(char *str, struct pt_regs *regs, long err) /* __attribute__ ((noreturn)) */
{
    if (user_mode(regs))
        return;

    printk("%s: %s %ld\n", __func__, str, err);
    debugtrace_dump();
    show_registers(regs);
    domain_crash_synchronous();
}

long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
           unsigned long user_rbs_end, unsigned long addr, long *val)
{
    printk("ia64_peek: called, not implemented\n");
    return 1;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
           unsigned long user_rbs_end, unsigned long addr, long val)
{
    printk("ia64_poke: called, not implemented\n");
    return 1;
}

void
ia64_sync_fph (struct task_struct *task)
{
    printk("ia64_sync_fph: called, not implemented\n");
}

void
ia64_flush_fph (struct task_struct *task)
{
    printk("ia64_flush_fph: called, not implemented\n");
}

////////////////////////////////////
// called from irq_ia64.c:init_IRQ()
//   (because CONFIG_IA64_HP_SIM is specified)
////////////////////////////////////
void hpsim_irq_init(void) { }
// accommodate linux extable.c
//const struct exception_table_entry *
void *search_module_extables(unsigned long addr) { return NULL; }
void *__module_text_address(unsigned long addr) { return NULL; }
void *module_text_address(unsigned long addr) { return NULL; }

void cs10foo(void) {}
void cs01foo(void) {}

unsigned long context_switch_count = 0;

#include <asm/vcpu.h>
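/*
 * Switch from vcpu prev to next. Interrupts are masked around the
 * low-level switch_to(). Afterwards a VMX domain reloads its region
 * registers; a paravirtualized domain gets the Xen IVT, PTA/VHPT and
 * region/kernel registers reinstalled (and any expired timer re-pended);
 * the idle domain just runs with the VHPT walker disabled.
 */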
void context_switch(struct vcpu *prev, struct vcpu *next)
{
    uint64_t spsr;
    uint64_t pta;

    local_irq_save(spsr);
//    if(VMX_DOMAIN(prev)){
//        vtm_domain_out(prev);
//    }
    context_switch_count++;
    switch_to(prev, next, prev);
//    if(VMX_DOMAIN(current)){
//        vtm_domain_in(current);
//    }

    // leave this debug for now: it acts as a heartbeat when more than
    // one domain is active
    {
        static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50};
        static int i = 100;
        int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
        if (!cnt[id]--) { printk("%x", id); cnt[id] = 500000; }
        if (!i--) { printk("+"); i = 1000000; }
    }

    if (VMX_DOMAIN(current)) {
//        vtm_domain_in(current);
        vmx_load_all_rr(current);
    } else {
        extern char ia64_ivt;
        ia64_set_iva(&ia64_ivt);
        if (!is_idle_domain(current->domain)) {
            ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                         VHPT_ENABLED);
            load_region_regs(current);
            vcpu_load_kernel_regs(current);
            if (vcpu_timer_expired(current))
                vcpu_pend_timer(current);
        } else {
            /* When switching to the idle domain, we only need to disable
             * the VHPT walker. All accesses made in the idle context are
             * then handled by the TR mapping and the identity mapping.
             */
            pta = ia64_get_pta();
            ia64_set_pta(pta & ~VHPT_ENABLED);
        }
    }

    local_irq_restore(spsr);
    context_saved(prev);
}
void continue_running(struct vcpu *same)
{
    /* nothing to do */
}

void arch_dump_domain_info(struct domain *d)
{
}

void panic_domain(struct pt_regs *regs, const char *fmt, ...)
{
    va_list args;
    char buf[128];
    struct vcpu *v = current;

    printf("$$$$$ PANIC in domain %d (k6=0x%lx): ",
           v->domain->domain_id,
           __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT]);
    va_start(args, fmt);
    (void)vsnprintf(buf, sizeof(buf), fmt, args);
    va_end(args);
    printf("%s", buf);    /* buf is already formatted; don't re-interpret '%' */
    if (regs) show_registers(regs);
    if (regs) {
        debugger_trap_fatal(0 /* don't care */, regs);
    } else {
        debugger_trap_immediate();
    }
    domain_crash_synchronous();
}
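/*
 * Illustrative call (hypothetical message, not from this file):
 *
 *     panic_domain(regs, "unhandled fault at 0x%lx\n", address);
 *
 * With a NULL regs pointer the function drops straight into the debugger
 * instead of dumping registers first.
 */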
///////////////////////////////
// from arch/x86/mm.c
///////////////////////////////

#ifdef VERBOSE
#define MEM_LOG(_f, _a...)                                  \
    printk("DOM%u: (file=mm.c, line=%d) " _f "\n",          \
           current->domain->domain_id , __LINE__ , ## _a )
#else
#define MEM_LOG(_f, _a...) ((void)0)
#endif

void cleanup_writable_pagetable(struct domain *d)
{
    return;
}

void put_page_type(struct page_info *page)
{
    u32 nx, x, y = page->u.inuse.type_info;

 again:
    do {
        x  = y;
        nx = x - 1;

        ASSERT((x & PGT_count_mask) != 0);

        /*
         * The page should always be validated while a reference is held. The
         * exception is during domain destruction, when we forcibly invalidate
         * page-table pages if we detect a referential loop.
         * See domain.c:relinquish_list().
         */
        ASSERT((x & PGT_validated) ||
               test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));

        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            /* Record TLB information for flush later. Races are harmless. */
            page->tlbflush_timestamp = tlbflush_current_time();

            if ( unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) &&
                 likely(nx & PGT_validated) )
            {
                /*
                 * Page-table pages must be unvalidated when count is zero. The
                 * 'free' is safe because the refcnt is non-zero and validated
                 * bit is clear => other ops will spin or fail.
                 */
                if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x,
                                           x & ~PGT_validated)) != x) )
                    goto again;
                /* We cleared the 'valid bit' so we do the clean up. */
                free_page_type(page, x);
                /* Carry on, but with the 'valid bit' now clear. */
                x  &= ~PGT_validated;
                nx &= ~PGT_validated;
            }
        }
        else if ( unlikely(((nx & (PGT_pinned | PGT_count_mask)) ==
                            (PGT_pinned | 1)) &&
                           ((nx & PGT_type_mask) != PGT_writable_page)) )
        {
            /* Page is now only pinned. Make the back pointer mutable again. */
            nx |= PGT_va_mutable;
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
}
int get_page_type(struct page_info *page, u32 type)
{
    u32 nx, x, y = page->u.inuse.type_info;

 again:
    do {
        x  = y;
        nx = x + 1;
        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
            return 0;
        }
        else if ( unlikely((x & PGT_count_mask) == 0) )
        {
            if ( (x & (PGT_type_mask|PGT_va_mask)) != type )
            {
                if ( (x & PGT_type_mask) != (type & PGT_type_mask) )
                {
                    /*
                     * On type change we check to flush stale TLB
                     * entries. This may be unnecessary (e.g., page
                     * was GDT/LDT) but those circumstances should be
                     * very rare.
                     */
                    cpumask_t mask =
                        page_get_owner(page)->domain_dirty_cpumask;
                    tlbflush_filter(mask, page->tlbflush_timestamp);

                    if ( unlikely(!cpus_empty(mask)) )
                    {
                        perfc_incrc(need_flush_tlb_flush);
                        flush_tlb_mask(mask);
                    }
                }

                /* We lose existing type, back pointer, and validity. */
                nx &= ~(PGT_type_mask | PGT_va_mask | PGT_validated);
                nx |= type;

                /* No special validation needed for writable pages. */
                /* Page tables and GDT/LDT need to be scanned for validity. */
                if ( type == PGT_writable_page )
                    nx |= PGT_validated;
            }
        }
        else
        {
            if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != type) )
            {
                if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) )
                {
                    if ( current->domain == page_get_owner(page) )
                    {
                        /*
                         * This ensures functions like set_gdt() see up-to-date
                         * type info without needing to clean up writable p.t.
                         * state on the fast path.
                         */
                        LOCK_BIGLOCK(current->domain);
                        cleanup_writable_pagetable(current->domain);
                        y = page->u.inuse.type_info;
                        UNLOCK_BIGLOCK(current->domain);
                        /* Can we make progress now? */
                        if ( ((y & PGT_type_mask) == (type & PGT_type_mask)) ||
                             ((y & PGT_count_mask) == 0) )
                            goto again;
                    }
                    if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
                         ((type & PGT_type_mask) != PGT_l1_page_table) )
                        MEM_LOG("Bad type (saw %08x != exp %08x) "
                                "for mfn %016lx (pfn %016lx)",
                                x, type, page_to_mfn(page),
                                get_gpfn_from_mfn(page_to_mfn(page)));
                    return 0;
                }
                else if ( (x & PGT_va_mask) == PGT_va_mutable )
                {
                    /* The va backpointer is mutable, hence we update it. */
                    nx &= ~PGT_va_mask;
                    nx |= type; /* we know the actual type is correct */
                }
                else if ( ((type & PGT_va_mask) != PGT_va_mutable) &&
                          ((type & PGT_va_mask) != (x & PGT_va_mask)) )
                {
#ifdef CONFIG_X86_PAE
                    /* We use backptr as extra typing. Cannot be unknown. */
                    if ( (type & PGT_type_mask) == PGT_l2_page_table )
                        return 0;
#endif
                    /* This table is possibly mapped at multiple locations. */
                    nx &= ~PGT_va_mask;
                    nx |= PGT_va_unknown;
                }
            }
            if ( unlikely(!(x & PGT_validated)) )
            {
                /* Someone else is updating validation of this page. Wait... */
                while ( (y = page->u.inuse.type_info) == x )
                    cpu_relax();
                goto again;
            }
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );

    if ( unlikely(!(nx & PGT_validated)) )
    {
        /* Try to validate page type; drop the new reference on failure. */
        if ( unlikely(!alloc_page_type(page, type)) )
        {
            MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %08x"
                    ": caf=%08x taf=%" PRtype_info,
                    page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)),
                    type, page->count_info, page->u.inuse.type_info);
            /* No one else can get a reference. We hold the only ref. */
            page->u.inuse.type_info = 0;
            return 0;
        }

        /* No one else is updating simultaneously. */
        __set_bit(_PGT_validated, &page->u.inuse.type_info);
    }

    return 1;
}
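/*
 * Illustrative pairing only (no such call site in this file): common code
 * that needs a page typed as writable takes and drops the type reference
 * around its use, e.g.
 *
 *     if ( get_page_type(page, PGT_writable_page) )
 *     {
 *         ... access the page ...
 *         put_page_type(page);
 *     }
 */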