
xen/arch/ia64/xen/xenmisc.c @ 9686:96bc87dd7ca9

[IA64] get rid of sync_split_cache

Get rid of sync_split_cache.
Use flush_icache_range and ia64_fc instead.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Fri Apr 14 14:20:04 2006 -0600 (2006-04-14)
parents 99e1c7f276ea
children 14a34d811e81
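
A minimal sketch of the replacement pattern named in the commit message, for orientation only: flush_icache_range() and ia64_fc() are the ia64 primitives referred to above, but the helper names (example_*) and header paths below are assumptions taken from the Linux ia64 tree, not code from this changeset.

/* Illustrative only: how flush_icache_range() and ia64_fc() are typically
 * used on ia64 to make freshly written instructions visible to the I-cache.
 * Header locations are assumed from the Linux ia64 tree. */
#include <asm/cacheflush.h>    /* flush_icache_range() */
#include <asm/intrinsics.h>    /* ia64_fc(), ia64_sync_i(), ia64_srlz_i() */

/* Flush a whole range after copying instructions into it. */
static void example_sync_icache_range(void *dst, unsigned long len)
{
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}

/* Flush a single cache line, then synchronize and serialize the
 * instruction stream. */
static void example_sync_one_line(void *addr)
{
	ia64_fc(addr);
	ia64_sync_i();
	ia64_srlz_i();
}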
/*
 * Xen misc
 *
 * Functions/decls that are/may be needed to link with Xen because
 * of x86 dependencies
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 * Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <linux/config.h>
#include <xen/sched.h>
#include <linux/efi.h>
#include <asm/processor.h>
#include <xen/serial.h>
#include <asm/io.h>
#include <xen/softirq.h>
#include <public/sched.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/vmx.h>
#include <asm/vmx_vcpu.h>

efi_memory_desc_t ia64_efi_io_md;
EXPORT_SYMBOL(ia64_efi_io_md);
unsigned long wait_init_idle;
int phys_proc_id[NR_CPUS];
unsigned long loops_per_jiffy = (1<<12);	// from linux/init/main.c

/* FIXME: where should these declarations go? */
extern void show_registers(struct pt_regs *regs);
void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
void ia64_mca_cpu_init(void *x) { }
void ia64_patch_mckinley_e9(unsigned long a, unsigned long b) { }
void ia64_patch_vtop(unsigned long a, unsigned long b) { }
void hpsim_setup(char **x)
{
#ifdef CONFIG_SMP
	init_smp_config();
#endif
}

// called from mem_init... don't think s/w I/O tlb is needed in Xen
//void swiotlb_init(void) { } ...looks like it IS needed
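/*
 * Added note (not in the original file): the check below is a heuristic for
 * the HP Ski simulator. On ia64, CPUID[0..1] hold the vendor string and
 * CPUID[3] holds the version fields (number, revision, model, family); Ski
 * is recognised by a vendor string starting with 'H' and the version values
 * tested below (number 4, revision 0, model 0, family 7).
 */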
long
is_platform_hp_ski(void)
{
	int i;
	long cpuid[6];

	for (i = 0; i < 5; ++i)
		cpuid[i] = ia64_get_cpuid(i);
	if ((cpuid[0] & 0xff) != 'H') return 0;
	if ((cpuid[3] & 0xff) != 0x4) return 0;
	if (((cpuid[3] >> 8) & 0xff) != 0x0) return 0;
	if (((cpuid[3] >> 16) & 0xff) != 0x0) return 0;
	if (((cpuid[3] >> 24) & 0x7) != 0x7) return 0;
	return 1;
}

long
platform_is_hp_ski(void)
{
	extern long running_on_sim;
	return running_on_sim;
}

/* calls in xen/common code that are unused on ia64 */

void sync_lazy_execstate_cpu(unsigned int cpu) {}

#if 0
int grant_table_create(struct domain *d) { return 0; }
void grant_table_destroy(struct domain *d) { return; }
#endif

struct pt_regs *guest_cpu_user_regs(void) { return vcpu_regs(current); }

void raise_actimer_softirq(void)
{
	raise_softirq(TIMER_SOFTIRQ);
}

unsigned long
gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
{
	if (d == dom0)
		return(gpfn);
	else {
		unsigned long pte = lookup_domain_mpa(d, gpfn << PAGE_SHIFT);
		if (!pte) {
			printk("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
			while(1);
			return 0;
		}
		return ((pte & _PFN_MASK) >> PAGE_SHIFT);
	}
}

#if 0
u32
mfn_to_gmfn(struct domain *d, unsigned long frame)
{
	// FIXME: is this right?
	if ((frame << PAGE_SHIFT) & _PAGE_PPN_MASK) {
		printk("mfn_to_gmfn: bad frame. spinning...\n");
		while(1);
	}
	return frame;
}
#endif
///////////////////////////////
// from arch/x86/flushtlb.c
///////////////////////////////

u32 tlbflush_clock;
u32 tlbflush_time[NR_CPUS];

///////////////////////////////
// from arch/x86/memory.c
///////////////////////////////

void free_page_type(struct page_info *page, u32 type)
{
//	dummy();
	return;
}

int alloc_page_type(struct page_info *page, u32 type)
{
//	dummy();
	return 1;
}

///////////////////////////////
//// misc memory stuff
///////////////////////////////

unsigned long __get_free_pages(unsigned int mask, unsigned int order)
{
	void *p = alloc_xenheap_pages(order);

	memset(p, 0, PAGE_SIZE << order);
	return (unsigned long)p;
}

void __free_pages(struct page_info *page, unsigned int order)
{
	if (order) BUG();
	free_xenheap_page(page);
}

void *pgtable_quicklist_alloc(void)
{
	void *p;
	p = alloc_xenheap_pages(0);
	if (p)
		clear_page(p);
	return p;
}

void pgtable_quicklist_free(void *pgtable_entry)
{
	free_xenheap_page(pgtable_entry);
}
///////////////////////////////
// from arch/ia64/traps.c
///////////////////////////////

int is_kernel_text(unsigned long addr)
{
	extern char _stext[], _etext[];
	if (addr >= (unsigned long) _stext &&
	    addr <= (unsigned long) _etext)
		return 1;

	return 0;
}

unsigned long kernel_text_end(void)
{
	extern char _etext[];
	return (unsigned long) _etext;
}

///////////////////////////////
// from common/keyhandler.c
///////////////////////////////
void dump_pageframe_info(struct domain *d)
{
	printk("dump_pageframe_info not implemented\n");
}

///////////////////////////////
// called from arch/ia64/head.S
///////////////////////////////

void console_print(char *msg)
{
	printk("console_print called, how did start_kernel return???\n");
}

void kernel_thread_helper(void)
{
	printk("kernel_thread_helper not implemented\n");
	dummy();
}

void sys_exit(void)
{
	printk("sys_exit not implemented\n");
	dummy();
}
////////////////////////////////////
// called from unaligned.c
////////////////////////////////////

void die_if_kernel(char *str, struct pt_regs *regs, long err) /* __attribute__ ((noreturn)) */
{
	if (user_mode(regs))
		return;

	printk("%s: %s %ld\n", __func__, str, err);
	debugtrace_dump();
	show_registers(regs);
	domain_crash_synchronous();
}

long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	printk("ia64_peek: called, not implemented\n");
	return 1;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	printk("ia64_poke: called, not implemented\n");
	return 1;
}

void
ia64_sync_fph (struct task_struct *task)
{
	printk("ia64_sync_fph: called, not implemented\n");
}

void
ia64_flush_fph (struct task_struct *task)
{
	printk("ia64_flush_fph: called, not implemented\n");
}

////////////////////////////////////
// called from irq_ia64.c:init_IRQ()
// (because CONFIG_IA64_HP_SIM is specified)
////////////////////////////////////
void hpsim_irq_init(void) { }
// accommodate linux extable.c
//const struct exception_table_entry *
void *search_module_extables(unsigned long addr) { return NULL; }
void *__module_text_address(unsigned long addr) { return NULL; }
void *module_text_address(unsigned long addr) { return NULL; }

void cs10foo(void) {}
void cs01foo(void) {}

unsigned long context_switch_count = 0;

#include <asm/vcpu.h>
void context_switch(struct vcpu *prev, struct vcpu *next)
{
	uint64_t spsr;
	uint64_t pta;

	local_irq_save(spsr);
//	if (VMX_DOMAIN(prev)) {
//		vtm_domain_out(prev);
//	}
	context_switch_count++;
	switch_to(prev, next, prev);
//	if (VMX_DOMAIN(current)) {
//		vtm_domain_in(current);
//	}

	// leave this debug for now: it acts as a heartbeat when more than
	// one domain is active
	{
		static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50 };
		static int i = 100;
		int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
		if (!cnt[id]--) { printk("%x", id); cnt[id] = 500000; }
		if (!i--) { printk("+"); i = 1000000; }
	}

	if (VMX_DOMAIN(current)) {
//		vtm_domain_in(current);
		vmx_load_all_rr(current);
	} else {
		extern char ia64_ivt;
		ia64_set_iva(&ia64_ivt);
		if (!is_idle_domain(current->domain)) {
			ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
				     VHPT_ENABLED);
			load_region_regs(current);
			vcpu_load_kernel_regs(current);
			if (vcpu_timer_expired(current))
				vcpu_pend_timer(current);
		} else {
			/* When switching to the idle domain, we only need to
			 * disable the VHPT walker. All accesses that happen
			 * within the idle context are then handled by TR
			 * mappings and the identity mapping.
			 */
			pta = ia64_get_pta();
			ia64_set_pta(pta & ~VHPT_ENABLED);
		}
	}

	local_irq_restore(spsr);
	context_saved(prev);
}
void continue_running(struct vcpu *same)
{
	/* nothing to do */
}

void arch_dump_domain_info(struct domain *d)
{
}
void panic_domain(struct pt_regs *regs, const char *fmt, ...)
{
	va_list args;
	char buf[128];
	struct vcpu *v = current;

	printf("$$$$$ PANIC in domain %d (k6=0x%lx): ",
	       v->domain->domain_id,
	       __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT]);
	va_start(args, fmt);
	(void)vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printf("%s", buf);	/* print the formatted message, not buf as a format string */
	if (regs) show_registers(regs);
	if (regs) {
		debugger_trap_fatal(0 /* don't care */, regs);
	} else {
		debugger_trap_immediate();
	}
	domain_crash_synchronous ();
}
///////////////////////////////
// from arch/x86/mm.c
///////////////////////////////

#ifdef VERBOSE
#define MEM_LOG(_f, _a...)                                      \
  printk("DOM%u: (file=mm.c, line=%d) " _f "\n",                \
         current->domain->domain_id , __LINE__ , ## _a )
#else
#define MEM_LOG(_f, _a...) ((void)0)
#endif

void cleanup_writable_pagetable(struct domain *d)
{
	return;
}
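/*
 * Added commentary (not in the original x86 code): page->u.inuse.type_info
 * packs three things: the current type in PGT_type_mask, flag bits such as
 * PGT_validated/PGT_pinned/PGT_va_*, and a reference count in PGT_count_mask.
 * get_page_type() takes a typed reference (validating the page via
 * alloc_page_type() when the count rises from zero), and put_page_type()
 * drops it (unvalidating and calling free_page_type() when the count returns
 * to zero). Both update type_info with cmpxchg() retry loops, so no lock is
 * needed.
 */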
void put_page_type(struct page_info *page)
{
    u32 nx, x, y = page->u.inuse.type_info;

 again:
    do {
        x  = y;
        nx = x - 1;

        ASSERT((x & PGT_count_mask) != 0);

        /*
         * The page should always be validated while a reference is held. The
         * exception is during domain destruction, when we forcibly invalidate
         * page-table pages if we detect a referential loop.
         * See domain.c:relinquish_list().
         */
        ASSERT((x & PGT_validated) ||
               test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));

        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            /* Record TLB information for flush later. Races are harmless. */
            page->tlbflush_timestamp = tlbflush_current_time();

            if ( unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) &&
                 likely(nx & PGT_validated) )
            {
                /*
                 * Page-table pages must be unvalidated when count is zero. The
                 * 'free' is safe because the refcnt is non-zero and validated
                 * bit is clear => other ops will spin or fail.
                 */
                if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x,
                                           x & ~PGT_validated)) != x) )
                    goto again;
                /* We cleared the 'valid bit' so we do the clean up. */
                free_page_type(page, x);
                /* Carry on, but with the 'valid bit' now clear. */
                x  &= ~PGT_validated;
                nx &= ~PGT_validated;
            }
        }
        else if ( unlikely(((nx & (PGT_pinned | PGT_count_mask)) ==
                            (PGT_pinned | 1)) &&
                           ((nx & PGT_type_mask) != PGT_writable_page)) )
        {
            /* Page is now only pinned. Make the back pointer mutable again. */
            nx |= PGT_va_mutable;
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
}
int get_page_type(struct page_info *page, u32 type)
{
    u32 nx, x, y = page->u.inuse.type_info;

 again:
    do {
        x  = y;
        nx = x + 1;
        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
            return 0;
        }
        else if ( unlikely((x & PGT_count_mask) == 0) )
        {
            if ( (x & (PGT_type_mask|PGT_va_mask)) != type )
            {
                if ( (x & PGT_type_mask) != (type & PGT_type_mask) )
                {
                    /*
                     * On type change we check to flush stale TLB
                     * entries. This may be unnecessary (e.g., page
                     * was GDT/LDT) but those circumstances should be
                     * very rare.
                     */
                    cpumask_t mask =
                        page_get_owner(page)->domain_dirty_cpumask;
                    tlbflush_filter(mask, page->tlbflush_timestamp);

                    if ( unlikely(!cpus_empty(mask)) )
                    {
                        perfc_incrc(need_flush_tlb_flush);
                        flush_tlb_mask(mask);
                    }
                }

                /* We lose existing type, back pointer, and validity. */
                nx &= ~(PGT_type_mask | PGT_va_mask | PGT_validated);
                nx |= type;

                /* No special validation needed for writable pages. */
                /* Page tables and GDT/LDT need to be scanned for validity. */
                if ( type == PGT_writable_page )
                    nx |= PGT_validated;
            }
        }
        else
        {
            if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != type) )
            {
                if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask)) )
                {
                    if ( current->domain == page_get_owner(page) )
                    {
                        /*
                         * This ensures functions like set_gdt() see up-to-date
                         * type info without needing to clean up writable p.t.
                         * state on the fast path.
                         */
                        LOCK_BIGLOCK(current->domain);
                        cleanup_writable_pagetable(current->domain);
                        y = page->u.inuse.type_info;
                        UNLOCK_BIGLOCK(current->domain);
                        /* Can we make progress now? */
                        if ( ((y & PGT_type_mask) == (type & PGT_type_mask)) ||
                             ((y & PGT_count_mask) == 0) )
                            goto again;
                    }
                    if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
                         ((type & PGT_type_mask) != PGT_l1_page_table) )
                        MEM_LOG("Bad type (saw %08x != exp %08x) "
                                "for mfn %016lx (pfn %016lx)",
                                x, type, page_to_mfn(page),
                                get_gpfn_from_mfn(page_to_mfn(page)));
                    return 0;
                }
                else if ( (x & PGT_va_mask) == PGT_va_mutable )
                {
                    /* The va backpointer is mutable, hence we update it. */
                    nx &= ~PGT_va_mask;
                    nx |= type; /* we know the actual type is correct */
                }
                else if ( ((type & PGT_va_mask) != PGT_va_mutable) &&
                          ((type & PGT_va_mask) != (x & PGT_va_mask)) )
                {
#ifdef CONFIG_X86_PAE
                    /* We use backptr as extra typing. Cannot be unknown. */
                    if ( (type & PGT_type_mask) == PGT_l2_page_table )
                        return 0;
#endif
                    /* This table is possibly mapped at multiple locations. */
                    nx &= ~PGT_va_mask;
                    nx |= PGT_va_unknown;
                }
            }
            if ( unlikely(!(x & PGT_validated)) )
            {
                /* Someone else is updating validation of this page. Wait... */
                while ( (y = page->u.inuse.type_info) == x )
                    cpu_relax();
                goto again;
            }
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );

    if ( unlikely(!(nx & PGT_validated)) )
    {
        /* Try to validate page type; drop the new reference on failure. */
        if ( unlikely(!alloc_page_type(page, type)) )
        {
            MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %08x"
                    ": caf=%08x taf=%" PRtype_info,
                    page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)),
                    type, page->count_info, page->u.inuse.type_info);
            /* No one else can get a reference. We hold the only ref. */
            page->u.inuse.type_info = 0;
            return 0;
        }

        /* No one else is updating simultaneously. */
        __set_bit(_PGT_validated, &page->u.inuse.type_info);
    }

    return 1;
}