direct-io.hg

view xen/arch/ia64/xen/xenmisc.c @ 8483:b4925703b56c

Missing initialization in cache sync code (by Anthony Xu)
author djm@kirby.fc.hp.com
date Fri Dec 30 12:55:19 2005 -0600 (2005-12-30)
parents 40648452d45f
children 45c4e735fc8c
/*
 * Xen misc
 *
 * Functions/decls that are/may be needed to link with Xen because
 * of x86 dependencies
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 * Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <linux/config.h>
#include <xen/sched.h>
#include <linux/efi.h>
#include <asm/processor.h>
#include <xen/serial.h>
#include <asm/io.h>
#include <xen/softirq.h>
#include <public/sched.h>
#include <asm/vhpt.h>

efi_memory_desc_t ia64_efi_io_md;
EXPORT_SYMBOL(ia64_efi_io_md);
unsigned long wait_init_idle;
int phys_proc_id[NR_CPUS];
unsigned long loops_per_jiffy = (1<<12);    // from linux/init/main.c
void unw_init(void) { printf("unw_init() skipped (NEED FOR KERNEL UNWIND)\n"); }
void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
void ia64_mca_cpu_init(void *x) { }
void ia64_patch_mckinley_e9(unsigned long a, unsigned long b) { }
void ia64_patch_vtop(unsigned long a, unsigned long b) { }
void hpsim_setup(char **x)
{
#ifdef CONFIG_SMP
    init_smp_config();
#endif
}

// called from mem_init... don't think s/w I/O tlb is needed in Xen
//void swiotlb_init(void) { } ...looks like it IS needed
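
/* Probe the CPUID registers for the signature reported by the HP Ski
 * simulator: the low byte of CPUID[0] (start of the vendor string) must
 * be 'H', and CPUID[3] must carry the expected number/revision/model/
 * family values.  Returns 1 on Ski, 0 on real hardware. */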
long
is_platform_hp_ski(void)
{
    int i;
    long cpuid[6];

    for (i = 0; i < 5; ++i)
        cpuid[i] = ia64_get_cpuid(i);
    if ((cpuid[0] & 0xff) != 'H') return 0;
    if ((cpuid[3] & 0xff) != 0x4) return 0;
    if (((cpuid[3] >> 8) & 0xff) != 0x0) return 0;
    if (((cpuid[3] >> 16) & 0xff) != 0x0) return 0;
    if (((cpuid[3] >> 24) & 0x7) != 0x7) return 0;
    return 1;
}
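
/* Unlike is_platform_hp_ski() above, this just reads the cached
 * running_on_sim flag rather than probing CPUID again. */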
long
platform_is_hp_ski(void)
{
    extern long running_on_sim;
    return running_on_sim;
}

/* calls in xen/common code that are unused on ia64 */

void sync_lazy_execstate_cpu(unsigned int cpu) {}

#if 0
int grant_table_create(struct domain *d) { return 0; }
void grant_table_destroy(struct domain *d) { return; }
#endif

struct pt_regs *guest_cpu_user_regs(void) { return vcpu_regs(current); }

void raise_actimer_softirq(void)
{
    raise_softirq(AC_TIMER_SOFTIRQ);
}
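
/* Translate a guest pseudo-physical frame number into a machine frame
 * number for a foreign domain.  dom0 is treated as identity-mapped;
 * for other domains the metaphysical address is looked up via
 * lookup_domain_mpa() and the PFN field of the resulting PTE is returned. */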
unsigned long
__gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
{
    if (d == dom0)
        return(gpfn);
    else {
        unsigned long pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT);
        if (!pte) {
            printk("__gpfn_to_mfn_foreign: bad gpfn. spinning...\n");
            while(1);
            return 0;
        }
        return ((pte & _PFN_MASK) >> PAGE_SHIFT);
    }
}

#if 0
u32
__mfn_to_gpfn(struct domain *d, unsigned long frame)
{
    // FIXME: is this right?
    if ((frame << PAGE_SHIFT) & _PAGE_PPN_MASK) {
        printk("__mfn_to_gpfn: bad frame. spinning...\n");
        while(1);
    }
    return frame;
}
#endif

///////////////////////////////
// from arch/ia64/page_alloc.c
///////////////////////////////
DEFINE_PER_CPU(struct page_state, page_states) = {0};
unsigned long totalram_pages;
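
/* Adjust one counter of the per-CPU page_states structure; offset is a
 * byte offset into struct page_state.  Interrupts are disabled around
 * the read-modify-write so the update is atomic on the local CPU. */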
void __mod_page_state(unsigned long offset, unsigned long delta)
{
    unsigned long flags;
    void* ptr;

    local_irq_save(flags);
    ptr = &__get_cpu_var(page_states);
    *(unsigned long*)(ptr + offset) += delta;
    local_irq_restore(flags);
}

///////////////////////////////
// from arch/x86/flushtlb.c
///////////////////////////////

u32 tlbflush_clock;
u32 tlbflush_time[NR_CPUS];

///////////////////////////////
// from arch/x86/memory.c
///////////////////////////////

void init_percpu_info(void)
{
    dummy();
    //memset(percpu_info, 0, sizeof(percpu_info));
}

#if 0
void free_page_type(struct pfn_info *page, unsigned int type)
{
    dummy();
}
#endif

///////////////////////////////
//// misc memory stuff
///////////////////////////////
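
/* Thin Linux-style wrappers around the Xen heap allocator, so code
 * shared with Linux can keep calling __get_free_pages()/__free_pages()
 * and the pgtable quicklist helpers. */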
unsigned long __get_free_pages(unsigned int mask, unsigned int order)
{
    void *p = alloc_xenheap_pages(order);

    memset(p,0,PAGE_SIZE<<order);
    return (unsigned long)p;
}

void __free_pages(struct page *page, unsigned int order)
{
    if (order) BUG();
    free_xenheap_page(page);
}

void *pgtable_quicklist_alloc(void)
{
    return alloc_xenheap_pages(0);
}

void pgtable_quicklist_free(void *pgtable_entry)
{
    free_xenheap_page(pgtable_entry);
}

///////////////////////////////
// from arch/ia64/traps.c
///////////////////////////////

void show_registers(struct pt_regs *regs)
{
    printf("*** ADD REGISTER DUMP HERE FOR DEBUGGING\n");
}

int is_kernel_text(unsigned long addr)
{
    extern char _stext[], _etext[];
    if (addr >= (unsigned long) _stext &&
        addr <= (unsigned long) _etext)
        return 1;

    return 0;
}

unsigned long kernel_text_end(void)
{
    extern char _etext[];
    return (unsigned long) _etext;
}

///////////////////////////////
// from common/keyhandler.c
///////////////////////////////
void dump_pageframe_info(struct domain *d)
{
    printk("dump_pageframe_info not implemented\n");
}

///////////////////////////////
// called from arch/ia64/head.S
///////////////////////////////

void console_print(char *msg)
{
    printk("console_print called, how did start_kernel return???\n");
}

void kernel_thread_helper(void)
{
    printk("kernel_thread_helper not implemented\n");
    dummy();
}

void sys_exit(void)
{
    printk("sys_exit not implemented\n");
    dummy();
}

////////////////////////////////////
// called from unaligned.c
////////////////////////////////////

void die_if_kernel(char *str, struct pt_regs *regs, long err) /* __attribute__ ((noreturn)) */
{
    printk("die_if_kernel: called, not implemented\n");
}
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
           unsigned long user_rbs_end, unsigned long addr, long *val)
{
    printk("ia64_peek: called, not implemented\n");
    return 0;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
           unsigned long user_rbs_end, unsigned long addr, long val)
{
    printk("ia64_poke: called, not implemented\n");
    return 0;
}
void
ia64_sync_fph (struct task_struct *task)
{
    printk("ia64_sync_fph: called, not implemented\n");
}

void
ia64_flush_fph (struct task_struct *task)
{
    printk("ia64_flush_fph: called, not implemented\n");
}

////////////////////////////////////
// called from irq_ia64.c:init_IRQ()
// (because CONFIG_IA64_HP_SIM is specified)
////////////////////////////////////
void hpsim_irq_init(void) { }
// accommodate linux extable.c
//const struct exception_table_entry *
void *search_module_extables(unsigned long addr) { return NULL; }
void *__module_text_address(unsigned long addr) { return NULL; }
void *module_text_address(unsigned long addr) { return NULL; }

void cs10foo(void) {}
void cs01foo(void) {}
unsigned long context_switch_count = 0;

#include <asm/vcpu.h>
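
/* Arch-specific context switch.  For VMX (VT-i, fully virtualized) vcpus
 * the virtual timer state is saved on the way out and restored on the way
 * in; paravirtualized vcpus instead get the Xen IVT and VHPT base (iva,
 * pta) reinstalled and their region and kernel registers reloaded, unless
 * the incoming domain is the idle domain. */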
void context_switch(struct vcpu *prev, struct vcpu *next)
{
//printk("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
//printk("@@@@@@ context switch from domain %d (%x) to domain %d (%x)\n",
//prev->domain->domain_id,(long)prev&0xffffff,next->domain->domain_id,(long)next&0xffffff);
//if (prev->domain->domain_id == 1 && next->domain->domain_id == 0) cs10foo();
//if (prev->domain->domain_id == 0 && next->domain->domain_id == 1) cs01foo();
//printk("@@sw%d/%x %d->%d\n",smp_processor_id(), hard_smp_processor_id (),
//       prev->domain->domain_id,next->domain->domain_id);
    if(VMX_DOMAIN(prev)){
        vtm_domain_out(prev);
    }
    context_switch_count++;
    switch_to(prev,next,prev);
    if(VMX_DOMAIN(current)){
        vtm_domain_in(current);
    }
    // leave this debug for now: it acts as a heartbeat when more than
    // one domain is active
    {
    static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50};
    static int i = 100;
    int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
    if (!cnt[id]--) { printk("%x",id); cnt[id] = 500000; }
    if (!i--) { printk("+"); i = 1000000; }
    }
    if (VMX_DOMAIN(current)){
        vmx_load_all_rr(current);
    }else{
        extern char ia64_ivt;
        ia64_set_iva(&ia64_ivt);
        ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
            VHPT_ENABLED);
        if (!is_idle_task(current->domain)) {
            load_region_regs(current);
            vcpu_load_kernel_regs(current);
            if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
        }
        if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
    }
}

void context_switch_finalise(struct vcpu *next)
{
    /* nothing to do */
}

void continue_running(struct vcpu *same)
{
    /* nothing to do */
}
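
/* Per-domain panic: print the message and register dump, pause the
 * offending domain and mark it crashed.  If the panic came from domain 0
 * there is nothing left to run, so spin and periodically reprint the
 * message to make post-mortem debugging easier. */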
void panic_domain(struct pt_regs *regs, const char *fmt, ...)
{
    va_list args;
    char buf[128];
    struct vcpu *v = current;
    static volatile int test = 1;   // so can continue easily in debug
    extern spinlock_t console_lock;
    unsigned long flags;

loop:
    printf("$$$$$ PANIC in domain %d (k6=%p): ",
        v->domain->domain_id,
        __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT]);
    va_start(args, fmt);
    (void)vsnprintf(buf, sizeof(buf), fmt, args);
    va_end(args);
    printf("%s", buf);
    if (regs) show_registers(regs);
    domain_pause_by_systemcontroller(current->domain);
    v->domain->shutdown_code = SHUTDOWN_crash;
    set_bit(_DOMF_shutdown, &v->domain->domain_flags);
    if (v->domain->domain_id == 0) {
        int i = 1000000000L;
        // if domain0 crashes, just periodically print out panic
        // message to make post-mortem easier
        while(i--);
        goto loop;
    }
}
/* FIXME: for the foreseeable future, all cpu's that enable VTi have split
 * caches and all cpu's that have split caches enable VTi.  This may
 * eventually be untrue though. */
#define cpu_has_split_cache	vmx_enabled
extern unsigned int vmx_enabled;
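
/* Flush the d/i caches on CPUs that keep them split (i.e. VT-i capable
 * parts, per the FIXME above).  Note that `progress' is initialized
 * before being handed to ia64_pal_cache_flush() -- apparently the
 * missing initialization this changeset's description refers to. */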
void sync_split_caches(void)
{
    unsigned long ret, progress = 0;

    if (cpu_has_split_cache) {
        /* Sync d/i cache conservatively */
        ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
        if ((ret != PAL_STATUS_SUCCESS) && (ret != PAL_STATUS_UNIMPLEMENTED))
            printk("PAL CACHE FLUSH failed\n");
        else printk("Sync i/d cache for guest SUCC\n");
    }
    else printk("sync_split_caches ignored for CPU with no split cache\n");
}