ia64/xen-unstable

xen/arch/ia64/xen/xenmisc.c @ 8655:b1b9049c4eb2

Following rev 8517 (Reduce locked critical region in __enter_scheduler()),
interrupts are now enabled before the context switch, so the arch-specific
context_switch stub needs to disable interrupts itself.

This fixes some intermittent corruption.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Jan 25 11:44:39 2006 +0100 (2006-01-25)
parents 34f2b388beb0
children 0c94043f5c5b
/*
 * Xen misc
 *
 * Functions/decls that are/may be needed to link with Xen because
 * of x86 dependencies
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 * Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <linux/config.h>
#include <xen/sched.h>
#include <linux/efi.h>
#include <asm/processor.h>
#include <xen/serial.h>
#include <asm/io.h>
#include <xen/softirq.h>
#include <public/sched.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>

efi_memory_desc_t ia64_efi_io_md;
EXPORT_SYMBOL(ia64_efi_io_md);
unsigned long wait_init_idle;
int phys_proc_id[NR_CPUS];
unsigned long loops_per_jiffy = (1<<12); // from linux/init/main.c

void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
void ia64_mca_cpu_init(void *x) { }
void ia64_patch_mckinley_e9(unsigned long a, unsigned long b) { }
void ia64_patch_vtop(unsigned long a, unsigned long b) { }
void hpsim_setup(char **x)
{
#ifdef CONFIG_SMP
        init_smp_config();
#endif
}

// called from mem_init... don't think s/w I/O tlb is needed in Xen
//void swiotlb_init(void) { } ...looks like it IS needed
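/*
 * Heuristic check of the CPUID registers (vendor byte and version fields)
 * to decide whether we are running on the HP Ski simulator.
 */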
long
is_platform_hp_ski(void)
{
        int i;
        long cpuid[6];

        for (i = 0; i < 5; ++i)
                cpuid[i] = ia64_get_cpuid(i);
        if ((cpuid[0] & 0xff) != 'H') return 0;
        if ((cpuid[3] & 0xff) != 0x4) return 0;
        if (((cpuid[3] >> 8) & 0xff) != 0x0) return 0;
        if (((cpuid[3] >> 16) & 0xff) != 0x0) return 0;
        if (((cpuid[3] >> 24) & 0x7) != 0x7) return 0;
        return 1;
}

long
platform_is_hp_ski(void)
{
        extern long running_on_sim;
        return running_on_sim;
}

/* calls in xen/common code that are unused on ia64 */

void sync_lazy_execstate_cpu(unsigned int cpu) {}

#if 0
int grant_table_create(struct domain *d) { return 0; }
void grant_table_destroy(struct domain *d) { return; }
#endif

struct pt_regs *guest_cpu_user_regs(void) { return vcpu_regs(current); }

void raise_actimer_softirq(void)
{
        raise_softirq(TIMER_SOFTIRQ);
}
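/*
 * Translate a guest pfn of a foreign domain to a machine frame number.
 * dom0 is identity-mapped; other domains go through lookup_domain_mpa().
 */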
unsigned long
__gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
{
        if (d == dom0)
                return(gpfn);
        else {
                unsigned long pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT);
                if (!pte) {
                        printk("__gpfn_to_mfn_foreign: bad gpfn. spinning...\n");
                        while(1);
                        return 0;
                }
                return ((pte & _PFN_MASK) >> PAGE_SHIFT);
        }
}
#if 0
u32
__mfn_to_gpfn(struct domain *d, unsigned long frame)
{
        // FIXME: is this right?
        if ((frame << PAGE_SHIFT) & _PAGE_PPN_MASK) {
                printk("__mfn_to_gpfn: bad frame. spinning...\n");
                while(1);
        }
        return frame;
}
#endif

///////////////////////////////
// from arch/ia64/page_alloc.c
///////////////////////////////
DEFINE_PER_CPU(struct page_state, page_states) = {0};
unsigned long totalram_pages;
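/*
 * Adjust the per-CPU page_state counter at the given byte offset,
 * with local interrupts disabled while the counter is updated.
 */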
void __mod_page_state(unsigned long offset, unsigned long delta)
{
        unsigned long flags;
        void* ptr;

        local_irq_save(flags);
        ptr = &__get_cpu_var(page_states);
        *(unsigned long*)(ptr + offset) += delta;
        local_irq_restore(flags);
}

///////////////////////////////
// from arch/x86/flushtlb.c
///////////////////////////////

u32 tlbflush_clock;
u32 tlbflush_time[NR_CPUS];

///////////////////////////////
// from arch/x86/memory.c
///////////////////////////////

void init_percpu_info(void)
{
        dummy();
        //memset(percpu_info, 0, sizeof(percpu_info));
}

#if 0
void free_page_type(struct pfn_info *page, unsigned int type)
{
        dummy();
}
#endif

///////////////////////////////
//// misc memory stuff
///////////////////////////////
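/*
 * Thin wrappers that map the Linux page allocation interfaces onto
 * the Xen heap allocator.
 */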
unsigned long __get_free_pages(unsigned int mask, unsigned int order)
{
        void *p = alloc_xenheap_pages(order);

        memset(p,0,PAGE_SIZE<<order);
        return (unsigned long)p;
}

void __free_pages(struct page *page, unsigned int order)
{
        if (order) BUG();
        free_xenheap_page(page);
}

void *pgtable_quicklist_alloc(void)
{
        return alloc_xenheap_pages(0);
}

void pgtable_quicklist_free(void *pgtable_entry)
{
        free_xenheap_page(pgtable_entry);
}

///////////////////////////////
// from arch/ia64/traps.c
///////////////////////////////
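/*
 * Report whether addr lies within Xen's own text section (_stext.._etext).
 */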
int is_kernel_text(unsigned long addr)
{
        extern char _stext[], _etext[];
        if (addr >= (unsigned long) _stext &&
            addr <= (unsigned long) _etext)
                return 1;

        return 0;
}

unsigned long kernel_text_end(void)
{
        extern char _etext[];
        return (unsigned long) _etext;
}

///////////////////////////////
// from common/keyhandler.c
///////////////////////////////
void dump_pageframe_info(struct domain *d)
{
        printk("dump_pageframe_info not implemented\n");
}

int nmi_count(int x) { return x; }

///////////////////////////////
// called from arch/ia64/head.S
///////////////////////////////

void console_print(char *msg)
{
        printk("console_print called, how did start_kernel return???\n");
}

void kernel_thread_helper(void)
{
        printk("kernel_thread_helper not implemented\n");
        dummy();
}

void sys_exit(void)
{
        printk("sys_exit not implemented\n");
        dummy();
}

////////////////////////////////////
// called from unaligned.c
////////////////////////////////////
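/*
 * If the fault happened in user (guest) mode, just return; otherwise
 * dump state and crash the offending domain synchronously.
 */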
void die_if_kernel(char *str, struct pt_regs *regs, long err) /* __attribute__ ((noreturn)) */
{
        if (user_mode(regs))
                return;

        printk("%s: %s %ld\n", __func__, str, err);
        debugtrace_dump();
        show_registers(regs);
        domain_crash_synchronous();
}
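/* Stubs for the Linux ptrace/fph helpers; not implemented under Xen. */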
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
           unsigned long user_rbs_end, unsigned long addr, long *val)
{
        printk("ia64_peek: called, not implemented\n");
        return 0;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
           unsigned long user_rbs_end, unsigned long addr, long val)
{
        printk("ia64_poke: called, not implemented\n");
        return 0;
}

void
ia64_sync_fph (struct task_struct *task)
{
        printk("ia64_sync_fph: called, not implemented\n");
}

void
ia64_flush_fph (struct task_struct *task)
{
        printk("ia64_flush_fph: called, not implemented\n");
}

////////////////////////////////////
// called from irq_ia64.c:init_IRQ()
// (because CONFIG_IA64_HP_SIM is specified)
////////////////////////////////////
void hpsim_irq_init(void) { }


// accommodate linux extable.c
//const struct exception_table_entry *
void *search_module_extables(unsigned long addr) { return NULL; }
void *__module_text_address(unsigned long addr) { return NULL; }
void *module_text_address(unsigned long addr) { return NULL; }

void cs10foo(void) {}
void cs01foo(void) {}

unsigned long context_switch_count = 0;

#include <asm/vcpu.h>
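/*
 * Since rev 8517 reduced the locked critical region in __enter_scheduler(),
 * the common scheduler calls context_switch() with interrupts enabled, so
 * this arch-specific path disables them itself around the actual switch.
 */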
void context_switch(struct vcpu *prev, struct vcpu *next)
{
        uint64_t spsr;

        local_irq_save(spsr);
        if(VMX_DOMAIN(prev)){
                vtm_domain_out(prev);
        }
        context_switch_count++;
        switch_to(prev,next,prev);
        if(VMX_DOMAIN(current)){
                vtm_domain_in(current);
        }

        // leave this debug for now: it acts as a heartbeat when more than
        // one domain is active
        {
        static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50};
        static int i = 100;
        int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
        if (!cnt[id]--) { printk("%x",id); cnt[id] = 500000; }
        if (!i--) { printk("+"); i = 1000000; }
        }

        if (VMX_DOMAIN(current)){
                vmx_load_all_rr(current);
        }else{
                extern char ia64_ivt;
                ia64_set_iva(&ia64_ivt);
                ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                             VHPT_ENABLED);
                if (!is_idle_domain(current->domain)) {
                        load_region_regs(current);
                        vcpu_load_kernel_regs(current);
                        if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
                }
                if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
        }

        local_irq_restore(spsr);
        context_saved(prev);
}
void continue_running(struct vcpu *same)
{
        /* nothing to do */
}
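/*
 * Report a guest panic, dump state, and crash the offending domain.
 * If domain0 panics we only spin, periodically re-printing the message
 * to make post-mortem debugging easier.
 */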
void panic_domain(struct pt_regs *regs, const char *fmt, ...)
{
        va_list args;
        char buf[128];
        struct vcpu *v = current;
        static volatile int test = 1;   // so can continue easily in debug
        extern spinlock_t console_lock;
        unsigned long flags;

loop:
        printf("$$$$$ PANIC in domain %d (k6=%p): ",
               v->domain->domain_id,
               __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT]);
        va_start(args, fmt);
        (void)vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printf("%s", buf);
        if (regs) show_registers(regs);
        if (regs) {
                debugger_trap_fatal(0 /* don't care */, regs);
        } else {
                debugger_trap_immediate();
        }
        domain_pause_by_systemcontroller(current->domain);
        v->domain->shutdown_code = SHUTDOWN_crash;
        set_bit(_DOMF_shutdown, &v->domain->domain_flags);
        if (v->domain->domain_id == 0) {
                int i = 1000000000L;
                // if domain0 crashes, just periodically print out panic
                // message to make post-mortem easier
                while(i--);
                goto loop;
        }
}
/* FIXME: for the foreseeable future, all CPUs that enable VTi have split
 * caches and all CPUs that have split caches enable VTi.  This may
 * eventually be untrue though. */
#define cpu_has_split_cache     vmx_enabled
extern unsigned int vmx_enabled;

void sync_split_caches(void)
{
        unsigned long ret, progress = 0;

        if (cpu_has_split_cache) {
                /* Sync d/i cache conservatively */
                ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
                if ((ret != PAL_STATUS_SUCCESS) && (ret != PAL_STATUS_UNIMPLEMENTED))
                        printk("PAL CACHE FLUSH failed\n");
                else printk("Sync i/d cache for guest SUCC\n");
        }
        else printk("sync_split_caches ignored for CPU with no split cache\n");
}