ia64/xen-unstable

view xen/arch/ia64/xenmisc.c @ 6538:84ee014ebd41

Merge xen-vtx-unstable.hg
author adsharma@los-vmm.sc.intel.com
date Wed Aug 17 12:34:38 2005 -0800 (2005-08-17)
parents 23979fb12c49 3b0ce44f7b7a
children 99914b54f7bf
line source
1 /*
2 * Xen misc
3 *
4 * Functions/decls that are/may be needed to link with Xen because
5 * of x86 dependencies
6 *
7 * Copyright (C) 2004 Hewlett-Packard Co.
8 * Dan Magenheimer (dan.magenheimer@hp.com)
9 *
10 */
12 #include <linux/config.h>
13 #include <xen/sched.h>
14 #include <linux/efi.h>
15 #include <asm/processor.h>
16 #include <xen/serial.h>
17 #include <asm/io.h>
18 #include <xen/softirq.h>
/* EFI memory descriptor used to describe I/O port space (exported). */
efi_memory_desc_t ia64_efi_io_md;
EXPORT_SYMBOL(ia64_efi_io_md);
/* Presumably a mask of CPUs not yet in the idle loop at boot -- set but
 * not consumed in this file; verify against SMP bringup code. */
unsigned long wait_init_idle;
/* Per-logical-CPU physical package id, expected by common x86-ish code. */
int phys_proc_id[NR_CPUS];
/* Delay-loop calibration value; placeholder constant, never calibrated. */
unsigned long loops_per_jiffy = (1<<12); // from linux/init/main.c
/*
 * Stubs for Linux/ia64 services Xen does not implement yet.  The first
 * two announce themselves so their absence is visible at boot; the rest
 * are silent no-ops.
 */
void unw_init(void) { printf("unw_init() skipped (NEED FOR KERNEL UNWIND)\n"); }
void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
void ia64_mca_cpu_init(void *x) { }
void ia64_patch_mckinley_e9(unsigned long a, unsigned long b) { }
void ia64_patch_vtop(unsigned long a, unsigned long b) { }
void hpsim_setup(char **x) { }
33 // called from mem_init... don't think s/w I/O tlb is needed in Xen
34 //void swiotlb_init(void) { } ...looks like it IS needed
/*
 * Probe the CPUID registers for the HP Ski simulator's signature.
 * Returns 1 when all signature fields match (i.e. we are running on
 * Ski), 0 otherwise.
 */
long
is_platform_hp_ski(void)
{
	long id[6];
	int reg;

	for (reg = 0; reg < 5; ++reg)
		id[reg] = ia64_get_cpuid(reg);

	/* Vendor byte must be 'H'; cpuid[3] carries the simulator's
	 * number/revision/model/family pattern. */
	return ((id[0] & 0xff) == 'H' &&
		(id[3] & 0xff) == 0x4 &&
		((id[3] >> 8) & 0xff) == 0x0 &&
		((id[3] >> 16) & 0xff) == 0x0 &&
		((id[3] >> 24) & 0x7) == 0x7) ? 1 : 0;
}
/*
 * Report whether Xen is running on the HP Ski simulator, using the
 * flag cached at boot rather than re-probing CPUID.
 */
long
platform_is_hp_ski(void)
{
	extern long running_on_sim;
	return running_on_sim;
}
/* calls in xen/common code that are unused on ia64 */
/* Lazy execution-state syncing is an x86 concept; nothing to do here. */
void sync_lazy_execstate_cpu(unsigned int cpu) {}
void sync_lazy_execstate_mask(cpumask_t mask) {}
void sync_lazy_execstate_all(void) {}
#ifdef CONFIG_VTI
/* Grant tables are not wired up in the VTI build; create reports success
 * so domain construction can proceed, destroy is a no-op. */
int grant_table_create(struct domain *d) { return 0; }
void grant_table_destroy(struct domain *d) { return; }
#endif
/* Return the saved register frame of the currently running vcpu. */
struct pt_regs *guest_cpu_user_regs(void) { return ia64_task_regs(current); }
/* Mark the accurate-timer softirq pending on the local CPU. */
void raise_actimer_softirq(void)
{
	raise_softirq(AC_TIMER_SOFTIRQ);
}
#ifndef CONFIG_VTI
/*
 * Translate a guest pfn of domain d to a machine frame number.
 * dom0 is identity-mapped, so its gpfn IS the mfn; any other domain is
 * translated through its metaphysical address map.  A missing mapping
 * is treated as fatal: print a message and spin forever (the return 0
 * after the while(1) is unreachable, kept only to satisfy the type).
 */
unsigned long
__gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
{
	if (d == dom0)
		return(gpfn);
	else {
		unsigned long pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT);
		if (!pte) {
			printk("__gpfn_to_mfn_foreign: bad gpfn. spinning...\n");
			while(1);
			return 0;
		}
		/* Extract the physical frame number field from the pte. */
		return ((pte & _PFN_MASK) >> PAGE_SHIFT);
	}
}
94 u32
95 __mfn_to_gpfn(struct domain *d, unsigned long frame)
96 {
97 // FIXME: is this right?
98 if ((frame << PAGE_SHIFT) & _PAGE_PPN_MASK) {
99 printk("__mfn_to_gpfn: bad frame. spinning...\n");
100 while(1);
101 }
102 return frame;
103 }
104 #endif
#ifndef CONFIG_VTI
/*
 * Placeholder for hypercall continuations (see the x86 implementation).
 * Callers use the returned value, so falling off the end of this
 * non-void function was undefined behavior; return 0 ("no
 * continuation") until a real implementation exists.
 */
unsigned long __hypercall_create_continuation(
	unsigned int op, unsigned int nr_args, ...)
{
	printf("__hypercall_create_continuation: not implemented!!!\n");
	return 0;
}
#endif
114 ///////////////////////////////
116 ///////////////////////////////
117 // from arch/x86/apic.c
118 ///////////////////////////////
120 extern unsigned long domain0_ready;
/*
 * (Re)arm the local CPU's accurate timer to fire at `timeout'.
 * Always reports success (returns 1).  In the VTI build, and before
 * dom0 is ready, this is deliberately a no-op.
 */
int reprogram_ac_timer(s_time_t timeout)
{
	struct vcpu *v = current;

#ifdef CONFIG_VTI
	// if(VMX_DOMAIN(v))
	/* VTI build: timer programming happens elsewhere; skip entirely. */
	return 1;
#endif // CONFIG_VTI
	if (!domain0_ready) return 1;
	local_cpu_data->itm_next = timeout;
	/* Idle task: program the itm directly; otherwise let the vcpu's
	 * own next-timer logic pick the earliest deadline. */
	if (is_idle_task(v->domain)) vcpu_safe_set_itm(timeout);
	else vcpu_set_next_timer(current);
	return 1;
}
137 ///////////////////////////////
138 // from arch/ia64/page_alloc.c
139 ///////////////////////////////
/* Per-cpu page accounting state, zero-initialized. */
DEFINE_PER_CPU(struct page_state, page_states) = {0};
/* Total RAM pages known to the allocator. */
unsigned long totalram_pages;
143 void __mod_page_state(unsigned offset, unsigned long delta)
144 {
145 unsigned long flags;
146 void* ptr;
148 local_irq_save(flags);
149 ptr = &__get_cpu_var(page_states);
150 *(unsigned long*)(ptr + offset) += delta;
151 local_irq_restore(flags);
152 }
154 ///////////////////////////////
155 // from arch/x86/flushtlb.c
156 ///////////////////////////////
/* Global TLB-flush clock and per-cpu flush timestamps, mirroring the
 * x86 flushtlb scheme (declared for common code; not driven here). */
u32 tlbflush_clock;
u32 tlbflush_time[NR_CPUS];
161 ///////////////////////////////
162 // from arch/x86/memory.c
163 ///////////////////////////////
/* Placeholder: per-cpu info initialization not yet ported from x86. */
void init_percpu_info(void)
{
	dummy();
	//memset(percpu_info, 0, sizeof(percpu_info));
}
/* Placeholder: page-type teardown is not implemented on ia64. */
void free_page_type(struct pfn_info *page, unsigned int type)
{
	dummy();
}
176 ///////////////////////////////
177 // from arch/ia64/traps.c
178 ///////////////////////////////
/* Crash/debug register dump -- not yet implemented, prints a reminder. */
void show_registers(struct pt_regs *regs)
{
	printf("*** ADD REGISTER DUMP HERE FOR DEBUGGING\n");
}
/*
 * Return 1 if addr lies within the hypervisor text segment
 * [_stext, _etext] (both bounds inclusive), else 0.
 */
int is_kernel_text(unsigned long addr)
{
	extern char _stext[], _etext[];
	unsigned long start = (unsigned long) _stext;
	unsigned long end = (unsigned long) _etext;

	return (addr >= start && addr <= end) ? 1 : 0;
}
/* Return the address of the end of the hypervisor text segment. */
unsigned long kernel_text_end(void)
{
	extern char _etext[];
	return (unsigned long) _etext;
}
201 ///////////////////////////////
202 // from common/keyhandler.c
203 ///////////////////////////////
/* Keyhandler hook for dumping a domain's page frames -- stubbed out. */
void dump_pageframe_info(struct domain *d)
{
	printk("dump_pageframe_info not implemented\n");
}
209 ///////////////////////////////
210 // called from arch/ia64/head.S
211 ///////////////////////////////
/* Called from head.S only if start_kernel ever returns -- should not
 * happen; the message documents that surprise. */
void console_print(char *msg)
{
	printk("console_print called, how did start_kernel return???\n");
}
/* Kernel-thread trampoline -- not implemented under Xen. */
void kernel_thread_helper(void)
{
	printk("kernel_thread_helper not implemented\n");
	dummy();
}
/* exit() system call -- meaningless in the hypervisor; stubbed. */
void sys_exit(void)
{
	printk("sys_exit not implemented\n");
	dummy();
}
230 ////////////////////////////////////
231 // called from unaligned.c
232 ////////////////////////////////////
/* Would kill the kernel on a fatal fault in kernel mode (see
 * unaligned.c callers); here it only logs that it was reached. */
void die_if_kernel(char *str, struct pt_regs *regs, long err) /* __attribute__ ((noreturn)) */
{
	printk("die_if_kernel: called, not implemented\n");
}
/*
 * Ptrace-style read from a task's register backing store -- not
 * implemented in Xen.  Returns -1 so callers observe a failure;
 * previously the function fell off the end, which is undefined
 * behavior when the result is used.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	unsigned long user_rbs_end, unsigned long addr, long *val)
{
	printk("ia64_peek: called, not implemented\n");
	return -1;
}
/*
 * Ptrace-style write into a task's register backing store -- not
 * implemented in Xen.  Returns -1 so callers observe a failure;
 * previously the function fell off the end, which is undefined
 * behavior when the result is used.
 */
long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	unsigned long user_rbs_end, unsigned long addr, long val)
{
	printk("ia64_poke: called, not implemented\n");
	return -1;
}
/* Sync the high floating-point partition to the task struct -- stub. */
void
ia64_sync_fph (struct task_struct *task)
{
	printk("ia64_sync_fph: called, not implemented\n");
}
/* Flush the high floating-point partition state -- stub. */
void
ia64_flush_fph (struct task_struct *task)
{
	printk("ia64_flush_fph: called, not implemented\n");
}
////////////////////////////////////
// called from irq_ia64.c:init_IRQ()
// (because CONFIG_IA64_HP_SIM is specified)
////////////////////////////////////
/* Simulator IRQ setup: nothing needed under Xen. */
void hpsim_irq_init(void) { }
// Accommodate linux extable.c: Xen has no loadable modules, so every
// module exception-table / text-address lookup comes up empty.
// (Real prototype would be `const struct exception_table_entry *'.)
void *search_module_extables(unsigned long addr)
{
	return NULL;
}

void *__module_text_address(unsigned long addr)
{
	return NULL;
}

void *module_text_address(unsigned long addr)
{
	return NULL;
}
/* Empty debug hooks: handy places to set breakpoints on particular
 * domain-to-domain context switches. */
void cs10foo(void) {}
void cs01foo(void) {}

/* Running count of context switches (debug aid). */
unsigned long context_switch_count = 0;
283 void context_switch(struct vcpu *prev, struct vcpu *next)
284 {
285 //printk("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
286 //printk("@@@@@@ context switch from domain %d (%x) to domain %d (%x)\n",
287 //prev->domain->domain_id,(long)prev&0xffffff,next->domain->domain_id,(long)next&0xffffff);
288 //if (prev->domain->domain_id == 1 && next->domain->domain_id == 0) cs10foo();
289 //if (prev->domain->domain_id == 0 && next->domain->domain_id == 1) cs01foo();
290 //printk("@@sw %d->%d\n",prev->domain->domain_id,next->domain->domain_id);
291 #ifdef CONFIG_VTI
292 vtm_domain_out(prev);
293 #endif
294 context_switch_count++;
295 switch_to(prev,next,prev);
296 #ifdef CONFIG_VTI
297 vtm_domain_in(current);
298 #endif
300 // leave this debug for now: it acts as a heartbeat when more than
301 // one domain is active
302 {
303 static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50};
304 static int i = 100;
305 int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
306 if (!cnt[id]--) { printk("%x",id); cnt[id] = 500000; }
307 if (!i--) { printk("+",id); i = 1000000; }
308 }
310 #ifdef CONFIG_VTI
311 if (VMX_DOMAIN(current))
312 vmx_load_all_rr(current);
313 #else
314 if (!is_idle_task(current->domain)) {
315 load_region_regs(current);
316 if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
317 }
318 if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
319 #endif
320 }
/* Post-switch fixup hook required by common code; no-op on ia64. */
void context_switch_finalise(struct vcpu *next)
{
	/* nothing to do */
}
/* Called when the scheduler keeps the same vcpu running; no-op here. */
void continue_running(struct vcpu *same)
{
	/* nothing to do */
}
332 void panic_domain(struct pt_regs *regs, const char *fmt, ...)
333 {
334 va_list args;
335 char buf[128];
336 struct vcpu *v = current;
337 static volatile int test = 1; // so can continue easily in debug
338 extern spinlock_t console_lock;
339 unsigned long flags;
341 loop:
342 printf("$$$$$ PANIC in domain %d (k6=%p): ",
343 v->domain->domain_id, ia64_get_kr(IA64_KR_CURRENT));
344 va_start(args, fmt);
345 (void)vsnprintf(buf, sizeof(buf), fmt, args);
346 va_end(args);
347 printf(buf);
348 if (regs) show_registers(regs);
349 domain_pause_by_systemcontroller(current->domain);
350 v->domain->shutdown_code = SHUTDOWN_crash;
351 set_bit(_DOMF_shutdown, v->domain->domain_flags);
352 if (v->domain->domain_id == 0) {
353 int i = 1000000000L;
354 // if domain0 crashes, just periodically print out panic
355 // message to make post-mortem easier
356 while(i--);
357 goto loop;
358 }
359 }