ia64/xen-unstable

view xen/arch/ia64/xen/xenmisc.c @ 9157:a693ccb4d581

[IA64] VTI: fix Oops: time tick before it's due

1. The guest may set itm several times within one execution of its timer
handler; the VMM needs to handle this situation.
2. The VMM does not need to stop the guest timer when switching a vcpu out
and restart it when switching in; that logic only leaves room for corner
cases (none identified so far), so it has been removed.
3. When the VMM emulates a write to itv, it cannot simply stop the timer
while the guest has the timer masked.
4. Reads and writes of itv, itc and itm do not need to disable interrupts,
since there are no conflicting accesses.

With these modifications a VTI domain no longer complains "Oops: time tick
before it's due"; a full test has not yet been run.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Tue Mar 07 20:01:29 2006 -0700 (2006-03-07)
parents 0b0be946cf9c
children c644eb4049ab
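The changes described in the changeset message live in the VTI virtual-timer
code elsewhere in the tree, not in this file. As an illustration of point 1
only, here is a minimal, self-contained C sketch of how repeated guest writes
to itm within a single timer-handler pass can be coalesced so that only the
latest value determines when the virtual timer fires; vtm_state, vtm_set_itm
and their fields are hypothetical names, not the actual Xen implementation.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified state for one guest's virtual interval timer. */
struct vtm_state {
    uint64_t itc;      /* current virtual interval timer count */
    uint64_t itm;      /* most recent itm value written by the guest */
    int      armed;    /* is a (virtual) timer currently armed? */
    uint64_t fire_at;  /* itc value at which the armed timer fires */
};

/* Called for every emulated guest write to itm: always program the timer
 * from the latest write, superseding any earlier write made in the same
 * timer-handler pass. */
static void vtm_set_itm(struct vtm_state *vtm, uint64_t new_itm)
{
    vtm->itm = new_itm;
    if (new_itm > vtm->itc) {
        vtm->fire_at = new_itm;      /* re-arm relative to the current itc */
        vtm->armed = 1;
    } else {
        vtm->armed = 0;              /* already due: deliver right away */
        printf("inject timer interrupt now (itm=%llu <= itc=%llu)\n",
               (unsigned long long)new_itm, (unsigned long long)vtm->itc);
    }
}

int main(void)
{
    struct vtm_state vtm = { .itc = 1000, .itm = 0, .armed = 0, .fire_at = 0 };

    /* The guest writes itm twice in one pass of its timer handler;
     * only the second write decides when the virtual timer fires. */
    vtm_set_itm(&vtm, 1500);
    vtm_set_itm(&vtm, 2500);
    printf("armed=%d fire_at=%llu\n", vtm.armed,
           (unsigned long long)vtm.fire_at);
    return 0;
}

Run as-is this prints armed=1 fire_at=2500, i.e. the earlier write is simply
superseded rather than triggering an early tick.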
line source
/*
 * Xen misc
 *
 * Functions/decls that are/may be needed to link with Xen because
 * of x86 dependencies
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 * Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <linux/config.h>
#include <xen/sched.h>
#include <linux/efi.h>
#include <asm/processor.h>
#include <xen/serial.h>
#include <asm/io.h>
#include <xen/softirq.h>
#include <public/sched.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/vmx.h>
#include <asm/vmx_vcpu.h>

efi_memory_desc_t ia64_efi_io_md;
EXPORT_SYMBOL(ia64_efi_io_md);
unsigned long wait_init_idle;
int phys_proc_id[NR_CPUS];
unsigned long loops_per_jiffy = (1<<12);  // from linux/init/main.c

/* FIXME: where should these declarations live? */
extern void load_region_regs(struct vcpu *);
extern void show_registers(struct pt_regs *regs);

void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
void ia64_mca_cpu_init(void *x) { }
void ia64_patch_mckinley_e9(unsigned long a, unsigned long b) { }
void ia64_patch_vtop(unsigned long a, unsigned long b) { }
void hpsim_setup(char **x)
{
#ifdef CONFIG_SMP
    init_smp_config();
#endif
}

// called from mem_init... don't think s/w I/O tlb is needed in Xen
//void swiotlb_init(void) { } ...looks like it IS needed

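/*
 * Heuristic detection of the HP Ski simulator: the low byte of CPUID
 * register 0 must be 'H' and CPUID register 3 must carry the values the
 * simulator reports (see the field checks below); returns 1 on the
 * simulator, 0 otherwise.
 */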
long
is_platform_hp_ski(void)
{
    int i;
    long cpuid[6];

    for (i = 0; i < 5; ++i)
        cpuid[i] = ia64_get_cpuid(i);
    if ((cpuid[0] & 0xff) != 'H') return 0;
    if ((cpuid[3] & 0xff) != 0x4) return 0;
    if (((cpuid[3] >> 8) & 0xff) != 0x0) return 0;
    if (((cpuid[3] >> 16) & 0xff) != 0x0) return 0;
    if (((cpuid[3] >> 24) & 0x7) != 0x7) return 0;
    return 1;
}

long
platform_is_hp_ski(void)
{
    extern long running_on_sim;
    return running_on_sim;
}

/* calls in xen/common code that are unused on ia64 */

void sync_lazy_execstate_cpu(unsigned int cpu) {}

#if 0
int grant_table_create(struct domain *d) { return 0; }
void grant_table_destroy(struct domain *d) { return; }
#endif

struct pt_regs *guest_cpu_user_regs(void) { return vcpu_regs(current); }

void raise_actimer_softirq(void)
{
    raise_softirq(TIMER_SOFTIRQ);
}

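/*
 * Translate a guest pfn of domain d to a machine frame number.  dom0 is
 * identity-mapped, so its gpfn is returned unchanged; for other domains
 * the metaphysical address is looked up and the frame number is extracted
 * from the resulting pte.  A missing mapping is treated as fatal here:
 * the message is printed and the cpu spins.
 */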
unsigned long
gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
{
    if (d == dom0)
        return(gpfn);
    else {
        unsigned long pte = lookup_domain_mpa(d, gpfn << PAGE_SHIFT);
        if (!pte) {
            printk("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
            while(1);
            return 0;
        }
        return ((pte & _PFN_MASK) >> PAGE_SHIFT);
    }
}
#if 0
u32
mfn_to_gmfn(struct domain *d, unsigned long frame)
{
    // FIXME: is this right?
    if ((frame << PAGE_SHIFT) & _PAGE_PPN_MASK) {
        printk("mfn_to_gmfn: bad frame. spinning...\n");
        while(1);
    }
    return frame;
}
#endif

///////////////////////////////
// from arch/ia64/page_alloc.c
///////////////////////////////
DEFINE_PER_CPU(struct page_state, page_states) = {0};
unsigned long totalram_pages;

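/*
 * Add delta to the per-cpu page_states counter found offset bytes into
 * struct page_state.  Local interrupts are disabled so the
 * read-modify-write cannot be torn by an interrupt on this cpu.
 */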
void __mod_page_state(unsigned long offset, unsigned long delta)
{
    unsigned long flags;
    void *ptr;

    local_irq_save(flags);
    ptr = &__get_cpu_var(page_states);
    *(unsigned long *)(ptr + offset) += delta;
    local_irq_restore(flags);
}

///////////////////////////////
// from arch/x86/flushtlb.c
///////////////////////////////

u32 tlbflush_clock;
u32 tlbflush_time[NR_CPUS];

///////////////////////////////
// from arch/x86/memory.c
///////////////////////////////

void init_percpu_info(void)
{
    dummy();
    //memset(percpu_info, 0, sizeof(percpu_info));
}

#if 0
void free_page_type(struct page_info *page, unsigned int type)
{
    dummy();
}
#endif

///////////////////////////////
//// misc memory stuff
///////////////////////////////

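/*
 * Minimal Linux-style page-allocator wrappers, all backed by the Xen heap:
 * __get_free_pages() returns zeroed pages, __free_pages() only handles
 * single pages, and the pgtable quicklist helpers simply allocate/free one
 * cleared xenheap page at a time.
 */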
unsigned long __get_free_pages(unsigned int mask, unsigned int order)
{
    void *p = alloc_xenheap_pages(order);

    memset(p, 0, PAGE_SIZE << order);
    return (unsigned long)p;
}

void __free_pages(struct page *page, unsigned int order)
{
    if (order) BUG();
    free_xenheap_page(page);
}

void *pgtable_quicklist_alloc(void)
{
    void *p;
    p = alloc_xenheap_pages(0);
    if (p)
        clear_page(p);
    return p;
}

void pgtable_quicklist_free(void *pgtable_entry)
{
    free_xenheap_page(pgtable_entry);
}

///////////////////////////////
// from arch/ia64/traps.c
///////////////////////////////

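/*
 * Report the bounds of the Xen text segment using the _stext/_etext linker
 * symbols; used when deciding whether an address lies in hypervisor text.
 */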
int is_kernel_text(unsigned long addr)
{
    extern char _stext[], _etext[];
    if (addr >= (unsigned long)_stext &&
        addr <= (unsigned long)_etext)
        return 1;

    return 0;
}

unsigned long kernel_text_end(void)
{
    extern char _etext[];
    return (unsigned long)_etext;
}

///////////////////////////////
// from common/keyhandler.c
///////////////////////////////
void dump_pageframe_info(struct domain *d)
{
    printk("dump_pageframe_info not implemented\n");
}

int nmi_count(int x) { return x; }

///////////////////////////////
// called from arch/ia64/head.S
///////////////////////////////

void console_print(char *msg)
{
    printk("console_print called, how did start_kernel return???\n");
}

void kernel_thread_helper(void)
{
    printk("kernel_thread_helper not implemented\n");
    dummy();
}

void sys_exit(void)
{
    printk("sys_exit not implemented\n");
    dummy();
}

////////////////////////////////////
// called from unaligned.c
////////////////////////////////////

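/*
 * If the faulting context was user mode, just return and let the guest
 * deal with it; a kernel-mode fault is logged, registers are dumped, and
 * the domain is crashed synchronously.
 */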
void die_if_kernel(char *str, struct pt_regs *regs, long err) /* __attribute__ ((noreturn)) */
{
    if (user_mode(regs))
        return;

    printk("%s: %s %ld\n", __func__, str, err);
    debugtrace_dump();
    show_registers(regs);
    domain_crash_synchronous();
}

long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
           unsigned long user_rbs_end, unsigned long addr, long *val)
{
    printk("ia64_peek: called, not implemented\n");
    return 1;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
           unsigned long user_rbs_end, unsigned long addr, long val)
{
    printk("ia64_poke: called, not implemented\n");
    return 1;
}

void
ia64_sync_fph (struct task_struct *task)
{
    printk("ia64_sync_fph: called, not implemented\n");
}

void
ia64_flush_fph (struct task_struct *task)
{
    printk("ia64_flush_fph: called, not implemented\n");
}

////////////////////////////////////
// called from irq_ia64.c:init_IRQ()
//   (because CONFIG_IA64_HP_SIM is specified)
////////////////////////////////////
void hpsim_irq_init(void) { }

// accommodate linux extable.c
//const struct exception_table_entry *
void *search_module_extables(unsigned long addr) { return NULL; }
void *__module_text_address(unsigned long addr) { return NULL; }
void *module_text_address(unsigned long addr) { return NULL; }

void cs10foo(void) {}
void cs01foo(void) {}

unsigned long context_switch_count = 0;

#include <asm/vcpu.h>

void context_switch(struct vcpu *prev, struct vcpu *next)
{
    uint64_t spsr;
    uint64_t pta;

    local_irq_save(spsr);
//  if (VMX_DOMAIN(prev)) {
//      vtm_domain_out(prev);
//  }
    context_switch_count++;
    switch_to(prev, next, prev);
//  if (VMX_DOMAIN(current)) {
//      vtm_domain_in(current);
//  }

    // leave this debug for now: it acts as a heartbeat when more than
    // one domain is active
    {
        static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50 };
        static int i = 100;
        int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
        if (!cnt[id]--) { printk("%x", id); cnt[id] = 500000; }
        if (!i--) { printk("+"); i = 1000000; }
    }

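    /*
     * Reload per-vcpu address-translation state for the incoming vcpu:
     * VMX (VT-i) domains reload all region registers via vmx_load_all_rr();
     * paravirtualized domains get the Xen ivt, a re-enabled VHPT and their
     * region/kernel registers (plus a pending timer tick if one expired),
     * while the idle domain only needs the VHPT walker disabled.
     */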
    if (VMX_DOMAIN(current)) {
//      vtm_domain_in(current);
        vmx_load_all_rr(current);
    } else {
        extern char ia64_ivt;
        ia64_set_iva(&ia64_ivt);
        if (!is_idle_domain(current->domain)) {
            ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                         VHPT_ENABLED);
            load_region_regs(current);
            vcpu_load_kernel_regs(current);
            if (vcpu_timer_expired(current))
                vcpu_pend_timer(current);
        } else {
            /* When switching to the idle domain we only need to disable the
             * vhpt walker; all accesses made in idle context are then
             * handled by the TR mapping and the identity mapping.
             */
            pta = ia64_get_pta();
            ia64_set_pta(pta & ~VHPT_ENABLED);
        }
    }

    local_irq_restore(spsr);
    context_saved(prev);
}

void continue_running(struct vcpu *same)
{
    /* nothing to do */
}

void arch_dump_domain_info(struct domain *d)
{
}

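/*
 * Print a formatted panic message for the current domain, dump the
 * registers, drop into the debugger hooks, then pause and mark the domain
 * as crashed.  If the crashing domain is domain 0 the message is
 * re-printed periodically to make post-mortem debugging easier.
 */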
void panic_domain(struct pt_regs *regs, const char *fmt, ...)
{
    va_list args;
    char buf[128];
    struct vcpu *v = current;
//  static volatile int test = 1;   // so can continue easily in debug
//  extern spinlock_t console_lock;
//  unsigned long flags;

loop:
    printf("$$$$$ PANIC in domain %d (k6=0x%lx): ",
           v->domain->domain_id,
           __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT]);
    va_start(args, fmt);
    (void)vsnprintf(buf, sizeof(buf), fmt, args);
    va_end(args);
    printf(buf);
    if (regs) show_registers(regs);
    if (regs) {
        debugger_trap_fatal(0 /* don't care */, regs);
    } else {
        debugger_trap_immediate();
    }
    domain_pause_by_systemcontroller(current->domain);
    v->domain->shutdown_code = SHUTDOWN_crash;
    set_bit(_DOMF_shutdown, &v->domain->domain_flags);
    if (v->domain->domain_id == 0) {
        int i = 1000000000L;
        // if domain0 crashes, just periodically print out panic
        // message to make post-mortem easier
        while(i--);
        goto loop;
    }
}

/* FIXME: for the foreseeable future, all cpus that enable VT-i have split
 * caches and all cpus that have split caches enable VT-i.  This may
 * eventually be untrue though. */
#define cpu_has_split_cache vmx_enabled
extern unsigned int vmx_enabled;

void sync_split_caches(void)
{
    unsigned long ret, progress = 0;

    if (cpu_has_split_cache) {
        /* Sync d/i cache conservatively */
        ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
        if ((ret != PAL_STATUS_SUCCESS) && (ret != PAL_STATUS_UNIMPLEMENTED))
            printk("PAL CACHE FLUSH failed\n");
        else
            printk("Sync i/d cache for guest SUCC\n");
    }
    else
        printk("sync_split_caches ignored for CPU with no split cache\n");
}