ia64/xen-unstable: xen/arch/ia64/domain.c @ 4146:f2d61710e4d9

bitkeeper revision 1.1236.25.24 (42366e9aQ71LQ8uCB-Y1IwVNqx5eqA)

Merge djm@kirby.fc.hp.com://home/djm/src/xen/xeno-unstable-ia64.bk
into sportsman.spdomain:/home/djm/xeno-unstable-ia64.bk

author   djm@sportsman.spdomain
date     Tue Mar 15 05:11:54 2005 +0000 (2005-03-15)
parents  ef90e028e868 0c846e77cca4
children a0b28acf0dcd

/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/delay.h>
#include <xen/softirq.h>
#include <xen/mm.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
//#include <asm/mpspec.h>
#include <xen/irq.h>
#include <xen/event.h>
//#include <xen/shadow.h>
#include <xen/console.h>

#include <xen/elf.h>
//#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>	/* for MAX_DMA_ADDRESS */

#include <asm/asm-offsets.h>	/* for IA64_THREAD_INFO_SIZE */

#include <asm/vcpu.h>	/* for function declarations */

#define CONFIG_DOMAIN0_CONTIGUOUS
unsigned long dom0_start = -1L;
unsigned long dom0_size = 512*1024*1024;	//FIXME: Should be configurable
//FIXME: alignment should be 256MB, lest Linux use a 256MB page size
unsigned long dom0_align = 64*1024*1024;

extern kmem_cache_t *domain_struct_cachep;

// initialized by arch/ia64/setup.c:find_initrd()
unsigned long initrd_start = 0, initrd_end = 0;

extern int loadelfimage(char *);
extern int readelfimage_base_and_size(char *, unsigned long,
	unsigned long *, unsigned long *, unsigned long *);

unsigned long map_domain_page0(struct domain *);
extern unsigned long dom_fw_setup(struct domain *, char *, int);

/* this belongs in include/asm, but there doesn't seem to be a suitable place */
void free_perdomain_pt(struct domain *d)
{
	dummy();
	//free_page((unsigned long)d->mm.perdomain_pt);
}

int hlt_counter;

void disable_hlt(void)
{
	hlt_counter++;
}

void enable_hlt(void)
{
	hlt_counter--;
}

static void default_idle(void)
{
	if ( hlt_counter == 0 )
	{
		local_irq_disable();
		if ( !softirq_pending(smp_processor_id()) )
			safe_halt();
		//else
		local_irq_enable();
	}
}
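
/*
 * Illustrative sketch (not part of the original file): a caller that
 * must not let the CPU halt, e.g. while polling a device, brackets the
 * polling loop with disable_hlt()/enable_hlt().  The device_ready()
 * predicate below is hypothetical.
 */
#if 0
static void poll_device_example(void)
{
	disable_hlt();		/* hlt_counter > 0: default_idle() won't halt */
	while (!device_ready())	/* hypothetical readiness check */
		;
	enable_hlt();		/* idle loop may use safe_halt() again */
}
#endif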

void continue_cpu_idle_loop(void)
{
	int cpu = smp_processor_id();
	for ( ; ; )
	{
#ifdef IA64
//		__IRQ_STAT(cpu, idle_timestamp) = jiffies
#else
		irq_stat[cpu].idle_timestamp = jiffies;
#endif
		while ( !softirq_pending(cpu) )
			default_idle();
		do_softirq();
	}
}

void startup_cpu_idle_loop(void)
{
	/* Just some sanity to ensure that the scheduler is set up okay. */
	ASSERT(current->domain == IDLE_DOMAIN_ID);
	domain_unpause_by_systemcontroller(current->domain);
	__enter_scheduler();

	/*
	 * Declare CPU setup done to the boot processor; a memory
	 * barrier ensures the state is visible before it proceeds.
	 */
	smp_mb();
	init_idle();
#if 0
//do we have to ensure the idle task has a shared page so that, for example,
//region registers can be loaded from it. Apparently not...
	idle0_task.shared_info = (void *)alloc_xenheap_page();
	memset(idle0_task.shared_info, 0, PAGE_SIZE);
	/* pin mapping */
	// FIXME: Does this belong here? Or do only at domain switch time?
	{
		/* WARNING: following must be inlined to avoid nested fault */
		unsigned long psr = ia64_clear_ic();
		ia64_itr(0x2, IA64_TR_SHARED_INFO, SHAREDINFO_ADDR,
			 pte_val(pfn_pte(ia64_tpa(idle0_task.shared_info) >> PAGE_SHIFT, PAGE_KERNEL)),
			 PAGE_SHIFT);
		ia64_set_psr(psr);
		ia64_srlz_i();
	}
#endif

	continue_cpu_idle_loop();
}

struct domain *arch_alloc_domain_struct(void)
{
	return xmem_cache_alloc(domain_struct_cachep);
}

void arch_free_domain_struct(struct domain *d)
{
	xmem_cache_free(domain_struct_cachep,d);
}

struct exec_domain *arch_alloc_exec_domain_struct(void)
{
	return alloc_task_struct();
}

void arch_free_exec_domain_struct(struct exec_domain *ed)
{
	free_task_struct(ed);
}

void arch_do_createdomain(struct exec_domain *ed)
{
	struct domain *d = ed->domain;

	d->shared_info = (void *)alloc_xenheap_page();
	ed->vcpu_info = (void *)alloc_xenheap_page();
	if (!ed->vcpu_info) {
		printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
		while (1);
	}
	memset(ed->vcpu_info, 0, PAGE_SIZE);
	/* pin mapping */
	// FIXME: Does this belong here? Or do only at domain switch time?
#if 0
	// this is now done in ia64_new_rr7
	{
		/* WARNING: following must be inlined to avoid nested fault */
		unsigned long psr = ia64_clear_ic();
		ia64_itr(0x2, IA64_TR_SHARED_INFO, SHAREDINFO_ADDR,
			 pte_val(pfn_pte(ia64_tpa(d->shared_info) >> PAGE_SHIFT, PAGE_KERNEL)),
			 PAGE_SHIFT);
		ia64_set_psr(psr);
		ia64_srlz_i();
	}
#endif
	d->max_pages = (128*1024*1024)/PAGE_SIZE;	// 128MB default // FIXME
	if ((d->metaphysical_rid = allocate_metaphysical_rid()) == -1UL)
		BUG();
	ed->vcpu_info->arch.metaphysical_mode = 1;
#define DOMAIN_RID_BITS_DEFAULT 18
	if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT))	// FIXME
		BUG();
	// the following will eventually need to be negotiated dynamically
	d->xen_vastart = 0xfffc000000000000;
	d->xen_vaend = 0xfffe000000000000;
	d->shared_info_va = 0xfffd000000000000;
	d->breakimm = 0x1000;
	// stay on kernel stack because may get interrupts!
	// ia64_ret_from_clone (which b0 gets in new_thread) switches
	// to user stack
	ed->thread.on_ustack = 0;
}
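
/*
 * Illustrative note (not in the original source): the constants above
 * carve a fixed 512TB window (0xfffc000000000000 to 0xfffe000000000000,
 * i.e. 2^49 bytes) out of the top of the virtual address space for Xen,
 * with the shared_info mapping placed at the window's midpoint
 * (0xfffd000000000000).
 */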

void arch_do_boot_vcpu(struct exec_domain *p)
{
	return;
}

int arch_set_info_guest(struct exec_domain *p, full_execution_context_t *c)
{
	dummy();
	return 1;
}

void domain_relinquish_memory(struct domain *d)
{
	dummy();
}

// heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
// and linux/arch/ia64/kernel/process.c:kernel_thread()
void new_thread(struct exec_domain *ed,
		unsigned long start_pc,
		unsigned long start_stack,
		unsigned long start_info)
{
	struct domain *d = ed->domain;
	struct switch_stack *sw;
	struct pt_regs *regs;
	unsigned long new_rbs;
	struct ia64_boot_param *bp;
	extern char ia64_ret_from_clone;
	extern char saved_command_line[];

#ifdef CONFIG_DOMAIN0_CONTIGUOUS
	if (d == dom0) start_pc += dom0_start;
#endif
	regs = (struct pt_regs *) ((unsigned long) ed + IA64_STK_OFFSET) - 1;
	sw = (struct switch_stack *) regs - 1;
	new_rbs = (unsigned long) ed + IA64_RBS_OFFSET;
	/* parenthesized: '&' binds tighter than '|', and the TO_CLEAR
	 * bits must be masked off the whole OR'd value */
	regs->cr_ipsr = (ia64_getreg(_IA64_REG_PSR)
		| IA64_PSR_BITS_TO_SET | IA64_PSR_BN)
		& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS);
	regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;	// domain runs at PL2
	regs->cr_iip = start_pc;
	regs->ar_rsc = 0xf;	/* eager mode, privilege level 3 */
	regs->ar_rnat = 0;
	regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT;
	regs->loadrs = 0;
	//regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */
	//regs->r8 = 0x01234567890abcdef; // FIXME: temp marker
	//regs->r12 = ((unsigned long) regs - 16); /* 16 byte scratch */
	regs->cr_ifs = 1UL << 63;
	regs->pr = 0;
	sw->pr = 0;
	regs->ar_pfs = 0;
	sw->ar_pfs = 0;
	sw->ar_bspstore = new_rbs;
	//regs->r13 = (unsigned long) ed;
	printf("new_thread: ed=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
		ed,regs,sw,new_rbs,IA64_STK_OFFSET,&regs->r8);
	sw->b0 = (unsigned long) &ia64_ret_from_clone;
	ed->thread.ksp = (unsigned long) sw - 16;
	//ed->thread_info->flags = 0;
	printk("new_thread, about to call init_all_rr\n");
	init_all_rr(ed);
	// set up boot parameters (and fake firmware)
	printk("new_thread, about to call dom_fw_setup\n");
	regs->r28 = dom_fw_setup(d,saved_command_line,256L);	//FIXME
	printk("new_thread, done with dom_fw_setup\n");
	// don't forget to set this!
	ed->vcpu_info->arch.banknum = 1;
}

static struct page * map_new_domain0_page(unsigned long mpaddr)
{
	if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
		printk("map_new_domain0_page: bad domain0 mpaddr %p!\n",mpaddr);
		printk("map_new_domain0_page: start=%p,end=%p!\n",dom0_start,dom0_start+dom0_size);
		while(1);
	}
	return pfn_to_page((mpaddr >> PAGE_SHIFT));
}
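
/*
 * Illustrative note (not in the original source): with
 * CONFIG_DOMAIN0_CONTIGUOUS, domain0's metaphysical addresses already
 * include the dom0_start offset (see new_thread()), so the mapping is
 * the identity: e.g. with dom0_start == 0x40000000 and 16KB pages,
 * mpaddr 0x40004000 maps to machine frame 0x40004000 >> PAGE_SHIFT.
 */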

/* allocate new page for domain and map it to the specified metaphysical addr */
struct page * map_new_domain_page(struct domain *d, unsigned long mpaddr)
{
	struct mm_struct *mm = d->arch.mm;
	struct page *p = (struct page *)0;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	extern unsigned long vhpt_paddr, vhpt_pend;

	if (!mm->pgd) {
		printk("map_new_domain_page: domain pgd must exist!\n");
		return(p);
	}
	pgd = pgd_offset(mm,mpaddr);
	if (pgd_none(*pgd))
		pgd_populate(mm, pgd, pmd_alloc_one(mm,mpaddr));

	pmd = pmd_offset(pgd, mpaddr);
	if (pmd_none(*pmd))
		pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));

	pte = pte_offset_map(pmd, mpaddr);
	if (pte_none(*pte)) {
#ifdef CONFIG_DOMAIN0_CONTIGUOUS
		if (d == dom0) p = map_new_domain0_page(mpaddr);
		else
#endif
			p = alloc_page(GFP_KERNEL);
		if (unlikely(!p)) {
			printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
			return(p);
		}
		if (unlikely(page_to_phys(p) > vhpt_paddr && page_to_phys(p) < vhpt_pend)) {
			printf("map_new_domain_page: reassigned vhpt page %p!!\n",page_to_phys(p));
		}
		set_pte(pte, pfn_pte(page_to_phys(p) >> PAGE_SHIFT,
			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
	}
	else printk("map_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
	return p;
}

void mpafoo(unsigned long mpaddr)
{
	extern unsigned long privop_trace;
	if (mpaddr == 0x3800)
		privop_trace = 1;
}

unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
{
	struct mm_struct *mm = d->arch.mm;
	pgd_t *pgd = pgd_offset(mm, mpaddr);
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_DOMAIN0_CONTIGUOUS
	if (d == dom0) {
		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
			//printk("lookup_domain_mpa: bad dom0 mpaddr %p!\n",mpaddr);
			//printk("lookup_domain_mpa: start=%p,end=%p!\n",dom0_start,dom0_start+dom0_size);
			mpafoo(mpaddr);
		}
		pte_t pteval = pfn_pte(mpaddr >> PAGE_SHIFT,
			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX));
		pte = &pteval;
		return *(unsigned long *)pte;
	}
#endif
tryagain:
	if (pgd_present(*pgd)) {
		pmd = pmd_offset(pgd,mpaddr);
		if (pmd_present(*pmd)) {
			pte = pte_offset_map(pmd,mpaddr);
			if (pte_present(*pte)) {
				//printk("lookup_domain_page: found mapping for %lx, pte=%lx\n",mpaddr,pte_val(*pte));
				return *(unsigned long *)pte;
			}
		}
	}
	/* if lookup fails and mpaddr is "legal", "create" the page */
	if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
		// FIXME: should zero out pages for security reasons
		if (map_new_domain_page(d,mpaddr)) goto tryagain;
	}
	printk("lookup_domain_mpa: bad mpa %p (> %p)\n",
		mpaddr,d->max_pages<<PAGE_SHIFT);
	mpafoo(mpaddr);
	return 0;
}

// FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE
unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
{
	unsigned long pte = lookup_domain_mpa(d,mpaddr);
	unsigned long imva;

	pte &= _PAGE_PPN_MASK;
	imva = (unsigned long) __va(pte);
	imva |= mpaddr & ~PAGE_MASK;
	return(imva);
}
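
/*
 * Worked example (illustrative, not in the original source; assumes
 * 16KB pages, PAGE_SHIFT == 14): if lookup_domain_mpa() returns a pte
 * whose PPN field holds machine page base 0x04000000, then for
 * mpaddr == 0x00801234:
 *   pte & _PAGE_PPN_MASK     == 0x04000000       (machine page base)
 *   __va(0x04000000)                             (Xen-mapped address)
 *   | (mpaddr & ~PAGE_MASK)  == ... | 0x1234     (byte offset in page)
 */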

// remove following line if not privifying in memory
//#define HAVE_PRIVIFY_MEMORY
#ifndef HAVE_PRIVIFY_MEMORY
#define privify_memory(x,y) do {} while(0)
#endif

// see arch/x86/xxx/domain_build.c
int elf_sanity_check(Elf_Ehdr *ehdr)
{
	return (IS_ELF(*ehdr));
}

void loaddomainelfimage(struct domain *d, unsigned long image_start)
{
	char *elfbase = (char *) image_start;
	Elf_Ehdr *ehdr = (Elf_Ehdr *) image_start;
	Elf_Phdr *phdr;
	int h, filesz, memsz;
	unsigned long elfaddr, dom_mpaddr, dom_imva;
	struct page *p;

	for ( h = 0; h < ehdr->e_phnum; h++ ) {
		phdr = (Elf_Phdr *)(elfbase + ehdr->e_phoff + (h*ehdr->e_phentsize));
		//if ( !is_loadable_phdr(phdr) )
		if (phdr->p_type != PT_LOAD) {
			continue;
		}
		filesz = phdr->p_filesz; memsz = phdr->p_memsz;
		elfaddr = (unsigned long) elfbase + phdr->p_offset;
		dom_mpaddr = phdr->p_paddr;
		//printf("p_offset: %x, size=%x\n",elfaddr,filesz);
#ifdef CONFIG_DOMAIN0_CONTIGUOUS
		if (d == dom0) {
			if (dom_mpaddr+memsz>dom0_size || dom_mpaddr+filesz>dom0_size) {
				printf("Domain0 doesn't fit in allocated space!\n");
				while(1);
			}
			dom_imva = (unsigned long) __va(dom_mpaddr + dom0_start);
			memcpy((void *)dom_imva,(void *)elfaddr,filesz);
			if (memsz > filesz)
				memset((void *)(dom_imva+filesz),0,memsz-filesz);
			//FIXME: This test for code seems to find a lot more than objdump -x does
			if (phdr->p_flags & PF_X) privify_memory(dom_imva,filesz);
		}
		else
#endif
		while (memsz > 0) {
			p = map_new_domain_page(d,dom_mpaddr);
			if (unlikely(!p)) BUG();
			dom_imva = (unsigned long) __va(page_to_phys(p));
			if (filesz > 0) {
				if (filesz >= PAGE_SIZE)
					memcpy((void *)dom_imva,(void *)elfaddr,PAGE_SIZE);
				else {	// copy partial page, zero the rest of page
					memcpy((void *)dom_imva,(void *)elfaddr,filesz);
					memset((void *)(dom_imva+filesz),0,PAGE_SIZE-filesz);
				}
				//FIXME: This test for code seems to find a lot more than objdump -x does
				if (phdr->p_flags & PF_X)
					privify_memory(dom_imva,PAGE_SIZE);
			}
			else if (memsz > 0)	// always zero out entire page
				memset((void *)dom_imva,0,PAGE_SIZE);
			memsz -= PAGE_SIZE; filesz -= PAGE_SIZE;
			elfaddr += PAGE_SIZE; dom_mpaddr += PAGE_SIZE;
		}
	}
}
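
/*
 * Worked example for the non-contiguous path above (illustrative, not
 * in the original source; assumes PAGE_SIZE == 16384): a PT_LOAD
 * segment with p_filesz == 5000 and p_memsz == 20000 copies 5000 bytes
 * into the first page and zeroes the remaining 11384; filesz then goes
 * negative, so the second page takes the memsz-only path and is
 * zero-filled in its entirety.
 */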

void alloc_dom0(void)
{
#ifdef CONFIG_DOMAIN0_CONTIGUOUS
	if (platform_is_hp_ski()) {
		dom0_size = 128*1024*1024; //FIXME: Should be configurable
	}
	printf("alloc_dom0: starting (initializing %lu MB...)\n",dom0_size/(1024*1024));
	dom0_start = (unsigned long) __alloc_bootmem(dom0_size,dom0_align,__pa(MAX_DMA_ADDRESS));
	if (!dom0_start) {
		printf("alloc_dom0: can't allocate contiguous memory size=%p\n",
			dom0_size);
		while(1);
	}
	printf("alloc_dom0: dom0_start=%p\n",dom0_start);
#else
	dom0_start = 0;
#endif
}

int construct_dom0(struct domain *d,
	unsigned long image_start, unsigned long image_len,
	unsigned long initrd_start, unsigned long initrd_len,
	char *cmdline)
{
	char *dst;
	int i, rc;
	unsigned long pfn, mfn;
	unsigned long nr_pt_pages;
	unsigned long count;
	//l2_pgentry_t *l2tab, *l2start;
	//l1_pgentry_t *l1tab = NULL, *l1start = NULL;
	struct pfn_info *page = NULL;
	start_info_t *si;
	struct exec_domain *ed = d->exec_domain[0];

	struct domain_setup_info dsi;
	unsigned long p_start;
	unsigned long pkern_start;
	unsigned long pkern_entry;
	unsigned long pkern_end;

	extern void physdev_init_dom0(struct domain *);

	//printf("construct_dom0: starting\n");
	/* Sanity! */
#ifndef CLONE_DOMAIN0
	if ( d != dom0 )
		BUG();
	if ( test_bit(DF_CONSTRUCTED, &d->d_flags) )
		BUG();
#endif

	memset(&dsi, 0, sizeof(struct domain_setup_info));

	printk("*** LOADING DOMAIN 0 ***\n");

	d->max_pages = dom0_size/PAGE_SIZE;
	image_start = (unsigned long) __va(ia64_boot_param->initrd_start);
	image_len = ia64_boot_param->initrd_size;
	//printk("image_start=%lx, image_len=%lx\n",image_start,image_len);
	//printk("First word of image: %lx\n",*(unsigned long *)image_start);

	//printf("construct_dom0: about to call parseelfimage\n");
	rc = parseelfimage(image_start, image_len, &dsi);
	if ( rc != 0 )
		return rc;

	p_start = dsi.v_start;
	pkern_start = dsi.v_kernstart;
	pkern_end = dsi.v_kernend;
	pkern_entry = dsi.v_kernentry;

	//printk("p_start=%lx, pkern_start=%lx, pkern_end=%lx, pkern_entry=%lx\n",p_start,pkern_start,pkern_end,pkern_entry);

	if ( (p_start & (PAGE_SIZE-1)) != 0 )
	{
		printk("Initial guest OS must load to a page boundary.\n");
		return -EINVAL;
	}

	printk("METAPHYSICAL MEMORY ARRANGEMENT:\n"
		" Kernel image:  %lx->%lx\n"
		" Entry address: %lx\n"
		" Init. ramdisk: (NOT IMPLEMENTED YET)\n",
		pkern_start, pkern_end, pkern_entry);

	if ( (pkern_end - pkern_start) > (d->max_pages * PAGE_SIZE) )
	{
		printk("Initial guest OS requires too much space\n"
			"(%luMB is greater than %luMB limit)\n",
			(pkern_end-pkern_start)>>20, (d->max_pages<<PAGE_SHIFT)>>20);
		return -ENOMEM;
	}

	// if high 3 bits of pkern start are non-zero, error

	// if pkern end is after end of metaphysical memory, error
	// (we should be able to deal with this... later)

#if 0
	strcpy(d->name,"Domain0");
#endif

	// prepare domain0 pagetable (maps METAphysical to physical)
	// following is roughly mm_init() in linux/kernel/fork.c
	d->arch.mm = kmem_cache_alloc(mm_cachep, SLAB_KERNEL);
	if (unlikely(!d->arch.mm)) {
		printk("Can't allocate mm_struct for domain0\n");
		return -ENOMEM;
	}
	memset(d->arch.mm, 0, sizeof(*d->arch.mm));
	d->arch.mm->pgd = pgd_alloc(d->arch.mm);
	if (unlikely(!d->arch.mm->pgd)) {
		printk("Can't allocate pgd for domain0\n");
		return -ENOMEM;
	}

	/* Mask all upcalls... */
	for ( i = 0; i < MAX_VIRT_CPUS; i++ )
		d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;

	/* Copy the OS image. */
	//(void)loadelfimage(image_start);
	loaddomainelfimage(d,image_start);

	/* Copy the initial ramdisk. */
	//if ( initrd_len != 0 )
	//	memcpy((void *)vinitrd_start, initrd_start, initrd_len);

#if 0
	/* Set up start info area. */
	//si = (start_info_t *)vstartinfo_start;
	memset(si, 0, PAGE_SIZE);
	si->nr_pages = d->tot_pages;
	si->shared_info = virt_to_phys(d->shared_info);
	si->flags = SIF_PRIVILEGED | SIF_INITDOMAIN;
	//si->pt_base = vpt_start;
	//si->nr_pt_frames = nr_pt_pages;
	//si->mfn_list = vphysmap_start;

	if ( initrd_len != 0 )
	{
		//si->mod_start = vinitrd_start;
		si->mod_len = initrd_len;
		printk("Initrd len 0x%lx, start at 0x%08lx\n",
			si->mod_len, si->mod_start);
	}

	dst = si->cmd_line;
	if ( cmdline != NULL )
	{
		for ( i = 0; i < 255; i++ )
		{
			if ( cmdline[i] == '\0' )
				break;
			*dst++ = cmdline[i];
		}
	}
	*dst = '\0';

	zap_low_mappings(); /* Do the same for the idle page tables. */
#endif

	/* Give up the VGA console if DOM0 is configured to grab it. */
#ifdef IA64
	if (cmdline != NULL)
#endif
	console_endboot(strstr(cmdline, "tty0") != NULL);

	/* DOM0 gets access to everything. */
#ifdef CLONE_DOMAIN0
	if (d == dom0)
#endif
	physdev_init_dom0(d);

	set_bit(DF_CONSTRUCTED, &d->d_flags);

	new_thread(ed, pkern_entry, 0, 0);
	// FIXME: Hack for keyboard input
#ifdef CLONE_DOMAIN0
	if (d == dom0)
#endif
	serial_input_init();
	if (d == dom0) {
		ed->vcpu_info->arch.delivery_mask[0] = -1L;
		ed->vcpu_info->arch.delivery_mask[1] = -1L;
		ed->vcpu_info->arch.delivery_mask[2] = -1L;
		ed->vcpu_info->arch.delivery_mask[3] = -1L;
	}
	else __set_bit(0x30,ed->vcpu_info->arch.delivery_mask);

	return 0;
}

void machine_restart(char * __unused)
{
	if (platform_is_hp_ski()) dummy();
	printf("machine_restart called: spinning....\n");
	while(1);
}

void machine_halt(void)
{
	if (platform_is_hp_ski()) dummy();
	printf("machine_halt called: spinning....\n");
	while(1);
}

void dummy(void)
{
	if (platform_is_hp_ski()) asm("break 0;;");
	printf("dummy called: spinning....\n");
	while(1);
}

#if 0
void switch_to(struct exec_domain *prev, struct exec_domain *next)
{
	struct exec_domain *last;

	__switch_to(prev,next,last);
	//set_current(next);
}
#endif

void domain_pend_keyboard_interrupt(int irq)
{
	vcpu_pend_interrupt(dom0->exec_domain[0],irq);
}