ia64/xen-unstable

view xen/arch/ia64/xen/xensetup.c @ 19788:2f9e1348aa98

x86_64: allow more vCPU-s per guest

Since the shared info layout is fixed, guests are required to use
VCPUOP_register_vcpu_info prior to booting any vCPU beyond the
traditional limit of 32.

MAX_VIRT_CPUS, being an implementation detail of the hypervisor, is no
longer exposed in the public headers.
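
For illustration, a guest would register a per-vCPU vcpu_info area along
the following lines before bringing up a vCPU at or above the old limit.
This is a minimal sketch only: the header path and HYPERVISOR_vcpu_op()
stand in for the guest's own hypercall plumbing, and per_cpu_info[],
NR_CPUS, virt_to_mfn() and PAGE_MASK are assumed guest-side definitions,
not part of this patch.

    #include <xen/interface/vcpu.h>  /* guest-side header providing VCPUOP_register_vcpu_info */

    /* Assumed per-CPU backing store, allocated by the guest itself. */
    static struct vcpu_info per_cpu_info[NR_CPUS];

    static int register_vcpu_info(unsigned int cpu)
    {
        struct vcpu_register_vcpu_info info;
        unsigned long addr = (unsigned long)&per_cpu_info[cpu];

        /* Tell Xen which frame and page offset hold this vCPU's vcpu_info. */
        info.mfn    = virt_to_mfn(addr);
        info.offset = addr & ~PAGE_MASK;
        info.rsvd   = 0;

        /* Must be issued before the vCPU is brought up; mandatory for cpu >= 32. */
        return HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
    }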

The tools changes are clearly incomplete (and done only so things would
build again), and the current state of the tools (using scalar variables
all over the place to represent vCPU bitmaps) very likely doesn't permit
booting DomU-s with more than the traditional number of vCPU-s. Testing
of the extended functionality was done with Dom0 (96 vCPU-s, as well as
128 vCPU-s out of which the kernel elected - by way of a simple kernel
side patch - to use only some, resulting in a sparse bitmap).

ia64 changes only to make things build, and build-tested only (and the
tools part only as far as the build would go without encountering
unrelated problems in the blktap code).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 18 10:14:16 2009 +0100 (2009-06-18)
parents 81d6b5762c40
/******************************************************************************
 * xensetup.c
 * Copyright (c) 2004-2005 Hewlett-Packard Co
 * Dan Magenheimer <dan.magenheimer@hp.com>
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/multiboot.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <public/version.h>
#include <xen/gdbstub.h>
#include <xen/version.h>
#include <xen/console.h>
#include <xen/domain.h>
#include <xen/serial.h>
#include <xen/trace.h>
#include <xen/keyhandler.h>
#include <xen/vga.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/vhpt.h>
#include <xen/string.h>
#include <asm/vmx.h>
#include <linux/efi.h>
#include <asm/iosapic.h>
#include <xen/softirq.h>
#include <xen/rcupdate.h>
#include <xsm/acm/acm_hooks.h>
#include <asm/sn/simulator.h>
#include <asm/sal.h>

unsigned long total_pages;

char saved_command_line[COMMAND_LINE_SIZE];
char __initdata dom0_command_line[COMMAND_LINE_SIZE];

cpumask_t cpu_present_map;

extern unsigned long domain0_ready;

int find_max_pfn (unsigned long, unsigned long, void *);

/* FIXME: which header should these declarations go in? */
extern void early_setup_arch(char **);
extern void late_setup_arch(char **);
extern void hpsim_serial_init(void);
extern void setup_per_cpu_areas(void);
extern void mem_init(void);
extern void init_IRQ(void);
extern void trap_init(void);
extern void xen_patch_kernel(void);

/* nosmp: ignore secondary processors */
static int __initdata opt_nosmp;
boolean_param("nosmp", opt_nosmp);

/* maxcpus: maximum number of CPUs to activate */
static unsigned int __initdata max_cpus = NR_CPUS;
integer_param("maxcpus", max_cpus);

/* xencons: toggle xenconsole input (and irq).
   Note: you have to disable 8250 serials in domains (to avoid use of the
   same resource). */
static int __initdata opt_xencons = 1;
integer_param("xencons", opt_xencons);

/* xencons_poll: toggle non-legacy xencons UARTs to run in polling mode */
static int __initdata opt_xencons_poll;
boolean_param("xencons_poll", opt_xencons_poll);

#define XENHEAP_DEFAULT_SIZE KERNEL_TR_PAGE_SIZE
#define XENHEAP_SIZE_MIN (16 * 1024 * 1024) /* 16MBytes */
unsigned long xenheap_size = XENHEAP_DEFAULT_SIZE;
unsigned long xen_pstart;

static int __init
xen_count_pages(u64 start, u64 end, void *arg)
{
    unsigned long *count = arg;

    /* FIXME: do we need to consider the difference between DMA-usable
     * memory and normal memory? It seems the hypervisor has no need to
     * perform DMA on memory owned by Dom0. */
    *count += (end - start) >> PAGE_SHIFT;
    return 0;
}

static void __init do_initcalls(void)
{
    initcall_t *call;
    for ( call = &__initcall_start; call < &__initcall_end; call++ )
        (*call)();
}

/*
 * The IPF loader currently supports only one command line, shared by
 * both Xen and the guest kernel. This function pre-parses the mixed
 * command line and splits it into two parts.
 *
 * The user should separate the parameters with "--"; everything after
 * the separator goes to the guest kernel. A missing "--" means the whole
 * line belongs to the guest. Example:
 *  "com2=57600,8n1 console=com2 -- console=ttyS1 console=tty
 *   root=/dev/sda3 ro"
 */
static char null[4] = { 0 };

void __init early_cmdline_parse(char **cmdline_p)
{
    char *guest_cmd;
    static const char * const split = "--";

    if (*cmdline_p == NULL) {
        *cmdline_p = &null[0];
        saved_command_line[0] = '\0';
        dom0_command_line[0] = '\0';
        return;
    }

    guest_cmd = strstr(*cmdline_p, split);
    /* If there is no separator, the whole line is for the guest. */
    if (guest_cmd == NULL) {
        guest_cmd = *cmdline_p;
        *cmdline_p = &null[0];
    } else {
        *guest_cmd = '\0'; /* Split boot parameters for xen and guest */
        guest_cmd += strlen(split);
        while (*guest_cmd == ' ') guest_cmd++;
    }

    strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
    strlcpy(dom0_command_line, guest_cmd, COMMAND_LINE_SIZE);
    return;
}

struct ns16550_defaults ns16550_com1 = {
    .data_bits = 8,
    .parity = 'n',
    .stop_bits = 1
};

unsigned int ns16550_com1_gsi;
unsigned int ns16550_com1_polarity;
unsigned int ns16550_com1_trigger;

struct ns16550_defaults ns16550_com2 = {
    .data_bits = 8,
    .parity = 'n',
    .stop_bits = 1
};

/* efi_print: print efi table at boot */
static int __initdata opt_efi_print;
boolean_param("efi_print", opt_efi_print);

/* print EFI memory map: */
static void __init
efi_print(void)
{
    void *efi_map_start, *efi_map_end;
    u64 efi_desc_size;

    efi_memory_desc_t *md;
    void *p;
    int i;

    if (!opt_efi_print)
        return;

    efi_map_start = __va(ia64_boot_param->efi_memmap);
    efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
    efi_desc_size = ia64_boot_param->efi_memdesc_size;

    for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
        md = p;
        printk("mem%02u: type=%2u, attr=0x%016lx, range=[0x%016lx-0x%016lx) "
               "(%luMB)\n", i, md->type, md->attribute, md->phys_addr,
               md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
               md->num_pages >> (20 - EFI_PAGE_SHIFT));
    }
}

/*
 * These functions are utility functions for getting and
 * testing memory descriptors for allocating the xenheap area.
 */
static efi_memory_desc_t * __init
efi_get_md (unsigned long phys_addr)
{
    void *efi_map_start, *efi_map_end, *p;
    efi_memory_desc_t *md;
    u64 efi_desc_size;

    efi_map_start = __va(ia64_boot_param->efi_memmap);
    efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
    efi_desc_size = ia64_boot_param->efi_memdesc_size;

    for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
        md = p;
        if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
            return md;
    }
    return 0;
}

static int __init
is_xenheap_usable_memory(efi_memory_desc_t *md)
{
    if (!(md->attribute & EFI_MEMORY_WB))
        return 0;

    switch (md->type) {
        case EFI_LOADER_CODE:
        case EFI_LOADER_DATA:
        case EFI_BOOT_SERVICES_CODE:
        case EFI_BOOT_SERVICES_DATA:
        case EFI_CONVENTIONAL_MEMORY:
            return 1;
    }
    return 0;
}

static inline int __init
md_overlaps(const efi_memory_desc_t *md, unsigned long phys_addr)
{
    return (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT));
}

static inline int __init
md_overlap_with_boot_param(const efi_memory_desc_t *md)
{
    return md_overlaps(md, __pa(ia64_boot_param)) ||
           md_overlaps(md, ia64_boot_param->efi_memmap) ||
           md_overlaps(md, ia64_boot_param->command_line);
}

#define MD_SIZE(md) ((md)->num_pages << EFI_PAGE_SHIFT)
#define MD_END(md) ((md)->phys_addr + MD_SIZE(md))

static unsigned long __init
efi_get_max_addr (void)
{
    void *efi_map_start, *efi_map_end, *p;
    efi_memory_desc_t *md;
    u64 efi_desc_size;
    unsigned long max_addr = 0;

    efi_map_start = __va(ia64_boot_param->efi_memmap);
    efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
    efi_desc_size = ia64_boot_param->efi_memdesc_size;

    for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
        md = p;
        if (is_xenheap_usable_memory(md) && MD_END(md) > max_addr)
            max_addr = MD_END(md);
    }
    return max_addr;
}

extern char __init_begin[], __init_end[];
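/*
 * Called once boot is complete: scrub and free the .init sections back to
 * the xenheap, then put this CPU into the idle loop.
 */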
static void noinline init_done(void)
{
    memset(__init_begin, 0, __init_end - __init_begin);
    flush_icache_range((unsigned long)__init_begin, (unsigned long)__init_end);
    init_xenheap_pages(__pa(__init_begin), __pa(__init_end));
    printk("Freed %ldkB init memory.\n",
           (long)(__init_end - __init_begin) >> 10);

    startup_cpu_idle_loop();
}
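
/*
 * Arguments passed to init_xenheap_mds() via efi_memmap_walk(): the
 * virtual start of the xenheap, the physical end of the xenheap region,
 * and the memory descriptor containing the Xen image itself.
 */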
struct xen_heap_desc {
    void* xen_heap_start;
    unsigned long xenheap_phys_end;
    efi_memory_desc_t* kern_md;
};

static int __init
init_xenheap_mds(unsigned long start, unsigned long end, void *arg)
{
    struct xen_heap_desc *desc = (struct xen_heap_desc*)arg;
    unsigned long md_end = __pa(desc->xen_heap_start);
    efi_memory_desc_t* md;

    start = __pa(start);
    end = __pa(end);

    for (md = efi_get_md(md_end);
         md != NULL && md->phys_addr < desc->xenheap_phys_end;
         md = efi_get_md(md_end)) {
        md_end = MD_END(md);

        if (md == desc->kern_md ||
            (md->type == EFI_LOADER_DATA && !md_overlap_with_boot_param(md)) ||
            ((md->attribute & EFI_MEMORY_WB) &&
             is_xenheap_usable_memory(md))) {
            unsigned long s = max(start, max(__pa(desc->xen_heap_start),
                                             md->phys_addr));
            unsigned long e = min(end, min(md_end, desc->xenheap_phys_end));
            init_boot_pages(s, e);
        }
    }

    return 0;
}

int running_on_sim;
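
/*
 * Detect the HP Ski simulator from the processor's CPUID registers (the
 * vendor string and the version/family fields); the magic values checked
 * below match the simulator's signature.
 */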
static int __init
is_platform_hp_ski(void)
{
    int i;
    long cpuid[6];

    for (i = 0; i < 5; ++i)
        cpuid[i] = ia64_get_cpuid(i);

    if ((cpuid[0] & 0xff) != 'H')
        return 0;
    if ((cpuid[3] & 0xff) != 0x4)
        return 0;
    if (((cpuid[3] >> 8) & 0xff) != 0x0)
        return 0;
    if (((cpuid[3] >> 16) & 0xff) != 0x0)
        return 0;
    if (((cpuid[3] >> 24) & 0x7) != 0x7)
        return 0;

    return 1;
}

#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
static int __initdata dom0_vhpt_size_log2;
integer_param("dom0_vhpt_size_log2", dom0_vhpt_size_log2);
#endif
unsigned long xen_fixed_mfn_start __read_mostly;
unsigned long xen_fixed_mfn_end __read_mostly;

void __init start_kernel(void)
{
    char *cmdline;
    unsigned long nr_pages = 0; /* accumulated by xen_count_pages() */
    unsigned long dom0_memory_start, dom0_memory_size;
    unsigned long dom0_initrd_start, dom0_initrd_size;
    unsigned long md_end, relo_start, relo_end, relo_size = 0;
    struct domain *idle_domain;
    struct vcpu *dom0_vcpu0;
    efi_memory_desc_t *kern_md, *last_md, *md;
    unsigned long xenheap_phys_end;
    void *xen_heap_start;
    struct xen_heap_desc heap_desc;
#ifdef CONFIG_SMP
    int i;
#endif

    /* Be sure the struct shared_info size is <= XSI_SIZE. */
    BUILD_BUG_ON(sizeof(struct shared_info) > XSI_SIZE);

    /* Kernel may be relocated by EFI loader */
    xen_pstart = ia64_tpa(KERNEL_START);

    running_on_sim = is_platform_hp_ski();

    early_setup_arch(&cmdline);

    /* We initialise the serial devices very early so we can get debugging. */
    if (running_on_sim)
        hpsim_serial_init();
    else {
        ns16550_init(0, &ns16550_com1);
        ns16550_init(1, &ns16550_com2);
    }

#ifdef CONFIG_VGA
    /* Plug in a default VGA mode */
    vga_console_info.video_type = XEN_VGATYPE_TEXT_MODE_3;
    vga_console_info.u.text_mode_3.font_height = 16; /* generic VGA? */
    vga_console_info.u.text_mode_3.cursor_x =
        ia64_boot_param->console_info.orig_x;
    vga_console_info.u.text_mode_3.cursor_y =
        ia64_boot_param->console_info.orig_y;
    vga_console_info.u.text_mode_3.rows =
        ia64_boot_param->console_info.num_rows;
    vga_console_info.u.text_mode_3.columns =
        ia64_boot_param->console_info.num_cols;
#endif

    console_init_preirq();

    if (running_on_sim || ia64_boot_param->domain_start == 0 ||
        ia64_boot_param->domain_size == 0) {
        /* This is possible only with the old elilo, which does not support
           a vmm. Fix now, and continue without initrd. */
        printk ("Your elilo is not Xen-aware. Bootparams fixed\n");
        ia64_boot_param->domain_start = ia64_boot_param->initrd_start;
        ia64_boot_param->domain_size = ia64_boot_param->initrd_size;
        ia64_boot_param->initrd_start = 0;
        ia64_boot_param->initrd_size = 0;
    }

    printk("Xen command line: %s\n", saved_command_line);

    /*
     * Test if the boot allocator bitmap will overflow xenheap_size. If
     * so, continue to bump it up until we have at least a minimum space
     * for the actual xenheap.
     */
    max_page = efi_get_max_addr() >> PAGE_SHIFT;
    while ((max_page >> 3) > xenheap_size - XENHEAP_SIZE_MIN)
        xenheap_size <<= 1;

    xenheap_phys_end = xen_pstart + xenheap_size;
    printk("xen image pstart: 0x%lx, xenheap pend: 0x%lx\n",
           xen_pstart, xenheap_phys_end);

    xen_patch_kernel();

    kern_md = md = efi_get_md(xen_pstart);
    md_end = __pa(ia64_imva(&_end));
    relo_start = xenheap_phys_end;

    /*
     * Scan through the memory descriptors after the kernel
     * image to make sure we have enough room for the xenheap
     * area, pushing out whatever may already be there.
     */
    while (relo_start + relo_size >= md_end) {
        md = efi_get_md(md_end);

        if (md == NULL) {
            printk("no room to move loader data. skip moving loader data\n");
            goto skip_move;
        }

        md_end = MD_END(md);
        if (relo_start < md->phys_addr)
            relo_start = md->phys_addr;

        if (!is_xenheap_usable_memory(md)) {
            /* Skip this area */
            if (md_end > relo_start)
                relo_start = md_end;
            continue;
        }

        /*
         * The dom0 kernel or initrd could overlap, reserve space
         * at the end to relocate them later.
         */
        if (md->type == EFI_LOADER_DATA) {
            /* Test for ranges we're not prepared to move */
            if (!md_overlap_with_boot_param(md))
                relo_size += MD_SIZE(md);

            /* If range overlaps the end, push out the relocation start */
            if (md_end > relo_start)
                relo_start = md_end;
        }
    }
    last_md = md;
    relo_start = md_end - relo_size;
    relo_end = relo_start + relo_size;

    md_end = __pa(ia64_imva(&_end));

    /*
     * Move any relocated data out into the previously found relocation
     * area. Any extra memory descriptors are moved out to the end
     * and set to zero pages.
     */
    for (md = efi_get_md(md_end) ;; md = efi_get_md(md_end)) {
        md_end = MD_END(md);

        if (md->type == EFI_LOADER_DATA && !md_overlap_with_boot_param(md)) {
            unsigned long relo_offset;

            if (md_overlaps(md, ia64_boot_param->domain_start)) {
                relo_offset = ia64_boot_param->domain_start - md->phys_addr;
                printk("Moving Dom0 kernel image: 0x%lx -> 0x%lx (%ld KiB)\n",
                       ia64_boot_param->domain_start, relo_start + relo_offset,
                       ia64_boot_param->domain_size >> 10);
                ia64_boot_param->domain_start = relo_start + relo_offset;
            }
            if (ia64_boot_param->initrd_size &&
                md_overlaps(md, ia64_boot_param->initrd_start)) {
                relo_offset = ia64_boot_param->initrd_start - md->phys_addr;
                printk("Moving Dom0 initrd image: 0x%lx -> 0x%lx (%ld KiB)\n",
                       ia64_boot_param->initrd_start, relo_start + relo_offset,
                       ia64_boot_param->initrd_size >> 10);
                ia64_boot_param->initrd_start = relo_start + relo_offset;
            }
            memcpy(__va(relo_start), __va(md->phys_addr), MD_SIZE(md));
            relo_start += MD_SIZE(md);
        }

        if (md == last_md)
            break;
    }

    /* Trim the last entry */
    md->num_pages -= (relo_size >> EFI_PAGE_SHIFT);

skip_move:
    reserve_memory();

    /* first find highest page frame number */
    max_page = 0;
    efi_memmap_walk(find_max_pfn, &max_page);
    printk("find_memory: efi_memmap_walk returns max_page=%lx\n", max_page);
    efi_print();

    xen_heap_start = memguard_init(ia64_imva(&_end));
    printk("Before xen_heap_start: %p\n", xen_heap_start);
    xen_heap_start = __va(init_boot_allocator(__pa(xen_heap_start)));
    printk("After xen_heap_start: %p\n", xen_heap_start);

    efi_memmap_walk(filter_rsvd_memory, init_boot_pages);
    efi_memmap_walk(xen_count_pages, &nr_pages);

    printk("System RAM: %luMB (%lukB)\n",
           nr_pages >> (20 - PAGE_SHIFT),
           nr_pages << (PAGE_SHIFT - 10));
    total_pages = nr_pages;

    init_frametable();

    trap_init();

    /* process SAL system table */
    /* must be before any pal/sal call */
    BUG_ON(efi.sal_systab == EFI_INVALID_TABLE_ADDR);
    ia64_sal_init(__va(efi.sal_systab));

    /* early_setup_arch() maps PAL code. */
    identify_vmx_feature();
    /* If vmx feature is on, do necessary initialization for vmx */
    if (vmx_enabled)
        xen_heap_start = vmx_init_env(xen_heap_start, xenheap_phys_end);

    /* Allocate memory for the per-cpu area. per_cpu_init(), called from
     * late_setup_arch(), runs after end_boot_allocator(), which is too
     * late to allocate memory in the xen virtual address area. */
    xen_heap_start = per_cpu_allocate(xen_heap_start, xenheap_phys_end);

    heap_desc.xen_heap_start = xen_heap_start;
    heap_desc.xenheap_phys_end = xenheap_phys_end;
    heap_desc.kern_md = kern_md;
    efi_memmap_walk(&init_xenheap_mds, &heap_desc);

    printk("Xen heap: %luMB (%lukB)\n",
           (xenheap_phys_end - __pa(xen_heap_start)) >> 20,
           (xenheap_phys_end - __pa(xen_heap_start)) >> 10);

    /* for is_xen_fixed_mfn() */
    xen_fixed_mfn_start = virt_to_mfn(&_start);
    xen_fixed_mfn_end = virt_to_mfn(xen_heap_start);

    end_boot_allocator();

    softirq_init();

    late_setup_arch(&cmdline);

    scheduler_init();
    idle_vcpu[0] = (struct vcpu*) ia64_r13;
    idle_domain = domain_create(IDLE_DOMAIN_ID, 0, 0);
    if ( idle_domain == NULL )
        BUG();
    idle_domain->vcpu = idle_vcpu;
    idle_domain->max_vcpus = NR_CPUS;
    if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
        BUG();

    alloc_dom_xen_and_dom_io();
    setup_per_cpu_areas();
    mem_init();

    local_irq_disable();
    init_IRQ ();
    init_xen_time(); /* initialise the time */
    timer_init();

    rcu_init();

#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
    open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);
#endif

#ifdef CONFIG_SMP
    if ( opt_nosmp )
    {
        max_cpus = 0;
        smp_num_siblings = 1;
        //boot_cpu_data.x86_num_cores = 1;
    }

    /* A vcpu is created for the idle domain on every physical cpu.
       Limit the number of cpus to the maximum number of vcpus. */
    if (max_cpus > MAX_VIRT_CPUS)
        max_cpus = MAX_VIRT_CPUS;

    smp_prepare_cpus(max_cpus);

    /* We aren't hotplug-capable yet. */
    for_each_cpu ( i )
        cpu_set(i, cpu_present_map);

    /* Enable IRQ to receive IPI (needed for ITC sync). */
    local_irq_enable();

    printk("num_online_cpus=%d, max_cpus=%d\n", num_online_cpus(), max_cpus);
    for_each_present_cpu ( i )
    {
        if ( num_online_cpus() >= max_cpus )
            break;
        if ( !cpu_online(i) ) {
            rcu_online_cpu(i);
            __cpu_up(i);
        }
    }

    local_irq_disable();

    printk("Brought up %ld CPUs\n", (long)num_online_cpus());
    smp_cpus_done(max_cpus);
#endif

    initialise_gdb(); /* could be moved earlier */

    do_initcalls();
    sort_main_extable();

    init_rid_allocator ();

    local_irq_enable();

    if (opt_xencons) {
        initialize_keytable();
        if (ns16550_com1_gsi) {
            if (opt_xencons_poll ||
                iosapic_register_intr(ns16550_com1_gsi,
                                      ns16550_com1_polarity,
                                      ns16550_com1_trigger) < 0) {
                ns16550_com1.irq = 0;
                ns16550_init(0, &ns16550_com1);
            }
        }
        console_init_postirq();
    }

    expose_p2m_init();

    /* Create initial domain 0. */
    dom0 = domain_create(0, 0, DOM0_SSIDREF);
    if (dom0 == NULL)
        panic("Error creating domain 0\n");
    domain_set_vhpt_size(dom0, dom0_vhpt_size_log2);
    dom0_vcpu0 = alloc_dom0_vcpu0();
    if (dom0_vcpu0 == NULL || vcpu_late_initialise(dom0_vcpu0) != 0)
        panic("Cannot allocate dom0 vcpu 0\n");

    dom0->is_privileged = 1;
    dom0->target = NULL;

    /*
     * We're going to set up domain0 using the module(s) that we stashed
     * safely above our heap. The second module, if present, is an initrd
     * ramdisk.
     */
    dom0_memory_start = (unsigned long) __va(ia64_boot_param->domain_start);
    dom0_memory_size = ia64_boot_param->domain_size;
    dom0_initrd_start = (unsigned long) __va(ia64_boot_param->initrd_start);
    dom0_initrd_size = ia64_boot_param->initrd_size;

    if ( construct_dom0(dom0, dom0_memory_start, dom0_memory_size,
                        dom0_initrd_start, dom0_initrd_size,
                        0) != 0)
        panic("Could not set up DOM0 guest OS\n");

    /* Slow on Ski, and pages are pre-initialized to zero there. */
    if (!running_on_sim && !IS_MEDUSA())
        scrub_heap_pages();

    init_trace_bufs();

    if (opt_xencons) {
        console_endboot();
        serial_endboot();
    }

    domain0_ready = 1;

    domain_unpause_by_systemcontroller(dom0);

    init_done();
}

void arch_get_xen_caps(xen_capabilities_info_t *info)
{
    /* Interface name is always xen-3.0-* for Xen-3.x. */
    int major = 3, minor = 0;
    char s[32];

    (*info)[0] = '\0';

    snprintf(s, sizeof(s), "xen-%d.%d-ia64 ", major, minor);
    safe_strcat(*info, s);

    snprintf(s, sizeof(s), "xen-%d.%d-ia64be ", major, minor);
    safe_strcat(*info, s);

    if (vmx_enabled)
    {
        snprintf(s, sizeof(s), "hvm-%d.%d-ia64 ", major, minor);
        safe_strcat(*info, s);

        snprintf(s, sizeof(s), "hvm-%d.%d-ia64-sioemu ", major, minor);
        safe_strcat(*info, s);
    }
}
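
/* Return non-zero if the physical range [start, end) overlaps the Xen image. */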
int xen_in_range(paddr_t start, paddr_t end)
{
    paddr_t xs = __pa(&_start);
    paddr_t xe = __pa(&_end);

    return (start < xe) && (end > xs);
}