ia64/xen-unstable

view xen/arch/x86/setup.c @ 12226:45e34f00a78f

[HVM] Clean up VCPU initialisation in Xen. No longer
parse HVM e820 tables in Xen (add some extra HVM parameters as a
cleaner alternative). Lots of code removal.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Nov 02 15:55:51 2006 +0000 (2006-11-02)
parents 7b5115221dfc
children 2b43fb3afb3e
line source
1 #include <xen/config.h>
2 #include <xen/init.h>
3 #include <xen/lib.h>
4 #include <xen/sched.h>
5 #include <xen/domain.h>
6 #include <xen/serial.h>
7 #include <xen/softirq.h>
8 #include <xen/acpi.h>
9 #include <xen/console.h>
10 #include <xen/serial.h>
11 #include <xen/trace.h>
12 #include <xen/multiboot.h>
13 #include <xen/domain_page.h>
14 #include <xen/version.h>
15 #include <xen/gdbstub.h>
16 #include <xen/percpu.h>
17 #include <xen/hypercall.h>
18 #include <xen/keyhandler.h>
19 #include <xen/numa.h>
20 #include <public/version.h>
21 #include <asm/bitops.h>
22 #include <asm/smp.h>
23 #include <asm/processor.h>
24 #include <asm/mpspec.h>
25 #include <asm/apic.h>
26 #include <asm/desc.h>
27 #include <asm/shadow.h>
28 #include <asm/e820.h>
29 #include <acm/acm_hooks.h>
/* Prototypes for routines defined in other translation units. */
extern void dmi_scan_machine(void);
extern void generic_apic_probe(void);
extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);

/*
 * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
 * page_info table and allocation bitmap.
 */
static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
#if defined(CONFIG_X86_64)
/* Only x86/64 exposes the heap size as a boot-time parameter. */
integer_param("xenheap_megabytes", opt_xenheap_megabytes);
#endif

/* opt_nosmp: If true, secondary processors are ignored. */
static int opt_nosmp = 0;
boolean_param("nosmp", opt_nosmp);

/* maxcpus: maximum number of CPUs to activate. */
static unsigned int max_cpus = NR_CPUS;
integer_param("maxcpus", max_cpus);

/* opt_watchdog: If true, run a watchdog NMI on each processor. */
static int opt_watchdog = 0;
boolean_param("watchdog", opt_watchdog);

/* **** Linux config option: propagated to domain0. */
/* "acpi=off": Disables both ACPI table parsing and interpreter. */
/* "acpi=force": Override the disable blacklist. */
/* "acpi=strict": Disables out-of-spec workarounds. */
/* "acpi=ht": Limit ACPI just to boot-time to enable HT. */
/* "acpi=noirq": Disables ACPI interrupt routing. */
static void parse_acpi_param(char *s);
custom_param("acpi", parse_acpi_param);

/* **** Linux config option: propagated to domain0. */
/* acpi_skip_timer_override: Skip IRQ0 overrides. */
extern int acpi_skip_timer_override;
boolean_param("acpi_skip_timer_override", acpi_skip_timer_override);

/* **** Linux config option: propagated to domain0. */
/* noapic: Disable IOAPIC setup. */
extern int skip_ioapic_setup;
boolean_param("noapic", skip_ioapic_setup);

/* Cleared by __start_xen() once the heaps have been initialised. */
int early_boot = 1;

cpumask_t cpu_present_map;

/* Limits of Xen heap, used to initialise the allocator. */
unsigned long xenheap_phys_start, xenheap_phys_end;

extern void arch_init_memory(void);
extern void init_IRQ(void);
extern void trap_init(void);
extern void early_time_init(void);
extern void early_cpu_init(void);

/* Per-CPU hardware task-state segments. */
struct tss_struct init_tss[NR_CPUS];

extern unsigned long cpu0_stack[];

struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };

#if CONFIG_PAGING_LEVELS > 2
unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
#else
unsigned long mmu_cr4_features = X86_CR4_PSE;
#endif
EXPORT_SYMBOL(mmu_cr4_features);

int acpi_disabled;

int acpi_force;
/* Verbatim copy of the "acpi=" parameter, later appended to dom0's cmdline. */
char acpi_param[10] = "";
105 static void parse_acpi_param(char *s)
106 {
107 /* Save the parameter so it can be propagated to domain0. */
108 strncpy(acpi_param, s, sizeof(acpi_param));
109 acpi_param[sizeof(acpi_param)-1] = '\0';
111 /* Interpret the parameter for use within Xen. */
112 if ( !strcmp(s, "off") )
113 {
114 disable_acpi();
115 }
116 else if ( !strcmp(s, "force") )
117 {
118 acpi_force = 1;
119 acpi_ht = 1;
120 acpi_disabled = 0;
121 }
122 else if ( !strcmp(s, "strict") )
123 {
124 acpi_strict = 1;
125 }
126 else if ( !strcmp(s, "ht") )
127 {
128 if ( !acpi_force )
129 disable_acpi();
130 acpi_ht = 1;
131 }
132 else if ( !strcmp(s, "noirq") )
133 {
134 acpi_noirq_set();
135 }
136 }
138 static void __init do_initcalls(void)
139 {
140 initcall_t *call;
141 for ( call = &__initcall_start; call < &__initcall_end; call++ )
142 (*call)();
143 }
/* Hang forever, executing HLT; used for fatal errors before panic() works. */
#define EARLY_FAIL() for ( ; ; ) __asm__ __volatile__ ( "hlt" )

/* Raw e820 map as handed over by the bootloader, before sanitisation. */
static struct e820entry e820_raw[E820MAX];

/* Physical range into which the dom0 boot modules were relocated. */
static unsigned long initial_images_start, initial_images_end;
151 unsigned long initial_images_nrpages(void)
152 {
153 unsigned long s = initial_images_start + PAGE_SIZE - 1;
154 unsigned long e = initial_images_end;
155 return ((e >> PAGE_SHIFT) - (s >> PAGE_SHIFT));
156 }
/* Return the memory occupied by the boot modules to the domain heap. */
void discard_initial_images(void)
{
    init_domheap_pages(initial_images_start, initial_images_end);
}
/* Linker-provided bounds of the per-CPU data area. */
extern char __per_cpu_start[], __per_cpu_data_end[], __per_cpu_end[];

/*
 * Unguard each CPU's per-CPU stride and seed it with a copy of the initial
 * data image (CPU0's stride already holds the original image).
 */
static void __init percpu_init_areas(void)
{
    unsigned int i, data_size = __per_cpu_data_end - __per_cpu_start;

    /* The initialised data image must fit within one per-CPU stride. */
    BUG_ON(data_size > PERCPU_SIZE);

    for_each_cpu ( i )
    {
        memguard_unguard_range(__per_cpu_start + (i << PERCPU_SHIFT),
                               1 << PERCPU_SHIFT);
        if ( i != 0 )
            memcpy(__per_cpu_start + (i << PERCPU_SHIFT),
                   __per_cpu_start,
                   data_size);
    }
}
/*
 * Guard the entire per-CPU region; percpu_init_areas() later unguards the
 * strides that are actually used.
 */
static void __init percpu_guard_areas(void)
{
    memguard_guard_range(__per_cpu_start, __per_cpu_end - __per_cpu_start);
}
187 static void __init percpu_free_unused_areas(void)
188 {
189 unsigned int i, first_unused;
191 /* Find first unused CPU number. */
192 for ( i = 0; i < NR_CPUS; i++ )
193 if ( !cpu_online(i) )
194 break;
195 first_unused = i;
197 /* Check that there are no holes in cpu_online_map. */
198 for ( ; i < NR_CPUS; i++ )
199 BUG_ON(cpu_online(i));
201 #ifndef MEMORY_GUARD
202 init_xenheap_pages(__pa(__per_cpu_start) + (first_unused << PERCPU_SHIFT),
203 __pa(__per_cpu_end));
204 #endif
205 }
/*
 * Fetch acm policy module from multiboot modules.
 * Scans modules from last to second, reporting the first one recognised by
 * acm_is_policy() via *_policy_start/*_policy_len. If the policy occupies
 * module slot 1 (where the initrd normally lives), *initrdidx is adjusted
 * to point past it, or to 0 when no further module exists.
 */
static void extract_acm_policy(
    multiboot_info_t *mbi,
    unsigned int *initrdidx,
    char **_policy_start,
    unsigned long *_policy_len)
{
    int i;
    module_t *mod = (module_t *)__va(mbi->mods_addr);
    unsigned long start, policy_len;
    char *policy_start;

    /*
     * Try all modules and see whichever could be the binary policy.
     * Adjust the initrdidx if module[1] is the binary policy.
     */
    for ( i = mbi->mods_count-1; i >= 1; i-- )
    {
        /* Modules were relocated en bloc; rebase relative to module 0. */
        start = initial_images_start + (mod[i].mod_start-mod[0].mod_start);
#if defined(__i386__)
        /* i386: low physical memory is identity-accessible here. */
        policy_start = (char *)start;
#elif defined(__x86_64__)
        policy_start = __va(start);
#endif
        policy_len = mod[i].mod_end - mod[i].mod_start;
        if ( acm_is_policy(policy_start, policy_len) )
        {
            printk("Policy len 0x%lx, start at %p - module %d.\n",
                   policy_len, policy_start, i);
            *_policy_start = policy_start;
            *_policy_len = policy_len;
            if ( i == 1 )
                *initrdidx = (mbi->mods_count > 2) ? 2 : 0;
            break;
        }
    }
}
/*
 * Create the idle domain, give it VCPU0, and install that vcpu as the boot
 * CPU's current vcpu, then set up the idle pagetable.
 */
static void __init init_idle_domain(void)
{
    struct domain *idle_domain;

    /* Domain creation requires that scheduler structures are initialised. */
    scheduler_init();

    idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
    if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
        BUG();

    set_current(idle_domain->vcpu[0]);
    idle_vcpu[0] = this_cpu(curr_vcpu) = current;

    setup_idle_pagetable();
}
262 static void srat_detect_node(int cpu)
263 {
264 unsigned node;
265 u8 apicid = x86_cpu_to_apicid[cpu];
267 node = apicid_to_node[apicid];
268 if ( node == NUMA_NO_NODE )
269 node = 0;
270 numa_set_node(cpu, node);
272 if ( acpi_numa > 0 )
273 printk(KERN_INFO "CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
274 }
/*
 * C entry point after the assembly boot trampoline. Parses the multiboot
 * information, initialises memory, consoles, ACPI, APIC, SMP and the
 * scheduler, constructs domain 0, and finally enters the idle loop.
 * Never returns.
 */
void __init __start_xen(multiboot_info_t *mbi)
{
    char __cmdline[] = "", *cmdline = __cmdline;
    unsigned long _initrd_start = 0, _initrd_len = 0;
    unsigned int initrdidx = 1;
    char *_policy_start = NULL;
    unsigned long _policy_len = 0;
    module_t *mod = (module_t *)__va(mbi->mods_addr);
    unsigned long nr_pages, modules_length;
    paddr_t s, e;
    int i, e820_warn = 0, e820_raw_nr = 0, bytes = 0;
    struct ns16550_defaults ns16550 = {
        .data_bits = 8,
        .parity = 'n',
        .stop_bits = 1
    };

    /* Parse the command-line options. */
    if ( (mbi->flags & MBI_CMDLINE) && (mbi->cmdline != 0) )
        cmdline = __va(mbi->cmdline);
    cmdline_parse(cmdline);

    set_current((struct vcpu *)0xfffff000); /* debug sanity */
    idle_vcpu[0] = current;
    set_processor_id(0); /* needed early, for smp_processor_id() */

    smp_prepare_boot_cpu();

    /* We initialise the serial devices very early so we can get debugging. */
    ns16550.io_base = 0x3f8;   /* COM1 */
    ns16550.irq = 4;
    ns16550_init(0, &ns16550);
    ns16550.io_base = 0x2f8;   /* COM2 */
    ns16550.irq = 3;
    ns16550_init(1, &ns16550);
    serial_init_preirq();

    init_console();

    printk("Command line: %s\n", cmdline);

    /* Check that we have at least one Multiboot module. */
    if ( !(mbi->flags & MBI_MODULES) || (mbi->mods_count == 0) )
    {
        printk("FATAL ERROR: dom0 kernel not specified."
               " Check bootloader configuration.\n");
        EARLY_FAIL();
    }

    if ( ((unsigned long)cpu0_stack & (STACK_SIZE-1)) != 0 )
    {
        printk("FATAL ERROR: Misaligned CPU0 stack.\n");
        EARLY_FAIL();
    }

    /*
     * Since there are some stubs getting built on the stacks which use
     * direct calls/jumps, the heap must be confined to the lower 2G so
     * that those branches can reach their targets.
     */
    if ( opt_xenheap_megabytes > 2048 )
        opt_xenheap_megabytes = 2048;
    xenheap_phys_end = opt_xenheap_megabytes << 20;

    /* Build the raw e820 table from the bootloader's memory information. */
    if ( mbi->flags & MBI_MEMMAP )
    {
        while ( bytes < mbi->mmap_length )
        {
            memory_map_t *map = __va(mbi->mmap_addr + bytes);

            /*
             * This is a gross workaround for a BIOS bug. Some bootloaders do
             * not write e820 map entries into pre-zeroed memory. This is
             * okay if the BIOS fills in all fields of the map entry, but
             * some broken BIOSes do not bother to write the high word of
             * the length field if the length is smaller than 4GB. We
             * detect and fix this by flagging sections below 4GB that
             * appear to be larger than 4GB in size.
             */
            if ( (map->base_addr_high == 0) && (map->length_high != 0) )
            {
                e820_warn = 1;
                map->length_high = 0;
            }

            e820_raw[e820_raw_nr].addr =
                ((u64)map->base_addr_high << 32) | (u64)map->base_addr_low;
            e820_raw[e820_raw_nr].size =
                ((u64)map->length_high << 32) | (u64)map->length_low;
            e820_raw[e820_raw_nr].type =
                (map->type > E820_NVS) ? E820_RESERVED : map->type;
            e820_raw_nr++;

            /* Advance past the entry plus its own 4-byte 'size' field. */
            bytes += map->size + 4;
        }
    }
    else if ( mbi->flags & MBI_MEMLIMITS )
    {
        /* No full map: synthesise two RAM ranges from mem_lower/mem_upper. */
        e820_raw[0].addr = 0;
        e820_raw[0].size = mbi->mem_lower << 10;
        e820_raw[0].type = E820_RAM;
        e820_raw[1].addr = 0x100000;
        e820_raw[1].size = mbi->mem_upper << 10;
        e820_raw[1].type = E820_RAM;
        e820_raw_nr = 2;
    }
    else
    {
        printk("FATAL ERROR: Bootloader provided no memory information.\n");
        for ( ; ; ) ;
    }

    if ( e820_warn )
        printk("WARNING: Buggy e820 map detected and fixed "
               "(truncated length fields).\n");

    max_page = init_e820(e820_raw, &e820_raw_nr);

    modules_length = mod[mbi->mods_count-1].mod_end - mod[0].mod_start;

    /* Find a large enough RAM extent to stash the DOM0 modules. */
    for ( i = 0; ; i++ )
    {
        if ( i == e820.nr_map )
        {
            printk("Not enough memory to stash the DOM0 kernel image.\n");
            for ( ; ; ) ;
        }

        /* Must hold all modules above the Xen heap, within one extent. */
        if ( (e820.map[i].type == E820_RAM) &&
             (e820.map[i].size >= modules_length) &&
             ((e820.map[i].addr + e820.map[i].size) >=
              (xenheap_phys_end + modules_length)) )
            break;
    }

    /* Stash as near as possible to the beginning of the RAM extent. */
    initial_images_start = e820.map[i].addr;
    if ( initial_images_start < xenheap_phys_end )
        initial_images_start = xenheap_phys_end;
    initial_images_end = initial_images_start + modules_length;

#if defined(CONFIG_X86_32)
    memmove((void *)initial_images_start,  /* use low mapping */
            (void *)mod[0].mod_start,      /* use low mapping */
            mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
#elif defined(CONFIG_X86_64)
    memmove(__va(initial_images_start),
            __va(mod[0].mod_start),
            mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
#endif

    /* Initialise boot-time allocator with all RAM situated after modules. */
    xenheap_phys_start = init_boot_allocator(__pa(&_end));
    nr_pages = 0;
    for ( i = 0; i < e820.nr_map; i++ )
    {
        if ( e820.map[i].type != E820_RAM )
            continue;

        nr_pages += e820.map[i].size >> PAGE_SHIFT;

        /* Initialise boot heap, skipping Xen heap and dom0 modules. */
        s = e820.map[i].addr;
        e = s + e820.map[i].size;
        if ( s < xenheap_phys_end )
            s = xenheap_phys_end;
        if ( (s < initial_images_end) && (e > initial_images_start) )
            s = initial_images_end;
        init_boot_pages(s, e);

#if defined (CONFIG_X86_64)
        /*
         * x86/64 maps all registered RAM. Points to note:
         * 1. The initial pagetable already maps low 1GB, so skip that.
         * 2. We must map *only* RAM areas, taking care to avoid I/O holes.
         *    Failure to do this can cause coherency problems and deadlocks
         *    due to cache-attribute mismatches (e.g., AMD/AGP Linux bug).
         */
        {
            /* Calculate page-frame range, discarding partial frames. */
            unsigned long start, end;
            unsigned long init_mapped = 1UL << (30 - PAGE_SHIFT); /* 1GB */
            start = PFN_UP(e820.map[i].addr);
            end   = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
            /* Clip the range to exclude what the bootstrapper initialised. */
            if ( end < init_mapped )
                continue;
            if ( start < init_mapped )
                start = init_mapped;
            /* Request the mapping. */
            map_pages_to_xen(
                PAGE_OFFSET + (start << PAGE_SHIFT),
                start, end-start, PAGE_HYPERVISOR);
        }
#endif
    }

    memguard_init();
    percpu_guard_areas();

    printk("System RAM: %luMB (%lukB)\n",
           nr_pages >> (20 - PAGE_SHIFT),
           nr_pages << (PAGE_SHIFT - 10));
    total_pages = nr_pages;

    /* Sanity check for unwanted bloat of certain hypercall structures. */
    BUILD_BUG_ON(sizeof(((struct xen_platform_op *)0)->u) !=
                 sizeof(((struct xen_platform_op *)0)->u.pad));
    BUILD_BUG_ON(sizeof(((struct xen_domctl *)0)->u) !=
                 sizeof(((struct xen_domctl *)0)->u.pad));
    BUILD_BUG_ON(sizeof(((struct xen_sysctl *)0)->u) !=
                 sizeof(((struct xen_sysctl *)0)->u.pad));

    BUILD_BUG_ON(sizeof(start_info_t) > PAGE_SIZE);
    BUILD_BUG_ON(sizeof(shared_info_t) > PAGE_SIZE);
    BUILD_BUG_ON(sizeof(vcpu_info_t) != 64);

    /* Check definitions in public headers match internal defs. */
    BUILD_BUG_ON(__HYPERVISOR_VIRT_START != HYPERVISOR_VIRT_START);
#ifdef HYPERVISOR_VIRT_END
    BUILD_BUG_ON(__HYPERVISOR_VIRT_END   != HYPERVISOR_VIRT_END);
#endif
    BUILD_BUG_ON(MACH2PHYS_VIRT_START != RO_MPT_VIRT_START);
    BUILD_BUG_ON(MACH2PHYS_VIRT_END   != RO_MPT_VIRT_END);

    init_frametable();

    acpi_boot_table_init();

    acpi_numa_init();

    numa_initmem_init(0, max_page);

    /* Boot allocator done: remaining pages migrate to the real heaps. */
    end_boot_allocator();

    /* Initialise the Xen heap, skipping RAM holes. */
    nr_pages = 0;
    for ( i = 0; i < e820.nr_map; i++ )
    {
        if ( e820.map[i].type != E820_RAM )
            continue;

        s = e820.map[i].addr;
        e = s + e820.map[i].size;
        if ( s < xenheap_phys_start )
            s = xenheap_phys_start;
        if ( e > xenheap_phys_end )
            e = xenheap_phys_end;

        if ( s < e )
        {
            nr_pages += (e - s) >> PAGE_SHIFT;
            init_xenheap_pages(s, e);
        }
    }

    printk("Xen heap: %luMB (%lukB)\n",
           nr_pages >> (20 - PAGE_SHIFT),
           nr_pages << (PAGE_SHIFT - 10));

    early_boot = 0;

    early_cpu_init();

    paging_init();

    /* Unmap the first page of CPU0's stack. */
    memguard_guard_stack(cpu0_stack);

    open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);

    if ( opt_watchdog )
        nmi_watchdog = NMI_LOCAL_APIC;

    sort_exception_tables();

    find_smp_config();

    smp_alloc_memory();

    dmi_scan_machine();

    generic_apic_probe();

    acpi_boot_init();

    init_cpu_to_node();

    if ( smp_found_config )
        get_smp_config();

    init_apic_mappings();

    init_IRQ();

    percpu_init_areas();

    init_idle_domain();

    trap_init();

    timer_init();

    early_time_init();

    arch_init_memory();

    identify_cpu(&boot_cpu_data);
    if ( cpu_has_fxsr )
        set_in_cr4(X86_CR4_OSFXSR);
    if ( cpu_has_xmm )
        set_in_cr4(X86_CR4_OSXMMEXCPT);

    if ( opt_nosmp )
        max_cpus = 0;

    smp_prepare_cpus(max_cpus);

    /*
     * Initialise higher-level timer functions. We do this fairly late
     * (post-SMP) because the time bases and scale factors need to be updated
     * regularly, and SMP initialisation can cause a long delay with
     * interrupts not yet enabled.
     */
    init_xen_time();

    initialize_keytable();

    serial_init_postirq();

    BUG_ON(!local_irq_is_enabled());

    /* Bring up secondary CPUs and record their NUMA placement. */
    for_each_present_cpu ( i )
    {
        if ( num_online_cpus() >= max_cpus )
            break;
        if ( !cpu_online(i) )
            __cpu_up(i);

        /* Set up cpu_to_node[]. */
        srat_detect_node(i);
        /* Set up node_to_cpumask based on cpu_to_node[]. */
        numa_add_cpu(i);
    }

    printk("Brought up %ld CPUs\n", (long)num_online_cpus());
    smp_cpus_done(max_cpus);

    percpu_free_unused_areas();

    initialise_gdb(); /* could be moved earlier */

    do_initcalls();

    schedulers_start();

    if ( opt_watchdog )
        watchdog_enable();

    /* Extract policy from multiboot.  */
    extract_acm_policy(mbi, &initrdidx, &_policy_start, &_policy_len);

    /* initialize access control security module */
    acm_init(_policy_start, _policy_len);

    /* Create initial domain 0. */
    dom0 = domain_create(0, 0);
    if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
        panic("Error creating domain 0\n");

    dom0->is_privileged = 1;

    /* Post-create hook sets security label. */
    acm_post_domain0_create(dom0->domain_id);

    /* Grab the DOM0 command line. */
    cmdline = (char *)(mod[0].string ? __va(mod[0].string) : NULL);
    if ( cmdline != NULL )
    {
        static char dom0_cmdline[MAX_GUEST_CMDLINE];

        /* Skip past the image name and copy to a local buffer. */
        while ( *cmdline == ' ' ) cmdline++;
        if ( (cmdline = strchr(cmdline, ' ')) != NULL )
        {
            while ( *cmdline == ' ' ) cmdline++;
            strcpy(dom0_cmdline, cmdline);
        }

        cmdline = dom0_cmdline;

        /* Append any extra parameters. */
        if ( skip_ioapic_setup && !strstr(cmdline, "noapic") )
            strcat(cmdline, " noapic");
        if ( acpi_skip_timer_override &&
             !strstr(cmdline, "acpi_skip_timer_override") )
            strcat(cmdline, " acpi_skip_timer_override");
        if ( (strlen(acpi_param) != 0) && !strstr(cmdline, "acpi=") )
        {
            strcat(cmdline, " acpi=");
            strcat(cmdline, acpi_param);
        }
    }

    /* Locate the initrd module, if one survived policy extraction. */
    if ( (initrdidx > 0) && (initrdidx < mbi->mods_count) )
    {
        _initrd_start = initial_images_start +
            (mod[initrdidx].mod_start - mod[0].mod_start);
        _initrd_len   = mod[initrdidx].mod_end - mod[initrdidx].mod_start;
    }

    /*
     * We're going to setup domain0 using the module(s) that we stashed safely
     * above our heap. The second module, if present, is an initrd ramdisk.
     */
    if ( construct_dom0(dom0,
                        initial_images_start,
                        mod[0].mod_end-mod[0].mod_start,
                        _initrd_start,
                        _initrd_len,
                        cmdline) != 0)
        panic("Could not set up DOM0 guest OS\n");

    /* Scrub RAM that is still free and so may go to an unprivileged domain. */
    scrub_heap_pages();

    init_trace_bufs();

    console_endboot();

    /* Hide UART from DOM0 if we're using it */
    serial_endboot();

    domain_unpause_by_systemcontroller(dom0);

    startup_cpu_idle_loop();
}
/*
 * Fill @info with a space-separated list of "xen-M.m-ARCH" strings and, when
 * HVM is available, the "hvm-M.m-ARCH" guest types this build can host.
 */
void arch_get_xen_caps(xen_capabilities_info_t info)
{
    char *p = info;
    int major = xen_major_version();
    int minor = xen_minor_version();

#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)

    p += sprintf(p, "xen-%d.%d-x86_32 ", major, minor);
    if ( hvm_enabled )
        p += sprintf(p, "hvm-%d.%d-x86_32 ", major, minor);

#elif defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)

    p += sprintf(p, "xen-%d.%d-x86_32p ", major, minor);
    if ( hvm_enabled )
    {
        /* PAE builds advertise both non-PAE and PAE HVM guest types. */
        p += sprintf(p, "hvm-%d.%d-x86_32 ", major, minor);
        p += sprintf(p, "hvm-%d.%d-x86_32p ", major, minor);
    }

#elif defined(CONFIG_X86_64)

    p += sprintf(p, "xen-%d.%d-x86_64 ", major, minor);
    if ( hvm_enabled )
    {
        /* x86/64 builds advertise 32-bit, PAE and 64-bit HVM guest types. */
        p += sprintf(p, "hvm-%d.%d-x86_32 ", major, minor);
        p += sprintf(p, "hvm-%d.%d-x86_32p ", major, minor);
        p += sprintf(p, "hvm-%d.%d-x86_64 ", major, minor);
    }

#else

    p++; /* keeps the '*(p-1) = 0' below from writing before info[0] */

#endif

    /* Overwrite the final trailing space with the string terminator. */
    *(p-1) = 0;

    BUG_ON((p - info) > sizeof(xen_capabilities_info_t));
}
757 /*
758 * Local variables:
759 * mode: C
760 * c-set-style: "BSD"
761 * c-basic-offset: 4
762 * tab-width: 4
763 * indent-tabs-mode: nil
764 * End:
765 */