ia64/xen-unstable

view xen/arch/x86/setup.c @ 10293:4122e88b6c75

Move idle-vcpu allocation logic to a common function.
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Jun 02 09:31:35 2006 +0100
parents ef0a56c0784d
children f7bb99cdc391
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/serial.h>
#include <xen/softirq.h>
#include <xen/acpi.h>
#include <xen/console.h>
#include <xen/serial.h>
#include <xen/trace.h>
#include <xen/multiboot.h>
#include <xen/domain_page.h>
#include <xen/compile.h>
#include <xen/gdbstub.h>
#include <xen/percpu.h>
#include <public/version.h>
#include <asm/bitops.h>
#include <asm/smp.h>
#include <asm/processor.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/shadow.h>
#include <asm/e820.h>
#include <acm/acm_hooks.h>
extern void dmi_scan_machine(void);
extern void generic_apic_probe(void);

/*
 * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
 * page_info table and allocation bitmap.
 */
static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
#if defined(CONFIG_X86_64)
integer_param("xenheap_megabytes", opt_xenheap_megabytes);
#endif
/* opt_nosmp: If true, secondary processors are ignored. */
static int opt_nosmp = 0;
boolean_param("nosmp", opt_nosmp);

/* maxcpus: maximum number of CPUs to activate. */
static unsigned int max_cpus = NR_CPUS;
integer_param("maxcpus", max_cpus);

/* opt_watchdog: If true, run a watchdog NMI on each processor. */
static int opt_watchdog = 0;
boolean_param("watchdog", opt_watchdog);
/* **** Linux config option: propagated to domain0. */
/* "acpi=off": Disables both ACPI table parsing and interpreter. */
/* "acpi=force": Override the disable blacklist. */
/* "acpi=strict": Disables out-of-spec workarounds. */
/* "acpi=ht": Limit ACPI just to boot-time to enable HT. */
/* "acpi=noirq": Disables ACPI interrupt routing. */
static void parse_acpi_param(char *s);
custom_param("acpi", parse_acpi_param);
/* **** Linux config option: propagated to domain0. */
/* acpi_skip_timer_override: Skip IRQ0 overrides. */
extern int acpi_skip_timer_override;
boolean_param("acpi_skip_timer_override", acpi_skip_timer_override);

/* **** Linux config option: propagated to domain0. */
/* noapic: Disable IOAPIC setup. */
extern int skip_ioapic_setup;
boolean_param("noapic", skip_ioapic_setup);

int early_boot = 1;

cpumask_t cpu_present_map;

/* Limits of Xen heap, used to initialise the allocator. */
unsigned long xenheap_phys_start, xenheap_phys_end;
extern void arch_init_memory(void);
extern void init_IRQ(void);
extern void trap_init(void);
extern void early_time_init(void);
extern void initialize_keytable(void);
extern void early_cpu_init(void);

struct tss_struct init_tss[NR_CPUS];

extern unsigned long cpu0_stack[];

struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };

#if CONFIG_PAGING_LEVELS > 2
unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
#else
unsigned long mmu_cr4_features = X86_CR4_PSE;
#endif
EXPORT_SYMBOL(mmu_cr4_features);
int acpi_disabled;

int acpi_force;
char acpi_param[10] = "";
static void parse_acpi_param(char *s)
{
    /* Save the parameter so it can be propagated to domain0. */
    strncpy(acpi_param, s, sizeof(acpi_param));
    acpi_param[sizeof(acpi_param)-1] = '\0';

    /* Interpret the parameter for use within Xen. */
    if ( !strcmp(s, "off") )
    {
        disable_acpi();
    }
    else if ( !strcmp(s, "force") )
    {
        acpi_force = 1;
        acpi_ht = 1;
        acpi_disabled = 0;
    }
    else if ( !strcmp(s, "strict") )
    {
        acpi_strict = 1;
    }
    else if ( !strcmp(s, "ht") )
    {
        if ( !acpi_force )
            disable_acpi();
        acpi_ht = 1;
    }
    else if ( !strcmp(s, "noirq") )
    {
        acpi_noirq_set();
    }
}
static void __init do_initcalls(void)
{
    initcall_t *call;
    for ( call = &__initcall_start; call < &__initcall_end; call++ )
        (*call)();
}

#define EARLY_FAIL() for ( ; ; ) __asm__ __volatile__ ( "hlt" )

static struct e820entry e820_raw[E820MAX];
static unsigned long initial_images_start, initial_images_end;

unsigned long initial_images_nrpages(void)
{
    unsigned long s = initial_images_start + PAGE_SIZE - 1;
    unsigned long e = initial_images_end;
    return ((e >> PAGE_SHIFT) - (s >> PAGE_SHIFT));
}

void discard_initial_images(void)
{
    init_domheap_pages(initial_images_start, initial_images_end);
}

extern char __per_cpu_start[], __per_cpu_data_end[], __per_cpu_end[];
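
/* Copy the initial per-CPU data (CPU0's area) into each secondary CPU's area. */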
static void percpu_init_areas(void)
{
    unsigned int i, data_size = __per_cpu_data_end - __per_cpu_start;

    BUG_ON(data_size > PERCPU_SIZE);

    for ( i = 1; i < NR_CPUS; i++ )
        memcpy(__per_cpu_start + (i << PERCPU_SHIFT),
               __per_cpu_start,
               data_size);
}

static void percpu_free_unused_areas(void)
{
    unsigned int i, first_unused;

    /* Find first unused CPU number. */
    for ( i = 0; i < NR_CPUS; i++ )
        if ( !cpu_online(i) )
            break;
    first_unused = i;

    /* Check that there are no holes in cpu_online_map. */
    for ( ; i < NR_CPUS; i++ )
        BUG_ON(cpu_online(i));

    init_xenheap_pages(__pa(__per_cpu_start) + (first_unused << PERCPU_SHIFT),
                       __pa(__per_cpu_end));
}
void __init __start_xen(multiboot_info_t *mbi)
{
    char __cmdline[] = "", *cmdline = __cmdline;
    struct domain *idle_domain;
    unsigned long _initrd_start = 0, _initrd_len = 0;
    unsigned int initrdidx = 1;
    module_t *mod = (module_t *)__va(mbi->mods_addr);
    unsigned long nr_pages, modules_length;
    paddr_t s, e;
    int i, e820_warn = 0, e820_raw_nr = 0, bytes = 0;
    struct ns16550_defaults ns16550 = {
        .data_bits = 8,
        .parity = 'n',
        .stop_bits = 1
    };

    /* Parse the command-line options. */
    if ( (mbi->flags & MBI_CMDLINE) && (mbi->cmdline != 0) )
        cmdline = __va(mbi->cmdline);
    cmdline_parse(cmdline);

    set_current((struct vcpu *)0xfffff000); /* debug sanity */
    set_processor_id(0); /* needed early, for smp_processor_id() */

    smp_prepare_boot_cpu();

    /* We initialise the serial devices very early so we can get debugging. */
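    /* COM1 (port 0x3f8, IRQ 4) and COM2 (port 0x2f8, IRQ 3): the legacy PC UARTs. */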
    ns16550.io_base = 0x3f8;
    ns16550.irq = 4;
    ns16550_init(0, &ns16550);
    ns16550.io_base = 0x2f8;
    ns16550.irq = 3;
    ns16550_init(1, &ns16550);
    serial_init_preirq();

    init_console();

    printf("Command line: %s\n", cmdline);

    /* Check that we have at least one Multiboot module. */
    if ( !(mbi->flags & MBI_MODULES) || (mbi->mods_count == 0) )
    {
        printk("FATAL ERROR: dom0 kernel not specified."
               " Check bootloader configuration.\n");
        EARLY_FAIL();
    }

    if ( ((unsigned long)cpu0_stack & (STACK_SIZE-1)) != 0 )
    {
        printk("FATAL ERROR: Misaligned CPU0 stack.\n");
        EARLY_FAIL();
    }

    percpu_init_areas();
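
    /* The Xen heap ends at the configured number of megabytes (physical address). */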
    xenheap_phys_end = opt_xenheap_megabytes << 20;

    if ( mbi->flags & MBI_MEMMAP )
    {
        while ( bytes < mbi->mmap_length )
        {
            memory_map_t *map = __va(mbi->mmap_addr + bytes);

            /*
             * This is a gross workaround for a BIOS bug. Some bootloaders do
             * not write e820 map entries into pre-zeroed memory. This is
             * okay if the BIOS fills in all fields of the map entry, but
             * some broken BIOSes do not bother to write the high word of
             * the length field if the length is smaller than 4GB. We
             * detect and fix this by flagging sections below 4GB that
             * appear to be larger than 4GB in size.
             */
            if ( (map->base_addr_high == 0) && (map->length_high != 0) )
            {
                e820_warn = 1;
                map->length_high = 0;
            }

            e820_raw[e820_raw_nr].addr =
                ((u64)map->base_addr_high << 32) | (u64)map->base_addr_low;
            e820_raw[e820_raw_nr].size =
                ((u64)map->length_high << 32) | (u64)map->length_low;
            e820_raw[e820_raw_nr].type =
                (map->type > E820_SHARED_PAGE) ? E820_RESERVED : map->type;
            e820_raw_nr++;
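
            /* A Multiboot memory-map entry's 'size' field excludes the field itself. */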
            bytes += map->size + 4;
        }
    }
    else if ( mbi->flags & MBI_MEMLIMITS )
    {
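        /* Multiboot mem_lower/mem_upper report below-1MB and above-1MB RAM in kilobytes. */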
        e820_raw[0].addr = 0;
        e820_raw[0].size = mbi->mem_lower << 10;
        e820_raw[0].type = E820_RAM;
        e820_raw[1].addr = 0x100000;
        e820_raw[1].size = mbi->mem_upper << 10;
        e820_raw[1].type = E820_RAM;
        e820_raw_nr = 2;
    }
    else
    {
        printk("FATAL ERROR: Bootloader provided no memory information.\n");
        for ( ; ; ) ;
    }

    if ( e820_warn )
        printk("WARNING: Buggy e820 map detected and fixed "
               "(truncated length fields).\n");

    max_page = init_e820(e820_raw, &e820_raw_nr);
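
    /* The boot modules are assumed to occupy a single contiguous physical range. */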
    modules_length = mod[mbi->mods_count-1].mod_end - mod[0].mod_start;

    /* Find a large enough RAM extent to stash the DOM0 modules. */
    for ( i = 0; ; i++ )
    {
        if ( i == e820.nr_map )
        {
            printk("Not enough memory to stash the DOM0 kernel image.\n");
            for ( ; ; ) ;
        }

        if ( (e820.map[i].type == E820_RAM) &&
             (e820.map[i].size >= modules_length) &&
             ((e820.map[i].addr + e820.map[i].size) >=
              (xenheap_phys_end + modules_length)) )
            break;
    }

    /* Stash as near as possible to the beginning of the RAM extent. */
    initial_images_start = e820.map[i].addr;
    if ( initial_images_start < xenheap_phys_end )
        initial_images_start = xenheap_phys_end;
    initial_images_end = initial_images_start + modules_length;

#if defined(CONFIG_X86_32)
    memmove((void *)initial_images_start, /* use low mapping */
            (void *)mod[0].mod_start,     /* use low mapping */
            mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
#elif defined(CONFIG_X86_64)
    memmove(__va(initial_images_start),
            __va(mod[0].mod_start),
            mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
#endif
    /* Initialise boot-time allocator with all RAM situated after modules. */
    xenheap_phys_start = init_boot_allocator(__pa(&_end));
    nr_pages = 0;
    for ( i = 0; i < e820.nr_map; i++ )
    {
        if ( e820.map[i].type != E820_RAM )
            continue;

        nr_pages += e820.map[i].size >> PAGE_SHIFT;

        /* Initialise boot heap, skipping Xen heap and dom0 modules. */
        s = e820.map[i].addr;
        e = s + e820.map[i].size;
        if ( s < xenheap_phys_end )
            s = xenheap_phys_end;
        if ( (s < initial_images_end) && (e > initial_images_start) )
            s = initial_images_end;
        init_boot_pages(s, e);

#if defined (CONFIG_X86_64)
        /*
         * x86/64 maps all registered RAM. Points to note:
         * 1. The initial pagetable already maps low 1GB, so skip that.
         * 2. We must map *only* RAM areas, taking care to avoid I/O holes.
         *    Failure to do this can cause coherency problems and deadlocks
         *    due to cache-attribute mismatches (e.g., AMD/AGP Linux bug).
         */
        {
            /* Calculate page-frame range, discarding partial frames. */
            unsigned long start, end;
            unsigned long init_mapped = 1UL << (30 - PAGE_SHIFT); /* 1GB */
            start = PFN_UP(e820.map[i].addr);
            end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
            /* Clip the range to exclude what the bootstrapper initialised. */
            if ( end < init_mapped )
                continue;
            if ( start < init_mapped )
                start = init_mapped;
            /* Request the mapping. */
            map_pages_to_xen(
                PAGE_OFFSET + (start << PAGE_SHIFT),
                start, end-start, PAGE_HYPERVISOR);
        }
#endif
    }

    memguard_init();

    printk("System RAM: %luMB (%lukB)\n",
           nr_pages >> (20 - PAGE_SHIFT),
           nr_pages << (PAGE_SHIFT - 10));
    total_pages = nr_pages;

    /* Sanity check for unwanted bloat of dom0_op structure. */
    BUILD_BUG_ON(sizeof(((struct dom0_op *)0)->u) !=
                 sizeof(((struct dom0_op *)0)->u.pad));
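
    /* Guest-visible ABI structures: start_info and shared_info must fit in a
     * page, and vcpu_info_t has a fixed 64-byte layout. */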
    BUILD_BUG_ON(sizeof(start_info_t) > PAGE_SIZE);
    BUILD_BUG_ON(sizeof(shared_info_t) > PAGE_SIZE);
    BUILD_BUG_ON(sizeof(vcpu_info_t) != 64);

    /* __foo are defined in public headers. Check they match internal defs. */
    BUILD_BUG_ON(__HYPERVISOR_VIRT_START != HYPERVISOR_VIRT_START);
#ifdef HYPERVISOR_VIRT_END
    BUILD_BUG_ON(__HYPERVISOR_VIRT_END != HYPERVISOR_VIRT_END);
#endif

    init_frametable();

    end_boot_allocator();

    /* Initialise the Xen heap, skipping RAM holes. */
    nr_pages = 0;
    for ( i = 0; i < e820.nr_map; i++ )
    {
        if ( e820.map[i].type != E820_RAM )
            continue;

        s = e820.map[i].addr;
        e = s + e820.map[i].size;
        if ( s < xenheap_phys_start )
            s = xenheap_phys_start;
        if ( e > xenheap_phys_end )
            e = xenheap_phys_end;

        if ( s < e )
        {
            nr_pages += (e - s) >> PAGE_SHIFT;
            init_xenheap_pages(s, e);
        }
    }

    printk("Xen heap: %luMB (%lukB)\n",
           nr_pages >> (20 - PAGE_SHIFT),
           nr_pages << (PAGE_SHIFT - 10));

    early_boot = 0;

    early_cpu_init();

    scheduler_init();

    idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
    BUG_ON(idle_domain == NULL);

    set_current(idle_domain->vcpu[0]);
    this_cpu(curr_vcpu) = idle_domain->vcpu[0];
    idle_vcpu[0] = current;

    paging_init();

    /* Unmap the first page of CPU0's stack. */
    memguard_guard_stack(cpu0_stack);
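
    /* This softirq is raised when the global TLB-flush clock wraps to start a
     * new flush period. */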
    open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);

    if ( opt_watchdog )
        nmi_watchdog = NMI_LOCAL_APIC;

    sort_exception_tables();

    find_smp_config();

    smp_alloc_memory();

    dmi_scan_machine();

    generic_apic_probe();

    acpi_boot_table_init();
    acpi_boot_init();

    if ( smp_found_config )
        get_smp_config();

    init_apic_mappings();

    init_IRQ();

    trap_init();

    timer_init();

    early_time_init();

    arch_init_memory();

    identify_cpu(&boot_cpu_data);
    if ( cpu_has_fxsr )
        set_in_cr4(X86_CR4_OSFXSR);
    if ( cpu_has_xmm )
        set_in_cr4(X86_CR4_OSXMMEXCPT);

    if ( opt_nosmp )
        max_cpus = 0;

    smp_prepare_cpus(max_cpus);

    /*
     * Initialise higher-level timer functions. We do this fairly late
     * (post-SMP) because the time bases and scale factors need to be updated
     * regularly, and SMP initialisation can cause a long delay with
     * interrupts not yet enabled.
     */
    init_xen_time();

    initialize_keytable();

    serial_init_postirq();

    BUG_ON(!local_irq_is_enabled());
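
    /* Bring up the remaining present CPUs, up to the max_cpus limit. */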
    for_each_present_cpu ( i )
    {
        if ( num_online_cpus() >= max_cpus )
            break;
        if ( !cpu_online(i) )
            __cpu_up(i);
    }

    printk("Brought up %ld CPUs\n", (long)num_online_cpus());
    smp_cpus_done(max_cpus);

    percpu_free_unused_areas();

    initialise_gdb(); /* could be moved earlier */

    do_initcalls();

    schedulers_start();

    if ( opt_watchdog )
        watchdog_enable();

    shadow_mode_init();

    /* initialize access control security module */
    acm_init(&initrdidx, mbi, initial_images_start);

    /* Create initial domain 0. */
    dom0 = domain_create(0, 0);
    if ( dom0 == NULL )
        panic("Error creating domain 0\n");

    set_bit(_DOMF_privileged, &dom0->domain_flags);
    /* post-create hook sets security label */
    acm_post_domain0_create(dom0->domain_id);

    /* Grab the DOM0 command line. */
    cmdline = (char *)(mod[0].string ? __va(mod[0].string) : NULL);
    if ( cmdline != NULL )
    {
        static char dom0_cmdline[MAX_GUEST_CMDLINE];

        /* Skip past the image name and copy to a local buffer. */
        while ( *cmdline == ' ' ) cmdline++;
        if ( (cmdline = strchr(cmdline, ' ')) != NULL )
        {
            while ( *cmdline == ' ' ) cmdline++;
            strcpy(dom0_cmdline, cmdline);
        }

        cmdline = dom0_cmdline;

        /* Append any extra parameters. */
        if ( skip_ioapic_setup && !strstr(cmdline, "noapic") )
            strcat(cmdline, " noapic");
        if ( acpi_skip_timer_override &&
             !strstr(cmdline, "acpi_skip_timer_override") )
            strcat(cmdline, " acpi_skip_timer_override");
        if ( (strlen(acpi_param) != 0) && !strstr(cmdline, "acpi=") )
        {
            strcat(cmdline, " acpi=");
            strcat(cmdline, acpi_param);
        }
    }

    if ( (initrdidx > 0) && (initrdidx < mbi->mods_count) )
    {
        _initrd_start = initial_images_start +
            (mod[initrdidx].mod_start - mod[0].mod_start);
        _initrd_len = mod[initrdidx].mod_end - mod[initrdidx].mod_start;
    }

    /*
     * We're going to setup domain0 using the module(s) that we stashed safely
     * above our heap. The second module, if present, is an initrd ramdisk.
     */
    if ( construct_dom0(dom0,
                        initial_images_start,
                        mod[0].mod_end-mod[0].mod_start,
                        _initrd_start,
                        _initrd_len,
                        cmdline) != 0)
        panic("Could not set up DOM0 guest OS\n");

    /* Scrub RAM that is still free and so may go to an unprivileged domain. */
    scrub_heap_pages();

    init_trace_bufs();

    /* Give up the VGA console if DOM0 is configured to grab it. */
    console_endboot(cmdline && strstr(cmdline, "tty0"));

    /* Hide UART from DOM0 if we're using it */
    serial_endboot();

    domain_unpause_by_systemcontroller(dom0);

    startup_cpu_idle_loop();
}

void arch_get_xen_caps(xen_capabilities_info_t info)
{
    char *p = info;

#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)

    p += sprintf(p, "xen-%d.%d-x86_32 ", XEN_VERSION, XEN_SUBVERSION);
    if ( hvm_enabled )
        p += sprintf(p, "hvm-%d.%d-x86_32 ", XEN_VERSION, XEN_SUBVERSION);

#elif defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)

    p += sprintf(p, "xen-%d.%d-x86_32p ", XEN_VERSION, XEN_SUBVERSION);
    if ( hvm_enabled )
    {
        p += sprintf(p, "hvm-%d.%d-x86_32 ", XEN_VERSION, XEN_SUBVERSION);
        //p += sprintf(p, "hvm-%d.%d-x86_32p ", XEN_VERSION, XEN_SUBVERSION);
    }

#elif defined(CONFIG_X86_64)

    p += sprintf(p, "xen-%d.%d-x86_64 ", XEN_VERSION, XEN_SUBVERSION);
    if ( hvm_enabled )
    {
        p += sprintf(p, "hvm-%d.%d-x86_32 ", XEN_VERSION, XEN_SUBVERSION);
        p += sprintf(p, "hvm-%d.%d-x86_32p ", XEN_VERSION, XEN_SUBVERSION);
        p += sprintf(p, "hvm-%d.%d-x86_64 ", XEN_VERSION, XEN_SUBVERSION);
    }

#else

    p++;

#endif

    *(p-1) = 0;

    BUG_ON((p - info) > sizeof(xen_capabilities_info_t));
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */