ia64/xen-unstable

annotate xen/arch/x86/setup.c @ 15293:345ae2e61ba0

x86: Print source of e820 memory map during boot. Fix Xen-e801 memmap
parsing. Get rid of unneeded e820_raw variable -- map straight onto
boot-trampoline e820 array.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Jun 07 20:02:27 2007 +0100 (2007-06-07)
parents a5ae31a91b10
children 3cc79ef896a2
rev   line source
kaf24@1452 1 #include <xen/config.h>
kaf24@1452 2 #include <xen/init.h>
kaf24@1452 3 #include <xen/lib.h>
kaf24@1452 4 #include <xen/sched.h>
cl349@5247 5 #include <xen/domain.h>
kaf24@1452 6 #include <xen/serial.h>
kaf24@1506 7 #include <xen/softirq.h>
kaf24@1452 8 #include <xen/acpi.h>
kaf24@3338 9 #include <xen/console.h>
iap10@4287 10 #include <xen/serial.h>
kaf24@3338 11 #include <xen/trace.h>
kaf24@3338 12 #include <xen/multiboot.h>
kaf24@5356 13 #include <xen/domain_page.h>
kfraser@10890 14 #include <xen/version.h>
kaf24@9117 15 #include <xen/gdbstub.h>
kaf24@9818 16 #include <xen/percpu.h>
kfraser@11296 17 #include <xen/hypercall.h>
kfraser@11601 18 #include <xen/keyhandler.h>
kfraser@11971 19 #include <xen/numa.h>
kaf24@13662 20 #include <xen/rcupdate.h>
iap10@6721 21 #include <public/version.h>
ack@13291 22 #ifdef CONFIG_COMPAT
ack@13291 23 #include <compat/platform.h>
ack@13291 24 #include <compat/xen.h>
ack@13291 25 #endif
kaf24@1452 26 #include <asm/bitops.h>
kaf24@1452 27 #include <asm/smp.h>
kaf24@1452 28 #include <asm/processor.h>
kaf24@1452 29 #include <asm/mpspec.h>
kaf24@1452 30 #include <asm/apic.h>
kaf24@1452 31 #include <asm/desc.h>
Tim@13909 32 #include <asm/paging.h>
kaf24@3344 33 #include <asm/e820.h>
kaf24@5536 34 #include <acm/acm_hooks.h>
ian@12677 35 #include <xen/kexec.h>
kaf24@3338 36
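/*
 * Bootstrap mapping limits, used before the boot allocator is running (see
 * the superpage-mapping loop in __start_xen below): machine addresses below
 * BOOTSTRAP_DIRECTMAP_END can be mapped without allocating new pagetables,
 * and maddr_to_bootstrap_virt() gives the virtual address to use for such a
 * mapping -- the 1:1 directmap address on x86/64, an identity-mapped low
 * address on x86/32.
 */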
kfraser@15074 37 #if defined(CONFIG_X86_64)
kfraser@15074 38 #define BOOTSTRAP_DIRECTMAP_END (1UL << 32)
kfraser@15074 39 #define maddr_to_bootstrap_virt(m) maddr_to_virt(m)
kfraser@15074 40 #else
kfraser@15074 41 #define BOOTSTRAP_DIRECTMAP_END HYPERVISOR_VIRT_START
kfraser@15074 42 #define maddr_to_bootstrap_virt(m) ((void *)(long)(m))
kfraser@15074 43 #endif
kfraser@15074 44
kaf24@5157 45 extern void dmi_scan_machine(void);
kaf24@5211 46 extern void generic_apic_probe(void);
kfraser@11971 47 extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
kaf24@5157 48
kaf24@3338 49 /*
kaf24@3338 50 * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
kaf24@8726 51 * page_info table and allocation bitmap.
kaf24@3338 52 */
kaf24@3338 53 static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
kaf24@4950 54 #if defined(CONFIG_X86_64)
kaf24@3338 55 integer_param("xenheap_megabytes", opt_xenheap_megabytes);
kaf24@3354 56 #endif
kaf24@1452 57
kaf24@5146 58 /* opt_nosmp: If true, secondary processors are ignored. */
kaf24@5900 59 static int opt_nosmp = 0;
kaf24@5146 60 boolean_param("nosmp", opt_nosmp);
kaf24@5146 61
kaf24@5146 62 /* maxcpus: maximum number of CPUs to activate. */
kaf24@5146 63 static unsigned int max_cpus = NR_CPUS;
shand@11156 64 integer_param("maxcpus", max_cpus);
kaf24@5146 65
kaf24@3334 66 /* opt_watchdog: If true, run a watchdog NMI on each processor. */
kaf24@3334 67 static int opt_watchdog = 0;
kaf24@3334 68 boolean_param("watchdog", opt_watchdog);
kaf24@3334 69
kaf24@4850 70 /* **** Linux config option: propagated to domain0. */
kaf24@4850 71 /* "acpi=off": Sisables both ACPI table parsing and interpreter. */
kaf24@4850 72 /* "acpi=force": Override the disable blacklist. */
kaf24@4850 73 /* "acpi=strict": Disables out-of-spec workarounds. */
kaf24@4850 74 /* "acpi=ht": Limit ACPI just to boot-time to enable HT. */
kaf24@4850 75 /* "acpi=noirq": Disables ACPI interrupt routing. */
kaf24@4850 76 static void parse_acpi_param(char *s);
kaf24@4850 77 custom_param("acpi", parse_acpi_param);
kaf24@4850 78
kaf24@4850 79 /* **** Linux config option: propagated to domain0. */
kaf24@4850 80 /* acpi_skip_timer_override: Skip IRQ0 overrides. */
kaf24@4850 81 extern int acpi_skip_timer_override;
kaf24@4850 82 boolean_param("acpi_skip_timer_override", acpi_skip_timer_override);
kaf24@4850 83
kaf24@4850 84 /* **** Linux config option: propagated to domain0. */
kaf24@4850 85 /* noapic: Disable IOAPIC setup. */
kaf24@4850 86 extern int skip_ioapic_setup;
kaf24@4850 87 boolean_param("noapic", skip_ioapic_setup);
kaf24@4850 88
kaf24@3594 89 int early_boot = 1;
kaf24@3594 90
kaf24@5146 91 cpumask_t cpu_present_map;
kaf24@5146 92
kfraser@15074 93 unsigned long xen_phys_start;
kfraser@15074 94
kaf24@5003 95 /* Limits of Xen heap, used to initialise the allocator. */
kaf24@5003 96 unsigned long xenheap_phys_start, xenheap_phys_end;
kaf24@3338 97
kaf24@2298 98 extern void arch_init_memory(void);
kaf24@1589 99 extern void init_IRQ(void);
kaf24@1589 100 extern void trap_init(void);
kaf24@5604 101 extern void early_time_init(void);
kaf24@5167 102 extern void early_cpu_init(void);
kaf24@1589 103
kaf24@8533 104 struct tss_struct init_tss[NR_CPUS];
kaf24@8533 105
kfraser@15074 106 char __attribute__ ((__section__(".bss.page_aligned"))) cpu0_stack[STACK_SIZE];
kaf24@5011 107
kaf24@5214 108 struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
kaf24@1452 109
kaf24@5237 110 #if CONFIG_PAGING_LEVELS > 2
kaf24@1670 111 unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
kaf24@1670 112 #else
kaf24@5593 113 unsigned long mmu_cr4_features = X86_CR4_PSE;
kaf24@1670 114 #endif
kaf24@1452 115 EXPORT_SYMBOL(mmu_cr4_features);
kaf24@1452 116
kaf24@4818 117 int acpi_disabled;
kaf24@1452 118
kaf24@4850 119 int acpi_force;
kaf24@4850 120 char acpi_param[10] = "";
kfraser@15074 121 static void __init parse_acpi_param(char *s)
kaf24@4850 122 {
kaf24@4850 123 /* Save the parameter so it can be propagated to domain0. */
kfraser@13689 124 safe_strcpy(acpi_param, s);
kaf24@4850 125
kaf24@4850 126 /* Interpret the parameter for use within Xen. */
kaf24@4850 127 if ( !strcmp(s, "off") )
kaf24@4850 128 {
kaf24@4850 129 disable_acpi();
kaf24@4850 130 }
kaf24@4850 131 else if ( !strcmp(s, "force") )
kaf24@4850 132 {
kaf24@4850 133 acpi_force = 1;
kaf24@4850 134 acpi_ht = 1;
kaf24@4850 135 acpi_disabled = 0;
kaf24@4850 136 }
kaf24@4850 137 else if ( !strcmp(s, "strict") )
kaf24@4850 138 {
kaf24@4850 139 acpi_strict = 1;
kaf24@4850 140 }
kaf24@4850 141 else if ( !strcmp(s, "ht") )
kaf24@4850 142 {
kaf24@4850 143 if ( !acpi_force )
kaf24@4850 144 disable_acpi();
kaf24@4850 145 acpi_ht = 1;
kaf24@4850 146 }
kaf24@4850 147 else if ( !strcmp(s, "noirq") )
kaf24@4850 148 {
kaf24@4850 149 acpi_noirq_set();
kaf24@4850 150 }
kaf24@4850 151 }
kaf24@4850 152
kaf24@1452 153 static void __init do_initcalls(void)
kaf24@1452 154 {
kaf24@1452 155 initcall_t *call;
kaf24@1452 156 for ( call = &__initcall_start; call < &__initcall_end; call++ )
kaf24@1452 157 (*call)();
kaf24@1452 158 }
kaf24@1452 159
kfraser@15074 160 #define EARLY_FAIL(f, a...) do { \
kfraser@15074 161 printk( f , ## a ); \
kfraser@15074 162 for ( ; ; ) __asm__ __volatile__ ( "hlt" ); \
kfraser@15074 163 } while (0)
kaf24@8459 164
kfraser@15074 165 static unsigned long __initdata initial_images_start, initial_images_end;
kaf24@9067 166
kfraser@15074 167 unsigned long __init initial_images_nrpages(void)
kaf24@9067 168 {
kaf24@9067 169 unsigned long s = initial_images_start + PAGE_SIZE - 1;
kaf24@9067 170 unsigned long e = initial_images_end;
kaf24@9067 171 return ((e >> PAGE_SHIFT) - (s >> PAGE_SHIFT));
kaf24@9067 172 }
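/*
 * Worked example (illustrative addresses): initial_images_start = 0x100800
 * and initial_images_end = 0x103000 give s >> PAGE_SHIFT = 0x101 and
 * e >> PAGE_SHIFT = 0x103, i.e. 2 pages -- only pages lying wholly inside
 * the image range are counted.
 */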
kaf24@9067 173
kfraser@15074 174 void __init discard_initial_images(void)
kaf24@9067 175 {
kaf24@9067 176 init_domheap_pages(initial_images_start, initial_images_end);
kaf24@9067 177 }
kaf24@9067 178
kaf24@9818 179 extern char __per_cpu_start[], __per_cpu_data_end[], __per_cpu_end[];
kaf24@9818 180
kfraser@11241 181 static void __init percpu_init_areas(void)
kaf24@9818 182 {
kaf24@9818 183 unsigned int i, data_size = __per_cpu_data_end - __per_cpu_start;
kfraser@15074 184 unsigned int first_unused;
kaf24@9818 185
kaf24@9818 186 BUG_ON(data_size > PERCPU_SIZE);
kaf24@9818 187
kfraser@15074 188 /* Initialise per-cpu data area for all possible secondary CPUs. */
kfraser@15074 189 for ( i = 1; (i < NR_CPUS) && cpu_possible(i); i++ )
kfraser@15074 190 memcpy(__per_cpu_start + (i << PERCPU_SHIFT),
kfraser@15074 191 __per_cpu_start,
kfraser@15074 192 data_size);
kaf24@9818 193 first_unused = i;
kaf24@9818 194
kfraser@14340 195 /* Check that there are no holes in cpu_possible_map. */
kaf24@9818 196 for ( ; i < NR_CPUS; i++ )
kfraser@14340 197 BUG_ON(cpu_possible(i));
kaf24@9818 198
kfraser@11241 199 #ifndef MEMORY_GUARD
kaf24@9818 200 init_xenheap_pages(__pa(__per_cpu_start) + (first_unused << PERCPU_SHIFT),
kaf24@9818 201 __pa(__per_cpu_end));
kfraser@11241 202 #endif
keir@15082 203 memguard_guard_range(&__per_cpu_start[first_unused << PERCPU_SHIFT],
keir@15082 204 (NR_CPUS - first_unused) << PERCPU_SHIFT);
keir@15082 205 #if defined(CONFIG_X86_64)
keir@15082 206 /* Also zap the mapping in the 1:1 area. */
keir@15082 207 memguard_guard_range(__va(__pa(__per_cpu_start)) +
keir@15082 208 (first_unused << PERCPU_SHIFT),
keir@15082 209 (NR_CPUS - first_unused) << PERCPU_SHIFT);
keir@15082 210 #endif
kaf24@9818 211 }
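/*
 * Layout assumed above: CPU i's copy of the per-cpu data area starts at
 * __per_cpu_start + (i << PERCPU_SHIFT).  Slots belonging to CPUs that can
 * never come online are freed back to the xenheap or memory-guarded,
 * depending on MEMORY_GUARD, rather than left as stray writable mappings.
 */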
kaf24@9818 212
kfraser@11881 213 /* Fetch acm policy module from multiboot modules. */
kfraser@15074 214 static void __init extract_acm_policy(
kfraser@11881 215 multiboot_info_t *mbi,
kfraser@11881 216 unsigned int *initrdidx,
kfraser@11881 217 char **_policy_start,
kfraser@11881 218 unsigned long *_policy_len)
kfraser@11881 219 {
kfraser@11881 220 int i;
kfraser@11881 221 module_t *mod = (module_t *)__va(mbi->mods_addr);
kfraser@11881 222 unsigned long start, policy_len;
kfraser@11881 223 char *policy_start;
kfraser@11881 224
kfraser@11881 225 /*
kfraser@11881 226 * Try all modules and see which one could be the binary policy.
kfraser@11881 227 * Adjust the initrdidx if module[1] is the binary policy.
kfraser@11881 228 */
kfraser@11881 229 for ( i = mbi->mods_count-1; i >= 1; i-- )
kfraser@11881 230 {
kfraser@11881 231 start = initial_images_start + (mod[i].mod_start-mod[0].mod_start);
kfraser@15074 232 policy_start = maddr_to_bootstrap_virt(start);
kfraser@11881 233 policy_len = mod[i].mod_end - mod[i].mod_start;
kfraser@11881 234 if ( acm_is_policy(policy_start, policy_len) )
kfraser@11881 235 {
kfraser@11881 236 printk("Policy len 0x%lx, start at %p - module %d.\n",
kfraser@11881 237 policy_len, policy_start, i);
kfraser@11881 238 *_policy_start = policy_start;
kfraser@11881 239 *_policy_len = policy_len;
kfraser@11881 240 if ( i == 1 )
kfraser@11881 241 *initrdidx = (mbi->mods_count > 2) ? 2 : 0;
kfraser@11881 242 break;
kfraser@11881 243 }
kfraser@11881 244 }
kfraser@11881 245 }
kfraser@11881 246
kfraser@11241 247 static void __init init_idle_domain(void)
kfraser@11240 248 {
kfraser@11240 249 struct domain *idle_domain;
kfraser@11240 250
kfraser@11240 251 /* Domain creation requires that scheduler structures are initialised. */
kfraser@11240 252 scheduler_init();
kfraser@11240 253
kfraser@14911 254 idle_domain = domain_create(IDLE_DOMAIN_ID, 0, 0);
kfraser@11240 255 if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
kfraser@11240 256 BUG();
kfraser@11240 257
kfraser@11240 258 set_current(idle_domain->vcpu[0]);
kfraser@11240 259 idle_vcpu[0] = this_cpu(curr_vcpu) = current;
kfraser@11240 260
kfraser@11240 261 setup_idle_pagetable();
kfraser@11240 262 }
kfraser@11240 263
kfraser@15074 264 static void __init srat_detect_node(int cpu)
kfraser@11971 265 {
kfraser@11998 266 unsigned node;
kfraser@11998 267 u8 apicid = x86_cpu_to_apicid[cpu];
kfraser@11971 268
kfraser@11998 269 node = apicid_to_node[apicid];
kfraser@11998 270 if ( node == NUMA_NO_NODE )
kfraser@11998 271 node = 0;
kfraser@11998 272 numa_set_node(cpu, node);
kfraser@11971 273
kfraser@11998 274 if ( acpi_numa > 0 )
kfraser@11998 275 printk(KERN_INFO "CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
kfraser@11971 276 }
kfraser@11971 277
kfraser@15074 278 static void __init move_memory(
kfraser@15074 279 unsigned long dst, unsigned long src_start, unsigned long src_end)
ian@12677 280 {
kfraser@15074 281 memmove(maddr_to_bootstrap_virt(dst),
kfraser@15074 282 maddr_to_bootstrap_virt(src_start),
ian@12677 283 src_end - src_start);
kfraser@15074 284 }
kfraser@15074 285
kfraser@15074 286 /* A temporary copy of the e820 map that we can mess with during bootstrap. */
kfraser@15074 287 static struct e820map __initdata boot_e820;
kfraser@15074 288
kfraser@15074 289 /* Reserve area (@s,@e) in the temporary bootstrap e820 map. */
kfraser@15074 290 static void __init reserve_in_boot_e820(unsigned long s, unsigned long e)
kfraser@15074 291 {
kfraser@15074 292 unsigned long rs, re;
kfraser@15074 293 int i;
kfraser@15074 294
kfraser@15074 295 for ( i = 0; i < boot_e820.nr_map; i++ )
kfraser@15074 296 {
kfraser@15074 297 /* Have we found the e820 region that includes the specified range? */
kfraser@15074 298 rs = boot_e820.map[i].addr;
kfraser@15074 299 re = boot_e820.map[i].addr + boot_e820.map[i].size;
kfraser@15074 300 if ( (s < rs) || (e > re) )
kfraser@15074 301 continue;
kfraser@15074 302
kfraser@15074 303 /* Start fragment. */
kfraser@15074 304 boot_e820.map[i].size = s - rs;
kfraser@15074 305
kfraser@15074 306 /* End fragment. */
kfraser@15074 307 if ( e < re )
kfraser@15074 308 {
kfraser@15074 309 memmove(&boot_e820.map[i+1], &boot_e820.map[i],
kfraser@15074 310 (boot_e820.nr_map-i) * sizeof(boot_e820.map[0]));
kfraser@15074 311 boot_e820.nr_map++;
kfraser@15074 312 i++;
kfraser@15074 313 boot_e820.map[i].addr = e;
kfraser@15074 314 boot_e820.map[i].size = re - e;
kfraser@15074 315 }
kfraser@15074 316 }
ian@12677 317 }
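/*
 * Illustrative example (hypothetical addresses): reserving the range
 * [0x200000,0x300000) from a single map entry covering [0,0x1000000)
 * shrinks that entry to [0,0x200000) and inserts a new entry for
 * [0x300000,0x1000000) after it; the reserved range itself simply
 * disappears from the bootstrap map.
 */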
ian@12677 318
keir@15082 319 void init_done(void)
keir@15082 320 {
keir@15082 321 extern char __init_begin[], __init_end[];
keir@15082 322
keir@15082 323 /* Free (or page-protect) the init areas. */
keir@15082 324 #ifndef MEMORY_GUARD
keir@15082 325 init_xenheap_pages(__pa(__init_begin), __pa(__init_end));
keir@15082 326 #endif
keir@15082 327 memguard_guard_range(__init_begin, __init_end - __init_begin);
keir@15082 328 #if defined(CONFIG_X86_64)
keir@15082 329 /* Also zap the mapping in the 1:1 area. */
keir@15082 330 memguard_guard_range(__va(__pa(__init_begin)), __init_end - __init_begin);
keir@15082 331 #endif
keir@15082 332 printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10);
keir@15082 333
keir@15082 334 startup_cpu_idle_loop();
keir@15082 335 }
keir@15082 336
kaf24@8463 337 void __init __start_xen(multiboot_info_t *mbi)
kaf24@1452 338 {
kfraser@15293 339 char *memmap_type = NULL;
kaf24@9823 340 char __cmdline[] = "", *cmdline = __cmdline;
kaf24@8457 341 unsigned long _initrd_start = 0, _initrd_len = 0;
kaf24@8457 342 unsigned int initrdidx = 1;
kfraser@11881 343 char *_policy_start = NULL;
kfraser@11881 344 unsigned long _policy_len = 0;
kaf24@8457 345 module_t *mod = (module_t *)__va(mbi->mods_addr);
kaf24@6111 346 unsigned long nr_pages, modules_length;
kfraser@15293 347 int i, e820_warn = 0, bytes = 0;
kaf24@5776 348 struct ns16550_defaults ns16550 = {
kaf24@5776 349 .data_bits = 8,
kaf24@5776 350 .parity = 'n',
kaf24@5776 351 .stop_bits = 1
kaf24@5776 352 };
kaf24@3338 353
kfraser@12853 354 extern void early_page_fault(void);
kfraser@12853 355 set_intr_gate(TRAP_page_fault, &early_page_fault);
kfraser@12853 356
kaf24@3338 357 /* Parse the command-line options. */
kaf24@3344 358 if ( (mbi->flags & MBI_CMDLINE) && (mbi->cmdline != 0) )
kaf24@9823 359 cmdline = __va(mbi->cmdline);
kaf24@9823 360 cmdline_parse(cmdline);
kaf24@3338 361
kaf24@8534 362 set_current((struct vcpu *)0xfffff000); /* debug sanity */
kfraser@11240 363 idle_vcpu[0] = current;
kaf24@8534 364 set_processor_id(0); /* needed early, for smp_processor_id() */
kaf24@3338 365
kaf24@5146 366 smp_prepare_boot_cpu();
kaf24@5146 367
kaf24@3338 368 /* We initialise the serial devices very early so we can get debugging. */
kaf24@5776 369 ns16550.io_base = 0x3f8;
kaf24@5776 370 ns16550.irq = 4;
kaf24@5776 371 ns16550_init(0, &ns16550);
kaf24@5776 372 ns16550.io_base = 0x2f8;
kaf24@5776 373 ns16550.irq = 3;
kaf24@5776 374 ns16550_init(1, &ns16550);
kaf24@5195 375 serial_init_preirq();
kaf24@3338 376
kaf24@3338 377 init_console();
kaf24@3338 378
kfraser@11947 379 printk("Command line: %s\n", cmdline);
kaf24@9823 380
kaf24@3344 381 /* Check that we have at least one Multiboot module. */
kaf24@3344 382 if ( !(mbi->flags & MBI_MODULES) || (mbi->mods_count == 0) )
kfraser@15074 383 EARLY_FAIL("dom0 kernel not specified. "
kfraser@15074 384 "Check bootloader configuration.\n");
kaf24@5011 385
kaf24@5011 386 if ( ((unsigned long)cpu0_stack & (STACK_SIZE-1)) != 0 )
kfraser@15074 387 EARLY_FAIL("Misaligned CPU0 stack.\n");
kaf24@3338 388
kfraser@11618 389 /*
kfraser@11618 390 * Since there are some stubs getting built on the stacks which use
kfraser@11618 391 * direct calls/jumps, the heap must be confined to the lower 2G so
kfraser@11618 392 * that those branches can reach their targets.
kfraser@11618 393 */
kfraser@11618 394 if ( opt_xenheap_megabytes > 2048 )
kfraser@11618 395 opt_xenheap_megabytes = 2048;
kaf24@3338 396
kfraser@15293 397 if ( e820_raw_nr != 0 )
kfraser@15292 398 {
kfraser@15293 399 memmap_type = "Xen-e820";
kfraser@15292 400 }
kfraser@15293 401 else if ( bootsym(lowmem_kb) )
kfraser@15292 402 {
kfraser@15293 403 memmap_type = "Xen-e801";
kfraser@15292 404 e820_raw[0].addr = 0;
kfraser@15293 405 e820_raw[0].size = bootsym(lowmem_kb) << 10;
kfraser@15292 406 e820_raw[0].type = E820_RAM;
kfraser@15292 407 e820_raw[1].addr = 0x100000;
kfraser@15293 408 e820_raw[1].size = bootsym(highmem_kb) << 10;
kfraser@15292 409 e820_raw[1].type = E820_RAM;
kfraser@15292 410 e820_raw_nr = 2;
kfraser@15292 411 }
kfraser@15292 412 else if ( mbi->flags & MBI_MEMMAP )
kaf24@3344 413 {
kfraser@15293 414 memmap_type = "Multiboot-e820";
kaf24@3344 415 while ( bytes < mbi->mmap_length )
kaf24@3344 416 {
kaf24@3344 417 memory_map_t *map = __va(mbi->mmap_addr + bytes);
kaf24@8402 418
kaf24@8402 419 /*
kaf24@8403 420 * This is a gross workaround for a BIOS bug. Some bootloaders do
kaf24@8402 421 * not write e820 map entries into pre-zeroed memory. This is
kaf24@8402 422 * okay if the BIOS fills in all fields of the map entry, but
kaf24@8402 423 * some broken BIOSes do not bother to write the high word of
kaf24@8402 424 * the length field if the length is smaller than 4GB. We
kaf24@8402 425 * detect and fix this by flagging sections below 4GB that
kaf24@8403 426 * appear to be larger than 4GB in size.
kaf24@8402 427 */
kaf24@8403 428 if ( (map->base_addr_high == 0) && (map->length_high != 0) )
kaf24@8402 429 {
kfraser@15292 430 if ( !e820_warn )
kfraser@15292 431 {
kfraser@15292 432 printk("WARNING: Buggy e820 map detected and fixed "
kfraser@15292 433 "(truncated length fields).\n");
kfraser@15292 434 e820_warn = 1;
kfraser@15292 435 }
kaf24@8402 436 map->length_high = 0;
kaf24@8402 437 }
kaf24@8402 438
kaf24@3344 439 e820_raw[e820_raw_nr].addr =
kaf24@3344 440 ((u64)map->base_addr_high << 32) | (u64)map->base_addr_low;
kaf24@3344 441 e820_raw[e820_raw_nr].size =
kaf24@3344 442 ((u64)map->length_high << 32) | (u64)map->length_low;
kaf24@3344 443 e820_raw[e820_raw_nr].type =
kfraser@12226 444 (map->type > E820_NVS) ? E820_RESERVED : map->type;
kaf24@3344 445 e820_raw_nr++;
kaf24@8402 446
kaf24@3344 447 bytes += map->size + 4;
kaf24@3344 448 }
kaf24@3344 449 }
kaf24@3344 450 else if ( mbi->flags & MBI_MEMLIMITS )
kaf24@3344 451 {
kfraser@15293 452 memmap_type = "Multiboot-e801";
kaf24@3344 453 e820_raw[0].addr = 0;
kaf24@3344 454 e820_raw[0].size = mbi->mem_lower << 10;
kaf24@3344 455 e820_raw[0].type = E820_RAM;
kaf24@3354 456 e820_raw[1].addr = 0x100000;
kaf24@3354 457 e820_raw[1].size = mbi->mem_upper << 10;
kaf24@3354 458 e820_raw[1].type = E820_RAM;
kaf24@3344 459 e820_raw_nr = 2;
kaf24@3344 460 }
kaf24@3344 461 else
kaf24@3344 462 {
kfraser@15074 463 EARLY_FAIL("Bootloader provided no memory information.\n");
kaf24@3344 464 }
kaf24@3344 465
kaf24@13427 466 /* Ensure that all E820 RAM regions are page-aligned and -sized. */
kaf24@13427 467 for ( i = 0; i < e820_raw_nr; i++ )
kaf24@13427 468 {
kaf24@13427 469 uint64_t s, e;
kfraser@15292 470
kaf24@13427 471 if ( e820_raw[i].type != E820_RAM )
kaf24@13427 472 continue;
kaf24@13427 473 s = PFN_UP(e820_raw[i].addr);
kaf24@13427 474 e = PFN_DOWN(e820_raw[i].addr + e820_raw[i].size);
kaf24@13427 475 e820_raw[i].size = 0; /* discarded later */
kaf24@13427 476 if ( s < e )
kaf24@13427 477 {
kaf24@13427 478 e820_raw[i].addr = s << PAGE_SHIFT;
kaf24@13427 479 e820_raw[i].size = (e - s) << PAGE_SHIFT;
kaf24@13427 480 }
kaf24@13427 481 }
kaf24@13427 482
kaf24@13427 483 /* Sanitise the raw E820 map to produce a final clean version. */
kfraser@15293 484 max_page = init_e820(memmap_type, e820_raw, &e820_raw_nr);
kaf24@3338 485
kfraser@15074 486 /*
kfraser@15074 487 * Create a temporary copy of the E820 map. Truncate it to above 16MB
kfraser@15074 488 * as anything below that is already mapped and has a statically-allocated
kfraser@15074 489 * purpose.
kfraser@15074 490 */
kfraser@15074 491 memcpy(&boot_e820, &e820, sizeof(e820));
kfraser@15074 492 for ( i = 0; i < boot_e820.nr_map; i++ )
kaf24@3338 493 {
kfraser@15074 494 uint64_t s, e, min = 16 << 20; /* 16MB */
kfraser@15074 495 s = boot_e820.map[i].addr;
kfraser@15074 496 e = boot_e820.map[i].addr + boot_e820.map[i].size;
kfraser@15074 497 if ( s >= min )
kfraser@15074 498 continue;
kfraser@15074 499 if ( e > min )
kaf24@6111 500 {
kfraser@15074 501 boot_e820.map[i].addr = min;
kfraser@15074 502 boot_e820.map[i].size = e - min;
kaf24@6111 503 }
kfraser@15074 504 else
kfraser@15074 505 boot_e820.map[i].type = E820_RESERVED;
kaf24@3338 506 }
kaf24@6111 507
kfraser@15074 508 /*
keir@15077 509 * Iterate backwards over all superpage-aligned RAM regions.
kfraser@15074 510 *
kfraser@15074 511 * We require superpage alignment because the boot allocator is not yet
kfraser@15074 512 * initialised. Hence we can only map superpages in the address range
kfraser@15074 513 * 0 to BOOTSTRAP_DIRECTMAP_END, as this is guaranteed not to require
kfraser@15074 514 * dynamic allocation of pagetables.
kfraser@15074 515 *
kfraser@15074 516 * As well as mapping superpages in that range, in preparation for
kfraser@15074 517 * initialising the boot allocator, we also look for a region to which
kfraser@15074 518 * we can relocate the dom0 kernel and other multiboot modules. Also, on
kfraser@15074 519 * x86/64, we relocate Xen to higher memory.
kfraser@15074 520 */
kfraser@15074 521 modules_length = mod[mbi->mods_count-1].mod_end - mod[0].mod_start;
keir@15077 522 for ( i = boot_e820.nr_map-1; i >= 0; i-- )
kfraser@15074 523 {
kfraser@15074 524 uint64_t s, e, mask = (1UL << L2_PAGETABLE_SHIFT) - 1;
kaf24@6134 525
kfraser@15074 526 /* Superpage-aligned chunks up to BOOTSTRAP_DIRECTMAP_END, please. */
kfraser@15074 527 s = (boot_e820.map[i].addr + mask) & ~mask;
kfraser@15074 528 e = (boot_e820.map[i].addr + boot_e820.map[i].size) & ~mask;
kfraser@15074 529 e = min_t(uint64_t, e, BOOTSTRAP_DIRECTMAP_END);
kfraser@15074 530 if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) )
kaf24@3354 531 continue;
kaf24@6111 532
kfraser@15074 533 /* Map the chunk. No memory will need to be allocated to do this. */
kfraser@15074 534 map_pages_to_xen(
kfraser@15074 535 (unsigned long)maddr_to_bootstrap_virt(s),
kfraser@15074 536 s >> PAGE_SHIFT, (e-s) >> PAGE_SHIFT, PAGE_HYPERVISOR);
kaf24@6111 537
kfraser@14084 538 #if defined(CONFIG_X86_64)
kfraser@15074 539 /* Is the region suitable for relocating Xen? */
kfraser@15074 540 if ( !xen_phys_start && (((e-s) >> 20) >= opt_xenheap_megabytes) )
kaf24@5003 541 {
kfraser@15074 542 extern l2_pgentry_t l2_xenmap[];
kfraser@15074 543 l4_pgentry_t *pl4e;
kfraser@15074 544 l3_pgentry_t *pl3e;
kfraser@15074 545 l2_pgentry_t *pl2e;
kfraser@15074 546 int i, j;
kfraser@15074 547
kfraser@15074 548 /* Select relocation address. */
kfraser@15074 549 e = (e - (opt_xenheap_megabytes << 20)) & ~mask;
kfraser@15074 550 xen_phys_start = e;
kfraser@15292 551 bootsym(trampoline_xen_phys_start) = e;
kfraser@15074 552
kfraser@15074 553 /*
kfraser@15074 554 * Perform relocation to new physical address.
kfraser@15074 555 * Before doing so we must sync static/global data with main memory
kfraser@15074 556 * with a barrier(). After this we must *not* modify static/global
kfraser@15074 557 * data until after we have switched to the relocated pagetables!
kfraser@15074 558 */
kfraser@15074 559 barrier();
kfraser@15074 560 move_memory(e, 0, __pa(&_end) - xen_phys_start);
kfraser@15074 561
kfraser@15074 562 /* Walk initial pagetables, relocating page directory entries. */
kfraser@15074 563 pl4e = __va(__pa(idle_pg_table));
kfraser@15074 564 for ( i = 0 ; i < L4_PAGETABLE_ENTRIES; i++, pl4e++ )
kfraser@15074 565 {
kfraser@15074 566 if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
kfraser@15074 567 continue;
kfraser@15074 568 *pl4e = l4e_from_intpte(l4e_get_intpte(*pl4e) +
kfraser@15074 569 xen_phys_start);
kfraser@15074 570 pl3e = l4e_to_l3e(*pl4e);
kfraser@15074 571 for ( j = 0; j < L3_PAGETABLE_ENTRIES; j++, pl3e++ )
kfraser@15074 572 {
kfraser@15074 573 /* Not present or already relocated? */
kfraser@15074 574 if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) ||
kfraser@15074 575 (l3e_get_pfn(*pl3e) > 0x1000) )
kfraser@15074 576 continue;
kfraser@15074 577 *pl3e = l3e_from_intpte(l3e_get_intpte(*pl3e) +
kfraser@15074 578 xen_phys_start);
kfraser@15074 579 }
kfraser@15074 580 }
kfraser@15074 581
kfraser@15074 582 /* The only data mappings to be relocated are in the Xen area. */
kfraser@15074 583 pl2e = __va(__pa(l2_xenmap));
kfraser@15074 584 for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++, pl2e++ )
kfraser@15074 585 {
kfraser@15074 586 if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
kfraser@15074 587 continue;
kfraser@15074 588 *pl2e = l2e_from_intpte(l2e_get_intpte(*pl2e) +
kfraser@15074 589 xen_phys_start);
kfraser@15074 590 }
kfraser@15074 591
kfraser@15074 592 /* Re-sync the stack and then switch to relocated pagetables. */
kfraser@15074 593 asm volatile (
kfraser@15074 594 "rep movsb ; " /* re-sync the stack */
kfraser@15074 595 "movq %%cr4,%%rsi ; "
kfraser@15074 596 "andb $0x7f,%%sil ; "
kfraser@15074 597 "movq %%rsi,%%cr4 ; " /* CR4.PGE == 0 */
kfraser@15074 598 "movq %0,%%cr3 ; " /* CR3 == new pagetables */
kfraser@15074 599 "orb $0x80,%%sil ; "
kfraser@15074 600 "movq %%rsi,%%cr4 " /* CR4.PGE == 1 */
kfraser@15074 601 : : "r" (__pa(idle_pg_table)), "S" (cpu0_stack),
kfraser@15074 602 "D" (__va(__pa(cpu0_stack))), "c" (STACK_SIZE) : "memory" );
kaf24@5003 603 }
kaf24@5003 604 #endif
keir@15077 605
keir@15077 606 /* Is the region suitable for relocating the multiboot modules? */
keir@15077 607 if ( !initial_images_start && (s < e) && ((e-s) >= modules_length) )
keir@15077 608 {
keir@15077 609 e -= modules_length;
keir@15077 610 initial_images_start = e;
keir@15077 611 initial_images_end = initial_images_start + modules_length;
keir@15077 612 move_memory(initial_images_start,
keir@15077 613 mod[0].mod_start, mod[mbi->mods_count-1].mod_end);
keir@15077 614 }
kaf24@3354 615 }
kaf24@3354 616
kfraser@15074 617 if ( !initial_images_start )
kfraser@15074 618 EARLY_FAIL("Not enough memory to relocate the dom0 kernel image.\n");
kfraser@15074 619 reserve_in_boot_e820(initial_images_start, initial_images_end);
kfraser@15074 620
kfraser@15074 621 /*
kfraser@15074 622 * With modules (and Xen itself, on x86/64) relocated out of the way, we
kfraser@15074 623 * can now initialise the boot allocator with some memory.
kfraser@15074 624 */
kfraser@15074 625 xenheap_phys_start = init_boot_allocator(__pa(&_end));
kfraser@15074 626 xenheap_phys_end = opt_xenheap_megabytes << 20;
kfraser@15074 627 #if defined(CONFIG_X86_64)
kfraser@15074 628 if ( !xen_phys_start )
kfraser@15074 629 EARLY_FAIL("Not enough memory to relocate Xen.\n");
kfraser@15074 630 xenheap_phys_end += xen_phys_start;
kfraser@15074 631 reserve_in_boot_e820(xen_phys_start,
kfraser@15074 632 xen_phys_start + (opt_xenheap_megabytes<<20));
kfraser@15074 633 init_boot_pages(1<<20, 16<<20); /* Initial seed: 15MB */
kfraser@15074 634 #else
kfraser@15074 635 init_boot_pages(xenheap_phys_end, 16<<20); /* Initial seed: 4MB */
kfraser@15074 636 #endif
kfraser@15074 637
kfraser@15074 638 /*
kfraser@15074 639 * With the boot allocator now seeded, we can walk every RAM region and
kfraser@15074 640 * map it in its entirety (on x86/64, at least) and notify it to the
kfraser@15074 641 * boot allocator.
kfraser@15074 642 */
kfraser@15074 643 for ( i = 0; i < boot_e820.nr_map; i++ )
kfraser@15074 644 {
kfraser@15074 645 uint64_t s, e, map_e, mask = PAGE_SIZE - 1;
kfraser@15074 646
kfraser@15074 647 /* Only page alignment required now. */
kfraser@15074 648 s = (boot_e820.map[i].addr + mask) & ~mask;
kfraser@15074 649 e = (boot_e820.map[i].addr + boot_e820.map[i].size) & ~mask;
kfraser@15074 650 if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) )
kfraser@15074 651 continue;
kfraser@15074 652
kfraser@15074 653 /* Perform the mapping (truncated in 32-bit mode). */
kfraser@15074 654 map_e = e;
kfraser@15074 655 #if defined(CONFIG_X86_32)
kfraser@15074 656 map_e = min_t(uint64_t, map_e, BOOTSTRAP_DIRECTMAP_END);
kfraser@15074 657 #endif
kfraser@15074 658 if ( s < map_e )
kfraser@15074 659 map_pages_to_xen(
kfraser@15074 660 (unsigned long)maddr_to_bootstrap_virt(s),
kfraser@15074 661 s >> PAGE_SHIFT, (map_e-s) >> PAGE_SHIFT, PAGE_HYPERVISOR);
kfraser@15074 662
kfraser@15074 663 init_boot_pages(s, e);
kfraser@15074 664 }
kfraser@15074 665
kfraser@15074 666 if ( (kexec_crash_area.size > 0) && (kexec_crash_area.start > 0) )
ian@12681 667 {
ian@12677 668 unsigned long kdump_start, kdump_size, k;
ian@12677 669
ian@12681 670 /* Mark image pages as free for now. */
ian@12677 671 init_boot_pages(initial_images_start, initial_images_end);
ian@12677 672
ian@12713 673 kdump_start = kexec_crash_area.start;
ian@12713 674 kdump_size = kexec_crash_area.size;
ian@12677 675
ian@12681 676 printk("Kdump: %luMB (%lukB) at 0x%lx\n",
ian@12677 677 kdump_size >> 20,
ian@12677 678 kdump_size >> 10,
ian@12677 679 kdump_start);
ian@12677 680
ian@12681 681 if ( (kdump_start & ~PAGE_MASK) || (kdump_size & ~PAGE_MASK) )
ian@12677 682 panic("Kdump parameters not page aligned\n");
ian@12677 683
ian@12677 684 kdump_start >>= PAGE_SHIFT;
ian@12677 685 kdump_size >>= PAGE_SHIFT;
ian@12677 686
kfraser@12853 687 /* Allocate pages for Kdump memory area. */
kfraser@14083 688 if ( !reserve_boot_pages(kdump_start, kdump_size) )
ian@12677 689 panic("Unable to reserve Kdump memory\n");
ian@12677 690
kfraser@12853 691 /* Allocate pages for relocated initial images. */
ian@12677 692 k = ((initial_images_end - initial_images_start) & ~PAGE_MASK) ? 1 : 0;
ian@12677 693 k += (initial_images_end - initial_images_start) >> PAGE_SHIFT;
ian@12677 694
kfraser@14084 695 #if defined(CONFIG_X86_32)
kfraser@14084 696 /* Must allocate within bootstrap 1:1 limits. */
kfraser@15074 697 k = alloc_boot_low_pages(k, 1); /* 0x0 - BOOTSTRAP_DIRECTMAP_END */
kfraser@14084 698 #else
kfraser@14084 699 k = alloc_boot_pages(k, 1);
kfraser@14084 700 #endif
kfraser@12853 701 if ( k == 0 )
ian@12677 702 panic("Unable to allocate initial images memory\n");
ian@12677 703
ian@12677 704 move_memory(k << PAGE_SHIFT, initial_images_start, initial_images_end);
ian@12677 705
ian@12677 706 initial_images_end -= initial_images_start;
ian@12677 707 initial_images_start = k << PAGE_SHIFT;
ian@12677 708 initial_images_end += initial_images_start;
ian@12681 709 }
ian@12677 710
kaf24@5003 711 memguard_init();
kaf24@4950 712
kfraser@15074 713 nr_pages = 0;
kfraser@15074 714 for ( i = 0; i < e820.nr_map; i++ )
kfraser@15074 715 if ( e820.map[i].type == E820_RAM )
kfraser@15074 716 nr_pages += e820.map[i].size >> PAGE_SHIFT;
ian@12681 717 printk("System RAM: %luMB (%lukB)\n",
kaf24@3354 718 nr_pages >> (20 - PAGE_SHIFT),
kaf24@3354 719 nr_pages << (PAGE_SHIFT - 10));
kaf24@7220 720 total_pages = nr_pages;
kaf24@3354 721
kfraser@11296 722 /* Sanity check for unwanted bloat of certain hypercall structures. */
kfraser@11296 723 BUILD_BUG_ON(sizeof(((struct xen_platform_op *)0)->u) !=
kfraser@11296 724 sizeof(((struct xen_platform_op *)0)->u.pad));
kfraser@11296 725 BUILD_BUG_ON(sizeof(((struct xen_domctl *)0)->u) !=
kfraser@11296 726 sizeof(((struct xen_domctl *)0)->u.pad));
kfraser@11296 727 BUILD_BUG_ON(sizeof(((struct xen_sysctl *)0)->u) !=
kfraser@11296 728 sizeof(((struct xen_sysctl *)0)->u.pad));
kaf24@7388 729
kaf24@9878 730 BUILD_BUG_ON(sizeof(start_info_t) > PAGE_SIZE);
kaf24@9878 731 BUILD_BUG_ON(sizeof(shared_info_t) > PAGE_SIZE);
ack@13292 732 BUILD_BUG_ON(sizeof(struct vcpu_info) != 64);
kaf24@7744 733
ack@13291 734 #ifdef CONFIG_COMPAT
ack@13291 735 BUILD_BUG_ON(sizeof(((struct compat_platform_op *)0)->u) !=
ack@13291 736 sizeof(((struct compat_platform_op *)0)->u.pad));
ack@13291 737 BUILD_BUG_ON(sizeof(start_info_compat_t) > PAGE_SIZE);
ack@13292 738 BUILD_BUG_ON(sizeof(struct compat_vcpu_info) != 64);
ack@13291 739 #endif
ack@13291 740
kfraser@10492 741 /* Check definitions in public headers match internal defs. */
kaf24@9878 742 BUILD_BUG_ON(__HYPERVISOR_VIRT_START != HYPERVISOR_VIRT_START);
kaf24@8521 743 #ifdef HYPERVISOR_VIRT_END
kaf24@9878 744 BUILD_BUG_ON(__HYPERVISOR_VIRT_END != HYPERVISOR_VIRT_END);
kaf24@8521 745 #endif
kfraser@10492 746 BUILD_BUG_ON(MACH2PHYS_VIRT_START != RO_MPT_VIRT_START);
kfraser@10492 747 BUILD_BUG_ON(MACH2PHYS_VIRT_END != RO_MPT_VIRT_END);
kaf24@8521 748
kaf24@3354 749 init_frametable();
kaf24@3338 750
kfraser@11971 751 acpi_boot_table_init();
kfraser@11971 752
kfraser@11971 753 acpi_numa_init();
kfraser@11971 754
kfraser@11971 755 numa_initmem_init(0, max_page);
kfraser@11971 756
kaf24@6111 757 /* Initialise the Xen heap, skipping RAM holes. */
kfraser@15074 758 init_xenheap_pages(xenheap_phys_start, xenheap_phys_end);
kfraser@15074 759 nr_pages = (xenheap_phys_end - xenheap_phys_start) >> PAGE_SHIFT;
kfraser@15074 760 #ifdef __x86_64__
kfraser@15074 761 init_xenheap_pages(xen_phys_start, __pa(&_start));
kfraser@15074 762 nr_pages += (__pa(&_start) - xen_phys_start) >> PAGE_SHIFT;
kfraser@15074 763 #endif
kfraser@15074 764 xenheap_phys_start = xen_phys_start;
kaf24@6111 765 printk("Xen heap: %luMB (%lukB)\n",
kaf24@6111 766 nr_pages >> (20 - PAGE_SHIFT),
kaf24@6111 767 nr_pages << (PAGE_SHIFT - 10));
kaf24@3338 768
keir@14680 769 end_boot_allocator();
keir@14680 770
kaf24@3594 771 early_boot = 0;
kaf24@3338 772
kaf24@8459 773 early_cpu_init();
kaf24@8459 774
kaf24@8459 775 paging_init();
kaf24@8459 776
kaf24@8459 777 /* Unmap the first page of CPU0's stack. */
kaf24@8459 778 memguard_guard_stack(cpu0_stack);
kaf24@8459 779
kaf24@8459 780 open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);
kaf24@8459 781
kaf24@8459 782 if ( opt_watchdog )
kaf24@8459 783 nmi_watchdog = NMI_LOCAL_APIC;
kaf24@8459 784
kaf24@8459 785 sort_exception_tables();
kaf24@8459 786
kaf24@8459 787 find_smp_config();
kaf24@8459 788
kaf24@8459 789 dmi_scan_machine();
kaf24@8459 790
kaf24@8459 791 generic_apic_probe();
kaf24@8459 792
kaf24@8459 793 acpi_boot_init();
kaf24@8459 794
kfraser@11971 795 init_cpu_to_node();
kfraser@11971 796
kfraser@11241 797 if ( smp_found_config )
kaf24@8459 798 get_smp_config();
kaf24@8459 799
keir@15083 800 #ifdef CONFIG_X86_64
keir@15083 801 /* Low mappings were only needed for some BIOS table parsing. */
keir@15083 802 zap_low_mappings();
keir@15083 803 #endif
keir@15083 804
kaf24@8459 805 init_apic_mappings();
kaf24@8459 806
kaf24@8459 807 init_IRQ();
kaf24@8459 808
kfraser@11241 809 percpu_init_areas();
kfraser@11241 810
kfraser@11240 811 init_idle_domain();
kfraser@11240 812
kaf24@8459 813 trap_init();
kaf24@8459 814
kaf24@13662 815 rcu_init();
kaf24@13662 816
kaf24@8586 817 timer_init();
kaf24@8459 818
kaf24@8459 819 early_time_init();
kaf24@8459 820
kaf24@8459 821 arch_init_memory();
kaf24@8459 822
kaf24@8459 823 identify_cpu(&boot_cpu_data);
kaf24@8459 824 if ( cpu_has_fxsr )
kaf24@8459 825 set_in_cr4(X86_CR4_OSFXSR);
kaf24@8459 826 if ( cpu_has_xmm )
kaf24@8459 827 set_in_cr4(X86_CR4_OSXMMEXCPT);
kaf24@8459 828
kaf24@8459 829 if ( opt_nosmp )
kaf24@8459 830 max_cpus = 0;
kaf24@8459 831
kaf24@8459 832 smp_prepare_cpus(max_cpus);
kaf24@8459 833
kaf24@8459 834 /*
kaf24@8459 835 * Initialise higher-level timer functions. We do this fairly late
kaf24@8459 836 * (post-SMP) because the time bases and scale factors need to be updated
kaf24@8459 837 * regularly, and SMP initialisation can cause a long delay with
kaf24@8459 838 * interrupts not yet enabled.
kaf24@8459 839 */
kaf24@8459 840 init_xen_time();
kaf24@8459 841
kaf24@8459 842 initialize_keytable();
kaf24@8459 843
kaf24@8459 844 serial_init_postirq();
kaf24@8459 845
kaf24@8459 846 BUG_ON(!local_irq_is_enabled());
kaf24@8459 847
kaf24@8459 848 for_each_present_cpu ( i )
kaf24@8459 849 {
kaf24@8459 850 if ( num_online_cpus() >= max_cpus )
kaf24@8459 851 break;
kaf24@8459 852 if ( !cpu_online(i) )
kaf24@13662 853 {
kaf24@13662 854 rcu_online_cpu(i);
kaf24@8459 855 __cpu_up(i);
kaf24@13662 856 }
kfraser@11971 857
kfraser@11998 858 /* Set up cpu_to_node[]. */
kfraser@11971 859 srat_detect_node(i);
kfraser@11998 860 /* Set up node_to_cpumask based on cpu_to_node[]. */
kfraser@11971 861 numa_add_cpu(i);
kaf24@8459 862 }
kaf24@8459 863
kaf24@8459 864 printk("Brought up %ld CPUs\n", (long)num_online_cpus());
kaf24@8459 865 smp_cpus_done(max_cpus);
kaf24@8459 866
kaf24@9117 867 initialise_gdb(); /* could be moved earlier */
kaf24@9117 868
kaf24@8459 869 do_initcalls();
kaf24@8459 870
kaf24@8594 871 if ( opt_watchdog )
kaf24@8594 872 watchdog_enable();
kaf24@8459 873
kfraser@11881 874 /* Extract policy from multiboot. */
kfraser@11881 875 extract_acm_policy(mbi, &initrdidx, &_policy_start, &_policy_len);
kfraser@11881 876
kaf24@8459 877 /* initialize access control security module */
kfraser@11881 878 acm_init(_policy_start, _policy_len);
kaf24@8459 879
kaf24@8459 880 /* Create initial domain 0. */
kfraser@14911 881 dom0 = domain_create(0, 0, DOM0_SSIDREF);
kfraser@10655 882 if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
kaf24@8459 883 panic("Error creating domain 0\n");
kaf24@8459 884
kfraser@12210 885 dom0->is_privileged = 1;
kfraser@12210 886
kaf24@8459 887 /* Grab the DOM0 command line. */
kaf24@8459 888 cmdline = (char *)(mod[0].string ? __va(mod[0].string) : NULL);
kaf24@8459 889 if ( cmdline != NULL )
kaf24@8459 890 {
kaf24@8459 891 static char dom0_cmdline[MAX_GUEST_CMDLINE];
kaf24@8459 892
kaf24@8459 893 /* Skip past the image name and copy to a local buffer. */
kaf24@8459 894 while ( *cmdline == ' ' ) cmdline++;
kaf24@8459 895 if ( (cmdline = strchr(cmdline, ' ')) != NULL )
kaf24@8459 896 {
kaf24@8459 897 while ( *cmdline == ' ' ) cmdline++;
kfraser@13689 898 safe_strcpy(dom0_cmdline, cmdline);
kaf24@8459 899 }
kaf24@8459 900
kaf24@8459 901 /* Append any extra parameters. */
kfraser@13691 902 if ( skip_ioapic_setup && !strstr(dom0_cmdline, "noapic") )
kfraser@13691 903 safe_strcat(dom0_cmdline, " noapic");
kaf24@8459 904 if ( acpi_skip_timer_override &&
kfraser@13691 905 !strstr(dom0_cmdline, "acpi_skip_timer_override") )
kfraser@13691 906 safe_strcat(dom0_cmdline, " acpi_skip_timer_override");
kfraser@13691 907 if ( (strlen(acpi_param) != 0) && !strstr(dom0_cmdline, "acpi=") )
kaf24@8459 908 {
kfraser@13691 909 safe_strcat(dom0_cmdline, " acpi=");
kfraser@13691 910 safe_strcat(dom0_cmdline, acpi_param);
kaf24@8459 911 }
kfraser@13691 912
kfraser@13691 913 cmdline = dom0_cmdline;
kaf24@8459 914 }
kaf24@8459 915
kaf24@8459 916 if ( (initrdidx > 0) && (initrdidx < mbi->mods_count) )
kaf24@8459 917 {
kaf24@8459 918 _initrd_start = initial_images_start +
kaf24@8459 919 (mod[initrdidx].mod_start - mod[0].mod_start);
kaf24@8459 920 _initrd_len = mod[initrdidx].mod_end - mod[initrdidx].mod_start;
kaf24@8459 921 }
kaf24@8459 922
kaf24@8459 923 /*
kaf24@8459 924 * We're going to set up domain0 using the module(s) that we stashed safely
kaf24@8459 925 * above our heap. The second module, if present, is an initrd ramdisk.
kaf24@8459 926 */
kaf24@8459 927 if ( construct_dom0(dom0,
kaf24@8459 928 initial_images_start,
kaf24@8459 929 mod[0].mod_end-mod[0].mod_start,
kaf24@8459 930 _initrd_start,
kaf24@8459 931 _initrd_len,
kaf24@8459 932 cmdline) != 0)
kaf24@8459 933 panic("Could not set up DOM0 guest OS\n");
kaf24@8459 934
kaf24@8459 935 /* Scrub RAM that is still free and so may go to an unprivileged domain. */
kaf24@8459 936 scrub_heap_pages();
kaf24@8459 937
kaf24@8459 938 init_trace_bufs();
kaf24@8459 939
kaf24@10502 940 console_endboot();
kaf24@8459 941
kaf24@8459 942 /* Hide UART from DOM0 if we're using it */
kaf24@8459 943 serial_endboot();
kaf24@8459 944
kaf24@8459 945 domain_unpause_by_systemcontroller(dom0);
kaf24@8459 946
keir@15082 947 reset_stack_and_jump(init_done);
kaf24@8459 948 }
kaf24@8459 949
ian@13763 950 void arch_get_xen_caps(xen_capabilities_info_t *info)
iap10@6721 951 {
kfraser@14997 952 /* Interface name is always xen-3.0-* for Xen-3.x. */
kfraser@14997 953 int major = 3, minor = 0;
keir@13754 954 char s[32];
keir@13754 955
ian@13763 956 (*info)[0] = '\0';
iap10@6721 957
kaf24@6725 958 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
kaf24@6725 959
keir@13754 960 snprintf(s, sizeof(s), "xen-%d.%d-x86_32 ", major, minor);
ian@13763 961 safe_strcat(*info, s);
keir@13754 962 if ( hvm_enabled )
keir@13754 963 {
keir@13754 964 snprintf(s, sizeof(s), "hvm-%d.%d-x86_32 ", major, minor);
ian@13763 965 safe_strcat(*info, s);
kfraser@13685 966 }
kaf24@6725 967
kaf24@6725 968 #elif defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
kaf24@6725 969
keir@13754 970 snprintf(s, sizeof(s), "xen-%d.%d-x86_32p ", major, minor);
ian@13763 971 safe_strcat(*info, s);
kaf24@6725 972 if ( hvm_enabled )
iap10@6721 973 {
keir@13754 974 snprintf(s, sizeof(s), "hvm-%d.%d-x86_32 ", major, minor);
ian@13763 975 safe_strcat(*info, s);
keir@13754 976 snprintf(s, sizeof(s), "hvm-%d.%d-x86_32p ", major, minor);
ian@13763 977 safe_strcat(*info, s);
iap10@6721 978 }
iap10@6721 979
kaf24@6725 980 #elif defined(CONFIG_X86_64)
iap10@6721 981
keir@13754 982 snprintf(s, sizeof(s), "xen-%d.%d-x86_64 ", major, minor);
ian@13763 983 safe_strcat(*info, s);
ack@13288 984 #ifdef CONFIG_COMPAT
keir@13754 985 snprintf(s, sizeof(s), "xen-%d.%d-x86_32p ", major, minor);
ian@13763 986 safe_strcat(*info, s);
ack@13288 987 #endif
kaf24@6725 988 if ( hvm_enabled )
iap10@6721 989 {
keir@13754 990 snprintf(s, sizeof(s), "hvm-%d.%d-x86_32 ", major, minor);
ian@13763 991 safe_strcat(*info, s);
keir@13754 992 snprintf(s, sizeof(s), "hvm-%d.%d-x86_32p ", major, minor);
ian@13763 993 safe_strcat(*info, s);
keir@13754 994 snprintf(s, sizeof(s), "hvm-%d.%d-x86_64 ", major, minor);
ian@13763 995 safe_strcat(*info, s);
iap10@6721 996 }
kaf24@6725 997
iap10@6721 998 #endif
iap10@6721 999 }
iap10@6721 1000
kaf24@3914 1001 /*
kaf24@3914 1002 * Local variables:
kaf24@3914 1003 * mode: C
kaf24@3914 1004 * c-set-style: "BSD"
kaf24@3914 1005 * c-basic-offset: 4
kaf24@3914 1006 * tab-width: 4
kaf24@3914 1007 * indent-tabs-mode: nil
kaf24@3988 1008 * End:
kaf24@3914 1009 */