ia64/xen-unstable

annotate xen/arch/x86/setup.c @ 15074:23c4790512db

xen: Big changes to x86 start-of-day:

1. x86/64 Xen now relocates itself to physical high memory. This is
useful if we have devices that need very low memory, or if in
future we want to grant a 1:1 mapping of low physical memory to a
special 'native client domain'.

2. We now map only the low 16MB of RAM statically. All other RAM is mapped
dynamically within the constraints of the e820 map. It is
recommended never to map MMIO regions, and this change means that
Xen now obeys this constraint.

3. The CPU bootup trampoline is now permanently installed at
0x90000. This is a necessary prerequisite for CPU hotplug.

4. Start-of-day asm is generally cleaned up and the diff between x86/32
and x86/64 is reduced.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu May 10 18:02:55 2007 +0100 (2007-05-10)
parents aeac9a7d97e0
children e19ddfa781c5
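
Item 2 of the changeset description above says that only the low 16MB of RAM
stays statically mapped and that everything else is handled through a
temporary copy of the e820 map. The following standalone C sketch mirrors the
truncation loop near the top of __start_xen() in the listing below; the
struct layout, the E820 type constants and the main() driver are simplified
stand-ins for illustration, not Xen code.

#include <stdint.h>
#include <stdio.h>

#define E820_RAM      1
#define E820_RESERVED 2

struct entry { uint64_t addr, size; uint32_t type; };

/* Clip RAM entries so that nothing below 16MB remains usable in the copy. */
static void truncate_below_16mb(struct entry *map, int nr)
{
    const uint64_t min = 16ULL << 20; /* 16MB */
    for ( int i = 0; i < nr; i++ )
    {
        uint64_t s = map[i].addr, e = map[i].addr + map[i].size;
        if ( s >= min )
            continue;                     /* already entirely above 16MB */
        if ( e > min )
        {
            map[i].addr = min;            /* straddles 16MB: clip low part */
            map[i].size = e - min;
        }
        else
            map[i].type = E820_RESERVED;  /* wholly below 16MB: mark reserved */
    }
}

int main(void)
{
    struct entry map[] = {
        { 0x00000000, 0x0009f000, E820_RAM }, /* conventional low memory */
        { 0x00100000, 0x3fe00000, E820_RAM }, /* extended memory, ~1GB   */
    };
    truncate_below_16mb(map, 2);
    for ( int i = 0; i < 2; i++ )
        printf("entry %d: addr=%#llx size=%#llx type=%u\n", i,
               (unsigned long long)map[i].addr,
               (unsigned long long)map[i].size, map[i].type);
    return 0;
}

Built on its own, this prints the first entry as E820_RESERVED and the second
clipped to start at 0x1000000, which is what the real loop does to boot_e820
before the superpage-mapping and relocation pass.
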
rev   line source
kaf24@1452 1 #include <xen/config.h>
kaf24@1452 2 #include <xen/init.h>
kaf24@1452 3 #include <xen/lib.h>
kaf24@1452 4 #include <xen/sched.h>
cl349@5247 5 #include <xen/domain.h>
kaf24@1452 6 #include <xen/serial.h>
kaf24@1506 7 #include <xen/softirq.h>
kaf24@1452 8 #include <xen/acpi.h>
kaf24@3338 9 #include <xen/console.h>
iap10@4287 10 #include <xen/serial.h>
kaf24@3338 11 #include <xen/trace.h>
kaf24@3338 12 #include <xen/multiboot.h>
kaf24@5356 13 #include <xen/domain_page.h>
kfraser@10890 14 #include <xen/version.h>
kaf24@9117 15 #include <xen/gdbstub.h>
kaf24@9818 16 #include <xen/percpu.h>
kfraser@11296 17 #include <xen/hypercall.h>
kfraser@11601 18 #include <xen/keyhandler.h>
kfraser@11971 19 #include <xen/numa.h>
kaf24@13662 20 #include <xen/rcupdate.h>
iap10@6721 21 #include <public/version.h>
ack@13291 22 #ifdef CONFIG_COMPAT
ack@13291 23 #include <compat/platform.h>
ack@13291 24 #include <compat/xen.h>
ack@13291 25 #endif
kaf24@1452 26 #include <asm/bitops.h>
kaf24@1452 27 #include <asm/smp.h>
kaf24@1452 28 #include <asm/processor.h>
kaf24@1452 29 #include <asm/mpspec.h>
kaf24@1452 30 #include <asm/apic.h>
kaf24@1452 31 #include <asm/desc.h>
Tim@13909 32 #include <asm/paging.h>
kaf24@3344 33 #include <asm/e820.h>
kaf24@5536 34 #include <acm/acm_hooks.h>
ian@12677 35 #include <xen/kexec.h>
kaf24@3338 36
kfraser@15074 37 #if defined(CONFIG_X86_64)
kfraser@15074 38 #define BOOTSTRAP_DIRECTMAP_END (1UL << 32)
kfraser@15074 39 #define maddr_to_bootstrap_virt(m) maddr_to_virt(m)
kfraser@15074 40 #else
kfraser@15074 41 #define BOOTSTRAP_DIRECTMAP_END HYPERVISOR_VIRT_START
kfraser@15074 42 #define maddr_to_bootstrap_virt(m) ((void *)(long)(m))
kfraser@15074 43 #endif
kfraser@15074 44
kaf24@5157 45 extern void dmi_scan_machine(void);
kaf24@5211 46 extern void generic_apic_probe(void);
kfraser@11971 47 extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
kaf24@5157 48
kaf24@3338 49 /*
kaf24@3338 50 * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
kaf24@8726 51 * page_info table and allocation bitmap.
kaf24@3338 52 */
kaf24@3338 53 static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
kaf24@4950 54 #if defined(CONFIG_X86_64)
kaf24@3338 55 integer_param("xenheap_megabytes", opt_xenheap_megabytes);
kaf24@3354 56 #endif
kaf24@1452 57
kaf24@5146 58 /* opt_nosmp: If true, secondary processors are ignored. */
kaf24@5900 59 static int opt_nosmp = 0;
kaf24@5146 60 boolean_param("nosmp", opt_nosmp);
kaf24@5146 61
kaf24@5146 62 /* maxcpus: maximum number of CPUs to activate. */
kaf24@5146 63 static unsigned int max_cpus = NR_CPUS;
shand@11156 64 integer_param("maxcpus", max_cpus);
kaf24@5146 65
kaf24@3334 66 /* opt_watchdog: If true, run a watchdog NMI on each processor. */
kaf24@3334 67 static int opt_watchdog = 0;
kaf24@3334 68 boolean_param("watchdog", opt_watchdog);
kaf24@3334 69
kaf24@4850 70 /* **** Linux config option: propagated to domain0. */
kaf24@4850 71 /* "acpi=off": Sisables both ACPI table parsing and interpreter. */
kaf24@4850 72 /* "acpi=force": Override the disable blacklist. */
kaf24@4850 73 /* "acpi=strict": Disables out-of-spec workarounds. */
kaf24@4850 74 /* "acpi=ht": Limit ACPI just to boot-time to enable HT. */
kaf24@4850 75 /* "acpi=noirq": Disables ACPI interrupt routing. */
kaf24@4850 76 static void parse_acpi_param(char *s);
kaf24@4850 77 custom_param("acpi", parse_acpi_param);
kaf24@4850 78
kaf24@4850 79 /* **** Linux config option: propagated to domain0. */
kaf24@4850 80 /* acpi_skip_timer_override: Skip IRQ0 overrides. */
kaf24@4850 81 extern int acpi_skip_timer_override;
kaf24@4850 82 boolean_param("acpi_skip_timer_override", acpi_skip_timer_override);
kaf24@4850 83
kaf24@4850 84 /* **** Linux config option: propagated to domain0. */
kaf24@4850 85 /* noapic: Disable IOAPIC setup. */
kaf24@4850 86 extern int skip_ioapic_setup;
kaf24@4850 87 boolean_param("noapic", skip_ioapic_setup);
kaf24@4850 88
kaf24@3594 89 int early_boot = 1;
kaf24@3594 90
kaf24@5146 91 cpumask_t cpu_present_map;
kaf24@5146 92
kfraser@15074 93 unsigned long xen_phys_start;
kfraser@15074 94
kaf24@5003 95 /* Limits of Xen heap, used to initialise the allocator. */
kaf24@5003 96 unsigned long xenheap_phys_start, xenheap_phys_end;
kaf24@3338 97
kaf24@2298 98 extern void arch_init_memory(void);
kaf24@1589 99 extern void init_IRQ(void);
kaf24@1589 100 extern void trap_init(void);
kaf24@5604 101 extern void early_time_init(void);
kaf24@5167 102 extern void early_cpu_init(void);
kaf24@1589 103
kaf24@8533 104 struct tss_struct init_tss[NR_CPUS];
kaf24@8533 105
kfraser@15074 106 char __attribute__ ((__section__(".bss.page_aligned"))) cpu0_stack[STACK_SIZE];
kaf24@5011 107
kaf24@5214 108 struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
kaf24@1452 109
kaf24@5237 110 #if CONFIG_PAGING_LEVELS > 2
kaf24@1670 111 unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
kaf24@1670 112 #else
kaf24@5593 113 unsigned long mmu_cr4_features = X86_CR4_PSE;
kaf24@1670 114 #endif
kaf24@1452 115 EXPORT_SYMBOL(mmu_cr4_features);
kaf24@1452 116
kaf24@4818 117 int acpi_disabled;
kaf24@1452 118
kaf24@4850 119 int acpi_force;
kaf24@4850 120 char acpi_param[10] = "";
kfraser@15074 121 static void __init parse_acpi_param(char *s)
kaf24@4850 122 {
kaf24@4850 123 /* Save the parameter so it can be propagated to domain0. */
kfraser@13689 124 safe_strcpy(acpi_param, s);
kaf24@4850 125
kaf24@4850 126 /* Interpret the parameter for use within Xen. */
kaf24@4850 127 if ( !strcmp(s, "off") )
kaf24@4850 128 {
kaf24@4850 129 disable_acpi();
kaf24@4850 130 }
kaf24@4850 131 else if ( !strcmp(s, "force") )
kaf24@4850 132 {
kaf24@4850 133 acpi_force = 1;
kaf24@4850 134 acpi_ht = 1;
kaf24@4850 135 acpi_disabled = 0;
kaf24@4850 136 }
kaf24@4850 137 else if ( !strcmp(s, "strict") )
kaf24@4850 138 {
kaf24@4850 139 acpi_strict = 1;
kaf24@4850 140 }
kaf24@4850 141 else if ( !strcmp(s, "ht") )
kaf24@4850 142 {
kaf24@4850 143 if ( !acpi_force )
kaf24@4850 144 disable_acpi();
kaf24@4850 145 acpi_ht = 1;
kaf24@4850 146 }
kaf24@4850 147 else if ( !strcmp(s, "noirq") )
kaf24@4850 148 {
kaf24@4850 149 acpi_noirq_set();
kaf24@4850 150 }
kaf24@4850 151 }
kaf24@4850 152
kaf24@1452 153 static void __init do_initcalls(void)
kaf24@1452 154 {
kaf24@1452 155 initcall_t *call;
kaf24@1452 156 for ( call = &__initcall_start; call < &__initcall_end; call++ )
kaf24@1452 157 (*call)();
kaf24@1452 158 }
kaf24@1452 159
kfraser@15074 160 #define EARLY_FAIL(f, a...) do { \
kfraser@15074 161 printk( f , ## a ); \
kfraser@15074 162 for ( ; ; ) __asm__ __volatile__ ( "hlt" ); \
kfraser@15074 163 } while (0)
kaf24@8459 164
kfraser@15074 165 static struct e820entry __initdata e820_raw[E820MAX];
kaf24@8459 166
kfraser@15074 167 static unsigned long __initdata initial_images_start, initial_images_end;
kaf24@9067 168
kfraser@15074 169 unsigned long __init initial_images_nrpages(void)
kaf24@9067 170 {
kaf24@9067 171 unsigned long s = initial_images_start + PAGE_SIZE - 1;
kaf24@9067 172 unsigned long e = initial_images_end;
kaf24@9067 173 return ((e >> PAGE_SHIFT) - (s >> PAGE_SHIFT));
kaf24@9067 174 }
kaf24@9067 175
kfraser@15074 176 void __init discard_initial_images(void)
kaf24@9067 177 {
kaf24@9067 178 init_domheap_pages(initial_images_start, initial_images_end);
kaf24@9067 179 }
kaf24@9067 180
kaf24@9818 181 extern char __per_cpu_start[], __per_cpu_data_end[], __per_cpu_end[];
kaf24@9818 182
kfraser@11241 183 static void __init percpu_init_areas(void)
kaf24@9818 184 {
kaf24@9818 185 unsigned int i, data_size = __per_cpu_data_end - __per_cpu_start;
kfraser@15074 186 unsigned int first_unused;
kaf24@9818 187
kaf24@9818 188 BUG_ON(data_size > PERCPU_SIZE);
kaf24@9818 189
kfraser@15074 190 /* Initialise per-cpu data area for all possible secondary CPUs. */
kfraser@15074 191 for ( i = 1; (i < NR_CPUS) && cpu_possible(i); i++ )
kfraser@15074 192 memcpy(__per_cpu_start + (i << PERCPU_SHIFT),
kfraser@15074 193 __per_cpu_start,
kfraser@15074 194 data_size);
kaf24@9818 195 first_unused = i;
kaf24@9818 196
kfraser@14340 197 /* Check that there are no holes in cpu_possible_map. */
kaf24@9818 198 for ( ; i < NR_CPUS; i++ )
kfraser@14340 199 BUG_ON(cpu_possible(i));
kaf24@9818 200
kfraser@11241 201 #ifndef MEMORY_GUARD
kaf24@9818 202 init_xenheap_pages(__pa(__per_cpu_start) + (first_unused << PERCPU_SHIFT),
kaf24@9818 203 __pa(__per_cpu_end));
kfraser@11241 204 #endif
kaf24@9818 205 }
kaf24@9818 206
kfraser@11881 207 /* Fetch acm policy module from multiboot modules. */
kfraser@15074 208 static void __init extract_acm_policy(
kfraser@11881 209 multiboot_info_t *mbi,
kfraser@11881 210 unsigned int *initrdidx,
kfraser@11881 211 char **_policy_start,
kfraser@11881 212 unsigned long *_policy_len)
kfraser@11881 213 {
kfraser@11881 214 int i;
kfraser@11881 215 module_t *mod = (module_t *)__va(mbi->mods_addr);
kfraser@11881 216 unsigned long start, policy_len;
kfraser@11881 217 char *policy_start;
kfraser@11881 218
kfraser@11881 219 /*
kfraser@11881 220 * Try all modules and see which one could be the binary policy.
kfraser@11881 221 * Adjust the initrdidx if module[1] is the binary policy.
kfraser@11881 222 */
kfraser@11881 223 for ( i = mbi->mods_count-1; i >= 1; i-- )
kfraser@11881 224 {
kfraser@11881 225 start = initial_images_start + (mod[i].mod_start-mod[0].mod_start);
kfraser@15074 226 policy_start = maddr_to_bootstrap_virt(start);
kfraser@11881 227 policy_len = mod[i].mod_end - mod[i].mod_start;
kfraser@11881 228 if ( acm_is_policy(policy_start, policy_len) )
kfraser@11881 229 {
kfraser@11881 230 printk("Policy len 0x%lx, start at %p - module %d.\n",
kfraser@11881 231 policy_len, policy_start, i);
kfraser@11881 232 *_policy_start = policy_start;
kfraser@11881 233 *_policy_len = policy_len;
kfraser@11881 234 if ( i == 1 )
kfraser@11881 235 *initrdidx = (mbi->mods_count > 2) ? 2 : 0;
kfraser@11881 236 break;
kfraser@11881 237 }
kfraser@11881 238 }
kfraser@11881 239 }
kfraser@11881 240
kfraser@11241 241 static void __init init_idle_domain(void)
kfraser@11240 242 {
kfraser@11240 243 struct domain *idle_domain;
kfraser@11240 244
kfraser@11240 245 /* Domain creation requires that scheduler structures are initialised. */
kfraser@11240 246 scheduler_init();
kfraser@11240 247
kfraser@14911 248 idle_domain = domain_create(IDLE_DOMAIN_ID, 0, 0);
kfraser@11240 249 if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
kfraser@11240 250 BUG();
kfraser@11240 251
kfraser@11240 252 set_current(idle_domain->vcpu[0]);
kfraser@11240 253 idle_vcpu[0] = this_cpu(curr_vcpu) = current;
kfraser@11240 254
kfraser@11240 255 setup_idle_pagetable();
kfraser@11240 256 }
kfraser@11240 257
kfraser@15074 258 static void __init srat_detect_node(int cpu)
kfraser@11971 259 {
kfraser@11998 260 unsigned node;
kfraser@11998 261 u8 apicid = x86_cpu_to_apicid[cpu];
kfraser@11971 262
kfraser@11998 263 node = apicid_to_node[apicid];
kfraser@11998 264 if ( node == NUMA_NO_NODE )
kfraser@11998 265 node = 0;
kfraser@11998 266 numa_set_node(cpu, node);
kfraser@11971 267
kfraser@11998 268 if ( acpi_numa > 0 )
kfraser@11998 269 printk(KERN_INFO "CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
kfraser@11971 270 }
kfraser@11971 271
kfraser@15074 272 static void __init move_memory(
kfraser@15074 273 unsigned long dst, unsigned long src_start, unsigned long src_end)
ian@12677 274 {
kfraser@15074 275 memmove(maddr_to_bootstrap_virt(dst),
kfraser@15074 276 maddr_to_bootstrap_virt(src_start),
ian@12677 277 src_end - src_start);
kfraser@15074 278 }
kfraser@15074 279
kfraser@15074 280 /* A temporary copy of the e820 map that we can mess with during bootstrap. */
kfraser@15074 281 static struct e820map __initdata boot_e820;
kfraser@15074 282
kfraser@15074 283 /* Reserve area (@s,@e) in the temporary bootstrap e820 map. */
kfraser@15074 284 static void __init reserve_in_boot_e820(unsigned long s, unsigned long e)
kfraser@15074 285 {
kfraser@15074 286 unsigned long rs, re;
kfraser@15074 287 int i;
kfraser@15074 288
kfraser@15074 289 for ( i = 0; i < boot_e820.nr_map; i++ )
kfraser@15074 290 {
kfraser@15074 291 /* Have we found the e820 region that includes the specified range? */
kfraser@15074 292 rs = boot_e820.map[i].addr;
kfraser@15074 293 re = boot_e820.map[i].addr + boot_e820.map[i].size;
kfraser@15074 294 if ( (s < rs) || (e > re) )
kfraser@15074 295 continue;
kfraser@15074 296
kfraser@15074 297 /* Start fragment. */
kfraser@15074 298 boot_e820.map[i].size = s - rs;
kfraser@15074 299
kfraser@15074 300 /* End fragment. */
kfraser@15074 301 if ( e < re )
kfraser@15074 302 {
kfraser@15074 303 memmove(&boot_e820.map[i+1], &boot_e820.map[i],
kfraser@15074 304 (boot_e820.nr_map-i) * sizeof(boot_e820.map[0]));
kfraser@15074 305 boot_e820.nr_map++;
kfraser@15074 306 i++;
kfraser@15074 307 boot_e820.map[i].addr = e;
kfraser@15074 308 boot_e820.map[i].size = re - e;
kfraser@15074 309 }
kfraser@15074 310 }
ian@12677 311 }
ian@12677 312
kaf24@8463 313 void __init __start_xen(multiboot_info_t *mbi)
kaf24@1452 314 {
kaf24@9823 315 char __cmdline[] = "", *cmdline = __cmdline;
kaf24@8457 316 unsigned long _initrd_start = 0, _initrd_len = 0;
kaf24@8457 317 unsigned int initrdidx = 1;
kfraser@11881 318 char *_policy_start = NULL;
kfraser@11881 319 unsigned long _policy_len = 0;
kaf24@8457 320 module_t *mod = (module_t *)__va(mbi->mods_addr);
kaf24@6111 321 unsigned long nr_pages, modules_length;
kaf24@8402 322 int i, e820_warn = 0, e820_raw_nr = 0, bytes = 0;
kaf24@5776 323 struct ns16550_defaults ns16550 = {
kaf24@5776 324 .data_bits = 8,
kaf24@5776 325 .parity = 'n',
kaf24@5776 326 .stop_bits = 1
kaf24@5776 327 };
kaf24@3338 328
kfraser@12853 329 extern void early_page_fault(void);
kfraser@12853 330 set_intr_gate(TRAP_page_fault, &early_page_fault);
kfraser@12853 331
kaf24@3338 332 /* Parse the command-line options. */
kaf24@3344 333 if ( (mbi->flags & MBI_CMDLINE) && (mbi->cmdline != 0) )
kaf24@9823 334 cmdline = __va(mbi->cmdline);
kaf24@9823 335 cmdline_parse(cmdline);
kaf24@3338 336
kaf24@8534 337 set_current((struct vcpu *)0xfffff000); /* debug sanity */
kfraser@11240 338 idle_vcpu[0] = current;
kaf24@8534 339 set_processor_id(0); /* needed early, for smp_processor_id() */
kaf24@3338 340
kaf24@5146 341 smp_prepare_boot_cpu();
kaf24@5146 342
kaf24@3338 343 /* We initialise the serial devices very early so we can get debugging. */
kaf24@5776 344 ns16550.io_base = 0x3f8;
kaf24@5776 345 ns16550.irq = 4;
kaf24@5776 346 ns16550_init(0, &ns16550);
kaf24@5776 347 ns16550.io_base = 0x2f8;
kaf24@5776 348 ns16550.irq = 3;
kaf24@5776 349 ns16550_init(1, &ns16550);
kaf24@5195 350 serial_init_preirq();
kaf24@3338 351
kaf24@3338 352 init_console();
kaf24@3338 353
kfraser@11947 354 printk("Command line: %s\n", cmdline);
kaf24@9823 355
kaf24@3344 356 /* Check that we have at least one Multiboot module. */
kaf24@3344 357 if ( !(mbi->flags & MBI_MODULES) || (mbi->mods_count == 0) )
kfraser@15074 358 EARLY_FAIL("dom0 kernel not specified. "
kfraser@15074 359 "Check bootloader configuration.\n");
kaf24@5011 360
kaf24@5011 361 if ( ((unsigned long)cpu0_stack & (STACK_SIZE-1)) != 0 )
kfraser@15074 362 EARLY_FAIL("Misaligned CPU0 stack.\n");
kaf24@3338 363
kfraser@11618 364 /*
kfraser@11618 365 * Since there are some stubs getting built on the stacks which use
kfraser@11618 366 * direct calls/jumps, the heap must be confined to the lower 2G so
kfraser@11618 367 * that those branches can reach their targets.
kfraser@11618 368 */
kfraser@11618 369 if ( opt_xenheap_megabytes > 2048 )
kfraser@11618 370 opt_xenheap_megabytes = 2048;
kaf24@3338 371
kaf24@3344 372 if ( mbi->flags & MBI_MEMMAP )
kaf24@3344 373 {
kaf24@3344 374 while ( bytes < mbi->mmap_length )
kaf24@3344 375 {
kaf24@3344 376 memory_map_t *map = __va(mbi->mmap_addr + bytes);
kaf24@8402 377
kaf24@8402 378 /*
kaf24@8403 379 * This is a gross workaround for a BIOS bug. Some bootloaders do
kaf24@8402 380 * not write e820 map entries into pre-zeroed memory. This is
kaf24@8402 381 * okay if the BIOS fills in all fields of the map entry, but
kaf24@8402 382 * some broken BIOSes do not bother to write the high word of
kaf24@8402 383 * the length field if the length is smaller than 4GB. We
kaf24@8402 384 * detect and fix this by flagging sections below 4GB that
kaf24@8403 385 * appear to be larger than 4GB in size.
kaf24@8402 386 */
kaf24@8403 387 if ( (map->base_addr_high == 0) && (map->length_high != 0) )
kaf24@8402 388 {
kaf24@8402 389 e820_warn = 1;
kaf24@8402 390 map->length_high = 0;
kaf24@8402 391 }
kaf24@8402 392
kaf24@3344 393 e820_raw[e820_raw_nr].addr =
kaf24@3344 394 ((u64)map->base_addr_high << 32) | (u64)map->base_addr_low;
kaf24@3344 395 e820_raw[e820_raw_nr].size =
kaf24@3344 396 ((u64)map->length_high << 32) | (u64)map->length_low;
kaf24@3344 397 e820_raw[e820_raw_nr].type =
kfraser@12226 398 (map->type > E820_NVS) ? E820_RESERVED : map->type;
kaf24@3344 399 e820_raw_nr++;
kaf24@8402 400
kaf24@3344 401 bytes += map->size + 4;
kaf24@3344 402 }
kaf24@3344 403 }
kaf24@3344 404 else if ( mbi->flags & MBI_MEMLIMITS )
kaf24@3344 405 {
kaf24@3344 406 e820_raw[0].addr = 0;
kaf24@3344 407 e820_raw[0].size = mbi->mem_lower << 10;
kaf24@3344 408 e820_raw[0].type = E820_RAM;
kaf24@3354 409 e820_raw[1].addr = 0x100000;
kaf24@3354 410 e820_raw[1].size = mbi->mem_upper << 10;
kaf24@3354 411 e820_raw[1].type = E820_RAM;
kaf24@3344 412 e820_raw_nr = 2;
kaf24@3344 413 }
kaf24@3344 414 else
kaf24@3344 415 {
kfraser@15074 416 EARLY_FAIL("Bootloader provided no memory information.\n");
kaf24@3344 417 }
kaf24@3344 418
kaf24@8402 419 if ( e820_warn )
kaf24@8402 420 printk("WARNING: Buggy e820 map detected and fixed "
kaf24@8402 421 "(truncated length fields).\n");
kaf24@8402 422
kaf24@13427 423 /* Ensure that all E820 RAM regions are page-aligned and -sized. */
kaf24@13427 424 for ( i = 0; i < e820_raw_nr; i++ )
kaf24@13427 425 {
kaf24@13427 426 uint64_t s, e;
kaf24@13427 427 if ( e820_raw[i].type != E820_RAM )
kaf24@13427 428 continue;
kaf24@13427 429 s = PFN_UP(e820_raw[i].addr);
kaf24@13427 430 e = PFN_DOWN(e820_raw[i].addr + e820_raw[i].size);
kaf24@13427 431 e820_raw[i].size = 0; /* discarded later */
kaf24@13427 432 if ( s < e )
kaf24@13427 433 {
kaf24@13427 434 e820_raw[i].addr = s << PAGE_SHIFT;
kaf24@13427 435 e820_raw[i].size = (e - s) << PAGE_SHIFT;
kaf24@13427 436 }
kaf24@13427 437 }
kaf24@13427 438
kaf24@13427 439 /* Sanitise the raw E820 map to produce a final clean version. */
kaf24@4950 440 max_page = init_e820(e820_raw, &e820_raw_nr);
kaf24@3338 441
kfraser@15074 442 /*
kfraser@15074 443 * Create a temporary copy of the E820 map. Truncate it to above 16MB
kfraser@15074 444 * as anything below that is already mapped and has a statically-allocated
kfraser@15074 445 * purpose.
kfraser@15074 446 */
kfraser@15074 447 memcpy(&boot_e820, &e820, sizeof(e820));
kfraser@15074 448 for ( i = 0; i < boot_e820.nr_map; i++ )
kaf24@3338 449 {
kfraser@15074 450 uint64_t s, e, min = 16 << 20; /* 16MB */
kfraser@15074 451 s = boot_e820.map[i].addr;
kfraser@15074 452 e = boot_e820.map[i].addr + boot_e820.map[i].size;
kfraser@15074 453 if ( s >= min )
kfraser@15074 454 continue;
kfraser@15074 455 if ( e > min )
kaf24@6111 456 {
kfraser@15074 457 boot_e820.map[i].addr = min;
kfraser@15074 458 boot_e820.map[i].size = e - min;
kaf24@6111 459 }
kfraser@15074 460 else
kfraser@15074 461 boot_e820.map[i].type = E820_RESERVED;
kaf24@3338 462 }
kaf24@6111 463
kfraser@15074 464 /*
kfraser@15074 465 * Iterate over all superpage-aligned RAM regions.
kfraser@15074 466 *
kfraser@15074 467 * We require superpage alignment because the boot allocator is not yet
kfraser@15074 468 * initialised. Hence we can only map superpages in the address range
kfraser@15074 469 * 0 to BOOTSTRAP_DIRECTMAP_END, as this is guaranteed not to require
kfraser@15074 470 * dynamic allocation of pagetables.
kfraser@15074 471 *
kfraser@15074 472 * As well as mapping superpages in that range, in preparation for
kfraser@15074 473 * initialising the boot allocator, we also look for a region to which
kfraser@15074 474 * we can relocate the dom0 kernel and other multiboot modules. Also, on
kfraser@15074 475 * x86/64, we relocate Xen to higher memory.
kfraser@15074 476 */
kfraser@15074 477 modules_length = mod[mbi->mods_count-1].mod_end - mod[0].mod_start;
kfraser@15074 478 for ( i = 0; i < boot_e820.nr_map; i++ )
kfraser@15074 479 {
kfraser@15074 480 uint64_t s, e, mask = (1UL << L2_PAGETABLE_SHIFT) - 1;
kaf24@6134 481
kfraser@15074 482 /* Superpage-aligned chunks up to BOOTSTRAP_DIRECTMAP_END, please. */
kfraser@15074 483 s = (boot_e820.map[i].addr + mask) & ~mask;
kfraser@15074 484 e = (boot_e820.map[i].addr + boot_e820.map[i].size) & ~mask;
kfraser@15074 485 e = min_t(uint64_t, e, BOOTSTRAP_DIRECTMAP_END);
kfraser@15074 486 if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) )
kaf24@3354 487 continue;
kaf24@6111 488
kfraser@15074 489 /* Map the chunk. No memory will need to be allocated to do this. */
kfraser@15074 490 map_pages_to_xen(
kfraser@15074 491 (unsigned long)maddr_to_bootstrap_virt(s),
kfraser@15074 492 s >> PAGE_SHIFT, (e-s) >> PAGE_SHIFT, PAGE_HYPERVISOR);
kaf24@6111 493
kfraser@15074 494 /* Is the region suitable for relocating the multiboot modules? */
kfraser@15074 495 if ( !initial_images_start && ((e-s) >= modules_length) )
kfraser@15074 496 {
kfraser@15074 497 e -= modules_length;
kfraser@15074 498 e &= ~mask;
kfraser@15074 499 initial_images_start = e;
kfraser@15074 500 initial_images_end = initial_images_start + modules_length;
kfraser@15074 501 move_memory(initial_images_start,
kfraser@15074 502 mod[0].mod_start, mod[mbi->mods_count-1].mod_end);
kfraser@15074 503 if ( s >= e )
kfraser@15074 504 continue;
kfraser@15074 505 }
kaf24@6111 506
kfraser@14084 507 #if defined(CONFIG_X86_64)
kfraser@15074 508 /* Is the region suitable for relocating Xen? */
kfraser@15074 509 if ( !xen_phys_start && (((e-s) >> 20) >= opt_xenheap_megabytes) )
kaf24@5003 510 {
kfraser@15074 511 extern l2_pgentry_t l2_xenmap[];
kfraser@15074 512 l4_pgentry_t *pl4e;
kfraser@15074 513 l3_pgentry_t *pl3e;
kfraser@15074 514 l2_pgentry_t *pl2e;
kfraser@15074 515 int i, j;
kfraser@15074 516
kfraser@15074 517 /* Select relocation address. */
kfraser@15074 518 e = (e - (opt_xenheap_megabytes << 20)) & ~mask;
kfraser@15074 519 xen_phys_start = e;
kfraser@15074 520 boot_trampoline_va(trampoline_xen_phys_start) = e;
kfraser@15074 521
kfraser@15074 522 /*
kfraser@15074 523 * Perform relocation to new physical address.
kfraser@15074 524 * Before doing so we must sync static/global data with main memory
kfraser@15074 525 * with a barrier(). After this we must *not* modify static/global
kfraser@15074 526 * data until after we have switched to the relocated pagetables!
kfraser@15074 527 */
kfraser@15074 528 barrier();
kfraser@15074 529 move_memory(e, 0, __pa(&_end) - xen_phys_start);
kfraser@15074 530
kfraser@15074 531 /* Walk initial pagetables, relocating page directory entries. */
kfraser@15074 532 pl4e = __va(__pa(idle_pg_table));
kfraser@15074 533 for ( i = 0 ; i < L4_PAGETABLE_ENTRIES; i++, pl4e++ )
kfraser@15074 534 {
kfraser@15074 535 if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
kfraser@15074 536 continue;
kfraser@15074 537 *pl4e = l4e_from_intpte(l4e_get_intpte(*pl4e) +
kfraser@15074 538 xen_phys_start);
kfraser@15074 539 pl3e = l4e_to_l3e(*pl4e);
kfraser@15074 540 for ( j = 0; j < L3_PAGETABLE_ENTRIES; j++, pl3e++ )
kfraser@15074 541 {
kfraser@15074 542 /* Not present or already relocated? */
kfraser@15074 543 if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) ||
kfraser@15074 544 (l3e_get_pfn(*pl3e) > 0x1000) )
kfraser@15074 545 continue;
kfraser@15074 546 *pl3e = l3e_from_intpte(l3e_get_intpte(*pl3e) +
kfraser@15074 547 xen_phys_start);
kfraser@15074 548 }
kfraser@15074 549 }
kfraser@15074 550
kfraser@15074 551 /* The only data mappings to be relocated are in the Xen area. */
kfraser@15074 552 pl2e = __va(__pa(l2_xenmap));
kfraser@15074 553 for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++, pl2e++ )
kfraser@15074 554 {
kfraser@15074 555 if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
kfraser@15074 556 continue;
kfraser@15074 557 *pl2e = l2e_from_intpte(l2e_get_intpte(*pl2e) +
kfraser@15074 558 xen_phys_start);
kfraser@15074 559 }
kfraser@15074 560
kfraser@15074 561 /* Re-sync the stack and then switch to relocated pagetables. */
kfraser@15074 562 asm volatile (
kfraser@15074 563 "rep movsb ; " /* re-sync the stack */
kfraser@15074 564 "movq %%cr4,%%rsi ; "
kfraser@15074 565 "andb $0x7f,%%sil ; "
kfraser@15074 566 "movq %%rsi,%%cr4 ; " /* CR4.PGE == 0 */
kfraser@15074 567 "movq %0,%%cr3 ; " /* CR3 == new pagetables */
kfraser@15074 568 "orb $0x80,%%sil ; "
kfraser@15074 569 "movq %%rsi,%%cr4 " /* CR4.PGE == 1 */
kfraser@15074 570 : : "r" (__pa(idle_pg_table)), "S" (cpu0_stack),
kfraser@15074 571 "D" (__va(__pa(cpu0_stack))), "c" (STACK_SIZE) : "memory" );
kaf24@5003 572 }
kaf24@5003 573 #endif
kaf24@3354 574 }
kaf24@3354 575
kfraser@15074 576 if ( !initial_images_start )
kfraser@15074 577 EARLY_FAIL("Not enough memory to relocate the dom0 kernel image.\n");
kfraser@15074 578 reserve_in_boot_e820(initial_images_start, initial_images_end);
kfraser@15074 579
kfraser@15074 580 /*
kfraser@15074 581 * With modules (and Xen itself, on x86/64) relocated out of the way, we
kfraser@15074 582 * can now initialise the boot allocator with some memory.
kfraser@15074 583 */
kfraser@15074 584 xenheap_phys_start = init_boot_allocator(__pa(&_end));
kfraser@15074 585 xenheap_phys_end = opt_xenheap_megabytes << 20;
kfraser@15074 586 #if defined(CONFIG_X86_64)
kfraser@15074 587 if ( !xen_phys_start )
kfraser@15074 588 EARLY_FAIL("Not enough memory to relocate Xen.\n");
kfraser@15074 589 xenheap_phys_end += xen_phys_start;
kfraser@15074 590 reserve_in_boot_e820(xen_phys_start,
kfraser@15074 591 xen_phys_start + (opt_xenheap_megabytes<<20));
kfraser@15074 592 init_boot_pages(1<<20, 16<<20); /* Initial seed: 15MB */
kfraser@15074 593 #else
kfraser@15074 594 init_boot_pages(xenheap_phys_end, 16<<20); /* Initial seed: 4MB */
kfraser@15074 595 #endif
kfraser@15074 596
kfraser@15074 597 /*
kfraser@15074 598 * With the boot allocator now seeded, we can walk every RAM region and
kfraser@15074 599 * map it in its entirety (on x86/64, at least) and notify it to the
kfraser@15074 600 * boot allocator.
kfraser@15074 601 */
kfraser@15074 602 for ( i = 0; i < boot_e820.nr_map; i++ )
kfraser@15074 603 {
kfraser@15074 604 uint64_t s, e, map_e, mask = PAGE_SIZE - 1;
kfraser@15074 605
kfraser@15074 606 /* Only page alignment required now. */
kfraser@15074 607 s = (boot_e820.map[i].addr + mask) & ~mask;
kfraser@15074 608 e = (boot_e820.map[i].addr + boot_e820.map[i].size) & ~mask;
kfraser@15074 609 if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) )
kfraser@15074 610 continue;
kfraser@15074 611
kfraser@15074 612 /* Perform the mapping (truncated in 32-bit mode). */
kfraser@15074 613 map_e = e;
kfraser@15074 614 #if defined(CONFIG_X86_32)
kfraser@15074 615 map_e = min_t(uint64_t, map_e, BOOTSTRAP_DIRECTMAP_END);
kfraser@15074 616 #endif
kfraser@15074 617 if ( s < map_e )
kfraser@15074 618 map_pages_to_xen(
kfraser@15074 619 (unsigned long)maddr_to_bootstrap_virt(s),
kfraser@15074 620 s >> PAGE_SHIFT, (map_e-s) >> PAGE_SHIFT, PAGE_HYPERVISOR);
kfraser@15074 621
kfraser@15074 622 init_boot_pages(s, e);
kfraser@15074 623 }
kfraser@15074 624
kfraser@15074 625 if ( (kexec_crash_area.size > 0) && (kexec_crash_area.start > 0) )
ian@12681 626 {
ian@12677 627 unsigned long kdump_start, kdump_size, k;
ian@12677 628
ian@12681 629 /* Mark image pages as free for now. */
ian@12677 630 init_boot_pages(initial_images_start, initial_images_end);
ian@12677 631
ian@12713 632 kdump_start = kexec_crash_area.start;
ian@12713 633 kdump_size = kexec_crash_area.size;
ian@12677 634
ian@12681 635 printk("Kdump: %luMB (%lukB) at 0x%lx\n",
ian@12677 636 kdump_size >> 20,
ian@12677 637 kdump_size >> 10,
ian@12677 638 kdump_start);
ian@12677 639
ian@12681 640 if ( (kdump_start & ~PAGE_MASK) || (kdump_size & ~PAGE_MASK) )
ian@12677 641 panic("Kdump parameters not page aligned\n");
ian@12677 642
ian@12677 643 kdump_start >>= PAGE_SHIFT;
ian@12677 644 kdump_size >>= PAGE_SHIFT;
ian@12677 645
kfraser@12853 646 /* Allocate pages for Kdump memory area. */
kfraser@14083 647 if ( !reserve_boot_pages(kdump_start, kdump_size) )
ian@12677 648 panic("Unable to reserve Kdump memory\n");
ian@12677 649
kfraser@12853 650 /* Allocate pages for relocated initial images. */
ian@12677 651 k = ((initial_images_end - initial_images_start) & ~PAGE_MASK) ? 1 : 0;
ian@12677 652 k += (initial_images_end - initial_images_start) >> PAGE_SHIFT;
ian@12677 653
kfraser@14084 654 #if defined(CONFIG_X86_32)
kfraser@14084 655 /* Must allocate within bootstrap 1:1 limits. */
kfraser@15074 656 k = alloc_boot_low_pages(k, 1); /* 0x0 - BOOTSTRAP_DIRECTMAP_END */
kfraser@14084 657 #else
kfraser@14084 658 k = alloc_boot_pages(k, 1);
kfraser@14084 659 #endif
kfraser@12853 660 if ( k == 0 )
ian@12677 661 panic("Unable to allocate initial images memory\n");
ian@12677 662
ian@12677 663 move_memory(k << PAGE_SHIFT, initial_images_start, initial_images_end);
ian@12677 664
ian@12677 665 initial_images_end -= initial_images_start;
ian@12677 666 initial_images_start = k << PAGE_SHIFT;
ian@12677 667 initial_images_end += initial_images_start;
ian@12681 668 }
ian@12677 669
kaf24@5003 670 memguard_init();
kaf24@4950 671
kfraser@15074 672 nr_pages = 0;
kfraser@15074 673 for ( i = 0; i < e820.nr_map; i++ )
kfraser@15074 674 if ( e820.map[i].type == E820_RAM )
kfraser@15074 675 nr_pages += e820.map[i].size >> PAGE_SHIFT;
ian@12681 676 printk("System RAM: %luMB (%lukB)\n",
kaf24@3354 677 nr_pages >> (20 - PAGE_SHIFT),
kaf24@3354 678 nr_pages << (PAGE_SHIFT - 10));
kaf24@7220 679 total_pages = nr_pages;
kaf24@3354 680
kfraser@11296 681 /* Sanity check for unwanted bloat of certain hypercall structures. */
kfraser@11296 682 BUILD_BUG_ON(sizeof(((struct xen_platform_op *)0)->u) !=
kfraser@11296 683 sizeof(((struct xen_platform_op *)0)->u.pad));
kfraser@11296 684 BUILD_BUG_ON(sizeof(((struct xen_domctl *)0)->u) !=
kfraser@11296 685 sizeof(((struct xen_domctl *)0)->u.pad));
kfraser@11296 686 BUILD_BUG_ON(sizeof(((struct xen_sysctl *)0)->u) !=
kfraser@11296 687 sizeof(((struct xen_sysctl *)0)->u.pad));
kaf24@7388 688
kaf24@9878 689 BUILD_BUG_ON(sizeof(start_info_t) > PAGE_SIZE);
kaf24@9878 690 BUILD_BUG_ON(sizeof(shared_info_t) > PAGE_SIZE);
ack@13292 691 BUILD_BUG_ON(sizeof(struct vcpu_info) != 64);
kaf24@7744 692
ack@13291 693 #ifdef CONFIG_COMPAT
ack@13291 694 BUILD_BUG_ON(sizeof(((struct compat_platform_op *)0)->u) !=
ack@13291 695 sizeof(((struct compat_platform_op *)0)->u.pad));
ack@13291 696 BUILD_BUG_ON(sizeof(start_info_compat_t) > PAGE_SIZE);
ack@13292 697 BUILD_BUG_ON(sizeof(struct compat_vcpu_info) != 64);
ack@13291 698 #endif
ack@13291 699
kfraser@10492 700 /* Check definitions in public headers match internal defs. */
kaf24@9878 701 BUILD_BUG_ON(__HYPERVISOR_VIRT_START != HYPERVISOR_VIRT_START);
kaf24@8521 702 #ifdef HYPERVISOR_VIRT_END
kaf24@9878 703 BUILD_BUG_ON(__HYPERVISOR_VIRT_END != HYPERVISOR_VIRT_END);
kaf24@8521 704 #endif
kfraser@10492 705 BUILD_BUG_ON(MACH2PHYS_VIRT_START != RO_MPT_VIRT_START);
kfraser@10492 706 BUILD_BUG_ON(MACH2PHYS_VIRT_END != RO_MPT_VIRT_END);
kaf24@8521 707
kaf24@3354 708 init_frametable();
kaf24@3338 709
kfraser@11971 710 acpi_boot_table_init();
kfraser@11971 711
kfraser@11971 712 acpi_numa_init();
kfraser@11971 713
kfraser@11971 714 numa_initmem_init(0, max_page);
kfraser@11971 715
kaf24@6111 716 /* Initialise the Xen heap, skipping RAM holes. */
kfraser@15074 717 init_xenheap_pages(xenheap_phys_start, xenheap_phys_end);
kfraser@15074 718 nr_pages = (xenheap_phys_end - xenheap_phys_start) >> PAGE_SHIFT;
kfraser@15074 719 #ifdef __x86_64__
kfraser@15074 720 init_xenheap_pages(xen_phys_start, __pa(&_start));
kfraser@15074 721 nr_pages += (__pa(&_start) - xen_phys_start) >> PAGE_SHIFT;
kfraser@15074 722 #endif
kfraser@15074 723 xenheap_phys_start = xen_phys_start;
kaf24@6111 724 printk("Xen heap: %luMB (%lukB)\n",
kaf24@6111 725 nr_pages >> (20 - PAGE_SHIFT),
kaf24@6111 726 nr_pages << (PAGE_SHIFT - 10));
kaf24@3338 727
keir@14680 728 end_boot_allocator();
keir@14680 729
kaf24@3594 730 early_boot = 0;
kaf24@3338 731
kaf24@8459 732 early_cpu_init();
kaf24@8459 733
kaf24@8459 734 paging_init();
kaf24@8459 735
kaf24@8459 736 /* Unmap the first page of CPU0's stack. */
kaf24@8459 737 memguard_guard_stack(cpu0_stack);
kaf24@8459 738
kaf24@8459 739 open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);
kaf24@8459 740
kaf24@8459 741 if ( opt_watchdog )
kaf24@8459 742 nmi_watchdog = NMI_LOCAL_APIC;
kaf24@8459 743
kaf24@8459 744 sort_exception_tables();
kaf24@8459 745
kaf24@8459 746 find_smp_config();
kaf24@8459 747
kaf24@8459 748 dmi_scan_machine();
kaf24@8459 749
kaf24@8459 750 generic_apic_probe();
kaf24@8459 751
kaf24@8459 752 acpi_boot_init();
kaf24@8459 753
kfraser@11971 754 init_cpu_to_node();
kfraser@11971 755
kfraser@11241 756 if ( smp_found_config )
kaf24@8459 757 get_smp_config();
kaf24@8459 758
kaf24@8459 759 init_apic_mappings();
kaf24@8459 760
kaf24@8459 761 init_IRQ();
kaf24@8459 762
kfraser@11241 763 percpu_init_areas();
kfraser@11241 764
kfraser@11240 765 init_idle_domain();
kfraser@11240 766
kaf24@8459 767 trap_init();
kaf24@8459 768
kaf24@13662 769 rcu_init();
kaf24@13662 770
kaf24@8586 771 timer_init();
kaf24@8459 772
kaf24@8459 773 early_time_init();
kaf24@8459 774
kaf24@8459 775 arch_init_memory();
kaf24@8459 776
kaf24@8459 777 identify_cpu(&boot_cpu_data);
kaf24@8459 778 if ( cpu_has_fxsr )
kaf24@8459 779 set_in_cr4(X86_CR4_OSFXSR);
kaf24@8459 780 if ( cpu_has_xmm )
kaf24@8459 781 set_in_cr4(X86_CR4_OSXMMEXCPT);
kaf24@8459 782
kaf24@8459 783 if ( opt_nosmp )
kaf24@8459 784 max_cpus = 0;
kaf24@8459 785
kaf24@8459 786 smp_prepare_cpus(max_cpus);
kaf24@8459 787
kaf24@8459 788 /*
kaf24@8459 789 * Initialise higher-level timer functions. We do this fairly late
kaf24@8459 790 * (post-SMP) because the time bases and scale factors need to be updated
kaf24@8459 791 * regularly, and SMP initialisation can cause a long delay with
kaf24@8459 792 * interrupts not yet enabled.
kaf24@8459 793 */
kaf24@8459 794 init_xen_time();
kaf24@8459 795
kaf24@8459 796 initialize_keytable();
kaf24@8459 797
kaf24@8459 798 serial_init_postirq();
kaf24@8459 799
kaf24@8459 800 BUG_ON(!local_irq_is_enabled());
kaf24@8459 801
kaf24@8459 802 for_each_present_cpu ( i )
kaf24@8459 803 {
kaf24@8459 804 if ( num_online_cpus() >= max_cpus )
kaf24@8459 805 break;
kaf24@8459 806 if ( !cpu_online(i) )
kaf24@13662 807 {
kaf24@13662 808 rcu_online_cpu(i);
kaf24@8459 809 __cpu_up(i);
kaf24@13662 810 }
kfraser@11971 811
kfraser@11998 812 /* Set up cpu_to_node[]. */
kfraser@11971 813 srat_detect_node(i);
kfraser@11998 814 /* Set up node_to_cpumask based on cpu_to_node[]. */
kfraser@11971 815 numa_add_cpu(i);
kaf24@8459 816 }
kaf24@8459 817
kaf24@8459 818 printk("Brought up %ld CPUs\n", (long)num_online_cpus());
kaf24@8459 819 smp_cpus_done(max_cpus);
kaf24@8459 820
kaf24@9117 821 initialise_gdb(); /* could be moved earlier */
kaf24@9117 822
kaf24@8459 823 do_initcalls();
kaf24@8459 824
kaf24@8594 825 if ( opt_watchdog )
kaf24@8594 826 watchdog_enable();
kaf24@8459 827
kfraser@11881 828 /* Extract policy from multiboot. */
kfraser@11881 829 extract_acm_policy(mbi, &initrdidx, &_policy_start, &_policy_len);
kfraser@11881 830
kaf24@8459 831 /* initialize access control security module */
kfraser@11881 832 acm_init(_policy_start, _policy_len);
kaf24@8459 833
kaf24@8459 834 /* Create initial domain 0. */
kfraser@14911 835 dom0 = domain_create(0, 0, DOM0_SSIDREF);
kfraser@10655 836 if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
kaf24@8459 837 panic("Error creating domain 0\n");
kaf24@8459 838
kfraser@12210 839 dom0->is_privileged = 1;
kfraser@12210 840
kaf24@8459 841 /* Grab the DOM0 command line. */
kaf24@8459 842 cmdline = (char *)(mod[0].string ? __va(mod[0].string) : NULL);
kaf24@8459 843 if ( cmdline != NULL )
kaf24@8459 844 {
kaf24@8459 845 static char dom0_cmdline[MAX_GUEST_CMDLINE];
kaf24@8459 846
kaf24@8459 847 /* Skip past the image name and copy to a local buffer. */
kaf24@8459 848 while ( *cmdline == ' ' ) cmdline++;
kaf24@8459 849 if ( (cmdline = strchr(cmdline, ' ')) != NULL )
kaf24@8459 850 {
kaf24@8459 851 while ( *cmdline == ' ' ) cmdline++;
kfraser@13689 852 safe_strcpy(dom0_cmdline, cmdline);
kaf24@8459 853 }
kaf24@8459 854
kaf24@8459 855 /* Append any extra parameters. */
kfraser@13691 856 if ( skip_ioapic_setup && !strstr(dom0_cmdline, "noapic") )
kfraser@13691 857 safe_strcat(dom0_cmdline, " noapic");
kaf24@8459 858 if ( acpi_skip_timer_override &&
kfraser@13691 859 !strstr(dom0_cmdline, "acpi_skip_timer_override") )
kfraser@13691 860 safe_strcat(dom0_cmdline, " acpi_skip_timer_override");
kfraser@13691 861 if ( (strlen(acpi_param) != 0) && !strstr(dom0_cmdline, "acpi=") )
kaf24@8459 862 {
kfraser@13691 863 safe_strcat(dom0_cmdline, " acpi=");
kfraser@13691 864 safe_strcat(dom0_cmdline, acpi_param);
kaf24@8459 865 }
kfraser@13691 866
kfraser@13691 867 cmdline = dom0_cmdline;
kaf24@8459 868 }
kaf24@8459 869
kaf24@8459 870 if ( (initrdidx > 0) && (initrdidx < mbi->mods_count) )
kaf24@8459 871 {
kaf24@8459 872 _initrd_start = initial_images_start +
kaf24@8459 873 (mod[initrdidx].mod_start - mod[0].mod_start);
kaf24@8459 874 _initrd_len = mod[initrdidx].mod_end - mod[initrdidx].mod_start;
kaf24@8459 875 }
kaf24@8459 876
kaf24@8459 877 /*
kaf24@8459 878 * We're going to set up domain0 using the module(s) that we stashed safely
kaf24@8459 879 * above our heap. The second module, if present, is an initrd ramdisk.
kaf24@8459 880 */
kaf24@8459 881 if ( construct_dom0(dom0,
kaf24@8459 882 initial_images_start,
kaf24@8459 883 mod[0].mod_end-mod[0].mod_start,
kaf24@8459 884 _initrd_start,
kaf24@8459 885 _initrd_len,
kaf24@8459 886 cmdline) != 0)
kaf24@8459 887 panic("Could not set up DOM0 guest OS\n");
kaf24@8459 888
kaf24@8459 889 /* Scrub RAM that is still free and so may go to an unprivileged domain. */
kaf24@8459 890 scrub_heap_pages();
kaf24@8459 891
kaf24@8459 892 init_trace_bufs();
kaf24@8459 893
kaf24@10502 894 console_endboot();
kaf24@8459 895
kaf24@8459 896 /* Hide UART from DOM0 if we're using it */
kaf24@8459 897 serial_endboot();
kaf24@8459 898
kaf24@8459 899 domain_unpause_by_systemcontroller(dom0);
kaf24@8459 900
kaf24@8459 901 startup_cpu_idle_loop();
kaf24@8459 902 }
kaf24@8459 903
ian@13763 904 void arch_get_xen_caps(xen_capabilities_info_t *info)
iap10@6721 905 {
kfraser@14997 906 /* Interface name is always xen-3.0-* for Xen-3.x. */
kfraser@14997 907 int major = 3, minor = 0;
keir@13754 908 char s[32];
keir@13754 909
ian@13763 910 (*info)[0] = '\0';
iap10@6721 911
kaf24@6725 912 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
kaf24@6725 913
keir@13754 914 snprintf(s, sizeof(s), "xen-%d.%d-x86_32 ", major, minor);
ian@13763 915 safe_strcat(*info, s);
keir@13754 916 if ( hvm_enabled )
keir@13754 917 {
keir@13754 918 snprintf(s, sizeof(s), "hvm-%d.%d-x86_32 ", major, minor);
ian@13763 919 safe_strcat(*info, s);
kfraser@13685 920 }
kaf24@6725 921
kaf24@6725 922 #elif defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
kaf24@6725 923
keir@13754 924 snprintf(s, sizeof(s), "xen-%d.%d-x86_32p ", major, minor);
ian@13763 925 safe_strcat(*info, s);
kaf24@6725 926 if ( hvm_enabled )
iap10@6721 927 {
keir@13754 928 snprintf(s, sizeof(s), "hvm-%d.%d-x86_32 ", major, minor);
ian@13763 929 safe_strcat(*info, s);
keir@13754 930 snprintf(s, sizeof(s), "hvm-%d.%d-x86_32p ", major, minor);
ian@13763 931 safe_strcat(*info, s);
iap10@6721 932 }
iap10@6721 933
kaf24@6725 934 #elif defined(CONFIG_X86_64)
iap10@6721 935
keir@13754 936 snprintf(s, sizeof(s), "xen-%d.%d-x86_64 ", major, minor);
ian@13763 937 safe_strcat(*info, s);
ack@13288 938 #ifdef CONFIG_COMPAT
keir@13754 939 snprintf(s, sizeof(s), "xen-%d.%d-x86_32p ", major, minor);
ian@13763 940 safe_strcat(*info, s);
ack@13288 941 #endif
kaf24@6725 942 if ( hvm_enabled )
iap10@6721 943 {
keir@13754 944 snprintf(s, sizeof(s), "hvm-%d.%d-x86_32 ", major, minor);
ian@13763 945 safe_strcat(*info, s);
keir@13754 946 snprintf(s, sizeof(s), "hvm-%d.%d-x86_32p ", major, minor);
ian@13763 947 safe_strcat(*info, s);
keir@13754 948 snprintf(s, sizeof(s), "hvm-%d.%d-x86_64 ", major, minor);
ian@13763 949 safe_strcat(*info, s);
iap10@6721 950 }
kaf24@6725 951
iap10@6721 952 #endif
iap10@6721 953 }
iap10@6721 954
kaf24@3914 955 /*
kaf24@3914 956 * Local variables:
kaf24@3914 957 * mode: C
kaf24@3914 958 * c-set-style: "BSD"
kaf24@3914 959 * c-basic-offset: 4
kaf24@3914 960 * tab-width: 4
kaf24@3914 961 * indent-tabs-mode: nil
kaf24@3988 962 * End:
kaf24@3914 963 */