ia64/xen-unstable

view xen/arch/ia64/linux-xen/setup.c @ 9772:ddcd9c267612

[IA64] Reserve memory of domain0 (fix dom0 boot panic)

This patch fixes a domain0 boot panic on systems with a large amount of
installed memory (e.g. 16GB).

Domain0's memory is currently not reserved; this patch reserves it.
It also cleans up domain0's initrd_start.

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
Signed-off-by: Akio Takebe <takebe_akio@jp.fujitsu.com>
author awilliam@xenbuild.aw
date Tue Apr 25 22:52:49 2006 -0600 (2006-04-25)
parents ced37bea0647
children 41e7549d7df9
line source

/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#ifndef XEN
#include <linux/platform.h>
#include <linux/pm.h>
#endif

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/serial.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>
#ifdef XEN
#include <asm/vmx.h>
#include <asm/io.h>
#endif

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
#ifdef XEN
DEFINE_PER_CPU(cpu_kr_ia64_t, cpu_kr);
#endif
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

unsigned long ia64_max_cacheline_size;
unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

#ifdef XEN
extern void early_cmdline_parse(char **);
#endif

/*
 * "flush_icache_range()" needs to know what processor dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;

#ifdef XEN
#define D_CACHE_STRIDE_SHIFT	5	/* Safest. */
unsigned long ia64_d_cache_stride_shift = ~0;
#endif
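
/*
 * For reference: flush_icache_range() walks its range in
 * (1 << ia64_i_cache_stride_shift)-byte steps, so the shift-of-5 defaults
 * above mean one flush every 32 bytes.  get_max_cacheline_size() below
 * replaces these conservative values with the strides reported by PAL.
 */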

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an iommu
 * page-size of 2^64.
 */
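/*
 * For example, an I/O MMU with a 4KB page size would set merge_mask to
 * 0xfff: two buffers are mergeable only when the end of the first and the
 * start of the second are 4KB-aligned.
 */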
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
int num_rsvd_regions;

/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
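/*
 * Example: with a single reserved region [r0, r1) inside an incoming
 * segment [s, e), the callback is invoked for [s, r0) and then, thanks to
 * the end-of-memory marker entry, for [r1, e).
 */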
int
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
        unsigned long range_start, range_end, prev_start;
        void (*func)(unsigned long, unsigned long, int);
        int i;

#if IGNORE_PFN0
        if (start == PAGE_OFFSET) {
                printk(KERN_WARNING "warning: skipping physical page 0\n");
                start += PAGE_SIZE;
                if (start >= end) return 0;
        }
#endif
        /*
         * lowest possible address (walker uses virtual)
         */
        prev_start = PAGE_OFFSET;
        func = arg;

        for (i = 0; i < num_rsvd_regions; ++i) {
                range_start = max(start, prev_start);
                range_end = min(end, rsvd_region[i].start);

                if (range_start < range_end)
#ifdef XEN
                {
                        /* init_boot_pages requires "ps, pe" */
                        printk("Init boot pages: 0x%lx -> 0x%lx.\n",
                               __pa(range_start), __pa(range_end));
                        (*func)(__pa(range_start), __pa(range_end), 0);
                }
#else
                        call_pernode_memory(__pa(range_start), range_end - range_start, func);
#endif

                /* nothing more available in this segment */
                if (range_end == end) return 0;

                prev_start = rsvd_region[i].end;
        }
        /* end of memory marker allows full processing inside loop body */
        return 0;
}

static void
sort_regions (struct rsvd_region *rsvd_region, int max)
{
        int j;

        /* simple bubble sorting */
        while (max--) {
                for (j = 0; j < max; ++j) {
                        if (rsvd_region[j].start > rsvd_region[j+1].start) {
                                struct rsvd_region tmp;
                                tmp = rsvd_region[j];
                                rsvd_region[j] = rsvd_region[j + 1];
                                rsvd_region[j + 1] = tmp;
                        }
                }
        }
}

/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void
reserve_memory (void)
{
        int n = 0;

        /*
         * none of the entries in this table overlap
         */
        rsvd_region[n].start = (unsigned long) ia64_boot_param;
        rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param);
        n++;

        rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
        rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
        n++;

        rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
        rsvd_region[n].end = (rsvd_region[n].start
                              + strlen(__va(ia64_boot_param->command_line)) + 1);
        n++;

        rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
#ifdef XEN
        /* Reserve xen image/bitmap/xen-heap */
        rsvd_region[n].end = rsvd_region[n].start + xenheap_size;
#else
        rsvd_region[n].end = (unsigned long) ia64_imva(_end);
#endif
        n++;
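
        /*
         * Reserve the domain0 image handed over by the boot loader.  Per the
         * changeset description above, leaving this range unreserved lets it
         * be reused, which panics dom0 boot on large-memory (e.g. 16GB)
         * machines.
         */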
#ifdef XEN
        rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->domain_start);
        rsvd_region[n].end = (rsvd_region[n].start + ia64_boot_param->domain_size);
        n++;
#endif

#if defined(XEN) || defined(CONFIG_BLK_DEV_INITRD)
        if (ia64_boot_param->initrd_start) {
                rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
                rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size;
                n++;
        }
#endif

        /* end of memory marker */
        rsvd_region[n].start = ~0UL;
        rsvd_region[n].end = ~0UL;
        n++;

        num_rsvd_regions = n;

        sort_regions(rsvd_region, num_rsvd_regions);
}

/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        if (ia64_boot_param->initrd_start) {
                initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
                initrd_end = initrd_start + ia64_boot_param->initrd_size;

                printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
                       initrd_start, ia64_boot_param->initrd_size);
        }
#endif
}

static void __init
io_port_init (void)
{
        extern unsigned long ia64_iobase;
        unsigned long phys_iobase;

        /*
         * Set `iobase' to the appropriate address in region 6 (uncached access range).
         *
         * The EFI memory map is the "preferred" location to get the I/O port space base,
         * rather than relying on AR.KR0.  This should become more clear in future SAL
         * specs.  We'll fall back to getting it out of AR.KR0 if no appropriate entry is
         * found in the memory map.
         */
        phys_iobase = efi_get_iobase();
        if (phys_iobase)
                /* set AR.KR0 since this is all we use it for anyway */
                ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
        else {
                phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
                printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
                       "to AR.KR0\n");
                printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
        }
        ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);

        /* setup legacy IO port space */
        io_space[0].mmio_base = ia64_iobase;
        io_space[0].sparse = 1;
        num_io_spaces = 1;
}

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
        int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
        {
                extern int sn_serial_console_early_setup(void);
                if (!sn_serial_console_early_setup())
                        earlycons++;
        }
#endif
#ifdef CONFIG_EFI_PCDP
        if (!efi_setup_pcdp_console(cmdline))
                earlycons++;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
        if (!early_serial_console_init(cmdline))
                earlycons++;
#endif

        return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
        /* If we register an early console, allow CPU 0 to printk */
        cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

#ifdef CONFIG_SMP
static void
check_for_logical_procs (void)
{
        pal_logical_to_physical_t info;
        s64 status;

        status = ia64_pal_logical_to_phys(0, &info);
        if (status == -1) {
                printk(KERN_INFO "No logical to physical processor mapping "
                       "available\n");
                return;
        }
        if (status) {
                printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
                       status);
                return;
        }
        /*
         * Total number of siblings that the BSP has, though not all of them
         * may have booted successfully.  The correct number of siblings
         * booted is in info.overview_num_log.
         */
        smp_num_siblings = info.overview_tpc;
        smp_num_cpucores = info.overview_cpp;
}
#endif

void __init
#ifdef XEN
early_setup_arch (char **cmdline_p)
#else
setup_arch (char **cmdline_p)
#endif
{
        unw_init();

#ifndef XEN
        ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
#endif

        *cmdline_p = __va(ia64_boot_param->command_line);
#ifndef XEN
        strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
#else
        early_cmdline_parse(cmdline_p);
        cmdline_parse(*cmdline_p);
#endif

        efi_init();
        io_port_init();

#ifdef CONFIG_IA64_GENERIC
        {
                const char *mvec_name = strstr (*cmdline_p, "machvec=");
                char str[64];

                if (mvec_name) {
                        const char *end;
                        size_t len;

                        mvec_name += 8;
                        end = strchr (mvec_name, ' ');
                        if (end)
                                len = end - mvec_name;
                        else
                                len = strlen (mvec_name);
                        len = min(len, sizeof (str) - 1);
                        strncpy (str, mvec_name, len);
                        str[len] = '\0';
                        mvec_name = str;
                } else
                        mvec_name = acpi_get_sysname();
                machvec_init(mvec_name);
        }
#endif

        if (early_console_setup(*cmdline_p) == 0)
                mark_bsp_online();

#ifdef XEN
}

void __init
late_setup_arch (char **cmdline_p)
{
#endif
#ifdef CONFIG_ACPI_BOOT
        /* Initialize the ACPI boot-time table parser */
        acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
        acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
        smp_build_cpu_map();    /* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI_BOOT */

#ifndef XEN
        find_memory();
#endif

        /* process SAL system table: */
        ia64_sal_init(efi.sal_systab);

#ifdef CONFIG_SMP
#ifdef XEN
        init_smp_config ();
#endif

        cpu_physical_id(0) = hard_smp_processor_id();

        cpu_set(0, cpu_sibling_map[0]);
        cpu_set(0, cpu_core_map[0]);

        check_for_logical_procs();
        if (smp_num_cpucores > 1)
                printk(KERN_INFO
                       "cpu package is Multi-Core capable: number of cores=%d\n",
                       smp_num_cpucores);
        if (smp_num_siblings > 1)
                printk(KERN_INFO
                       "cpu package is Multi-Threading capable: number of siblings=%d\n",
                       smp_num_siblings);
#endif

#ifdef XEN
        identify_vmx_feature();
#endif

        cpu_init();     /* initialize the bootstrap CPU */

#ifdef CONFIG_ACPI_BOOT
        acpi_boot_init();
#endif

#ifdef CONFIG_VT
        if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
                conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
                /*
                 * Non-legacy systems may route legacy VGA MMIO range to system
                 * memory.  vga_con probes the MMIO hole, so memory looks like
                 * a VGA device to it.  The EFI memory map can tell us if it's
                 * memory so we can avoid this problem.
                 */
                if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
                        conswitchp = &vga_con;
# endif
        }
#endif

        /* enable IA-64 Machine Check Abort Handling unless disabled */
        if (!strstr(saved_command_line, "nomca"))
                ia64_mca_init();

        platform_setup(cmdline_p);
        paging_init();
}

#ifndef XEN
/*
 * Display cpu info for all cpu's.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
# define lpj c->loops_per_jiffy
# define cpunum c->cpu
#else
# define lpj loops_per_jiffy
# define cpunum 0
#endif
        static struct {
                unsigned long mask;
                const char *feature_name;
        } feature_bits[] = {
                { 1UL << 0, "branchlong" },
                { 1UL << 1, "spontaneous deferral"},
                { 1UL << 2, "16-byte atomic ops" }
        };
        char family[32], features[128], *cp, sep;
        struct cpuinfo_ia64 *c = v;
        unsigned long mask;
        int i;

        mask = c->features;

        switch (c->family) {
        case 0x07: memcpy(family, "Itanium", 8); break;
        case 0x1f: memcpy(family, "Itanium 2", 10); break;
        default: sprintf(family, "%u", c->family); break;
        }

        /* build the feature string: */
        memcpy(features, " standard", 10);
        cp = features;
        sep = 0;
        for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
                if (mask & feature_bits[i].mask) {
                        if (sep)
                                *cp++ = sep;
                        sep = ',';
                        *cp++ = ' ';
                        strcpy(cp, feature_bits[i].feature_name);
                        cp += strlen(feature_bits[i].feature_name);
                        mask &= ~feature_bits[i].mask;
                }
        }
        if (mask) {
                /* print unknown features as a hex value: */
                if (sep)
                        *cp++ = sep;
                sprintf(cp, " 0x%lx", mask);
        }

        seq_printf(m,
                   "processor : %d\n"
                   "vendor : %s\n"
                   "arch : IA-64\n"
                   "family : %s\n"
                   "model : %u\n"
                   "revision : %u\n"
                   "archrev : %u\n"
                   "features :%s\n"     /* don't change this---it _is_ right! */
                   "cpu number : %lu\n"
                   "cpu regs : %u\n"
                   "cpu MHz : %lu.%06lu\n"
                   "itc MHz : %lu.%06lu\n"
                   "BogoMIPS : %lu.%02lu\n",
                   cpunum, c->vendor, family, c->model, c->revision, c->archrev,
                   features, c->ppn, c->number,
                   c->proc_freq / 1000000, c->proc_freq % 1000000,
                   c->itc_freq / 1000000, c->itc_freq % 1000000,
                   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
        seq_printf(m, "siblings : %u\n", c->num_log);
        if (c->threads_per_core > 1 || c->cores_per_socket > 1)
                seq_printf(m,
                           "physical id: %u\n"
                           "core id : %u\n"
                           "thread id : %u\n",
                           c->socket_id, c->core_id, c->thread_id);
#endif
        seq_printf(m,"\n");

        return 0;
}

static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
        while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
                ++*pos;
#endif
        return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo
};
#endif /* XEN */

void
identify_cpu (struct cpuinfo_ia64 *c)
{
        union {
                unsigned long bits[5];
                struct {
                        /* id 0 & 1: */
                        char vendor[16];

                        /* id 2 */
                        u64 ppn;                /* processor serial number */

                        /* id 3: */
                        unsigned number : 8;
                        unsigned revision : 8;
                        unsigned model : 8;
                        unsigned family : 8;
                        unsigned archrev : 8;
                        unsigned reserved : 24;

                        /* id 4: */
                        u64 features;
                } field;
        } cpuid;
        pal_vm_info_1_u_t vm1;
        pal_vm_info_2_u_t vm2;
        pal_status_t status;
        unsigned long impl_va_msb = 50, phys_addr_size = 44;   /* Itanium defaults */
        int i;

        for (i = 0; i < 5; ++i)
                cpuid.bits[i] = ia64_get_cpuid(i);

        memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
        c->cpu = smp_processor_id();

        /* below default values will be overwritten by identify_siblings()
         * for Multi-Threading/Multi-Core capable cpu's
         */
        c->threads_per_core = c->cores_per_socket = c->num_log = 1;
        c->socket_id = -1;

        identify_siblings(c);
#endif
        c->ppn = cpuid.field.ppn;
        c->number = cpuid.field.number;
        c->revision = cpuid.field.revision;
        c->model = cpuid.field.model;
        c->family = cpuid.field.family;
        c->archrev = cpuid.field.archrev;
        c->features = cpuid.field.features;

        status = ia64_pal_vm_summary(&vm1, &vm2);
        if (status == PAL_STATUS_SUCCESS) {
                impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
                phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
        }
        c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
        c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
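
        /*
         * For reference, with the Itanium defaults above (impl_va_msb = 50,
         * phys_addr_size = 44) unimpl_va_mask selects virtual-address bits
         * 51..60 (the region bits 61..63 are excluded) and unimpl_pa_mask
         * selects physical-address bits 44..62.
         */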

#ifdef XEN
        /* If vmx feature is on, do necessary initialization for vmx */
        if (vmx_enabled)
                vmx_init_env();
#endif
}

void
setup_per_cpu_areas (void)
{
        /* start_kernel() requires this... */
}

/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void
get_max_cacheline_size (void)
{
        unsigned long line_size, max = 1;
        u64 l, levels, unique_caches;
        pal_cache_config_info_t cci;
        s64 status;

        status = ia64_pal_cache_summary(&levels, &unique_caches);
        if (status != 0) {
                printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
                       __FUNCTION__, status);
                max = SMP_CACHE_BYTES;
                /* Safest setup for "flush_icache_range()" */
                ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
#ifdef XEN
                ia64_d_cache_stride_shift = D_CACHE_STRIDE_SHIFT;
#endif
                goto out;
        }

        for (l = 0; l < levels; ++l) {
                status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
                                                    &cci);
                if (status != 0) {
                        printk(KERN_ERR
                               "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
                               __FUNCTION__, l, status);
                        max = SMP_CACHE_BYTES;
                        /* The safest setup for "flush_icache_range()" */
                        cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        cci.pcci_unified = 1;
                }
#ifdef XEN
                if (cci.pcci_stride < ia64_d_cache_stride_shift)
                        ia64_d_cache_stride_shift = cci.pcci_stride;
#endif
                line_size = 1 << cci.pcci_line_size;
                if (line_size > max)
                        max = line_size;
                if (!cci.pcci_unified) {
                        status = ia64_pal_cache_config_info(l,
                                                            /* cache_type (instruction)= */ 1,
                                                            &cci);
                        if (status != 0) {
                                printk(KERN_ERR
                                       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
                                       __FUNCTION__, l, status);
                                /* The safest setup for "flush_icache_range()" */
                                cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        }
                }
                if (cci.pcci_stride < ia64_i_cache_stride_shift)
                        ia64_i_cache_stride_shift = cci.pcci_stride;
        }
  out:
        if (max > ia64_max_cacheline_size)
                ia64_max_cacheline_size = max;
#ifdef XEN
        if (ia64_d_cache_stride_shift > ia64_i_cache_stride_shift)
                ia64_d_cache_stride_shift = ia64_i_cache_stride_shift;
#endif

}

/*
 * cpu_init() initializes state that is per-CPU.  This function acts as a
 * 'CPU state barrier'; nothing should get across.
 */
void
cpu_init (void)
{
        extern void __devinit ia64_mmu_init (void *);
        unsigned long num_phys_stacked;
#ifndef XEN
        pal_vm_info_2_u_t vmi;
        unsigned int max_ctx;
#endif
        struct cpuinfo_ia64 *cpu_info;
        void *cpu_data;

        cpu_data = per_cpu_init();

#ifdef XEN
        printf ("cpu_init: current=%p, current->domain->arch.mm=%p\n",
                current, current->domain->arch.mm);
#endif

        /*
         * We set ar.k3 so that assembly code in MCA handler can compute
         * physical addresses of per cpu variables with a simple:
         *   phys = ar.k3 + &per_cpu_var
         */
        ia64_set_kr(IA64_KR_PER_CPU_DATA,
                    ia64_tpa(cpu_data) - (long) __per_cpu_start);
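        /*
         * In other words, ar.k3 + &per_cpu_var
         *   = (ia64_tpa(cpu_data) - __per_cpu_start) + (__per_cpu_start + var offset)
         *   = the physical address of this CPU's copy of the variable.
         */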

        get_max_cacheline_size();

        /*
         * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
         * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
         * depends on the data returned by identify_cpu().  We break the dependency by
         * accessing cpu_data() through the canonical per-CPU address.
         */
        cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
        identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
        {
# define FEATURE_SET 16
                struct ia64_pal_retval iprv;

                if (cpu_info->family == 0x1f) {
                        PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
                        if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
                                PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
                                              (iprv.v1 | 0x80), FEATURE_SET, 0);
                }
        }
#endif

        /* Clear the stack memory reserved for pt_regs: */
        memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));

        ia64_set_kr(IA64_KR_FPU_OWNER, 0);

        /*
         * Initialize the page-table base register to a global
         * directory with all zeroes.  This ensures that we can handle
         * TLB-misses to user address-space even before we created the
         * first user address-space.  This may happen, e.g., due to
         * aggressive use of lfetch.fault.
         */
        ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

        /*
         * Initialize default control register to defer speculative faults except
         * for those arising from TLB misses, which are not deferred.  The
         * kernel MUST NOT depend on a particular setting of these bits (in other words,
         * the kernel must have recovery code for all speculative accesses).  Turn on
         * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
         * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
         * be fine).
         */
#ifdef XEN
        ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
                                        | IA64_DCR_PP | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
#else
        ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
                                        | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
#endif
#ifndef XEN
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
#endif
#ifdef XEN
        if (current->domain->arch.mm)
#else
        if (current->mm)
#endif
                BUG();

#ifdef XEN
        ia64_fph_enable();
        __ia64_init_fpu();
#endif

        ia64_mmu_init(ia64_imva(cpu_data));
        ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
        ia32_cpu_init();
#endif

        /* Clear ITC to eliminate sched_clock() overflows in human time. */
        ia64_set_itc(0);

        /* disable all local interrupt sources: */
        ia64_set_itv(1 << 16);
        ia64_set_lrr0(1 << 16);
        ia64_set_lrr1(1 << 16);
        ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
        ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

        /* clear TPR & XTP to enable all interrupt classes: */
        ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
        normal_xtp();
#endif

#ifndef XEN
        /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
        if (ia64_pal_vm_summary(NULL, &vmi) == 0)
                max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
        else {
                printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
                max_ctx = (1U << 15) - 1;       /* use architected minimum */
        }
        while (max_ctx < ia64_ctx.max_ctx) {
                unsigned int old = ia64_ctx.max_ctx;
                if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
                        break;
        }
#endif

        if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
                printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
                       "stacked regs\n");
                num_phys_stacked = 96;
        }
        /* size of physical stacked register partition plus 8 bytes: */
        __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
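        /* e.g. with the 96-register fallback above: 96*8 + 8 = 776 bytes */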
        platform_cpu_init();
#ifndef XEN
        pm_idle = default_idle;
#endif

#ifdef XEN
        /* surrender usage of kernel registers to domain, use percpu area instead */
        __get_cpu_var(cpu_kr)._kr[IA64_KR_IO_BASE] = ia64_get_kr(IA64_KR_IO_BASE);
        __get_cpu_var(cpu_kr)._kr[IA64_KR_PER_CPU_DATA] = ia64_get_kr(IA64_KR_PER_CPU_DATA);
        __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT_STACK] = ia64_get_kr(IA64_KR_CURRENT_STACK);
        __get_cpu_var(cpu_kr)._kr[IA64_KR_FPU_OWNER] = ia64_get_kr(IA64_KR_FPU_OWNER);
        __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT] = ia64_get_kr(IA64_KR_CURRENT);
        __get_cpu_var(cpu_kr)._kr[IA64_KR_PT_BASE] = ia64_get_kr(IA64_KR_PT_BASE);
#endif
}

#ifndef XEN
void
check_bugs (void)
{
        ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
                               (unsigned long) __end___mckinley_e9_bundles);
}
#endif