ia64/xen-unstable

view xen/arch/x86/setup.c @ 1589:9eda3ea2b4a0

bitkeeper revision 1.1019 (40de8639yjRdZbQS_JrcVsHsaPIwZw)

smp.h, irq.h, config.h, sched_bvt.c, setup.c, pci-pc.c:
Fix compile errors for latest GCC (3.4.0).
author kaf24@scramble.cl.cam.ac.uk
date Sun Jun 27 08:32:57 2004 +0000 (2004-06-27)
parents 729cac1fb14e
children c6692e37c11f 0e23f01219c6
line source
2 #include <xen/config.h>
3 #include <xen/init.h>
4 #include <xen/lib.h>
5 #include <xen/sched.h>
6 #include <xen/pci.h>
7 #include <xen/serial.h>
8 #include <xen/softirq.h>
9 #include <xen/acpi.h>
10 #include <asm/bitops.h>
11 #include <asm/smp.h>
12 #include <asm/processor.h>
13 #include <asm/mpspec.h>
14 #include <asm/apic.h>
15 #include <asm/desc.h>
16 #include <asm/domain_page.h>
17 #include <asm/pdb.h>
19 extern void init_IRQ(void);
20 extern void trap_init(void);
21 extern void time_init(void);
22 extern void ac_timer_init(void);
23 extern void initialize_keytable();
24 extern int opt_nosmp, opt_watchdog, opt_noacpi;
25 extern int opt_ignorebiostables, opt_noht;
26 extern int do_timer_lists_from_pit;
28 char ignore_irq13; /* set if exception 16 works */
29 struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1 };
31 /* Lots of nice things, since we only target PPro+. */
32 unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE;
33 EXPORT_SYMBOL(mmu_cr4_features);
35 unsigned long wait_init_idle;
37 struct domain *idle_task[NR_CPUS] = { &idle0_task };
39 #ifdef CONFIG_ACPI_INTERPRETER
40 int acpi_disabled = 0;
41 #else
42 int acpi_disabled = 1;
43 #endif
44 EXPORT_SYMBOL(acpi_disabled);
46 #ifdef CONFIG_ACPI_BOOT
47 extern int __initdata acpi_ht;
48 int acpi_force __initdata = 0;
49 #endif
51 int phys_proc_id[NR_CPUS];
52 int logical_proc_id[NR_CPUS];
54 /* Standard macro to see if a specific flag is changeable */
/*
 * Returns non-zero iff the given EFLAGS bit can be toggled by software.
 * Probing EFLAGS.ID this way is the standard CPUID-presence test.
 */
55 static inline int flag_is_changeable_p(u32 flag)
56 {
57 u32 f1, f2;
59 asm("pushfl\n\t"            /* save original EFLAGS */
60 "pushfl\n\t"
61 "popl %0\n\t"               /* f1 = current EFLAGS */
62 "movl %0,%1\n\t"            /* f2 = copy of original value */
63 "xorl %2,%0\n\t"            /* toggle the requested bit */
64 "pushl %0\n\t"
65 "popfl\n\t"                 /* attempt to write toggled EFLAGS */
66 "pushfl\n\t"
67 "popl %0\n\t"               /* f1 = EFLAGS as the CPU accepted it */
68 "popfl\n\t"                 /* restore original EFLAGS */
69 : "=&r" (f1), "=&r" (f2)
70 : "ir" (flag));
72 return ((f1^f2) & flag) != 0;
73 }
75 /* Probe for the CPUID instruction */
76 static int __init have_cpuid_p(void)
77 {
78 return flag_is_changeable_p(X86_EFLAGS_ID);
79 }
81 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
82 {
83 char *v = c->x86_vendor_id;
85 if (!strcmp(v, "GenuineIntel"))
86 c->x86_vendor = X86_VENDOR_INTEL;
87 else if (!strcmp(v, "AuthenticAMD"))
88 c->x86_vendor = X86_VENDOR_AMD;
89 else if (!strcmp(v, "CyrixInstead"))
90 c->x86_vendor = X86_VENDOR_CYRIX;
91 else if (!strcmp(v, "UMC UMC UMC "))
92 c->x86_vendor = X86_VENDOR_UMC;
93 else if (!strcmp(v, "CentaurHauls"))
94 c->x86_vendor = X86_VENDOR_CENTAUR;
95 else if (!strcmp(v, "NexGenDriven"))
96 c->x86_vendor = X86_VENDOR_NEXGEN;
97 else if (!strcmp(v, "RiseRiseRise"))
98 c->x86_vendor = X86_VENDOR_RISE;
99 else if (!strcmp(v, "GenuineTMx86") ||
100 !strcmp(v, "TransmetaCPU"))
101 c->x86_vendor = X86_VENDOR_TRANSMETA;
102 else
103 c->x86_vendor = X86_VENDOR_UNKNOWN;
104 }
106 static void __init init_intel(struct cpuinfo_x86 *c)
107 {
108 /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
109 if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
110 clear_bit(X86_FEATURE_SEP, &c->x86_capability);
112 if ( opt_noht )
113 {
114 opt_noacpi = 1; /* Virtual CPUs only appear in ACPI tables. */
115 clear_bit(X86_FEATURE_HT, &c->x86_capability[0]);
116 }
118 #ifdef CONFIG_SMP
119 if ( test_bit(X86_FEATURE_HT, &c->x86_capability) )
120 {
121 u32 eax, ebx, ecx, edx;
122 int initial_apic_id, siblings, cpu = smp_processor_id();
124 cpuid(1, &eax, &ebx, &ecx, &edx);
125 siblings = (ebx & 0xff0000) >> 16;
127 if ( siblings <= 1 )
128 {
129 printk(KERN_INFO "CPU#%d: Hyper-Threading is disabled\n", cpu);
130 }
131 else if ( siblings > 2 )
132 {
133 panic("We don't support more than two logical CPUs per package!");
134 }
135 else
136 {
137 initial_apic_id = ebx >> 24 & 0xff;
138 phys_proc_id[cpu] = initial_apic_id >> 1;
139 logical_proc_id[cpu] = initial_apic_id & 1;
140 printk(KERN_INFO "CPU#%d: Physical ID: %d, Logical ID: %d\n",
141 cpu, phys_proc_id[cpu], logical_proc_id[cpu]);
142 }
143 }
144 #endif
145 }
147 static void __init init_amd(struct cpuinfo_x86 *c)
148 {
149 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
150 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
151 clear_bit(0*32+31, &c->x86_capability);
153 switch(c->x86)
154 {
155 case 5:
156 panic("AMD K6 is not supported.\n");
157 case 6: /* An Athlon/Duron. We can trust the BIOS probably */
158 break;
159 }
160 }
162 /*
163 * This does the hard work of actually picking apart the CPU stuff...
164 */
/*
 * Probe and record everything we need to know about the calling CPU:
 * vendor, family/model/stepping, and the standard, AMD-extended and
 * Transmeta-extended capability words.  On secondary CPUs, ANDs the
 * result into boot_cpu_data so it holds the common feature set.
 * Panics if the CPU lacks CPUID or reports cpuid_level == 0.
 */
165 void __init identify_cpu(struct cpuinfo_x86 *c)
166 {
167 int junk, i, cpu = smp_processor_id();
168 u32 xlvl, tfms;
170 phys_proc_id[cpu] = cpu;          /* defaults; init_intel() may refine */
171 logical_proc_id[cpu] = 0;
173 c->x86_vendor = X86_VENDOR_UNKNOWN;
174 c->cpuid_level = -1; /* CPUID not detected */
175 c->x86_model = c->x86_mask = 0; /* So far unknown... */
176 c->x86_vendor_id[0] = '\0'; /* Unset */
177 memset(&c->x86_capability, 0, sizeof c->x86_capability);
179 if ( !have_cpuid_p() )
180 panic("Ancient processors not supported\n");
182 /* Get vendor name: leaf 0 returns the 12-byte string in EBX,EDX,ECX. */
183 cpuid(0x00000000, &c->cpuid_level,
184 (int *)&c->x86_vendor_id[0],
185 (int *)&c->x86_vendor_id[8],
186 (int *)&c->x86_vendor_id[4]);
188 get_cpu_vendor(c);
190 if ( c->cpuid_level == 0 )
191 panic("Decrepit CPUID not supported\n");
193 cpuid(0x00000001, &tfms, &junk, &junk,
194 &c->x86_capability[0]);
195 c->x86 = (tfms >> 8) & 15;        /* family (no extended-family decode) */
196 c->x86_model = (tfms >> 4) & 15;
197 c->x86_mask = tfms & 15;          /* stepping */
199 /* AMD-defined flags: level 0x80000001 */
200 xlvl = cpuid_eax(0x80000000);
201 if ( (xlvl & 0xffff0000) == 0x80000000 ) {
202 if ( xlvl >= 0x80000001 )
203 c->x86_capability[1] = cpuid_edx(0x80000001);
204 }
206 /* Transmeta-defined flags: level 0x80860001 */
207 xlvl = cpuid_eax(0x80860000);
208 if ( (xlvl & 0xffff0000) == 0x80860000 ) {
209 if ( xlvl >= 0x80860001 )
210 c->x86_capability[2] = cpuid_edx(0x80860001);
211 }
213 printk("CPU%d: Before vendor init, caps: %08x %08x %08x, vendor = %d\n",
214 smp_processor_id(),
215 c->x86_capability[0],
216 c->x86_capability[1],
217 c->x86_capability[2],
218 c->x86_vendor);
220 switch ( c->x86_vendor ) {
221 case X86_VENDOR_INTEL:
222 init_intel(c);
223 break;
224 case X86_VENDOR_AMD:
225 init_amd(c);
226 break;
227 case X86_VENDOR_UNKNOWN: /* Connectix Virtual PC reports this */
228 break;
229 case X86_VENDOR_CENTAUR:
230 break;
231 default:
232 printk("Unknown CPU identifier (%d): continuing anyway, "
233 "but might fail.\n", c->x86_vendor);
234 }
236 printk("CPU caps: %08x %08x %08x %08x\n",
237 c->x86_capability[0],
238 c->x86_capability[1],
239 c->x86_capability[2],
240 c->x86_capability[3]);
242 /*
243 * On SMP, boot_cpu_data holds the common feature set between
244 * all CPUs; so make sure that we indicate which features are
245 * common between the CPUs. The first time this routine gets
246 * executed, c == &boot_cpu_data.
247 */
248 if ( c != &boot_cpu_data ) {
249 /* AND the already accumulated flags with these */
250 for ( i = 0 ; i < NCAPINTS ; i++ )
251 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
252 }
253 }
/* Bitmap of CPUs that have completed cpu_init() (guards double init). */
256 unsigned long cpu_initialized;
/*
 * Per-CPU bring-up: load this CPU's GDT/IDT, clear NT, set TS so the
 * FPU state is lazily initialised, install the per-CPU TSS, null the
 * LDT, zero the debug registers, switch to the idle page tables and
 * register the idle task.  Panics if called twice for the same CPU.
 */
257 void __init cpu_init(void)
258 {
259 int nr = smp_processor_id();
260 struct tss_struct * t = &init_tss[nr];
262 if ( test_and_set_bit(nr, &cpu_initialized) )
263 panic("CPU#%d already initialized!!!\n", nr);
264 printk("Initializing CPU#%d\n", nr);
266 t->bitmap = INVALID_IO_BITMAP_OFFSET;
267 memset(t->io_bitmap, ~0, sizeof(t->io_bitmap)); /* all-ones bitmap */
269 /* Set up GDT and IDT. */
270 SET_GDT_ENTRIES(current, DEFAULT_GDT_ENTRIES);
271 SET_GDT_ADDRESS(current, DEFAULT_GDT_ADDRESS);
272 __asm__ __volatile__("lgdt %0": "=m" (*current->mm.gdt));
273 __asm__ __volatile__("lidt %0": "=m" (idt_descr));
275 /* No nested task. */
276 __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl"); /* clears EFLAGS.NT (bit 14) */
278 /* Ensure FPU gets initialised for each domain. */
279 stts();
281 /* Set up and load the per-CPU TSS and LDT. */
282 t->ss0 = __HYPERVISOR_DS;
283 t->esp0 = get_stack_top();
284 set_tss_desc(nr,t);
285 load_TR(nr);
286 __asm__ __volatile__("lldt %%ax"::"a" (0)); /* null LDT selector */
288 /* Clear all 6 debug registers. */
289 #define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
290 CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
291 #undef CD
293 /* Install correct page table. */
294 write_ptbase(&current->mm);
296 init_idle_task();
297 }
299 static void __init do_initcalls(void)
300 {
301 initcall_t *call;
302 for ( call = &__initcall_start; call < &__initcall_end; call++ )
303 (*call)();
304 }
/* Lowest address the PCI layer may allocate from; raised above RAM below. */
306 unsigned long pci_mem_start = 0x10000000;
/*
 * Main boot-time initialisation sequence, run once on the boot CPU.
 * Order is significant: CPU identification, SMP/ACPI table discovery,
 * paging fixmaps, interrupt/trap/time setup, APIC mapping, secondary
 * CPU boot, then timers, schedulers and initcalls; finally waits for
 * every secondary CPU's idle task before enabling the watchdog.
 */
308 void __init start_of_day(void)
309 {
310 unsigned long low_mem_size;
312 #ifdef MEMORY_GUARD
313 /* Unmap the first page of CPU0's stack. */
314 extern unsigned long cpu0_stack[];
315 memguard_guard_range(cpu0_stack, PAGE_SIZE);
316 #endif
318 open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);
320 if ( opt_watchdog )
321 nmi_watchdog = NMI_LOCAL_APIC;
323 /* Tell the PCI layer not to allocate too close to the RAM area.. */
324 low_mem_size = ((max_page << PAGE_SHIFT) + 0xfffff) & ~0xfffff; /* round up to 1MB */
325 if ( low_mem_size > pci_mem_start ) pci_mem_start = low_mem_size;
327 identify_cpu(&boot_cpu_data); /* get CPU type info */
328 if ( cpu_has_fxsr ) set_in_cr4(X86_CR4_OSFXSR);
329 if ( cpu_has_xmm ) set_in_cr4(X86_CR4_OSXMMEXCPT);
330 #ifdef CONFIG_SMP
331 if ( opt_ignorebiostables )
332 {
333 opt_nosmp = 1; /* No SMP without configuration */
334 opt_noacpi = 1; /* ACPI will just confuse matters also */
335 }
336 else
337 {
338 find_smp_config();
339 smp_alloc_memory(); /* trampoline which other CPUs jump at */
340 }
341 #endif
342 paging_init(); /* not much here now, but sets up fixmap */
343 if ( !opt_noacpi )
344 acpi_boot_init();
345 #ifdef CONFIG_SMP
346 if ( smp_found_config )
347 get_smp_config();
348 #endif
349 scheduler_init();
350 init_IRQ(); /* installs simple interrupt wrappers. Starts HZ clock. */
351 trap_init();
352 time_init(); /* installs software handler for HZ clock. */
353 init_apic_mappings(); /* make APICs addressable in our pagetables. */
355 #ifndef CONFIG_SMP
356 APIC_init_uniprocessor();
357 #else
358 if ( opt_nosmp )
359 APIC_init_uniprocessor();
360 else
361 smp_boot_cpus();
362 /*
363 * Does loads of stuff, including kicking the local
364 * APIC, and the IO APIC after other CPUs are booted.
365 * Each IRQ is preferably handled by IO-APIC, but
366 * fall thru to 8259A if we have to (but slower).
367 */
368 #endif
370 __sti();                     /* interrupts on from here */
372 initialize_keytable(); /* call back handling for key codes */
374 serial_init_stage2();
376 #ifdef XEN_DEBUGGER
377 initialize_pdb(); /* pervasive debugger */
378 #endif
380 if ( !cpu_has_apic )
381 {
382 do_timer_lists_from_pit = 1; /* no local APIC timer: drive timers from PIT */
383 if ( smp_num_cpus != 1 )
384 panic("We need local APICs on SMP machines!");
385 }
387 ac_timer_init(); /* init accurate timers */
388 init_xen_time(); /* initialise the time */
389 schedulers_start(); /* start scheduler for each CPU */
391 check_nmi_watchdog();
393 #ifdef CONFIG_PCI
394 pci_init();
395 #endif
396 do_initcalls();
398 #ifdef CONFIG_SMP
399 /* Wait until every other CPU has cleared its bit in wait_init_idle. */
400 wait_init_idle = cpu_online_map;
401 clear_bit(smp_processor_id(), &wait_init_idle);
402 smp_threads_ready = 1;
403 smp_commence(); /* Tell other CPUs that state of the world is stable. */
404 while (wait_init_idle)
405 {
406 cpu_relax();
407 barrier();
408 }
409 #endif
410 watchdog_on = 1;
411 }