direct-io.hg

view xen/arch/x86/setup.c @ 3280:dda5ab69e74a

sync w/ head.
bitkeeper revision 1.1159.1.477 (41bf20d2wgoxIqhcE0nzBC8W-yFPhg)

author      cl349@arcadians.cl.cam.ac.uk
date        Tue Dec 14 17:20:18 2004 +0000 (2004-12-14)
parents     96f143b7c63d e606dfe099cc
children    b9ab4345fd1b
line source
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/pci.h>
#include <xen/serial.h>
#include <xen/softirq.h>
#include <xen/acpi.h>
#include <asm/bitops.h>
#include <asm/smp.h>
#include <asm/processor.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/domain_page.h>
#include <asm/pdb.h>

extern void arch_init_memory(void);
extern void init_IRQ(void);
extern void trap_init(void);
extern void time_init(void);
extern void ac_timer_init(void);
extern void initialize_keytable();
extern int opt_nosmp, opt_watchdog, opt_noacpi;
extern int opt_ignorebiostables;
extern int do_timer_lists_from_pit;

char ignore_irq13; /* set if exception 16 works */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1 };

#if defined(__x86_64__)
unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
#else
unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE;
#endif
EXPORT_SYMBOL(mmu_cr4_features);

unsigned long wait_init_idle;

struct exec_domain *idle_task[NR_CPUS] = { &idle0_exec_domain };

#ifdef CONFIG_ACPI_INTERPRETER
int acpi_disabled = 0;
#else
int acpi_disabled = 1;
#endif
EXPORT_SYMBOL(acpi_disabled);

int phys_proc_id[NR_CPUS];
int logical_proc_id[NR_CPUS];

#if defined(__i386__)

/* Standard test to see if a specific EFLAGS bit is changeable. */
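/*
 * The probe works by pushing EFLAGS, flipping the requested bit in the
 * stacked copy, popping it back into EFLAGS, and then reading EFLAGS
 * again: if the bit stayed flipped, the flag is changeable. Toggling
 * X86_EFLAGS_ID (bit 21) this way is the architectural test for CPUID
 * support on 32-bit processors.
 */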
static inline int flag_is_changeable_p(u32 flag)
{
    u32 f1, f2;

    asm("pushfl\n\t"
        "pushfl\n\t"
        "popl %0\n\t"
        "movl %0,%1\n\t"
        "xorl %2,%0\n\t"
        "pushl %0\n\t"
        "popfl\n\t"
        "pushfl\n\t"
        "popl %0\n\t"
        "popfl\n\t"
        : "=&r" (f1), "=&r" (f2)
        : "ir" (flag));

    return ((f1 ^ f2) & flag) != 0;
}

/* Probe for the CPUID instruction. */
static int __init have_cpuid_p(void)
{
    return flag_is_changeable_p(X86_EFLAGS_ID);
}

#elif defined(__x86_64__)

#define have_cpuid_p() (1)

#endif

void __init get_cpu_vendor(struct cpuinfo_x86 *c)
{
    char *v = c->x86_vendor_id;

    if (!strcmp(v, "GenuineIntel"))
        c->x86_vendor = X86_VENDOR_INTEL;
    else if (!strcmp(v, "AuthenticAMD"))
        c->x86_vendor = X86_VENDOR_AMD;
    else if (!strcmp(v, "CyrixInstead"))
        c->x86_vendor = X86_VENDOR_CYRIX;
    else if (!strcmp(v, "UMC UMC UMC "))
        c->x86_vendor = X86_VENDOR_UMC;
    else if (!strcmp(v, "CentaurHauls"))
        c->x86_vendor = X86_VENDOR_CENTAUR;
    else if (!strcmp(v, "NexGenDriven"))
        c->x86_vendor = X86_VENDOR_NEXGEN;
    else if (!strcmp(v, "RiseRiseRise"))
        c->x86_vendor = X86_VENDOR_RISE;
    else if (!strcmp(v, "GenuineTMx86") ||
             !strcmp(v, "TransmetaCPU"))
        c->x86_vendor = X86_VENDOR_TRANSMETA;
    else
        c->x86_vendor = X86_VENDOR_UNKNOWN;
}

static void __init init_intel(struct cpuinfo_x86 *c)
{
    /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it. */
    if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
        clear_bit(X86_FEATURE_SEP, &c->x86_capability);

#ifdef CONFIG_SMP
    if ( test_bit(X86_FEATURE_HT, &c->x86_capability) )
    {
        u32 eax, ebx, ecx, edx;
        int initial_apic_id, siblings, cpu = smp_processor_id();

        cpuid(1, &eax, &ebx, &ecx, &edx);
        ht_per_core = siblings = (ebx & 0xff0000) >> 16;

        if ( opt_noht )
            clear_bit(X86_FEATURE_HT, &c->x86_capability[0]);

        if ( siblings <= 1 )
        {
            printk(KERN_INFO "CPU#%d: Hyper-Threading is disabled\n", cpu);
        }
        else if ( siblings > 2 )
        {
            panic("We don't support more than two logical CPUs per package!");
        }
        else
        {
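            /*
             * CPUID leaf 1 reports the initial APIC ID in EBX[31:24] and
             * the logical processor count per package in EBX[23:16]. With
             * exactly two siblings, the low bit of the APIC ID selects the
             * hyperthread and the remaining bits identify the package.
             */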
            initial_apic_id = (ebx >> 24) & 0xff;
            phys_proc_id[cpu] = initial_apic_id >> 1;
            logical_proc_id[cpu] = initial_apic_id & 1;
            printk(KERN_INFO "CPU#%d: Physical ID: %d, Logical ID: %d\n",
                   cpu, phys_proc_id[cpu], logical_proc_id[cpu]);
        }
    }
#endif
}

static void __init init_amd(struct cpuinfo_x86 *c)
{
    /*
     * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
     * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway.
     */
    clear_bit(0*32+31, &c->x86_capability);

    switch ( c->x86 )
    {
    case 5:
        panic("AMD K6 is not supported.\n");
    case 6: /* An Athlon/Duron. We can probably trust the BIOS. */
        break;
    }
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
    int junk, i, cpu = smp_processor_id();
    u32 xlvl, tfms;

    phys_proc_id[cpu] = cpu;
    logical_proc_id[cpu] = 0;

    c->x86_vendor = X86_VENDOR_UNKNOWN;
    c->cpuid_level = -1;            /* CPUID not detected */
    c->x86_model = c->x86_mask = 0; /* So far unknown... */
    c->x86_vendor_id[0] = '\0';     /* Unset */
    memset(&c->x86_capability, 0, sizeof(c->x86_capability));

    if ( !have_cpuid_p() )
        panic("Ancient processors not supported\n");

    /* Get vendor name. */
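    /*
     * Leaf 0 delivers the 12-byte vendor string across EBX, EDX and ECX,
     * in that order; the 0/8/4 byte offsets below reassemble it
     * contiguously in memory.
     */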
    cpuid(0x00000000, &c->cpuid_level,
          (int *)&c->x86_vendor_id[0],
          (int *)&c->x86_vendor_id[8],
          (int *)&c->x86_vendor_id[4]);

    get_cpu_vendor(c);

    if ( c->cpuid_level == 0 )
        panic("Decrepit CPUID not supported\n");

    cpuid(0x00000001, &tfms, &junk, &junk,
          &c->x86_capability[0]);
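
    /*
     * EAX from leaf 1 packs the processor signature: stepping in bits
     * 3:0, model in bits 7:4 and family in bits 11:8.
     */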
    c->x86 = (tfms >> 8) & 15;
    c->x86_model = (tfms >> 4) & 15;
    c->x86_mask = tfms & 15;

    /* AMD-defined flags: level 0x80000001 */
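    /*
     * Leaf 0x80000000 returns the highest supported extended level in
     * EAX; the high-word check filters out older CPUs that echo junk
     * for unimplemented leaves.
     */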
    xlvl = cpuid_eax(0x80000000);
    if ( (xlvl & 0xffff0000) == 0x80000000 ) {
        if ( xlvl >= 0x80000001 )
            c->x86_capability[1] = cpuid_edx(0x80000001);
    }

    /* Transmeta-defined flags: level 0x80860001 */
    xlvl = cpuid_eax(0x80860000);
    if ( (xlvl & 0xffff0000) == 0x80860000 ) {
        if ( xlvl >= 0x80860001 )
            c->x86_capability[2] = cpuid_edx(0x80860001);
    }
217 printk("CPU%d: Before vendor init, caps: %08x %08x %08x, vendor = %d\n",
218 smp_processor_id(),
219 c->x86_capability[0],
220 c->x86_capability[1],
221 c->x86_capability[2],
222 c->x86_vendor);
224 switch ( c->x86_vendor ) {
225 case X86_VENDOR_INTEL:
226 init_intel(c);
227 break;
228 case X86_VENDOR_AMD:
229 init_amd(c);
230 break;
231 case X86_VENDOR_UNKNOWN: /* Connectix Virtual PC reports this */
232 break;
233 case X86_VENDOR_CENTAUR:
234 break;
235 default:
236 printk("Unknown CPU identifier (%d): continuing anyway, "
237 "but might fail.\n", c->x86_vendor);
238 }
240 printk("CPU caps: %08x %08x %08x %08x\n",
241 c->x86_capability[0],
242 c->x86_capability[1],
243 c->x86_capability[2],
244 c->x86_capability[3]);
246 /*
247 * On SMP, boot_cpu_data holds the common feature set between
248 * all CPUs; so make sure that we indicate which features are
249 * common between the CPUs. The first time this routine gets
250 * executed, c == &boot_cpu_data.
251 */
252 if ( c != &boot_cpu_data ) {
253 /* AND the already accumulated flags with these */
254 for ( i = 0 ; i < NCAPINTS ; i++ )
255 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
256 }
257 }

unsigned long cpu_initialized;
void __init cpu_init(void)
{
#if defined(__i386__) /* XXX */
    int nr = smp_processor_id();
    struct tss_struct *t = &init_tss[nr];

    if ( test_and_set_bit(nr, &cpu_initialized) )
        panic("CPU#%d already initialized!!!\n", nr);
    printk("Initializing CPU#%d\n", nr);
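
    /*
     * Point the TSS at an invalid I/O bitmap offset so that any port
     * access from an unprivileged ring faults, and fill the bitmap with
     * ones for the same effect should the offset ever become valid.
     */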
    t->bitmap = IOBMP_INVALID_OFFSET;
    memset(t->io_bitmap, ~0, sizeof(t->io_bitmap));

    /* Set up GDT and IDT. */
    SET_GDT_ENTRIES(current, DEFAULT_GDT_ENTRIES);
    SET_GDT_ADDRESS(current, DEFAULT_GDT_ADDRESS);
    __asm__ __volatile__("lgdt %0" : "=m" (*current->mm.gdt));
    __asm__ __volatile__("lidt %0" : "=m" (idt_descr));

    /* No nested task. */
    __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");

    /* Ensure FPU gets initialised for each domain. */
    stts();

    /* Set up and load the per-CPU TSS and LDT. */
    t->ss0  = __HYPERVISOR_DS;
    t->esp0 = get_stack_top();
    set_tss_desc(nr, t);
    load_TR(nr);
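    /* Loading a null selector disables the LDT until a domain installs one. */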
    __asm__ __volatile__("lldt %%ax" : : "a" (0));

    /* Clear all six debug registers (there are no db4 and db5). */
#define CD(register) __asm__("movl %0,%%db" #register : : "r" (0));
    CD(0); CD(1); CD(2); CD(3); CD(6); CD(7);
#undef CD

    /* Install the correct page table. */
    write_ptbase(&current->mm);

    init_idle_task();
#endif
}
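
/*
 * Run each function that the __initcall() machinery placed in the
 * initcall section; __initcall_start and __initcall_end are
 * linker-provided symbols bounding that section.
 */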
static void __init do_initcalls(void)
{
    initcall_t *call;
    for ( call = &__initcall_start; call < &__initcall_end; call++ )
        (*call)();
}

unsigned long pci_mem_start = 0x10000000;

void __init start_of_day(void)
{
    unsigned long low_mem_size;

#ifdef MEMORY_GUARD
    /* Unmap the first page of CPU0's stack. */
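    /*
     * With the mapping gone, overflowing the boot stack faults
     * immediately instead of silently corrupting adjacent memory.
     */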
    extern unsigned long cpu0_stack[];
    memguard_guard_range(cpu0_stack, PAGE_SIZE);
#endif

    open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);

    if ( opt_watchdog )
        nmi_watchdog = NMI_LOCAL_APIC;

    sort_exception_tables();

    arch_do_createdomain(current);

    /* Tell the PCI layer not to allocate too close to the RAM area. */
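    /* Round the top of RAM up to a 1MB boundary before comparing. */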
    low_mem_size = ((max_page << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
    if ( low_mem_size > pci_mem_start ) pci_mem_start = low_mem_size;

    identify_cpu(&boot_cpu_data); /* get CPU type info */
    if ( cpu_has_fxsr ) set_in_cr4(X86_CR4_OSFXSR);
    if ( cpu_has_xmm )  set_in_cr4(X86_CR4_OSXMMEXCPT);
#ifdef CONFIG_SMP
    if ( opt_ignorebiostables )
    {
        opt_nosmp  = 1; /* no SMP without configuration */
        opt_noacpi = 1; /* ACPI would just confuse matters as well */
    }
    else
    {
        find_smp_config();
        smp_alloc_memory(); /* trampoline which other CPUs jump to */
    }
#endif
    paging_init(); /* not much here now, but sets up the fixmap */
    if ( !opt_noacpi )
        acpi_boot_init();
#ifdef CONFIG_SMP
    if ( smp_found_config )
        get_smp_config();
#endif
    scheduler_init();
    init_IRQ();  /* installs simple interrupt wrappers; starts HZ clock */
    trap_init();
    time_init(); /* installs software handler for HZ clock */
    init_apic_mappings(); /* make APICs addressable in our pagetables */

    arch_init_memory();

#ifndef CONFIG_SMP
    APIC_init_uniprocessor();
#else
    if ( opt_nosmp )
        APIC_init_uniprocessor();
    else
        smp_boot_cpus();
    /*
     * Does loads of stuff, including kicking the local APIC, and the
     * IO-APIC after other CPUs are booted. Each IRQ is preferably
     * handled by the IO-APIC, but we fall back to the (slower) 8259A
     * where we have to.
     */
#endif

    __sti();

    initialize_keytable(); /* call-back handling for key codes */

    serial_init_stage2();

#ifdef XEN_DEBUGGER
    initialize_pdb(); /* pervasive debugger */
#endif

    if ( !cpu_has_apic )
    {
        do_timer_lists_from_pit = 1;
        if ( smp_num_cpus != 1 )
            panic("We need local APICs on SMP machines!");
    }

    ac_timer_init();    /* init accurate timers */
    init_xen_time();    /* initialise the time */
    schedulers_start(); /* start a scheduler for each CPU */

    check_nmi_watchdog();

#ifdef CONFIG_PCI
    pci_init();
#endif
    do_initcalls();
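
    /*
     * Rendezvous with the secondary CPUs: each clears its bit in
     * wait_init_idle once it enters its idle loop, and the boot CPU
     * (whose bit is cleared here) spins until the mask drains to zero.
     * Only then is it safe to enable the watchdog.
     */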
410 #ifdef CONFIG_SMP
411 wait_init_idle = cpu_online_map;
412 clear_bit(smp_processor_id(), &wait_init_idle);
413 smp_threads_ready = 1;
414 smp_commence(); /* Tell other CPUs that state of the world is stable. */
415 while ( wait_init_idle != 0 )
416 {
417 cpu_relax();
418 barrier();
419 }
420 #endif
422 watchdog_on = 1;
423 }