ia64/xen-unstable

view xen/arch/i386/setup.c @ 722:7a9d47fea66c

bitkeeper revision 1.428 (3f677454_j81KDQLm_L7AscjYn2nYg)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into labyrinth.cl.cam.ac.uk:/auto/anfs/scratch/labyrinth/iap10/xeno-clone/xeno.bk
author iap10@labyrinth.cl.cam.ac.uk
date Tue Sep 16 20:36:36 2003 +0000 (2003-09-16)
parents 3744aa0643ce ec38a236c5db
children 8b83e1b92625
line source
2 #include <xeno/config.h>
3 #include <xeno/init.h>
4 #include <xeno/interrupt.h>
5 #include <xeno/lib.h>
6 #include <xeno/sched.h>
7 #include <xeno/pci.h>
8 #include <asm/bitops.h>
9 #include <asm/smp.h>
10 #include <asm/processor.h>
11 #include <asm/mpspec.h>
12 #include <asm/apic.h>
13 #include <asm/desc.h>
14 #include <asm/domain_page.h>
/* CPU data for the boot processor; on SMP it accumulates the common feature set. */
struct cpuinfo_x86 boot_cpu_data = { 0 };

/* Lots of nice things, since we only target PPro+. */
unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE;

/* Bitmask of CPUs whose idle tasks have not yet started (see start_of_day). */
unsigned long wait_init_idle;

/* Basic page table for each CPU in the system. */
l2_pgentry_t *idle_pg_table[NR_CPUS] = { idle0_pg_table };
struct task_struct *idle_task[NR_CPUS] = { &idle0_task };

/* for asm/domain_page.h, map_domain_page() */
unsigned long *mapcache[NR_CPUS];

/* Hyper-Threading topology, filled in by init_intel()/identify_cpu(). */
int phys_proc_id[NR_CPUS];      /* physical package ID per CPU */
int logical_proc_id[NR_CPUS];   /* sibling index within the package */
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
    u32 f1, f2;

    /*
     * Read EFLAGS, write it back with 'flag' XOR-toggled, then read it
     * again: if the bit stuck, the CPU does not let us change it.
     */
    asm("pushfl\n\t"            /* save caller's EFLAGS */
        "pushfl\n\t"
        "popl %0\n\t"           /* f1 = current EFLAGS */
        "movl %0,%1\n\t"        /* f2 = original value for comparison */
        "xorl %2,%0\n\t"        /* toggle the bit under test */
        "pushl %0\n\t"
        "popfl\n\t"             /* attempt to install modified EFLAGS */
        "pushfl\n\t"
        "popl %0\n\t"           /* f1 = EFLAGS as the CPU accepted it */
        "popfl\n\t"             /* restore caller's EFLAGS */
        : "=&r" (f1), "=&r" (f2)
        : "ir" (flag));

    /* Non-zero iff the bit actually changed. */
    return ((f1^f2) & flag) != 0;
}
52 /* Probe for the CPUID instruction */
53 static int __init have_cpuid_p(void)
54 {
55 return flag_is_changeable_p(X86_EFLAGS_ID);
56 }
58 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
59 {
60 char *v = c->x86_vendor_id;
62 if (!strcmp(v, "GenuineIntel"))
63 c->x86_vendor = X86_VENDOR_INTEL;
64 else if (!strcmp(v, "AuthenticAMD"))
65 c->x86_vendor = X86_VENDOR_AMD;
66 else if (!strcmp(v, "CyrixInstead"))
67 c->x86_vendor = X86_VENDOR_CYRIX;
68 else if (!strcmp(v, "UMC UMC UMC "))
69 c->x86_vendor = X86_VENDOR_UMC;
70 else if (!strcmp(v, "CentaurHauls"))
71 c->x86_vendor = X86_VENDOR_CENTAUR;
72 else if (!strcmp(v, "NexGenDriven"))
73 c->x86_vendor = X86_VENDOR_NEXGEN;
74 else if (!strcmp(v, "RiseRiseRise"))
75 c->x86_vendor = X86_VENDOR_RISE;
76 else if (!strcmp(v, "GenuineTMx86") ||
77 !strcmp(v, "TransmetaCPU"))
78 c->x86_vendor = X86_VENDOR_TRANSMETA;
79 else
80 c->x86_vendor = X86_VENDOR_UNKNOWN;
81 }
/* Intel-specific setup: SEP erratum workaround and Hyper-Threading topology. */
static void __init init_intel(struct cpuinfo_x86 *c)
{
    extern int opt_noht, opt_noacpi;

    /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
    if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
        clear_bit(X86_FEATURE_SEP, &c->x86_capability);

    if ( opt_noht )
    {
        opt_noacpi = 1; /* Virtual CPUs only appear in ACPI tables. */
        clear_bit(X86_FEATURE_HT, &c->x86_capability[0]);
    }

#ifdef CONFIG_SMP
    /* With HT enabled, derive this CPU's package/sibling IDs from its APIC ID. */
    if ( test_bit(X86_FEATURE_HT, &c->x86_capability) )
    {
        u32 eax, ebx, ecx, edx;
        int initial_apic_id, siblings, cpu = smp_processor_id();

        /* CPUID leaf 1: EBX[23:16] = logical processors per physical package. */
        cpuid(1, &eax, &ebx, &ecx, &edx);
        siblings = (ebx & 0xff0000) >> 16;

        if ( siblings <= 1 )
        {
            printk(KERN_INFO "CPU#%d: Hyper-Threading is disabled\n", cpu);
        }
        else if ( siblings > 2 )
        {
            panic("We don't support more than two logical CPUs per package!");
        }
        else
        {
            /* EBX[31:24] = initial APIC ID; with two siblings, bit 0 selects
               the logical CPU and the remaining bits name the package. */
            initial_apic_id = ebx >> 24 & 0xff;
            phys_proc_id[cpu] = initial_apic_id >> 1;
            logical_proc_id[cpu] = initial_apic_id & 1;
            printk(KERN_INFO "CPU#%d: Physical ID: %d, Logical ID: %d\n",
                   cpu, phys_proc_id[cpu], logical_proc_id[cpu]);
        }
    }
#endif
}
126 static void __init init_amd(struct cpuinfo_x86 *c)
127 {
128 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
129 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
130 clear_bit(0*32+31, &c->x86_capability);
132 switch(c->x86)
133 {
134 case 5:
135 panic("AMD K6 is not supported.\n");
136 case 6: /* An Athlon/Duron. We can trust the BIOS probably */
137 break;
138 }
139 }
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
    int junk, i, cpu = smp_processor_id();
    u32 xlvl, tfms;

    /* Default topology: each CPU is its own package until init_intel()
       discovers Hyper-Threading siblings. */
    phys_proc_id[cpu] = cpu;
    logical_proc_id[cpu] = 0;

    /* Start from a clean slate so stale data can't leak through. */
    c->x86_vendor = X86_VENDOR_UNKNOWN;
    c->cpuid_level = -1;            /* CPUID not detected */
    c->x86_model = c->x86_mask = 0; /* So far unknown... */
    c->x86_vendor_id[0] = '\0';     /* Unset */
    memset(&c->x86_capability, 0, sizeof c->x86_capability);

    if ( !have_cpuid_p() )
        panic("Ancient processors not supported\n");

    /* Get vendor name: leaf 0 returns the max leaf in EAX and the 12-byte
       vendor string in EBX, EDX, ECX order (hence the 0/8/4 offsets). */
    cpuid(0x00000000, &c->cpuid_level,
          (int *)&c->x86_vendor_id[0],
          (int *)&c->x86_vendor_id[8],
          (int *)&c->x86_vendor_id[4]);

    get_cpu_vendor(c);

    if ( c->cpuid_level == 0 )
        panic("Decrepit CPUID not supported\n");

    /* Leaf 1: EAX packs type/family/model/stepping; EDX is the feature mask. */
    cpuid(0x00000001, &tfms, &junk, &junk,
          &c->x86_capability[0]);
    c->x86 = (tfms >> 8) & 15;
    c->x86_model = (tfms >> 4) & 15;
    c->x86_mask = tfms & 15;

    /* AMD-defined flags: level 0x80000001 */
    xlvl = cpuid_eax(0x80000000);
    if ( (xlvl & 0xffff0000) == 0x80000000 ) {
        if ( xlvl >= 0x80000001 )
            c->x86_capability[1] = cpuid_edx(0x80000001);
    }

    /* Transmeta-defined flags: level 0x80860001 */
    xlvl = cpuid_eax(0x80860000);
    if ( (xlvl & 0xffff0000) == 0x80860000 ) {
        if ( xlvl >= 0x80860001 )
            c->x86_capability[2] = cpuid_edx(0x80860001);
    }

    printk("CPU%d: Before vendor init, caps: %08x %08x %08x, vendor = %d\n",
           smp_processor_id(),
           c->x86_capability[0],
           c->x86_capability[1],
           c->x86_capability[2],
           c->x86_vendor);

    /* Vendor-specific fixups; anything besides Intel/AMD is rejected. */
    switch ( c->x86_vendor ) {
    case X86_VENDOR_INTEL:
        init_intel(c);
        break;
    case X86_VENDOR_AMD:
        init_amd(c);
        break;
    default:
        panic("Only support Intel processors (P6+)\n");
    }

    printk("CPU caps: %08x %08x %08x %08x\n",
           c->x86_capability[0],
           c->x86_capability[1],
           c->x86_capability[2],
           c->x86_capability[3]);

    /*
     * On SMP, boot_cpu_data holds the common feature set between
     * all CPUs; so make sure that we indicate which features are
     * common between the CPUs. The first time this routine gets
     * executed, c == &boot_cpu_data.
     */
    if ( c != &boot_cpu_data ) {
        /* AND the already accumulated flags with these */
        for ( i = 0 ; i < NCAPINTS ; i++ )
            boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
    }
}
/* Bitmask of CPUs that have completed cpu_init(); guards double bring-up. */
unsigned long cpu_initialized;

/* Per-CPU bring-up: load GDT/IDT/TSS, clear debug state, install the page
   table, and create this CPU's domain-page mapping cache. */
void __init cpu_init(void)
{
    int nr = smp_processor_id();
    struct tss_struct * t = &init_tss[nr];
    l2_pgentry_t *pl2e;

    if ( test_and_set_bit(nr, &cpu_initialized) )
        panic("CPU#%d already initialized!!!\n", nr);
    printk("Initializing CPU#%d\n", nr);

    /* Set up GDT and IDT. */
    SET_GDT_ENTRIES(current, DEFAULT_GDT_ENTRIES);
    SET_GDT_ADDRESS(current, DEFAULT_GDT_ADDRESS);
    __asm__ __volatile__("lgdt %0": "=m" (*current->mm.gdt));
    __asm__ __volatile__("lidt %0": "=m" (idt_descr));

    /* No nested task: clear EFLAGS.NT (bit 14). */
    __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");

    /* Ensure FPU gets initialised for each domain. */
    stts();

    /* Set up and load the per-CPU TSS and LDT. */
    t->ss0 = __HYPERVISOR_DS;
    t->esp0 = current->thread.esp0;
    set_tss_desc(nr,t);
    load_TR(nr);
    __asm__ __volatile__("lldt %%ax"::"a" (0)); /* null LDT selector */

    /* Clear all 6 debug registers. */
#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
    CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
#undef CD

    /* Install correct page table. */
    __asm__ __volatile__ ("movl %%eax,%%cr3"
                          : : "a" (pagetable_val(current->mm.pagetable)));

    /* Set up mapping cache for domain pages: point the MAPCACHE L2 slot at
       a freshly zeroed page of PTEs owned by this CPU. */
    pl2e = idle_pg_table[nr] + (MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT);
    mapcache[nr] = (unsigned long *)get_free_page(GFP_KERNEL);
    clear_page(mapcache[nr]);
    *pl2e = mk_l2_pgentry(__pa(mapcache[nr]) | PAGE_HYPERVISOR);

    init_idle_task();
}
278 static void __init do_initcalls(void)
279 {
280 initcall_t *call;
282 call = &__initcall_start;
283 do {
284 (*call)();
285 call++;
286 } while (call < &__initcall_end);
287 }
/*
 * IBM-compatible BIOSes place drive info tables at initial interrupt
 * vectors 0x41 and 0x46. These are in the form of 16-bit-mode far ptrs.
 */
struct drive_info_struct { unsigned char dummy[32]; } drive_info;

/* Snapshot both BIOS drive-info tables into drive_info before low memory
   can be clobbered. NOTE(review): assumes the real-mode IVT is still
   identity-mapped at physical 0 — confirm against early paging setup. */
void get_bios_driveinfo(void)
{
    unsigned long seg, off, tab1, tab2;

    /* IVT entry 0x41: far pointer (offset word, then segment word). */
    off = (unsigned long)*(unsigned short *)(4*0x41+0);
    seg = (unsigned long)*(unsigned short *)(4*0x41+2);
    tab1 = (seg<<4) + off;  /* linear address = segment*16 + offset */

    /* IVT entry 0x46: second drive's table. */
    off = (unsigned long)*(unsigned short *)(4*0x46+0);
    seg = (unsigned long)*(unsigned short *)(4*0x46+2);
    tab2 = (seg<<4) + off;

    printk("Reading BIOS drive-info tables at 0x%05lx and 0x%05lx\n",
           tab1, tab2);

    memcpy(drive_info.dummy+ 0, (char *)tab1, 16);
    memcpy(drive_info.dummy+16, (char *)tab2, 16);
}
/* Lowest address the PCI layer may allocate; raised above RAM below. */
unsigned long pci_mem_start = 0x10000000;

/*
 * Master boot sequence for the hypervisor. Ordering is significant
 * throughout: CPU identification before CR4 feature bits, paging before
 * SMP config parsing, APIC mappings before timer/IRQ bring-up, and the
 * idle-task rendezvous last.
 */
void __init start_of_day(void)
{
    extern void trap_init(void);
    extern void init_IRQ(void);
    extern void time_init(void);
    extern void softirq_init(void);
    extern void timer_bh(void);
    extern void tqueue_bh(void);
    extern void immediate_bh(void);
    extern void init_timervecs(void);
    extern void disable_pit(void);
    extern void ac_timer_init(void);
    extern int setup_network_devices(void);
    extern void net_init(void);
    extern void initialize_block_io(void);
    extern void initialize_keytable();
    extern void initialize_serial(void);
    extern void initialize_keyboard(void);
    extern int opt_nosmp, opt_watchdog;
    extern int do_timer_lists_from_pit;
    unsigned long low_mem_size;

    if ( opt_watchdog )
        nmi_watchdog = NMI_LOCAL_APIC;

    /*
     * We do this early, but tables are in the lowest 1MB (usually
     * 0xfe000-0xfffff). Therefore they're unlikely to ever get clobbered.
     */
    get_bios_driveinfo();

    /* Tell the PCI layer not to allocate too close to the RAM area.. */
    low_mem_size = ((max_page << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
    if ( low_mem_size > pci_mem_start ) pci_mem_start = low_mem_size;

    identify_cpu(&boot_cpu_data); /* get CPU type info */
    if ( cpu_has_fxsr ) set_in_cr4(X86_CR4_OSFXSR);
    if ( cpu_has_xmm )  set_in_cr4(X86_CR4_OSXMMEXCPT);
#ifdef CONFIG_SMP
    find_smp_config(); /* find ACPI tables */
    smp_alloc_memory(); /* trampoline which other CPUs jump at */
#endif
    paging_init(); /* not much here now, but sets up fixmap */
#ifdef CONFIG_SMP
    if ( smp_found_config ) get_smp_config();
#endif
    domain_init();
    scheduler_init();
    trap_init();
    init_IRQ(); /* installs simple interrupt wrappers. Starts HZ clock. */
    time_init(); /* installs software handler for HZ clock. */
    softirq_init();
    init_timervecs();
    init_bh(TIMER_BH, timer_bh);
    init_bh(TQUEUE_BH, tqueue_bh);
    init_bh(IMMEDIATE_BH, immediate_bh);
    init_apic_mappings(); /* make APICs addressable in our pagetables. */

#ifndef CONFIG_SMP
    APIC_init_uniprocessor();
#else
    if( opt_nosmp )
	APIC_init_uniprocessor();
    else
	smp_boot_cpus();
    /*
     * Does loads of stuff, including kicking the local
     * APIC, and the IO APIC after other CPUs are booted.
     * Each IRQ is preferably handled by IO-APIC, but
     * fall thru to 8259A if we have to (but slower).
     */
#endif
    initialize_keytable(); /* call back handling for key codes */

    /* Timer source: prefer the local APIC; without one the PIT is the only
       option, which is unworkable on SMP. */
    if ( cpu_has_apic )
        disable_pit();
    else if ( smp_num_cpus != 1 )
        panic("We really need local APICs on SMP machines!");
    else
        do_timer_lists_from_pit = 1;

    ac_timer_init();    /* init accurate timers */
    init_xeno_time();	/* initialise the time */
    schedulers_start(); /* start scheduler for each CPU */

    sti();

    check_nmi_watchdog();

    zap_low_mappings();
    kmem_cache_init();
    kmem_cache_sizes_init(max_page);
#ifdef CONFIG_PCI
    pci_init();
#endif
    do_initcalls();

    initialize_serial();   /* setup serial 'driver' (for debugging) */
    initialize_keyboard(); /* setup keyboard (also for debugging)   */

    if ( !setup_network_devices() )
        panic("Must have a network device!\n");
    net_init();            /* initializes virtual network system. */
    initialize_block_io(); /* setup block devices */

#ifdef CONFIG_SMP
    /* Wait for every secondary CPU to finish initialising its idle task
       (each one clears its bit in wait_init_idle). */
    wait_init_idle = cpu_online_map;
    clear_bit(smp_processor_id(), &wait_init_idle);
    smp_threads_ready = 1;
    smp_commence(); /* Tell other CPUs that state of the world is stable. */
    while (wait_init_idle)
    {
        cpu_relax();
        barrier();
    }
#endif

    watchdog_on = 1;
}