ia64/xen-unstable
linux-2.6-xen-sparse/drivers/xen/core/smpboot.c @ 13018:fe2e013ae5cb

[LINUX] Fix build without CONFIG_HOTPLUG_CPU
Signed-off-by: Jan Beulich <jbeulich@novell.com>

author    kfraser@localhost.localdomain
date      Thu Dec 14 10:32:38 2006 +0000
parents   ff5f976191a5
children  4fad820a2233

/*
 * Xen SMP booting functions
 *
 * See arch/i386/kernel/smpboot.c for copyright and credits for derived
 * portions of this file.
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <asm/desc.h>
#include <asm/arch_hooks.h>
#include <asm/pgalloc.h>
#include <xen/evtchn.h>
#include <xen/interface/vcpu.h>
#include <xen/cpu_hotplug.h>
#include <xen/xenbus.h>

#ifdef CONFIG_SMP_ALTERNATIVES
#include <asm/smp_alt.h>
#endif

extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);

extern int local_setup_timer(unsigned int cpu);
extern void local_teardown_timer(unsigned int cpu);

extern void hypervisor_callback(void);
extern void failsafe_callback(void);
extern void system_call(void);
extern void smp_trap_init(trap_info_t *);

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
EXPORT_SYMBOL(phys_proc_id);
int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
EXPORT_SYMBOL(cpu_core_id);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_HOTPLUG_CPU
DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

static DEFINE_PER_CPU(int, resched_irq);
static DEFINE_PER_CPU(int, callfunc_irq);
static char resched_name[NR_CPUS][15];
static char callfunc_name[NR_CPUS][15];

u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };

void *xquad_portio;

cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);

#if defined(__i386__)
u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
EXPORT_SYMBOL(x86_cpu_to_apicid);
#elif !defined(CONFIG_X86_IO_APIC)
unsigned int maxcpus = NR_CPUS;
#endif
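
/*
 * Mark every VCPU the hypervisor knows about as possible.  VCPUOP_is_up
 * returns a non-negative value (0 or 1) for any valid VCPU ID and an
 * error for IDs beyond what the hypervisor has allocated, so each probed
 * ID that does not fail is recorded in cpu_possible_map.
 */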
void __init prefill_possible_map(void)
{
        int i, rc;

        if (!cpus_empty(cpu_possible_map))
                return;

        for (i = 0; i < NR_CPUS; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0)
                        cpu_set(i, cpu_possible_map);
        }
}

/*
 * Nothing to allocate here under Xen; presumably no low-memory boot
 * trampoline is needed, so this arch hook is an empty stub.
 */
void __init smp_alloc_memory(void)
{
}
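
/*
 * Xen exposes no CPU topology to the guest, so each VCPU is presented as
 * its own single-core, single-thread package: the sibling and core maps
 * contain only the CPU itself.
 */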
static inline void
set_cpu_sibling_map(int cpu)
{
        phys_proc_id[cpu] = cpu;
        cpu_core_id[cpu] = 0;

        cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
        cpu_core_map[cpu] = cpumask_of_cpu(cpu);

        cpu_data[cpu].booted_cores = 1;
}

static void
remove_siblinginfo(int cpu)
{
        phys_proc_id[cpu] = BAD_APICID;
        cpu_core_id[cpu] = BAD_APICID;

        cpus_clear(cpu_sibling_map[cpu]);
        cpus_clear(cpu_core_map[cpu]);

        cpu_data[cpu].booted_cores = 0;
}
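
/*
 * Bind this CPU's reschedule and call-function IPIs to event-channel
 * IRQs, and set up the local timer on secondary CPUs.  The IRQ numbers
 * are remembered per-CPU so that a failure part-way through (and, under
 * CONFIG_HOTPLUG_CPU, xen_smp_intr_exit()) can unbind exactly what was
 * bound.
 */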
static int xen_smp_intr_init(unsigned int cpu)
{
        int rc;

        per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;

        sprintf(resched_name[cpu], "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
                                    cpu,
                                    smp_reschedule_interrupt,
                                    SA_INTERRUPT,
                                    resched_name[cpu],
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(resched_irq, cpu) = rc;

        sprintf(callfunc_name[cpu], "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
                                    cpu,
                                    smp_call_function_interrupt,
                                    SA_INTERRUPT,
                                    callfunc_name[cpu],
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(callfunc_irq, cpu) = rc;

        if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0))
                goto fail;

        return 0;

 fail:
        if (per_cpu(resched_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
        if (per_cpu(callfunc_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
        return rc;
}
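
/* Reverse of xen_smp_intr_init(), used when a CPU is taken offline. */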
#ifdef CONFIG_HOTPLUG_CPU
static void xen_smp_intr_exit(unsigned int cpu)
{
        if (cpu != 0)
                local_teardown_timer(cpu);

        unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
}
#endif
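
/*
 * Entry path for a secondary VCPU: cpu_initialize_context() points the
 * new VCPU at cpu_bringup_and_idle(), which initializes per-CPU state
 * and then drops into the idle loop with interrupts enabled.
 */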
void cpu_bringup(void)
{
        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();
        local_irq_enable();
}

static void cpu_bringup_and_idle(void)
{
        cpu_bringup();
        cpu_idle();
}
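
/*
 * Assemble the initial register state for a secondary VCPU: kernel
 * segments, an entry point of cpu_bringup_and_idle(), the per-CPU GDT,
 * the event/failsafe (and, on x86-64, syscall) callbacks, and the kernel
 * page tables.  VCPUOP_initialise hands the whole context to the
 * hypervisor, taking the place of the real-mode trampoline used to start
 * secondary CPUs on native hardware.
 */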
void cpu_initialize_context(unsigned int cpu)
{
        vcpu_guest_context_t ctxt;
        struct task_struct *idle = idle_task(cpu);
#ifdef __x86_64__
        struct desc_ptr *gdt_descr = &cpu_gdt_descr[cpu];
#else
        struct Xgt_desc_struct *gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
#endif

        if (cpu == 0)
                return;

        memset(&ctxt, 0, sizeof(ctxt));

        ctxt.flags = VGCF_IN_KERNEL;
        ctxt.user_regs.ds = __USER_DS;
        ctxt.user_regs.es = __USER_DS;
        ctxt.user_regs.fs = 0;
        ctxt.user_regs.gs = 0;
        ctxt.user_regs.ss = __KERNEL_DS;
        ctxt.user_regs.eip = (unsigned long)cpu_bringup_and_idle;
        ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */

        memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));

        smp_trap_init(ctxt.trap_ctxt);

        ctxt.ldt_ents = 0;

        ctxt.gdt_frames[0] = virt_to_mfn(gdt_descr->address);
        ctxt.gdt_ents = gdt_descr->size / 8;

#ifdef __i386__
        ctxt.user_regs.cs = __KERNEL_CS;
        ctxt.user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);

        ctxt.kernel_ss = __KERNEL_DS;
        ctxt.kernel_sp = idle->thread.esp0;

        ctxt.event_callback_cs = __KERNEL_CS;
        ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
        ctxt.failsafe_callback_cs = __KERNEL_CS;
        ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;

        ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
#else /* __x86_64__ */
        ctxt.user_regs.cs = __KERNEL_CS;
        ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);

        ctxt.kernel_ss = __KERNEL_DS;
        ctxt.kernel_sp = idle->thread.rsp0;

        ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
        ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
        ctxt.syscall_callback_eip = (unsigned long)system_call;

        ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));

        ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
#endif

        BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt));
}
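
/*
 * For each possible secondary CPU: allocate a per-CPU GDT (a zeroed page
 * copied from the boot GDT and made read-only unless Xen advertises
 * writable descriptor tables), fork an idle thread, and register the
 * assembled context with the hypervisor.  Under CONFIG_HOTPLUG_CPU,
 * non-initial domains leave cpu_present_map to the initcall below so
 * that VCPU hotplug can control which CPUs appear present.
 */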
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int cpu;
        struct task_struct *idle;
#ifdef __x86_64__
        struct desc_ptr *gdt_descr;
#else
        struct Xgt_desc_struct *gdt_descr;
#endif

        boot_cpu_data.apicid = 0;
        cpu_data[0] = boot_cpu_data;

        cpu_2_logical_apicid[0] = 0;
        x86_cpu_to_apicid[0] = 0;

        current_thread_info()->cpu = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                cpus_clear(cpu_sibling_map[cpu]);
                cpus_clear(cpu_core_map[cpu]);
        }

        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
                        continue;
                cpu_clear(cpu, cpu_possible_map);
        }

        for_each_cpu (cpu) {
                if (cpu == 0)
                        continue;

#ifdef __x86_64__
                gdt_descr = &cpu_gdt_descr[cpu];
#else
                gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
#endif
                gdt_descr->address = get_zeroed_page(GFP_KERNEL);
                if (unlikely(!gdt_descr->address)) {
                        printk(KERN_CRIT "CPU%d failed to allocate GDT\n",
                               cpu);
                        continue;
                }
                gdt_descr->size = GDT_SIZE;
                memcpy((void *)gdt_descr->address, cpu_gdt_table, GDT_SIZE);
                make_page_readonly(
                        (void *)gdt_descr->address,
                        XENFEAT_writable_descriptor_tables);

                cpu_data[cpu] = boot_cpu_data;
                cpu_data[cpu].apicid = cpu;

                cpu_2_logical_apicid[cpu] = cpu;
                x86_cpu_to_apicid[cpu] = cpu;

                idle = fork_idle(cpu);
                if (IS_ERR(idle))
                        panic("failed fork for CPU %d", cpu);

#ifdef __x86_64__
                cpu_pda(cpu)->pcurrent = idle;
                cpu_pda(cpu)->cpunumber = cpu;
                clear_ti_thread_flag(idle->thread_info, TIF_FORK);
#endif

                irq_ctx_init(cpu);

#ifdef CONFIG_HOTPLUG_CPU
                if (is_initial_xendomain())
                        cpu_set(cpu, cpu_present_map);
#else
                cpu_set(cpu, cpu_present_map);
#endif

                cpu_initialize_context(cpu);
        }

        init_xenbus_allowed_cpumask();

#ifdef CONFIG_X86_IO_APIC
        /*
         * Here we can be sure that there is an IO-APIC in the system. Let's
         * go and set it up:
         */
        if (!skip_ioapic_setup && nr_ioapics)
                setup_IO_APIC();
#endif
}
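
/*
 * Boot-CPU setup: fill in cpu_possible_map from the hypervisor and start
 * with only CPU 0 marked present and online.
 */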
void __devinit smp_prepare_boot_cpu(void)
{
        prefill_possible_map();
        cpu_present_map = cpumask_of_cpu(0);
        cpu_online_map  = cpumask_of_cpu(0);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Initialize cpu_present_map late to skip SMP boot code in init/main.c.
 * But do it early enough to catch critical for_each_present_cpu() loops
 * in i386-specific code.
 */
static int __init initialize_cpu_present_map(void)
{
        cpu_present_map = cpu_possible_map;
        return 0;
}
core_initcall(initialize_cpu_present_map);
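
/*
 * Called on the CPU being unplugged: refuse for CPU 0, otherwise drop
 * its topology info, steer its IRQs to the remaining online CPUs via
 * fixup_irqs(), and clear the CPU from cpu_online_map.
 */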
int __cpu_disable(void)
{
        cpumask_t map = cpu_online_map;
        int cpu = smp_processor_id();

        if (cpu == 0)
                return -EBUSY;

        remove_siblinginfo(cpu);

        cpu_clear(cpu, map);
        fixup_irqs(map);
        cpu_clear(cpu, cpu_online_map);

        return 0;
}
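
/*
 * Poll (sleeping HZ/10 at a time) until the hypervisor reports the VCPU
 * down, then release its IPI and timer bindings.
 */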
void __cpu_die(unsigned int cpu)
{
        while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
                current->state = TASK_UNINTERRUPTIBLE;
                schedule_timeout(HZ/10);
        }

        xen_smp_intr_exit(cpu);

#ifdef CONFIG_SMP_ALTERNATIVES
        if (num_online_cpus() == 1)
                unprepare_for_smp();
#endif
}

#else /* !CONFIG_HOTPLUG_CPU */

int __cpu_disable(void)
{
        return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
        BUG();
}

#endif /* CONFIG_HOTPLUG_CPU */
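
/*
 * Bring a secondary CPU online: sibling info and interrupt bindings must
 * be in place before the CPU is marked online; only then is the
 * hypervisor asked to start the VCPU.  VCPUOP_up is not expected to fail
 * for a VCPU that was successfully initialized, hence the BUG_ON().
 */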
int __devinit __cpu_up(unsigned int cpu)
{
        int rc;

        rc = cpu_up_check(cpu);
        if (rc)
                return rc;

#ifdef CONFIG_SMP_ALTERNATIVES
        if (num_online_cpus() == 1)
                prepare_for_smp();
#endif

        /* This must be done before setting cpu_online_map */
        set_cpu_sibling_map(cpu);
        wmb();

        rc = xen_smp_intr_init(cpu);
        if (rc) {
                remove_siblinginfo(cpu);
                return rc;
        }

        cpu_set(cpu, cpu_online_map);

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        BUG_ON(rc);

        return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

#ifndef CONFIG_X86_LOCAL_APIC
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}
#endif