ia64/xen-unstable

view linux-2.6-xen-sparse/arch/xen/kernel/smpboot.c @ 7814:312e3f18bd6c

Initialise secondary CPUs via an arch_initcall rather than
a subsys_initcall. This ensures that initialisation occurs
before topology_init().

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Nov 15 11:28:01 2005 +0100 (2005-11-15)
parents 0915074c356e
children 5e111356ba17
line source
1 /*
2 * Xen SMP booting functions
3 *
4 * See arch/i386/kernel/smpboot.c for copyright and credits for derived
5 * portions of this file.
6 */
8 #include <linux/module.h>
9 #include <linux/config.h>
10 #include <linux/init.h>
11 #include <linux/kernel.h>
12 #include <linux/mm.h>
13 #include <linux/sched.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/smp_lock.h>
16 #include <linux/irq.h>
17 #include <linux/bootmem.h>
18 #include <linux/notifier.h>
19 #include <linux/cpu.h>
20 #include <linux/percpu.h>
21 #include <asm/desc.h>
22 #include <asm/arch_hooks.h>
23 #include <asm/pgalloc.h>
24 #include <asm-xen/evtchn.h>
25 #include <asm-xen/xen-public/vcpu.h>
26 #include <asm-xen/xenbus.h>
28 #ifdef CONFIG_SMP_ALTERNATIVES
29 #include <asm/smp_alt.h>
30 #endif
/* IPI handlers (arch SMP code, defined elsewhere). */
extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);

/* Per-CPU local timer setup/teardown, defined elsewhere. */
extern void local_setup_timer(unsigned int cpu);
extern void local_teardown_timer(unsigned int cpu);

/* Entry points wired into each new VCPU's context in vcpu_prepare(). */
extern void hypervisor_callback(void);
extern void failsafe_callback(void);
extern void system_call(void);
extern void smp_trap_init(trap_info_t *);

/* CPUs that have already run cpu_init(); checked in cpu_bringup(). */
extern cpumask_t cpu_initialized;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
EXPORT_SYMBOL(phys_proc_id);
int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
EXPORT_SYMBOL(cpu_core_id);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_HOTPLUG_CPU
/* Per-CPU hotplug state word (zero-initialised). */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

/* IRQ numbers handed back by bind_ipi_to_irqhandler(); kept so
 * xen_smp_intr_exit() can unbind them again. */
static DEFINE_PER_CPU(int, resched_irq);
static DEFINE_PER_CPU(int, callfunc_irq);
/* Per-CPU IRQ name buffers ("reschedN" / "callfuncN"). */
static char resched_name[NR_CPUS][15];
static char callfunc_name[NR_CPUS][15];

u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };

void *xquad_portio;

cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);

#if defined(__i386__)
u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
EXPORT_SYMBOL(x86_cpu_to_apicid);
#elif !defined(CONFIG_X86_IO_APIC)
unsigned int maxcpus = NR_CPUS;
#endif
/* Intentionally empty: no boot-time SMP trampoline memory is needed
 * under Xen; kept to satisfy the generic SMP boot interface. */
void __init smp_alloc_memory(void)
{
}
88 static void xen_smp_intr_init(unsigned int cpu)
89 {
90 sprintf(resched_name[cpu], "resched%d", cpu);
91 per_cpu(resched_irq, cpu) =
92 bind_ipi_to_irqhandler(
93 RESCHEDULE_VECTOR,
94 cpu,
95 smp_reschedule_interrupt,
96 SA_INTERRUPT,
97 resched_name[cpu],
98 NULL);
99 BUG_ON(per_cpu(resched_irq, cpu) < 0);
101 sprintf(callfunc_name[cpu], "callfunc%d", cpu);
102 per_cpu(callfunc_irq, cpu) =
103 bind_ipi_to_irqhandler(
104 CALL_FUNCTION_VECTOR,
105 cpu,
106 smp_call_function_interrupt,
107 SA_INTERRUPT,
108 callfunc_name[cpu],
109 NULL);
110 BUG_ON(per_cpu(callfunc_irq, cpu) < 0);
112 if (cpu != 0)
113 local_setup_timer(cpu);
114 }
#ifdef CONFIG_HOTPLUG_CPU
/* Undo xen_smp_intr_init() for a CPU that is going offline. */
static void xen_smp_intr_exit(unsigned int cpu)
{
	/* Only secondary CPUs had a local timer installed by us. */
	if (cpu != 0)
		local_teardown_timer(cpu);

	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
}
#endif
127 static void cpu_bringup(void)
128 {
129 if (!cpu_isset(smp_processor_id(), cpu_initialized))
130 cpu_init();
131 local_irq_enable();
132 cpu_idle();
133 }
/*
 * Build the initial register/descriptor context for @vcpu and hand it
 * to Xen via the VCPUOP_initialise hypercall. VCPU 0 (the boot CPU)
 * is already running and is skipped. BUGs if the hypercall fails.
 */
void vcpu_prepare(int vcpu)
{
	vcpu_guest_context_t ctxt;
	struct task_struct *idle = idle_task(vcpu);

	if (vcpu == 0)
		return;

	memset(&ctxt, 0, sizeof(ctxt));

	/* Start in kernel mode with flat user data segments. */
	ctxt.flags = VGCF_IN_KERNEL;
	ctxt.user_regs.ds = __USER_DS;
	ctxt.user_regs.es = __USER_DS;
	ctxt.user_regs.fs = 0;
	ctxt.user_regs.gs = 0;
	ctxt.user_regs.ss = __KERNEL_DS;
	/* Execution begins at cpu_bringup() with interrupts enabled. */
	ctxt.user_regs.eip = (unsigned long)cpu_bringup;
	ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */

	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));

	smp_trap_init(ctxt.trap_ctxt);

	/* No LDT. */
	ctxt.ldt_ents = 0;

	/* Per-CPU GDT page prepared in smp_prepare_cpus(). */
	ctxt.gdt_frames[0] = virt_to_mfn(cpu_gdt_descr[vcpu].address);
	ctxt.gdt_ents = cpu_gdt_descr[vcpu].size / 8;

#ifdef __i386__
	ctxt.user_regs.cs = __KERNEL_CS;
	ctxt.user_regs.esp = idle->thread.esp;

	ctxt.kernel_ss = __KERNEL_DS;
	ctxt.kernel_sp = idle->thread.esp0;

	ctxt.event_callback_cs = __KERNEL_CS;
	ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
	ctxt.failsafe_callback_cs = __KERNEL_CS;
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;

	/* cr3: machine frame of the boot page directory. */
	ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
#else
	/* RPL 3: x86_64 Xen guest kernels do not run in ring 0. */
	ctxt.user_regs.cs = __KERNEL_CS | 3;
	ctxt.user_regs.esp = idle->thread.rsp;

	ctxt.kernel_ss = __KERNEL_DS;
	ctxt.kernel_sp = idle->thread.rsp0;

	ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
	ctxt.syscall_callback_eip = (unsigned long)system_call;

	ctxt.ctrlreg[3] = virt_to_mfn(init_level4_pgt) << PAGE_SHIFT;

	/* Kernel GS base points at this VCPU's PDA. */
	ctxt.gs_base_kernel = (unsigned long)(cpu_pda + vcpu);
#endif

	BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctxt));
}
195 void __init smp_prepare_cpus(unsigned int max_cpus)
196 {
197 int cpu, rc;
198 struct task_struct *idle;
200 cpu_data[0] = boot_cpu_data;
202 cpu_2_logical_apicid[0] = 0;
203 x86_cpu_to_apicid[0] = 0;
205 current_thread_info()->cpu = 0;
206 cpu_sibling_map[0] = cpumask_of_cpu(0);
207 cpu_core_map[0] = cpumask_of_cpu(0);
209 if (max_cpus != 0)
210 xen_smp_intr_init(0);
212 for (cpu = 1; cpu < max_cpus; cpu++) {
213 rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
214 if (rc == -ENOENT)
215 break;
216 BUG_ON(rc != 0);
218 cpu_data[cpu] = boot_cpu_data;
219 cpu_2_logical_apicid[cpu] = cpu;
220 x86_cpu_to_apicid[cpu] = cpu;
222 idle = fork_idle(cpu);
223 if (IS_ERR(idle))
224 panic("failed fork for CPU %d", cpu);
226 #ifdef __x86_64__
227 cpu_pda[cpu].pcurrent = idle;
228 cpu_pda[cpu].cpunumber = cpu;
229 per_cpu(init_tss,cpu).rsp0 = idle->thread.rsp;
230 clear_ti_thread_flag(idle->thread_info, TIF_FORK);
231 #endif
233 irq_ctx_init(cpu);
235 cpu_gdt_descr[cpu].address =
236 __get_free_page(GFP_KERNEL|__GFP_ZERO);
237 BUG_ON(cpu_gdt_descr[0].size > PAGE_SIZE);
238 cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size;
239 memcpy((void *)cpu_gdt_descr[cpu].address,
240 (void *)cpu_gdt_descr[0].address,
241 cpu_gdt_descr[0].size);
242 make_page_readonly((void *)cpu_gdt_descr[cpu].address);
244 cpu_set(cpu, cpu_possible_map);
245 #ifdef CONFIG_HOTPLUG_CPU
246 if (xen_start_info->flags & SIF_INITDOMAIN)
247 cpu_set(cpu, cpu_present_map);
248 #else
249 cpu_set(cpu, cpu_present_map);
250 #endif
252 vcpu_prepare(cpu);
253 }
255 /* Currently, Xen gives no dynamic NUMA/HT info. */
256 for (cpu = 1; cpu < NR_CPUS; cpu++) {
257 cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
258 cpu_core_map[cpu] = cpumask_of_cpu(cpu);
259 }
261 #ifdef CONFIG_X86_IO_APIC
262 /*
263 * Here we can be sure that there is an IO-APIC in the system. Let's
264 * go and set it up:
265 */
266 if (!skip_ioapic_setup && nr_ioapics)
267 setup_IO_APIC();
268 #endif
269 }
271 void __devinit smp_prepare_boot_cpu(void)
272 {
273 cpu_possible_map = cpumask_of_cpu(0);
274 cpu_present_map = cpumask_of_cpu(0);
275 cpu_online_map = cpumask_of_cpu(0);
276 }
278 #ifdef CONFIG_HOTPLUG_CPU
280 static void vcpu_hotplug(unsigned int cpu)
281 {
282 int err;
283 char dir[32], state[32];
285 if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
286 return;
288 sprintf(dir, "cpu/%d", cpu);
289 err = xenbus_scanf(NULL, dir, "availability", "%s", state);
290 if (err != 1) {
291 printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
292 return;
293 }
295 if (strcmp(state, "online") == 0) {
296 cpu_set(cpu, cpu_present_map);
297 (void)cpu_up(cpu);
298 } else if (strcmp(state, "offline") == 0) {
299 (void)cpu_down(cpu);
300 } else {
301 printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
302 state, cpu);
303 }
304 }
306 static void handle_vcpu_hotplug_event(
307 struct xenbus_watch *watch, const char **vec, unsigned int len)
308 {
309 int cpu;
310 char *cpustr;
311 const char *node = vec[XS_WATCH_PATH];
313 if ((cpustr = strstr(node, "cpu/")) != NULL) {
314 sscanf(cpustr, "cpu/%d", &cpu);
315 vcpu_hotplug(cpu);
316 }
317 }
319 static int setup_cpu_watcher(struct notifier_block *notifier,
320 unsigned long event, void *data)
321 {
322 int i;
324 static struct xenbus_watch cpu_watch = {
325 .node = "cpu",
326 .callback = handle_vcpu_hotplug_event };
327 (void)register_xenbus_watch(&cpu_watch);
329 if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
330 for_each_cpu(i)
331 vcpu_hotplug(i);
332 printk(KERN_INFO "Brought up %ld CPUs\n",
333 (long)num_online_cpus());
334 }
336 return NOTIFY_DONE;
337 }
339 static int __init setup_vcpu_hotplug_event(void)
340 {
341 static struct notifier_block xsn_cpu = {
342 .notifier_call = setup_cpu_watcher };
343 register_xenstore_notifier(&xsn_cpu);
344 return 0;
345 }
347 arch_initcall(setup_vcpu_hotplug_event);
349 int __cpu_disable(void)
350 {
351 cpumask_t map = cpu_online_map;
352 int cpu = smp_processor_id();
354 if (cpu == 0)
355 return -EBUSY;
357 cpu_clear(cpu, map);
358 fixup_irqs(map);
359 cpu_clear(cpu, cpu_online_map);
361 return 0;
362 }
/*
 * Wait for @cpu's VCPU to actually go down in Xen, then release its
 * IPI/timer bindings. Polls roughly every HZ/10 ticks; the loop
 * assumes VCPUOP_is_up returns nonzero while the VCPU is still up.
 */
void __cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}

	xen_smp_intr_exit(cpu);

#ifdef CONFIG_SMP_ALTERNATIVES
	/* Switch back to UP alternatives when only one CPU remains. */
	if (num_online_cpus() == 1)
		unprepare_for_smp();
#endif
}
379 #else /* !CONFIG_HOTPLUG_CPU */
381 int __cpu_disable(void)
382 {
383 return -ENOSYS;
384 }
386 void __cpu_die(unsigned int cpu)
387 {
388 BUG();
389 }
391 #endif /* CONFIG_HOTPLUG_CPU */
/*
 * Bring @cpu online: bind its IPIs, mark it online, then kick the
 * VCPU via VCPUOP_up (it starts executing at cpu_bringup(), set up
 * earlier by vcpu_prepare()).
 */
int __devinit __cpu_up(unsigned int cpu)
{
#ifdef CONFIG_SMP_ALTERNATIVES
	/* Switch to SMP alternatives when the second CPU appears. */
	if (num_online_cpus() == 1)
		prepare_for_smp();
#endif

	xen_smp_intr_init(cpu);
	cpu_set(cpu, cpu_online_map);
	/* NOTE(review): the hypercall's return value is ignored, so a
	 * failed VCPUOP_up still reports success — confirm intent. */
	HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);

	return 0;
}
/* Intentionally empty: no finalisation is needed once all CPUs are up;
 * present to satisfy the generic SMP boot interface. */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
411 /*
412 * Local variables:
413 * c-file-style: "linux"
414 * indent-tabs-mode: t
415 * c-indent-level: 8
416 * c-basic-offset: 8
417 * tab-width: 8
418 * End:
419 */