xen/arch/ia64/linux-xen/smp.c @ 10888:5379548bfc79
/*
 * SMP Support
 *
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>  IA64-SMP functions. Reorganized
 *          the existing code (on the lines of x86 port).
 * 00/09/11 David Mosberger <davidm@hpl.hp.com>  Do loops_per_jiffy
 *          calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com>  fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com>  Fixes for Bootstrap Processor
 *          & cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm  Update to bring it in sync with new command-line processing
 *          scheme.
 * 10/13/00 Goutham Rao <goutham.rao@intel.com>  Updated smp_call_function and
 *          smp_call_function_single to resend IPI on timeouts
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/mca.h>
#ifdef XEN
#include <asm/vhpt.h>
#include <asm/hw_irq.h>
#endif
#ifdef XEN
//#if CONFIG_SMP || IA64
#if CONFIG_SMP
//Huh? This seems to be used on ia64 even if !CONFIG_SMP
void smp_send_event_check_mask(cpumask_t mask)
{
	int cpu;

	/* Not for me. */
	cpu_clear(smp_processor_id(), mask);
	if (cpus_empty(mask))
		return;

	//printf("smp_send_event_check_mask called\n");

	for (cpu = 0; cpu < NR_CPUS; ++cpu)
		if (cpu_isset(cpu, mask))
			platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
#endif
#endif
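/*
 * Illustrative sketch (not part of the original file): one way a caller
 * might kick every other online CPU via smp_send_event_check_mask().
 * The helper name example_kick_other_cpus is hypothetical.
 */
#if 0
static void example_kick_other_cpus(void)
{
	cpumask_t mask = cpu_online_map;

	/* smp_send_event_check_mask() already strips the calling CPU. */
	smp_send_event_check_mask(mask);
}
#endif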
#ifdef CONFIG_SMP	/* ifdef XEN */

/*
 * Structure and data for smp_call_function(). This is designed to minimise static memory
 * requirements. It also looks cleaner.
 */
static __cacheline_aligned DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t started;
	atomic_t finished;
};

static volatile struct call_data_struct *call_data;

#define IPI_CALL_FUNC		0
#define IPI_CPU_STOP		1

/* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
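/*
 * Illustrative sketch (not part of the original file): how the per-CPU
 * ipi_operation word is used.  A sender sets an operation bit and raises
 * IA64_IPI_VECTOR; the handler atomically claims the whole word with xchg()
 * and dispatches each set bit.  The function below only demonstrates the
 * bit encoding/decoding on a local variable.
 */
#if 0
static void example_decode_pending_ops(void)
{
	unsigned long pending = 0;
	unsigned long ops, which;

	/* Sender side: mark two operations pending. */
	set_bit(IPI_CALL_FUNC, &pending);
	set_bit(IPI_CPU_STOP, &pending);

	/* Handler side: claim all pending bits at once, then dispatch each. */
	ops = xchg(&pending, 0);
	while (ops) {
		which = ffz(~ops);	/* index of the lowest set bit */
		ops &= ~(1UL << which);
		printk("would handle IPI op %lu\n", which);
	}
}
#endif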
extern void cpu_halt (void);

void
lock_ipi_calllock(void)
{
	spin_lock_irq(&call_lock);
}

void
unlock_ipi_calllock(void)
{
	spin_unlock_irq(&call_lock);
}
static void
stop_this_cpu (void)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	max_xtp();
	local_irq_disable();
#ifndef XEN
	cpu_halt();
#endif
}

void
cpu_die(void)
{
	max_xtp();
	local_irq_disable();
#ifndef XEN
	cpu_halt();
#endif
	/* Should never be here */
	BUG();
	for (;;);
}
irqreturn_t
handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
{
	int this_cpu = get_cpu();
	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ffz(~ops);
			ops &= ~(1 << which);

			switch (which) {
			case IPI_CALL_FUNC:
			{
				struct call_data_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				/* release the 'pointer lock' */
				data = (struct call_data_struct *) call_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				mb();
				atomic_inc(&data->started);
				/*
				 * At this point the structure may be gone unless
				 * wait is true.
				 */
				(*func)(info);

				/* Notify the sending CPU that the task is done. */
				mb();
				if (wait)
					atomic_inc(&data->finished);
			}
			break;

			case IPI_CPU_STOP:
				stop_this_cpu();
				break;

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
				break;
			}
		} while (ops);
		mb();	/* Order data access and bit testing. */
	}
	put_cpu();
#ifndef XEN
	return IRQ_HANDLED;
#endif
}
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_single (int dest_cpu, int op)
{
	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_allbutself (int op)
{
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i) && i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_all (int op)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i))
			send_IPI_single(i, op);
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_self (int op)
{
	send_IPI_single(smp_processor_id(), op);
}
#ifndef XEN
/*
 * Called with preemption disabled.
 */
void
smp_send_reschedule (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}

void
smp_flush_tlb_all (void)
{
	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
}

void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	preempt_disable();
	/* this happens for the common case of a single-threaded fork(): */
	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
	{
		local_finish_flush_tlb_mm(mm);
		preempt_enable();
		return;
	}

	preempt_enable();
	/*
	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
	 * have been running in the address space. It's not clear that this is worth the
	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
	 * rather trivial.
	 */
	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
}
#endif
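/*
 * Illustrative sketch (not part of the original file) of the optimisation
 * discussed in the comment above: flush only the CPUs recorded in
 * mm->cpu_vm_mask instead of broadcasting with on_each_cpu().  The function
 * name is hypothetical and the field/helper names follow the Linux
 * conventions of this era; as the comment notes, the gain is likely marginal.
 */
#if 0
static void
smp_flush_tlb_mm_targeted (struct mm_struct *mm)
{
	int cpu, me;

	preempt_disable();
	me = smp_processor_id();

	/* Flush locally if this CPU has run in the address space. */
	if (cpu_isset(me, mm->cpu_vm_mask))
		local_finish_flush_tlb_mm(mm);

	/* Interrupt only the CPUs that have actually used this mm. */
	for_each_cpu_mask(cpu, mm->cpu_vm_mask)
		if (cpu != me)
			smp_call_function_single(cpu,
				(void (*)(void *))local_finish_flush_tlb_mm,
				mm, 1, 1);

	preempt_enable();
}
#endif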
/*
 * Run a function on another CPU
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <nonatomic>	Currently unused.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>,
 * or is executing or has already executed it.
 */
int
smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
			  int wait)
{
	struct call_data_struct data;
	int cpus = 1;
	int me = get_cpu(); /* prevent preemption and reschedule on another processor */

	if (cpuid == me) {
		printk(KERN_INFO "%s: trying to call self\n", __FUNCTION__);
		put_cpu();
		return -EBUSY;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

#ifdef XEN
	spin_lock(&call_lock);
#else
	spin_lock_bh(&call_lock);
#endif

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_single(cpuid, IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	call_data = NULL;

#ifdef XEN
	spin_unlock(&call_lock);
#else
	spin_unlock_bh(&call_lock);
#endif
	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
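/*
 * Illustrative usage only (not part of the original file): run a small
 * handler on one other CPU and wait for it to complete.  The helper names
 * example_tick and example_call_single are hypothetical.
 */
#if 0
static void example_tick(void *info)
{
	/* Runs on the target CPU in interrupt context; must not sleep. */
	atomic_inc((atomic_t *)info);
}

static void example_call_single(int target_cpu)
{
	static atomic_t count = ATOMIC_INIT(0);

	/* nonatomic is unused; wait=1 blocks until example_tick() returns. */
	if (smp_call_function_single(target_cpu, example_tick, &count, 1, 1) == 0)
		printk("CPU%d ran example_tick, count=%d\n",
		       target_cpu, atomic_read(&count));
}
#endif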
/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

/*
 *  [SUMMARY]	Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <nonatomic>	currently unused.
 *  <wait>	If true, wait (atomically) until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>,
 * or are executing or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int
smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus()-1;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
#ifdef XEN
	if (irqs_disabled()) {
		printk("smp_call_function called with interrupts disabled...");
		printk("enabling interrupts\n");
		local_irq_enable();
	}
#else
	WARN_ON(irqs_disabled());
#endif

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
#if 0 //def XEN
	printk("smp_call_function: %d lock\n", smp_processor_id ());
#endif

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_allbutself(IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	call_data = NULL;

	spin_unlock(&call_lock);
#if 0 //def XEN
	printk("smp_call_function: DONE WITH spin_unlock, returning \n");
#endif
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
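/*
 * Illustrative usage only (not part of the original file): broadcast a
 * fast, non-blocking handler to every other online CPU and wait for all
 * of them to finish.  The helper names are hypothetical.
 */
#if 0
static void example_ping(void *unused)
{
	printk("CPU%d: ping\n", smp_processor_id());
}

static void example_broadcast(void)
{
	/* Interrupts must be enabled here; example_ping() runs on every
	 * other online CPU in interrupt context. */
	smp_call_function(example_ping, NULL, 1, 1);
	example_ping(NULL);	/* do the same work locally */
}
#endif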
#ifdef XEN
int
on_selected_cpus(cpumask_t selected, void (*func) (void *info), void *info,
		 int retry, int wait)
{
	struct call_data_struct data;
	unsigned int cpu, nr_cpus = cpus_weight(selected);

	ASSERT(local_irq_is_enabled());

	if (!nr_cpus)
		return 0;

	data.func = func;
	data.info = info;
	data.wait = wait;
	atomic_set(&data.started, 0);
	atomic_set(&data.finished, 0);

	spin_lock(&call_lock);

	call_data = &data;
	wmb();

	for_each_cpu_mask(cpu, selected)
		send_IPI_single(cpu, IPI_CALL_FUNC);

	while (atomic_read(wait ? &data.finished : &data.started) != nr_cpus)
		cpu_relax();

	spin_unlock(&call_lock);

	return 0;
}
#endif
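/*
 * Illustrative usage only (not part of the original file): run a handler
 * on an explicit set of CPUs.  Unlike smp_call_function(), the caller's
 * own CPU is not filtered out of the mask by on_selected_cpus() itself.
 * The helper name is hypothetical.
 */
#if 0
static void example_on_two_cpus(void (*func)(void *), void *info)
{
	cpumask_t mask;

	cpus_clear(mask);
	if (cpu_online(1))
		cpu_set(1, mask);
	if (cpu_online(2))
		cpu_set(2, mask);

	/* retry is unused; wait=1 blocks until func finishes on both CPUs. */
	on_selected_cpus(mask, func, info, 0, 1);
}
#endif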
/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void
smp_send_stop (void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}

int __init
setup_profiling_timer (unsigned int multiplier)
{
	return -EINVAL;
}
#endif /* CONFIG_SMP ifdef XEN */