ia64/xen-unstable

view xen/arch/x86/smp.c @ 19835:edfdeb150f27

Fix buildsystem to detect udev > version 124

udev removed the udevinfo symlink from versions higher than 123 and
xen's build-system could not detect if udev is in place and has the
required version.

Signed-off-by: Marc-A. Dahlhaus <mad@wol.de>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 25 13:02:37 2009 +0100 (2009-06-25)
parents 0e111bfd22d0
children
/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <xen/config.h>
#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/delay.h>
#include <xen/perfc.h>
#include <xen/spinlock.h>
#include <asm/current.h>
#include <asm/smp.h>
#include <asm/mc146818rtc.h>
#include <asm/flushtlb.h>
#include <asm/hardirq.h>
#include <asm/hvm/support.h>
#include <mach_apic.h>

/*
 * send_IPI_mask(cpumask, vector): sends @vector IPI to CPUs in @cpumask,
 * excluding the local CPU. @cpumask may be empty.
 */
#define send_IPI_mask (genapic->send_IPI_mask)
/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *   None of the E1AP-E3AP errata are visible to the user.
 *
 *   E1AP.  see PII A1AP
 *   E2AP.  see PII A2AP
 *   E3AP.  see PII A3AP
 *
 * Pentium II / [Xeon]
 *   None of the A1AP-A3AP errata are visible to the user.
 *
 *   A1AP.  see PPro 1AP
 *   A2AP.  see PPro 2AP
 *   A3AP.  see PPro 7AP
 *
 * Pentium Pro
 *   None of 1AP-9AP errata are visible to the normal user,
 *   except occasional delivery of 'spurious interrupt' as trap #15.
 *   This is very rare and a non-problem.
 *
 *   1AP.  Linux maps APIC as non-cacheable
 *   2AP.  worked around in hardware
 *   3AP.  fixed in C0 and above steppings microcode update.
 *         Linux does not use excessive STARTUP_IPIs.
 *   4AP.  worked around in hardware
 *   5AP.  symmetric IO mode (normal Linux operation) not affected.
 *         'noapic' mode has vector 0xf filled out properly.
 *   6AP.  'noapic' mode might be affected - fixed in later steppings
 *   7AP.  We do not assume writes to the LVT deasserting IRQs
 *   8AP.  We do not enable low power mode (deep sleep) during MP bootup
 *   9AP.  We do not use mixed mode
 */
/*
 * The following functions deal with sending IPIs between CPUs.
 */

static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
    return APIC_DM_FIXED | shortcut | vector;
}

static inline int __prepare_ICR2 (unsigned int mask)
{
    return SET_xAPIC_DEST_FIELD(mask);
}

void apic_wait_icr_idle(void)
{
    if ( x2apic_enabled )
        return;

    while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY )
        cpu_relax();
}

void send_IPI_mask_flat(const cpumask_t *cpumask, int vector)
{
    unsigned long mask = cpus_addr(*cpumask)[0];
    unsigned long cfg;
    unsigned long flags;

    mask &= ~(1UL << smp_processor_id());
    if ( mask == 0 )
        return;

    local_irq_save(flags);

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR2(mask);
    apic_write_around(APIC_ICR2, cfg);

    /*
     * program the ICR
     */
    cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL;

    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write_around(APIC_ICR, cfg);

    local_irq_restore(flags);
}

void send_IPI_mask_phys(const cpumask_t *mask, int vector)
{
    unsigned long cfg, flags;
    unsigned int query_cpu;

    local_irq_save(flags);

    for_each_cpu_mask ( query_cpu, *mask )
    {
        if ( query_cpu == smp_processor_id() )
            continue;

        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /*
         * prepare target chip field
         */
        cfg = __prepare_ICR2(cpu_physical_id(query_cpu));
        apic_write_around(APIC_ICR2, cfg);

        /*
         * program the ICR
         */
        cfg = __prepare_ICR(0, vector) | APIC_DEST_PHYSICAL;

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
    }

    local_irq_restore(flags);
}
static DEFINE_SPINLOCK(flush_lock);
static cpumask_t flush_cpumask;
static const void *flush_va;
static unsigned int flush_flags;

fastcall void smp_invalidate_interrupt(void)
{
    ack_APIC_irq();
    perfc_incr(ipis);
    irq_enter();
    if ( !__sync_lazy_execstate() ||
         (flush_flags & (FLUSH_TLB_GLOBAL | FLUSH_CACHE)) )
        flush_area_local(flush_va, flush_flags);
    cpu_clear(smp_processor_id(), flush_cpumask);
    irq_exit();
}

void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
{
    ASSERT(local_irq_is_enabled());

    if ( cpu_isset(smp_processor_id(), *mask) )
        flush_area_local(va, flags);

    if ( !cpus_subset(*mask, *cpumask_of(smp_processor_id())) )
    {
        spin_lock(&flush_lock);
        cpus_andnot(flush_cpumask, *mask, *cpumask_of(smp_processor_id()));
        flush_va = va;
        flush_flags = flags;
        send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR);
        while ( !cpus_empty(flush_cpumask) )
            cpu_relax();
        spin_unlock(&flush_lock);
    }
}
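/*
 * Illustrative sketch, not part of the original file: how a caller might
 * request a single-address TLB flush on every online CPU via
 * flush_area_mask() above.  The helper name example_flush_one_page() is
 * hypothetical; FLUSH_TLB and cpu_online_map are taken from elsewhere in
 * this file, and a real caller may also need to encode a flush order or
 * other bits in the flags argument.
 */
static void example_flush_one_page(const void *va)
{
    /* Flushes the local CPU directly and IPIs all other online CPUs. */
    flush_area_mask(&cpu_online_map, va, FLUSH_TLB);
}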
/* Call with no locks held and interrupts enabled (e.g., softirq context). */
void new_tlbflush_clock_period(void)
{
    cpumask_t allbutself;

    /* Flush everyone else. We definitely flushed just before entry. */
    allbutself = cpu_online_map;
    cpu_clear(smp_processor_id(), allbutself);
    flush_mask(&allbutself, FLUSH_TLB);

    /* No need for atomicity: we are the only possible updater. */
    ASSERT(tlbflush_clock == 0);
    tlbflush_clock++;
}

void smp_send_event_check_mask(const cpumask_t *mask)
{
    send_IPI_mask(mask, EVENT_CHECK_VECTOR);
}

/*
 * Structure and data for smp_call_function()/on_selected_cpus().
 */

static void __smp_call_function_interrupt(void);
static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct {
    void (*func) (void *info);
    void *info;
    int wait;
    atomic_t started;
    atomic_t finished;
    cpumask_t selected;
} call_data;
int smp_call_function(
    void (*func) (void *info),
    void *info,
    int wait)
{
    cpumask_t allbutself = cpu_online_map;
    cpu_clear(smp_processor_id(), allbutself);
    return on_selected_cpus(&allbutself, func, info, wait);
}

int on_selected_cpus(
    const cpumask_t *selected,
    void (*func) (void *info),
    void *info,
    int wait)
{
    unsigned int nr_cpus;

    ASSERT(local_irq_is_enabled());

    spin_lock(&call_lock);

    call_data.selected = *selected;

    nr_cpus = cpus_weight(call_data.selected);
    if ( nr_cpus == 0 )
        goto out;

    call_data.func = func;
    call_data.info = info;
    call_data.wait = wait;
    atomic_set(&call_data.started, 0);
    atomic_set(&call_data.finished, 0);

    send_IPI_mask(&call_data.selected, CALL_FUNCTION_VECTOR);

    if ( cpu_isset(smp_processor_id(), call_data.selected) )
    {
        local_irq_disable();
        __smp_call_function_interrupt();
        local_irq_enable();
    }

    while ( atomic_read(wait ? &call_data.finished : &call_data.started)
            != nr_cpus )
        cpu_relax();

 out:
    spin_unlock(&call_lock);
    return 0;
}
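/*
 * Illustrative sketch, not part of the original file: using
 * smp_call_function() above to run a callback on every online CPU except
 * the caller and wait for completion.  Both example_drain_state() and
 * example_run_on_others() are hypothetical names invented for this
 * example.
 */
static void example_drain_state(void *unused)
{
    /* Per-CPU work would go here; it runs in IPI (irq) context. */
}

static void example_run_on_others(void)
{
    /* wait=1: do not return until every selected CPU has run the callback. */
    smp_call_function(example_drain_state, NULL, 1);
}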
static void __stop_this_cpu(void)
{
    ASSERT(!local_irq_is_enabled());

    disable_local_APIC();

    hvm_cpu_down();

    /*
     * Clear FPU, zapping any pending exceptions. Needed for warm reset with
     * some BIOSes.
     */
    clts();
    asm volatile ( "fninit" );
}

static void stop_this_cpu(void *dummy)
{
    __stop_this_cpu();
    cpu_clear(smp_processor_id(), cpu_online_map);
    for ( ; ; )
        halt();
}

/*
 * Stop all CPUs and turn off local APICs and the IO-APIC, so other OSs see a
 * clean IRQ state.
 */
void smp_send_stop(void)
{
    int timeout = 10;

    smp_call_function(stop_this_cpu, NULL, 0);

    /* Wait 10ms for all other CPUs to go offline. */
    while ( (num_online_cpus() > 1) && (timeout-- > 0) )
        mdelay(1);

    local_irq_disable();
    __stop_this_cpu();
    disable_IO_APIC();
    local_irq_enable();
}

void smp_send_nmi_allbutself(void)
{
    send_IPI_mask(&cpu_online_map, APIC_DM_NMI);
}

fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
{
    ack_APIC_irq();
    perfc_incr(ipis);
}

static void __smp_call_function_interrupt(void)
{
    void (*func)(void *info) = call_data.func;
    void *info = call_data.info;

    if ( !cpu_isset(smp_processor_id(), call_data.selected) )
        return;

    irq_enter();

    if ( call_data.wait )
    {
        (*func)(info);
        mb();
        atomic_inc(&call_data.finished);
    }
    else
    {
        mb();
        atomic_inc(&call_data.started);
        (*func)(info);
    }

    irq_exit();
}

fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
{
    ack_APIC_irq();
    perfc_incr(ipis);
    __smp_call_function_interrupt();
}