ia64/xen-unstable: xen/arch/x86/smp.c @ 15867:aaae02dbe269

x86: Handle 'self-IPI' on legacy UP systems with no APIC.
Signed-off-by: Keir Fraser <keir@xensource.com>

author   kfraser@localhost.localdomain
date     Mon Sep 10 17:49:58 2007 +0100
parents  e704430b5b32
children 154769114a82
/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <xen/config.h>
#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/delay.h>
#include <xen/perfc.h>
#include <xen/spinlock.h>
#include <asm/current.h>
#include <asm/smp.h>
#include <asm/mc146818rtc.h>
#include <asm/flushtlb.h>
#include <asm/smpboot.h>
#include <asm/hardirq.h>
#include <asm/ipi.h>
#include <asm/hvm/support.h>
#include <mach_apic.h>
/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *     None of the E1AP-E3AP errata are visible to the user.
 *
 * E1AP. see PII A1AP
 * E2AP. see PII A2AP
 * E3AP. see PII A3AP
 *
 * Pentium II / [Xeon]
 *     None of the A1AP-A3AP errata are visible to the user.
 *
 * A1AP. see PPro 1AP
 * A2AP. see PPro 2AP
 * A3AP. see PPro 7AP
 *
 * Pentium Pro
 *     None of 1AP-9AP errata are visible to the normal user,
 *     except occasional delivery of 'spurious interrupt' as trap #15.
 *     This is very rare and a non-problem.
 *
 * 1AP. Linux maps APIC as non-cacheable
 * 2AP. worked around in hardware
 * 3AP. fixed in C0 and above steppings microcode update.
 *      Linux does not use excessive STARTUP_IPIs.
 * 4AP. worked around in hardware
 * 5AP. symmetric IO mode (normal Linux operation) not affected.
 *      'noapic' mode has vector 0xf filled out properly.
 * 6AP. 'noapic' mode might be affected - fixed in later steppings
 * 7AP. We do not assume writes to the LVT deasserting IRQs
 * 8AP. We do not enable low power mode (deep sleep) during MP bootup
 * 9AP. We do not use mixed mode
 */
/*
 * The following functions deal with sending IPIs between CPUs.
 */

static inline int __prepare_ICR(unsigned int shortcut, int vector)
{
    return APIC_DM_FIXED | shortcut | vector;
}

static inline int __prepare_ICR2(unsigned int mask)
{
    return SET_APIC_DEST_FIELD(mask);
}

static inline void check_IPI_mask(cpumask_t cpumask)
{
    /*
     * Sanity, and necessary. An IPI with no target generates a send accept
     * error with Pentium and P6 APICs.
     */
    ASSERT(cpus_subset(cpumask, cpu_online_map));
    ASSERT(!cpus_empty(cpumask));
}
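
/*
 * Both senders below follow the same local APIC programming sequence: wait
 * for any previous command to leave the ICR (apic_wait_icr_idle()), write
 * the destination into ICR2, then write the delivery mode, destination mode
 * and vector into ICR, which is what actually fires off the IPI.
 */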
void send_IPI_mask_flat(cpumask_t cpumask, int vector)
{
    unsigned long mask = cpus_addr(cpumask)[0];
    unsigned long cfg;
    unsigned long flags;

    check_IPI_mask(cpumask);

    local_irq_save(flags);

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR2(mask);
    apic_write_around(APIC_ICR2, cfg);

    /*
     * program the ICR
     */
    cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL;

    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write_around(APIC_ICR, cfg);

    local_irq_restore(flags);
}
void send_IPI_mask_phys(cpumask_t mask, int vector)
{
    unsigned long cfg, flags;
    unsigned int query_cpu;

    check_IPI_mask(mask);

    /*
     * Hack. The clustered APIC addressing mode doesn't allow us to send
     * to an arbitrary mask, so we do unicasts to each CPU instead. This
     * should be modified to do 1 message per cluster ID - mbligh
     */

    local_irq_save(flags);

    for_each_cpu_mask( query_cpu, mask )
    {
        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /*
         * prepare target chip field
         */
        cfg = __prepare_ICR2(cpu_physical_id(query_cpu));
        apic_write_around(APIC_ICR2, cfg);

        /*
         * program the ICR
         */
        cfg = __prepare_ICR(0, vector) | APIC_DEST_PHYSICAL;

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
    }

    local_irq_restore(flags);
}
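
/*
 * Remote TLB shootdown: the initiating CPU takes flush_lock, publishes the
 * target mask and virtual address in flush_cpumask/flush_va, sends
 * INVALIDATE_TLB_VECTOR, and spins until every target has removed itself
 * from flush_cpumask in smp_invalidate_interrupt() below.
 */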
static DEFINE_SPINLOCK(flush_lock);
static cpumask_t flush_cpumask;
static unsigned long flush_va;

fastcall void smp_invalidate_interrupt(void)
{
    ack_APIC_irq();
    perfc_incr(ipis);
    irq_enter();
    if ( !__sync_lazy_execstate() )
    {
        if ( flush_va == FLUSHVA_ALL )
            local_flush_tlb();
        else
            local_flush_tlb_one(flush_va);
    }
    cpu_clear(smp_processor_id(), flush_cpumask);
    irq_exit();
}

void __flush_tlb_mask(cpumask_t mask, unsigned long va)
{
    ASSERT(local_irq_is_enabled());

    if ( cpu_isset(smp_processor_id(), mask) )
    {
        if ( va == FLUSHVA_ALL )
            local_flush_tlb();
        else
            local_flush_tlb_one(va);
        cpu_clear(smp_processor_id(), mask);
    }

    if ( !cpus_empty(mask) )
    {
        spin_lock(&flush_lock);
        flush_cpumask = mask;
        flush_va = va;
        send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
        while ( !cpus_empty(flush_cpumask) )
            cpu_relax();
        spin_unlock(&flush_lock);
    }
}

/* Call with no locks held and interrupts enabled (e.g., softirq context). */
void new_tlbflush_clock_period(void)
{
    cpumask_t allbutself;

    /* Flush everyone else. We definitely flushed just before entry. */
    allbutself = cpu_online_map;
    cpu_clear(smp_processor_id(), allbutself);
    __flush_tlb_mask(allbutself, FLUSHVA_ALL);

    /* No need for atomicity: we are the only possible updater. */
    ASSERT(tlbflush_clock == 0);
    tlbflush_clock++;
}

static void flush_tlb_all_pge_ipi(void *info)
{
    local_flush_tlb_pge();
}
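
/* Flush the TLBs of all online CPUs, including global (PGE) mappings. */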
void flush_tlb_all_pge(void)
{
    smp_call_function(flush_tlb_all_pge_ipi, 0, 1, 1);
    local_flush_tlb_pge();
}
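
/*
 * Prod the CPUs in @mask with EVENT_CHECK_VECTOR. The handler for that
 * vector merely ACKs the interrupt; the useful effect is simply that the
 * target CPU is interrupted and notices any pending work on its exit path.
 */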
void smp_send_event_check_mask(cpumask_t mask)
{
    cpu_clear(smp_processor_id(), mask);
    if ( !cpus_empty(mask) )
        send_IPI_mask(mask, EVENT_CHECK_VECTOR);
}

/*
 * Structure and data for smp_call_function()/on_selected_cpus().
 */

struct call_data_struct {
    void (*func) (void *info);
    void *info;
    int wait;
    atomic_t started;
    atomic_t finished;
    cpumask_t selected;
};

static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;
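
/*
 * Cross-CPU call protocol: the caller fills in a call_data_struct on its
 * own stack, publishes it via call_data under call_lock, and sends
 * CALL_FUNCTION_VECTOR. Each selected CPU runs func from
 * smp_call_function_interrupt() and bumps 'started' (and, if the caller
 * asked to wait, 'finished'); the caller spins on the appropriate counter
 * before dropping call_lock.
 */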
int smp_call_function(
    void (*func) (void *info),
    void *info,
    int retry,
    int wait)
{
    cpumask_t allbutself = cpu_online_map;
    cpu_clear(smp_processor_id(), allbutself);
    return on_selected_cpus(allbutself, func, info, retry, wait);
}

int on_selected_cpus(
    cpumask_t selected,
    void (*func) (void *info),
    void *info,
    int retry,
    int wait)
{
    struct call_data_struct data;
    unsigned int nr_cpus = cpus_weight(selected);

    ASSERT(local_irq_is_enabled());

    /* Legacy UP system with no APIC to deliver IPIs? */
    if ( unlikely(!cpu_has_apic) )
    {
        ASSERT(num_online_cpus() == 1);
        if ( cpu_isset(0, selected) )
        {
            local_irq_disable();
            func(info);
            local_irq_enable();
        }
        return 0;
    }

    if ( nr_cpus == 0 )
        return 0;

    data.func = func;
    data.info = info;
    data.wait = wait;
    atomic_set(&data.started, 0);
    atomic_set(&data.finished, 0);
    data.selected = selected;

    spin_lock(&call_lock);

    call_data = &data;
    wmb();

    send_IPI_mask(selected, CALL_FUNCTION_VECTOR);

    while ( atomic_read(wait ? &data.finished : &data.started) != nr_cpus )
        cpu_relax();

    spin_unlock(&call_lock);

    return 0;
}
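
/*
 * Illustrative usage (a sketch, not part of this file): a caller that wants
 * a hypothetical handler do_work() run on every other online CPU, waiting
 * for all of them to finish, could do:
 *
 *     static void do_work(void *unused) { ... }
 *
 *     smp_call_function(do_work, NULL, 1, 1);
 *
 * The 'retry' argument is accepted but not examined by on_selected_cpus()
 * above, and both entry points currently always return 0.
 */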

static void stop_this_cpu(void *dummy)
{
    cpu_clear(smp_processor_id(), cpu_online_map);

    local_irq_disable();
    disable_local_APIC();
    hvm_cpu_down();

    for ( ; ; )
        __asm__ __volatile__ ( "hlt" );
}

void smp_send_stop(void)
{
    /* Stop all other CPUs in the system. */
    smp_call_function(stop_this_cpu, NULL, 1, 0);

    local_irq_disable();
    disable_local_APIC();
    local_irq_enable();
}

fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
{
    ack_APIC_irq();
    perfc_incr(ipis);
}
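
/*
 * Handler for CALL_FUNCTION_VECTOR. Ordering matters: when the initiator
 * asked to wait, func runs before 'finished' is incremented; otherwise
 * 'started' is incremented first, so the initiator (and its stack-based
 * call_data) may already be gone by the time func runs.
 */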
fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
{
    void (*func)(void *info) = call_data->func;
    void *info = call_data->info;

    ack_APIC_irq();
    perfc_incr(ipis);

    if ( !cpu_isset(smp_processor_id(), call_data->selected) )
        return;

    irq_enter();

    if ( call_data->wait )
    {
        (*func)(info);
        mb();
        atomic_inc(&call_data->finished);
    }
    else
    {
        mb();
        atomic_inc(&call_data->started);
        (*func)(info);
    }

    irq_exit();
}