ia64/xen-unstable

xen/arch/x86/smp.c @ 9733:4613f42db780

Remove bogus extern declaration.

Signed-off-by: Keir Fraser <keir@xensource.com>
author:   kaf24@firebug.cl.cam.ac.uk
date:     Sat Apr 15 15:53:53 2006 +0100
parents:  887ff2d1e382
children: 7fd7f276bb38
/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <xen/config.h>
#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/delay.h>
#include <xen/perfc.h>
#include <xen/spinlock.h>
#include <asm/current.h>
#include <asm/smp.h>
#include <asm/mc146818rtc.h>
#include <asm/flushtlb.h>
#include <asm/smpboot.h>
#include <asm/hardirq.h>
#include <asm/ipi.h>
#include <mach_apic.h>
/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *   None of the E1AP-E3AP errata are visible to the user.
 *
 *   E1AP. see PII A1AP
 *   E2AP. see PII A2AP
 *   E3AP. see PII A3AP
 *
 * Pentium II / [Xeon]
 *   None of the A1AP-A3AP errata are visible to the user.
 *
 *   A1AP. see PPro 1AP
 *   A2AP. see PPro 2AP
 *   A3AP. see PPro 7AP
 *
 * Pentium Pro
 *   None of the 1AP-9AP errata are visible to the normal user,
 *   except occasional delivery of 'spurious interrupt' as trap #15.
 *   This is very rare and a non-problem.
 *
 *   1AP. Linux maps APIC as non-cacheable
 *   2AP. worked around in hardware
 *   3AP. fixed in C0 and above steppings microcode update.
 *        Linux does not use excessive STARTUP_IPIs.
 *   4AP. worked around in hardware
 *   5AP. symmetric IO mode (normal Linux operation) not affected.
 *        'noapic' mode has vector 0xf filled out properly.
 *   6AP. 'noapic' mode might be affected - fixed in later steppings
 *   7AP. We do not assume writes to the LVT deasserting IRQs
 *   8AP. We do not enable low power mode (deep sleep) during MP bootup
 *   9AP. We do not use mixed mode
 */
/*
 * The following functions deal with sending IPIs between CPUs.
 */

static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
    return APIC_DM_FIXED | shortcut | vector;
}

static inline int __prepare_ICR2 (unsigned int mask)
{
    return SET_APIC_DEST_FIELD(mask);
}
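
/*
 * __prepare_ICR builds the low word of the Interrupt Command Register
 * (fixed delivery mode, optional destination shortcut, vector number);
 * __prepare_ICR2 builds the destination field that is written to APIC_ICR2
 * before the APIC_ICR write actually triggers the IPI. The exact bit layout
 * comes from the APIC_DM_FIXED/SET_APIC_DEST_FIELD definitions in the APIC
 * headers.
 */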
static inline void check_IPI_mask(cpumask_t cpumask)
{
    /*
     * Sanity, and necessary. An IPI with no target generates a send accept
     * error with Pentium and P6 APICs.
     */
    ASSERT(cpus_subset(cpumask, cpu_online_map));
    ASSERT(!cpus_empty(cpumask));
}
void send_IPI_mask_flat(cpumask_t cpumask, int vector)
{
    unsigned long mask = cpus_addr(cpumask)[0];
    unsigned long cfg;
    unsigned long flags;

    check_IPI_mask(cpumask);

    local_irq_save(flags);

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR2(mask);
    apic_write_around(APIC_ICR2, cfg);

    /*
     * program the ICR
     */
    cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL;

    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write_around(APIC_ICR, cfg);

    local_irq_restore(flags);
}
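
/*
 * Flat logical destination mode encodes the target CPUs as a bitmap in the
 * ICR2 destination field, so a single ICR write can reach the whole mask.
 * That only works while every online CPU fits in the 8-bit logical bitmap;
 * larger configurations use physical mode (send_IPI_mask_phys below), one
 * unicast per target.
 */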
void send_IPI_mask_phys(cpumask_t mask, int vector)
{
    unsigned long cfg, flags;
    unsigned int query_cpu;

    check_IPI_mask(mask);

    /*
     * Hack. The clustered APIC addressing mode doesn't allow us to send
     * to an arbitrary mask, so I do a unicast to each CPU instead. This
     * should be modified to do 1 message per cluster ID - mbligh
     */

    local_irq_save(flags);

    for_each_cpu_mask( query_cpu, mask )
    {
        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /*
         * prepare target chip field
         */
        cfg = __prepare_ICR2(cpu_physical_id(query_cpu));
        apic_write_around(APIC_ICR2, cfg);

        /*
         * program the ICR
         */
        cfg = __prepare_ICR(0, vector) | APIC_DEST_PHYSICAL;

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
    }

    local_irq_restore(flags);
}
static spinlock_t flush_lock = SPIN_LOCK_UNLOCKED;
static cpumask_t flush_cpumask;
static unsigned long flush_va;
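
/*
 * TLB shootdown protocol: the initiator takes flush_lock, publishes the
 * target set in flush_cpumask and the address (or FLUSHVA_ALL) in flush_va,
 * and sends INVALIDATE_TLB_VECTOR. Each target flushes locally in
 * smp_invalidate_interrupt() and clears its own bit in flush_cpumask; the
 * initiator spins until the mask is empty before releasing the lock.
 */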
fastcall void smp_invalidate_interrupt(void)
{
    ack_APIC_irq();
    perfc_incrc(ipis);
    if ( !__sync_lazy_execstate() )
    {
        if ( flush_va == FLUSHVA_ALL )
            local_flush_tlb();
        else
            local_flush_tlb_one(flush_va);
    }
    cpu_clear(smp_processor_id(), flush_cpumask);
}
void __flush_tlb_mask(cpumask_t mask, unsigned long va)
{
    ASSERT(local_irq_is_enabled());

    if ( cpu_isset(smp_processor_id(), mask) )
    {
        local_flush_tlb();
        cpu_clear(smp_processor_id(), mask);
    }

    if ( !cpus_empty(mask) )
    {
        spin_lock(&flush_lock);
        flush_cpumask = mask;
        flush_va = va;
        send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
        while ( !cpus_empty(flush_cpumask) )
            cpu_relax();
        spin_unlock(&flush_lock);
    }
}
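
/*
 * Example (illustrative): to invalidate a single mapping on every online
 * CPU a caller could do
 *
 *     __flush_tlb_mask(cpu_online_map, va);
 *
 * or pass FLUSHVA_ALL to request a full TLB flush, as
 * new_tlbflush_clock_period() does below. Interrupts must be enabled.
 */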
/* Call with no locks held and interrupts enabled (e.g., softirq context). */
void new_tlbflush_clock_period(void)
{
    cpumask_t allbutself;

    /* Flush everyone else. We definitely flushed just before entry. */
    allbutself = cpu_online_map;
    cpu_clear(smp_processor_id(), allbutself);
    __flush_tlb_mask(allbutself, FLUSHVA_ALL);

    /* No need for atomicity: we are the only possible updater. */
    ASSERT(tlbflush_clock == 0);
    tlbflush_clock++;
}
static void flush_tlb_all_pge_ipi(void *info)
{
    local_flush_tlb_pge();
}

void flush_tlb_all_pge(void)
{
    smp_call_function(flush_tlb_all_pge_ipi, 0, 1, 1);
    local_flush_tlb_pge();
}
void smp_send_event_check_mask(cpumask_t mask)
{
    cpu_clear(smp_processor_id(), mask);
    if ( !cpus_empty(mask) )
        send_IPI_mask(mask, EVENT_CHECK_VECTOR);
}
/*
 * Structure and data for smp_call_function()/on_selected_cpus().
 */
struct call_data_struct {
    void (*func) (void *info);
    void *info;
    int wait;
    atomic_t started;
    atomic_t finished;
    cpumask_t selected;
};

static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;
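
/*
 * Rendezvous protocol: the caller fills in a call_data_struct on its stack,
 * points call_data at it under call_lock, and sends CALL_FUNCTION_VECTOR.
 * When wait != 0, each selected CPU runs func(info) and then increments
 * 'finished'; otherwise it increments 'started' before running func, so the
 * caller only has to wait until the structure is safe to reuse. The caller
 * spins on the relevant counter before dropping call_lock.
 */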
int smp_call_function(
    void (*func) (void *info),
    void *info,
    int retry,
    int wait)
{
    cpumask_t allbutself = cpu_online_map;
    cpu_clear(smp_processor_id(), allbutself);
    return on_selected_cpus(allbutself, func, info, retry, wait);
}
int on_selected_cpus(
    cpumask_t selected,
    void (*func) (void *info),
    void *info,
    int retry,
    int wait)
{
    struct call_data_struct data;
    unsigned int nr_cpus = cpus_weight(selected);

    ASSERT(local_irq_is_enabled());

    if ( nr_cpus == 0 )
        return 0;

    data.func = func;
    data.info = info;
    data.wait = wait;
    atomic_set(&data.started, 0);
    atomic_set(&data.finished, 0);
    data.selected = selected;

    spin_lock(&call_lock);

    call_data = &data;
    wmb();

    send_IPI_mask(selected, CALL_FUNCTION_VECTOR);

    while ( atomic_read(wait ? &data.finished : &data.started) != nr_cpus )
        cpu_relax();

    spin_unlock(&call_lock);

    return 0;
}
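
/*
 * Example (illustrative): running a function on every CPU except the
 * current one, waiting for completion, might look like
 *
 *     static void drain_something(void *unused) { ... }
 *     ...
 *     smp_call_function(drain_something, NULL, 1, 1);
 *
 * while on_selected_cpus() takes an explicit cpumask_t for finer control.
 */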
static void stop_this_cpu (void *dummy)
{
    clear_bit(smp_processor_id(), &cpu_online_map);

    disable_local_APIC();

    for ( ; ; )
        __asm__ __volatile__ ( "hlt" );
}

void smp_send_stop(void)
{
    /* Stop all other CPUs in the system. */
    smp_call_function(stop_this_cpu, NULL, 1, 0);

    local_irq_disable();
    disable_local_APIC();
    local_irq_enable();
}
fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
{
    ack_APIC_irq();
    perfc_incrc(ipis);
}
fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
{
    void (*func)(void *info) = call_data->func;
    void *info = call_data->info;

    ack_APIC_irq();
    perfc_incrc(ipis);

    if ( !cpu_isset(smp_processor_id(), call_data->selected) )
        return;

    if ( call_data->wait )
    {
        /* The caller waits for us: run func before signalling completion. */
        (*func)(info);
        mb();
        atomic_inc(&call_data->finished);
    }
    else
    {
        /*
         * The caller only waits for 'started': signal that we have read
         * call_data before running func, after which the caller may let its
         * stack-based call_data_struct go out of scope.
         */
        mb();
        atomic_inc(&call_data->started);
        (*func)(info);
    }
}