xen/arch/x86/smp.c @ changeset 18806:ed8524f4a044 (ia64/xen-unstable)

x86: Re-initialise HPET on resume from S3

Signed-off-by: Guanqun Lu <guanqun.lu@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

Author:   Keir Fraser <keir.fraser@citrix.com>
Date:     Tue Nov 18 15:55:14 2008 +0000
Parent:   50170dc8649c
Children: 822ea2bf0c54
/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <xen/config.h>
#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/delay.h>
#include <xen/perfc.h>
#include <xen/spinlock.h>
#include <asm/current.h>
#include <asm/smp.h>
#include <asm/mc146818rtc.h>
#include <asm/flushtlb.h>
#include <asm/hardirq.h>
#include <asm/ipi.h>
#include <asm/hvm/support.h>
#include <mach_apic.h>

/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *   None of the E1AP-E3AP errata are visible to the user.
 *
 * E1AP. see PII A1AP
 * E2AP. see PII A2AP
 * E3AP. see PII A3AP
 *
 * Pentium II / [Xeon]
 *   None of the A1AP-A3AP errata are visible to the user.
 *
 * A1AP. see PPro 1AP
 * A2AP. see PPro 2AP
 * A3AP. see PPro 7AP
 *
 * Pentium Pro
 *   None of the 1AP-9AP errata are visible to the normal user, except for
 *   occasional delivery of a 'spurious interrupt' as trap #15. This is very
 *   rare and a non-problem.
 *
 * 1AP. Linux maps the APIC as non-cacheable.
 * 2AP. Worked around in hardware.
 * 3AP. Fixed by microcode update in C0 and later steppings.
 *      Linux does not use excessive STARTUP_IPIs.
 * 4AP. Worked around in hardware.
 * 5AP. Symmetric IO mode (normal Linux operation) is not affected.
 *      'noapic' mode has vector 0xf filled out properly.
 * 6AP. 'noapic' mode might be affected - fixed in later steppings.
 * 7AP. We do not assume writes to the LVT deasserting IRQs.
 * 8AP. We do not enable low power mode (deep sleep) during MP bootup.
 * 9AP. We do not use mixed mode.
 */

/*
 * The following functions deal with sending IPIs between CPUs.
 */

static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
    return APIC_DM_FIXED | shortcut | vector;
}

static inline int __prepare_ICR2 (unsigned int mask)
{
    return SET_xAPIC_DEST_FIELD(mask);
}
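
/*
 * In x2APIC mode the ICR is accessed via MSR and has no delivery-status bit
 * to poll, so there is nothing to wait for; the busy-wait below only applies
 * to the memory-mapped xAPIC interface.
 */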
void apic_wait_icr_idle(void)
{
    if ( x2apic_enabled )
        return;

    while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY )
        cpu_relax();
}
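
/*
 * send_IPI_mask_flat() uses flat logical destination mode: the low word of
 * the CPU mask is written into ICR2 as a logical destination bitmap, so one
 * ICR write can target several CPUs at once (flat logical mode addresses at
 * most 8 local APICs).
 */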
void send_IPI_mask_flat(cpumask_t cpumask, int vector)
{
    unsigned long mask = cpus_addr(cpumask)[0];
    unsigned long cfg;
    unsigned long flags;

    /* An IPI with no target generates a send accept error from P5/P6 APICs. */
    WARN_ON(mask == 0);

    local_irq_save(flags);

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR2(mask);
    apic_write_around(APIC_ICR2, cfg);

    /*
     * program the ICR
     */
    cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL;

    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write_around(APIC_ICR, cfg);

    local_irq_restore(flags);
}
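
/*
 * send_IPI_mask_phys() uses physical destination mode instead: the APIC ID
 * of each target CPU is programmed into ICR2 and a separate IPI is sent per
 * CPU, which works beyond the 8-CPU limit of flat logical mode.
 */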
void send_IPI_mask_phys(cpumask_t mask, int vector)
{
    unsigned long cfg, flags;
    unsigned int query_cpu;

    local_irq_save(flags);

    for_each_cpu_mask ( query_cpu, mask )
    {
        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /*
         * prepare target chip field
         */
        cfg = __prepare_ICR2(cpu_physical_id(query_cpu));
        apic_write_around(APIC_ICR2, cfg);

        /*
         * program the ICR
         */
        cfg = __prepare_ICR(0, vector) | APIC_DEST_PHYSICAL;

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
    }

    local_irq_restore(flags);
}
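
/*
 * Shared state for the remote TLB/cache flush protocol: flush_area_mask()
 * (the initiator) takes flush_lock, fills in these variables and sends
 * INVALIDATE_TLB_VECTOR; each target CPU performs the flush in
 * smp_invalidate_interrupt() and acknowledges by clearing its bit in
 * flush_cpumask.
 */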
static DEFINE_SPINLOCK(flush_lock);
static cpumask_t flush_cpumask;
static const void *flush_va;
static unsigned int flush_flags;

fastcall void smp_invalidate_interrupt(void)
{
    ack_APIC_irq();
    perfc_incr(ipis);
    irq_enter();
    if ( !__sync_lazy_execstate() ||
         (flush_flags & (FLUSH_TLB_GLOBAL | FLUSH_CACHE)) )
        flush_area_local(flush_va, flush_flags);
    cpu_clear(smp_processor_id(), flush_cpumask);
    irq_exit();
}
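
/*
 * flush_area_mask() must be called with interrupts enabled: while the
 * initiator spins waiting for acknowledgements it must still be able to
 * service flush IPIs sent by other CPUs, otherwise two CPUs flushing each
 * other concurrently could deadlock.
 */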
void flush_area_mask(cpumask_t mask, const void *va, unsigned int flags)
{
    ASSERT(local_irq_is_enabled());

    if ( cpu_isset(smp_processor_id(), mask) )
    {
        flush_area_local(va, flags);
        cpu_clear(smp_processor_id(), mask);
    }

    if ( !cpus_empty(mask) )
    {
        spin_lock(&flush_lock);
        flush_cpumask = mask;
        flush_va      = va;
        flush_flags   = flags;
        send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
        while ( !cpus_empty(flush_cpumask) )
            cpu_relax();
        spin_unlock(&flush_lock);
    }
}

/* Call with no locks held and interrupts enabled (e.g., softirq context). */
void new_tlbflush_clock_period(void)
{
    cpumask_t allbutself;

    /* Flush everyone else. We definitely flushed just before entry. */
    allbutself = cpu_online_map;
    cpu_clear(smp_processor_id(), allbutself);
    flush_mask(allbutself, FLUSH_TLB);

    /* No need for atomicity: we are the only possible updater. */
    ASSERT(tlbflush_clock == 0);
    tlbflush_clock++;
}

void smp_send_event_check_mask(cpumask_t mask)
{
    cpu_clear(smp_processor_id(), mask);
    if ( !cpus_empty(mask) )
        send_IPI_mask(mask, EVENT_CHECK_VECTOR);
}

/*
 * Structure and data for smp_call_function()/on_selected_cpus().
 */
struct call_data_struct {
    void (*func) (void *info);
    void *info;
    int wait;
    atomic_t started;
    atomic_t finished;
    cpumask_t selected;
};
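
/*
 * In the wait case each selected CPU runs func and then increments
 * 'finished'; in the no-wait case it increments 'started' before calling
 * func, so the initiator may return (and the on-stack call data may go out
 * of scope) without waiting for func to complete.
 */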

static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;

int smp_call_function(
    void (*func) (void *info),
    void *info,
    int retry,
    int wait)
{
    cpumask_t allbutself = cpu_online_map;
    cpu_clear(smp_processor_id(), allbutself);
    return on_selected_cpus(allbutself, func, info, retry, wait);
}

int on_selected_cpus(
    cpumask_t selected,
    void (*func) (void *info),
    void *info,
    int retry,
    int wait)
{
    struct call_data_struct data;
    unsigned int nr_cpus = cpus_weight(selected);

    ASSERT(local_irq_is_enabled());

    /* Legacy UP system with no APIC to deliver IPIs? */
    if ( unlikely(!cpu_has_apic) )
    {
        ASSERT(num_online_cpus() == 1);
        if ( cpu_isset(0, selected) )
        {
            local_irq_disable();
            func(info);
            local_irq_enable();
        }
        return 0;
    }

    if ( nr_cpus == 0 )
        return 0;

    data.func = func;
    data.info = info;
    data.wait = wait;
    atomic_set(&data.started, 0);
    atomic_set(&data.finished, 0);
    data.selected = selected;

    spin_lock(&call_lock);

    call_data = &data;

    send_IPI_mask(selected, CALL_FUNCTION_VECTOR);

    while ( atomic_read(wait ? &data.finished : &data.started) != nr_cpus )
        cpu_relax();

    spin_unlock(&call_lock);

    return 0;
}
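
/*
 * Illustrative usage sketch (not part of this file): to run a hypothetical
 * helper on every online CPU except the caller and wait for it to finish on
 * all of them:
 *
 *     static void drain_something(void *unused) { ... }
 *     ...
 *     smp_call_function(drain_something, NULL, 0, 1);
 *
 * The callback runs in IRQ context on the remote CPUs, so it must be short
 * and must not spin on locks that those CPUs' interrupted contexts may hold.
 */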

static void __stop_this_cpu(void)
{
    ASSERT(!local_irq_is_enabled());

    disable_local_APIC();

    hvm_cpu_down();

    /*
     * Clear FPU, zapping any pending exceptions. Needed for warm reset with
     * some BIOSes.
     */
    clts();
    asm volatile ( "fninit" );
}

static void stop_this_cpu(void *dummy)
{
    __stop_this_cpu();
    cpu_clear(smp_processor_id(), cpu_online_map);
    for ( ; ; )
        halt();
}

/*
 * Stop all CPUs and turn off local APICs and the IO-APIC, so other OSs see a
 * clean IRQ state.
 */
void smp_send_stop(void)
{
    int timeout = 10;

    smp_call_function(stop_this_cpu, NULL, 1, 0);

    /* Wait 10ms for all other CPUs to go offline. */
    while ( (num_online_cpus() > 1) && (timeout-- > 0) )
        mdelay(1);

    local_irq_disable();
    __stop_this_cpu();
    disable_IO_APIC();
    local_irq_enable();
}

fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
{
    ack_APIC_irq();
    perfc_incr(ipis);
}
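
/*
 * Handler for CALL_FUNCTION_VECTOR. func and info are copied into locals
 * straight away because, in the no-wait case, the initiator may return (and
 * the stack-based structure that call_data points at may disappear) as soon
 * as every selected CPU has bumped 'started'.
 */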
fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
{
    void (*func)(void *info) = call_data->func;
    void *info = call_data->info;

    ack_APIC_irq();
    perfc_incr(ipis);

    if ( !cpu_isset(smp_processor_id(), call_data->selected) )
        return;

    irq_enter();

    if ( call_data->wait )
    {
        (*func)(info);
        mb();
        atomic_inc(&call_data->finished);
    }
    else
    {
        mb();
        atomic_inc(&call_data->started);
        (*func)(info);
    }

    irq_exit();
}