ia64/xen-unstable: view of xen/arch/x86/hpet.c @ 18715:5bca96f74d59

hpet cpuidle: Observe IRQ-safe locking protocol.

Otherwise, we may encounter deadlock.

Signed-off-by: Wei Gang <gang.wei@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Oct 23 12:01:03 2008 +0100 (2008-10-23)
parents d8ce41b79ecc
children f12d9595d07c
/******************************************************************************
 * arch/x86/hpet.c
 *
 * HPET management.
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/smp.h>
#include <xen/softirq.h>
#include <asm/fixmap.h>
#include <asm/div64.h>
#include <asm/hpet.h>

#define STIME_MAX ((s_time_t)((uint64_t)~0ull>>1))

#define MAX_DELTA_NS MILLISECS(10*1000)
#define MIN_DELTA_NS MICROSECS(20)

struct hpet_event_channel
{
    unsigned long mult;
    int           shift;
    s_time_t      next_event;
    cpumask_t     cpumask;
    spinlock_t    lock;
    void          (*event_handler)(struct hpet_event_channel *);
};
static struct hpet_event_channel hpet_event;

unsigned long hpet_address;

/*
 * Calculate a multiplication factor for scaled math, which is used to convert
 * nanoseconds based values to clock ticks:
 *
 * clock_ticks = (nanoseconds * factor) >> shift.
 *
 * div_sc is the rearranged equation to calculate a factor from a given clock
 * ticks / nanoseconds ratio:
 *
 * factor = (clock_ticks << shift) / nanoseconds
 */
static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
                                   int shift)
{
    uint64_t tmp = ((uint64_t)ticks) << shift;

    do_div(tmp, nsec);
    return (unsigned long) tmp;
}
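
/*
 * Illustrative example (values assumed for exposition, not taken from the
 * code above): a 14.31818MHz HPET ticks 14318180 times per 10^9 ns, so
 * div_sc(14318180, 1000000000, 32) = (14318180 << 32) / 10^9 ~= 6.15e7.
 */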

/*
 * Convert nanoseconds based values to clock ticks:
 *
 * clock_ticks = (nanoseconds * factor) >> shift.
 */
static inline unsigned long ns2ticks(unsigned long nsec, int shift,
                                     unsigned long factor)
{
    uint64_t tmp = ((uint64_t)nsec * factor) >> shift;

    return (unsigned long) tmp;
}
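
/*
 * Continuing the assumed example above: with factor ~= 6.15e7 and shift = 32,
 * ns2ticks(1000, 32, factor) = (1000 * 6.15e7) >> 32 ~= 14, i.e. 1us is
 * about 14 ticks on a 14.31818MHz HPET.
 */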

static int hpet_legacy_next_event(unsigned long delta)
{
    uint32_t cnt, cmp;
    unsigned long flags;

    local_irq_save(flags);
    cnt = hpet_read32(HPET_COUNTER);
    cmp = cnt + delta;
    hpet_write32(cmp, HPET_T0_CMP);
    cmp = hpet_read32(HPET_COUNTER);
    local_irq_restore(flags);

    /* Are we within two ticks of the deadline passing? Then we may miss. */
    return ((cmp + 2 - cnt) > delta) ? -ETIME : 0;
}
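
/*
 * Note on the check above: the arithmetic is modulo 2^32, so it remains
 * correct across counter wrap. Assumed example: cnt = 0xfffffff0 and
 * delta = 0x20 give a comparator value of 0x10; if the counter re-read
 * returns 0x12 (deadline already passed), then
 * (0x12 + 2 - 0xfffffff0) = 0x24 > 0x20, and we report -ETIME.
 */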

static int reprogram_hpet_evt_channel(
    struct hpet_event_channel *ch,
    s_time_t expire, s_time_t now, int force)
{
    int64_t delta;
    int ret;

    if ( unlikely(expire < 0) )
    {
        printk(KERN_DEBUG "reprogram: expire < 0\n");
        return -ETIME;
    }

    delta = expire - now;
    if ( (delta <= 0) && !force )
        return -ETIME;

    ch->next_event = expire;

    if ( expire == STIME_MAX )
    {
        /* We assume it will take a long time for the timer to wrap. */
        hpet_write32(0, HPET_T0_CMP);
        return 0;
    }

    delta = min_t(int64_t, delta, MAX_DELTA_NS);
    delta = max_t(int64_t, delta, MIN_DELTA_NS);
    delta = ns2ticks(delta, ch->shift, ch->mult);

    ret = hpet_legacy_next_event(delta);
    while ( ret && force )
    {
        delta += delta;
        ret = hpet_legacy_next_event(delta);
    }

    return ret;
}
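
/*
 * Note on the loop above: a forced reprogram must leave some event armed,
 * so on -ETIME the tick delta is doubled and the comparator rewritten until
 * hpet_legacy_next_event() lands a deadline safely in the future.
 */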

static int evt_do_broadcast(cpumask_t mask)
{
    int ret = 0, cpu = smp_processor_id();

    if ( cpu_isset(cpu, mask) )
    {
        cpu_clear(cpu, mask);
        raise_softirq(TIMER_SOFTIRQ);
        ret = 1;
    }

    if ( !cpus_empty(mask) )
    {
        cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
        ret = 1;
    }
    return ret;
}
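
/*
 * Note: the local CPU is woken by raising TIMER_SOFTIRQ directly (no IPI to
 * ourselves), while any remaining CPUs in the mask are kicked through
 * cpumask_raise_softirq(). The return value is nonzero if anyone was woken.
 */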

static void handle_hpet_broadcast(struct hpet_event_channel *ch)
{
    cpumask_t mask;
    s_time_t now, next_event;
    int cpu;

    spin_lock_irq(&ch->lock);

again:
    ch->next_event = STIME_MAX;
    next_event = STIME_MAX;
    mask = (cpumask_t)CPU_MASK_NONE;
    now = NOW();

    /* Find all expired events. */
    for_each_cpu_mask(cpu, ch->cpumask)
    {
        if ( per_cpu(timer_deadline, cpu) <= now )
            cpu_set(cpu, mask);
        else if ( per_cpu(timer_deadline, cpu) < next_event )
            next_event = per_cpu(timer_deadline, cpu);
    }

    /* Wake up the CPUs which have an expired event. */
    evt_do_broadcast(mask);

    if ( next_event != STIME_MAX )
    {
        if ( reprogram_hpet_evt_channel(ch, next_event, now, 0) )
            goto again;
    }
    spin_unlock_irq(&ch->lock);
}
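
/*
 * Note: the handler scans every CPU parked on the channel, wakes those whose
 * deadline has passed, and reprograms the comparator for the earliest
 * remaining deadline. If that deadline itself passes before the comparator
 * is written, reprogramming fails and we rescan from 'again'.
 */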

void hpet_broadcast_init(void)
{
    u64 hpet_rate;
    u32 hpet_id, cfg;

    hpet_rate = hpet_setup();
    if ( hpet_rate == 0 )
        return;

    hpet_id = hpet_read32(HPET_ID);
    if ( !(hpet_id & HPET_ID_LEGSUP) )
        return;

    /* Start HPET legacy interrupts. */
    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_LEGACY;
    hpet_write32(cfg, HPET_CFG);

    /* Set HPET T0 as one-shot. */
    cfg = hpet_read32(HPET_T0_CFG);
    cfg &= ~HPET_TN_PERIODIC;
    cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
    hpet_write32(cfg, HPET_T0_CFG);

    /*
     * The period is a femtoseconds value. We need to calculate the scaled
     * math multiplication factor for nanosecond to hpet tick conversion.
     */
    hpet_event.mult = div_sc((unsigned long)hpet_rate, 1000000000ul, 32);
    hpet_event.shift = 32;
    hpet_event.next_event = STIME_MAX;
    hpet_event.event_handler = handle_hpet_broadcast;
    spin_lock_init(&hpet_event.lock);
}
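
/*
 * Worked example (assumed hardware values): a period of 69841776 fs yields
 * hpet_rate = 10^15 / 69841776 ~= 14318180 Hz from hpet_setup(), so
 * hpet_event.mult = (14318180 << 32) / 10^9 ~= 6.15e7 with shift = 32.
 */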

void hpet_broadcast_enter(void)
{
    struct hpet_event_channel *ch = &hpet_event;

    ASSERT(!local_irq_is_enabled());
    spin_lock(&ch->lock);

    disable_APIC_timer();

    cpu_set(smp_processor_id(), ch->cpumask);

    /* Reprogram if the current CPU's expiry time is nearer. */
    if ( this_cpu(timer_deadline) < ch->next_event )
        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline), NOW(), 1);

    spin_unlock(&ch->lock);
}
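
/*
 * Locking note (the IRQ-safe protocol this changeset observes): the channel
 * lock is only ever taken with interrupts disabled -- via spin_lock_irq()
 * elsewhere, and via plain spin_lock() here because the ASSERT above
 * guarantees interrupts are already off. Taking the lock with interrupts
 * enabled could deadlock: the HPET legacy tick calls handle_hpet_broadcast(),
 * which contends for the same lock on this CPU.
 */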

void hpet_broadcast_exit(void)
{
    struct hpet_event_channel *ch = &hpet_event;
    int cpu = smp_processor_id();

    spin_lock_irq(&ch->lock);

    if ( cpu_test_and_clear(cpu, ch->cpumask) )
    {
        /* Cancel any outstanding LAPIC event and re-enable the LAPIC timer. */
        reprogram_timer(0);
        enable_APIC_timer();

        /* Reprogram the deadline; trigger timer work now if it has passed. */
        if ( !reprogram_timer(per_cpu(timer_deadline, cpu)) )
            raise_softirq(TIMER_SOFTIRQ);

        /* If no CPUs remain on the channel, disarm the HPET comparator. */
        if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
            reprogram_hpet_evt_channel(ch, STIME_MAX, 0, 0);
    }

    spin_unlock_irq(&ch->lock);
}

int hpet_broadcast_is_available(void)
{
    return (hpet_event.event_handler == handle_hpet_broadcast);
}

int hpet_legacy_irq_tick(void)
{
    if ( !hpet_event.event_handler )
        return 0;
    hpet_event.event_handler(&hpet_event);
    return 1;
}

u64 hpet_setup(void)
{
    static u64 hpet_rate;
    static int initialised;
    u32 hpet_id, hpet_period, cfg;
    int i;

    if ( initialised )
        return hpet_rate;
    initialised = 1;

    if ( hpet_address == 0 )
        return 0;

    set_fixmap_nocache(FIX_HPET_BASE, hpet_address);

    hpet_id = hpet_read32(HPET_ID);
    if ( hpet_id == 0 )
    {
        printk("BAD HPET vendor id.\n");
        return 0;
    }

    /* Check for sane period (100ps <= period <= 100ns). */
    hpet_period = hpet_read32(HPET_PERIOD);
    if ( (hpet_period > 100000000) || (hpet_period < 100000) )
    {
        printk("BAD HPET period %u.\n", hpet_period);
        return 0;
    }

    cfg = hpet_read32(HPET_CFG);
    cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
    hpet_write32(cfg, HPET_CFG);

    /* Disable all comparators before enabling the main counter. */
    for ( i = 0; i <= ((hpet_id >> 8) & 31); i++ )
    {
        cfg = hpet_read32(HPET_T0_CFG + i*0x20);
        cfg &= ~HPET_TN_ENABLE;
        hpet_write32(cfg, HPET_T0_CFG + i*0x20);
    }

    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_ENABLE;
    hpet_write32(cfg, HPET_CFG);

    hpet_rate = 1000000000000000ULL; /* 10^15 femtoseconds per second */
    (void)do_div(hpet_rate, hpet_period);

    return hpet_rate;
}
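
/*
 * Worked example (assumed hardware values): hpet_period = 69841776 fs sits
 * within the sanity window checked above, and
 * hpet_rate = 10^15 / 69841776 ~= 14318180, i.e. the common 14.31818MHz HPET.
 */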