ia64/xen-unstable

view xen/arch/x86/hpet.c @ 18752:f12d9595d07c

Change timer implementation to allow variable 'slop' in how late
timers are fired. The default continues to be 50us, but this can be
configured on Xen's command line.

Signed-off-by: Yu Ke <ke.yu@intel.com>
Signed-off-by: Wei Gang <gang.wei@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Oct 31 14:02:39 2008 +0000 (2008-10-31)
parents 5bca96f74d59
children ed8524f4a044
line source
1 /******************************************************************************
2 * arch/x86/hpet.c
3 *
4 * HPET management.
5 */
7 #include <xen/config.h>
8 #include <xen/errno.h>
9 #include <xen/time.h>
10 #include <xen/timer.h>
11 #include <xen/smp.h>
12 #include <xen/softirq.h>
13 #include <asm/fixmap.h>
14 #include <asm/div64.h>
15 #include <asm/hpet.h>
/* Clamp bounds for deltas programmed into the HPET comparator. */
#define MAX_DELTA_NS MILLISECS(10*1000) /* at most 10 seconds out */
#define MIN_DELTA_NS MICROSECS(20)      /* at least 20us in the future */

/*
 * State of the single HPET-backed broadcast event channel, used to deliver
 * timer wakeups to CPUs that have disabled their local APIC timer (see
 * hpet_broadcast_enter()/hpet_broadcast_exit()).
 */
struct hpet_event_channel
{
    unsigned long mult;   /* scaled ns->HPET-tick factor (see div_sc()) */
    int shift;            /* shift paired with 'mult' */
    s_time_t next_event;  /* programmed expiry; STIME_MAX when idle */
    cpumask_t cpumask;    /* CPUs currently relying on this channel */
    spinlock_t lock;      /* protects all fields of this structure */
    void (*event_handler)(struct hpet_event_channel *);
};
static struct hpet_event_channel hpet_event;

/* Physical address of the HPET MMIO region; 0 if none was discovered. */
unsigned long hpet_address;
/*
 * Calculate a multiplication factor for scaled math, which is used to convert
 * nanoseconds based values to clock ticks:
 *
 * clock_ticks = (nanoseconds * factor) >> shift.
 *
 * div_sc is the rearranged equation to calculate a factor from a given clock
 * ticks / nanoseconds ratio:
 *
 * factor = (clock_ticks << shift) / nanoseconds
 */
static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
                                   int shift)
{
    uint64_t tmp = ((uint64_t)ticks) << shift;

    /* do_div() performs the 64-bit division on 'tmp' in place. */
    do_div(tmp, nsec);
    return (unsigned long) tmp;
}
/*
 * Convert a nanosecond count to clock ticks using a precomputed scaled
 * factor: clock_ticks = (nanoseconds * factor) >> shift.
 */
static inline unsigned long ns2ticks(unsigned long nsec, int shift,
                                     unsigned long factor)
{
    uint64_t scaled = (uint64_t)nsec * factor;

    return (unsigned long)(scaled >> shift);
}
/*
 * Program HPET timer 0's comparator 'delta' ticks into the future.
 * Returns 0 on success, -ETIME if the counter may already have passed the
 * comparator value (caller must retry or treat the event as missed).
 */
static int hpet_legacy_next_event(unsigned long delta)
{
    uint32_t cnt, cmp;
    unsigned long flags;

    /* Keep the read/compute/write window as short as possible. */
    local_irq_save(flags);
    cnt = hpet_read32(HPET_COUNTER);
    cmp = cnt + delta;
    hpet_write32(cmp, HPET_T0_CMP);
    /* Re-sample the counter to detect a race with the deadline. */
    cmp = hpet_read32(HPET_COUNTER);
    local_irq_restore(flags);

    /* Are we within two ticks of the deadline passing? Then we may miss. */
    return ((cmp + 2 - cnt) > delta) ? -ETIME : 0;
}
/*
 * Program channel 'ch' so its event fires at absolute system time 'expire'.
 * 'now' is the caller's current timestamp sample.  With 'force' clear, a
 * deadline that has already passed returns -ETIME without touching the
 * hardware; with 'force' set the delta is doubled until the comparator write
 * sticks.  Returns 0 on success, -ETIME otherwise.
 */
static int reprogram_hpet_evt_channel(
    struct hpet_event_channel *ch,
    s_time_t expire, s_time_t now, int force)
{
    int64_t delta;
    int ret;

    if ( unlikely(expire < 0) )
    {
        printk(KERN_DEBUG "reprogram: expire < 0\n");
        return -ETIME;
    }

    delta = expire - now;
    if ( (delta <= 0) && !force )
        return -ETIME;

    ch->next_event = expire;

    if ( expire == STIME_MAX )
    {
        /* We assume it will take a long time for the timer to wrap. */
        hpet_write32(0, HPET_T0_CMP);
        return 0;
    }

    /* Clamp the delta, then convert nanoseconds to HPET ticks. */
    delta = min_t(int64_t, delta, MAX_DELTA_NS);
    delta = max_t(int64_t, delta, MIN_DELTA_NS);
    delta = ns2ticks(delta, ch->shift, ch->mult);

    ret = hpet_legacy_next_event(delta);
    while ( ret && force )
    {
        /* Forced programming: keep doubling the window until it sticks. */
        delta += delta;
        ret = hpet_legacy_next_event(delta);
    }

    return ret;
}
122 static int evt_do_broadcast(cpumask_t mask)
123 {
124 int ret = 0, cpu = smp_processor_id();
126 if ( cpu_isset(cpu, mask) )
127 {
128 cpu_clear(cpu, mask);
129 raise_softirq(TIMER_SOFTIRQ);
130 ret = 1;
131 }
133 if ( !cpus_empty(mask) )
134 {
135 cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
136 ret = 1;
137 }
138 return ret;
139 }
/*
 * Event handler for the broadcast channel (invoked from the legacy HPET
 * interrupt via hpet_legacy_irq_tick()): wake every CPU whose deadline has
 * passed and reprogram the channel for the earliest remaining deadline.
 */
static void handle_hpet_broadcast(struct hpet_event_channel *ch)
{
    cpumask_t mask;
    s_time_t now, next_event;
    int cpu;

    spin_lock_irq(&ch->lock);

again:
    /* Assume no pending event until the scan below finds one. */
    ch->next_event = STIME_MAX;
    next_event = STIME_MAX;
    mask = (cpumask_t)CPU_MASK_NONE;
    now = NOW();

    /* find all expired events */
    for_each_cpu_mask(cpu, ch->cpumask)
    {
        if ( per_cpu(timer_deadline, cpu) <= now )
            cpu_set(cpu, mask);
        else if ( per_cpu(timer_deadline, cpu) < next_event )
            next_event = per_cpu(timer_deadline, cpu);
    }

    /* wakeup the cpus which have an expired event. */
    evt_do_broadcast(mask);

    if ( next_event != STIME_MAX )
    {
        /* Programming failed => that deadline passed too; rescan. */
        if ( reprogram_hpet_evt_channel(ch, next_event, now, 0) )
            goto again;
    }
    spin_unlock_irq(&ch->lock);
}
/*
 * One-time setup of the HPET broadcast channel.  Requires a usable HPET
 * with legacy-replacement support; silently does nothing otherwise, which
 * hpet_broadcast_is_available() will then report.
 */
void hpet_broadcast_init(void)
{
    u64 hpet_rate;
    u32 hpet_id, cfg;

    hpet_rate = hpet_setup();
    if ( hpet_rate == 0 )
        return;

    /* Broadcast relies on timer 0's legacy-replacement interrupt routing. */
    hpet_id = hpet_read32(HPET_ID);
    if ( !(hpet_id & HPET_ID_LEGSUP) )
        return;

    /* Start HPET legacy interrupts */
    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_LEGACY;
    hpet_write32(cfg, HPET_CFG);

    /* set HPET T0 as oneshot */
    cfg = hpet_read32(HPET_T0_CFG);
    cfg &= ~HPET_TN_PERIODIC;
    cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
    hpet_write32(cfg, HPET_T0_CFG);

    /*
     * The period is a femto seconds value. We need to calculate the scaled
     * math multiplication factor for nanosecond to hpet tick conversion.
     */
    hpet_event.mult = div_sc((unsigned long)hpet_rate, 1000000000ul, 32);
    hpet_event.shift = 32;
    hpet_event.next_event = STIME_MAX;
    hpet_event.event_handler = handle_hpet_broadcast;
    spin_lock_init(&hpet_event.lock);
}
/*
 * Called with interrupts disabled before this CPU idles: hand its timer
 * deadline over to the HPET broadcast channel and stop the local APIC timer.
 */
void hpet_broadcast_enter(void)
{
    struct hpet_event_channel *ch = &hpet_event;

    ASSERT(!local_irq_is_enabled());
    spin_lock(&ch->lock);

    disable_APIC_timer();

    /* This CPU now depends on the broadcast channel for wakeups. */
    cpu_set(smp_processor_id(), ch->cpumask);

    /* reprogram if current cpu expire time is nearer */
    if ( this_cpu(timer_deadline) < ch->next_event )
        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline), NOW(), 1);

    spin_unlock(&ch->lock);
}
/*
 * Called when this CPU stops idling: detach from the broadcast channel,
 * re-enable the local APIC timer and restore this CPU's own deadline.
 */
void hpet_broadcast_exit(void)
{
    struct hpet_event_channel *ch = &hpet_event;
    int cpu = smp_processor_id();

    spin_lock_irq(&ch->lock);

    if ( cpu_test_and_clear(cpu, ch->cpumask) )
    {
        /* Cancel any outstanding LAPIC event and re-enable interrupts. */
        reprogram_timer(0);
        enable_APIC_timer();

        /* Reprogram the deadline; trigger timer work now if it has passed. */
        if ( !reprogram_timer(per_cpu(timer_deadline, cpu)) )
            raise_softirq(TIMER_SOFTIRQ);

        /* Last CPU out with an event pending: quiesce the channel. */
        if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
            reprogram_hpet_evt_channel(ch, STIME_MAX, 0, 0);
    }

    spin_unlock_irq(&ch->lock);
}
252 int hpet_broadcast_is_available(void)
253 {
254 return (hpet_event.event_handler == handle_hpet_broadcast);
255 }
257 int hpet_legacy_irq_tick(void)
258 {
259 if ( !hpet_event.event_handler )
260 return 0;
261 hpet_event.event_handler(&hpet_event);
262 return 1;
263 }
265 u64 hpet_setup(void)
266 {
267 static u64 hpet_rate;
268 static int initialised;
269 u32 hpet_id, hpet_period, cfg;
270 int i;
272 if ( initialised )
273 return hpet_rate;
274 initialised = 1;
276 if ( hpet_address == 0 )
277 return 0;
279 set_fixmap_nocache(FIX_HPET_BASE, hpet_address);
281 hpet_id = hpet_read32(HPET_ID);
282 if ( hpet_id == 0 )
283 {
284 printk("BAD HPET vendor id.\n");
285 return 0;
286 }
288 /* Check for sane period (100ps <= period <= 100ns). */
289 hpet_period = hpet_read32(HPET_PERIOD);
290 if ( (hpet_period > 100000000) || (hpet_period < 100000) )
291 {
292 printk("BAD HPET period %u.\n", hpet_period);
293 return 0;
294 }
296 cfg = hpet_read32(HPET_CFG);
297 cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
298 hpet_write32(cfg, HPET_CFG);
300 for ( i = 0; i <= ((hpet_id >> 8) & 31); i++ )
301 {
302 cfg = hpet_read32(HPET_T0_CFG + i*0x20);
303 cfg &= ~HPET_TN_ENABLE;
304 hpet_write32(cfg & ~HPET_TN_ENABLE, HPET_T0_CFG);
305 }
307 cfg = hpet_read32(HPET_CFG);
308 cfg |= HPET_CFG_ENABLE;
309 hpet_write32(cfg, HPET_CFG);
311 hpet_rate = 1000000000000000ULL; /* 10^15 */
312 (void)do_div(hpet_rate, hpet_period);
314 return hpet_rate;
315 }