ia64/xen-unstable

annotate xen/arch/x86/hpet.c @ 18806:ed8524f4a044

x86: Re-initialise HPET on resume from S3

Signed-off-by: Guanqun Lu <guanqun.lu@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Nov 18 15:55:14 2008 +0000 (2008-11-18)
parents f12d9595d07c
children 876bffced2b8
rev   line source
keir@17687 1 /******************************************************************************
keir@17687 2 * arch/x86/hpet.c
keir@17687 3 *
keir@17687 4 * HPET management.
keir@17687 5 */
keir@17687 6
keir@17687 7 #include <xen/config.h>
keir@17687 8 #include <xen/errno.h>
keir@17687 9 #include <xen/time.h>
keir@17687 10 #include <xen/timer.h>
keir@17687 11 #include <xen/smp.h>
keir@17687 12 #include <xen/softirq.h>
keir@17687 13 #include <asm/fixmap.h>
keir@17687 14 #include <asm/div64.h>
keir@17687 15 #include <asm/hpet.h>
keir@17687 16
keir@17687 17 #define MAX_DELTA_NS MILLISECS(10*1000)
keir@17782 18 #define MIN_DELTA_NS MICROSECS(20)
keir@17687 19
/*
 * State for one HPET-driven event channel, used to deliver timer wakeups
 * to CPUs on behalf of their (stopped) local APIC timers.
 */
struct hpet_event_channel
{
    unsigned long mult;       /* scaled ns -> HPET-tick factor (see div_sc()) */
    int shift;                /* shift paired with 'mult' for the conversion */
    s_time_t next_event;      /* absolute system time of next programmed event */
    cpumask_t cpumask;        /* CPUs currently relying on this channel */
    spinlock_t lock;          /* protects channel state and HPET programming */
    void (*event_handler)(struct hpet_event_channel *);  /* IRQ-time handler */
};
/* The single broadcast channel, driven by HPET timer 0 via legacy routing. */
static struct hpet_event_channel hpet_event;

/* Physical base address of the HPET MMIO block; 0 => no HPET available. */
unsigned long hpet_address;
keir@17687 32
/*
 * Compute the multiplication factor for scaled math used to convert
 * nanosecond values into clock ticks:
 *
 *     clock_ticks = (nanoseconds * factor) >> shift.
 *
 * This is the above equation rearranged to recover the factor from a
 * known clock_ticks / nanoseconds ratio:
 *
 *     factor = (clock_ticks << shift) / nanoseconds
 */
static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
                                   int shift)
{
    uint64_t scaled_ticks = (uint64_t)ticks << shift;

    /* do_div() divides in place (needed for 64-bit division on x86-32). */
    do_div(scaled_ticks, nsec);

    return (unsigned long)scaled_ticks;
}
keir@17687 52
/*
 * Scaled-math conversion from a nanosecond count to clock ticks:
 *
 *     clock_ticks = (nanoseconds * factor) >> shift.
 *
 * 'factor' and 'shift' come from a prior div_sc() computation.
 */
static inline unsigned long ns2ticks(unsigned long nsec, int shift,
                                     unsigned long factor)
{
    uint64_t product = (uint64_t)nsec * factor;

    return (unsigned long)(product >> shift);
}
keir@17687 65
/*
 * Program HPET timer 0 (legacy replacement routing) to fire 'delta' main
 * counter ticks from now.  Returns 0 on success, or -ETIME if the deadline
 * may already have passed by the time the comparator was written (the
 * caller should retry with a larger delta).
 */
static int hpet_legacy_next_event(unsigned long delta)
{
    uint32_t cnt, cmp;
    unsigned long flags;

    /* IRQs off so the read/write/re-read sequence is not stretched out. */
    local_irq_save(flags);
    cnt = hpet_read32(HPET_COUNTER);
    cmp = cnt + delta;
    hpet_write32(cmp, HPET_T0_CMP);
    /* Re-read the counter to see how much time programming consumed. */
    cmp = hpet_read32(HPET_COUNTER);
    local_irq_restore(flags);

    /* Are we within two ticks of the deadline passing? Then we may miss. */
    return ((cmp + 2 - cnt) > delta) ? -ETIME : 0;
}
keir@17687 81
/*
 * Program channel 'ch' to fire at absolute system time 'expire'.
 * 'now' is the caller's snapshot of the current time.  If 'force' is set,
 * the channel is programmed even when the deadline is already in the past,
 * retrying with a doubled delta until the hardware accepts it; otherwise
 * a past deadline yields -ETIME.  Returns 0 on success.
 * Caller must hold ch->lock (or otherwise own the channel).
 */
static int reprogram_hpet_evt_channel(
    struct hpet_event_channel *ch,
    s_time_t expire, s_time_t now, int force)
{
    int64_t delta;
    int ret;

    if ( unlikely(expire < 0) )
    {
        printk(KERN_DEBUG "reprogram: expire < 0\n");
        return -ETIME;
    }

    delta = expire - now;
    if ( (delta <= 0) && !force )
        return -ETIME;

    ch->next_event = expire;

    if ( expire == STIME_MAX )
    {
        /* We assume it will take a long time for the timer to wrap. */
        hpet_write32(0, HPET_T0_CMP);
        return 0;
    }

    /* Clamp the delta, then convert nanoseconds to HPET ticks. */
    delta = min_t(int64_t, delta, MAX_DELTA_NS);
    delta = max_t(int64_t, delta, MIN_DELTA_NS);
    delta = ns2ticks(delta, ch->shift, ch->mult);

    ret = hpet_legacy_next_event(delta);
    while ( ret && force )
    {
        /* Deadline kept slipping past us: double the delta and retry. */
        delta += delta;
        ret = hpet_legacy_next_event(delta);
    }

    return ret;
}
keir@17687 121
keir@17687 122 static int evt_do_broadcast(cpumask_t mask)
keir@17687 123 {
keir@17687 124 int ret = 0, cpu = smp_processor_id();
keir@17687 125
keir@17687 126 if ( cpu_isset(cpu, mask) )
keir@17687 127 {
keir@17687 128 cpu_clear(cpu, mask);
keir@17687 129 raise_softirq(TIMER_SOFTIRQ);
keir@17687 130 ret = 1;
keir@17687 131 }
keir@17687 132
keir@17687 133 if ( !cpus_empty(mask) )
keir@17687 134 {
keir@17687 135 cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
keir@17687 136 ret = 1;
keir@17687 137 }
keir@17687 138 return ret;
keir@17687 139 }
keir@17687 140
/*
 * HPET-interrupt-time handler for the broadcast channel: wake every CPU
 * in the channel's mask whose per-CPU timer deadline has passed, then
 * reprogram the channel for the earliest remaining deadline.
 */
static void handle_hpet_broadcast(struct hpet_event_channel *ch)
{
    cpumask_t mask;
    s_time_t now, next_event;
    int cpu;

    spin_lock_irq(&ch->lock);

again:
    /* Assume no pending event until the scan below proves otherwise. */
    ch->next_event = STIME_MAX;
    next_event = STIME_MAX;
    mask = (cpumask_t)CPU_MASK_NONE;
    now = NOW();

    /* find all expired events */
    for_each_cpu_mask(cpu, ch->cpumask)
    {
        if ( per_cpu(timer_deadline, cpu) <= now )
            cpu_set(cpu, mask);
        else if ( per_cpu(timer_deadline, cpu) < next_event )
            next_event = per_cpu(timer_deadline, cpu);
    }

    /* wakeup the cpus which have an expired event. */
    evt_do_broadcast(mask);

    if ( next_event != STIME_MAX )
    {
        /* Deadline slipped past while we were scanning: rescan. */
        if ( reprogram_hpet_evt_channel(ch, next_event, now, 0) )
            goto again;
    }
    spin_unlock_irq(&ch->lock);
}
keir@17687 174
/*
 * Set up HPET timer 0 (via legacy replacement routing) as a one-shot
 * broadcast wakeup source.  Silently does nothing if no usable HPET is
 * present or it lacks legacy-interrupt support.  Per the changeset this
 * file carries, it is re-invoked on resume from S3 to reprogram the reset
 * hardware — NOTE(review): that re-runs spin_lock_init() on a live lock;
 * callers must guarantee the channel is idle at that point.
 */
void hpet_broadcast_init(void)
{
    u64 hpet_rate;
    u32 hpet_id, cfg;

    hpet_rate = hpet_setup();
    if ( hpet_rate == 0 )
        return;

    hpet_id = hpet_read32(HPET_ID);
    if ( !(hpet_id & HPET_ID_LEGSUP) )
        return;

    /* Start HPET legacy interrupts */
    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_LEGACY;
    hpet_write32(cfg, HPET_CFG);

    /* set HPET T0 as oneshot */
    cfg = hpet_read32(HPET_T0_CFG);
    cfg &= ~HPET_TN_PERIODIC;
    cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
    hpet_write32(cfg, HPET_T0_CFG);

    /*
     * The period is a femto seconds value. We need to calculate the scaled
     * math multiplication factor for nanosecond to hpet tick conversion.
     */
    hpet_event.mult = div_sc((unsigned long)hpet_rate, 1000000000ul, 32);
    hpet_event.shift = 32;
    hpet_event.next_event = STIME_MAX;
    hpet_event.event_handler = handle_hpet_broadcast;
    spin_lock_init(&hpet_event.lock);
}
keir@17687 209
/*
 * Hand this CPU's timer wakeups over to the HPET broadcast channel and
 * stop the local APIC timer (presumably called around sleep states in
 * which the APIC timer stops — confirm with callers).  Must be invoked
 * with interrupts disabled.
 */
void hpet_broadcast_enter(void)
{
    struct hpet_event_channel *ch = &hpet_event;

    ASSERT(!local_irq_is_enabled());
    spin_lock(&ch->lock);

    disable_APIC_timer();

    cpu_set(smp_processor_id(), ch->cpumask);

    /* reprogram if current cpu expire time is nearer */
    if ( this_cpu(timer_deadline) < ch->next_event )
        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline), NOW(), 1);

    spin_unlock(&ch->lock);
}
keir@17687 227
/*
 * Undo hpet_broadcast_enter(): take this CPU off the broadcast channel,
 * return wakeup duty to the local APIC timer, and park the HPET
 * comparator if no CPUs remain on the channel.  No-op if this CPU was
 * not in the channel's mask.
 */
void hpet_broadcast_exit(void)
{
    struct hpet_event_channel *ch = &hpet_event;
    int cpu = smp_processor_id();

    spin_lock_irq(&ch->lock);

    if ( cpu_test_and_clear(cpu, ch->cpumask) )
    {
        /* Cancel any outstanding LAPIC event and re-enable interrupts. */
        reprogram_timer(0);
        enable_APIC_timer();

        /* Reprogram the deadline; trigger timer work now if it has passed. */
        if ( !reprogram_timer(per_cpu(timer_deadline, cpu)) )
            raise_softirq(TIMER_SOFTIRQ);

        /* Last CPU off the channel: push the HPET event out to the max. */
        if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
            reprogram_hpet_evt_channel(ch, STIME_MAX, 0, 0);
    }

    spin_unlock_irq(&ch->lock);
}
keir@17687 251
keir@17850 252 int hpet_broadcast_is_available(void)
keir@17850 253 {
keir@17850 254 return (hpet_event.event_handler == handle_hpet_broadcast);
keir@17850 255 }
keir@17850 256
keir@17687 257 int hpet_legacy_irq_tick(void)
keir@17687 258 {
keir@17687 259 if ( !hpet_event.event_handler )
keir@17687 260 return 0;
keir@17687 261 hpet_event.event_handler(&hpet_event);
keir@17687 262 return 1;
keir@17687 263 }
keir@17687 264
keir@17687 265 u64 hpet_setup(void)
keir@17687 266 {
keir@18806 267 u64 hpet_rate;
keir@17687 268 u32 hpet_id, hpet_period, cfg;
keir@17687 269 int i;
keir@17687 270
keir@17687 271 if ( hpet_address == 0 )
keir@17687 272 return 0;
keir@17687 273
keir@17687 274 set_fixmap_nocache(FIX_HPET_BASE, hpet_address);
keir@17687 275
keir@17687 276 hpet_id = hpet_read32(HPET_ID);
keir@17687 277 if ( hpet_id == 0 )
keir@17687 278 {
keir@17687 279 printk("BAD HPET vendor id.\n");
keir@17687 280 return 0;
keir@17687 281 }
keir@17687 282
keir@17687 283 /* Check for sane period (100ps <= period <= 100ns). */
keir@17687 284 hpet_period = hpet_read32(HPET_PERIOD);
keir@17687 285 if ( (hpet_period > 100000000) || (hpet_period < 100000) )
keir@17687 286 {
keir@17687 287 printk("BAD HPET period %u.\n", hpet_period);
keir@17687 288 return 0;
keir@17687 289 }
keir@17687 290
keir@17687 291 cfg = hpet_read32(HPET_CFG);
keir@17687 292 cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
keir@17687 293 hpet_write32(cfg, HPET_CFG);
keir@17687 294
keir@17687 295 for ( i = 0; i <= ((hpet_id >> 8) & 31); i++ )
keir@17687 296 {
keir@17687 297 cfg = hpet_read32(HPET_T0_CFG + i*0x20);
keir@17687 298 cfg &= ~HPET_TN_ENABLE;
keir@17687 299 hpet_write32(cfg & ~HPET_TN_ENABLE, HPET_T0_CFG);
keir@17687 300 }
keir@17687 301
keir@17687 302 cfg = hpet_read32(HPET_CFG);
keir@17687 303 cfg |= HPET_CFG_ENABLE;
keir@17687 304 hpet_write32(cfg, HPET_CFG);
keir@17687 305
keir@17687 306 hpet_rate = 1000000000000000ULL; /* 10^15 */
keir@17687 307 (void)do_div(hpet_rate, hpet_period);
keir@17687 308
keir@17687 309 return hpet_rate;
keir@17687 310 }