ia64/xen-unstable

view xen/arch/x86/hpet.c @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use domain::max_vcpus instead.
The changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
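
As an illustration of the pattern this change applies (a hypothetical
before/after sketch, not code from this changeset; do_something() is a
placeholder), a fixed MAX_VCPUS bound becomes the domain's own limit:

    /* hypothetical sketch; do_something() is a placeholder */
    for ( i = 0; i < MAX_VCPUS; i++ )       /* old: build-time constant */
        if ( d->vcpu[i] != NULL )
            do_something(d->vcpu[i]);

    for ( i = 0; i < d->max_vcpus; i++ )    /* new: per-domain limit */
        if ( d->vcpu[i] != NULL )
            do_something(d->vcpu[i]);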
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 822ea2bf0c54
children
line source
/******************************************************************************
 * arch/x86/hpet.c
 *
 * HPET management.
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/smp.h>
#include <xen/softirq.h>
#include <xen/irq.h>
#include <asm/fixmap.h>
#include <asm/div64.h>
#include <asm/hpet.h>
#include <asm/msi.h>
#include <mach_apic.h>

#define MAX_DELTA_NS MILLISECS(10*1000)
#define MIN_DELTA_NS MICROSECS(20)

#define MAX_HPET_NUM 32

#define HPET_EVT_USED_BIT 0
#define HPET_EVT_USED (1 << HPET_EVT_USED_BIT)
#define HPET_EVT_DISABLE_BIT 1
#define HPET_EVT_DISABLE (1 << HPET_EVT_DISABLE_BIT)

struct hpet_event_channel
{
    unsigned long mult;
    int shift;
    s_time_t next_event;
    cpumask_t cpumask;
    spinlock_t lock;
    void (*event_handler)(struct hpet_event_channel *);

    unsigned int idx;   /* physical channel idx */
    int cpu;            /* msi target */
    unsigned int vector;/* msi vector */
    unsigned int flags; /* HPET_EVT_x */
} __cacheline_aligned;
static struct hpet_event_channel legacy_hpet_event;
static struct hpet_event_channel hpet_events[MAX_HPET_NUM];
static unsigned int num_hpets_used; /* msi hpet channels used for broadcast */

DEFINE_PER_CPU(struct hpet_event_channel *, cpu_bc_channel);

static int vector_channel[NR_VECTORS] = {[0 ... NR_VECTORS-1] = -1};

#define vector_to_channel(vector) vector_channel[vector]

unsigned long hpet_address;

void msi_compose_msg(struct pci_dev *pdev, int vector, struct msi_msg *msg);
/*
 * force_hpet_broadcast: by default, legacy hpet broadcast will be stopped
 * if RTC interrupts are enabled. Enable this option if you want to always
 * enable legacy hpet broadcast for deep C states.
 */
int force_hpet_broadcast;
boolean_param("hpetbroadcast", force_hpet_broadcast);
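
/*
 * For example, broadcast can be forced on by appending the option to the
 * hypervisor command line in the bootloader entry (a sketch; accepted
 * spellings such as "hpetbroadcast=1" depend on this tree's boolean
 * parameter parser):
 *
 *   kernel /boot/xen.gz ... hpetbroadcast
 */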
/*
 * Calculate a multiplication factor for scaled math, which is used to convert
 * nanoseconds based values to clock ticks:
 *
 * clock_ticks = (nanoseconds * factor) >> shift.
 *
 * div_sc is the rearranged equation to calculate a factor from a given clock
 * ticks / nanoseconds ratio:
 *
 * factor = (clock_ticks << shift) / nanoseconds
 */
static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
                                   int shift)
{
    uint64_t tmp = ((uint64_t)ticks) << shift;

    do_div(tmp, nsec);
    return (unsigned long) tmp;
}

/*
 * Convert nanoseconds based values to clock ticks:
 *
 * clock_ticks = (nanoseconds * factor) >> shift.
 */
static inline unsigned long ns2ticks(unsigned long nsec, int shift,
                                     unsigned long factor)
{
    uint64_t tmp = ((uint64_t)nsec * factor) >> shift;

    return (unsigned long) tmp;
}
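
/*
 * Worked example (hypothetical rate, for illustration): for an HPET ticking
 * at 14318180 Hz, div_sc(14318180, 1000000000, 32) yields a factor of about
 * 61496114.  Converting one millisecond back, ns2ticks(1000000, 32, 61496114)
 * computes (10^6 * 61496114) >> 32 = 14318 ticks, i.e. 1 ms at that rate.
 */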
static int hpet_next_event(unsigned long delta, int timer)
{
    uint32_t cnt, cmp;
    unsigned long flags;

    local_irq_save(flags);
    cnt = hpet_read32(HPET_COUNTER);
    cmp = cnt + delta;
    hpet_write32(cmp, HPET_Tn_CMP(timer));
    cmp = hpet_read32(HPET_COUNTER);
    local_irq_restore(flags);

    /* Are we within two ticks of the deadline passing? Then we may miss. */
    return ((cmp + 2 - cnt) > delta) ? -ETIME : 0;
}
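
/*
 * The comparator only fires on an exact match with the main counter, so if
 * the counter has already raced past the value just written, the event would
 * be lost until the counter wraps.  Re-reading the counter and allowing two
 * ticks of slack detects that case; callers retry with a larger delta on
 * -ETIME (see the force loop in reprogram_hpet_evt_channel() below).
 */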
static int reprogram_hpet_evt_channel(
    struct hpet_event_channel *ch,
    s_time_t expire, s_time_t now, int force)
{
    int64_t delta;
    int ret;

    if ( (ch->flags & HPET_EVT_DISABLE) || (expire == 0) )
        return 0;

    if ( unlikely(expire < 0) )
    {
        printk(KERN_DEBUG "reprogram: expire < 0\n");
        return -ETIME;
    }

    delta = expire - now;
    if ( (delta <= 0) && !force )
        return -ETIME;

    ch->next_event = expire;

    if ( expire == STIME_MAX )
    {
        /* We assume it will take a long time for the timer to wrap. */
        hpet_write32(0, HPET_Tn_CMP(ch->idx));
        return 0;
    }

    delta = min_t(int64_t, delta, MAX_DELTA_NS);
    delta = max_t(int64_t, delta, MIN_DELTA_NS);
    delta = ns2ticks(delta, ch->shift, ch->mult);

    ret = hpet_next_event(delta, ch->idx);
    while ( ret && force )
    {
        delta += delta;
        ret = hpet_next_event(delta, ch->idx);
    }

    return ret;
}
static int evt_do_broadcast(cpumask_t mask)
{
    int ret = 0, cpu = smp_processor_id();

    if ( cpu_isset(cpu, mask) )
    {
        cpu_clear(cpu, mask);
        raise_softirq(TIMER_SOFTIRQ);
        ret = 1;
    }

    if ( !cpus_empty(mask) )
    {
        cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
        ret = 1;
    }
    return ret;
}
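
/*
 * Broadcast handler: wake every CPU on this channel whose deadline has
 * passed, then reprogram the channel for the earliest remaining deadline.
 * If reprogramming fails because that deadline raced past as well, rescan
 * rather than risk losing a wakeup.
 */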
static void handle_hpet_broadcast(struct hpet_event_channel *ch)
{
    cpumask_t mask;
    s_time_t now, next_event;
    int cpu;

    spin_lock_irq(&ch->lock);

again:
    ch->next_event = STIME_MAX;
    next_event = STIME_MAX;
    mask = (cpumask_t)CPU_MASK_NONE;
    now = NOW();

    /* find all expired events */
    for_each_cpu_mask(cpu, ch->cpumask)
    {
        if ( per_cpu(timer_deadline, cpu) <= now )
            cpu_set(cpu, mask);
        else if ( per_cpu(timer_deadline, cpu) < next_event )
            next_event = per_cpu(timer_deadline, cpu);
    }

    /* wakeup the cpus which have an expired event. */
    evt_do_broadcast(mask);

    if ( next_event != STIME_MAX )
    {
        if ( reprogram_hpet_evt_channel(ch, next_event, now, 0) )
            goto again;
    }
    spin_unlock_irq(&ch->lock);
}
static void hpet_interrupt_handler(int vector, void *data,
                                   struct cpu_user_regs *regs)
{
    struct hpet_event_channel *ch = (struct hpet_event_channel *)data;
    if ( !ch->event_handler )
    {
        printk(XENLOG_WARNING "Spurious HPET timer interrupt on HPET timer %d\n", ch->idx);
        return;
    }

    ch->event_handler(ch);
}
static void hpet_msi_unmask(unsigned int vector)
{
    unsigned long cfg;
    int ch_idx = vector_to_channel(vector);
    struct hpet_event_channel *ch;

    BUG_ON(ch_idx < 0);
    ch = &hpet_events[ch_idx];

    cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    cfg |= HPET_TN_FSB;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
}

static void hpet_msi_mask(unsigned int vector)
{
    unsigned long cfg;
    int ch_idx = vector_to_channel(vector);
    struct hpet_event_channel *ch;

    BUG_ON(ch_idx < 0);
    ch = &hpet_events[ch_idx];

    cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    cfg &= ~HPET_TN_FSB;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
}

static void hpet_msi_write(unsigned int vector, struct msi_msg *msg)
{
    int ch_idx = vector_to_channel(vector);
    struct hpet_event_channel *ch;

    BUG_ON(ch_idx < 0);
    ch = &hpet_events[ch_idx];

    hpet_write32(msg->data, HPET_Tn_ROUTE(ch->idx));
    hpet_write32(msg->address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
}

static void hpet_msi_read(unsigned int vector, struct msi_msg *msg)
{
    int ch_idx = vector_to_channel(vector);
    struct hpet_event_channel *ch;

    BUG_ON(ch_idx < 0);
    ch = &hpet_events[ch_idx];

    msg->data = hpet_read32(HPET_Tn_ROUTE(ch->idx));
    msg->address_lo = hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4);
    msg->address_hi = 0;
}

static unsigned int hpet_msi_startup(unsigned int vector)
{
    hpet_msi_unmask(vector);
    return 0;
}

static void hpet_msi_shutdown(unsigned int vector)
{
    hpet_msi_mask(vector);
}

static void hpet_msi_ack(unsigned int vector)
{
    ack_APIC_irq();
}

static void hpet_msi_end(unsigned int vector)
{
}

static void hpet_msi_set_affinity(unsigned int vector, cpumask_t mask)
{
    struct msi_msg msg;
    unsigned int dest;
    cpumask_t tmp;

    cpus_and(tmp, mask, cpu_online_map);
    if ( cpus_empty(tmp) )
        mask = TARGET_CPUS;

    dest = cpu_mask_to_apicid(mask);

    hpet_msi_read(vector, &msg);

    msg.data &= ~MSI_DATA_VECTOR_MASK;
    msg.data |= MSI_DATA_VECTOR(vector);
    msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
    msg.address_lo |= MSI_ADDR_DEST_ID(dest);

    hpet_msi_write(vector, &msg);
    irq_desc[vector].affinity = mask;
}
/*
 * IRQ Chip for MSI HPET Devices.
 */
static struct hw_interrupt_type hpet_msi_type = {
    .typename     = "HPET-MSI",
    .startup      = hpet_msi_startup,
    .shutdown     = hpet_msi_shutdown,
    .enable       = hpet_msi_unmask,
    .disable      = hpet_msi_mask,
    .ack          = hpet_msi_ack,
    .end          = hpet_msi_end,
    .set_affinity = hpet_msi_set_affinity,
};
static int hpet_setup_msi_irq(unsigned int vector)
{
    int ret;
    struct msi_msg msg;
    struct hpet_event_channel *ch = &hpet_events[vector_to_channel(vector)];

    irq_desc[vector].handler = &hpet_msi_type;
    ret = request_irq_vector(vector, hpet_interrupt_handler, 0, "HPET", ch);
    if ( ret < 0 )
        return ret;

    msi_compose_msg(NULL, vector, &msg);
    hpet_msi_write(vector, &msg);

    return 0;
}

static int hpet_assign_irq(struct hpet_event_channel *ch)
{
    int vector;

    if ( ch->vector )
        return 0;

    if ( (vector = assign_irq_vector(AUTO_ASSIGN_IRQ)) < 0 )
        return vector;

    vector_channel[vector] = ch - &hpet_events[0];

    if ( hpet_setup_msi_irq(vector) )
    {
        free_irq_vector(vector);
        vector_channel[vector] = -1;
        return -EINVAL;
    }

    ch->vector = vector;
    return 0;
}
static int hpet_fsb_cap_lookup(void)
{
    unsigned int id;
    unsigned int num_chs, num_chs_used;
    int i;

    id = hpet_read32(HPET_ID);

    num_chs = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
    num_chs++; /* Value read out starts from 0 */

    num_chs_used = 0;
    for ( i = 0; i < num_chs; i++ )
    {
        struct hpet_event_channel *ch = &hpet_events[num_chs_used];
        unsigned long cfg = hpet_read32(HPET_Tn_CFG(i));

        /* Only consider HPET timers with MSI support */
        if ( !(cfg & HPET_TN_FSB_CAP) )
            continue;

        ch->flags = 0;
        ch->idx = i;

        if ( hpet_assign_irq(ch) )
            continue;

        /* set default irq affinity */
        ch->cpu = num_chs_used;
        per_cpu(cpu_bc_channel, ch->cpu) = ch;
        irq_desc[ch->vector].handler->
            set_affinity(ch->vector, cpumask_of_cpu(ch->cpu));

        num_chs_used++;

        if ( num_chs_used == num_possible_cpus() )
            break;
    }

    printk(XENLOG_INFO
           "HPET: %d timers in total, %d timers will be used for broadcast\n",
           num_chs, num_chs_used);

    return num_chs_used;
}
static int next_channel;
static spinlock_t next_lock = SPIN_LOCK_UNLOCKED;
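
/*
 * Channel allocation policy: advance a round-robin cursor, hand out a
 * not-yet-owned channel if one exists, and otherwise share the (in-use)
 * channel at the cursor position.
 */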
static struct hpet_event_channel *hpet_get_channel(int cpu)
{
    int i;
    int next;
    struct hpet_event_channel *ch;

    spin_lock(&next_lock);
    next = next_channel = (next_channel + 1) % num_hpets_used;
    spin_unlock(&next_lock);

    /* try unused channel first */
    for ( i = next; i < next + num_hpets_used; i++ )
    {
        ch = &hpet_events[i % num_hpets_used];
        if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        {
            ch->cpu = cpu;
            return ch;
        }
    }

    /* share an in-use channel */
    ch = &hpet_events[next];
    if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        ch->cpu = cpu;

    return ch;
}
static void hpet_attach_channel_share(int cpu, struct hpet_event_channel *ch)
{
    per_cpu(cpu_bc_channel, cpu) = ch;

    /* try to be the channel owner again while holding the lock */
    if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        ch->cpu = cpu;

    if ( ch->cpu != cpu )
        return;

    /* set irq affinity */
    irq_desc[ch->vector].handler->
        set_affinity(ch->vector, cpumask_of_cpu(ch->cpu));
}

static void hpet_detach_channel_share(int cpu)
{
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);

    per_cpu(cpu_bc_channel, cpu) = NULL;

    if ( cpu != ch->cpu )
        return;

    if ( cpus_empty(ch->cpumask) )
    {
        ch->cpu = -1;
        clear_bit(HPET_EVT_USED_BIT, &ch->flags);
        return;
    }

    ch->cpu = first_cpu(ch->cpumask);
    /* set irq affinity */
    irq_desc[ch->vector].handler->
        set_affinity(ch->vector, cpumask_of_cpu(ch->cpu));
}
static void (*hpet_attach_channel)(int cpu, struct hpet_event_channel *ch);
static void (*hpet_detach_channel)(int cpu);

#include <asm/mc146818rtc.h>
void cpuidle_disable_deep_cstate(void);

void (*pv_rtc_handler)(unsigned int port, uint8_t value);
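
/*
 * RTC write snooping: port 0x70 is the CMOS/RTC index register, port 0x71
 * the data register.  Once a write enables any of the interrupt bits in RTC
 * register B, deep C-states are disabled and the hook unregisters itself.
 */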
static void handle_rtc_once(unsigned int port, uint8_t value)
{
    static int index;

    if ( port == 0x70 )
    {
        index = value;
        return;
    }

    if ( index != RTC_REG_B )
        return;

    /* RTC Reg B contains PIE/AIE/UIE */
    if ( value & (RTC_PIE | RTC_AIE | RTC_UIE ) )
    {
        cpuidle_disable_deep_cstate();
        pv_rtc_handler = NULL;
    }
}
void hpet_broadcast_init(void)
{
    u64 hpet_rate;
    u32 hpet_id, cfg;
    int i;

    hpet_rate = hpet_setup();
    if ( hpet_rate == 0 )
        return;

    num_hpets_used = hpet_fsb_cap_lookup();
    if ( num_hpets_used > 0 )
    {
        /* Stop HPET legacy interrupts */
        cfg = hpet_read32(HPET_CFG);
        cfg &= ~HPET_CFG_LEGACY;
        hpet_write32(cfg, HPET_CFG);

        for ( i = 0; i < num_hpets_used; i++ )
        {
            /* set HPET Tn as oneshot */
            cfg = hpet_read32(HPET_Tn_CFG(hpet_events[i].idx));
            cfg &= ~HPET_TN_PERIODIC;
            cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
            hpet_write32(cfg, HPET_Tn_CFG(hpet_events[i].idx));

            hpet_events[i].mult = div_sc((unsigned long)hpet_rate,
                                         1000000000ul, 32);
            hpet_events[i].shift = 32;
            hpet_events[i].next_event = STIME_MAX;
            hpet_events[i].event_handler = handle_hpet_broadcast;
            spin_lock_init(&hpet_events[i].lock);
        }

        if ( num_hpets_used < num_possible_cpus() )
        {
            hpet_attach_channel = hpet_attach_channel_share;
            hpet_detach_channel = hpet_detach_channel_share;
        }

        return;
    }

    if ( legacy_hpet_event.flags & HPET_EVT_DISABLE )
        return;

    hpet_id = hpet_read32(HPET_ID);
    if ( !(hpet_id & HPET_ID_LEGSUP) )
        return;

    /* Start HPET legacy interrupts */
    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_LEGACY;
    hpet_write32(cfg, HPET_CFG);

    /* set HPET T0 as oneshot */
    cfg = hpet_read32(HPET_T0_CFG);
    cfg &= ~HPET_TN_PERIODIC;
    cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
    hpet_write32(cfg, HPET_T0_CFG);

    /*
     * The period is a femtoseconds value. We need to calculate the scaled
     * math multiplication factor for nanosecond to hpet tick conversion.
     */
    legacy_hpet_event.mult = div_sc((unsigned long)hpet_rate, 1000000000ul, 32);
    legacy_hpet_event.shift = 32;
    legacy_hpet_event.next_event = STIME_MAX;
    legacy_hpet_event.event_handler = handle_hpet_broadcast;
    legacy_hpet_event.idx = 0;
    legacy_hpet_event.flags = 0;
    spin_lock_init(&legacy_hpet_event.lock);

    for_each_cpu(i)
        per_cpu(cpu_bc_channel, i) = &legacy_hpet_event;

    if ( !force_hpet_broadcast )
        pv_rtc_handler = handle_rtc_once;
}
void hpet_disable_legacy_broadcast(void)
{
    u32 cfg;

    spin_lock_irq(&legacy_hpet_event.lock);

    legacy_hpet_event.flags |= HPET_EVT_DISABLE;

    /* disable HPET T0 */
    cfg = hpet_read32(HPET_T0_CFG);
    cfg &= ~HPET_TN_ENABLE;
    hpet_write32(cfg, HPET_T0_CFG);

    /* Stop HPET legacy interrupts */
    cfg = hpet_read32(HPET_CFG);
    cfg &= ~HPET_CFG_LEGACY;
    hpet_write32(cfg, HPET_CFG);

    spin_unlock_irq(&legacy_hpet_event.lock);

    smp_send_event_check_mask(&cpu_online_map);
}
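
/*
 * Entered from the idle path before a deep C-state in which the local APIC
 * timer stops: the CPU joins a broadcast channel so an HPET interrupt can
 * deliver its wakeup instead; hpet_broadcast_exit() undoes this afterwards.
 */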
void hpet_broadcast_enter(void)
{
    int cpu = smp_processor_id();
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);

    if ( this_cpu(timer_deadline) == 0 )
        return;

    if ( !ch )
        ch = hpet_get_channel(cpu);
    BUG_ON( !ch );

    ASSERT(!local_irq_is_enabled());
    spin_lock(&ch->lock);

    if ( hpet_attach_channel )
        hpet_attach_channel(cpu, ch);

    disable_APIC_timer();

    cpu_set(cpu, ch->cpumask);

    /* reprogram if current cpu expire time is nearer */
    if ( this_cpu(timer_deadline) < ch->next_event )
        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline), NOW(), 1);

    spin_unlock(&ch->lock);
}
void hpet_broadcast_exit(void)
{
    int cpu = smp_processor_id();
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);

    BUG_ON( !ch );

    spin_lock_irq(&ch->lock);

    if ( cpu_test_and_clear(cpu, ch->cpumask) )
    {
        /* Cancel any outstanding LAPIC event and re-enable interrupts. */
        reprogram_timer(0);
        enable_APIC_timer();

        /* Reprogram the deadline; trigger timer work now if it has passed. */
        if ( !reprogram_timer(per_cpu(timer_deadline, cpu)) )
            raise_softirq(TIMER_SOFTIRQ);

        if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
            reprogram_hpet_evt_channel(ch, STIME_MAX, 0, 0);
    }

    if ( hpet_detach_channel )
        hpet_detach_channel(cpu);

    spin_unlock_irq(&ch->lock);
}
int hpet_broadcast_is_available(void)
{
    return (legacy_hpet_event.event_handler == handle_hpet_broadcast
            || num_hpets_used > 0);
}

int hpet_legacy_irq_tick(void)
{
    if ( !legacy_hpet_event.event_handler )
        return 0;
    legacy_hpet_event.event_handler(&legacy_hpet_event);
    return 1;
}
u64 hpet_setup(void)
{
    static u64 hpet_rate;
    static u32 system_reset_latch;
    u32 hpet_id, hpet_period, cfg;
    int i;

    if ( system_reset_latch == system_reset_counter )
        return hpet_rate;
    system_reset_latch = system_reset_counter;

    if ( hpet_address == 0 )
        return 0;

    set_fixmap_nocache(FIX_HPET_BASE, hpet_address);

    hpet_id = hpet_read32(HPET_ID);
    if ( (hpet_id & HPET_ID_REV) == 0 )
    {
        printk("BAD HPET revision id.\n");
        return 0;
    }

    /* Check for sane period (100ps <= period <= 100ns). */
    hpet_period = hpet_read32(HPET_PERIOD);
    if ( (hpet_period > 100000000) || (hpet_period < 100000) )
    {
        printk("BAD HPET period %u.\n", hpet_period);
        return 0;
    }

    cfg = hpet_read32(HPET_CFG);
    cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
    hpet_write32(cfg, HPET_CFG);

    for ( i = 0; i <= ((hpet_id >> 8) & 31); i++ )
    {
        cfg = hpet_read32(HPET_Tn_CFG(i));
        cfg &= ~HPET_TN_ENABLE;
        hpet_write32(cfg, HPET_Tn_CFG(i));
    }

    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_ENABLE;
    hpet_write32(cfg, HPET_CFG);

    hpet_rate = 1000000000000000ULL; /* 10^15 */
    (void)do_div(hpet_rate, hpet_period);
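
    /*
     * Example (hypothetical reading): HPET_PERIOD is in femtoseconds, so a
     * typical period of 69841279 fs gives 10^15 / 69841279 ~= 14318179 Hz,
     * i.e. the classic 14.31818 MHz HPET.
     */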
    return hpet_rate;
}