ia64/xen-unstable

annotate xen/arch/ia64/xen/xentime.c @ 9405:29dfadcc5029

[IA64] Followup to xen time cleanup

Clean up the xen time handler. Tristan had #if 0'd some code because it seemed
redundant; that logic was in fact problematic and the cause of an intermittent
timer oops in dom0, so delete it now.

Also remove vcpu_wake, since waking the current vcpu achieves nothing
meaningful and simply wastes cpu cycles.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Mon Mar 27 15:32:08 2006 -0700 (2006-03-27)
parents 58a3ed82eee4
children 2b6e531dab38
rev   line source
djm@6458 1 /*
djm@6458 2 * xen/arch/ia64/time.c
djm@6458 3 *
djm@6458 4 * Copyright (C) 2005 Hewlett-Packard Co
djm@6458 5 * Dan Magenheimer <dan.magenheimer@hp.com>
djm@6458 6 */
djm@6458 7
djm@6458 8 #include <linux/config.h>
djm@6458 9
djm@6458 10 #include <linux/cpu.h>
djm@6458 11 #include <linux/init.h>
djm@6458 12 #include <linux/kernel.h>
djm@6458 13 #include <linux/module.h>
djm@6458 14 #include <linux/profile.h>
djm@6458 15 #include <linux/sched.h>
djm@6458 16 #include <linux/time.h>
djm@6458 17 #include <linux/interrupt.h>
djm@6458 18 #include <linux/efi.h>
djm@6458 19 #include <linux/profile.h>
djm@6458 20 #include <linux/timex.h>
djm@6458 21
djm@6458 22 #include <asm/machvec.h>
djm@6458 23 #include <asm/delay.h>
djm@6458 24 #include <asm/hw_irq.h>
djm@6458 25 #include <asm/ptrace.h>
djm@6458 26 #include <asm/sal.h>
djm@6458 27 #include <asm/sections.h>
djm@6458 28 #include <asm/system.h>
djm@6458 29 #include <asm/vcpu.h>
djm@6458 30 #include <linux/jiffies.h> // not included by xen/sched.h
djm@6458 31 #include <xen/softirq.h>
awilliam@9314 32 #include <xen/event.h>
djm@6458 33
awilliam@9005 34 /* FIXME: where these declarations should be there ? */
awilliam@9005 35 extern void ia64_init_itm(void);
awilliam@9005 36
/* Seqlock protecting readers of the system-time snapshot (stime_irq,
 * itc_at_irq) against concurrent updates from the timer interrupt on
 * the time-keeper cpu. */
seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

#define TIME_KEEPER_ID 0  /* cpu responsible for advancing system time */
unsigned long domain0_ready = 0;  /* set once dom0 is up; gates reprogram_timer() */
static s_time_t stime_irq = 0x0; /* System time at last 'time update' */
/* 32.32 fixed-point conversion factors between ITC cycles and nanoseconds;
 * computed in init_xen_time() from the calibrated ITC frequency. */
unsigned long itc_scale, ns_scale;
unsigned long itc_at_irq;  /* ITC value sampled at the last time update */
djm@6458 44
/* Convert an ITC cycle count to nanoseconds using the 32.32 fixed-point
 * factor itc_scale.
 *
 * We don't expect an absolute cycle value here, since then there would be
 * no way to prevent overflow for a large numerator. Normally this
 * conversion is used for relative offsets.
 */
u64 cycle_to_ns(u64 cycle)
{
    return (cycle * itc_scale) >> 32;
}
djm@7499 53
djm@7499 54 u64 ns_to_cycle(u64 ns)
djm@7499 55 {
djm@7499 56 return (ns * ns_scale) >> 32;
djm@7499 57 }
djm@7499 58
djm@6458 59 static inline u64 get_time_delta(void)
djm@6458 60 {
djm@6458 61 s64 delta_itc;
djm@6466 62 u64 cur_itc;
djm@6458 63
djm@6458 64 cur_itc = ia64_get_itc();
djm@6458 65
djm@6458 66 delta_itc = (s64)(cur_itc - itc_at_irq);
djm@6466 67
djm@6466 68 /* Ensure that the returned system time is monotonically increasing. */
djm@6458 69 if ( unlikely(delta_itc < 0) ) delta_itc = 0;
djm@6466 70 return cycle_to_ns(delta_itc);
djm@6458 71 }
djm@6458 72
djm@6458 73
djm@6458 74 s_time_t get_s_time(void)
djm@6458 75 {
djm@6458 76 s_time_t now;
awilliam@9005 77 unsigned long seq;
djm@6458 78
djm@6466 79 do {
djm@6466 80 seq = read_seqbegin(&xtime_lock);
djm@6466 81 now = stime_irq + get_time_delta();
djm@6466 82 } while (unlikely(read_seqretry(&xtime_lock, seq)));
djm@6458 83
djm@6458 84 return now;
djm@6458 85 }
djm@6458 86
/* Intentionally a no-op on ia64: dom0 manages system time directly,
 * so there is no per-domain wallclock state for Xen to refresh. */
void update_dom_time(struct vcpu *v)
{
}
djm@6458 92
djm@6458 93 /* Set clock to <secs,usecs> after 00:00:00 UTC, 1 January, 1970. */
djm@6458 94 void do_settime(unsigned long secs, unsigned long nsecs, u64 system_time_base)
djm@6458 95 {
djm@6466 96 /* If absolute system time is managed by dom0, there's no need for such
djm@6466 97 * action since only virtual itc/itm service is provided.
djm@6466 98 */
djm@6466 99 return;
djm@6458 100 }
djm@6458 101
/* Percpu timer tick handler (IA64_TIMER_VECTOR).
 * Delivers a pending virtual timer to the current vcpu, and — when the
 * real ITM deadline has passed — advances itm_next by whole itm_delta
 * ticks (the TIME_KEEPER cpu also accumulates system time), then
 * reprograms the ITM safely past "now" and raises TIMER_SOFTIRQ. */
irqreturn_t
xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
    unsigned long new_itm, old_itc;

#if 0
#define HEARTBEAT_FREQ 16   // period in seconds
#ifdef HEARTBEAT_FREQ
    static long count = 0;
    if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
        printf("Heartbeat... iip=%p\n", /*",psr.i=%d,pend=%d\n", */
               regs->cr_iip /*,
               VCPU(current,interrupt_delivery_enabled),
               VCPU(current,pending_interruption) */);
        count = 0;
    }
#endif
#endif

    /* Forward an expired virtual timer to the current (non-idle) vcpu. */
    if (!is_idle_domain(current->domain))
        if (vcpu_timer_expired(current)) {
            vcpu_pend_timer(current);
            // ensure another timer interrupt happens even if domain doesn't
            vcpu_set_next_timer(current);
        }

    new_itm = local_cpu_data->itm_next;

    /* Not yet past the programmed deadline: nothing to account for.
     * NOTE(review): VMX domains always fall through — presumably their
     * itm handling differs; confirm against the vmx timer code. */
    if (!VMX_DOMAIN(current) && !time_after(ia64_get_itc(), new_itm))
        return IRQ_HANDLED;

    /* Advance the deadline one itm_delta tick at a time until it is in
     * the future, accounting each elapsed tick. */
    while (1) {
        new_itm += local_cpu_data->itm_delta;

        if (smp_processor_id() == TIME_KEEPER_ID) {
            /*
             * Here we are in the timer irq handler. We have irqs locally
             * disabled, but we don't know if the timer_bh is running on
             * another CPU. We need to avoid to SMP race by acquiring the
             * xtime_lock.
             */
#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            write_seqlock(&xtime_lock);
#endif
#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            do_timer(regs);
#endif
            local_cpu_data->itm_next = new_itm;

            /* Updates system time (nanoseconds since boot). */
            old_itc = itc_at_irq;
            itc_at_irq = ia64_get_itc();
            stime_irq += cycle_to_ns(itc_at_irq - old_itc);

#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
            write_sequnlock(&xtime_lock);
#endif
        } else
            local_cpu_data->itm_next = new_itm;

        if (time_after(new_itm, ia64_get_itc()))
            break;
    }

    do {
        /*
         * If we're too close to the next clock tick for
         * comfort, we increase the safety margin by
         * intentionally dropping the next tick(s). We do NOT
         * update itm.next because that would force us to call
         * do_timer() which in turn would let our clock run
         * too fast (with the potentially devastating effect
         * of losing monotony of time).
         */
        while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
            new_itm += local_cpu_data->itm_delta;
        ia64_set_itm(new_itm);
        /* double check, in case we got hit by a (slow) PMI: */
    } while (time_after_eq(ia64_get_itc(), new_itm));
    raise_softirq(TIMER_SOFTIRQ);

    return IRQ_HANDLED;
}
djm@6458 185
/* Timer interrupt action registered on IA64_TIMER_VECTOR in
 * ia64_time_init().  The cast silences the handler-signature mismatch
 * between xen_timer_interrupt and the irqaction handler type. */
static struct irqaction xen_timer_irqaction = {
    .handler = (void *) xen_timer_interrupt,
    .name = "timer"
};
djm@6458 190
djm@6458 191 void __init
djm@6466 192 ia64_time_init (void)
djm@6458 193 {
djm@6458 194 register_percpu_irq(IA64_TIMER_VECTOR, &xen_timer_irqaction);
djm@6458 195 ia64_init_itm();
djm@6458 196 }
djm@6458 197
djm@6458 198
djm@6458 199 /* Late init function (after all CPUs are booted). */
djm@6458 200 int __init init_xen_time()
djm@6458 201 {
djm@6466 202 ia64_time_init();
djm@6458 203 itc_scale = 1000000000UL << 32 ;
djm@6458 204 itc_scale /= local_cpu_data->itc_freq;
djm@6466 205 ns_scale = (local_cpu_data->itc_freq << 32) / 1000000000UL;
djm@6458 206
djm@6458 207 /* System time ticks from zero. */
djm@6458 208 stime_irq = (s_time_t)0;
djm@6458 209 itc_at_irq = ia64_get_itc();
djm@6458 210
djm@6458 211 printk("Time init:\n");
djm@6458 212 printk(".... System Time: %ldns\n", NOW());
djm@6458 213 printk(".... scale: %16lX\n", itc_scale);
djm@6458 214
djm@6458 215 return 0;
djm@6458 216 }
djm@6458 217
/* Program the local cpu's ITM to fire at 'timeout' (system time, ns).
 * Returns 0 if 'timeout' is already in the past (caller must re-check),
 * 1 otherwise. */
int reprogram_timer(s_time_t timeout)
{
    struct vcpu *v = current;
    s_time_t expire;
    unsigned long seq, cur_itc, itm_next;

    /* Before dom0 is ready, or for a disarm request (timeout == 0),
     * report success without touching the hardware timer. */
    if (!domain0_ready || timeout == 0) return 1;

    /* Compute the target ITC value under the xtime seqlock so NOW()
     * and the ITC sample come from a consistent time snapshot. */
    do {
        seq = read_seqbegin(&xtime_lock);
        if ((expire = timeout - NOW()) < 0)
            return 0;

        cur_itc = ia64_get_itc();
        itm_next = cur_itc + ns_to_cycle(expire);
    } while (unlikely(read_seqretry(&xtime_lock, seq)));

    local_cpu_data->itm_next = itm_next;
    vcpu_set_next_timer(v);
    return 1;
}
djm@6458 239
kaf24@9284 240 void send_timer_event(struct vcpu *v)
kaf24@9284 241 {
kaf24@9284 242 send_guest_virq(v, VIRQ_TIMER);
kaf24@9284 243 }
djm@6458 244