
xen/arch/x86/hvm/vpt.c @ 16347:644e7577f6ee

x86, hvm: Fix typo in no-missed-tick-accounting timer mode.
From: Dave Winchell <dwinchell@virtualiron.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
Author:   Keir Fraser <keir@xensource.com>
Date:     Wed Nov 07 14:53:32 2007 +0000
Parents:  070da619e65e
Children: 8ff5bb70136d
/*
 * vpt.c: Virtual Platform Timer
 *
 * Copyright (c) 2006, Xiaowei Yang, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <xen/time.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vpt.h>
#include <asm/event.h>

#define mode_is(d, name) \
    ((d)->arch.hvm_domain.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)
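
/*
 * Two of the timer modes selectable via HVM_PARAM_TIMER_MODE are
 * distinguished in this file:
 *  - delay_for_missed_ticks: every tick is eventually delivered, and guest
 *    time is held back until the guest has caught up with its ticks;
 *  - no_missed_tick_accounting: missed ticks are discarded, and at most one
 *    tick is kept pending at any time.
 */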

/*
 * A periodic_time may be re-bound to a different VCPU at any time, so take
 * the lock of whichever VCPU currently owns it and re-check ownership once
 * the lock is held.
 */
static void pt_lock(struct periodic_time *pt)
{
    struct vcpu *v;

    for ( ; ; )
    {
        v = pt->vcpu;
        spin_lock(&v->arch.hvm_vcpu.tm_lock);
        if ( likely(pt->vcpu == v) )
            break;
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    }
}

static void pt_unlock(struct periodic_time *pt)
{
    spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
}

/*
 * Account for ticks that should have fired by now. Depending on the timer
 * mode, either collapse them all into a single pending interrupt or make
 * each of them pending; in both cases push the next deadline past now.
 */
static void pt_process_missed_ticks(struct periodic_time *pt)
{
    s_time_t missed_ticks, now = NOW();

    if ( pt->one_shot )
        return;

    missed_ticks = now - pt->scheduled;
    if ( missed_ticks <= 0 )
        return;

    if ( mode_is(pt->vcpu->domain, no_missed_tick_accounting) )
    {
        pt->pending_intr_nr = 1;
        pt->scheduled = now + pt->period;
    }
    else
    {
        missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
        pt->pending_intr_nr += missed_ticks;
        pt->scheduled += missed_ticks * pt->period;
    }
}
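
/*
 * Worked example for the missed-tick-accounting path above: with a 10ms
 * period and pt->scheduled 25ms in the past, missed_ticks becomes
 * 25ms/10ms + 1 = 3, so three extra ticks are made pending and
 * pt->scheduled moves 30ms forward, i.e. 5ms into the future.
 */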

/* In delay_for_missed_ticks mode, freeze guest time while v is descheduled. */
static void pt_freeze_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
}

/* Resume guest time from the point at which it was frozen. */
static void pt_thaw_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    if ( v->arch.hvm_vcpu.guest_time == 0 )
        return;

    hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
    v->arch.hvm_vcpu.guest_time = 0;
}

/*
 * Called when v is descheduled. A blocked (halted) VCPU is left alone so
 * that its timers can still wake it; otherwise stop every platform timer
 * and freeze guest time.
 */
void pt_save_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    if ( test_bit(_VPF_blocked, &v->pause_flags) )
        return;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        stop_timer(&pt->timer);

    pt_freeze_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

/*
 * Called when v is scheduled back in: catch up on ticks missed while it
 * was descheduled, reprogram each timer, and thaw guest time.
 */
void pt_restore_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    pt_thaw_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

/*
 * Xen timer callback: mark a tick pending, reprogram the next expiry for
 * periodic timers, and kick the VCPU so it notices the interrupt.
 */
static void pt_timer_fn(void *data)
{
    struct periodic_time *pt = data;

    pt_lock(pt);

    if ( mode_is(pt->vcpu->domain, no_missed_tick_accounting) )
        pt->pending_intr_nr = 1;
    else
        pt->pending_intr_nr++;

    if ( !pt->one_shot )
    {
        pt->scheduled += pt->period;
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    vcpu_kick(pt->vcpu);

    pt_unlock(pt);
}
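
/*
 * To illustrate the two branches above: if three periods elapse before the
 * guest can take the interrupt, the other timer modes accumulate
 * pending_intr_nr == 3 (one per expiry), whereas no_missed_tick_accounting
 * clamps it to 1, so the guest sees a single tick and the rest are
 * forgotten.
 */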

/*
 * Pick the pending, unmasked timer interrupt that is furthest behind in
 * delivery (smallest last_plt_gtime + period_cycles) and assert it.
 */
void pt_update_irq(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;
    uint64_t max_lag = -1ULL;
    int irq = -1;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        if ( !is_isa_irq_masked(v, pt->irq) && pt->pending_intr_nr &&
             ((pt->last_plt_gtime + pt->period_cycles) < max_lag) )
        {
            max_lag = pt->last_plt_gtime + pt->period_cycles;
            irq = pt->irq;
        }
    }

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( is_lvtt(v, irq) )
    {
        vlapic_set_irq(vcpu_vlapic(v), irq, 0);
    }
    else if ( irq >= 0 )
    {
        hvm_isa_irq_deassert(v->domain, irq);
        hvm_isa_irq_assert(v->domain, irq);
    }
}

/*
 * Map an interrupt the guest is acknowledging back to the periodic timer
 * that raised it; return NULL if it is not a timer interrupt.
 */
static struct periodic_time *is_pt_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;
    struct RTCState *rtc = &v->domain->arch.hvm_domain.pl_time.vrtc;
    int vector;

    list_for_each_entry ( pt, head, list )
    {
        if ( !pt->pending_intr_nr )
            continue;

        if ( is_lvtt(v, pt->irq) )
        {
            if ( pt->irq != intack.vector )
                continue;
            return pt;
        }

        vector = get_isa_irq_vector(v, pt->irq, intack.source);

        /* The RTC IRQ needs special care. */
        if ( (intack.vector != vector) ||
             ((pt->irq == 8) && !is_rtc_periodic_irq(rtc)) )
            continue;

        return pt;
    }

    return NULL;
}

/*
 * Called once the guest has acknowledged a timer interrupt: consume one
 * pending tick, advance the delivered-tick clock, and invoke the device
 * model's callback outside the lock.
 */
void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
{
    struct periodic_time *pt;
    time_cb *cb;
    void *cb_priv;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt = is_pt_irq(v, intack);
    if ( pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    if ( pt->one_shot )
    {
        pt->enabled = 0;
        list_del(&pt->list);
    }
    else
    {
        pt->pending_intr_nr--;
        if ( mode_is(v->domain, no_missed_tick_accounting) )
            pt->last_plt_gtime = hvm_get_guest_time(v);
        else
            pt->last_plt_gtime += pt->period_cycles;
    }

    if ( mode_is(v->domain, delay_for_missed_ticks) &&
         (hvm_get_guest_time(v) < pt->last_plt_gtime) )
        hvm_set_guest_time(v, pt->last_plt_gtime);

    cb = pt->cb;
    cb_priv = pt->priv;

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( cb != NULL )
        cb(v, cb_priv);
}
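
/*
 * The clamp above is delay_for_missed_ticks in practice: last_plt_gtime
 * tracks where guest time ought to be after the tick just delivered, and a
 * guest clock that has fallen behind that point is snapped forward to it.
 */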

/* Discard all pending ticks and restart every timer one period from now. */
void pt_reset(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt->pending_intr_nr = 0;
        pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
        pt->scheduled = NOW() + pt->period;
        set_timer(&pt->timer, pt->scheduled);
    }

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

/* Move all of v's platform timers to the physical CPU it now runs on. */
void pt_migrate(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        migrate_timer(&pt->timer, v->processor);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void create_periodic_time(
    struct vcpu *v, struct periodic_time *pt, uint64_t period,
    uint8_t irq, char one_shot, time_cb *cb, void *data)
{
    destroy_periodic_time(pt);

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt->enabled = 1;
    pt->pending_intr_nr = 0;

    /* Periodic timer must be at least 0.9ms. */
    if ( (period < 900000) && !one_shot )
    {
        gdprintk(XENLOG_WARNING,
                 "HVM_PlatformTime: program too small period %"PRIu64"\n",
                 period);
        period = 900000;
    }

    pt->period = period;
    pt->vcpu = v;
    pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
    pt->irq = irq;
    pt->period_cycles = (u64)period * cpu_khz / 1000000L;
    pt->one_shot = one_shot;
    pt->scheduled = NOW() + period;
    /*
     * Offset LAPIC ticks from other timer ticks. Otherwise guests which use
     * LAPIC ticks for process accounting can see long sequences of process
     * ticks incorrectly accounted to interrupt processing.
     */
    if ( is_lvtt(v, irq) )
        pt->scheduled += period >> 1;
    pt->cb = cb;
    pt->priv = data;

    list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);

    init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
    set_timer(&pt->timer, pt->scheduled);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void destroy_periodic_time(struct periodic_time *pt)
{
    if ( !pt->enabled )
        return;

    pt_lock(pt);
    pt->enabled = 0;
    list_del(&pt->list);
    pt_unlock(pt);

    /*
     * pt_timer_fn() can run until this kill_timer() returns. We must do this
     * outside pt_lock() otherwise we can deadlock with pt_timer_fn().
     */
    kill_timer(&pt->timer);
}
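
/*
 * Usage sketch (hypothetical caller; the real callers are the virtual
 * platform devices, which hand in their own callback and private data):
 *
 *     static void tick_cb(struct vcpu *v, void *priv)
 *     {
 *         ... per-tick device bookkeeping ...
 *     }
 *
 *     create_periodic_time(v, &pt, MILLISECS(10), irq, 0, tick_cb, priv);
 *     ...
 *     destroy_periodic_time(&pt);
 *
 * The period is in nanoseconds (MILLISECS() is Xen's ns helper), so
 * MILLISECS(10) programs a 100Hz tick; one_shot == 0 makes it periodic.
 */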