
view xen/arch/x86/hvm/vpt.c @ 15388:50358c4b37f4

hvm: Support injection of virtual NMIs and clean up ExtInt handling in general.

Signed-off-by: Keir Fraser <keir@xensource.com>
Author: kfraser@localhost.localdomain
Date:   Wed Jun 20 11:50:16 2007 +0100
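
For orientation, callers such as the virtual PIT and RTC drive this file
through create_periodic_time()/destroy_periodic_time(). Below is a minimal
sketch of such a caller, assuming only the declarations in asm/hvm/vpt.h;
tick_fired, the 10ms period, and the example_* names are illustrative, not
actual Xen code:

#include <asm/hvm/vpt.h>

/* Hypothetical callback, run after each tick is injected (a time_cb). */
static void tick_fired(struct vcpu *v, void *priv)
{
    /* Device-model bookkeeping for one delivered tick would go here. */
}

/* Arm a 10ms periodic tick on ISA IRQ 0 for vcpu v. */
static void example_start_tick(struct vcpu *v, struct periodic_time *pt)
{
    /* period is in nanoseconds; one_shot = 0 requests a periodic timer. */
    create_periodic_time(v, pt, 10000000, 0, 0, tick_fired, NULL);
}

/* Tear the tick back down, e.g. on device reset. */
static void example_stop_tick(struct periodic_time *pt)
{
    destroy_periodic_time(pt);
}
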
/*
 * vpt.c: Virtual Platform Timer
 *
 * Copyright (c) 2006, Xiaowei Yang, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <xen/time.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vpt.h>
#include <asm/event.h>
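
/*
 * Take the timer-list lock of the vcpu owning this periodic timer. The
 * owner can change before the lock is acquired, so re-check pt->vcpu
 * after locking and retry if it moved.
 */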
static void pt_lock(struct periodic_time *pt)
{
    struct vcpu *v;

    for ( ; ; )
    {
        v = pt->vcpu;
        spin_lock(&v->arch.hvm_vcpu.tm_lock);
        if ( likely(pt->vcpu == v) )
            break;
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    }
}

static void pt_unlock(struct periodic_time *pt)
{
    spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
}
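
/*
 * Account for ticks that have passed since this timer was last due:
 * queue them as pending interrupts (collapsed to a single one if more
 * than 1000 were missed) and advance pt->scheduled past NOW().
 */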
static void missed_ticks(struct periodic_time *pt)
{
    s_time_t missed_ticks;

    missed_ticks = NOW() - pt->scheduled;
    if ( missed_ticks <= 0 )
        return;

    missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
    if ( missed_ticks > 1000 )
    {
        /* Too many missed ticks: deliver only one. TODO: adjust guest time
         * to compensate for the dropped ticks. */
        pt->pending_intr_nr++;
    }
    else
    {
        pt->pending_intr_nr += missed_ticks;
    }

    pt->scheduled += missed_ticks * pt->period;
}
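
/*
 * Freeze guest time while a vcpu is descheduled: snapshot its current
 * guest time and stop all of its platform timers. A blocked vcpu is
 * skipped, so its time and timers keep running.
 */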
void pt_freeze_time(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    if ( test_bit(_VPF_blocked, &v->pause_flags) )
        return;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);

    list_for_each_entry ( pt, head, list )
        stop_timer(&pt->timer);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
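
/*
 * Thaw guest time when a vcpu is rescheduled: restore the snapshotted
 * guest time, account for ticks missed in the meantime, and restart the
 * platform timers.
 */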
void pt_thaw_time(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    if ( v->arch.hvm_vcpu.guest_time )
    {
        hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
        v->arch.hvm_vcpu.guest_time = 0;

        list_for_each_entry ( pt, head, list )
        {
            missed_ticks(pt);
            set_timer(&pt->timer, pt->scheduled);
        }
    }

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
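
/*
 * Xen timer callback: queue one tick, pick up any further missed ticks,
 * rearm the timer unless it is one-shot, and kick the target vcpu so it
 * notices the pending interrupt.
 */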
static void pt_timer_fn(void *data)
{
    struct periodic_time *pt = data;

    pt_lock(pt);

    pt->pending_intr_nr++;
    pt->scheduled += pt->period;

    missed_ticks(pt);

    if ( !pt->one_shot )
        set_timer(&pt->timer, pt->scheduled);

    vcpu_kick(pt->vcpu);

    pt_unlock(pt);
}
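
/*
 * Scan this vcpu's timers for pending interrupts and assert the one
 * whose next due time (last_plt_gtime + period_cycles) is earliest:
 * LVT timer interrupts go via the vlapic, others via the ISA irq path.
 */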
void pt_update_irq(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;
    uint64_t max_lag = -1ULL;
    int irq = -1;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        if ( !is_isa_irq_masked(v, pt->irq) && pt->pending_intr_nr &&
             ((pt->last_plt_gtime + pt->period_cycles) < max_lag) )
        {
            max_lag = pt->last_plt_gtime + pt->period_cycles;
            irq = pt->irq;
        }
    }

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( is_lvtt(v, irq) )
    {
        vlapic_set_irq(vcpu_vlapic(v), irq, 0);
    }
    else if ( irq >= 0 )
    {
        hvm_isa_irq_deassert(v->domain, irq);
        hvm_isa_irq_assert(v->domain, irq);
    }
}
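
/*
 * Return the periodic timer, if any, whose pending interrupt was just
 * injected as 'vector' from interrupt source 'src'.
 */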
static struct periodic_time *is_pt_irq(
    struct vcpu *v, int vector, enum hvm_intack src)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;
    struct RTCState *rtc = &v->domain->arch.hvm_domain.pl_time.vrtc;
    int vec;

    list_for_each_entry ( pt, head, list )
    {
        if ( !pt->pending_intr_nr )
            continue;

        if ( is_lvtt(v, pt->irq) )
        {
            if ( pt->irq != vector )
                continue;
            return pt;
        }

        vec = get_isa_irq_vector(v, pt->irq, src);

        /* The RTC irq (ISA irq 8) needs special care. */
        if ( (vector != vec) || (pt->irq == 8 && !is_rtc_periodic_irq(rtc)) )
            continue;

        return pt;
    }

    return NULL;
}
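
/*
 * Called after a timer interrupt has been injected: consume one pending
 * tick, advance the vcpu's guest time by one period, and invoke the
 * registered callback outside the lock.
 */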
void pt_intr_post(struct vcpu *v, int vector, enum hvm_intack src)
{
    struct periodic_time *pt;
    time_cb *cb;
    void *cb_priv;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt = is_pt_irq(v, vector, src);
    if ( pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    ASSERT(pt->vcpu == v);

    pt->pending_intr_nr--;
    pt->last_plt_gtime += pt->period_cycles;

    if ( hvm_get_guest_time(v) < pt->last_plt_gtime )
        hvm_set_guest_time(v, pt->last_plt_gtime);

    cb = pt->cb;
    cb_priv = pt->priv;

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( cb != NULL )
        cb(v, cb_priv);
}
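
/* Discard pending ticks and restart every timer on this vcpu's list. */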
void pt_reset(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt->pending_intr_nr = 0;
        pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
        pt->scheduled = NOW() + pt->period;
        set_timer(&pt->timer, pt->scheduled);
    }

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
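
/* Move this vcpu's platform timers to the physical CPU it now runs on. */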
void pt_migrate(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        migrate_timer(&pt->timer, v->processor);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
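
/*
 * (Re)arm a periodic or one-shot timer: tear down any previous instance,
 * clamp the period to at least 0.9ms, link the timer onto the vcpu's
 * list, and start the underlying Xen timer.
 */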
void create_periodic_time(
    struct vcpu *v, struct periodic_time *pt, uint64_t period,
    uint8_t irq, char one_shot, time_cb *cb, void *data)
{
    destroy_periodic_time(pt);

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt->enabled = 1;
    pt->pending_intr_nr = 0;

    if ( period < 900000 ) /* < 0.9 ms */
    {
        gdprintk(XENLOG_WARNING,
                 "HVM_PlatformTime: programmed period %"PRIu64"ns too small\n",
                 period);
        period = 900000; /* force to 0.9ms */
    }
    pt->period = period;
    pt->vcpu = v;
    pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
    pt->irq = irq;
    pt->period_cycles = (u64)period * cpu_khz / 1000000L;
    pt->one_shot = one_shot;
    pt->scheduled = NOW() + period;
    pt->cb = cb;
    pt->priv = data;

    list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);

    init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
    set_timer(&pt->timer, pt->scheduled);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
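
/* Disable a timer and unlink it from its vcpu's timer list. */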
void destroy_periodic_time(struct periodic_time *pt)
{
    if ( !pt->enabled )
        return;

    pt_lock(pt);
    pt->enabled = 0;
    list_del(&pt->list);
    pt_unlock(pt);

    /*
     * pt_timer_fn() can run until this kill_timer() returns. We must do this
     * outside pt_lock() otherwise we can deadlock with pt_timer_fn().
     */
    kill_timer(&pt->timer);
}