ia64/xen-unstable

view xen/arch/x86/hvm/intercept.c @ 9334:56a775219c88

This patch fixes an HVM/VMX time resolution issue that causes IA32E to
complain about "lost ticks" occasionally, and an APIC timer calibration issue.

Signed-off-by: Xiaowei Yang <xiaowei.yang@intel.com>
Signed-off-by: Eddie Dong <eddie.dong@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Sun Mar 19 18:52:20 2006 +0100 (2006-03-19)
parents b5bb9920bf48
children c1d53788a25e
line source
1 /*
2 * intercept.c: Handle performance critical I/O packets in hypervisor space
3 *
4 * Copyright (c) 2004, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 */
20 #include <xen/config.h>
21 #include <xen/types.h>
22 #include <xen/sched.h>
23 #include <asm/regs.h>
24 #include <asm/hvm/hvm.h>
25 #include <asm/hvm/support.h>
26 #include <asm/hvm/domain.h>
27 #include <xen/lib.h>
28 #include <xen/sched.h>
29 #include <asm/current.h>
30 #include <io_ports.h>
31 #include <xen/event.h>
/* MMIO handlers implemented inside Xen (defined in their own source
 * files); currently the local APIC and the IO-APIC. */
extern struct hvm_mmio_handler vlapic_mmio_handler;
extern struct hvm_mmio_handler vioapic_mmio_handler;

/* Number of entries in hvm_mmio_handlers[] below. */
#define HVM_MMIO_HANDLER_NR 2

/* Dispatch table walked by hvm_mmio_intercept(). */
struct hvm_mmio_handler *hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
{
    &vlapic_mmio_handler,
    &vioapic_mmio_handler
};
/*
 * Perform an MMIO access on behalf of the guest using the supplied
 * read/write handlers, completing the ioreq held in the vcpu's shared
 * I/O page.  Crashes the domain on a missing shared page or an
 * unrecognised request type.
 *
 * NOTE(review): 'p' and 'req' appear to alias the same ioreq (callers
 * pass the shared-page request, and req is fetched from the same vcpu's
 * shared page), which is why the copy loops mix p->u.pdata/p->size with
 * req->addr/req->size — confirm before changing either.
 */
static inline void hvm_mmio_access(struct vcpu *v,
                                   ioreq_t *p,
                                   hvm_mmio_read_t read_handler,
                                   hvm_mmio_write_t write_handler)
{
    ioreq_t *req;
    vcpu_iodata_t *vio = get_vio(v->domain, v->vcpu_id);
    /* NOTE(review): tmp1/tmp2 are 32-bit while u.data is wider —
     * truncation in the AND/OR/XOR paths; verify this is intended. */
    unsigned int tmp1, tmp2;
    unsigned long data;

    if (vio == NULL) {
        printk("vlapic_access: bad shared page\n");
        domain_crash_synchronous();
    }

    req = &vio->vp_ioreq;

    switch (req->type) {
    case IOREQ_TYPE_COPY:
    {
        /* df set => string op moves downwards through memory. */
        int sign = (req->df) ? -1 : 1, i;

        if (!req->pdata_valid) {
            /* Single datum carried directly in req->u.data. */
            if (req->dir == IOREQ_READ){
                req->u.data = read_handler(v, req->addr, req->size);
            } else { /* req->dir != IOREQ_READ */
                write_handler(v, req->addr, req->size, req->u.data);
            }
        } else { /* !req->pdata_valid */
            /* Data lives in guest memory at p->u.pdata; copy one item of
             * req->size bytes per iteration, stepping by 'sign'. */
            if (req->dir == IOREQ_READ) {
                for (i = 0; i < req->count; i++) {
                    data = read_handler(v,
                                        req->addr + (sign * i * req->size),
                                        req->size);
                    hvm_copy(&data,
                             (unsigned long)p->u.pdata + (sign * i * req->size),
                             p->size,
                             HVM_COPY_OUT);
                }
            } else { /* !req->dir == IOREQ_READ */
                for (i = 0; i < req->count; i++) {
                    hvm_copy(&data,
                             (unsigned long)p->u.pdata + (sign * i * req->size),
                             p->size,
                             HVM_COPY_IN);
                    write_handler(v,
                                  req->addr + (sign * i * req->size),
                                  req->size, data);
                }
            }
        }
        break;
    }

    /* Read-modify-write ops: the value read is always returned in
     * u.data; the combined value is written back only for writes. */
    case IOREQ_TYPE_AND:
        tmp1 = read_handler(v, req->addr, req->size);
        if (req->dir == IOREQ_WRITE) {
            tmp2 = tmp1 & (unsigned long) req->u.data;
            write_handler(v, req->addr, req->size, tmp2);
        }
        req->u.data = tmp1;
        break;

    case IOREQ_TYPE_OR:
        tmp1 = read_handler(v, req->addr, req->size);
        if (req->dir == IOREQ_WRITE) {
            tmp2 = tmp1 | (unsigned long) req->u.data;
            write_handler(v, req->addr, req->size, tmp2);
        }
        req->u.data = tmp1;
        break;

    case IOREQ_TYPE_XOR:
        tmp1 = read_handler(v, req->addr, req->size);
        if (req->dir == IOREQ_WRITE) {
            tmp2 = tmp1 ^ (unsigned long) req->u.data;
            write_handler(v, req->addr, req->size, tmp2);
        }
        req->u.data = tmp1;
        break;

    default:
        printk("error ioreq type for local APIC %x\n", req->type);
        domain_crash_synchronous();
        break;
    }
}
133 int hvm_mmio_intercept(ioreq_t *p)
134 {
135 struct vcpu *v = current;
136 int i;
138 /* XXX currently only APIC use intercept */
139 if ( !hvm_apic_support(v->domain) )
140 return 0;
142 for ( i = 0; i < HVM_MMIO_HANDLER_NR; i++ ) {
143 if ( hvm_mmio_handlers[i]->check_handler(v, p->addr) ) {
144 hvm_mmio_access(v, p,
145 hvm_mmio_handlers[i]->read_handler,
146 hvm_mmio_handlers[i]->write_handler);
147 return 1;
148 }
149 }
150 return 0;
151 }
153 /*
154 * Check if the request is handled inside xen
155 * return value: 0 --not handled; 1 --handled
156 */
157 int hvm_io_intercept(ioreq_t *p, int type)
158 {
159 struct vcpu *v = current;
160 struct hvm_io_handler *handler =
161 &(v->domain->arch.hvm_domain.io_handler);
162 int i;
163 unsigned long addr, size;
165 for (i = 0; i < handler->num_slot; i++) {
166 if( type != handler->hdl_list[i].type)
167 continue;
168 addr = handler->hdl_list[i].addr;
169 size = handler->hdl_list[i].size;
170 if (p->addr >= addr &&
171 p->addr < addr + size)
172 return handler->hdl_list[i].action(p);
173 }
174 return 0;
175 }
177 int register_io_handler(unsigned long addr, unsigned long size,
178 intercept_action_t action, int type)
179 {
180 struct vcpu *v = current;
181 struct hvm_io_handler *handler =
182 &(v->domain->arch.hvm_domain.io_handler);
183 int num = handler->num_slot;
185 if (num >= MAX_IO_HANDLER) {
186 printk("no extra space, register io interceptor failed!\n");
187 domain_crash_synchronous();
188 }
190 handler->hdl_list[num].addr = addr;
191 handler->hdl_list[num].size = size;
192 handler->hdl_list[num].action = action;
193 handler->hdl_list[num].type = type;
194 handler->num_slot++;
196 return 1;
197 }
/*
 * Recompute the current PIT counter value (vpit->count) from the time
 * elapsed since the last interrupt injection.  The counter counts down
 * from init_val at PIT_FREQ, wrapping modulo init_val.
 */
static void pit_cal_count(struct hvm_virpit *vpit)
{
    /* NOTE(review): the (unsigned int) cast truncates the elapsed ns to
     * 32 bits (~4.3 s) — presumably deliberate since one PIT period is
     * far shorter, but verify before removing. */
    u64 nsec_delta = (unsigned int)((NOW() - vpit->inject_point));

    if (nsec_delta > vpit->period)
        HVM_DBG_LOG(DBG_LEVEL_1,
                    "HVM_PIT: long time has passed from last injection!");

    /* A zero reload value would make the modulo below divide by zero. */
    if(vpit->init_val == 0)
    {
        printk("PIT init value == 0!\n");
        domain_crash_synchronous();
    }

    vpit->count = vpit->init_val
        - ((nsec_delta * PIT_FREQ / 1000000000ULL) % vpit->init_val);
}
217 static void pit_latch_io(struct hvm_virpit *vpit)
218 {
219 pit_cal_count(vpit);
221 switch(vpit->read_state) {
222 case MSByte:
223 vpit->count_MSB_latched=1;
224 break;
225 case LSByte:
226 vpit->count_LSB_latched=1;
227 break;
228 case LSByte_multiple:
229 vpit->count_LSB_latched=1;
230 vpit->count_MSB_latched=1;
231 break;
232 case MSByte_multiple:
233 HVM_DBG_LOG(DBG_LEVEL_1,
234 "HVM_PIT: latch PIT counter before MSB_multiple!");
235 vpit->read_state=LSByte_multiple;
236 vpit->count_LSB_latched=1;
237 vpit->count_MSB_latched=1;
238 break;
239 default:
240 domain_crash_synchronous();
241 }
242 }
244 static int pit_read_io(struct hvm_virpit *vpit)
245 {
246 if(vpit->count_LSB_latched) {
247 /* Read Least Significant Byte */
248 if(vpit->read_state==LSByte_multiple) {
249 vpit->read_state=MSByte_multiple;
250 }
251 vpit->count_LSB_latched=0;
252 return (vpit->count & 0xFF);
253 } else if(vpit->count_MSB_latched) {
254 /* Read Most Significant Byte */
255 if(vpit->read_state==MSByte_multiple) {
256 vpit->read_state=LSByte_multiple;
257 }
258 vpit->count_MSB_latched=0;
259 return ((vpit->count>>8) & 0xFF);
260 } else {
261 /* Unlatched Count Read */
262 HVM_DBG_LOG(DBG_LEVEL_1, "HVM_PIT: unlatched read");
263 pit_cal_count(vpit);
264 if(!(vpit->read_state & 0x1)) {
265 /* Read Least Significant Byte */
266 if(vpit->read_state==LSByte_multiple) {
267 vpit->read_state=MSByte_multiple;
268 }
269 return (vpit->count & 0xFF);
270 } else {
271 /* Read Most Significant Byte */
272 if(vpit->read_state==MSByte_multiple) {
273 vpit->read_state=LSByte_multiple;
274 }
275 return ((vpit->count>>8) & 0xFF);
276 }
277 }
278 }
280 /* hvm_io_assist light-weight version, specific to PIT DM */
281 static void resume_pit_io(ioreq_t *p)
282 {
283 struct cpu_user_regs *regs = guest_cpu_user_regs();
284 unsigned long old_eax = regs->eax;
285 p->state = STATE_INVALID;
287 switch(p->size) {
288 case 1:
289 regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
290 break;
291 case 2:
292 regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
293 break;
294 case 4:
295 regs->eax = (p->u.data & 0xffffffff);
296 break;
297 default:
298 BUG();
299 }
300 }
302 /* the intercept action for PIT DM retval:0--not handled; 1--handled */
303 int intercept_pit_io(ioreq_t *p)
304 {
305 struct vcpu *v = current;
306 struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
308 if (p->size != 1 ||
309 p->pdata_valid ||
310 p->type != IOREQ_TYPE_PIO)
311 return 0;
313 if (p->addr == PIT_MODE &&
314 p->dir == 0 && /* write */
315 ((p->u.data >> 4) & 0x3) == 0 && /* latch command */
316 ((p->u.data >> 6) & 0x3) == (vpit->channel)) {/* right channel */
317 pit_latch_io(vpit);
318 return 1;
319 }
321 if (p->addr == (PIT_CH0 + vpit->channel) &&
322 p->dir == 1) { /* read */
323 p->u.data = pit_read_io(vpit);
324 resume_pit_io(p);
325 return 1;
326 }
328 return 0;
329 }
/* hooks function for the HLT instruction emulation wakeup */
void hlt_timer_fn(void *data)
{
    struct vcpu *v = data;

    /* Kick the vcpu's I/O event channel to wake it from emulated HLT. */
    evtchn_set_pending(v, iopacket_port(v));
}
339 static __inline__ void missed_ticks(struct hvm_virpit*vpit)
340 {
341 int missed_ticks;
343 missed_ticks = (NOW() - vpit->scheduled)/(s_time_t) vpit->period;
344 if ( missed_ticks++ >= 0 ) {
345 vpit->pending_intr_nr += missed_ticks;
346 vpit->scheduled += missed_ticks * vpit->period;
347 }
348 }
350 /* hooks function for the PIT when the guest is active */
351 static void pit_timer_fn(void *data)
352 {
353 struct vcpu *v = data;
354 struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
356 /* pick up missed timer tick */
357 missed_ticks(vpit);
358 if ( test_bit(_VCPUF_running, &v->vcpu_flags) ) {
359 set_timer(&vpit->pit_timer, vpit->scheduled);
360 }
361 }
363 /* pick up missed timer ticks at deactive time */
364 void pickup_deactive_ticks(struct hvm_virpit *vpit)
365 {
366 if ( !active_timer(&(vpit->pit_timer)) ) {
367 missed_ticks(vpit);
368 set_timer(&vpit->pit_timer, vpit->scheduled);
369 }
370 }
/* Only some PIT operations such as load init counter need a hypervisor hook.
 * leave all other operations in user space DM
 *
 * Called when the device model returns a response with state
 * STATE_IORESP_HOOK: (re)programs the in-Xen virtual PIT from the
 * response payload and arms the periodic timer.
 */
void hvm_hooks_assist(struct vcpu *v)
{
    vcpu_iodata_t *vio = get_vio(v->domain, v->vcpu_id);
    ioreq_t *p = &vio->vp_ioreq;
    struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
    int rw_mode, reinit = 0;

    /* load init count*/
    if (p->state == STATE_IORESP_HOOK) {
        /* set up actimer, handle re-init */
        if ( active_timer(&(vpit->pit_timer)) ) {
            /* Guest reprogrammed an already-running PIT. */
            HVM_DBG_LOG(DBG_LEVEL_1, "HVM_PIT: guest reset PIT with channel %lx!\n", (unsigned long) ((p->u.data >> 24) & 0x3) );
            stop_timer(&(vpit->pit_timer));
            reinit = 1;
        }
        else {
            /* First use: create the timer bound to this vcpu's CPU. */
            init_timer(&vpit->pit_timer, pit_timer_fn, v, v->processor);
        }

        /* init count for this channel: low 16 bits of the payload */
        vpit->init_val = (p->u.data & 0xFFFF) ;
        /* frequency(ns) of pit */
        vpit->period = DIV_ROUND(((vpit->init_val) * 1000000000ULL), PIT_FREQ);
        HVM_DBG_LOG(DBG_LEVEL_1,"HVM_PIT: guest set init pit freq:%u ns, initval:0x%x\n", vpit->period, vpit->init_val);
        if (vpit->period < 900000) { /* < 0.9 ms */
            /* Clamp absurdly fast rates to 1 ms to bound injection load. */
            printk("HVM_PIT: guest programmed too small an init_val: %x\n",
                   vpit->init_val);
            vpit->period = 1000000;
        }
        vpit->period_cycles = (u64)vpit->period * cpu_khz / 1000000L;
        printk("HVM_PIT: guest freq in cycles=%lld\n",(long long)vpit->period_cycles);

        /* Channel number carried in bits 25:24 of the payload. */
        vpit->channel = ((p->u.data >> 24) & 0x3);
        vpit->first_injected = 0;

        /* Reprogramming drops any pending latched bytes. */
        vpit->count_LSB_latched = 0;
        vpit->count_MSB_latched = 0;

        /* Access mode in bits 27:26: 1=LSB, 2=MSB, 3=LSB then MSB. */
        rw_mode = ((p->u.data >> 26) & 0x3);
        switch(rw_mode) {
        case 0x1:
            vpit->read_state=LSByte;
            break;
        case 0x2:
            vpit->read_state=MSByte;
            break;
        case 0x3:
            vpit->read_state=LSByte_multiple;
            break;
        default:
            printk("HVM_PIT:wrong PIT rw_mode!\n");
            break;
        }

        /* Arm the first expiry one full period from now. */
        vpit->scheduled = NOW() + vpit->period;
        set_timer(&vpit->pit_timer, vpit->scheduled);

        /*restore the state*/
        p->state = STATE_IORESP_READY;

        /* register handler to intercept the PIT io when vm_exit */
        if (!reinit) {
            register_portio_handler(0x40, 4, intercept_pit_io);
        }
    }
}
443 /*
444 * Local variables:
445 * mode: C
446 * c-set-style: "BSD"
447 * c-basic-offset: 4
448 * tab-width: 4
449 * indent-tabs-mode: nil
450 * End:
451 */