ia64/xen-unstable

view tools/ioemu/target-i386-dm/helper2.c @ 7816:3864566bc833

This patch fixes a keyboard issue for Linux 2.6 kernels in VMX guests.
The Linux 2.6 kernel keyboard driver uses a timer to poll the kbd state;
in a virtualization environment, the kbd interrupt injection is very
likely to happen just after the kbd state query in the kbd timer, which
would reverse the sequence of scan codes.
Also fix env->send_event SMP issue.

Signed-off-by: Xiaofeng Ling <xiaofeng.ling@intel.com>
Signed-off-by: Edwin Zhai <edwin.zhai@intel.com>
Signed-off-by: Eddion Dong <eddie.dong@intel.com>
Signed-off-by: Asit Mallick <asit.k.mallick@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Nov 15 11:34:29 2005 +0100 (2005-11-15)
parents 0cae0c6436f5
children 6e3e98e1c182
line source
1 /*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
21 /*
22 * Main cpu loop for handling I/O requests coming from a virtual machine
23 * Copyright 2004, Intel Corporation.
24 *
25 * This program is free software; you can redistribute it and/or modify it
26 * under the terms and conditions of the GNU Lesser General Public License,
27 * version 2.1, as published by the Free Software Foundation.
28 *
29 * This program is distributed in the hope it will be useful, but WITHOUT
30 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
31 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
32 * more details.
33 *
34 * You should have received a copy of the GNU Lesser General Public License
35 * along with this program; if not, write to the Free Software Foundation,
36 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307 USA.
37 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>
#include <limits.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include <xenctrl.h>
#include <xen/io/ioreq.h>
#include <xen/linux/evtchn.h>

#include "cpu.h"
#include "exec-all.h"
#include "vl.h"
58 extern int domid;
60 void *shared_vram;
62 shared_iopage_t *shared_page = NULL;
63 extern int reset_requested;
65 CPUX86State *cpu_86_init(void)
66 {
67 CPUX86State *env;
68 static int inited;
70 cpu_exec_init();
72 env = malloc(sizeof(CPUX86State));
73 if (!env)
74 return NULL;
75 memset(env, 0, sizeof(CPUX86State));
76 /* init various static tables */
77 if (!inited) {
78 inited = 1;
79 }
80 cpu_single_env = env;
81 cpu_reset(env);
82 return env;
83 }
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    /* Intentionally a no-op in the device model: there is no emulated
       CPU register state here to reset. */
}
/* Release the CPU state object allocated by cpu_86_init()/cpu_init(). */
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
/* Register-state dump hook: a no-op in the device model, which keeps
   no emulated register state to print. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
}
102 /***********************************************************/
103 /* x86 mmu */
104 /* XXX: add PGE support */
106 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
107 {
108 a20_state = (a20_state != 0);
109 if (a20_state != ((env->a20_mask >> 20) & 1)) {
110 #if defined(DEBUG_MMU)
111 printf("A20 update: a20=%d\n", a20_state);
112 #endif
113 env->a20_mask = 0xffefffff | (a20_state << 20);
114 }
115 }
/* Debugger physical-page lookup: addresses are returned unchanged
   (identity mapping) in the device model. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
122 //the evtchn fd for polling
123 int evtchn_fd = -1;
124 //the evtchn port for polling the notification, should be inputed as bochs's parameter
125 uint16_t ioreq_remote_port, ioreq_local_port;
127 //some functions to handle the io req packet
128 void
129 sp_info()
130 {
131 ioreq_t *req;
133 req = &(shared_page->vcpu_iodata[0].vp_ioreq);
134 term_printf("event port: %d\n", shared_page->sp_global.eport);
135 term_printf("req state: %x, pvalid: %x, addr: %llx, data: %llx, count: %llx, size: %llx\n", req->state, req->pdata_valid, req->addr, req->u.data, req->count, req->size);
136 }
138 //get the ioreq packets from share mem
139 ioreq_t* __cpu_get_ioreq(void)
140 {
141 ioreq_t *req;
143 req = &(shared_page->vcpu_iodata[0].vp_ioreq);
144 if (req->state == STATE_IOREQ_READY) {
145 req->state = STATE_IOREQ_INPROCESS;
146 } else {
147 fprintf(logfile, "False I/O request ... in-service already: %x, pvalid: %x,port: %llx, data: %llx, count: %llx, size: %llx\n", req->state, req->pdata_valid, req->addr, req->u.data, req->count, req->size);
148 req = NULL;
149 }
151 return req;
152 }
154 //use poll to get the port notification
155 //ioreq_vec--out,the
156 //retval--the number of ioreq packet
157 ioreq_t* cpu_get_ioreq(void)
158 {
159 int rc;
160 uint16_t port;
161 rc = read(evtchn_fd, &port, sizeof(port));
162 if ((rc == sizeof(port)) && (port == ioreq_local_port)) {
163 // unmask the wanted port again
164 write(evtchn_fd, &ioreq_local_port, 2);
166 //get the io packet from shared memory
167 return __cpu_get_ioreq();
168 }
170 //read error or read nothing
171 return NULL;
172 }
174 unsigned long
175 do_inp(CPUState *env, unsigned long addr, unsigned long size)
176 {
177 switch(size) {
178 case 1:
179 return cpu_inb(env, addr);
180 case 2:
181 return cpu_inw(env, addr);
182 case 4:
183 return cpu_inl(env, addr);
184 default:
185 fprintf(logfile, "inp: bad size: %lx %lx\n", addr, size);
186 exit(-1);
187 }
188 }
190 void
191 do_outp(CPUState *env, unsigned long addr, unsigned long size,
192 unsigned long val)
193 {
194 switch(size) {
195 case 1:
196 return cpu_outb(env, addr, val);
197 case 2:
198 return cpu_outw(env, addr, val);
199 case 4:
200 return cpu_outl(env, addr, val);
201 default:
202 fprintf(logfile, "outp: bad size: %lx %lx\n", addr, size);
203 exit(-1);
204 }
205 }
207 extern void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
208 int len, int is_write);
210 static inline void
211 read_physical(uint64_t addr, unsigned long size, void *val)
212 {
213 return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 0);
214 }
216 static inline void
217 write_physical(uint64_t addr, unsigned long size, void *val)
218 {
219 return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 1);
220 }
/*
 * Service a port-I/O request (IOREQ_TYPE_PIO).
 * Scalar requests (pdata_valid == 0) carry the datum inline in
 * req->u.data; string/rep requests instead name a guest-physical buffer
 * in req->u.pdata, stepped by req->size for each of req->count
 * repetitions — backwards when the direction flag (req->df) is set.
 */
void
cpu_ioreq_pio(CPUState *env, ioreq_t *req)
{
    int i, sign;

    /* df set => walk the guest buffer downwards */
    sign = req->df ? -1 : 1;

    if (req->dir == IOREQ_READ) {
        if (!req->pdata_valid) {
            /* single scalar IN: result goes back inline */
            req->u.data = do_inp(env, req->addr, req->size);
        } else {
            unsigned long tmp;

            /* rep INS: each port read lands in the guest buffer */
            for (i = 0; i < req->count; i++) {
                tmp = do_inp(env, req->addr, req->size);
                write_physical((target_phys_addr_t) req->u.pdata
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->pdata_valid) {
            /* single scalar OUT: datum supplied inline */
            do_outp(env, req->addr, req->size, req->u.data);
        } else {
            /* rep OUTS: each datum is fetched from the guest buffer */
            for (i = 0; i < req->count; i++) {
                unsigned long tmp;

                read_physical((target_phys_addr_t) req->u.pdata
                              + (sign * i * req->size),
                              req->size, &tmp);
                do_outp(env, req->addr, req->size, tmp);
            }
        }
    }
}
/*
 * Service a memory-move request (IOREQ_TYPE_COPY).
 * Without pdata, data moves between guest memory at req->addr and the
 * inline req->u.data field.  With pdata, data is copied between two
 * guest-physical regions (req->addr and req->u.pdata), req->size bytes
 * at a time for req->count iterations, stepping backwards when the
 * direction flag (req->df) is set.
 */
void
cpu_ioreq_move(CPUState *env, ioreq_t *req)
{
    int i, sign;

    /* df set => step both regions downwards */
    sign = req->df ? -1 : 1;

    if (!req->pdata_valid) {
        if (req->dir == IOREQ_READ) {
            /* guest memory -> inline datum (last iteration wins) */
            for (i = 0; i < req->count; i++) {
                read_physical(req->addr
                              + (sign * i * req->size),
                              req->size, &req->u.data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            /* inline datum -> guest memory, replicated count times */
            for (i = 0; i < req->count; i++) {
                write_physical(req->addr
                               + (sign * i * req->size),
                               req->size, &req->u.data);
            }
        }
    } else {
        unsigned long tmp;

        if (req->dir == IOREQ_READ) {
            /* copy req->addr region into the req->u.pdata region */
            for (i = 0; i < req->count; i++) {
                read_physical(req->addr
                              + (sign * i * req->size),
                              req->size, &tmp);
                write_physical((target_phys_addr_t )req->u.pdata
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            /* copy the req->u.pdata region into the req->addr region */
            for (i = 0; i < req->count; i++) {
                read_physical((target_phys_addr_t) req->u.pdata
                              + (sign * i * req->size),
                              req->size, &tmp);
                write_physical(req->addr
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        }
    }
}
304 void
305 cpu_ioreq_and(CPUState *env, ioreq_t *req)
306 {
307 unsigned long tmp1, tmp2;
309 if (req->pdata_valid != 0)
310 hw_error("expected scalar value");
312 read_physical(req->addr, req->size, &tmp1);
313 if (req->dir == IOREQ_WRITE) {
314 tmp2 = tmp1 & (unsigned long) req->u.data;
315 write_physical(req->addr, req->size, &tmp2);
316 }
317 req->u.data = tmp1;
318 }
320 void
321 cpu_ioreq_or(CPUState *env, ioreq_t *req)
322 {
323 unsigned long tmp1, tmp2;
325 if (req->pdata_valid != 0)
326 hw_error("expected scalar value");
328 read_physical(req->addr, req->size, &tmp1);
329 if (req->dir == IOREQ_WRITE) {
330 tmp2 = tmp1 | (unsigned long) req->u.data;
331 write_physical(req->addr, req->size, &tmp2);
332 }
333 req->u.data = tmp1;
334 }
336 void
337 cpu_ioreq_xor(CPUState *env, ioreq_t *req)
338 {
339 unsigned long tmp1, tmp2;
341 if (req->pdata_valid != 0)
342 hw_error("expected scalar value");
344 read_physical(req->addr, req->size, &tmp1);
345 if (req->dir == IOREQ_WRITE) {
346 tmp2 = tmp1 ^ (unsigned long) req->u.data;
347 write_physical(req->addr, req->size, &tmp2);
348 }
349 req->u.data = tmp1;
350 }
/*
 * Top-level I/O request dispatcher: pull the next request off the event
 * channel / shared page, mask scalar write data to the access width,
 * dispatch on request type, then mark the response ready and flag that
 * the guest must be notified (the actual notify is sent later, from
 * main_loop, after device emulation has caught up).
 */
void
cpu_handle_ioreq(CPUState *env)
{
    ioreq_t *req = cpu_get_ioreq();

    if (req) {
        /* For sub-4-byte scalar writes, discard the unused high bits. */
        if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
            if (req->size != 4)
                req->u.data &= (1UL << (8 * req->size))-1;
        }

        switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(env, req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(env, req);
            break;
        case IOREQ_TYPE_AND:
            cpu_ioreq_and(env, req);
            break;
        case IOREQ_TYPE_OR:
            cpu_ioreq_or(env, req);
            break;
        case IOREQ_TYPE_XOR:
            cpu_ioreq_xor(env, req);
            break;
        default:
            hw_error("Invalid ioreq type 0x%x", req->type);
        }

        /* No state change if state = STATE_IORESP_HOOK */
        if (req->state == STATE_IOREQ_INPROCESS)
            req->state = STATE_IORESP_READY;
        /* Defer the event-channel notification to the main loop. */
        env->send_event = 1;
    }
}
390 int xc_handle;
392 void
393 destroy_vmx_domain(void)
394 {
395 extern FILE* logfile;
396 char destroy_cmd[20];
397 sprintf(destroy_cmd, "xm destroy %d", domid);
398 if (system(destroy_cmd) == -1)
399 fprintf(logfile, "%s failed.!\n", destroy_cmd);
400 }
402 fd_set wakeup_rfds;
403 int highest_fds;
404 int main_loop(void)
405 {
406 fd_set rfds;
407 struct timeval tv;
408 extern CPUState *global_env;
409 extern int vm_running;
410 extern int shutdown_requested;
411 CPUState *env = global_env;
412 int retval;
413 extern void main_loop_wait(int);
415 /* Watch stdin (fd 0) to see when it has input. */
416 FD_ZERO(&wakeup_rfds);
417 FD_SET(evtchn_fd, &wakeup_rfds);
418 highest_fds = evtchn_fd;
419 env->send_event = 0
420 while (1) {
421 if (vm_running) {
422 if (shutdown_requested) {
423 break;
424 }
425 if (reset_requested){
426 qemu_system_reset();
427 reset_requested = 0;
428 }
429 }
431 /* Wait up to one seconds. */
432 tv.tv_sec = 0;
433 tv.tv_usec = 100000;
435 retval = select(highest_fds+1, &wakeup_rfds, NULL, NULL, &tv);
436 if (retval == -1) {
437 perror("select");
438 return 0;
439 }
440 rfds = wakeup_rfds;
441 FD_ZERO(&wakeup_rfds);
442 FD_SET(evtchn_fd, &wakeup_rfds);
444 #if __WORDSIZE == 32
445 #define ULONGLONG_MAX 0xffffffffffffffffULL
446 #else
447 #define ULONGLONG_MAX ULONG_MAX
448 #endif
450 tun_receive_handler(&rfds);
451 if ( FD_ISSET(evtchn_fd, &rfds) ) {
452 cpu_handle_ioreq(env);
453 }
454 main_loop_wait(0);
455 if (env->send_event) {
456 env->send_event = 0;
457 struct ioctl_evtchn_notify notify;
458 notify.port = ioreq_local_port;
459 (void)ioctl(evtchn_fd, IOCTL_EVTCHN_NOTIFY, &notify);
460 }
461 }
462 destroy_vmx_domain();
463 return 0;
464 }
466 static void
467 qemu_vmx_reset(void *unused)
468 {
469 char cmd[255];
471 /* pause domain first, to avoid repeated reboot request*/
472 xc_domain_pause (xc_handle, domid);
474 sprintf(cmd,"xm shutdown -R %d", domid);
475 system (cmd);
476 }
478 CPUState *
479 cpu_init()
480 {
481 CPUX86State *env;
482 struct ioctl_evtchn_bind_interdomain bind;
483 int rc;
485 cpu_exec_init();
486 qemu_register_reset(qemu_vmx_reset, NULL);
487 env = malloc(sizeof(CPUX86State));
488 if (!env)
489 return NULL;
490 memset(env, 0, sizeof(CPUX86State));
492 cpu_single_env = env;
494 if (evtchn_fd != -1)//the evtchn has been opened by another cpu object
495 return NULL;
497 //use nonblock reading not polling, may change in future.
498 evtchn_fd = open("/dev/xen/evtchn", O_RDWR|O_NONBLOCK);
499 if (evtchn_fd == -1) {
500 perror("open");
501 return NULL;
502 }
504 bind.remote_domain = domid;
505 bind.remote_port = ioreq_remote_port;
506 rc = ioctl(evtchn_fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
507 if (rc == -1) {
508 perror("ioctl");
509 return NULL;
510 }
511 ioreq_local_port = rc;
513 return env;
514 }