direct-io.hg

view tools/ioemu/target-i386-dm/helper2.c @ 8151:f5b119533cc8

Define explicit evtchn_port_t type (32 bits) and plumb up
to user space thru /dev/xen/evtchn.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Dec 01 15:22:22 2005 +0100 (2005-12-01)
parents f7c7575695b3
children 9fc306e40a7c
line source
1 /*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
21 /*
22 * Main cpu loop for handling I/O requests coming from a virtual machine
23 * Copyright 2004, Intel Corporation.
24 *
25 * This program is free software; you can redistribute it and/or modify it
26 * under the terms and conditions of the GNU Lesser General Public License,
27 * version 2.1, as published by the Free Software Foundation.
28 *
29 * This program is distributed in the hope it will be useful, but WITHOUT
30 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
31 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
32 * more details.
33 *
34 * You should have received a copy of the GNU Lesser General Public License
35 * along with this program; if not, write to the Free Software Foundation,
36 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307 USA.
37 */
38 #include <stdarg.h>
39 #include <stdlib.h>
40 #include <stdio.h>
41 #include <string.h>
42 #include <inttypes.h>
43 #include <signal.h>
44 #include <assert.h>
46 #include <limits.h>
47 #include <fcntl.h>
48 #include <sys/ioctl.h>
50 #include <xenctrl.h>
51 #include <xen/io/ioreq.h>
52 #include <xen/linux/evtchn.h>
54 #include "cpu.h"
55 #include "exec-all.h"
56 #include "vl.h"
/* Target domain id and its vcpu count -- set up by option parsing
 * elsewhere in ioemu (TODO confirm exact writer). */
extern int domid;
extern int vcpus;

/* Mapping of the guest's shared video RAM (mapped elsewhere -- confirm). */
void *shared_vram;

/* Shared I/O request page: one ioreq slot per vcpu (see sp_info below). */
shared_iopage_t *shared_page = NULL;

/* Set by the reset handler; consumed by main_loop. */
extern int reset_requested;
66 CPUX86State *cpu_86_init(void)
67 {
68 CPUX86State *env;
69 static int inited;
71 cpu_exec_init();
73 env = malloc(sizeof(CPUX86State));
74 if (!env)
75 return NULL;
76 memset(env, 0, sizeof(CPUX86State));
77 /* init various static tables */
78 if (!inited) {
79 inited = 1;
80 }
81 cpu_single_env = env;
82 cpu_reset(env);
83 return env;
84 }
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    /* Intentionally empty in the device-model build: there is no
     * register state to reset here. */
}
/* Release a CPU state previously allocated by cpu_86_init/cpu_init. */
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
/* Stub: register dumping is not implemented in the device-model build. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
}
103 /***********************************************************/
104 /* x86 mmu */
105 /* XXX: add PGE support */
107 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
108 {
109 a20_state = (a20_state != 0);
110 if (a20_state != ((env->a20_mask >> 20) & 1)) {
111 #if defined(DEBUG_MMU)
112 printf("A20 update: a20=%d\n", a20_state);
113 #endif
114 env->a20_mask = 0xffefffff | (a20_state << 20);
115 }
116 }
/* Identity mapping: the address is returned unchanged (no MMU
 * translation is performed by the device model). */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
//the evtchn fd for polling (/dev/xen/evtchn, opened in cpu_init)
int evtchn_fd = -1;

//the evtchn port for polling the notification; the remote port is
//supplied as a command-line ("bochs") parameter, the local port is the
//result of binding it in cpu_init
evtchn_port_t ioreq_remote_port, ioreq_local_port;
130 //some functions to handle the io req packet
131 void sp_info()
132 {
133 ioreq_t *req;
134 int i;
136 term_printf("event port: %d\n", shared_page->sp_global.eport);
137 for ( i = 0; i < vcpus; i++ ) {
138 req = &(shared_page->vcpu_iodata[i].vp_ioreq);
139 term_printf("vcpu %d:\n", i);
140 term_printf(" req state: %x, pvalid: %x, addr: %llx, "
141 "data: %llx, count: %llx, size: %llx\n",
142 req->state, req->pdata_valid, req->addr,
143 req->u.data, req->count, req->size);
144 }
145 }
147 //get the ioreq packets from share mem
148 ioreq_t* __cpu_get_ioreq(void)
149 {
150 ioreq_t *req;
152 req = &(shared_page->vcpu_iodata[0].vp_ioreq);
153 if (req->state == STATE_IOREQ_READY) {
154 req->state = STATE_IOREQ_INPROCESS;
155 } else {
156 fprintf(logfile, "False I/O request ... in-service already: "
157 "%x, pvalid: %x, port: %llx, "
158 "data: %llx, count: %llx, size: %llx\n",
159 req->state, req->pdata_valid, req->addr,
160 req->u.data, req->count, req->size);
161 req = NULL;
162 }
164 return req;
165 }
167 //use poll to get the port notification
168 //ioreq_vec--out,the
169 //retval--the number of ioreq packet
170 ioreq_t* cpu_get_ioreq(void)
171 {
172 int rc;
173 evtchn_port_t port;
175 rc = read(evtchn_fd, &port, sizeof(port));
176 if ((rc == sizeof(port)) && (port == ioreq_local_port)) {
177 // unmask the wanted port again
178 write(evtchn_fd, &ioreq_local_port, sizeof(port));
180 //get the io packet from shared memory
181 return __cpu_get_ioreq();
182 }
184 //read error or read nothing
185 return NULL;
186 }
188 unsigned long do_inp(CPUState *env, unsigned long addr, unsigned long size)
189 {
190 switch(size) {
191 case 1:
192 return cpu_inb(env, addr);
193 case 2:
194 return cpu_inw(env, addr);
195 case 4:
196 return cpu_inl(env, addr);
197 default:
198 fprintf(logfile, "inp: bad size: %lx %lx\n", addr, size);
199 exit(-1);
200 }
201 }
203 void do_outp(CPUState *env, unsigned long addr,
204 unsigned long size, unsigned long val)
205 {
206 switch(size) {
207 case 1:
208 return cpu_outb(env, addr, val);
209 case 2:
210 return cpu_outw(env, addr, val);
211 case 4:
212 return cpu_outl(env, addr, val);
213 default:
214 fprintf(logfile, "outp: bad size: %lx %lx\n", addr, size);
215 exit(-1);
216 }
217 }
219 extern void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
220 int len, int is_write);
222 static inline void read_physical(uint64_t addr, unsigned long size, void *val)
223 {
224 return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 0);
225 }
227 static inline void write_physical(uint64_t addr, unsigned long size, void *val)
228 {
229 return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 1);
230 }
/*
 * Service a port-I/O request.  Scalar ops pass data in req->u.data;
 * string ops (req->pdata_valid) copy each element to/from guest memory
 * at req->u.pdata, stepping by req->size in the direction given by the
 * guest's DF flag (req->df: 1 means decrement).
 */
void cpu_ioreq_pio(CPUState *env, ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (req->dir == IOREQ_READ) {
        if (!req->pdata_valid) {
            /* Scalar IN: result goes straight back in the request. */
            req->u.data = do_inp(env, req->addr, req->size);
        } else {
            /* String IN (INS): store each element to guest memory. */
            unsigned long tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(env, req->addr, req->size);
                write_physical((target_phys_addr_t) req->u.pdata
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->pdata_valid) {
            /* Scalar OUT. */
            do_outp(env, req->addr, req->size, req->u.data);
        } else {
            /* String OUT (OUTS): fetch each element from guest memory. */
            for (i = 0; i < req->count; i++) {
                unsigned long tmp;

                read_physical((target_phys_addr_t) req->u.pdata
                              + (sign * i * req->size),
                              req->size, &tmp);
                do_outp(env, req->addr, req->size, tmp);
            }
        }
    }
}
/*
 * Service a memory "copy" request.  Without pdata_valid the data moves
 * via req->u.data; with pdata_valid it is copied between guest memory
 * at req->addr and guest memory at req->u.pdata, one req->size element
 * at a time, stepping in the direction given by the DF flag (req->df).
 */
void cpu_ioreq_move(CPUState *env, ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (!req->pdata_valid) {
        if (req->dir == IOREQ_READ) {
            /* NOTE(review): each iteration overwrites req->u.data, so
             * only the last element read is returned -- presumably
             * count is 1 for scalar moves; confirm with callers. */
            for (i = 0; i < req->count; i++) {
                read_physical(req->addr
                              + (sign * i * req->size),
                              req->size, &req->u.data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_physical(req->addr
                               + (sign * i * req->size),
                               req->size, &req->u.data);
            }
        }
    } else {
        unsigned long tmp;

        if (req->dir == IOREQ_READ) {
            /* Guest memory -> guest memory, element by element. */
            for (i = 0; i < req->count; i++) {
                read_physical(req->addr
                              + (sign * i * req->size),
                              req->size, &tmp);
                write_physical((target_phys_addr_t )req->u.pdata
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            /* Same copy in the opposite direction. */
            for (i = 0; i < req->count; i++) {
                read_physical((target_phys_addr_t) req->u.pdata
                              + (sign * i * req->size),
                              req->size, &tmp);
                write_physical(req->addr
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        }
    }
}
312 void cpu_ioreq_and(CPUState *env, ioreq_t *req)
313 {
314 unsigned long tmp1, tmp2;
316 if (req->pdata_valid != 0)
317 hw_error("expected scalar value");
319 read_physical(req->addr, req->size, &tmp1);
320 if (req->dir == IOREQ_WRITE) {
321 tmp2 = tmp1 & (unsigned long) req->u.data;
322 write_physical(req->addr, req->size, &tmp2);
323 }
324 req->u.data = tmp1;
325 }
327 void cpu_ioreq_or(CPUState *env, ioreq_t *req)
328 {
329 unsigned long tmp1, tmp2;
331 if (req->pdata_valid != 0)
332 hw_error("expected scalar value");
334 read_physical(req->addr, req->size, &tmp1);
335 if (req->dir == IOREQ_WRITE) {
336 tmp2 = tmp1 | (unsigned long) req->u.data;
337 write_physical(req->addr, req->size, &tmp2);
338 }
339 req->u.data = tmp1;
340 }
342 void cpu_ioreq_xor(CPUState *env, ioreq_t *req)
343 {
344 unsigned long tmp1, tmp2;
346 if (req->pdata_valid != 0)
347 hw_error("expected scalar value");
349 read_physical(req->addr, req->size, &tmp1);
350 if (req->dir == IOREQ_WRITE) {
351 tmp2 = tmp1 ^ (unsigned long) req->u.data;
352 write_physical(req->addr, req->size, &tmp2);
353 }
354 req->u.data = tmp1;
355 }
357 void cpu_handle_ioreq(CPUState *env)
358 {
359 ioreq_t *req = cpu_get_ioreq();
361 if (req) {
362 if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
363 if (req->size != 4)
364 req->u.data &= (1UL << (8 * req->size))-1;
365 }
367 switch (req->type) {
368 case IOREQ_TYPE_PIO:
369 cpu_ioreq_pio(env, req);
370 break;
371 case IOREQ_TYPE_COPY:
372 cpu_ioreq_move(env, req);
373 break;
374 case IOREQ_TYPE_AND:
375 cpu_ioreq_and(env, req);
376 break;
377 case IOREQ_TYPE_OR:
378 cpu_ioreq_or(env, req);
379 break;
380 case IOREQ_TYPE_XOR:
381 cpu_ioreq_xor(env, req);
382 break;
383 default:
384 hw_error("Invalid ioreq type 0x%x\n", req->type);
385 }
387 /* No state change if state = STATE_IORESP_HOOK */
388 if (req->state == STATE_IOREQ_INPROCESS)
389 req->state = STATE_IORESP_READY;
390 env->send_event = 1;
391 }
392 }
394 int xc_handle;
396 void
397 destroy_vmx_domain(void)
398 {
399 extern FILE* logfile;
400 char destroy_cmd[32];
402 sprintf(destroy_cmd, "xm destroy %d", domid);
403 if (system(destroy_cmd) == -1)
404 fprintf(logfile, "%s failed.!\n", destroy_cmd);
405 }
/* fd set and highest fd watched by the select() loop below; other
 * handlers (run from main_loop_wait) may add their own fds to
 * wakeup_rfds between iterations. */
fd_set wakeup_rfds;
int highest_fds;

/*
 * Main event loop: waits (up to 100 ms at a time) on the event-channel
 * fd, services guest I/O requests, runs the generic qemu handlers, and
 * notifies the guest when a response is ready.  Returns 0 after a
 * shutdown request or a select() error; the domain is destroyed on
 * exit from the loop.
 */
int main_loop(void)
{
    fd_set rfds;
    struct timeval tv;
    extern CPUState *global_env;
    extern int vm_running;
    extern int shutdown_requested;
    CPUState *env = global_env;
    int retval;
    extern void main_loop_wait(int);

    /* Watch stdin (fd 0) to see when it has input. */
    FD_ZERO(&wakeup_rfds);
    FD_SET(evtchn_fd, &wakeup_rfds);
    highest_fds = evtchn_fd;
    env->send_event = 0;

    while (1) {
        if (vm_running) {
            if (shutdown_requested) {
                break;
            }
            if (reset_requested){
                qemu_system_reset();
                reset_requested = 0;
            }
        }

        /* Wait up to one seconds. */
        /* NOTE(review): the timeout below is actually 100 ms. */
        tv.tv_sec = 0;
        tv.tv_usec = 100000;

        retval = select(highest_fds+1, &wakeup_rfds, NULL, NULL, &tv);
        if (retval == -1) {
            fprintf(logfile, "select returned error %d\n", errno);
            return 0;
        }
        /* Snapshot the ready set, then re-arm the watch set for the
         * next iteration (handlers re-add their fds each pass). */
        rfds = wakeup_rfds;
        FD_ZERO(&wakeup_rfds);
        FD_SET(evtchn_fd, &wakeup_rfds);

/* NOTE(review): this macro is not referenced in the visible code and is
 * (re)defined inside the loop body -- looks like leftover; confirm
 * before removing. */
#if __WORDSIZE == 32
#define ULONGLONG_MAX 0xffffffffffffffffULL
#else
#define ULONGLONG_MAX ULONG_MAX
#endif

        tun_receive_handler(&rfds);
        if ( FD_ISSET(evtchn_fd, &rfds) ) {
            cpu_handle_ioreq(env);
        }
        main_loop_wait(0);

        /* A handler flagged a completed I/O request: notify the guest
         * over the event channel. */
        if (env->send_event) {
            struct ioctl_evtchn_notify notify;

            env->send_event = 0;
            notify.port = ioreq_local_port;
            (void)ioctl(evtchn_fd, IOCTL_EVTCHN_NOTIFY, &notify);
        }
    }
    destroy_vmx_domain();
    return 0;
}
474 static void qemu_vmx_reset(void *unused)
475 {
476 char cmd[64];
478 /* pause domain first, to avoid repeated reboot request*/
479 xc_domain_pause(xc_handle, domid);
481 sprintf(cmd, "xm shutdown -R %d", domid);
482 system(cmd);
483 }
485 CPUState * cpu_init()
486 {
487 CPUX86State *env;
488 struct ioctl_evtchn_bind_interdomain bind;
489 int rc;
491 cpu_exec_init();
492 qemu_register_reset(qemu_vmx_reset, NULL);
493 env = malloc(sizeof(CPUX86State));
494 if (!env)
495 return NULL;
496 memset(env, 0, sizeof(CPUX86State));
498 cpu_single_env = env;
500 if (evtchn_fd != -1)//the evtchn has been opened by another cpu object
501 return NULL;
503 //use nonblock reading not polling, may change in future.
504 evtchn_fd = open("/dev/xen/evtchn", O_RDWR|O_NONBLOCK);
505 if (evtchn_fd == -1) {
506 fprintf(logfile, "open evtchn device error %d\n", errno);
507 return NULL;
508 }
510 bind.remote_domain = domid;
511 bind.remote_port = ioreq_remote_port;
512 rc = ioctl(evtchn_fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
513 if (rc == -1) {
514 fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
515 return NULL;
516 }
517 ioreq_local_port = rc;
519 return env;
520 }