ia64/xen-unstable: tools/ioemu/target-i386-dm/helper2.c @ 16866:79497be10105

ioemu: Use asprintf instead of PATH_MAX, which POSIX says is optional.

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
Author:   Keir Fraser <keir.fraser@citrix.com>
Date:     Wed Jan 23 18:05:12 2008 +0000
Parents:  019f5bd23ea5
Children: d29d74d4eeac
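
The change itself sits near the bottom of main_loop() below. PATH_MAX is an
optional POSIX constant, so a fixed char buf[PATH_MAX] need not compile on
every platform; letting asprintf() size the allocation avoids the dependency.
A minimal sketch of the pattern (the "before" half is a reconstruction for
illustration, not the exact old code):

    /* Before: fixed-size buffer; fails to build where PATH_MAX is undefined. */
    char qemu_file[PATH_MAX];
    snprintf(qemu_file, sizeof(qemu_file), "/var/lib/xen/qemu-save.%d", domid);

    /* After: asprintf() allocates a buffer of the right size; the caller
     * frees it. (The -1 error return is left unchecked here, as it is in
     * main_loop() below.) */
    char *qemu_file;
    asprintf(&qemu_file, "/var/lib/xen/qemu-save.%d", domid);
    do_savevm(qemu_file);
    free(qemu_file);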
/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Main cpu loop for handling I/O requests coming from a virtual machine
 * Copyright 2004, Intel Corporation.
 * Copyright 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU Lesser General Public License,
 * version 2.1, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include <limits.h>
#include <fcntl.h>

#include <xenctrl.h>
#include <xen/hvm/ioreq.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

#include "vl.h"

int domid = -1;
int vcpus = 1;

extern int xc_handle;

long time_offset = 0;

shared_iopage_t *shared_page = NULL;

#define BUFFER_IO_MAX_DELAY 100
buffered_iopage_t *buffered_io_page = NULL;
QEMUTimer *buffered_io_timer;

/* the evtchn fd for polling */
int xce_handle = -1;

/* which vcpu we are serving */
int send_vcpu = 0;

/* the evtchn ports used to poll for ioreq notifications, one per vcpu */
#define NR_CPUS 32
evtchn_port_t ioreq_local_port[NR_CPUS];

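/* Create the CPU state, open the event-channel interface and, on the
 * first call only, bind an event channel to each vcpu's ioreq port. */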
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;
    int i, rc;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;

        cpu_single_env = env;

        xce_handle = xc_evtchn_open();
        if (xce_handle == -1) {
            perror("open");
            return NULL;
        }

        /* FIXME: how about if we overflow the page here? */
        for (i = 0; i < vcpus; i++) {
            rc = xc_evtchn_bind_interdomain(
                xce_handle, domid, shared_page->vcpu_iodata[i].vp_eport);
            if (rc == -1) {
                fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
                return NULL;
            }
            ioreq_local_port[i] = rc;
        }
    }

    return env;
}

/* called from main_cpu_reset */
void cpu_reset(CPUX86State *env)
{
    int xcHandle;
    int sts;

    xcHandle = xc_interface_open();
    if (xcHandle < 0)
        fprintf(logfile, "Cannot acquire xenctrl handle\n");
    else {
        xc_domain_shutdown_hook(xcHandle, domid);
        sts = xc_domain_shutdown(xcHandle, domid, SHUTDOWN_reboot);
        if (sts != 0)
            fprintf(logfile,
                    "? xc_domain_shutdown failed to issue reboot, sts %d\n",
                    sts);
        else
            fprintf(logfile, "Issued domain %d reboot\n", domid);
        xc_interface_close(xcHandle);
    }
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}


void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

/* Functions to handle the I/O request packets. */
void sp_info(void)
{
    ioreq_t *req;
    int i;

    for (i = 0; i < vcpus; i++) {
        req = &(shared_page->vcpu_iodata[i].vp_ioreq);
        term_printf("vcpu %d: event port %d\n", i, ioreq_local_port[i]);
        term_printf("  req state: %x, ptr: %x, addr: %"PRIx64", "
                    "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
        term_printf("  total I/O handled on this vcpu: %"PRIx64"\n",
                    req->io_count);
    }
}

/* Get the pending ioreq packet from shared memory. */
static ioreq_t *__cpu_get_ioreq(int vcpu)
{
    ioreq_t *req;

    req = &(shared_page->vcpu_iodata[vcpu].vp_ioreq);

    if (req->state != STATE_IOREQ_READY) {
        fprintf(logfile, "I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Poll the event channel for a port notification and return the pending
 * ioreq packet for the signalled vcpu, or NULL if nothing is pending.
 */
static ioreq_t *cpu_get_ioreq(void)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(xce_handle);
    if (port != -1) {
        for ( i = 0; i < vcpus; i++ )
            if ( ioreq_local_port[i] == port )
                break;

        if ( i == vcpus ) {
            fprintf(logfile, "Fatal error while trying to get io event!\n");
            exit(1);
        }

        /* unmask the wanted port again */
        xc_evtchn_unmask(xce_handle, port);

        /* get the io packet from shared memory */
        send_vcpu = i;
        return __cpu_get_ioreq(i);
    }

    /* read error or read nothing */
    return NULL;
}

unsigned long do_inp(CPUState *env, unsigned long addr, unsigned long size)
{
    switch(size) {
    case 1:
        return cpu_inb(env, addr);
    case 2:
        return cpu_inw(env, addr);
    case 4:
        return cpu_inl(env, addr);
    default:
        fprintf(logfile, "inp: bad size: %lx %lx\n", addr, size);
        exit(-1);
    }
}

void do_outp(CPUState *env, unsigned long addr,
             unsigned long size, unsigned long val)
{
    switch(size) {
    case 1:
        return cpu_outb(env, addr, val);
    case 2:
        return cpu_outw(env, addr, val);
    case 4:
        return cpu_outl(env, addr, val);
    default:
        fprintf(logfile, "outp: bad size: %lx %lx\n", addr, size);
        exit(-1);
    }
}

extern void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                                   int len, int is_write);

static inline void read_physical(uint64_t addr, unsigned long size, void *val)
{
    return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 0);
}

static inline void write_physical(uint64_t addr, unsigned long size, void *val)
{
    return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 1);
}

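/* Handle a port I/O request: a single in/out for an immediate value, or,
 * when data_is_ptr is set, a rep string operation copying between the
 * port and guest memory, stepping by req->size in the direction given
 * by the df flag. */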
void cpu_ioreq_pio(CPUState *env, ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(env, req->addr, req->size);
        } else {
            unsigned long tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(env, req->addr, req->size);
                write_physical((target_phys_addr_t) req->data
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            do_outp(env, req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                unsigned long tmp = 0;

                read_physical((target_phys_addr_t) req->data
                              + (sign * i * req->size),
                              req->size, &tmp);
                do_outp(env, req->addr, req->size, tmp);
            }
        }
    }
}

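/* Handle a memory move (MMIO) request: copy req->count elements of
 * req->size bytes between req->addr and either the immediate data field
 * or, when data_is_ptr is set, a second guest-physical buffer. */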
void cpu_ioreq_move(CPUState *env, ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_physical(req->addr
                              + (sign * i * req->size),
                              req->size, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_physical(req->addr
                               + (sign * i * req->size),
                               req->size, &req->data);
            }
        }
    } else {
        target_ulong tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_physical(req->addr
                              + (sign * i * req->size),
                              req->size, &tmp);
                write_physical((target_phys_addr_t)req->data
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_physical((target_phys_addr_t) req->data
                              + (sign * i * req->size),
                              req->size, &tmp);
                write_physical(req->addr
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        }
    }
}

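/* The handlers below emulate read-modify-write operations on guest
 * memory: the old value is read, combined with req->data on writes and
 * written back, and the old value is returned in req->data. */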
void cpu_ioreq_and(CPUState *env, ioreq_t *req)
{
    target_ulong tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 & (target_ulong) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}

void cpu_ioreq_add(CPUState *env, ioreq_t *req)
{
    target_ulong tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 + (target_ulong) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}

void cpu_ioreq_sub(CPUState *env, ioreq_t *req)
{
    target_ulong tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 - (target_ulong) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}

void cpu_ioreq_or(CPUState *env, ioreq_t *req)
{
    target_ulong tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 | (target_ulong) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}

void cpu_ioreq_xor(CPUState *env, ioreq_t *req)
{
    target_ulong tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 ^ (target_ulong) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}

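/* Read the guest's RTC time offset from xenstore and apply it to the
 * domain. */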
void timeoffset_get(void)
{
    char *p;

    p = xenstore_vm_read(domid, "rtc/timeoffset", NULL);
    if (!p)
        return;

    if (sscanf(p, "%ld", &time_offset) == 1)
        fprintf(logfile, "Time offset set %ld\n", time_offset);
    else
        time_offset = 0;

    xc_domain_set_time_offset(xc_handle, domid, time_offset);

    free(p);
}

void cpu_ioreq_timeoffset(CPUState *env, ioreq_t *req)
{
    char b[64];

    time_offset += (unsigned long)req->data;

    fprintf(logfile, "Time offset set %ld, added offset %ld\n",
            time_offset, (long)req->data);
    sprintf(b, "%ld", time_offset);
    xenstore_vm_write(domid, "rtc/timeoffset", b);
}

void cpu_ioreq_xchg(CPUState *env, ioreq_t *req)
{
    unsigned long tmp1;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    write_physical(req->addr, req->size, &req->data);
    req->data = tmp1;
}

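/* Dispatch one I/O request to the handler for its type, after truncating
 * immediate write data to the request size. */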
void __handle_ioreq(CPUState *env, ioreq_t *req)
{
    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
        (req->size < sizeof(target_ulong)))
        req->data &= ((target_ulong)1 << (8 * req->size)) - 1;

    switch (req->type) {
    case IOREQ_TYPE_PIO:
        cpu_ioreq_pio(env, req);
        break;
    case IOREQ_TYPE_COPY:
        cpu_ioreq_move(env, req);
        break;
    case IOREQ_TYPE_AND:
        cpu_ioreq_and(env, req);
        break;
    case IOREQ_TYPE_ADD:
        cpu_ioreq_add(env, req);
        break;
    case IOREQ_TYPE_SUB:
        cpu_ioreq_sub(env, req);
        break;
    case IOREQ_TYPE_OR:
        cpu_ioreq_or(env, req);
        break;
    case IOREQ_TYPE_XOR:
        cpu_ioreq_xor(env, req);
        break;
    case IOREQ_TYPE_XCHG:
        cpu_ioreq_xchg(env, req);
        break;
    case IOREQ_TYPE_TIMEOFFSET:
        cpu_ioreq_timeoffset(env, req);
        break;
    case IOREQ_TYPE_INVALIDATE:
        qemu_invalidate_map_cache();
        break;
    default:
        hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}

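/* Drain the buffered ioreq ring: synthesise a full ioreq_t for each slot
 * and dispatch it. A 64-bit request occupies two consecutive slots, the
 * second carrying the high 32 bits of the data. */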
void __handle_buffered_iopage(CPUState *env)
{
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!buffered_io_page)
        return;

    while (buffered_io_page->read_pointer !=
           buffered_io_page->write_pointer) {
        buf_req = &buffered_io_page->buf_ioreq[
            buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
        qw = (req.size == 8);
        if (qw) {
            buf_req = &buffered_io_page->buf_ioreq[
                (buffered_io_page->read_pointer+1) % IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        __handle_ioreq(env, &req);

        mb();
        buffered_io_page->read_pointer += qw ? 2 : 1;
    }
}

void handle_buffered_io(void *opaque)
{
    CPUState *env = opaque;

    __handle_buffered_iopage(env);
    qemu_mod_timer(buffered_io_timer, BUFFER_IO_MAX_DELAY +
                   qemu_get_clock(rt_clock));
}

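/* Event-channel fd handler: process buffered requests, then any
 * synchronous request pending for the signalling vcpu, and notify the
 * guest once the response is ready. */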
void cpu_handle_ioreq(void *opaque)
{
    extern int vm_running;
    extern int shutdown_requested;
    CPUState *env = opaque;
    ioreq_t *req = cpu_get_ioreq();

    handle_buffered_io(env);
    if (req) {
        __handle_ioreq(env, req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(logfile, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
            destroy_hvm_domain();
            return;
        }

        wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (vm_running) {
            if (shutdown_requested) {
                fprintf(logfile, "shutdown requested in cpu_handle_ioreq\n");
                destroy_hvm_domain();
            }
            if (reset_requested) {
                fprintf(logfile, "reset requested in cpu_handle_ioreq.\n");
                qemu_system_reset();
                reset_requested = 0;
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
    }
}

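/* Main device-model loop: service I/O until a shutdown or suspend is
 * requested; on suspend, save the device state with do_savevm() and wait
 * on xenstore to be told to continue. */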
int main_loop(void)
{
    extern int vm_running;
    extern int shutdown_requested;
    extern int suspend_requested;
    CPUState *env = cpu_single_env;
    int evtchn_fd = xce_handle == -1 ? -1 : xc_evtchn_fd(xce_handle);
    char *qemu_file;
    fd_set fds;
    int ret = 0;

    buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
                                       cpu_single_env);
    qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));

    if (evtchn_fd != -1)
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);

    xenstore_record_dm_state("running");
    while (1) {
        while (!((vm_running && suspend_requested) || shutdown_requested))
            /* Wait up to 10 msec. */
            main_loop_wait(10);

        if (shutdown_requested) {
            ret = EXCP_INTERRUPT;
            break;
        }

        fprintf(logfile, "device model saving state\n");

        /* Pull all outstanding ioreqs through the system */
        handle_buffered_pio();
        handle_buffered_io(env);
        main_loop_wait(1); /* For the select() on events */

        /* Save the device state */
        asprintf(&qemu_file, "/var/lib/xen/qemu-save.%d", domid);
        do_savevm(qemu_file);
        free(qemu_file);

        xenstore_record_dm_state("paused");

        /* Wait to be allowed to continue */
        while (suspend_requested) {
            FD_ZERO(&fds);
            FD_SET(xenstore_fd(), &fds);
            if (select(xenstore_fd() + 1, &fds, NULL, NULL, NULL) > 0)
                xenstore_process_event(NULL);
        }

        xenstore_record_dm_state("running");
    }

    return ret;
}

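/* Ask the hypervisor to power off the domain. */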
void destroy_hvm_domain(void)
{
    int xcHandle;
    int sts;

    xcHandle = xc_interface_open();
    if (xcHandle < 0)
        fprintf(logfile, "Cannot acquire xenctrl handle\n");
    else {
        sts = xc_domain_shutdown(xcHandle, domid, SHUTDOWN_poweroff);
        if (sts != 0)
            fprintf(logfile, "? xc_domain_shutdown failed to issue poweroff, "
                    "sts %d, errno %d\n", sts, errno);
        else
            fprintf(logfile, "Issued domain %d poweroff\n", domid);
        xc_interface_close(xcHandle);
    }
}