ia64/xen-unstable

view tools/ioemu/target-i386-dm/helper2.c @ 15841:c5f735271e22

[IA64] Foreign p2m: Fix vti domain builder.

It should set arch_domain::convmem_end.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Sep 06 13:48:43 2007 -0600 (2007-09-06)
parents d9836851a2a4
children c44d82f36665
/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * Main cpu loop for handling I/O requests coming from a virtual machine
 * Copyright 2004, Intel Corporation.
 * Copyright 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU Lesser General Public License,
 * version 2.1, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
 * License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include <limits.h>
#include <fcntl.h>

#include <xenctrl.h>
#include <xen/hvm/ioreq.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

#include "vl.h"
int domid = -1;
int vcpus = 1;

int xc_handle;

long time_offset = 0;

shared_iopage_t *shared_page = NULL;

#define BUFFER_IO_MAX_DELAY 100
buffered_iopage_t *buffered_io_page = NULL;
QEMUTimer *buffered_io_timer;

/* the evtchn fd for polling */
int xce_handle = -1;

/* which vcpu we are serving */
int send_vcpu = 0;

// the evtchn ports on which we poll for ioreq notifications, one per vcpu
#define NR_CPUS 32
evtchn_port_t ioreq_local_port[NR_CPUS];
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;
    int i, rc;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;

        cpu_single_env = env;

        xce_handle = xc_evtchn_open();
        if (xce_handle == -1) {
            perror("open");
            return NULL;
        }

        /* FIXME: how about if we overflow the page here? */
        for (i = 0; i < vcpus; i++) {
            rc = xc_evtchn_bind_interdomain(
                xce_handle, domid, shared_page->vcpu_iodata[i].vp_eport);
            if (rc == -1) {
                fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
                return NULL;
            }
            ioreq_local_port[i] = rc;
        }
    }

    return env;
}
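
/*
 * ioreq_local_port[] holds only NR_CPUS (32) entries, yet the bind loop
 * in cpu_x86_init() indexes it with the externally configured vcpus
 * count (the FIXME above hints at a related concern). A minimal guard,
 * as a hypothetical sketch rather than anything in this changeset,
 * could run before the bind loop:
 */
#if 0
    if (vcpus > NR_CPUS) {
        fprintf(logfile, "%d vcpus requested, but NR_CPUS is only %d\n",
                vcpus, NR_CPUS);
        return NULL;
    }
#endif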
/* called from main_cpu_reset */
void cpu_reset(CPUX86State *env)
{
    int xcHandle;
    int sts;

    xcHandle = xc_interface_open();
    if (xcHandle < 0)
        fprintf(logfile, "Cannot acquire xenctrl handle\n");
    else {
        xc_domain_shutdown_hook(xcHandle, domid);
        sts = xc_domain_shutdown(xcHandle, domid, SHUTDOWN_reboot);
        if (sts != 0)
            fprintf(logfile,
                    "? xc_domain_shutdown failed to issue reboot, sts %d\n",
                    sts);
        else
            fprintf(logfile, "Issued domain %d reboot\n", domid);
        xc_interface_close(xcHandle);
    }
}
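
/*
 * cpu_reset() above and destroy_hvm_domain() at the bottom of this file
 * share one pattern: open a fresh xenctrl handle, ask Xen to shut the
 * domain down with a given reason, log the outcome, close the handle.
 * A sketch of how the two could be folded into one helper (hypothetical
 * refactoring; request_shutdown() does not exist in this tree):
 */
#if 0
static void request_shutdown(int reason, const char *what)
{
    int xc = xc_interface_open();

    if (xc < 0) {
        fprintf(logfile, "Cannot acquire xenctrl handle\n");
        return;
    }
    if (xc_domain_shutdown(xc, domid, reason) != 0)
        fprintf(logfile, "xc_domain_shutdown failed to issue %s\n", what);
    else
        fprintf(logfile, "Issued domain %d %s\n", domid, what);
    xc_interface_close(xc);
}

/* usage: request_shutdown(SHUTDOWN_reboot,   "reboot");
 *        request_shutdown(SHUTDOWN_poweroff, "poweroff"); */
#endif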
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
// helpers for handling the ioreq packets
void sp_info(void)
{
    ioreq_t *req;
    int i;

    for (i = 0; i < vcpus; i++) {
        req = &(shared_page->vcpu_iodata[i].vp_ioreq);
        term_printf("vcpu %d: event port %d\n", i, ioreq_local_port[i]);
        term_printf("  req state: %x, ptr: %x, addr: %"PRIx64", "
                    "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
        term_printf("  total I/O issued on this vcpu: %"PRIx64"\n",
                    req->io_count);
    }
}
// fetch the pending ioreq packet from shared memory
static ioreq_t *__cpu_get_ioreq(int vcpu)
{
    ioreq_t *req;

    req = &(shared_page->vcpu_iodata[vcpu].vp_ioreq);

    if (req->state != STATE_IOREQ_READY) {
        fprintf(logfile, "I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}
// poll the event channels for a notification and return the pending
// ioreq packet of the signalling vcpu, or NULL if nothing is pending
static ioreq_t *cpu_get_ioreq(void)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(xce_handle);
    if (port != -1) {
        for ( i = 0; i < vcpus; i++ )
            if ( ioreq_local_port[i] == port )
                break;

        if ( i == vcpus ) {
            fprintf(logfile, "Fatal error while trying to get io event!\n");
            exit(1);
        }

        // unmask the wanted port again
        xc_evtchn_unmask(xce_handle, port);

        // get the ioreq packet from shared memory
        send_vcpu = i;
        return __cpu_get_ioreq(i);
    }

    // read error or nothing pending
    return NULL;
}
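
/*
 * Each request walks a small state machine shared with Xen:
 * STATE_IOREQ_READY (Xen posted a request) -> STATE_IOREQ_INPROCESS
 * (we claimed it) -> STATE_IORESP_READY (response written back, event
 * channel notified; see cpu_handle_ioreq() below). The whole consumer
 * side, reduced to a sketch that mirrors the code in this file:
 */
#if 0
static void serve_one_ioreq(CPUState *env, int vcpu)
{
    ioreq_t *req = &shared_page->vcpu_iodata[vcpu].vp_ioreq;

    if (req->state != STATE_IOREQ_READY)
        return;                        /* nothing posted for this vcpu */
    rmb();                             /* see READY, then read contents */
    req->state = STATE_IOREQ_INPROCESS;
    __handle_ioreq(env, req);
    wmb();                             /* write contents, then state */
    req->state = STATE_IORESP_READY;
    xc_evtchn_notify(xce_handle, ioreq_local_port[vcpu]);
}
#endif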
unsigned long do_inp(CPUState *env, unsigned long addr, unsigned long size)
{
    switch(size) {
    case 1:
        return cpu_inb(env, addr);
    case 2:
        return cpu_inw(env, addr);
    case 4:
        return cpu_inl(env, addr);
    default:
        fprintf(logfile, "inp: bad size: %lx %lx\n", addr, size);
        exit(-1);
    }
}

void do_outp(CPUState *env, unsigned long addr,
             unsigned long size, unsigned long val)
{
    switch(size) {
    case 1:
        cpu_outb(env, addr, val);
        break;
    case 2:
        cpu_outw(env, addr, val);
        break;
    case 4:
        cpu_outl(env, addr, val);
        break;
    default:
        fprintf(logfile, "outp: bad size: %lx %lx\n", addr, size);
        exit(-1);
    }
}
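
/*
 * Only 1-, 2- and 4-byte accesses are legal for x86 port I/O, hence the
 * hard exit on any other size. Hypothetical call sites, for flavour:
 */
#if 0
    unsigned long kbd_status = do_inp(env, 0x64, 1); /* inb, KBC status */
    do_outp(env, 0x80, 1, 0xff);                     /* outb, POST code */
#endif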
extern void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                                   int len, int is_write);

static inline void read_physical(uint64_t addr, unsigned long size, void *val)
{
    cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 0);
}

static inline void write_physical(uint64_t addr, unsigned long size, void *val)
{
    cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 1);
}
void cpu_ioreq_pio(CPUState *env, ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(env, req->addr, req->size);
        } else {
            unsigned long tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(env, req->addr, req->size);
                write_physical((target_phys_addr_t) req->data
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            do_outp(env, req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                unsigned long tmp = 0;

                read_physical((target_phys_addr_t) req->data
                              + (sign * i * req->size),
                              req->size, &tmp);
                do_outp(env, req->addr, req->size, tmp);
            }
        }
    }
}
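
/*
 * Worked example for the data_is_ptr path: a guest "rep insw" from port
 * 0x1f0 with the direction flag clear arrives roughly as the request
 * below (values hypothetical). cpu_ioreq_pio() then performs three
 * 2-byte port reads and stores them at guest-physical 0x80000, 0x80002
 * and 0x80004; with df set, the offsets would run 0, -2, -4 instead.
 */
#if 0
    ioreq_t ex = {
        .type        = IOREQ_TYPE_PIO,
        .dir         = IOREQ_READ,
        .data_is_ptr = 1,
        .addr        = 0x1f0,        /* I/O port */
        .data        = 0x80000,      /* guest-physical buffer */
        .size        = 2,
        .count       = 3,
        .df          = 0,
    };
#endif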
void cpu_ioreq_move(CPUState *env, ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_physical(req->addr
                              + (sign * i * req->size),
                              req->size, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_physical(req->addr
                               + (sign * i * req->size),
                               req->size, &req->data);
            }
        }
    } else {
        target_ulong tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_physical(req->addr
                              + (sign * i * req->size),
                              req->size, &tmp);
                write_physical((target_phys_addr_t)req->data
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_physical((target_phys_addr_t)req->data
                              + (sign * i * req->size),
                              req->size, &tmp);
                write_physical(req->addr
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        }
    }
}
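
/*
 * IOREQ_TYPE_COPY covers MMIO-style moves. In the scalar case the data
 * travels inline in req->data; e.g. a 4-byte read of a memory-mapped
 * register (hypothetical values) comes in as below, and cpu_ioreq_move()
 * answers it by copying guest-physical 0xa0000 into req->data:
 */
#if 0
    ioreq_t ex = {
        .type        = IOREQ_TYPE_COPY,
        .dir         = IOREQ_READ,
        .data_is_ptr = 0,
        .addr        = 0xa0000,      /* guest-physical address */
        .size        = 4,
        .count       = 1,
    };
#endif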
void cpu_ioreq_and(CPUState *env, ioreq_t *req)
{
    target_ulong tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 & (target_ulong) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}

void cpu_ioreq_add(CPUState *env, ioreq_t *req)
{
    target_ulong tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 + (target_ulong) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}

void cpu_ioreq_sub(CPUState *env, ioreq_t *req)
{
    target_ulong tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 - (target_ulong) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}

void cpu_ioreq_or(CPUState *env, ioreq_t *req)
{
    target_ulong tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 | (target_ulong) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}

void cpu_ioreq_xor(CPUState *env, ioreq_t *req)
{
    target_ulong tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 ^ (target_ulong) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}
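
/*
 * cpu_ioreq_and/add/sub/or/xor above differ only in the operator: read
 * the old value, optionally write old-op-data back, and always return
 * the old value in req->data. One way to factor them (hypothetical
 * refactoring, not part of this changeset):
 */
#if 0
typedef target_ulong (*rmw_op_t)(target_ulong old, target_ulong arg);

static void cpu_ioreq_rmw(CPUState *env, ioreq_t *req, rmw_op_t op)
{
    target_ulong tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = op(tmp1, (target_ulong) req->data);
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;                  /* old value always returned */
}

static target_ulong op_and(target_ulong a, target_ulong b) { return a & b; }
static target_ulong op_or (target_ulong a, target_ulong b) { return a | b; }
/* ... op_add, op_sub, op_xor along the same lines ... */
#endif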
void timeoffset_get(void)
{
    char *p;

    p = xenstore_vm_read(domid, "rtc/timeoffset", NULL);
    if (!p)
        return;

    if (sscanf(p, "%ld", &time_offset) == 1)
        fprintf(logfile, "Time offset set %ld\n", time_offset);
    else
        time_offset = 0;

    xc_domain_set_time_offset(xc_handle, domid, time_offset);

    free(p);
}
void cpu_ioreq_timeoffset(CPUState *env, ioreq_t *req)
{
    char b[64];

    time_offset += (ulong)req->data;

    snprintf(b, sizeof(b), "%ld", time_offset);
    xenstore_vm_write(domid, "rtc/timeoffset", b);
}
void cpu_ioreq_xchg(CPUState *env, ioreq_t *req)
{
    unsigned long tmp1;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    write_physical(req->addr, req->size, &req->data);
    req->data = tmp1;
}
void __handle_ioreq(CPUState *env, ioreq_t *req)
{
    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
        (req->size < sizeof(target_ulong)))
        req->data &= ((target_ulong)1 << (8 * req->size)) - 1;

    switch (req->type) {
    case IOREQ_TYPE_PIO:
        cpu_ioreq_pio(env, req);
        break;
    case IOREQ_TYPE_COPY:
        cpu_ioreq_move(env, req);
        break;
    case IOREQ_TYPE_AND:
        cpu_ioreq_and(env, req);
        break;
    case IOREQ_TYPE_ADD:
        cpu_ioreq_add(env, req);
        break;
    case IOREQ_TYPE_SUB:
        cpu_ioreq_sub(env, req);
        break;
    case IOREQ_TYPE_OR:
        cpu_ioreq_or(env, req);
        break;
    case IOREQ_TYPE_XOR:
        cpu_ioreq_xor(env, req);
        break;
    case IOREQ_TYPE_XCHG:
        cpu_ioreq_xchg(env, req);
        break;
    case IOREQ_TYPE_TIMEOFFSET:
        cpu_ioreq_timeoffset(env, req);
        break;
    case IOREQ_TYPE_INVALIDATE:
        qemu_invalidate_map_cache();
        break;
    default:
        hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}
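
/*
 * The masking at the top of __handle_ioreq() truncates scalar write data
 * to the access width: for a 1-byte write the mask is
 * ((target_ulong)1 << 8) - 1 = 0xff, so a stray 0x1234 becomes 0x34.
 * A quick self-contained check with hypothetical values:
 */
#if 0
static void mask_example(void)
{
    target_ulong data = 0x1234;
    unsigned size = 1;                           /* 1-byte access */

    data &= ((target_ulong)1 << (8 * size)) - 1;
    assert(data == 0x34);
}
#endif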
void __handle_buffered_iopage(CPUState *env)
{
    ioreq_t *req = NULL;

    if (!buffered_io_page)
        return;

    while (buffered_io_page->read_pointer !=
           buffered_io_page->write_pointer) {
        req = &buffered_io_page->ioreq[buffered_io_page->read_pointer %
                                       IOREQ_BUFFER_SLOT_NUM];

        __handle_ioreq(env, req);

        mb();
        buffered_io_page->read_pointer++;
    }
}
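
/*
 * The buffered-I/O page is a single-producer (Xen), single-consumer
 * (qemu-dm) ring: both pointers only ever increase and are reduced
 * modulo IOREQ_BUFFER_SLOT_NUM on use, so the ring is empty when they
 * are equal. For symmetry with the consumer above, a sketch of what the
 * producer side must look like (hypothetical; the real producer lives
 * in the hypervisor, not in this file):
 */
#if 0
static int buffered_ring_put(buffered_iopage_t *pg, const ioreq_t *req)
{
    if (pg->write_pointer - pg->read_pointer == IOREQ_BUFFER_SLOT_NUM)
        return -1;                               /* ring full */
    pg->ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM] = *req;
    wmb();                                       /* fill slot, then pointer */
    pg->write_pointer++;
    return 0;
}
#endif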
void handle_buffered_io(void *opaque)
{
    CPUState *env = opaque;

    __handle_buffered_iopage(env);
    qemu_mod_timer(buffered_io_timer, BUFFER_IO_MAX_DELAY +
                   qemu_get_clock(rt_clock));
}
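
/*
 * handle_buffered_io() re-arms its own timer, so the buffered page is
 * drained at least every BUFFER_IO_MAX_DELAY (100) ms of rt_clock time
 * even when no event-channel notification arrives. The self-rearming
 * pattern in isolation (hypothetical callback, same qemu timer API):
 */
#if 0
static QEMUTimer *tick_timer;

static void tick(void *opaque)
{
    /* ... periodic work ... */
    qemu_mod_timer(tick_timer, qemu_get_clock(rt_clock) + 100);
}

/* setup: tick_timer = qemu_new_timer(rt_clock, tick, NULL);
 *        qemu_mod_timer(tick_timer, qemu_get_clock(rt_clock)); */
#endif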
void cpu_handle_ioreq(void *opaque)
{
    extern int vm_running;
    extern int shutdown_requested;
    extern int reset_requested;
    CPUState *env = opaque;
    ioreq_t *req = cpu_get_ioreq();

    handle_buffered_io(env);
    if (req) {
        __handle_ioreq(env, req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(logfile, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
            destroy_hvm_domain();
            return;
        }

        wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (vm_running) {
            if (shutdown_requested) {
                fprintf(logfile, "shutdown requested in cpu_handle_ioreq\n");
                destroy_hvm_domain();
            }
            if (reset_requested) {
                fprintf(logfile, "reset requested in cpu_handle_ioreq.\n");
                qemu_system_reset();
                reset_requested = 0;
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
    }
}
int main_loop(void)
{
    extern int vm_running;
    extern int shutdown_requested;
    extern int suspend_requested;
    CPUState *env = cpu_single_env;
    int evtchn_fd = xc_evtchn_fd(xce_handle);
    char qemu_file[PATH_MAX];
    fd_set fds;

    buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
                                       cpu_single_env);
    qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));

    qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);

    xenstore_record_dm_state("running");
    while (1) {
        while (!(vm_running && suspend_requested))
            /* Wait up to 10 msec. */
            main_loop_wait(10);

        fprintf(logfile, "device model saving state\n");

        /* Pull all outstanding ioreqs through the system */
        handle_buffered_io(env);
        main_loop_wait(1); /* For the select() on events */

        /* Save the device state */
        snprintf(qemu_file, sizeof(qemu_file),
                 "/var/lib/xen/qemu-save.%d", domid);
        do_savevm(qemu_file);

        xenstore_record_dm_state("paused");

        /* Wait to be allowed to continue */
        while (suspend_requested) {
            FD_ZERO(&fds);
            FD_SET(xenstore_fd(), &fds);
            if (select(xenstore_fd() + 1, &fds, NULL, NULL, NULL) > 0)
                xenstore_process_event(NULL);
        }

        xenstore_record_dm_state("running");
    }

    return 0;
}
void destroy_hvm_domain(void)
{
    int xcHandle;
    int sts;

    xcHandle = xc_interface_open();
    if (xcHandle < 0)
        fprintf(logfile, "Cannot acquire xenctrl handle\n");
    else {
        sts = xc_domain_shutdown(xcHandle, domid, SHUTDOWN_poweroff);
        if (sts != 0)
            fprintf(logfile, "? xc_domain_shutdown failed to issue poweroff, "
                    "sts %d, errno %d\n", sts, errno);
        else
            fprintf(logfile, "Issued domain %d poweroff\n", domid);
        xc_interface_close(xcHandle);
    }
}