ia64/xen-unstable

tools/ioemu/target-i386-dm/helper2.c @ 15092:3ecf51689671

hvm qemu: The earlier fix that masked 64-bit operands broke 32-bit
operands with 32-bit qemu. Issue spotted and initial fix provided by
Dexuan Cui.

Signed-off-by: Keir Fraser <keir@xensource.com>
Author: Keir Fraser <keir@xensource.com>
Date:   Sun May 13 10:04:34 2007 +0100

/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Main cpu loop for handling I/O requests coming from a virtual machine
 * Copyright 2004, Intel Corporation.
 * Copyright 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU Lesser General Public License,
 * version 2.1, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
 * License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include <limits.h>
#include <fcntl.h>

#include <xenctrl.h>
#include <xen/hvm/ioreq.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

#include "vl.h"

int domid = -1;
int vcpus = 1;

int xc_handle;

long time_offset = 0;

shared_iopage_t *shared_page = NULL;

#define BUFFER_IO_MAX_DELAY 100
buffered_iopage_t *buffered_io_page = NULL;
QEMUTimer *buffered_io_timer;

/* the evtchn fd for polling */
int xce_handle = -1;

/* which vcpu we are serving */
int send_vcpu = 0;

/* evtchn ports for polling notifications, one per vcpu */
#define NR_CPUS 32
evtchn_port_t ioreq_local_port[NR_CPUS];

CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;
    int i, rc;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;

        cpu_single_env = env;

        xce_handle = xc_evtchn_open();
        if (xce_handle == -1) {
            perror("xc_evtchn_open");
            return NULL;
        }

        /* FIXME: how about if we overflow the page here? */
        for (i = 0; i < vcpus; i++) {
            rc = xc_evtchn_bind_interdomain(
                xce_handle, domid, shared_page->vcpu_iodata[i].vp_eport);
            if (rc == -1) {
                fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
                return NULL;
            }
            ioreq_local_port[i] = rc;
        }
    }

    return env;
}

/* called from main_cpu_reset */
void cpu_reset(CPUX86State *env)
{
    int xcHandle;
    int sts;

    xcHandle = xc_interface_open();
    if (xcHandle < 0)
        fprintf(logfile, "Cannot acquire xenctrl handle\n");
    else {
        sts = xc_domain_shutdown(xcHandle, domid, SHUTDOWN_reboot);
        if (sts != 0)
            fprintf(logfile,
                    "xc_domain_shutdown failed to issue reboot, sts %d\n",
                    sts);
        else
            fprintf(logfile, "Issued domain %d reboot\n", domid);
        xc_interface_close(xcHandle);
    }
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

/* Dump the state of each vcpu's I/O request to the monitor. */
void sp_info(void)
{
    ioreq_t *req;
    int i;

    for (i = 0; i < vcpus; i++) {
        req = &(shared_page->vcpu_iodata[i].vp_ioreq);
        term_printf("vcpu %d: event port %d\n", i, ioreq_local_port[i]);
        term_printf(" req state: %x, ptr: %x, addr: %"PRIx64", "
                    "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
        term_printf(" total I/O handled on this vcpu: %"PRIx64"\n",
                    req->io_count);
    }
}

/* Fetch the pending ioreq packet for a vcpu from shared memory. */
static ioreq_t *__cpu_get_ioreq(int vcpu)
{
    ioreq_t *req;

    req = &(shared_page->vcpu_iodata[vcpu].vp_ioreq);

    if (req->state != STATE_IOREQ_READY) {
        fprintf(logfile, "I/O request not ready: "
                "%x, ptr: %x, addr: %"PRIx64", "
                "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Poll the event channel for an I/O notification and return the pending
 * ioreq packet for the signalling vcpu, or NULL if nothing is pending.
 */
static ioreq_t *cpu_get_ioreq(void)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(xce_handle);
    if (port != -1) {
        for ( i = 0; i < vcpus; i++ )
            if ( ioreq_local_port[i] == port )
                break;

        if ( i == vcpus ) {
            fprintf(logfile, "Fatal error while trying to get io event!\n");
            exit(1);
        }

        /* unmask the wanted port again */
        xc_evtchn_unmask(xce_handle, port);

        /* get the io packet from shared memory */
        send_vcpu = i;
        return __cpu_get_ioreq(i);
    }

    /* read error or read nothing */
    return NULL;
}

unsigned long do_inp(CPUState *env, unsigned long addr, unsigned long size)
{
    switch(size) {
    case 1:
        return cpu_inb(env, addr);
    case 2:
        return cpu_inw(env, addr);
    case 4:
        return cpu_inl(env, addr);
    default:
        fprintf(logfile, "inp: bad size: %lx %lx\n", addr, size);
        exit(-1);
    }
}

void do_outp(CPUState *env, unsigned long addr,
             unsigned long size, unsigned long val)
{
    switch(size) {
    case 1:
        cpu_outb(env, addr, val);
        break;
    case 2:
        cpu_outw(env, addr, val);
        break;
    case 4:
        cpu_outl(env, addr, val);
        break;
    default:
        fprintf(logfile, "outp: bad size: %lx %lx\n", addr, size);
        exit(-1);
    }
}

extern void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                                   int len, int is_write);

static inline void read_physical(uint64_t addr, unsigned long size, void *val)
{
    cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 0);
}

static inline void write_physical(uint64_t addr, unsigned long size, void *val)
{
    cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 1);
}
void cpu_ioreq_pio(CPUState *env, ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(env, req->addr, req->size);
        } else {
            unsigned long tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(env, req->addr, req->size);
                write_physical((target_phys_addr_t) req->data
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            do_outp(env, req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                unsigned long tmp;

                read_physical((target_phys_addr_t) req->data
                              + (sign * i * req->size),
                              req->size, &tmp);
                do_outp(env, req->addr, req->size, tmp);
            }
        }
    }
}
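
/*
 * Handle a memory copy request (MMIO emulation).  The scalar case moves
 * data through req->data itself; the pointer case copies req->count
 * elements between req->addr and the guest buffer at req->data.
 */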
void cpu_ioreq_move(CPUState *env, ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_physical(req->addr
                              + (sign * i * req->size),
                              req->size, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_physical(req->addr
                               + (sign * i * req->size),
                               req->size, &req->data);
            }
        }
    } else {
        unsigned long tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_physical(req->addr
                              + (sign * i * req->size),
                              req->size, &tmp);
                write_physical((target_phys_addr_t)req->data
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_physical((target_phys_addr_t) req->data
                              + (sign * i * req->size),
                              req->size, &tmp);
                write_physical(req->addr
                               + (sign * i * req->size),
                               req->size, &tmp);
            }
        }
    }
}
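
/*
 * Read-modify-write request handlers.  Each reads the current value at
 * req->addr, applies its operation with req->data for write requests,
 * and returns the original value to the guest in req->data.
 */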
void cpu_ioreq_and(CPUState *env, ioreq_t *req)
{
    unsigned long tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 & (unsigned long) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}

void cpu_ioreq_add(CPUState *env, ioreq_t *req)
{
    unsigned long tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 + (unsigned long) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}

void cpu_ioreq_sub(CPUState *env, ioreq_t *req)
{
    unsigned long tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 - (unsigned long) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}

void cpu_ioreq_or(CPUState *env, ioreq_t *req)
{
    unsigned long tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 | (unsigned long) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}

void cpu_ioreq_xor(CPUState *env, ioreq_t *req)
{
    unsigned long tmp1, tmp2;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    if (req->dir == IOREQ_WRITE) {
        tmp2 = tmp1 ^ (unsigned long) req->data;
        write_physical(req->addr, req->size, &tmp2);
    }
    req->data = tmp1;
}
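
/*
 * Read the guest's RTC offset from xenstore and propagate it to the
 * hypervisor via xc_domain_set_time_offset().
 */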
void timeoffset_get(void)
{
    char *p;

    p = xenstore_vm_read(domid, "rtc/timeoffset", NULL);
    if (!p)
        return;

    if (sscanf(p, "%ld", &time_offset) == 1)
        fprintf(logfile, "Time offset set %ld\n", time_offset);
    else
        time_offset = 0;

    xc_domain_set_time_offset(xc_handle, domid, time_offset);

    free(p);
}
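
/*
 * Accumulate a guest adjustment to the RTC offset and persist it in
 * xenstore so the tools can restore it later.
 */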
void cpu_ioreq_timeoffset(CPUState *env, ioreq_t *req)
{
    char b[64];

    time_offset += (ulong)req->data;

    sprintf(b, "%ld", time_offset);
    xenstore_vm_write(domid, "rtc/timeoffset", b);
}
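
/* Swap req->data with the value at req->addr, returning the old value. */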
void cpu_ioreq_xchg(CPUState *env, ioreq_t *req)
{
    unsigned long tmp1;

    if (req->data_is_ptr != 0)
        hw_error("expected scalar value");

    read_physical(req->addr, req->size, &tmp1);
    write_physical(req->addr, req->size, &req->data);
    req->data = tmp1;
}
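
/*
 * Dispatch a single I/O request.  Scalar write operands are clamped to
 * the request size first; skipping the mask when req->size is at least
 * sizeof(long) avoids an undefined 32-bit shift on 32-bit builds, and
 * the (unsigned long) cast truncates the 64-bit data field to the host
 * long width -- the masking fix this changeset makes.
 */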
void __handle_ioreq(CPUState *env, ioreq_t *req)
{
    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE)) {
        /* Clamp data operand to size of a long. */
        if (req->size < sizeof(long))
            req->data &= (1UL << (8 * req->size)) - 1;
        req->data = (unsigned long)req->data;
    }

    switch (req->type) {
    case IOREQ_TYPE_PIO:
        cpu_ioreq_pio(env, req);
        break;
    case IOREQ_TYPE_COPY:
        cpu_ioreq_move(env, req);
        break;
    case IOREQ_TYPE_AND:
        cpu_ioreq_and(env, req);
        break;
    case IOREQ_TYPE_ADD:
        cpu_ioreq_add(env, req);
        break;
    case IOREQ_TYPE_SUB:
        cpu_ioreq_sub(env, req);
        break;
    case IOREQ_TYPE_OR:
        cpu_ioreq_or(env, req);
        break;
    case IOREQ_TYPE_XOR:
        cpu_ioreq_xor(env, req);
        break;
    case IOREQ_TYPE_XCHG:
        cpu_ioreq_xchg(env, req);
        break;
    case IOREQ_TYPE_TIMEOFFSET:
        cpu_ioreq_timeoffset(env, req);
        break;
    case IOREQ_TYPE_INVALIDATE:
        qemu_invalidate_map_cache();
        break;
    default:
        hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}
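
/* Drain every pending request from the buffered-I/O ring. */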
void __handle_buffered_iopage(CPUState *env)
{
    ioreq_t *req = NULL;

    if (!buffered_io_page)
        return;

    while (buffered_io_page->read_pointer !=
           buffered_io_page->write_pointer) {
        req = &buffered_io_page->ioreq[buffered_io_page->read_pointer %
                                       IOREQ_BUFFER_SLOT_NUM];

        __handle_ioreq(env, req);

        mb();
        buffered_io_page->read_pointer++;
    }
}
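
/* Timer callback: drain the buffered-I/O page, then re-arm the timer. */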
void handle_buffered_io(void *opaque)
{
    CPUState *env = opaque;

    __handle_buffered_iopage(env);
    qemu_mod_timer(buffered_io_timer, BUFFER_IO_MAX_DELAY +
                   qemu_get_clock(rt_clock));
}
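
/*
 * Event-channel handler: process any buffered I/O, then service the
 * synchronous ioreq (if one is pending) and notify the issuing vcpu.
 */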
void cpu_handle_ioreq(void *opaque)
{
    extern int vm_running;
    extern int shutdown_requested;
    extern int reset_requested;
    CPUState *env = opaque;
    ioreq_t *req = cpu_get_ioreq();

    handle_buffered_io(env);
    if (req) {
        __handle_ioreq(env, req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(logfile, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, addr: %"PRIx64", "
                    "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
            destroy_hvm_domain();
            return;
        }

        wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (vm_running) {
            if (shutdown_requested) {
                fprintf(logfile, "shutdown requested in cpu_handle_ioreq\n");
                destroy_hvm_domain();
            }
            if (reset_requested) {
                fprintf(logfile, "reset requested in cpu_handle_ioreq\n");
                qemu_system_reset();
                reset_requested = 0;
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
    }
}
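
/*
 * Main device-model loop: service I/O until the tools request a suspend
 * while the VM is running, then flush outstanding I/O and save the
 * device state to a file the tools can pick up.
 */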
int main_loop(void)
{
    extern int vm_running;
    extern int shutdown_requested;
    extern int suspend_requested;
    CPUState *env = cpu_single_env;
    int evtchn_fd = xc_evtchn_fd(xce_handle);
    char qemu_file[64];

    buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
                                       cpu_single_env);
    qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));

    qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);

    while (!(vm_running && suspend_requested))
        /* Wait up to 10 msec. */
        main_loop_wait(10);

    fprintf(logfile, "device model received suspend signal!\n");

    /* Pull all outstanding ioreqs through the system */
    handle_buffered_io(env);
    main_loop_wait(1); /* For the select() on events */

    /* Save the device state */
    snprintf(qemu_file, sizeof(qemu_file), "/tmp/xen.qemu-dm.%d", domid);
    do_savevm(qemu_file);

    return 0;
}
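
/*
 * Ask Xen to power off the domain; called on shutdown requests and on
 * fatal I/O request errors.
 */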
void destroy_hvm_domain(void)
{
    int xcHandle;
    int sts;

    xcHandle = xc_interface_open();
    if (xcHandle < 0)
        fprintf(logfile, "Cannot acquire xenctrl handle\n");
    else {
        sts = xc_domain_shutdown(xcHandle, domid, SHUTDOWN_poweroff);
        if (sts != 0)
            fprintf(logfile, "xc_domain_shutdown failed to issue poweroff, "
                    "sts %d, errno %d\n", sts, errno);
        else
            fprintf(logfile, "Issued domain %d poweroff\n", domid);
        xc_interface_close(xcHandle);
    }
}