ia64/xen-unstable

tools/ioemu/target-i386-dm/exec-dm.c @ 15797:b485d8d7347a

ioemu: Avoid unaligned guest memory accesses on ia64.

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
Signed-off-by: Keir Fraser <keir@xensource.com>

author    kfraser@localhost.localdomain
date      Wed Aug 29 15:47:55 2007 +0100
parents   0eec072e870a
children  3805cc382dbe
/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "vl.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#ifndef CONFIG_DM
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;
#endif /* !CONFIG_DM */

uint64_t phys_ram_size;
extern uint64_t ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb = 1;

/* log support */
FILE *logfile;
int loglevel;

#ifdef MAPCACHE
pthread_mutex_t mapcache_mutex;
#endif
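
/* Register a CPUState on the global CPU list and assign it a cpu_index,
 * allocate the physical-RAM dirty bitmap and, when the mapcache is in use,
 * initialise the recursive mutex that protects it. */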
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;
#ifdef MAPCACHE
    pthread_mutexattr_t mxattr;
#endif

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);

#ifdef MAPCACHE
    /* setup memory access mutex to protect mapcache */
    pthread_mutexattr_init(&mxattr);
    pthread_mutexattr_settype(&mxattr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&mapcache_mutex, &mxattr);
    pthread_mutexattr_destroy(&mxattr);
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (!logfile)
        logfile = stderr;
}
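
/* Redirect qemu logging (and the process's stdout/stderr) to the named file. */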
void cpu_set_log_filename(const char *filename)
{
    logfile = fopen(filename, "w");
    if (!logfile) {
        perror(filename);
        _exit(1);
    }
#if !defined(CONFIG_SOFTMMU)
    /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
    {
        static uint8_t logfile_buf[4096];
        setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
    }
#else
    setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    stdout = logfile;
    stderr = logfile;
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};
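
/* Return true if the first n characters of s1 exactly match the
 * NUL-terminated string s2. */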
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 on error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for (;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    va_end(ap);
    abort();
}


/* XXX: Simple implementation. Fix later */
#define MAX_MMIO 32
struct mmio_space {
    target_phys_addr_t start;
    unsigned long size;
    unsigned long io_index;
} mmio[MAX_MMIO];
unsigned long mmio_cnt;

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    int i;

    for (i = 0; i < mmio_cnt; i++) {
        if (mmio[i].start == start_addr) {
            mmio[i].io_index = phys_offset;
            mmio[i].size = size;
            return;
        }
    }

    if (mmio_cnt == MAX_MMIO) {
        fprintf(logfile, "too many mmio regions\n");
        exit(-1);
    }

    mmio[mmio_cnt].io_index = phys_offset;
    mmio[mmio_cnt].start = start_addr;
    mmio[mmio_cnt++].size = size;
}

/* mem_read and mem_write are arrays containing the functions used to
   access memory as bytes (index 0), words (index 1) and dwords (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
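
/* Look up the handler tables for an io_index as returned by
 * cpu_register_io_memory() (i.e. already shifted by IO_MEM_SHIFT). */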
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#ifdef __ia64__
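
/* Cache-management primitives: "fc" flushes the cache line containing the
 * given address; sync.i/srlz.i serialise the instruction stream so the
 * flush becomes visible to subsequent instruction fetches. */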
#define __ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
#define ia64_sync_i()   asm volatile (";; sync.i" ::: "memory")
#define ia64_srlz_i()   asm volatile (";; srlz.i ;;" ::: "memory")

/* IA64 has separate I/D caches, with coherence maintained by the DMA
 * controller. So, to emulate the behavior the guest OS expects, we need
 * to flush the I/D caches here.
 */
static void sync_icache(unsigned long address, int len)
{
    int l;

    for (l = 0; l < (len + 32); l += 32)
        __ia64_fc(address + l);

    ia64_sync_i();
    ia64_srlz_i();
}
#endif

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
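
/* Return the io_mem handler index covering a guest physical address, or 0
 * if the address does not fall inside any registered MMIO region. */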
int iomem_index(target_phys_addr_t addr)
{
    int i;

    for (i = 0; i < mmio_cnt; i++) {
        unsigned long start, end;

        start = mmio[i].start;
        end = mmio[i].start + mmio[i].size;

        if ((addr >= start) && (addr < end)) {
            return (mmio[i].io_index >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        }
    }
    return 0;
}

#if defined(__i386__) || defined(__x86_64__)
#define phys_ram_addr(x) (qemu_map_cache(x))
#elif defined(__ia64__)
#define phys_ram_addr(x) (((x) < ram_size) ? (phys_ram_base + (x)) : NULL)
#endif
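
/* Log-dirty tracking state maintained elsewhere in the device model; when
 * the bitmap is non-NULL, guest frames written by qemu are recorded in it. */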
extern unsigned long *logdirty_bitmap;
extern unsigned long logdirty_bitmap_size;

/*
 * Replace the standard byte memcpy with a word memcpy for appropriately sized
 * memory copy operations. Some users (USB-UHCI) cannot tolerate the possible
 * word tearing that can result from a guest concurrently writing a memory
 * structure while the qemu device model is modifying the same location.
 * Forcing a word-sized read/write prevents the guest from seeing a partially
 * written word-sized atom.
 */
#if defined(__x86_64__) || defined(__i386__)
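/* x86 variant: dst is in %edi/%rdi, src in %esi/%rsi and the byte count in
 * %edx.  The bulk is copied with rep movsq/movsl, then any remaining
 * 4/2/1-byte tail is moved with single string instructions. */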
static void memcpy_words(void *dst, void *src, size_t n)
{
    asm volatile (
        "   movl %%edx,%%ecx \n"
#ifdef __x86_64__
        "   shrl $3,%%ecx    \n"
        "   rep  movsq       \n"
        "   test $4,%%edx    \n"
        "   jz   1f          \n"
        "   movsl            \n"
#else /* __i386__ */
        "   shrl $2,%%ecx    \n"
        "   rep  movsl       \n"
#endif
        "1: test $2,%%edx    \n"
        "   jz   1f          \n"
        "   movsw            \n"
        "1: test $1,%%edx    \n"
        "   jz   1f          \n"
        "   movsb            \n"
        "1:                  \n"
        : "+S" (src), "+D" (dst) : "d" (n) : "ecx", "memory" );
}
#else
static void memcpy_words(void *dst, void *src, size_t n)
{
    /* Some architectures do not like unaligned accesses. */
    if (((unsigned long)dst | (unsigned long)src) & 3) {
        memcpy(dst, src, n);
        return;
    }

    while (n >= sizeof(uint32_t)) {
        *((uint32_t *)dst) = *((uint32_t *)src);
        dst = ((uint32_t *)dst) + 1;
        src = ((uint32_t *)src) + 1;
        n -= sizeof(uint32_t);
    }

    if (n & 2) {
        *((uint16_t *)dst) = *((uint16_t *)src);
        dst = ((uint16_t *)dst) + 1;
        src = ((uint16_t *)src) + 1;
    }

    if (n & 1) {
        *((uint8_t *)dst) = *((uint8_t *)src);
        dst = ((uint8_t *)dst) + 1;
        src = ((uint8_t *)src) + 1;
    }
}
#endif
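
/* Copy 'len' bytes between a caller buffer and guest-physical memory,
 * splitting the transfer at page boundaries.  MMIO addresses are routed
 * through the registered io_mem handlers; RAM is copied with memcpy_words()
 * (and recorded in the log-dirty bitmap on writes); anything else reads
 * back as 0xff. */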
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;

    mapcache_lock();

    while (len > 0) {
        /* How much can we copy before the next page boundary? */
        l = TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK);
        if (l > len)
            l = len;

        io_index = iomem_index(addr);
        if (is_write) {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else if ((ptr = phys_ram_addr(addr)) != NULL) {
                /* Writing to RAM */
                memcpy_words(ptr, buf, l);
                if (logdirty_bitmap != NULL) {
                    /* Record that we have dirtied this frame */
                    unsigned long pfn = addr >> TARGET_PAGE_BITS;
                    if (pfn / 8 >= logdirty_bitmap_size) {
                        fprintf(logfile, "dirtying pfn %lx >= bitmap "
                                "size %lx\n", pfn, logdirty_bitmap_size * 8);
                    } else {
                        logdirty_bitmap[pfn / HOST_LONG_BITS]
                            |= 1UL << pfn % HOST_LONG_BITS;
                    }
                }
#ifdef __ia64__
                sync_icache(ptr, l);
#endif
            }
        } else {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else if ((ptr = phys_ram_addr(addr)) != NULL) {
                /* Reading from RAM */
                memcpy_words(buf, ptr, l);
            } else {
                /* Neither RAM nor known MMIO space */
                memset(buf, 0xff, len);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    mapcache_unlock();
}
#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
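
/* Clear the given dirty flags for every page in [start, end) in the
 * phys_ram_dirty byte array. */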
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    unsigned long length;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    len = length >> TARGET_PAGE_BITS;
    for (i = 0; i < len; i++)
        p[i] &= mask;

    return;
}