tools/ioemu/exec.c @ 8740:3d7ea7972b39 (ia64/xen-unstable)

Update patches for linux 2.6.15.

Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
author   cl349@firebug.cl.cam.ac.uk
date     Thu Feb 02 17:16:00 2006 +0000
parents  b92a36713192
children f7b43e5c42b9
/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

uint64_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
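
/*
 * The page descriptor tables are two-level: the top L1_BITS of a
 * target virtual address index the first level, the next L2_BITS index
 * the second, and the low TARGET_PAGE_BITS are the offset within the
 * page.  A minimal sketch of the index computation, assuming 4 KB
 * target pages (TARGET_PAGE_BITS == 12, hence L1_BITS == 10):
 *
 *   l1_index = (addr >> (TARGET_PAGE_BITS + L2_BITS)) & (L1_SIZE - 1);
 *   l2_index = (addr >> TARGET_PAGE_BITS) & (L2_SIZE - 1);
 *
 * e.g. addr 0x08049123 -> l1_index 0x020, l2_index 0x049, offset 0x123.
 */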

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb = 1;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

void cpu_exec_init(void)
{
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (!logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        stdout = logfile;
        stderr = logfile;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
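
/*
 * Usage sketch: turning a "-d"-style option string into log flags.
 * cpu_str_to_log_mask() and cpu_set_log() are the functions above;
 * everything else here is a hypothetical caller:
 *
 *   int mask = cpu_str_to_log_mask("in_asm,op");
 *   if (!mask)
 *       fprintf(stderr, "unknown log item\n");  // 0 signals a parse error
 *   else
 *       cpu_set_log(mask);                      // starts logging to logfilename
 */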

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    va_end(ap);
    abort();
}

/* XXX: Simple implementation. Fix later */
#define MAX_MMIO 32
struct mmio_space {
    target_phys_addr_t start;
    unsigned long size;
    unsigned long io_index;
} mmio[MAX_MMIO];
unsigned long mmio_cnt;

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    int i;

    for (i = 0; i < mmio_cnt; i++) {
        if (mmio[i].start == start_addr) {
            mmio[i].io_index = phys_offset;
            mmio[i].size = size;
            return;
        }
    }

    if (mmio_cnt == MAX_MMIO) {
        fprintf(logfile, "too many mmio regions\n");
        exit(-1);
    }

    mmio[mmio_cnt].io_index = phys_offset;
    mmio[mmio_cnt].start = start_addr;
    mmio[mmio_cnt++].size = size;
}
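
/*
 * Usage sketch: in this ioemu variant the call above simply records
 * the region in the mmio[] table consulted by iomem_index(), so a
 * device model maps its MMIO window like this.  The address, size and
 * 'vga_io_index' value (which would come from cpu_register_io_memory()
 * below) are illustrative only:
 *
 *   cpu_register_physical_memory(0xfc000000, 0x1000, vga_io_index);
 */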

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* allocating a new zone: bound-check the allocation counter,
           not the (non-positive) io_index that was passed in */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
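
/*
 * Usage sketch: the handler arrays hold the byte, word and dword
 * accessors, in that order.  The serial_* names and the 's' opaque
 * pointer are hypothetical:
 *
 *   static CPUReadMemoryFunc *serial_read[3] = {
 *       serial_readb, serial_readw, serial_readl,
 *   };
 *   static CPUWriteMemoryFunc *serial_write[3] = {
 *       serial_writeb, serial_writew, serial_writel,
 *   };
 *
 *   int idx = cpu_register_io_memory(0, serial_read, serial_write, s);
 *   if (idx == -1)
 *       ...;  // io zone table full
 *   cpu_register_physical_memory(base, 0x1000, idx);
 */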

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy only the chunk that fits in this page */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else

int iomem_index(target_phys_addr_t addr)
{
    int i;

    for (i = 0; i < mmio_cnt; i++) {
        unsigned long start, end;

        start = mmio[i].start;
        end = mmio[i].start + mmio[i].size;

        /* 'end' is one past the last byte of the region */
        if ((addr >= start) && (addr < end)) {
            return (mmio[i].io_index >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        }
    }
    return 0;
}

#ifdef __ia64__
/* IA64 has separate I/D caches, with coherence maintained by the DMA
 * controller.  To get the behavior the guest OS expects, we need to
 * flush the I/D caches here after writing code into guest RAM.
 */
static void sync_icache(unsigned long address, int len)
{
    int l;

    for(l = 0; l < (len + 32); l += 32)
        __ia64_fc(address + l);

    ia64_sync_i();
    ia64_srlz_i();
}
#endif

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;

        pd = page;
        io_index = iomem_index(page);
        if (is_write) {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;

                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
#ifdef __ia64__
                sync_icache((unsigned long)ptr, l);
#endif
            }
        } else {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
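
/*
 * Usage sketch: device models and debug code copy guest-physical
 * memory through this one entry point; io pages are dispatched to the
 * registered handlers, RAM pages are memcpy'd.  'gpa' is hypothetical:
 *
 *   uint32_t v;
 *   cpu_physical_memory_rw(gpa, (uint8_t *)&v, 4, 0);  // read 4 bytes
 *   v |= 1;
 *   cpu_physical_memory_rw(gpa, (uint8_t *)&v, 4, 1);  // write them back
 */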

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
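
/*
 * Usage sketch: this is the path a debugger stub takes to read
 * guest-virtual memory, translating page by page through
 * cpu_get_phys_page_debug().  'env' and 'vaddr' are hypothetical:
 *
 *   uint8_t byte;
 *   if (cpu_memory_rw_debug(env, vaddr, &byte, 1, 0) < 0)
 *       ...;  // vaddr has no physical mapping
 */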

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    uint8_t *p;
    int len;

    if ((len = (end - start)) <= 0)
        return;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    len = len >> TARGET_PAGE_BITS;
    while (len > 0)
        p[--len] &= ~VGA_DIRTY_FLAG;
}
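
/*
 * Sketch of the dirty-tracking convention: phys_ram_dirty keeps one
 * byte per target page, and a display front end would test the VGA
 * bit before repainting, then clear the range it has handled.  A
 * hypothetical check, with 'a' a guest-physical address:
 *
 *   if (phys_ram_dirty[a >> TARGET_PAGE_BITS] & VGA_DIRTY_FLAG) {
 *       // page was written since the last repaint; redraw, then:
 *       cpu_physical_memory_reset_dirty(a, a + TARGET_PAGE_SIZE);
 *   }
 */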