ia64/xen-unstable

view tools/ioemu/exec.c @ 6946:e703abaf6e3d

Add behaviour to the remove methods to remove the transaction's path itself. This allows us to write Remove(path) to remove the specified path rather than having to slice the path ourselves.
author emellor@ewan
date Sun Sep 18 14:42:13 2005 +0100 (2005-09-18)
parents 3233e7ecfa9f
children 06d84bf87159
/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* to optimize handling of self-modifying code, we count the writes
       to a given page; past a threshold we switch to a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
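
/* Editor's illustration (not part of the original file): the macros above
   describe a two-level table of page descriptors. Assuming
   TARGET_PAGE_BITS == 12, an address splits into a 10-bit L1 index, a
   10-bit L2 index and a 12-bit page offset. The 'l1_map' parameter below
   stands in for the static table the real code keeps elsewhere. */
static PageDesc *example_page_find(PageDesc **l1_map, unsigned long address)
{
    unsigned long index = address >> TARGET_PAGE_BITS;
    PageDesc *l2 = l1_map[index >> L2_BITS];   /* top L1_BITS bits */

    if (!l2)
        return NULL;                           /* no descriptors for this range yet */
    return &l2[index & (L2_SIZE - 1)];         /* low L2_BITS bits */
}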
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb = 1;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

void cpu_exec_init(void)
{
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}
/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (!logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /*
        stdout = logfile;
        stderr = logfile;
        */
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma-separated list of log masks. Returns 0 on error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for (;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
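
/* Editor's illustration (not part of the original file): a front end would
   typically combine cpu_str_to_log_mask() with cpu_set_log() to honour a
   command-line option such as "-d in_asm,int". The function name and error
   handling below are assumptions, not code from this file. */
static void example_parse_log_option(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);   /* e.g. arg = "in_asm,int" */

    if (!mask) {                           /* some item matched no cpu_log_items[] entry */
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        exit(1);
    }
    cpu_set_log(mask);                     /* opens the log file on first use */
}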
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    va_end(ap);
    abort();
}

/* XXX: Simple implementation. Fix later */
#define MAX_MMIO 32
struct mmio_space {
    target_phys_addr_t start;
    unsigned long size;
    unsigned long io_index;
} mmio[MAX_MMIO];
unsigned long mmio_cnt;

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    if (mmio_cnt == MAX_MMIO) {
        /* logfile may not be open yet, so report on stderr */
        fprintf(stderr, "too many mmio regions\n");
        exit(-1);
    }
    mmio[mmio_cnt].io_index = phys_offset;
    mmio[mmio_cnt].start = start_addr;
    mmio[mmio_cnt++].size = size;
}
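
/* Editor's illustration (not part of the original file): in this ioemu
   variant the function only records the range in the mmio[] table, to be
   matched later by iomem_index() below. A device page would be mapped
   roughly like this; the address is invented for the example. */
static void example_map_mmio(int io_index)
{
    /* one page of device registers at an arbitrary guest-physical address;
       io_index is a value returned by cpu_register_io_memory() below */
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io_index);
}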
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* allocating a fresh slot: check the table size, not the (<= 0) index */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
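
/* Editor's illustration (not part of the original file): a minimal device
   registration using the API above. The device, its handlers and the reuse
   of example_map_mmio() from the earlier sketch are all hypothetical. */
static uint32_t example_dev_read(void *opaque, target_phys_addr_t addr)
{
    return 0xff;                       /* dummy register value */
}

static void example_dev_write(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
{
    /* a real device would latch 'val' here */
}

static CPUReadMemoryFunc *example_dev_reads[3] = {
    example_dev_read, example_dev_read, example_dev_read,     /* b/w/l */
};
static CPUWriteMemoryFunc *example_dev_writes[3] = {
    example_dev_write, example_dev_write, example_dev_write,
};

static void example_register_device(void)
{
    /* 0 asks for a fresh slot; the return value already carries the
       IO_MEM_SHIFT and can be passed straight to
       cpu_register_physical_memory() */
    int io = cpu_register_io_memory(0, example_dev_reads,
                                    example_dev_writes, NULL);
    example_map_mmio(io);              /* from the sketch above */
}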
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy only the per-page chunk 'l'; the flags above were
               checked for this page only */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else

int iomem_index(target_phys_addr_t addr)
{
    int i;

    for (i = 0; i < mmio_cnt; i++) {
        unsigned long start, end;

        start = mmio[i].start;
        end = mmio[i].start + mmio[i].size;
        /* 'end' is one past the last byte, so compare strictly */
        if ((addr >= start) && (addr < end)) {
            return (mmio[i].io_index >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        }
    }
    return 0;
}
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;

        pd = page;
        io_index = iomem_index(page);
        if (is_write) {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;

                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
            }
        } else {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
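
/* Editor's illustration (not part of the original file): DMA-style helpers
   on top of cpu_physical_memory_rw(). Mainline qemu provides similar
   cpu_physical_memory_read/write wrappers; the names here are assumptions. */
static void example_dma_write(target_phys_addr_t addr,
                              const uint8_t *buf, int len)
{
    /* is_write = 1: copy from 'buf' into guest RAM or an mmio handler */
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}

static void example_dma_read(target_phys_addr_t addr, uint8_t *buf, int len)
{
    /* is_write = 0: copy from guest memory into 'buf' */
    cpu_physical_memory_rw(addr, buf, len, 0);
}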
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
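
/* Editor's illustration (not part of the original file): a gdb-stub style
   read of guest virtual memory. Unlike cpu_physical_memory_rw(), this path
   translates each page with cpu_get_phys_page_debug(), so it fails cleanly
   on unmapped addresses. The helper and its buffer handling are invented. */
static int example_read_guest_buffer(CPUState *env, target_ulong vaddr,
                                     char *out, int max_len)
{
    if (cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, max_len, 0) < 0)
        return -1;               /* some page in the range was unmapped */
    out[max_len - 1] = '\0';     /* force termination for the example */
    return 0;
}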
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    uint8_t *p;
    int len;

    if ((len = (end - start)) <= 0)
        return;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    len = len >> TARGET_PAGE_BITS;
    while (len > 0)
        p[--len] &= ~VGA_DIRTY_FLAG;
    return;
}
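
/* Editor's illustration (not part of the original file): the usage pattern
   suggested by VGA_DIRTY_FLAG, stated here as an assumption. A display
   refresh would scan the framebuffer pages, redraw those marked dirty in
   phys_ram_dirty[], then clear the flags for the next frame: */
static void example_vga_refresh(target_ulong fb_start, target_ulong fb_end)
{
    target_ulong a;

    for (a = fb_start; a < fb_end; a += TARGET_PAGE_SIZE) {
        if (phys_ram_dirty[a >> TARGET_PAGE_BITS] & VGA_DIRTY_FLAG) {
            /* redraw the scanlines backed by this page (omitted) */
        }
    }
    cpu_physical_memory_reset_dirty(fb_start, fb_end);
}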