ia64/xen-unstable

view tools/ioemu/target-i386-dm/exec-dm.c @ 10803:42aa63188a88

IA64-specific code for the new Qemu.
Because some ia64 patches are not yet checked into xen-unstable.hg,
I reversed the related logic.

Signed-off-by: Zhang xiantao <xiantao.zhang@intel.com>
Signed-off-by: Christian Limpach <Christian.Limpach@xensource.com>
author chris@kneesaa.uk.xensource.com
date Wed Jul 26 13:41:10 2006 +0100 (2006-07-26)
parents 14642f36a201
children f2eb2089c9eb
/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#ifndef CONFIG_DM
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;
#endif /* !CONFIG_DM */
uint64_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb = 1;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}
/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (!logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        stdout = logfile;
        stderr = logfile;
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for (;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
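/*
 * Usage sketch (not part of the original file): log items are normally
 * selected by name, e.g. from a "-d" style command line option, parsed
 * with cpu_str_to_log_mask() and then activated with cpu_set_log().
 * The option string and helper name below are illustrative only.
 */
#if 0
static void enable_logging_example(const char *opt)
{
    int mask;

    mask = cpu_str_to_log_mask(opt);   /* e.g. opt = "in_asm,int" */
    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", opt);
        return;
    }
    cpu_set_log(mask);                 /* opens /tmp/qemu.log on first use */
}
#endif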
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    va_end(ap);
    abort();
}
/* XXX: Simple implementation. Fix later */
#define MAX_MMIO 32
struct mmio_space {
    target_phys_addr_t start;
    unsigned long size;
    unsigned long io_index;
} mmio[MAX_MMIO];
unsigned long mmio_cnt;

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    int i;

    for (i = 0; i < mmio_cnt; i++) {
        if (mmio[i].start == start_addr) {
            mmio[i].io_index = phys_offset;
            mmio[i].size = size;
            return;
        }
    }

    if (mmio_cnt == MAX_MMIO) {
        fprintf(logfile, "too many mmio regions\n");
        exit(-1);
    }

    mmio[mmio_cnt].io_index = phys_offset;
    mmio[mmio_cnt].start = start_addr;
    mmio[mmio_cnt++].size = size;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* allocate a new io zone */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
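/*
 * Usage sketch (not part of the original file): a device model supplies
 * one read and one write callback per access width (byte, word, dword),
 * registers them to obtain an io_index, and then maps that index over a
 * guest physical range with cpu_register_physical_memory().  All names
 * prefixed my_ and the MY_MMIO_* constants are hypothetical.
 */
#if 0
static uint32_t my_mmio_readb(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t my_mmio_readw(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t my_mmio_readl(void *opaque, target_phys_addr_t addr) { return 0; }
static void my_mmio_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void my_mmio_writew(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void my_mmio_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { }

static CPUReadMemoryFunc *my_mmio_read[3] = {
    my_mmio_readb, my_mmio_readw, my_mmio_readl,    /* index 0, 1, 2 */
};
static CPUWriteMemoryFunc *my_mmio_write[3] = {
    my_mmio_writeb, my_mmio_writew, my_mmio_writel,
};

static void my_device_mmio_init(void *opaque)
{
    int io_index;

    /* passing 0 allocates a fresh io zone; the return value already
       carries the IO_MEM_SHIFT encoding expected as a phys_offset */
    io_index = cpu_register_io_memory(0, my_mmio_read, my_mmio_write, opaque);
    cpu_register_physical_memory(MY_MMIO_BASE, MY_MMIO_SIZE, io_index);
}
#endif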
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
#ifdef __ia64__
/* IA64 has separate I and D caches, with coherence maintained by the DMA
 * controller.  To provide the behavior the guest OS expects, we need to
 * flush the I/D cache here.
 */
static void sync_icache(unsigned long address, int len)
{
    int l;

    for (l = 0; l < (len + 32); l += 32)
        __ia64_fc(address + l);

    ia64_sync_i();
    ia64_srlz_i();
}
#endif
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy only the chunk that fits in the current page */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else

int iomem_index(target_phys_addr_t addr)
{
    int i;

    for (i = 0; i < mmio_cnt; i++) {
        unsigned long start, end;

        start = mmio[i].start;
        end = mmio[i].start + mmio[i].size;

        if ((addr >= start) && (addr < end)) {
            return (mmio[i].io_index >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        }
    }
    return 0;
}
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;

        pd = page;
        io_index = iomem_index(page);
        if (is_write) {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;

                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
            }
        } else {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
#ifdef __ia64__
                sync_icache((unsigned long)ptr, l);
#endif
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
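/*
 * Usage sketch (not part of the original file): callers move data to or
 * from guest physical memory through cpu_physical_memory_rw(); the last
 * argument selects the direction.  The helper below, which loads a
 * buffer into guest RAM, is hypothetical.
 */
#if 0
static void copy_to_guest_example(target_phys_addr_t gpa,
                                  const uint8_t *data, int len)
{
    /* is_write = 1: 'data' is written to guest memory at 'gpa'; pages
       that fall inside a registered mmio region are routed to their
       io_mem_write callbacks instead of plain RAM */
    cpu_physical_memory_rw(gpa, (uint8_t *)data, len, 1);
}
#endif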
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    unsigned long length;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    len = length >> TARGET_PAGE_BITS;
    for (i = 0; i < len; i++)
        p[i] &= mask;

    return;
}