ia64/xen-unstable
tools/ioemu/target-i386-dm/exec-dm.c @ 15841:c5f735271e22

[IA64] Foreign p2m: Fix vti domain builder.

It should set arch_domain::convmem_end.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Author:   Alex Williamson <alex.williamson@hp.com>
Date:     Thu Sep 06 13:48:43 2007 -0600
Parents:  3805cc382dbe
Children: 04cc0e22a20a
/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "vl.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#ifndef CONFIG_DM
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;
#endif /* !CONFIG_DM */

uint64_t phys_ram_size;
extern uint64_t ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb = 1;

/* log support */
FILE *logfile;
int loglevel;

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* enable or disable low level logs */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (!logfile)
        logfile = stderr;
}

void cpu_set_log_filename(const char *filename)
{
    logfile = fopen(filename, "w");
    if (!logfile) {
        perror(filename);
        _exit(1);
    }
#if !defined(CONFIG_SOFTMMU)
    /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
    {
        static uint8_t logfile_buf[4096];
        setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
    }
#else
    setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    stdout = logfile;
    stderr = logfile;
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all I/O port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for (;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
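
/*
 * Usage sketch (hypothetical caller, not part of this file): turning a
 * comma separated "-d"-style option string into an active log mask.
 *
 *     int mask = cpu_str_to_log_mask("in_asm,exec");
 *     if (mask == 0)
 *         fprintf(stderr, "unknown log item\n");
 *     else
 *         cpu_set_log(mask);
 */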
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    va_end(ap);
    abort();
}

/* XXX: Simple implementation. Fix later */
#define MAX_MMIO 32
struct mmio_space {
    target_phys_addr_t start;
    unsigned long size;
    unsigned long io_index;
} mmio[MAX_MMIO];
unsigned long mmio_cnt;

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    int i;

    for (i = 0; i < mmio_cnt; i++) {
        if (mmio[i].start == start_addr) {
            mmio[i].io_index = phys_offset;
            mmio[i].size = size;
            return;
        }
    }

    if (mmio_cnt == MAX_MMIO) {
        fprintf(logfile, "too many mmio regions\n");
        exit(-1);
    }

    mmio[mmio_cnt].io_index = phys_offset;
    mmio[mmio_cnt].start = start_addr;
    mmio[mmio_cnt++].size = size;
}
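
/*
 * Usage sketch (hypothetical device and addresses, for illustration
 * only): in this device-model build the call records an MMIO window in
 * the mmio[] table, so phys_offset is normally the value returned by
 * cpu_register_io_memory():
 *
 *     int io = cpu_register_io_memory(0, my_read_fns, my_write_fns, s);
 *     cpu_register_physical_memory(0xfe000000, 0x1000, io);
 */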
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* allocating a new zone: bound-check the allocation counter,
           not the (non-positive) io_index passed in */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
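
/*
 * Usage sketch (hypothetical accessors, for illustration only): the
 * three table slots are the byte, word and dword handlers, and all
 * three must be supplied.
 *
 *     static CPUReadMemoryFunc *my_read_fns[3] = {
 *         my_readb, my_readw, my_readl,
 *     };
 *     static CPUWriteMemoryFunc *my_write_fns[3] = {
 *         my_writeb, my_writew, my_writel,
 *     };
 *     int io = cpu_register_io_memory(0, my_read_fns, my_write_fns, opaque);
 *     if (io == -1)
 *         ... no free io zone ...
 */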
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#ifdef __ia64__

#define __ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
#define ia64_sync_i()   asm volatile (";; sync.i" ::: "memory")
#define ia64_srlz_i()   asm volatile (";; srlz.i ;;" ::: "memory")

/* IA64 has separate I/D caches, with coherence maintained by the DMA
 * controller. To provide the coherent behavior the guest OS assumes,
 * we need to flush the I/D cache here after writing guest memory.
 */
static void sync_icache(unsigned long address, int len)
{
    int l;

    /* flush one cache line (32 bytes) beyond 'len' so an unaligned
       start address is still fully covered */
    for (l = 0; l < (len + 32); l += 32)
        __ia64_fc(address + l);

    ia64_sync_i();
    ia64_srlz_i();
}
#endif

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy only up to the end of this page ('l' bytes, not
               'len'): only this page's flags have been checked */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else

int iomem_index(target_phys_addr_t addr)
{
    int i;

    for (i = 0; i < mmio_cnt; i++) {
        unsigned long start, end;

        start = mmio[i].start;
        end = mmio[i].start + mmio[i].size;

        if ((addr >= start) && (addr < end)) {
            return (mmio[i].io_index >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        }
    }
    return 0;
}

#if defined(__i386__) || defined(__x86_64__)
#define phys_ram_addr(x) (qemu_map_cache(x))
#elif defined(__ia64__)
/* compare the macro argument itself, not a variable named 'addr'
   captured from the caller's scope */
#define phys_ram_addr(x) (((x) < ram_size) ? (phys_ram_base + (x)) : NULL)
#endif

extern unsigned long *logdirty_bitmap;
extern unsigned long logdirty_bitmap_size;

/*
 * Replace the standard byte memcpy with a word memcpy for appropriately sized
 * memory copy operations. Some users (USB-UHCI) cannot tolerate the possible
 * word tearing that can result from a guest concurrently writing a memory
 * structure while the qemu device model is modifying the same location.
 * Forcing a word-sized read/write prevents the guest from seeing a partially
 * written word-sized atom.
 */
#if defined(__x86_64__) || defined(__i386__)
static void memcpy_words(void *dst, void *src, size_t n)
{
    asm volatile (
        "   movl %%edx,%%ecx \n"
#ifdef __x86_64__
        "   shrl $3,%%ecx    \n"
        "   rep  movsq       \n"
        "   test $4,%%edx    \n"
        "   jz   1f          \n"
        "   movsl            \n"
#else /* __i386__ */
        "   shrl $2,%%ecx    \n"
        "   rep  movsl       \n"
#endif
        "1: test $2,%%edx    \n"
        "   jz   1f          \n"
        "   movsw            \n"
        "1: test $1,%%edx    \n"
        "   jz   1f          \n"
        "   movsb            \n"
        "1:                  \n"
        : "+S" (src), "+D" (dst) : "d" (n) : "ecx", "memory" );
}
#else
static void memcpy_words(void *dst, void *src, size_t n)
{
    /* Some architectures do not like unaligned accesses. */
    if (((unsigned long)dst | (unsigned long)src) & 3) {
        memcpy(dst, src, n);
        return;
    }

    while (n >= sizeof(uint32_t)) {
        *((uint32_t *)dst) = *((uint32_t *)src);
        dst = ((uint32_t *)dst) + 1;
        src = ((uint32_t *)src) + 1;
        n -= sizeof(uint32_t);
    }

    if (n & 2) {
        *((uint16_t *)dst) = *((uint16_t *)src);
        dst = ((uint16_t *)dst) + 1;
        src = ((uint16_t *)src) + 1;
    }

    if (n & 1) {
        *((uint8_t *)dst) = *((uint8_t *)src);
        dst = ((uint8_t *)dst) + 1;
        src = ((uint8_t *)src) + 1;
    }
}
#endif
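
/*
 * A minimal sketch of the guarantee described above (hypothetical
 * buffers): an aligned 4-byte copy is issued as a single word store, so
 * a guest reading the same location concurrently sees either the old or
 * the new value, never a torn mix of the two.
 *
 *     uint32_t link = 0x12345678;            // e.g. a UHCI link pointer
 *     memcpy_words(guest_frame_ptr, &link, sizeof(link));
 *
 * A plain byte-wise memcpy() may store the four bytes individually,
 * which is exactly the tearing the comment above warns about.
 */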
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;

    mapcache_lock();

    while (len > 0) {
        /* How much can we copy before the next page boundary? */
        l = TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK);
        if (l > len)
            l = len;

        io_index = iomem_index(addr);
        if (is_write) {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else if ((ptr = phys_ram_addr(addr)) != NULL) {
                /* Writing to RAM */
                memcpy_words(ptr, buf, l);
                if (logdirty_bitmap != NULL) {
                    /* Record that we have dirtied this frame */
                    unsigned long pfn = addr >> TARGET_PAGE_BITS;
                    if (pfn / 8 >= logdirty_bitmap_size) {
                        fprintf(logfile, "dirtying pfn %lx >= bitmap "
                                "size %lx\n", pfn, logdirty_bitmap_size * 8);
                    } else {
                        logdirty_bitmap[pfn / HOST_LONG_BITS]
                            |= 1UL << pfn % HOST_LONG_BITS;
                    }
                }
#ifdef __ia64__
                sync_icache((unsigned long)ptr, l);
#endif
            }
        } else {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else if ((ptr = phys_ram_addr(addr)) != NULL) {
                /* Reading from RAM */
                memcpy_words(buf, ptr, l);
            } else {
                /* Neither RAM nor known MMIO space: fill only this
                   chunk ('l' bytes); the loop handles the rest */
                memset(buf, 0xff, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    mapcache_unlock();
}
#endif
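
/*
 * Usage sketch (hypothetical structure and address): device emulation
 * typically reaches this function through its read/write wrappers, e.g.
 * to fetch a DMA descriptor out of guest RAM:
 *
 *     struct my_desc d;
 *     cpu_physical_memory_rw(desc_gpa, (uint8_t *)&d, sizeof(d), 0);
 *
 * Addresses inside a registered MMIO window are dispatched through the
 * io_mem_* tables instead of being copied directly.
 */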
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
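
/*
 * Usage sketch (hypothetical): a debugger stub would use this to read
 * guest-virtual memory, letting cpu_get_phys_page_debug() perform the
 * page-table walk:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0)
 *         ... address not mapped ...
 */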
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    unsigned long length;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    len = length >> TARGET_PAGE_BITS;
    for (i = 0; i < len; i++)
        p[i] &= mask;

    return;
}
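
/*
 * Usage sketch (hypothetical flag and range): a display front end that
 * has finished scanning its framebuffer for changes would clear just its
 * own dirty bit for that range, leaving other clients' bits intact:
 *
 *     cpu_physical_memory_reset_dirty(fb_start,
 *                                     fb_start + fb_len,
 *                                     MY_DIRTY_FLAG);
 */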