ia64/xen-unstable
view tools/ioemu/exec-all.h @ 15841:c5f735271e22

[IA64] Foreign p2m: Fix vti domain builder.

It should set arch_domain::convmem_end.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author   Alex Williamson <alex.williamson@hp.com>
date     Thu Sep 06 13:48:43 2007 -0600
parents  090ca10cb543
children a905c582a406
/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/* allow translation results to be seen - the slowdown should be negligible, so we leave it enabled */
#define DEBUG_DISAS

#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
#endif
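
/* Illustrative expansions of the helpers above (added note, not in the
 * original header):
 *
 *     glue(op_, add)          -> op_add
 *     stringify(OPC_BUF_SIZE) -> "512"
 *
 * stringify() forwards to tostring(), so its argument is macro-expanded
 * first; tostring(OPC_BUF_SIZE) alone would yield "OPC_BUF_SIZE". */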

#if __GNUC__ < 3
#define __builtin_expect(x, n) (x)
#endif

#ifdef __i386__
#define REGPARM(n) __attribute__((regparm(n)))
#else
#define REGPARM(n)
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
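
/* Added sketch (not part of the original header): a target translator's
 * main loop typically keeps decoding while is_jmp stays DISAS_NEXT;
 * 'dc' and disas_insn() below are hypothetical names:
 *
 *     while (dc->is_jmp == DISAS_NEXT && !dc->singlestep)
 *         disas_insn(dc);
 */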

struct TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)
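
/* Added note: OPC_MAX_SIZE leaves MAX_OP_PER_INSTR micro-ops of slack, so
 * the translator can stop at the limit without a single guest instruction
 * overrunning gen_opc_buf; OPPARAM_BUF_SIZE presumably allows three
 * operands per op, matching the widest GenOpFunc3 signature below. */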

extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
extern long gen_labels[OPC_BUF_SIZE];
extern int nb_gen_labels;
extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern target_ulong gen_opc_jump_pc[2];
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];

typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);

#if defined(TARGET_I386)

void optimize_flags_init(void);

#endif

extern FILE *logfile;
extern int loglevel;

int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
                      int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_exec_init(CPUState *env);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu);
static inline int tlb_set_page(CPUState *env, target_ulong vaddr,
                               target_phys_addr_t paddr, int prot,
                               int is_user, int is_softmmu)
{
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
}
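
/* Added note: tlb_set_page() is a convenience wrapper that grants execute
 * permission on every readable page, which suits targets without a separate
 * execute bit; callers that track PAGE_EXEC precisely use
 * tlb_set_page_exec() directly. */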

#define CODE_GEN_MAX_SIZE        65536
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* maximum total translated code allocated */

/* NOTE: the translated code area cannot be too big because on some
   archs the range of "fast" function calls is limited. Here is a
   summary of the ranges:

   i386  : signed 32 bits
   arm   : signed 26 bits
   ppc   : signed 24 bits
   sparc : signed 32 bits
   alpha : signed 23 bits
*/

#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE     (2 * 1024 * 1024)
#elif defined(__ia64)
#define CODE_GEN_BUFFER_SIZE     (4 * 1024 * 1024)     /* range of addl */
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE     (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE     (16 * 1024 * 1024)
#endif
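
/* Added worked example: a signed n-bit branch offset reaches +/- 2^(n-1)
 * bytes, so ppc's signed 24 bits cover +/- 8 MB and alpha's signed 23 bits
 * cover +/- 4 MB; the 6 MB and 2 MB buffers above therefore keep every
 * intra-buffer call within direct-branch range. */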

//#define CODE_GEN_BUFFER_SIZE     (128 * 1024)

/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)

#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif

typedef struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    unsigned int flags;   /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_CODE_COPY   0x0001 /* block was generated in code copy mode */
#define CF_TB_FP_USED  0x0002 /* fp ops are used in the TB */
#define CF_FP_USED     0x0004 /* fp ops are used in the TB or in a chained TB */
#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */

    uint8_t *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    uint32_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;
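
/* Added sketch (illustrative only, based on the tagging scheme documented
 * above): visiting every TB that jumps into 'tb'.  The low two bits of each
 * list pointer select which jmp_next[] slot continues the chain; the value
 * 2 marks the jmp_first anchor, i.e. the end of the circular list. */
static inline void tb_visit_jmp_list_example(TranslationBlock *tb)
{
    TranslationBlock *ptb = tb->jmp_first;
    while (ptb != NULL) {
        int n = (long)ptb & 3;
        TranslationBlock *tb1 = (TranslationBlock *)((long)ptb & ~3);
        if (n == 2)
            break;              /* wrapped around to the anchor */
        /* here tb1 is a block that jumps to 'tb' through its slot n */
        ptb = tb1->jmp_next[n];
    }
}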

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK) |
            (tmp & TB_JMP_ADDR_MASK));
}

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
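
/* Added note: tb_jmp_cache_hash_func() folds page-number bits into the
 * index so blocks at the same offset on different pages spread across the
 * jump cache, while tb_phys_hash_func() simply masks the physical PC to
 * the CODE_GEN_PHYS_HASH_SIZE-entry table. */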

TranslationBlock *tb_alloc(target_ulong pc);
void tb_flush(CPUState *env);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
extern uint8_t *code_gen_ptr;

#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#endif
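
/* Added note: on i386 the 32-bit operand of a near jmp is relative to the
 * end of the instruction, hence the 'jmp_addr + 4' bias above, and the
 * icache is coherent with stores so no flush is needed; PowerPC must
 * explicitly push the patched word out with dcbst/icbi before it can be
 * fetched. */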

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}
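
/* Added note: tb_jmp_offset[] holds up to two patch sites per jump slot
 * (entries n and n + 2); an offset of 0xffff marks the second site as
 * unused. */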

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
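
/* Added usage sketch: the execution loop chains the TB it just ran to the
 * next TB it looked up, so the generated code branches there directly next
 * time; 'n' selects which of the two jump slots is linked:
 *
 *     tb_add_jump(last_tb, slot, next_tb);
 *
 * last_tb/slot/next_tb are hypothetical names for the caller's state. */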

TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif

#if defined(_WIN32)
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".section .text\n"
#elif defined(__APPLE__)
#define ASM_DATA_SECTION ".data\n"
#define ASM_PREVIOUS_SECTION ".text\n"
#else
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".previous\n"
#endif

#define ASM_OP_LABEL_NAME(n, opname) \
    ASM_NAME(__op_label) #n "." ASM_NAME(opname)

#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (ASM_DATA_SECTION\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "b " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#elif defined(__i386__) && defined(USE_DIRECT_JUMP)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (".section .data\n"\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "jmp " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
#define GOTO_TB(opname, tbparam, n)\
do {\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    static void __attribute__((unused)) *__op_label ## n \
        __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n: ;\
dummy_label ## n: ;\
} while (0)

#endif
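
/* Added note (a sketch of the mechanism, assuming the old dyngen op-copy
 * scheme): the __op_label and __op_jmp symbols emitted above are resolved
 * when the ops are copied into a translation block, which is how
 * tb_jmp_offset[] / tb_next[] learn where the patchable jump lives inside
 * the generated code. */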

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

#ifdef __powerpc__
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
        "0:    lwarx %0,0,%1\n"
        "      xor. %0,%3,%0\n"
        "      bne 1f\n"
        "      stwcx. %2,0,%1\n"
        "      bne- 0b\n"
        "1:    "
        : "=&r" (ret)
        : "r" (p), "r" (1), "r" (0)
        : "cr0", "memory");
    return ret;
}
#endif

#ifdef __i386__
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#endif
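
/* Added note: cmpxchg compares *p with eax (preloaded to 0); if the lock
 * word was 0 it is atomically set to 1 and 0 is returned (lock acquired),
 * otherwise the old non-zero value comes back in readval and the caller
 * keeps spinning. */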

#ifdef __x86_64__
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#endif

#ifdef __s390__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#endif

#ifdef __alpha__
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0:  mov 1,%2\n"
                          "    ldl_l %0,%1\n"
                          "    stl_c %2,%1\n"
                          "    beq %2,1f\n"
                          ".subsection 2\n"
                          "1:  br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#endif

#ifdef __sparc__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#endif

#ifdef __arm__
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#endif

#ifdef __mc68000
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (*p)
                         : "cc", "memory");
    return ret;
}
#endif

#ifdef __ia64
#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#endif

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif
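
/* Added usage sketch: these locks guard the translator state; a caller
 * (hypothetical here) serializes TB generation as:
 *
 *     spin_lock(&tb_lock);
 *     ... allocate and link a TranslationBlock ...
 *     spin_unlock(&tb_lock);
 *
 * In the !CONFIG_USER_ONLY build the operations are no-ops because a single
 * thread drives the emulator, so no real locking is needed. */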

extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY) && !defined(CONFIG_DM)

void tlb_fill(target_ulong addr, int is_write, int is_user,
              void *retaddr);

#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif

#if defined(CONFIG_USER_ONLY) || defined(CONFIG_DM)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    int is_user, index, pd;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
    is_user = msr_pr;
#elif defined (TARGET_MIPS)
    is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
#elif defined (TARGET_SPARC)
    is_user = (env->psrs == 0);
#elif defined (TARGET_ARM)
    is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
#elif defined (TARGET_SH4)
    is_user = ((env->sr & SR_MD) == 0);
#else
#error unimplemented CPU
#endif
    if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
                         (addr & TARGET_PAGE_MASK), 0)) {
        ldub_code(addr);
    }
    pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n", addr);
    }
    return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base;
}
#endif
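
/* Added note: because the result is an offset into emulated RAM, a host
 * pointer to the guest code can be formed as
 *
 *     uint8_t *host_pc = phys_ram_base + get_phys_addr_code(env, pc);
 *
 * and the ldub_code() issued on a TLB miss is what may raise the exception
 * mentioned in NOTE above. */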

#ifdef USE_KQEMU
#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))

int kqemu_init(CPUState *env);
int kqemu_cpu_exec(CPUState *env);
void kqemu_flush_page(CPUState *env, target_ulong addr);
void kqemu_flush(CPUState *env, int global);
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
void kqemu_cpu_interrupt(CPUState *env);
void kqemu_record_dump(void);

static inline int kqemu_is_ok(CPUState *env)
{
    return(env->kqemu_enabled &&
           (env->cr[0] & CR0_PE_MASK) &&
           !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
           (env->eflags & IF_MASK) &&
           !(env->eflags & VM_MASK) &&
           (env->kqemu_enabled == 2 ||
            ((env->hflags & HF_CPL_MASK) == 3 &&
             (env->eflags & IOPL_MASK) != IOPL_MASK)));
}
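
/* Added note: kqemu can only take over when the guest runs in protected
 * mode, outside vm86, with interrupts enabled and not inhibited;
 * kqemu_enabled == 2 additionally allows kernel code, otherwise only CPL 3
 * code that has not been granted I/O privilege via IOPL qualifies. */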

#endif