ia64/xen-unstable

view tools/ioemu/exec-all.h @ 6946:e703abaf6e3d

Add behaviour to the remove methods to remove the transaction's path itself. This allows us to write Remove(path) to remove the specified path rather than having to slice the path ourselves.
author emellor@ewan
date Sun Sep 18 14:42:13 2005 +0100 (2005-09-18)
parents 4669354bba9a
children 19432bec4c06
/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* allow viewing of translation results - the slowdown should be negligible, so we leave it enabled */
#define DEBUG_DISAS

#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
#endif
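
/* Example (illustrative, using a hypothetical name): glue(op_mov, 32)
   expands to op_mov32, and stringify(op_mov32) expands to "op_mov32".
   The xglue/tostring indirection forces macro arguments to be fully
   expanded before pasting or stringizing. */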
#if GCC_MAJOR < 3
#define __builtin_expect(x, n) (x)
#endif

#ifdef __i386__
#define REGPARM(n) __attribute__((regparm(n)))
#else
#define REGPARM(n)
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;

/* XXX: make a safe guess about sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)

extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
extern uint32_t gen_opc_pc[OPC_BUF_SIZE];
extern uint32_t gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];

typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);

#if defined(TARGET_I386)

void optimize_flags_init(void);

#endif

extern FILE *logfile;
extern int loglevel;

int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
                      int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_exec_init(void);
int page_unprotect(unsigned long address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu);

#define CODE_GEN_MAX_SIZE 65536
#define CODE_GEN_ALIGN    16 /* must be >= the size of an icache line */

#define CODE_GEN_HASH_BITS 15
#define CODE_GEN_HASH_SIZE (1 << CODE_GEN_HASH_BITS)

#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

/* maximum total translated code allocated */

/* NOTE: the translated code area cannot be too big because on some
   archs the range of "fast" function calls is limited. Here is a
   summary of the ranges:

   i386  : signed 32 bits
   arm   : signed 26 bits
   ppc   : signed 24 bits
   sparc : signed 32 bits
   alpha : signed 23 bits
*/

#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE (2 * 1024 * 1024)
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
#endif

//#define CODE_GEN_BUFFER_SIZE (128 * 1024)

/* estimated block size for TB allocation */
/* XXX: use a per-code average fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#define CODE_GEN_MAX_BLOCKS (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)
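
/* For reference: with the softmmu defaults above this gives
   CODE_GEN_MAX_BLOCKS = (8 * 1024 * 1024) / 128 = 65536 blocks; the
   user-mode build doubles that to 131072 by halving the estimated
   average block size. */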
#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif

typedef struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    unsigned int flags;   /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_CODE_COPY   0x0001 /* block was generated in code copy mode */
#define CF_TB_FP_USED  0x0002 /* fp ops are used in the TB */
#define CF_FP_USED     0x0004 /* fp ops are used in the TB or in a chained TB */
#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */

    uint8_t *tc_ptr;    /* pointer to the translated code */
    struct TranslationBlock *hash_next; /* next matching tb for virtual address */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    uint32_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;

static inline unsigned int tb_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_HASH_SIZE - 1);
}

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
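
/* Worked example: with CODE_GEN_HASH_BITS = 15 the mask is 0x7fff, so
   tb_hash_func(0x080483f0) = 0x080483f0 & 0x7fff = 0x03f0. Both hash
   functions are simple power-of-two masks, which is why the table sizes
   above must stay powers of two. */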
TranslationBlock *tb_alloc(unsigned long pc);
void tb_flush(CPUState *env);
void tb_link(TranslationBlock *tb);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);

extern TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
extern uint8_t *code_gen_ptr;

/* find a translation block in the translation cache. If not found,
   return NULL and store a pointer to the last element of the list in *pptb */
static inline TranslationBlock *tb_find(TranslationBlock ***pptb,
                                        target_ulong pc,
                                        target_ulong cs_base,
                                        unsigned int flags)
{
    TranslationBlock **ptb, *tb;
    unsigned int h;

    h = tb_hash_func(pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb = *ptb;
        if (!tb)
            break;
        if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
            return tb;
        ptb = &tb->hash_next;
    }
    *pptb = ptb;
    return NULL;
}
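
/* Usage sketch (illustrative, not part of the original header): a caller
   keeps the tail pointer that tb_find() returns through pptb, so a freshly
   translated block can be appended without rescanning the hash chain.
   Roughly:

       TranslationBlock **ptb, *tb;
       tb = tb_find(&ptb, pc, cs_base, flags);
       if (!tb) {
           tb = tb_alloc(pc);   // translate the block, then...
           *ptb = tb;           // ...link it at the chain tail
           tb->hash_next = NULL;
       }
*/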
#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#endif
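
/* Note: on i386, jmp_addr points at the 32-bit displacement field of a
   jmp rel32 instruction, and the displacement is relative to the end of
   that field, hence the addr - (jmp_addr + 4) computation. On PowerPC the
   26-bit branch displacement is rewritten in place instead, which is why
   the icache must be flushed there but not on x86. */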
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif
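
/* Note: tb_jmp_offset has four entries because each of the two jump slots
   may be emitted at up to two sites in the generated code (JUMP_TB and
   JUMP_TB2 below); entries [n] and [n + 2] cover the two sites for slot n,
   and 0xffff marks a site that was never emitted. */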
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
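
/* Sketch (illustrative, not part of the original header): the low two bits
   of each pointer in the circular list encode which field to follow next,
   so walking every TB that jumps into 'tb' looks roughly like:

       TranslationBlock *tb1 = tb->jmp_first;
       for (;;) {
           int n1 = (long)tb1 & 3;
           tb1 = (TranslationBlock *)((long)tb1 & ~3);
           if (n1 == 2)
               break;               // tag 2 means we are back at tb itself
           // tb1 jumps to tb through its slot n1
           tb1 = tb1->jmp_next[n1];
       }
*/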
TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif

#if defined(_WIN32)
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".section .text\n"
#elif defined(__APPLE__)
#define ASM_DATA_SECTION ".data\n"
#define ASM_PREVIOUS_SECTION ".text\n"
#define ASM_NAME(x) "_" #x
#else
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".previous\n"
#define ASM_NAME(x) stringify(x)
#endif

#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    asm volatile (ASM_DATA_SECTION\
                  ASM_NAME(__op_label) #n "." ASM_NAME(opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "b " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
    T0 = (long)(tbparam) + (n);\
    EIP = eip;\
    EXIT_TB();\
} while (0)

#define JUMP_TB2(opname, tbparam, n)\
do {\
    asm volatile ("b " ASM_NAME(__op_jmp) #n "\n");\
} while (0)

#elif defined(__i386__) && defined(USE_DIRECT_JUMP)

/* we patch the jump instruction directly */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    asm volatile (".section .data\n"\
                  ASM_NAME(__op_label) #n "." ASM_NAME(opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "jmp " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
    T0 = (long)(tbparam) + (n);\
    EIP = eip;\
    EXIT_TB();\
} while (0)

#define JUMP_TB2(opname, tbparam, n)\
do {\
    asm volatile ("jmp " ASM_NAME(__op_jmp) #n "\n");\
} while (0)

#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n:\
    T0 = (long)(tbparam) + (n);\
    EIP = eip;\
dummy_label ## n:\
    EXIT_TB();\
} while (0)

/* second jump to same destination 'n' */
#define JUMP_TB2(opname, tbparam, n)\
do {\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n - 2]);\
} while (0)

#endif
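
/* Note on the portable variant above: __op_label##n records the address of
   label##n in the data section so the code generator can locate it, and
   tb_next[n] initially points back at label##n, making the computed goto
   fall through to "T0 = tb | n; EXIT_TB()" until tb_set_jmp_target() later
   redirects it at a chained TB's generated code. */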
extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

#ifdef __powerpc__
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
                          "0: lwarx %0,0,%1\n"
                          "   xor. %0,%3,%0\n"
                          "   bne 1f\n"
                          "   stwcx. %2,0,%1\n"
                          "   bne- 0b\n"
                          "1: "
                          : "=&r" (ret)
                          : "r" (p), "r" (1), "r" (0)
                          : "cr0", "memory");
    return ret;
}
#endif

#ifdef __i386__
/* returns nonzero if the lock was already held, 0 if it was acquired */
static inline int testandset (int *p)
{
    char ret;
    long int readval;

    __asm__ __volatile__ ("lock; cmpxchgl %3, %1; setne %0"
                          : "=q" (ret), "=m" (*p), "=a" (readval)
                          : "r" (1), "m" (*p), "a" (0)
                          : "memory");
    return ret;
}
#endif

#ifdef __x86_64__
static inline int testandset (int *p)
{
    char ret;
    int readval;

    __asm__ __volatile__ ("lock; cmpxchgl %3, %1; setne %0"
                          : "=q" (ret), "=m" (*p), "=a" (readval)
                          : "r" (1), "m" (*p), "a" (0)
                          : "memory");
    return ret;
}
#endif

#ifdef __s390__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
                          "   jl 0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#endif

#ifdef __alpha__
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#endif

#ifdef __sparc__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#endif

#ifdef __arm__
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#endif

#ifdef __mc68000
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0" /* ret is nonzero if *p was already set */
                         : "=r" (ret)
                         : "m" (*p)
                         : "cc","memory");
    return ret;
}
#endif

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif
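
/* Usage sketch (illustrative): the global tb_lock declared below serializes
   writers of the TB hash chains in the user-mode build, e.g.:

       spin_lock(&tb_lock);
       ... modify tb_hash[] / tb_phys_hash[] ...
       spin_unlock(&tb_lock);

   In the system (softmmu) build these calls compile to no-ops, since
   execution there is single-threaded. */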
extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY)

void tlb_fill(unsigned long addr, int is_write, int is_user,
              void *retaddr);

#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif

#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
/* XXX: i386 target specific */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#endif

#define DEBUG_UNUSED_IOPORT
#define DEBUG_IOPORT
#define TARGET_VMX