ia64/xen-unstable

view tools/ioemu/cpu-all.h @ 6946:e703abaf6e3d

Add behaviour to the remove methods to remove the transaction's path itself. This allows us to write Remove(path) to remove the specified path rather than having to slice the path ourselves.
author emellor@ewan
date Sun Sep 18 14:42:13 2005 +0100 (2005-09-18)
parents 3233e7ecfa9f
children 06d84bf87159
line source
1 /*
2 * defines common to all virtual CPUs
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

/* Hosts that fault on (or mishandle) unaligned memory accesses; see the
   WORDS_ALIGNED description in the comment block below. */
#if defined(__arm__) || defined(__sparc__)
#define WORDS_ALIGNED
#endif
27 /* some important defines:
28 *
29 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
30 * memory accesses.
31 *
32 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
33 * otherwise little endian.
34 *
35 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
36 *
37 * TARGET_WORDS_BIGENDIAN : same for target cpu
38 */
40 #include "bswap.h"
/* A byte swap between host and target values is needed exactly when one
   of the two is big endian and the other is not. */
#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif
#ifdef BSWAP_NEEDED

/* tswapNN: convert an NN-bit value between target and host byte order.
   The swap is its own inverse, so the same function serves both
   directions. */
static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

/* tswapNNs: in-place variants of the conversions above. */
static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

/* Host and target have the same byte order: all swaps are no-ops. */
static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif
/* tswapl/tswapls act on a target "long", i.e. a value of
   TARGET_LONG_SIZE bytes, dispatching to the 32 or 64 bit versions. */
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#endif
/* NOTE: arm is horrible as double 32 bit words are stored in big endian ! */
/* Union giving access to the two 32-bit halves ("upper"/"lower" refer to
   significance, not address) and the raw 64-bit pattern of a double.
   The struct member order follows host endianness, except on ARM where
   the two words of a double are stored in the opposite order (see the
   NOTE above), hence the extra __arm__ test. */
typedef union {
    double d;
#if !defined(WORDS_BIGENDIAN) && !defined(__arm__)
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#else
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
134 /* CPU memory access without any memory or io remapping */
136 /*
137 * the generic syntax for the memory accesses is:
138 *
139 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
140 *
141 * store: st{type}{size}{endian}_{access_type}(ptr, val)
142 *
143 * type is:
144 * (empty): integer access
145 * f : float access
146 *
147 * sign is:
148 * (empty): for floats or 32 bit size
149 * u : unsigned
150 * s : signed
151 *
152 * size is:
153 * b: 8 bits
154 * w: 16 bits
155 * l: 32 bits
156 * q: 64 bits
157 *
158 * endian is:
159 * (empty): target cpu endianness or 8 bit access
160 * r : reversed target cpu endianness (not implemented yet)
161 * be : big endian (not implemented yet)
162 * le : little endian (not implemented yet)
163 *
164 * access_type is:
165 * raw : host memory access
166 * user : user mode access using soft MMU
167 * kernel : kernel mode access using soft MMU
168 */
/* Load an unsigned byte from host memory (zero-extended to int). */
static inline int ldub_raw(void *ptr)
{
    const uint8_t *p = (const uint8_t *)ptr;
    return *p;
}
/* Load a signed byte from host memory (sign-extended to int). */
static inline int ldsb_raw(void *ptr)
{
    const int8_t *p = (const int8_t *)ptr;
    return *p;
}
/* Store the low 8 bits of v to host memory. */
static inline void stb_raw(void *ptr, int v)
{
    uint8_t *p = (uint8_t *)ptr;
    *p = (uint8_t)v;
}
/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting : bad */
#if !defined(TARGET_WORDS_BIGENDIAN) && (defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED))

/* conservative code for little endian unaligned accesses */
/* The target is little endian but the host is big endian and/or cannot
   do unaligned accesses, so 16/32 bit quantities are assembled byte by
   byte.  On powerpc the byte-reversed load/store instructions
   (lhbrx/lwbrx/sthbrx/stwbrx) do the swap in a single access instead. */
static inline int lduw_raw(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_raw(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    /* assemble little endian, then sign extend from 16 bits */
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_raw(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

/* 64 bit accesses are built from two 32 bit halves, low word at the
   lower address (little endian target layout). */
static inline uint64_t ldq_raw(void *ptr)
{
    uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_raw(p);
    v2 = ldl_raw(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_raw(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_raw(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_raw(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_raw(p, (uint32_t)v);
    stl_raw(p + 4, v >> 32);
}

/* float access */
/* Floats move through an integer union so the byte-swapping integer
   accessors above can be reused on the raw bit pattern. */
static inline float ldfl_raw(void *ptr)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.i = ldl_raw(ptr);
    return u.f;
}

static inline void stfl_raw(void *ptr, float v)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.f = v;
    stl_raw(ptr, u.i);
}

/* Doubles: lower-significance word at the lower address (little endian
   target layout), remapped through CPU_DoubleU for the host. */
static inline double ldfq_raw(void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_raw(ptr);
    u.l.upper = ldl_raw(ptr + 4);
    return u.d;
}

static inline void stfq_raw(void *ptr, double v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_raw(ptr, u.l.lower);
    stl_raw(ptr + 4, u.l.upper);
}
#elif defined(TARGET_WORDS_BIGENDIAN) && (!defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED))

/* Big endian target on a little endian and/or alignment-restricted
   host: values are assembled/split byte by byte with the most
   significant byte at the lowest address.  On i386/x86_64 the swap is
   done in registers instead: xchgb on the %b/%h subregisters for
   16 bit values, bswap for 32 bit values. */
static inline int lduw_raw(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_raw(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    uint8_t *b = (uint8_t *) ptr;
    /* assemble big endian, then sign extend from 16 bits */
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_raw(void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

/* 64 bit accesses are built from two 32 bit halves, high word at the
   lower address (big endian target layout). */
static inline uint64_t ldq_raw(void *ptr)
{
    uint32_t a,b;
    a = ldl_raw(ptr);
    b = ldl_raw(ptr+4);
    return (((uint64_t)a<<32)|b);
}

static inline void stw_raw(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_raw(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_raw(void *ptr, uint64_t v)
{
    stl_raw(ptr, v >> 32);
    stl_raw(ptr + 4, v);
}

/* float access */
/* Floats move through an integer union so the byte-swapping integer
   accessors above can be reused on the raw bit pattern. */
static inline float ldfl_raw(void *ptr)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.i = ldl_raw(ptr);
    return u.f;
}

static inline void stfl_raw(void *ptr, float v)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.f = v;
    stl_raw(ptr, u.i);
}

/* Doubles: higher-significance word at the lower address (big endian
   target layout), remapped through CPU_DoubleU for the host. */
static inline double ldfq_raw(void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_raw(ptr);
    u.l.lower = ldl_raw(ptr + 4);
    return u.d;
}

static inline void stfq_raw(void *ptr, double v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_raw(ptr, u.l.upper);
    stl_raw(ptr + 4, u.l.lower);
}
433 #else
/* Host and target byte order agree and unaligned accesses are allowed:
   load a 16 bit value directly. */
static inline int lduw_raw(void *ptr)
{
    const uint16_t *p = (const uint16_t *)ptr;
    return *p;
}
/* Direct 16 bit signed load (sign-extended to int). */
static inline int ldsw_raw(void *ptr)
{
    const int16_t *p = (const int16_t *)ptr;
    return *p;
}
/* Direct 32 bit load, returned as int (QEMU convention). */
static inline int ldl_raw(void *ptr)
{
    const uint32_t *p = (const uint32_t *)ptr;
    return (int)*p;
}
/* Direct 64 bit load. */
static inline uint64_t ldq_raw(void *ptr)
{
    const uint64_t *p = (const uint64_t *)ptr;
    return *p;
}
/* Direct 16 bit store of the low bits of v. */
static inline void stw_raw(void *ptr, int v)
{
    uint16_t *p = (uint16_t *)ptr;
    *p = (uint16_t)v;
}
/* Direct 32 bit store. */
static inline void stl_raw(void *ptr, int v)
{
    uint32_t *p = (uint32_t *)ptr;
    *p = (uint32_t)v;
}
/* Direct 64 bit store. */
static inline void stq_raw(void *ptr, uint64_t v)
{
    uint64_t *p = (uint64_t *)ptr;
    *p = v;
}
470 /* float access */
/* Direct single-precision float load. */
static inline float ldfl_raw(void *ptr)
{
    const float *p = (const float *)ptr;
    return *p;
}
/* Direct double-precision float load. */
static inline double ldfq_raw(void *ptr)
{
    const double *p = (const double *)ptr;
    return *p;
}
/* Direct single-precision float store. */
static inline void stfl_raw(void *ptr, float v)
{
    float *p = (float *)ptr;
    *p = v;
}
/* Direct double-precision float store. */
static inline void stfq_raw(void *ptr, double v)
{
    double *p = (double *)ptr;
    *p = v;
}
491 #endif
/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)

/* In user mode there is no soft MMU: every access class (plain, _code,
   _kernel) maps directly onto the host "raw" accessors above. */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
/* Fixed: the parameter was misspelled "vt" while the expansion used
   "v", so any use of stfq_kernel() failed to compile. */
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */
/* page related stuff */

/* Target page geometry; TARGET_PAGE_BITS comes from the target-specific
   headers. */
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* Host page geometry, filled in at run time (the host page size may
   differ from the target's). */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ 0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC 0x0004
#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
/* Per-page PAGE_* flag bookkeeping (prototypes only; implementations
   live elsewhere). */
void page_dump(FILE *f);
int page_get_flags(unsigned long address);
void page_set_flags(unsigned long start, unsigned long end, int flags);
void page_unprotect_range(uint8_t *data, unsigned long data_size);
/* In this tree the only target is x86, so CPUState aliases CPUX86State. */
#define CPUState CPUX86State

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);

/* Report a fatal emulation error (printf-style arguments). */
void cpu_abort(CPUState *env, const char *fmt, ...);
extern CPUState *cpu_single_env;
extern int code_copy_enabled;

/* Asynchronous event flags raised via cpu_interrupt() and cleared via
   cpu_reset_interrupt(). */
#define CPU_INTERRUPT_EXIT   0x01 /* wants exit from main loop */
#define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08 /* internal timer exception pending */
void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

/* Debug support: breakpoints and single stepping. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc);
void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);
CPUState *cpu_init(void);
int main_loop(void);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
/* Logging categories, combinable as a bit mask. */
#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)

/* define log items */
typedef struct CPULogItem {
    int mask;          /* one or more CPU_LOG_* bits */
    const char *name;  /* name used to select the item */
    const char *help;  /* human readable description */
} CPULogItem;

extern CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
/* Parse a comma separated list of log item names into a mask.
   NOTE(review): exact syntax handled is defined by the implementation
   elsewhere. */
int cpu_str_to_log_mask(const char *str);
/* IO ports API */

/* NOTE: as these functions may be even used when there is an isa
   bridge on non x86 targets, we always define them */
#ifndef NO_CPU_IO_DEFS
void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif
/* memory API */

extern int phys_ram_size;
extern int phys_ram_fd;
extern uint8_t *phys_ram_base;   /* host mapping of guest RAM */
extern uint8_t *phys_ram_dirty;  /* one dirty byte per target page */

/* physical memory access */
/* The low IO_MEM_SHIFT bits of a phys_offset select an io-memory entry;
   the values below are reserved entries. */
#define IO_MEM_NB_ENTRIES  256
#define TLB_INVALID_MASK   (1 << 3)
#define IO_MEM_SHIFT       4

#define IO_MEM_RAM         (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM         (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
#define IO_MEM_CODE        (3 << IO_MEM_SHIFT) /* used internally, never use directly */
#define IO_MEM_NOTDIRTY    (4 << IO_MEM_SHIFT) /* used internally, never use directly */
/* Callback types for memory-mapped io regions. */
typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset);
/* Register read/write handlers for an io-memory slot; mem_read and
   mem_write are arrays of handlers (indexed by access size in the
   implementation -- see cpu_register_io_memory's definition). */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque);
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
/* Convenience wrappers around cpu_physical_memory_rw(). */
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const uint8_t *buf, int len)
{
    /* cast is safe: is_write=1 means buf is only read */
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}

int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#define VGA_DIRTY_FLAG 0x01

/* read dirty bit (return 0 or 1) */
/* Dirty tracking granularity is one byte per target page. */
static inline int cpu_physical_memory_is_dirty(target_ulong addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS];
}

static inline void cpu_physical_memory_set_dirty(target_ulong addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 1;
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end);

#endif /* CPU_ALL_H */