ia64/xen-unstable

view tools/ioemu/patches/ioemu-ia64 @ 10965:87346792fe90

[qemu patches] Update patches for changeset 10936:a0b3cf802d99.

Signed-off-by: Christian Limpach <Christian.Limpach@xensource.com>
author chris@kneesaa.uk.xensource.com
date Fri Aug 04 10:52:49 2006 +0100 (2006-08-04)
parents acccec7e213a
children 253498168658
line source
1 Index: ioemu/hw/iommu.c
2 ===================================================================
3 --- ioemu.orig/hw/iommu.c 2006-08-04 10:42:20.772886174 +0100
4 +++ ioemu/hw/iommu.c 2006-08-04 10:42:25.397351756 +0100
5 @@ -82,7 +82,11 @@
6 #define IOPTE_VALID 0x00000002 /* IOPTE is valid */
7 #define IOPTE_WAZ 0x00000001 /* Write as zeros */
9 +#if defined(__i386__) || defined(__x86_64__)
10 #define PAGE_SHIFT 12
11 +#elif defined(__ia64__)
12 +#define PAGE_SHIFT 14
13 +#endif
14 #define PAGE_SIZE (1 << PAGE_SHIFT)
15 #define PAGE_MASK (PAGE_SIZE - 1)
17 Index: ioemu/cpu-all.h
18 ===================================================================
19 --- ioemu.orig/cpu-all.h 2006-08-04 10:42:25.029394277 +0100
20 +++ ioemu/cpu-all.h 2006-08-04 10:42:25.398351640 +0100
21 @@ -835,6 +835,31 @@
22 :"=m" (*(volatile long *)addr)
23 :"dIr" (nr));
24 }
25 +#elif defined(__ia64__)
26 +#include "ia64_intrinsic.h"
27 +#define atomic_set_bit(nr, addr) ({ \
28 + typeof(*addr) bit, old, new; \
29 + volatile typeof(*addr) *m; \
30 + \
31 + m = (volatile typeof(*addr)*)(addr + nr / (8*sizeof(*addr))); \
32 + bit = 1 << (nr % (8*sizeof(*addr))); \
33 + do { \
34 + old = *m; \
35 + new = old | bit; \
36 + } while (cmpxchg_acq(m, old, new) != old); \
37 +})
38 +
39 +#define atomic_clear_bit(nr, addr) ({ \
40 + typeof(*addr) bit, old, new; \
41 + volatile typeof(*addr) *m; \
42 + \
43 + m = (volatile typeof(*addr)*)(addr + nr / (8*sizeof(*addr))); \
44 + bit = ~(1 << (nr % (8*sizeof(*addr)))); \
45 + do { \
46 + old = *m; \
47 + new = old & bit; \
48 + } while (cmpxchg_acq(m, old, new) != old); \
49 +})
50 #endif
52 /* memory API */
53 Index: ioemu/vl.c
54 ===================================================================
55 --- ioemu.orig/vl.c 2006-08-04 10:42:25.326359960 +0100
56 +++ ioemu/vl.c 2006-08-04 10:51:21.239442976 +0100
57 @@ -5567,6 +5567,11 @@
58 /* init the memory */
59 phys_ram_size = ram_size + vga_ram_size + bios_size;
61 +#if defined (__ia64__)
62 + if (ram_size > MMIO_START)
63 + ram_size += 1 * MEM_G; /* skip 3G-4G MMIO, LEGACY_IO_SPACE etc. */
64 +#endif
65 +
66 #ifdef CONFIG_DM
68 nr_pages = ram_size/PAGE_SIZE;
69 @@ -5578,6 +5583,7 @@
70 exit(-1);
71 }
73 +#if defined(__i386__) || defined(__x86_64__)
74 if (xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) != nr_pages) {
75 fprintf(logfile, "xc_get_pfn_list returned error %d\n", errno);
76 exit(-1);
77 @@ -5598,6 +5604,41 @@
78 fprintf(logfile, "shared page at pfn:%lx, mfn: %"PRIx64"\n", nr_pages - 1,
79 (uint64_t)(page_array[nr_pages - 1]));
81 +#elif defined(__ia64__)
82 +
83 + if (xc_ia64_get_pfn_list(xc_handle, domid, page_array,
84 + IO_PAGE_START >> PAGE_SHIFT, 1) != 1) {
85 + fprintf(logfile, "xc_ia64_get_pfn_list returned error %d\n", errno);
86 + exit(-1);
87 + }
88 +
89 + shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
90 + PROT_READ|PROT_WRITE,
91 + page_array[0]);
92 +
93 + fprintf(logfile, "shared page at pfn:%lx, mfn: %016lx\n",
94 + IO_PAGE_START >> PAGE_SHIFT, page_array[0]);
95 +
96 + if (xc_ia64_get_pfn_list(xc_handle, domid,
97 + page_array, 0, nr_pages) != nr_pages) {
98 + fprintf(logfile, "xc_ia64_get_pfn_list returned error %d\n", errno);
99 + exit(-1);
100 + }
101 +
102 + if (ram_size > MMIO_START) {
103 + for (i = 0 ; i < MEM_G >> PAGE_SHIFT; i++)
104 + page_array[(MMIO_START >> PAGE_SHIFT) + i] =
105 + page_array[(IO_PAGE_START >> PAGE_SHIFT) + 1];
106 + }
107 +
108 + phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
109 + PROT_READ|PROT_WRITE,
110 + page_array, nr_pages);
111 + if (phys_ram_base == 0) {
112 + fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
113 + exit(-1);
114 + }
115 +#endif
116 #else /* !CONFIG_DM */
118 #ifdef CONFIG_SOFTMMU
119 Index: ioemu/target-i386-dm/exec-dm.c
120 ===================================================================
121 --- ioemu.orig/target-i386-dm/exec-dm.c 2006-08-04 10:42:24.647438417 +0100
122 +++ ioemu/target-i386-dm/exec-dm.c 2006-08-04 10:42:25.401351294 +0100
123 @@ -341,6 +341,23 @@
124 return io_mem_read[io_index >> IO_MEM_SHIFT];
125 }
127 +#ifdef __ia64__
128 +/* IA64 has separate I/D cache, with coherence maintained by DMA controller.
129 + * So to emulate right behavior that guest OS is assumed, we need to flush
130 + * I/D cache here.
131 + */
132 +static void sync_icache(unsigned long address, int len)
133 +{
134 + int l;
135 +
136 + for(l = 0; l < (len + 32); l += 32)
137 + __ia64_fc(address + l);
138 +
139 + ia64_sync_i();
140 + ia64_srlz_i();
141 +}
142 +#endif
143 +
144 /* physical memory access (slow version, mainly for debug) */
145 #if defined(CONFIG_USER_ONLY)
146 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
147 @@ -456,6 +473,9 @@
148 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
149 (addr & ~TARGET_PAGE_MASK);
150 memcpy(buf, ptr, l);
151 +#ifdef __ia64__
152 + sync_icache((unsigned long)ptr, l);
153 +#endif
154 } else {
155 /* unreported MMIO space */
156 memset(buf, 0xff, len);
157 Index: ioemu/exec-all.h
158 ===================================================================
159 --- ioemu.orig/exec-all.h 2006-08-04 10:42:24.518453323 +0100
160 +++ ioemu/exec-all.h 2006-08-04 10:42:25.401351294 +0100
161 @@ -462,12 +462,13 @@
162 }
163 #endif
165 -#ifdef __ia64
166 -#include <ia64intrin.h>
167 +#ifdef __ia64__
168 +#include "ia64_intrinsic.h"
170 static inline int testandset (int *p)
171 {
172 - return __sync_lock_test_and_set (p, 1);
173 + uint32_t o = 0, n = 1;
174 + return (int)cmpxchg_acq(p, o, n);
175 }
176 #endif
178 Index: ioemu/target-i386-dm/cpu.h
179 ===================================================================
180 --- ioemu.orig/target-i386-dm/cpu.h 2006-08-04 10:42:24.647438417 +0100
181 +++ ioemu/target-i386-dm/cpu.h 2006-08-04 10:42:25.401351294 +0100
182 @@ -80,7 +80,11 @@
183 /* helper2.c */
184 int main_loop(void);
186 +#if defined(__i386__) || defined(__x86_64__)
187 #define TARGET_PAGE_BITS 12
188 +#elif defined(__ia64__)
189 +#define TARGET_PAGE_BITS 14
190 +#endif
191 #include "cpu-all.h"
193 #endif /* CPU_I386_H */
194 Index: ioemu/ia64_intrinsic.h
195 ===================================================================
196 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
197 +++ ioemu/ia64_intrinsic.h 2006-08-04 10:42:25.402351178 +0100
198 @@ -0,0 +1,276 @@
199 +#ifndef IA64_INTRINSIC_H
200 +#define IA64_INTRINSIC_H
201 +
202 +/*
203 + * Compiler-dependent Intrinsics
204 + *
205 + * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
206 + * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
207 + *
208 + */
209 +extern long ia64_cmpxchg_called_with_bad_pointer (void);
210 +extern void ia64_bad_param_for_getreg (void);
211 +#define ia64_cmpxchg(sem,ptr,o,n,s) ({ \
212 + uint64_t _o, _r; \
213 + switch(s) { \
214 + case 1: _o = (uint8_t)(long)(o); break; \
215 + case 2: _o = (uint16_t)(long)(o); break; \
216 + case 4: _o = (uint32_t)(long)(o); break; \
217 + case 8: _o = (uint64_t)(long)(o); break; \
218 + default: break; \
219 + } \
220 + switch(s) { \
221 + case 1: \
222 + _r = ia64_cmpxchg1_##sem((uint8_t*)ptr,n,_o); break; \
223 + case 2: \
224 + _r = ia64_cmpxchg2_##sem((uint16_t*)ptr,n,_o); break; \
225 + case 4: \
226 + _r = ia64_cmpxchg4_##sem((uint32_t*)ptr,n,_o); break; \
227 + case 8: \
228 + _r = ia64_cmpxchg8_##sem((uint64_t*)ptr,n,_o); break; \
229 + default: \
230 + _r = ia64_cmpxchg_called_with_bad_pointer(); break; \
231 + } \
232 + (__typeof__(o)) _r; \
233 +})
234 +
235 +#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq,ptr,o,n,sizeof(*ptr))
236 +#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel,ptr,o,n,sizeof(*ptr))
237 +
238 +/*
239 + * Register Names for getreg() and setreg().
240 + *
241 + * The "magic" numbers happen to match the values used by the Intel compiler's
242 + * getreg()/setreg() intrinsics.
243 + */
244 +
245 +/* Special Registers */
246 +
247 +#define _IA64_REG_IP 1016 /* getreg only */
248 +#define _IA64_REG_PSR 1019
249 +#define _IA64_REG_PSR_L 1019
250 +
251 +/* General Integer Registers */
252 +
253 +#define _IA64_REG_GP 1025 /* R1 */
254 +#define _IA64_REG_R8 1032 /* R8 */
255 +#define _IA64_REG_R9 1033 /* R9 */
256 +#define _IA64_REG_SP 1036 /* R12 */
257 +#define _IA64_REG_TP 1037 /* R13 */
258 +
259 +/* Application Registers */
260 +
261 +#define _IA64_REG_AR_KR0 3072
262 +#define _IA64_REG_AR_KR1 3073
263 +#define _IA64_REG_AR_KR2 3074
264 +#define _IA64_REG_AR_KR3 3075
265 +#define _IA64_REG_AR_KR4 3076
266 +#define _IA64_REG_AR_KR5 3077
267 +#define _IA64_REG_AR_KR6 3078
268 +#define _IA64_REG_AR_KR7 3079
269 +#define _IA64_REG_AR_RSC 3088
270 +#define _IA64_REG_AR_BSP 3089
271 +#define _IA64_REG_AR_BSPSTORE 3090
272 +#define _IA64_REG_AR_RNAT 3091
273 +#define _IA64_REG_AR_FCR 3093
274 +#define _IA64_REG_AR_EFLAG 3096
275 +#define _IA64_REG_AR_CSD 3097
276 +#define _IA64_REG_AR_SSD 3098
277 +#define _IA64_REG_AR_CFLAG 3099
278 +#define _IA64_REG_AR_FSR 3100
279 +#define _IA64_REG_AR_FIR 3101
280 +#define _IA64_REG_AR_FDR 3102
281 +#define _IA64_REG_AR_CCV 3104
282 +#define _IA64_REG_AR_UNAT 3108
283 +#define _IA64_REG_AR_FPSR 3112
284 +#define _IA64_REG_AR_ITC 3116
285 +#define _IA64_REG_AR_PFS 3136
286 +#define _IA64_REG_AR_LC 3137
287 +#define _IA64_REG_AR_EC 3138
288 +
289 +/* Control Registers */
290 +
291 +#define _IA64_REG_CR_DCR 4096
292 +#define _IA64_REG_CR_ITM 4097
293 +#define _IA64_REG_CR_IVA 4098
294 +#define _IA64_REG_CR_PTA 4104
295 +#define _IA64_REG_CR_IPSR 4112
296 +#define _IA64_REG_CR_ISR 4113
297 +#define _IA64_REG_CR_IIP 4115
298 +#define _IA64_REG_CR_IFA 4116
299 +#define _IA64_REG_CR_ITIR 4117
300 +#define _IA64_REG_CR_IIPA 4118
301 +#define _IA64_REG_CR_IFS 4119
302 +#define _IA64_REG_CR_IIM 4120
303 +#define _IA64_REG_CR_IHA 4121
304 +#define _IA64_REG_CR_LID 4160
305 +#define _IA64_REG_CR_IVR 4161 /* getreg only */
306 +#define _IA64_REG_CR_TPR 4162
307 +#define _IA64_REG_CR_EOI 4163
308 +#define _IA64_REG_CR_IRR0 4164 /* getreg only */
309 +#define _IA64_REG_CR_IRR1 4165 /* getreg only */
310 +#define _IA64_REG_CR_IRR2 4166 /* getreg only */
311 +#define _IA64_REG_CR_IRR3 4167 /* getreg only */
312 +#define _IA64_REG_CR_ITV 4168
313 +#define _IA64_REG_CR_PMV 4169
314 +#define _IA64_REG_CR_CMCV 4170
315 +#define _IA64_REG_CR_LRR0 4176
316 +#define _IA64_REG_CR_LRR1 4177
317 +
318 +/* Indirect Registers for getindreg() and setindreg() */
319 +
320 +#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */
321 +#define _IA64_REG_INDR_DBR 9001
322 +#define _IA64_REG_INDR_IBR 9002
323 +#define _IA64_REG_INDR_PKR 9003
324 +#define _IA64_REG_INDR_PMC 9004
325 +#define _IA64_REG_INDR_PMD 9005
326 +#define _IA64_REG_INDR_RR 9006
327 +
328 +#ifdef __INTEL_COMPILER
329 +void __fc(uint64_t *addr);
330 +void __synci(void);
331 +void __isrlz(void);
332 +void __dsrlz(void);
333 +uint64_t __getReg(const int whichReg);
334 +uint64_t _InterlockedCompareExchange8_rel(volatile uint8_t *dest, uint64_t xchg, uint64_t comp);
335 +uint64_t _InterlockedCompareExchange8_acq(volatile uint8_t *dest, uint64_t xchg, uint64_t comp);
336 +uint64_t _InterlockedCompareExchange16_rel(volatile uint16_t *dest, uint64_t xchg, uint64_t comp);
337 +uint64_t _InterlockedCompareExchange16_acq(volatile uint16_t *dest, uint64_t xchg, uint64_t comp);
338 +uint64_t _InterlockedCompareExchange_rel(volatile uint32_t *dest, uint64_t xchg, uint64_t comp);
339 +uint64_t _InterlockedCompareExchange_acq(volatile uint32_t *dest, uint64_t xchg, uint64_t comp);
340 +uint64_t _InterlockedCompareExchange64_rel(volatile uint64_t *dest, uint64_t xchg, uint64_t comp);
341 +uint64_t _InterlockedCompareExchange64_acq(volatile uint64_t *dest, uint64_t xchg, uint64_t comp);
342 +
343 +#define ia64_cmpxchg1_rel _InterlockedCompareExchange8_rel
344 +#define ia64_cmpxchg1_acq _InterlockedCompareExchange8_acq
345 +#define ia64_cmpxchg2_rel _InterlockedCompareExchange16_rel
346 +#define ia64_cmpxchg2_acq _InterlockedCompareExchange16_acq
347 +#define ia64_cmpxchg4_rel _InterlockedCompareExchange_rel
348 +#define ia64_cmpxchg4_acq _InterlockedCompareExchange_acq
349 +#define ia64_cmpxchg8_rel _InterlockedCompareExchange64_rel
350 +#define ia64_cmpxchg8_acq _InterlockedCompareExchange64_acq
351 +
352 +#define ia64_srlz_d __dsrlz
353 +#define ia64_srlz_i __isrlz
354 +#define __ia64_fc __fc
355 +#define ia64_sync_i __synci
356 +#define __ia64_getreg __getReg
357 +#else /* __INTEL_COMPILER */
358 +#define ia64_cmpxchg1_acq(ptr, new, old) \
359 +({ \
360 + uint64_t ia64_intri_res; \
361 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
362 + asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
363 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
364 + ia64_intri_res; \
365 +})
366 +
367 +#define ia64_cmpxchg1_rel(ptr, new, old) \
368 +({ \
369 + uint64_t ia64_intri_res; \
370 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
371 + asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
372 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
373 + ia64_intri_res; \
374 +})
375 +
376 +#define ia64_cmpxchg2_acq(ptr, new, old) \
377 +({ \
378 + uint64_t ia64_intri_res; \
379 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
380 + asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
381 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
382 + ia64_intri_res; \
383 +})
384 +
385 +#define ia64_cmpxchg2_rel(ptr, new, old) \
386 +({ \
387 + uint64_t ia64_intri_res; \
388 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
389 + \
390 + asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
391 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
392 + ia64_intri_res; \
393 +})
394 +
395 +#define ia64_cmpxchg4_acq(ptr, new, old) \
396 +({ \
397 + uint64_t ia64_intri_res; \
398 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
399 + asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
400 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
401 + ia64_intri_res; \
402 +})
403 +
404 +#define ia64_cmpxchg4_rel(ptr, new, old) \
405 +({ \
406 + uint64_t ia64_intri_res; \
407 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
408 + asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
409 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
410 + ia64_intri_res; \
411 +})
412 +
413 +#define ia64_cmpxchg8_acq(ptr, new, old) \
414 +({ \
415 + uint64_t ia64_intri_res; \
416 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
417 + asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
418 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
419 + ia64_intri_res; \
420 +})
421 +
422 +#define ia64_cmpxchg8_rel(ptr, new, old) \
423 +({ \
424 + uint64_t ia64_intri_res; \
425 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
426 + \
427 + asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
428 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
429 + ia64_intri_res; \
430 +})
431 +
432 +#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
433 +#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory")
434 +#define __ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
435 +#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
436 +
437 +register unsigned long ia64_r13 asm ("r13") __attribute_used__;
438 +#define __ia64_getreg(regnum) \
439 +({ \
440 + uint64_t ia64_intri_res; \
441 + \
442 + switch (regnum) { \
443 + case _IA64_REG_GP: \
444 + asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
445 + break; \
446 + case _IA64_REG_IP: \
447 + asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
448 + break; \
449 + case _IA64_REG_PSR: \
450 + asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
451 + break; \
452 + case _IA64_REG_TP: /* for current() */ \
453 + ia64_intri_res = ia64_r13; \
454 + break; \
455 + case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
456 + asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
457 + : "i"(regnum - _IA64_REG_AR_KR0)); \
458 + break; \
459 + case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
460 + asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
461 + : "i" (regnum - _IA64_REG_CR_DCR)); \
462 + break; \
463 + case _IA64_REG_SP: \
464 + asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
465 + break; \
466 + default: \
467 + ia64_bad_param_for_getreg(); \
468 + break; \
469 + } \
470 + ia64_intri_res; \
471 +})
472 +
473 +#endif /* __INTEL_COMPILER */
474 +#endif /* IA64_INTRINSIC_H */