ia64/xen-unstable

tools/ioemu/patches/ioemu-ia64 @ 10904:acccec7e213a

Update patch to reflect changes from 10867:edf7a25fdc43.

Signed-off-by: Christian Limpach <Christian.Limpach@xensource.com>
author chris@kneesaa.uk.xensource.com
date Wed Aug 02 09:54:32 2006 +0100 (2006-08-02)
parents 158db2446071
children 87346792fe90
Index: ioemu/hw/iommu.c
===================================================================
--- ioemu.orig/hw/iommu.c	2006-08-02 09:46:38.774790244 +0100
+++ ioemu/hw/iommu.c	2006-08-02 09:46:39.030761544 +0100
@@ -82,7 +82,11 @@
 #define IOPTE_VALID 0x00000002 /* IOPTE is valid */
 #define IOPTE_WAZ 0x00000001 /* Write as zeros */
 
+#if defined(__i386__) || defined(__x86_64__)
 #define PAGE_SHIFT 12
+#elif defined(__ia64__)
+#define PAGE_SHIFT 14
+#endif
 #define PAGE_SIZE (1 << PAGE_SHIFT)
 #define PAGE_MASK (PAGE_SIZE - 1)
 
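The 12/14 split above reflects page size: 2^12 = 4 KiB pages on x86 versus
2^14 = 16 KiB pages on ia64. A stand-alone sketch (illustration only, not
part of the patch) of how the three macros interact; note that PAGE_MASK
here selects the offset bits within a page, the opposite of the Linux
kernel convention:

    /* Illustration only: behavior of the PAGE_* macros under ia64's shift. */
    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 14                 /* ia64; x86 would use 12 */
    #define PAGE_SIZE  (1 << PAGE_SHIFT)
    #define PAGE_MASK  (PAGE_SIZE - 1)    /* offset-within-page mask */

    int main(void)
    {
        unsigned long addr = 0x12345678;
        printf("page size  : %d\n", PAGE_SIZE);              /* 16384 */
        printf("page offset: %#lx\n", addr & PAGE_MASK);     /* low 14 bits */
        printf("frame      : %#lx\n", addr >> PAGE_SHIFT);
        assert((addr & ~(unsigned long)PAGE_MASK) % PAGE_SIZE == 0);
        return 0;
    }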
Index: ioemu/cpu-all.h
===================================================================
--- ioemu.orig/cpu-all.h	2006-08-02 09:46:38.969768383 +0100
+++ ioemu/cpu-all.h	2006-08-02 09:46:39.030761544 +0100
@@ -835,6 +835,31 @@
         :"=m" (*(volatile long *)addr)
         :"dIr" (nr));
 }
+#elif defined(__ia64__)
+#include "ia64_intrinsic.h"
+#define atomic_set_bit(nr, addr) ({ \
+    typeof(*addr) bit, old, new; \
+    volatile typeof(*addr) *m; \
+ \
+    m = (volatile typeof(*addr)*)(addr + nr / (8*sizeof(*addr))); \
+    bit = 1 << (nr % (8*sizeof(*addr))); \
+    do { \
+        old = *m; \
+        new = old | bit; \
+    } while (cmpxchg_acq(m, old, new) != old); \
+})
+
+#define atomic_clear_bit(nr, addr) ({ \
+    typeof(*addr) bit, old, new; \
+    volatile typeof(*addr) *m; \
+ \
+    m = (volatile typeof(*addr)*)(addr + nr / (8*sizeof(*addr))); \
+    bit = ~(1 << (nr % (8*sizeof(*addr)))); \
+    do { \
+        old = *m; \
+        new = old & bit; \
+    } while (cmpxchg_acq(m, old, new) != old); \
+})
 #endif
 
 /* memory API */
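The two macros above implement set/clear-bit as acquire-semantics
compare-and-swap retry loops. A stand-alone sketch of the same pattern
(illustration only), with GCC's __sync_val_compare_and_swap standing in
for cmpxchg_acq:

    #include <stdio.h>

    /* Same retry loop as atomic_set_bit above, on a portable CAS builtin. */
    static void set_bit_cas(unsigned long *addr, int nr)
    {
        unsigned long *m   = addr + nr / (8 * sizeof(*addr));
        unsigned long  bit = 1UL << (nr % (8 * sizeof(*addr)));
        unsigned long  old, new;

        do {
            old = *m;
            new = old | bit;
        } while (__sync_val_compare_and_swap(m, old, new) != old);
    }

    int main(void)
    {
        unsigned long map[2] = { 0, 0 };
        set_bit_cas(map, 70);                /* bit 6 of map[1] on LP64 */
        printf("%lx %lx\n", map[0], map[1]); /* prints: 0 40 */
        return 0;
    }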
Index: ioemu/vl.c
===================================================================
--- ioemu.orig/vl.c	2006-08-02 09:46:39.020762665 +0100
+++ ioemu/vl.c	2006-08-02 09:47:02.896085814 +0100
@@ -5578,6 +5578,7 @@
         exit(-1);
     }
 
+#if defined(__i386__) || defined(__x86_64__)
     if (xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) != nr_pages) {
         fprintf(logfile, "xc_get_pfn_list returned error %d\n", errno);
         exit(-1);
@@ -5598,6 +5599,34 @@
     fprintf(logfile, "shared page at pfn:%lx, mfn: %"PRIx64"\n", nr_pages - 1,
             (uint64_t)(page_array[nr_pages - 1]));
 
+#elif defined(__ia64__)
+    if (xc_ia64_get_pfn_list(xc_handle, domid,
+                             page_array, 0, nr_pages) != nr_pages) {
+        fprintf(logfile, "xc_ia64_get_pfn_list returned error %d\n", errno);
+        exit(-1);
+    }
+
+    phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
+                                         PROT_READ|PROT_WRITE,
+                                         page_array, nr_pages);
+    if (phys_ram_base == 0) {
+        fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
+        exit(-1);
+    }
+
+    if (xc_ia64_get_pfn_list(xc_handle, domid, page_array,
+                             IO_PAGE_START >> PAGE_SHIFT, 1) != 1) {
+        fprintf(logfile, "xc_ia64_get_pfn_list returned error %d\n", errno);
+        exit(-1);
+    }
+
+    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+                                       PROT_READ|PROT_WRITE,
+                                       page_array[0]);
+
+    fprintf(logfile, "shared page at pfn:%lx, mfn: %016lx\n",
+            IO_PAGE_START >> PAGE_SHIFT, page_array[0]);
+#endif
 #else /* !CONFIG_DM */
 
 #ifdef CONFIG_SOFTMMU
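Condensed, the ia64 branch above does three things: translate the guest's
pseudo-physical frames to machine frames, map all of guest RAM into qemu,
then map the single I/O request page shared with the hypervisor. A sketch
of the first two steps (error handling elided; libxc signatures assumed to
be exactly those used in the patch):

    #include <sys/mman.h>   /* PROT_READ, PROT_WRITE */
    #include <xenctrl.h>    /* xc_ia64_get_pfn_list, xc_map_foreign_batch */

    static void *map_guest_ram(int xc_handle, uint32_t domid,
                               unsigned long *page_array,
                               unsigned long nr_pages)
    {
        /* 1. Guest pseudo-physical frame -> machine frame, for all RAM. */
        xc_ia64_get_pfn_list(xc_handle, domid, page_array, 0, nr_pages);

        /* 2. Map every guest page into this process in one batch. */
        return xc_map_foreign_batch(xc_handle, domid,
                                    PROT_READ | PROT_WRITE,
                                    page_array, nr_pages);
    }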
Index: ioemu/target-i386-dm/exec-dm.c
===================================================================
--- ioemu.orig/target-i386-dm/exec-dm.c	2006-08-02 09:46:38.903775782 +0100
+++ ioemu/target-i386-dm/exec-dm.c	2006-08-02 09:46:39.034761096 +0100
@@ -341,6 +341,23 @@
     return io_mem_read[io_index >> IO_MEM_SHIFT];
 }
 
+#ifdef __ia64__
+/* IA64 has separate I and D caches, with coherence maintained by the DMA
+ * controller. To emulate the behavior the guest OS expects, we must flush
+ * the I/D caches here.
+ */
+static void sync_icache(unsigned long address, int len)
+{
+    int l;
+
+    for (l = 0; l < (len + 32); l += 32)
+        __ia64_fc(address + l);
+
+    ia64_sync_i();
+    ia64_srlz_i();
+}
+#endif
+
 /* physical memory access (slow version, mainly for debug) */
 #if defined(CONFIG_USER_ONLY)
 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
@@ -456,6 +473,9 @@
         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
             (addr & ~TARGET_PAGE_MASK);
         memcpy(buf, ptr, l);
+#ifdef __ia64__
+        sync_icache((unsigned long)ptr, l);
+#endif
     } else {
         /* unreported MMIO space */
         memset(buf, 0xff, len);
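sync_icache pairs a flush-cache loop (fc invalidates one 32-byte line per
iteration) with sync.i/srlz.i so that data just copied into guest memory
becomes visible to instruction fetch. On modern GCC the same idea can be
expressed portably (illustration only, not what the patch uses):

    #include <string.h>

    /* After copying code into executable memory, make it fetchable. */
    static void copy_and_sync(char *dst, const char *src, size_t len)
    {
        memcpy(dst, src, len);
        __builtin___clear_cache(dst, dst + len);  /* GCC builtin */
    }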
Index: ioemu/exec-all.h
===================================================================
--- ioemu.orig/exec-all.h	2006-08-02 09:46:38.881778248 +0100
+++ ioemu/exec-all.h	2006-08-02 09:46:39.034761096 +0100
@@ -462,12 +462,13 @@
 }
 #endif
 
-#ifdef __ia64
-#include <ia64intrin.h>
+#ifdef __ia64__
+#include "ia64_intrinsic.h"
 
 static inline int testandset (int *p)
 {
-    return __sync_lock_test_and_set (p, 1);
+    uint32_t o = 0, n = 1;
+    return (int)cmpxchg_acq(p, o, n);
 }
 #endif
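testandset is qemu's spinlock primitive: it stores 1 with acquire semantics
and returns the previous value, so a zero return means the lock was taken.
A stand-alone sketch of the calling convention (illustration only), again
with a GCC builtin standing in for cmpxchg_acq:

    #include <stdio.h>

    static inline int testandset(int *p)
    {
        /* Returns the old value: 0 means we acquired the lock. */
        return __sync_val_compare_and_swap(p, 0, 1);
    }

    int main(void)
    {
        int lock = 0;
        while (testandset(&lock))
            ;                    /* spin until the old value was 0 */
        /* ... critical section ... */
        lock = 0;                /* release */
        printf("lock taken and released\n");
        return 0;
    }

Unlike the xchg-based original, the cmpxchg version only stores when it
observes 0, which is still sufficient for the lock/unlock pattern shown.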
Index: ioemu/target-i386-dm/cpu.h
===================================================================
--- ioemu.orig/target-i386-dm/cpu.h	2006-08-02 09:46:38.902775894 +0100
+++ ioemu/target-i386-dm/cpu.h	2006-08-02 09:46:39.034761096 +0100
@@ -80,7 +80,11 @@
 /* helper2.c */
 int main_loop(void);
 
+#if defined(__i386__) || defined(__x86_64__)
 #define TARGET_PAGE_BITS 12
+#elif defined(__ia64__)
+#define TARGET_PAGE_BITS 14
+#endif
 #include "cpu-all.h"
 
 #endif /* CPU_I386_H */
Index: ioemu/ia64_intrinsic.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ ioemu/ia64_intrinsic.h	2006-08-02 09:46:39.035760983 +0100
@@ -0,0 +1,276 @@
+#ifndef IA64_INTRINSIC_H
+#define IA64_INTRINSIC_H
+
+/*
+ * Compiler-dependent Intrinsics
+ *
+ * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
+ * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ *
+ */
+extern long ia64_cmpxchg_called_with_bad_pointer (void);
+extern void ia64_bad_param_for_getreg (void);
+#define ia64_cmpxchg(sem,ptr,o,n,s) ({ \
+    uint64_t _o, _r; \
+    switch(s) { \
+        case 1: _o = (uint8_t)(long)(o); break; \
+        case 2: _o = (uint16_t)(long)(o); break; \
+        case 4: _o = (uint32_t)(long)(o); break; \
+        case 8: _o = (uint64_t)(long)(o); break; \
+        default: break; \
+    } \
+    switch(s) { \
+        case 1: \
+            _r = ia64_cmpxchg1_##sem((uint8_t*)ptr,n,_o); break; \
+        case 2: \
+            _r = ia64_cmpxchg2_##sem((uint16_t*)ptr,n,_o); break; \
+        case 4: \
+            _r = ia64_cmpxchg4_##sem((uint32_t*)ptr,n,_o); break; \
+        case 8: \
+            _r = ia64_cmpxchg8_##sem((uint64_t*)ptr,n,_o); break; \
+        default: \
+            _r = ia64_cmpxchg_called_with_bad_pointer(); break; \
+    } \
+    (__typeof__(o)) _r; \
+})
+
+#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq,ptr,o,n,sizeof(*ptr))
+#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel,ptr,o,n,sizeof(*ptr))
+
+/*
+ * Register Names for getreg() and setreg().
+ *
+ * The "magic" numbers happen to match the values used by the Intel compiler's
+ * getreg()/setreg() intrinsics.
+ */
+
+/* Special Registers */
+
+#define _IA64_REG_IP 1016 /* getreg only */
+#define _IA64_REG_PSR 1019
+#define _IA64_REG_PSR_L 1019
+
+/* General Integer Registers */
+
+#define _IA64_REG_GP 1025 /* R1 */
+#define _IA64_REG_R8 1032 /* R8 */
+#define _IA64_REG_R9 1033 /* R9 */
+#define _IA64_REG_SP 1036 /* R12 */
+#define _IA64_REG_TP 1037 /* R13 */
+
+/* Application Registers */
+
+#define _IA64_REG_AR_KR0 3072
+#define _IA64_REG_AR_KR1 3073
+#define _IA64_REG_AR_KR2 3074
+#define _IA64_REG_AR_KR3 3075
+#define _IA64_REG_AR_KR4 3076
+#define _IA64_REG_AR_KR5 3077
+#define _IA64_REG_AR_KR6 3078
+#define _IA64_REG_AR_KR7 3079
+#define _IA64_REG_AR_RSC 3088
+#define _IA64_REG_AR_BSP 3089
+#define _IA64_REG_AR_BSPSTORE 3090
+#define _IA64_REG_AR_RNAT 3091
+#define _IA64_REG_AR_FCR 3093
+#define _IA64_REG_AR_EFLAG 3096
+#define _IA64_REG_AR_CSD 3097
+#define _IA64_REG_AR_SSD 3098
+#define _IA64_REG_AR_CFLAG 3099
+#define _IA64_REG_AR_FSR 3100
+#define _IA64_REG_AR_FIR 3101
+#define _IA64_REG_AR_FDR 3102
+#define _IA64_REG_AR_CCV 3104
+#define _IA64_REG_AR_UNAT 3108
+#define _IA64_REG_AR_FPSR 3112
+#define _IA64_REG_AR_ITC 3116
+#define _IA64_REG_AR_PFS 3136
+#define _IA64_REG_AR_LC 3137
+#define _IA64_REG_AR_EC 3138
+
+/* Control Registers */
+
+#define _IA64_REG_CR_DCR 4096
+#define _IA64_REG_CR_ITM 4097
+#define _IA64_REG_CR_IVA 4098
+#define _IA64_REG_CR_PTA 4104
+#define _IA64_REG_CR_IPSR 4112
+#define _IA64_REG_CR_ISR 4113
+#define _IA64_REG_CR_IIP 4115
+#define _IA64_REG_CR_IFA 4116
+#define _IA64_REG_CR_ITIR 4117
+#define _IA64_REG_CR_IIPA 4118
+#define _IA64_REG_CR_IFS 4119
+#define _IA64_REG_CR_IIM 4120
+#define _IA64_REG_CR_IHA 4121
+#define _IA64_REG_CR_LID 4160
+#define _IA64_REG_CR_IVR 4161 /* getreg only */
+#define _IA64_REG_CR_TPR 4162
+#define _IA64_REG_CR_EOI 4163
+#define _IA64_REG_CR_IRR0 4164 /* getreg only */
+#define _IA64_REG_CR_IRR1 4165 /* getreg only */
+#define _IA64_REG_CR_IRR2 4166 /* getreg only */
+#define _IA64_REG_CR_IRR3 4167 /* getreg only */
+#define _IA64_REG_CR_ITV 4168
+#define _IA64_REG_CR_PMV 4169
+#define _IA64_REG_CR_CMCV 4170
+#define _IA64_REG_CR_LRR0 4176
+#define _IA64_REG_CR_LRR1 4177
+
+/* Indirect Registers for getindreg() and setindreg() */
+
+#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */
+#define _IA64_REG_INDR_DBR 9001
+#define _IA64_REG_INDR_IBR 9002
+#define _IA64_REG_INDR_PKR 9003
+#define _IA64_REG_INDR_PMC 9004
+#define _IA64_REG_INDR_PMD 9005
+#define _IA64_REG_INDR_RR 9006
+
+#ifdef __INTEL_COMPILER
+void __fc(uint64_t *addr);
+void __synci(void);
+void __isrlz(void);
+void __dsrlz(void);
+uint64_t __getReg(const int whichReg);
+uint64_t _InterlockedCompareExchange8_rel(volatile uint8_t *dest, uint64_t xchg, uint64_t comp);
+uint64_t _InterlockedCompareExchange8_acq(volatile uint8_t *dest, uint64_t xchg, uint64_t comp);
+uint64_t _InterlockedCompareExchange16_rel(volatile uint16_t *dest, uint64_t xchg, uint64_t comp);
+uint64_t _InterlockedCompareExchange16_acq(volatile uint16_t *dest, uint64_t xchg, uint64_t comp);
+uint64_t _InterlockedCompareExchange_rel(volatile uint32_t *dest, uint64_t xchg, uint64_t comp);
+uint64_t _InterlockedCompareExchange_acq(volatile uint32_t *dest, uint64_t xchg, uint64_t comp);
+uint64_t _InterlockedCompareExchange64_rel(volatile uint64_t *dest, uint64_t xchg, uint64_t comp);
+uint64_t _InterlockedCompareExchange64_acq(volatile uint64_t *dest, uint64_t xchg, uint64_t comp);
+
+#define ia64_cmpxchg1_rel _InterlockedCompareExchange8_rel
+#define ia64_cmpxchg1_acq _InterlockedCompareExchange8_acq
+#define ia64_cmpxchg2_rel _InterlockedCompareExchange16_rel
+#define ia64_cmpxchg2_acq _InterlockedCompareExchange16_acq
+#define ia64_cmpxchg4_rel _InterlockedCompareExchange_rel
+#define ia64_cmpxchg4_acq _InterlockedCompareExchange_acq
+#define ia64_cmpxchg8_rel _InterlockedCompareExchange64_rel
+#define ia64_cmpxchg8_acq _InterlockedCompareExchange64_acq
+
+#define ia64_srlz_d __dsrlz
+#define ia64_srlz_i __isrlz
+#define __ia64_fc __fc
+#define ia64_sync_i __synci
+#define __ia64_getreg __getReg
+#else /* __INTEL_COMPILER */
+#define ia64_cmpxchg1_acq(ptr, new, old) \
+({ \
+    uint64_t ia64_intri_res; \
+    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+    asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
+                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+    ia64_intri_res; \
+})
+
+#define ia64_cmpxchg1_rel(ptr, new, old) \
+({ \
+    uint64_t ia64_intri_res; \
+    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+    asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
+                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+    ia64_intri_res; \
+})
+
+#define ia64_cmpxchg2_acq(ptr, new, old) \
+({ \
+    uint64_t ia64_intri_res; \
+    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+    asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
+                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+    ia64_intri_res; \
+})
+
+#define ia64_cmpxchg2_rel(ptr, new, old) \
+({ \
+    uint64_t ia64_intri_res; \
+    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ \
+    asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
+                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+    ia64_intri_res; \
+})
+
+#define ia64_cmpxchg4_acq(ptr, new, old) \
+({ \
+    uint64_t ia64_intri_res; \
+    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+    asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
+                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+    ia64_intri_res; \
+})
+
+#define ia64_cmpxchg4_rel(ptr, new, old) \
+({ \
+    uint64_t ia64_intri_res; \
+    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+    asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
+                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+    ia64_intri_res; \
+})
+
+#define ia64_cmpxchg8_acq(ptr, new, old) \
+({ \
+    uint64_t ia64_intri_res; \
+    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+    asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
+                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+    ia64_intri_res; \
+})
+
+#define ia64_cmpxchg8_rel(ptr, new, old) \
+({ \
+    uint64_t ia64_intri_res; \
+    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ \
+    asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
+                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+    ia64_intri_res; \
+})
+
+#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
+#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory")
+#define __ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
+#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
+
+register unsigned long ia64_r13 asm ("r13") __attribute_used__;
+#define __ia64_getreg(regnum) \
+({ \
+    uint64_t ia64_intri_res; \
+ \
+    switch (regnum) { \
+    case _IA64_REG_GP: \
+        asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
+        break; \
+    case _IA64_REG_IP: \
+        asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
+        break; \
+    case _IA64_REG_PSR: \
+        asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
+        break; \
+    case _IA64_REG_TP: /* for current() */ \
+        ia64_intri_res = ia64_r13; \
+        break; \
+    case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
+        asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
+                      : "i"(regnum - _IA64_REG_AR_KR0)); \
+        break; \
+    case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
+        asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
+                      : "i" (regnum - _IA64_REG_CR_DCR)); \
+        break; \
+    case _IA64_REG_SP: \
+        asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
+        break; \
+    default: \
+        ia64_bad_param_for_getreg(); \
+        break; \
+    } \
+    ia64_intri_res; \
+})
+
+#endif /* __INTEL_COMPILER */
+#endif /* IA64_INTRINSIC_H */
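For reference, the cmpxchg_acq contract defined above: the macro picks the
right-sized cmpxchgN.acq by sizeof(*ptr) and returns the value previously
stored, performing the store only if that value equals the expected old
value. A sketch of typical use on an ia64 build (illustration only, not
part of the patch):

    #include <stdint.h>
    #include "ia64_intrinsic.h"

    /* Atomic 32-bit increment built on the acquire compare-and-exchange. */
    static inline uint32_t atomic_inc32(volatile uint32_t *p)
    {
        uint32_t old, new;

        do {
            old = *p;
            new = old + 1;
        } while (cmpxchg_acq(p, old, new) != old);

        return new;
    }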