ia64/xen-unstable

view tools/ioemu/patches/ioemu-ia64 @ 10803:42aa63188a88

IA64-specific code for new Qemu
Because some ia64 patches aren't checked into xen-unstable.hg,
I reversed the related logic.

Signed-off-by: Zhang xiantao <xiantao.zhang@intel.com>
Signed-off-by: Christian Limpach <Christian.Limpach@xensource.com>
author chris@kneesaa.uk.xensource.com
date Wed Jul 26 13:41:10 2006 +0100 (2006-07-26)
parents
children 5d76b22f80e4
line source
1 Index: ioemu/hw/iommu.c
2 ===================================================================
3 --- ioemu.orig/hw/iommu.c 2006-07-14 13:43:45.000000000 +0100
4 +++ ioemu/hw/iommu.c 2006-07-26 13:34:50.039997837 +0100
5 @@ -82,7 +82,11 @@
6 #define IOPTE_VALID 0x00000002 /* IOPTE is valid */
7 #define IOPTE_WAZ 0x00000001 /* Write as zeros */
9 +#if defined(__i386__) || defined(__x86_64__)
10 #define PAGE_SHIFT 12
11 +#elif defined(__ia64__)
12 +#define PAGE_SHIFT 14
13 +#endif
14 #define PAGE_SIZE (1 << PAGE_SHIFT)
15 #define PAGE_MASK (PAGE_SIZE - 1)
17 Index: ioemu/cpu-all.h
18 ===================================================================
19 --- ioemu.orig/cpu-all.h 2006-07-26 13:33:45.946834283 +0100
20 +++ ioemu/cpu-all.h 2006-07-26 13:34:50.038997944 +0100
21 @@ -835,6 +835,31 @@
22 :"=m" (*(volatile long *)addr)
23 :"dIr" (nr));
24 }
25 +#elif defined(__ia64__)
26 +#include "ia64_intrinsic.h"
27 +#define atomic_set_bit(nr, addr) ({ \
28 + typeof(*addr) bit, old, new; \
29 + volatile typeof(*addr) *m; \
30 + \
31 + m = (volatile typeof(*addr)*)(addr + nr / (8*sizeof(*addr))); \
32 + bit = 1 << (nr % (8*sizeof(*addr))); \
33 + do { \
34 + old = *m; \
35 + new = old | bit; \
36 + } while (cmpxchg_acq(m, old, new) != old); \
37 +})
38 +
39 +#define atomic_clear_bit(nr, addr) ({ \
40 + typeof(*addr) bit, old, new; \
41 + volatile typeof(*addr) *m; \
42 + \
43 + m = (volatile typeof(*addr)*)(addr + nr / (8*sizeof(*addr))); \
44 + bit = ~(1 << (nr % (8*sizeof(*addr)))); \
45 + do { \
46 + old = *m; \
47 + new = old & bit; \
48 + } while (cmpxchg_acq(m, old, new) != old); \
49 +})
50 #endif
52 /* memory API */
53 Index: ioemu/vl.c
54 ===================================================================
55 --- ioemu.orig/vl.c 2006-07-26 13:33:45.996828953 +0100
56 +++ ioemu/vl.c 2006-07-26 13:34:50.044997304 +0100
57 @@ -5577,6 +5577,7 @@
58 exit(-1);
59 }
61 +#if defined(__i386__) || defined(__x86_64__)
62 if (xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) != nr_pages) {
63 fprintf(logfile, "xc_get_pfn_list returned error %d\n", errno);
64 exit(-1);
65 @@ -5597,6 +5598,34 @@
66 fprintf(logfile, "shared page at pfn:%lx, mfn: %"PRIx64"\n", nr_pages - 1,
67 (uint64_t)(page_array[nr_pages - 1]));
69 +#elif defined(__ia64__)
70 + if (xc_ia64_get_pfn_list(xc_handle, domid,
71 + page_array, 0, nr_pages) != nr_pages) {
72 + fprintf(logfile, "xc_ia64_get_pfn_list returned error %d\n", errno);
73 + exit(-1);
74 + }
75 +
76 + phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
77 + PROT_READ|PROT_WRITE,
78 + page_array, nr_pages);
79 + if (phys_ram_base == 0) {
80 + fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
81 + exit(-1);
82 + }
83 +
84 + if (xc_ia64_get_pfn_list(xc_handle, domid, page_array,
85 + nr_pages + (GFW_SIZE >> PAGE_SHIFT), 1)!= 1){
86 + fprintf(logfile, "xc_ia64_get_pfn_list returned error %d\n", errno);
87 + exit(-1);
88 + }
89 +
90 + shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
91 + PROT_READ|PROT_WRITE,
92 + page_array[0]);
93 +
94 + fprintf(logfile, "shared page at pfn:%lx, mfn: %016lx\n",
95 + IO_PAGE_START >> PAGE_SHIFT, page_array[0]);
96 +#endif
97 #else /* !CONFIG_DM */
99 #ifdef CONFIG_SOFTMMU
100 Index: ioemu/target-i386-dm/exec-dm.c
101 ===================================================================
102 --- ioemu.orig/target-i386-dm/exec-dm.c 2006-07-26 13:33:45.882841107 +0100
103 +++ ioemu/target-i386-dm/exec-dm.c 2006-07-26 13:34:50.040997731 +0100
104 @@ -340,6 +340,23 @@
105 return io_mem_read[io_index >> IO_MEM_SHIFT];
106 }
108 +#ifdef __ia64__
109 +/* IA64 has separate I/D caches, with coherence maintained by the DMA controller.
110 + * So to emulate the behavior the guest OS assumes, we need to flush the
111 + * I/D caches here.
112 + */
113 +static void sync_icache(unsigned long address, int len)
114 +{
115 + int l;
116 +
117 + for(l = 0; l < (len + 32); l += 32)
118 + __ia64_fc(address + l);
119 +
120 + ia64_sync_i();
121 + ia64_srlz_i();
122 +}
123 +#endif
124 +
125 /* physical memory access (slow version, mainly for debug) */
126 #if defined(CONFIG_USER_ONLY)
127 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
128 @@ -455,6 +472,9 @@
129 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
130 (addr & ~TARGET_PAGE_MASK);
131 memcpy(buf, ptr, l);
132 +#ifdef __ia64__
133 + sync_icache((unsigned long)ptr, l);
134 +#endif
135 }
136 }
137 len -= l;
138 Index: ioemu/exec-all.h
139 ===================================================================
140 --- ioemu.orig/exec-all.h 2006-07-26 13:33:45.861843346 +0100
141 +++ ioemu/exec-all.h 2006-07-26 13:38:30.096491388 +0100
142 @@ -391,6 +391,15 @@
143 }
144 #endif
146 +#ifdef __ia64__
147 +#include "ia64_intrinsic.h"
148 +static inline int testandset (int *p)
149 +{
150 + uint32_t o = 0, n = 1;
151 + return (int)cmpxchg_acq(p, o, n);
152 +}
153 +#endif
154 +
155 #ifdef __s390__
156 static inline int testandset (int *p)
157 {
158 @@ -462,12 +471,13 @@
159 }
160 #endif
162 -#ifdef __ia64
163 -#include <ia64intrin.h>
164 +#ifdef __ia64__
165 +#include "ia64_intrinsic.h"
167 static inline int testandset (int *p)
168 {
169 - return __sync_lock_test_and_set (p, 1);
170 + uint32_t o = 0, n = 1;
171 + return (int)cmpxchg_acq(p, o, n);
172 }
173 #endif
175 Index: ioemu/target-i386-dm/cpu.h
176 ===================================================================
177 --- ioemu.orig/target-i386-dm/cpu.h 2006-07-26 13:33:45.882841107 +0100
178 +++ ioemu/target-i386-dm/cpu.h 2006-07-26 13:34:50.040997731 +0100
179 @@ -80,7 +80,11 @@
180 /* helper2.c */
181 int main_loop(void);
183 +#if defined(__i386__) || defined(__x86_64__)
184 #define TARGET_PAGE_BITS 12
185 +#elif defined(__ia64__)
186 +#define TARGET_PAGE_BITS 14
187 +#endif
188 #include "cpu-all.h"
190 #endif /* CPU_I386_H */
191 Index: ioemu/ia64_intrinsic.h
192 ===================================================================
193 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
194 +++ ioemu/ia64_intrinsic.h 2006-07-26 13:34:50.038997944 +0100
195 @@ -0,0 +1,276 @@
196 +#ifndef IA64_INTRINSIC_H
197 +#define IA64_INTRINSIC_H
198 +
199 +/*
200 + * Compiler-dependent Intrinsics
201 + *
202 + * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
203 + * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
204 + *
205 + */
206 +extern long ia64_cmpxchg_called_with_bad_pointer (void);
207 +extern void ia64_bad_param_for_getreg (void);
208 +#define ia64_cmpxchg(sem,ptr,o,n,s) ({ \
209 + uint64_t _o, _r; \
210 + switch(s) { \
211 + case 1: _o = (uint8_t)(long)(o); break; \
212 + case 2: _o = (uint16_t)(long)(o); break; \
213 + case 4: _o = (uint32_t)(long)(o); break; \
214 + case 8: _o = (uint64_t)(long)(o); break; \
215 + default: break; \
216 + } \
217 + switch(s) { \
218 + case 1: \
219 + _r = ia64_cmpxchg1_##sem((uint8_t*)ptr,n,_o); break; \
220 + case 2: \
221 + _r = ia64_cmpxchg2_##sem((uint16_t*)ptr,n,_o); break; \
222 + case 4: \
223 + _r = ia64_cmpxchg4_##sem((uint32_t*)ptr,n,_o); break; \
224 + case 8: \
225 + _r = ia64_cmpxchg8_##sem((uint64_t*)ptr,n,_o); break; \
226 + default: \
227 + _r = ia64_cmpxchg_called_with_bad_pointer(); break; \
228 + } \
229 + (__typeof__(o)) _r; \
230 +})
231 +
232 +#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq,ptr,o,n,sizeof(*ptr))
233 +#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel,ptr,o,n,sizeof(*ptr))
234 +
235 +/*
236 + * Register Names for getreg() and setreg().
237 + *
238 + * The "magic" numbers happen to match the values used by the Intel compiler's
239 + * getreg()/setreg() intrinsics.
240 + */
241 +
242 +/* Special Registers */
243 +
244 +#define _IA64_REG_IP 1016 /* getreg only */
245 +#define _IA64_REG_PSR 1019
246 +#define _IA64_REG_PSR_L 1019
247 +
248 +/* General Integer Registers */
249 +
250 +#define _IA64_REG_GP 1025 /* R1 */
251 +#define _IA64_REG_R8 1032 /* R8 */
252 +#define _IA64_REG_R9 1033 /* R9 */
253 +#define _IA64_REG_SP 1036 /* R12 */
254 +#define _IA64_REG_TP 1037 /* R13 */
255 +
256 +/* Application Registers */
257 +
258 +#define _IA64_REG_AR_KR0 3072
259 +#define _IA64_REG_AR_KR1 3073
260 +#define _IA64_REG_AR_KR2 3074
261 +#define _IA64_REG_AR_KR3 3075
262 +#define _IA64_REG_AR_KR4 3076
263 +#define _IA64_REG_AR_KR5 3077
264 +#define _IA64_REG_AR_KR6 3078
265 +#define _IA64_REG_AR_KR7 3079
266 +#define _IA64_REG_AR_RSC 3088
267 +#define _IA64_REG_AR_BSP 3089
268 +#define _IA64_REG_AR_BSPSTORE 3090
269 +#define _IA64_REG_AR_RNAT 3091
270 +#define _IA64_REG_AR_FCR 3093
271 +#define _IA64_REG_AR_EFLAG 3096
272 +#define _IA64_REG_AR_CSD 3097
273 +#define _IA64_REG_AR_SSD 3098
274 +#define _IA64_REG_AR_CFLAG 3099
275 +#define _IA64_REG_AR_FSR 3100
276 +#define _IA64_REG_AR_FIR 3101
277 +#define _IA64_REG_AR_FDR 3102
278 +#define _IA64_REG_AR_CCV 3104
279 +#define _IA64_REG_AR_UNAT 3108
280 +#define _IA64_REG_AR_FPSR 3112
281 +#define _IA64_REG_AR_ITC 3116
282 +#define _IA64_REG_AR_PFS 3136
283 +#define _IA64_REG_AR_LC 3137
284 +#define _IA64_REG_AR_EC 3138
285 +
286 +/* Control Registers */
287 +
288 +#define _IA64_REG_CR_DCR 4096
289 +#define _IA64_REG_CR_ITM 4097
290 +#define _IA64_REG_CR_IVA 4098
291 +#define _IA64_REG_CR_PTA 4104
292 +#define _IA64_REG_CR_IPSR 4112
293 +#define _IA64_REG_CR_ISR 4113
294 +#define _IA64_REG_CR_IIP 4115
295 +#define _IA64_REG_CR_IFA 4116
296 +#define _IA64_REG_CR_ITIR 4117
297 +#define _IA64_REG_CR_IIPA 4118
298 +#define _IA64_REG_CR_IFS 4119
299 +#define _IA64_REG_CR_IIM 4120
300 +#define _IA64_REG_CR_IHA 4121
301 +#define _IA64_REG_CR_LID 4160
302 +#define _IA64_REG_CR_IVR 4161 /* getreg only */
303 +#define _IA64_REG_CR_TPR 4162
304 +#define _IA64_REG_CR_EOI 4163
305 +#define _IA64_REG_CR_IRR0 4164 /* getreg only */
306 +#define _IA64_REG_CR_IRR1 4165 /* getreg only */
307 +#define _IA64_REG_CR_IRR2 4166 /* getreg only */
308 +#define _IA64_REG_CR_IRR3 4167 /* getreg only */
309 +#define _IA64_REG_CR_ITV 4168
310 +#define _IA64_REG_CR_PMV 4169
311 +#define _IA64_REG_CR_CMCV 4170
312 +#define _IA64_REG_CR_LRR0 4176
313 +#define _IA64_REG_CR_LRR1 4177
314 +
315 +/* Indirect Registers for getindreg() and setindreg() */
316 +
317 +#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */
318 +#define _IA64_REG_INDR_DBR 9001
319 +#define _IA64_REG_INDR_IBR 9002
320 +#define _IA64_REG_INDR_PKR 9003
321 +#define _IA64_REG_INDR_PMC 9004
322 +#define _IA64_REG_INDR_PMD 9005
323 +#define _IA64_REG_INDR_RR 9006
324 +
325 +#ifdef __INTEL_COMPILER
326 +void __fc(uint64_t *addr);
327 +void __synci(void);
328 +void __isrlz(void);
329 +void __dsrlz(void);
330 +uint64_t __getReg(const int whichReg);
331 +uint64_t _InterlockedCompareExchange8_rel(volatile uint8_t *dest, uint64_t xchg, uint64_t comp);
332 +uint64_t _InterlockedCompareExchange8_acq(volatile uint8_t *dest, uint64_t xchg, uint64_t comp);
333 +uint64_t _InterlockedCompareExchange16_rel(volatile uint16_t *dest, uint64_t xchg, uint64_t comp);
334 +uint64_t _InterlockedCompareExchange16_acq(volatile uint16_t *dest, uint64_t xchg, uint64_t comp);
335 +uint64_t _InterlockedCompareExchange_rel(volatile uint32_t *dest, uint64_t xchg, uint64_t comp);
336 +uint64_t _InterlockedCompareExchange_acq(volatile uint32_t *dest, uint64_t xchg, uint64_t comp);
337 +uint64_t _InterlockedCompareExchange64_rel(volatile uint64_t *dest, uint64_t xchg, uint64_t comp);
338 +uint64_t _InterlockedCompareExchange64_acq(volatile uint64_t *dest, uint64_t xchg, uint64_t comp);
339 +
340 +#define ia64_cmpxchg1_rel _InterlockedCompareExchange8_rel
341 +#define ia64_cmpxchg1_acq _InterlockedCompareExchange8_acq
342 +#define ia64_cmpxchg2_rel _InterlockedCompareExchange16_rel
343 +#define ia64_cmpxchg2_acq _InterlockedCompareExchange16_acq
344 +#define ia64_cmpxchg4_rel _InterlockedCompareExchange_rel
345 +#define ia64_cmpxchg4_acq _InterlockedCompareExchange_acq
346 +#define ia64_cmpxchg8_rel _InterlockedCompareExchange64_rel
347 +#define ia64_cmpxchg8_acq _InterlockedCompareExchange64_acq
348 +
349 +#define ia64_srlz_d __dsrlz
350 +#define ia64_srlz_i __isrlz
351 +#define __ia64_fc __fc
352 +#define ia64_sync_i __synci
353 +#define __ia64_getreg __getReg
354 +#else /* __INTEL_COMPILER */
355 +#define ia64_cmpxchg1_acq(ptr, new, old) \
356 +({ \
357 + uint64_t ia64_intri_res; \
358 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
359 + asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
360 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
361 + ia64_intri_res; \
362 +})
363 +
364 +#define ia64_cmpxchg1_rel(ptr, new, old) \
365 +({ \
366 + uint64_t ia64_intri_res; \
367 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
368 + asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
369 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
370 + ia64_intri_res; \
371 +})
372 +
373 +#define ia64_cmpxchg2_acq(ptr, new, old) \
374 +({ \
375 + uint64_t ia64_intri_res; \
376 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
377 + asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
378 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
379 + ia64_intri_res; \
380 +})
381 +
382 +#define ia64_cmpxchg2_rel(ptr, new, old) \
383 +({ \
384 + uint64_t ia64_intri_res; \
385 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
386 + \
387 + asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
388 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
389 + ia64_intri_res; \
390 +})
391 +
392 +#define ia64_cmpxchg4_acq(ptr, new, old) \
393 +({ \
394 + uint64_t ia64_intri_res; \
395 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
396 + asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
397 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
398 + ia64_intri_res; \
399 +})
400 +
401 +#define ia64_cmpxchg4_rel(ptr, new, old) \
402 +({ \
403 + uint64_t ia64_intri_res; \
404 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
405 + asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
406 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
407 + ia64_intri_res; \
408 +})
409 +
410 +#define ia64_cmpxchg8_acq(ptr, new, old) \
411 +({ \
412 + uint64_t ia64_intri_res; \
413 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
414 + asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
415 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
416 + ia64_intri_res; \
417 +})
418 +
419 +#define ia64_cmpxchg8_rel(ptr, new, old) \
420 +({ \
421 + uint64_t ia64_intri_res; \
422 + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
423 + \
424 + asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
425 + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
426 + ia64_intri_res; \
427 +})
428 +
429 +#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
430 +#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory");
431 +#define __ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
432 +#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
433 +
434 +register unsigned long ia64_r13 asm ("r13") __attribute_used__;
435 +#define __ia64_getreg(regnum) \
436 +({ \
437 + uint64_t ia64_intri_res; \
438 + \
439 + switch (regnum) { \
440 + case _IA64_REG_GP: \
441 + asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
442 + break; \
443 + case _IA64_REG_IP: \
444 + asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
445 + break; \
446 + case _IA64_REG_PSR: \
447 + asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
448 + break; \
449 + case _IA64_REG_TP: /* for current() */ \
450 + ia64_intri_res = ia64_r13; \
451 + break; \
452 + case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
453 + asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
454 + : "i"(regnum - _IA64_REG_AR_KR0)); \
455 + break; \
456 + case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
457 + asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
458 + : "i" (regnum - _IA64_REG_CR_DCR)); \
459 + break; \
460 + case _IA64_REG_SP: \
461 + asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
462 + break; \
463 + default: \
464 + ia64_bad_param_for_getreg(); \
465 + break; \
466 + } \
467 + ia64_intri_res; \
468 +})
469 +
470 +#endif /* __INTEL_COMPILER */
471 +#endif /* IA64_INTRINSIC_H */