ia64/xen-unstable

changeset 7635:0cae0c6436f5

This patch is intended to make qemu support ia64/vti. We have validated
it against latest xen-unstable.hg, both ia32 and x86-64 are not
affected.

Signed-off-by: Ke Yu <ke.yu@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Nakajima Jun <nakajima.jun@intel.com>
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Nov 05 11:30:01 2005 +0100 (2005-11-05)
parents 65c3b9382caa
children f41c33671a43
files tools/ioemu/cpu-all.h tools/ioemu/cpu.h tools/ioemu/exec-all.h tools/ioemu/exec.c tools/ioemu/hw/i8259_stub.c tools/ioemu/hw/iommu.c tools/ioemu/hw/vga.c tools/ioemu/ia64_intrinsic.h tools/ioemu/target-i386-dm/helper2.c tools/ioemu/vl.c
line diff
     1.1 --- a/tools/ioemu/cpu-all.h	Sat Nov 05 11:26:29 2005 +0100
     1.2 +++ b/tools/ioemu/cpu-all.h	Sat Nov 05 11:30:01 2005 +0100
     1.3 @@ -625,6 +625,47 @@ int cpu_inw(CPUState *env, int addr);
     1.4  int cpu_inl(CPUState *env, int addr);
     1.5  #endif
     1.6  
     1.7 +#if defined(__i386__) || defined(__x86_64__)
     1.8 +static __inline__ void atomic_set_bit(long nr, volatile void *addr)
     1.9 +{
    1.10 +        __asm__ __volatile__(
    1.11 +                "lock ; bts %1,%0"
    1.12 +                :"=m" (*(volatile long *)addr)
    1.13 +                :"dIr" (nr));
    1.14 +}
    1.15 +static __inline__ void atomic_clear_bit(long nr, volatile void *addr)
    1.16 +{
    1.17 +        __asm__ __volatile__(
    1.18 +                "lock ; btr %1,%0"
    1.19 +                :"=m" (*(volatile long *)addr)
    1.20 +                :"dIr" (nr));
    1.21 +}
    1.22 +#elif defined(__ia64__)
    1.23 +#include "ia64_intrinsic.h"
    1.24 +#define atomic_set_bit(nr, addr) ({					\
    1.25 +	typeof(*addr) bit, old, new;					\
    1.26 +	volatile typeof(*addr) *m;					\
    1.27 +									\
    1.28 +	m = (volatile typeof(*addr)*)(addr + nr / (8*sizeof(*addr)));	\
    1.29 +	bit = 1 << (nr % (8*sizeof(*addr)));				\
    1.30 +	do {								\
    1.31 +		old = *m;						\
    1.32 +		new = old | bit;					\
    1.33 +	} while (cmpxchg_acq(m, old, new) != old);	\
    1.34 +})
    1.35 +
    1.36 +#define atomic_clear_bit(nr, addr) ({					\
    1.37 +	typeof(*addr) bit, old, new;					\
    1.38 +	volatile typeof(*addr) *m;					\
    1.39 +									\
    1.40 +	m = (volatile typeof(*addr)*)(addr + nr / (8*sizeof(*addr)));	\
    1.41 +	bit = ~(1 << (nr % (8*sizeof(*addr))));				\
    1.42 +	do {								\
    1.43 +		old = *m;						\
    1.44 +		new = old & bit;					\
    1.45 +	} while (cmpxchg_acq(m, old, new) != old);	\
    1.46 +})
    1.47 +#endif
    1.48  /* memory API */
    1.49  
    1.50  extern int phys_ram_size;
     2.1 --- a/tools/ioemu/cpu.h	Sat Nov 05 11:26:29 2005 +0100
     2.2 +++ b/tools/ioemu/cpu.h	Sat Nov 05 11:30:01 2005 +0100
     2.3 @@ -63,7 +63,11 @@ int cpu_get_pic_interrupt(CPUX86State *s
     2.4  /* MSDOS compatibility mode FPU exception support */
     2.5  void cpu_set_ferr(CPUX86State *s);
     2.6  
     2.7 +#if defined(__i386__) || defined(__x86_64__)
     2.8  #define TARGET_PAGE_BITS 12
     2.9 +#elif defined(__ia64__)
    2.10 +#define TARGET_PAGE_BITS 14
    2.11 +#endif
    2.12  #include "cpu-all.h"
    2.13  
    2.14  #endif /* CPU_I386_H */
     3.1 --- a/tools/ioemu/exec-all.h	Sat Nov 05 11:26:29 2005 +0100
     3.2 +++ b/tools/ioemu/exec-all.h	Sat Nov 05 11:30:01 2005 +0100
     3.3 @@ -433,6 +433,15 @@ static inline int testandset (int *p)
     3.4  }
     3.5  #endif
     3.6  
     3.7 +#ifdef __ia64__
     3.8 +#include "ia64_intrinsic.h"
     3.9 +static inline int testandset (int *p)
    3.10 +{
    3.11 +    uint32_t o = 0, n = 1;
    3.12 +    return (int)cmpxchg_acq(p, o, n);
    3.13 +}
    3.14 +#endif
    3.15 +
    3.16  #ifdef __s390__
    3.17  static inline int testandset (int *p)
    3.18  {
     4.1 --- a/tools/ioemu/exec.c	Sat Nov 05 11:26:29 2005 +0100
     4.2 +++ b/tools/ioemu/exec.c	Sat Nov 05 11:30:01 2005 +0100
     4.3 @@ -360,6 +360,22 @@ int iomem_index(target_phys_addr_t addr)
     4.4          return 0;
     4.5  }
     4.6  
     4.7 +#ifdef __ia64__
     4.8 +/* IA64 has separate I/D cache, with coherence maintained by DMA controller.
     4.9 + * So to emulate right behavior that guest OS is assumed, we need to flush
    4.10 + * I/D cache here.
    4.11 + */
    4.12 +static void sync_icache(unsigned long address, int len)
    4.13 +{
    4.14 +    int l;
    4.15 +    for(l = 0; l < (len + 32); l += 32)
    4.16 +	__ia64_fc(address + l);
    4.17 +
    4.18 +    ia64_sync_i();
    4.19 +    ia64_srlz_i();
    4.20 +}
    4.21 +#endif 
    4.22 +
    4.23  void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
    4.24                              int len, int is_write)
    4.25  {
    4.26 @@ -402,6 +418,9 @@ void cpu_physical_memory_rw(target_phys_
    4.27                  /* RAM case */
    4.28                  ptr = phys_ram_base + addr1;
    4.29                  memcpy(ptr, buf, l);
    4.30 +#ifdef  __ia64__
    4.31 +                sync_icache((unsigned long)ptr,l);
    4.32 +#endif                
    4.33              }
    4.34          } else {
    4.35              if (io_index) {
     5.1 --- a/tools/ioemu/hw/i8259_stub.c	Sat Nov 05 11:26:29 2005 +0100
     5.2 +++ b/tools/ioemu/hw/i8259_stub.c	Sat Nov 05 11:30:01 2005 +0100
     5.3 @@ -27,21 +27,6 @@
     5.4  #include "cpu.h"
     5.5  #include "cpu-all.h"
     5.6  
     5.7 -static __inline__ void atomic_set_bit(long nr, volatile void *addr)
     5.8 -{
     5.9 -        __asm__ __volatile__(
    5.10 -                "lock ; bts %1,%0"
    5.11 -                :"=m" (*(volatile long *)addr)
    5.12 -                :"dIr" (nr));
    5.13 -}
    5.14 -static __inline__ void atomic_clear_bit(long nr, volatile void *addr)
    5.15 -{
    5.16 -        __asm__ __volatile__(
    5.17 -                "lock ; btr %1,%0"
    5.18 -                :"=m" (*(volatile long *)addr)
    5.19 -                :"dIr" (nr));
    5.20 -}
    5.21 -
    5.22  #include <vl.h>
    5.23  extern shared_iopage_t *shared_page;
    5.24  extern CPUState *global_env;
     6.1 --- a/tools/ioemu/hw/iommu.c	Sat Nov 05 11:26:29 2005 +0100
     6.2 +++ b/tools/ioemu/hw/iommu.c	Sat Nov 05 11:30:01 2005 +0100
     6.3 @@ -107,7 +107,11 @@ struct iommu_regs {
     6.4  #define IOPTE_VALID         0x00000002 /* IOPTE is valid */
     6.5  #define IOPTE_WAZ           0x00000001 /* Write as zeros */
     6.6  
     6.7 +#if defined(__i386__) || defined(__x86_64__)
     6.8  #define PAGE_SHIFT      12
     6.9 +#elif defined(__ia64__)
    6.10 +#define PAGE_SHIFT      14
    6.11 +#endif
    6.12  #define PAGE_SIZE       (1 << PAGE_SHIFT)
    6.13  #define PAGE_MASK	(PAGE_SIZE - 1)
    6.14  
     7.1 --- a/tools/ioemu/hw/vga.c	Sat Nov 05 11:26:29 2005 +0100
     7.2 +++ b/tools/ioemu/hw/vga.c	Sat Nov 05 11:30:01 2005 +0100
     7.3 @@ -1879,7 +1879,11 @@ void vga_common_init(VGAState *s, Displa
     7.4  
     7.5      /* qemu's vga mem is not detached from phys_ram_base and can cause DM abort
     7.6       * when guest write vga mem, so allocate a new one */
     7.7 +#if defined(__i386__) || defined(__x86_64__)
     7.8      s->vram_ptr = shared_vram;
     7.9 +#else
    7.10 +    s->vram_ptr = qemu_malloc(vga_ram_size);
    7.11 +#endif
    7.12  
    7.13      s->vram_offset = vga_ram_offset;
    7.14      s->vram_size = vga_ram_size;
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/tools/ioemu/ia64_intrinsic.h	Sat Nov 05 11:30:01 2005 +0100
     8.3 @@ -0,0 +1,275 @@
     8.4 +#ifndef IA64_INTRINSIC_H
     8.5 +#define IA64_INTRINSIC_H
     8.6 +
     8.7 +/*
     8.8 + * Compiler-dependent Intrinsics
     8.9 + *
    8.10 + * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
    8.11 + * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
    8.12 + *
    8.13 + */
    8.14 +extern long ia64_cmpxchg_called_with_bad_pointer (void);
    8.15 +extern void ia64_bad_param_for_getreg (void);
    8.16 +#define ia64_cmpxchg(sem,ptr,o,n,s) ({					\
    8.17 +	uint64_t _o, _r;						\
    8.18 +	switch(s) {							\
    8.19 +		case 1: _o = (uint8_t)(long)(o); break;			\
    8.20 +		case 2: _o = (uint16_t)(long)(o); break;		\
    8.21 +		case 4: _o = (uint32_t)(long)(o); break;		\
    8.22 +		case 8: _o = (uint64_t)(long)(o); break;		\
    8.23 +		default: break;						\
    8.24 +	}								\
    8.25 +	switch(s) {							\
    8.26 +		case 1:							\
    8.27 +		_r = ia64_cmpxchg1_##sem((uint8_t*)ptr,n,_o); break;	\
    8.28 +		case 2:							\
    8.29 +		_r = ia64_cmpxchg2_##sem((uint16_t*)ptr,n,_o); break;	\
    8.30 +		case 4:							\
    8.31 +		_r = ia64_cmpxchg4_##sem((uint32_t*)ptr,n,_o); break;	\
    8.32 +		case 8:							\
    8.33 +		_r = ia64_cmpxchg8_##sem((uint64_t*)ptr,n,_o); break;	\
    8.34 +		default:						\
    8.35 +		_r = ia64_cmpxchg_called_with_bad_pointer(); break;	\
    8.36 +	}								\
    8.37 +	(__typeof__(o)) _r;						\
    8.38 +})
    8.39 +
    8.40 +#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq,ptr,o,n,sizeof(*ptr))
    8.41 +#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel,ptr,o,n,sizeof(*ptr))
    8.42 +
    8.43 +/*
    8.44 + * Register Names for getreg() and setreg().
    8.45 + *
    8.46 + * The "magic" numbers happen to match the values used by the Intel compiler's
    8.47 + * getreg()/setreg() intrinsics.
    8.48 + */
    8.49 +
    8.50 +/* Special Registers */
    8.51 +
    8.52 +#define _IA64_REG_IP		1016	/* getreg only */
    8.53 +#define _IA64_REG_PSR		1019
    8.54 +#define _IA64_REG_PSR_L		1019
    8.55 +
    8.56 +/* General Integer Registers */
    8.57 +
    8.58 +#define _IA64_REG_GP		1025	/* R1 */
    8.59 +#define _IA64_REG_R8		1032	/* R8 */
    8.60 +#define _IA64_REG_R9		1033	/* R9 */
    8.61 +#define _IA64_REG_SP		1036	/* R12 */
    8.62 +#define _IA64_REG_TP		1037	/* R13 */
    8.63 +
    8.64 +/* Application Registers */
    8.65 +
    8.66 +#define _IA64_REG_AR_KR0	3072
    8.67 +#define _IA64_REG_AR_KR1	3073
    8.68 +#define _IA64_REG_AR_KR2	3074
    8.69 +#define _IA64_REG_AR_KR3	3075
    8.70 +#define _IA64_REG_AR_KR4	3076
    8.71 +#define _IA64_REG_AR_KR5	3077
    8.72 +#define _IA64_REG_AR_KR6	3078
    8.73 +#define _IA64_REG_AR_KR7	3079
    8.74 +#define _IA64_REG_AR_RSC	3088
    8.75 +#define _IA64_REG_AR_BSP	3089
    8.76 +#define _IA64_REG_AR_BSPSTORE	3090
    8.77 +#define _IA64_REG_AR_RNAT	3091
    8.78 +#define _IA64_REG_AR_FCR	3093
    8.79 +#define _IA64_REG_AR_EFLAG	3096
    8.80 +#define _IA64_REG_AR_CSD	3097
    8.81 +#define _IA64_REG_AR_SSD	3098
    8.82 +#define _IA64_REG_AR_CFLAG	3099
    8.83 +#define _IA64_REG_AR_FSR	3100
    8.84 +#define _IA64_REG_AR_FIR	3101
    8.85 +#define _IA64_REG_AR_FDR	3102
    8.86 +#define _IA64_REG_AR_CCV	3104
    8.87 +#define _IA64_REG_AR_UNAT	3108
    8.88 +#define _IA64_REG_AR_FPSR	3112
    8.89 +#define _IA64_REG_AR_ITC	3116
    8.90 +#define _IA64_REG_AR_PFS	3136
    8.91 +#define _IA64_REG_AR_LC		3137
    8.92 +#define _IA64_REG_AR_EC		3138
    8.93 +
    8.94 +/* Control Registers */
    8.95 +
    8.96 +#define _IA64_REG_CR_DCR	4096
    8.97 +#define _IA64_REG_CR_ITM	4097
    8.98 +#define _IA64_REG_CR_IVA	4098
    8.99 +#define _IA64_REG_CR_PTA	4104
   8.100 +#define _IA64_REG_CR_IPSR	4112
   8.101 +#define _IA64_REG_CR_ISR	4113
   8.102 +#define _IA64_REG_CR_IIP	4115
   8.103 +#define _IA64_REG_CR_IFA	4116
   8.104 +#define _IA64_REG_CR_ITIR	4117
   8.105 +#define _IA64_REG_CR_IIPA	4118
   8.106 +#define _IA64_REG_CR_IFS	4119
   8.107 +#define _IA64_REG_CR_IIM	4120
   8.108 +#define _IA64_REG_CR_IHA	4121
   8.109 +#define _IA64_REG_CR_LID	4160
   8.110 +#define _IA64_REG_CR_IVR	4161	/* getreg only */
   8.111 +#define _IA64_REG_CR_TPR	4162
   8.112 +#define _IA64_REG_CR_EOI	4163
   8.113 +#define _IA64_REG_CR_IRR0	4164	/* getreg only */
   8.114 +#define _IA64_REG_CR_IRR1	4165	/* getreg only */
   8.115 +#define _IA64_REG_CR_IRR2	4166	/* getreg only */
   8.116 +#define _IA64_REG_CR_IRR3	4167	/* getreg only */
   8.117 +#define _IA64_REG_CR_ITV	4168
   8.118 +#define _IA64_REG_CR_PMV	4169
   8.119 +#define _IA64_REG_CR_CMCV	4170
   8.120 +#define _IA64_REG_CR_LRR0	4176
   8.121 +#define _IA64_REG_CR_LRR1	4177
   8.122 +
   8.123 +/* Indirect Registers for getindreg() and setindreg() */
   8.124 +
   8.125 +#define _IA64_REG_INDR_CPUID	9000	/* getindreg only */
   8.126 +#define _IA64_REG_INDR_DBR	9001
   8.127 +#define _IA64_REG_INDR_IBR	9002
   8.128 +#define _IA64_REG_INDR_PKR	9003
   8.129 +#define _IA64_REG_INDR_PMC	9004
   8.130 +#define _IA64_REG_INDR_PMD	9005
   8.131 +#define _IA64_REG_INDR_RR	9006
   8.132 +
   8.133 +#ifdef __INTEL_COMPILER
   8.134 +void  __fc(uint64_t *addr);
   8.135 +void  __synci(void);
   8.136 +void __isrlz(void);
   8.137 +void __dsrlz(void);
   8.138 +uint64_t __getReg(const int whichReg);
   8.139 +uint64_t _InterlockedCompareExchange8_rel(volatile uint8_t *dest, uint64_t xchg, uint64_t comp);
   8.140 +uint64_t _InterlockedCompareExchange8_acq(volatile uint8_t *dest, uint64_t xchg, uint64_t comp);
   8.141 +uint64_t _InterlockedCompareExchange16_rel(volatile uint16_t *dest, uint64_t xchg, uint64_t comp);
   8.142 +uint64_t _InterlockedCompareExchange16_acq(volatile uint16_t *dest, uint64_t xchg, uint64_t comp);
   8.143 +uint64_t _InterlockedCompareExchange_rel(volatile uint32_t *dest, uint64_t xchg, uint64_t comp);
   8.144 +uint64_t _InterlockedCompareExchange_acq(volatile uint32_t *dest, uint64_t xchg, uint64_t comp);
   8.145 +uint64_t _InterlockedCompareExchange64_rel(volatile uint64_t *dest, uint64_t xchg, uint64_t comp);
    8.146 +uint64_t _InterlockedCompareExchange64_acq(volatile uint64_t *dest, uint64_t xchg, uint64_t comp);
   8.147 +
   8.148 +#define ia64_cmpxchg1_rel	_InterlockedCompareExchange8_rel
   8.149 +#define ia64_cmpxchg1_acq	_InterlockedCompareExchange8_acq
   8.150 +#define ia64_cmpxchg2_rel	_InterlockedCompareExchange16_rel
   8.151 +#define ia64_cmpxchg2_acq	_InterlockedCompareExchange16_acq
   8.152 +#define ia64_cmpxchg4_rel	_InterlockedCompareExchange_rel
   8.153 +#define ia64_cmpxchg4_acq	_InterlockedCompareExchange_acq
   8.154 +#define ia64_cmpxchg8_rel	_InterlockedCompareExchange64_rel
   8.155 +#define ia64_cmpxchg8_acq	_InterlockedCompareExchange64_acq
   8.156 +
   8.157 +#define ia64_srlz_d		__dsrlz
   8.158 +#define ia64_srlz_i		__isrlz
   8.159 +#define __ia64_fc 		__fc
   8.160 +#define ia64_sync_i		__synci
   8.161 +#define __ia64_getreg		__getReg
   8.162 +#else /* __INTEL_COMPILER */
   8.163 +#define ia64_cmpxchg1_acq(ptr, new, old)						\
   8.164 +({											\
   8.165 +	uint64_t ia64_intri_res;							\
   8.166 +	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
   8.167 +	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv":					\
   8.168 +			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
   8.169 +	ia64_intri_res;									\
   8.170 +})
   8.171 +
   8.172 +#define ia64_cmpxchg1_rel(ptr, new, old)						\
   8.173 +({											\
   8.174 +	uint64_t ia64_intri_res;							\
   8.175 +	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
   8.176 +	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv":					\
   8.177 +			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
   8.178 +	ia64_intri_res;									\
   8.179 +})
   8.180 +
   8.181 +#define ia64_cmpxchg2_acq(ptr, new, old)						\
   8.182 +({											\
   8.183 +	uint64_t ia64_intri_res;							\
   8.184 +	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
   8.185 +	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv":					\
   8.186 +			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
   8.187 +	ia64_intri_res;									\
   8.188 +})
   8.189 +
   8.190 +#define ia64_cmpxchg2_rel(ptr, new, old)						\
   8.191 +({											\
   8.192 +	uint64_t ia64_intri_res;							\
   8.193 +	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
   8.194 +											\
   8.195 +	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv":					\
   8.196 +			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
   8.197 +	ia64_intri_res;									\
   8.198 +})
   8.199 +
   8.200 +#define ia64_cmpxchg4_acq(ptr, new, old)						\
   8.201 +({											\
   8.202 +	uint64_t ia64_intri_res;							\
   8.203 +	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
   8.204 +	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv":					\
   8.205 +			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
   8.206 +	ia64_intri_res;									\
   8.207 +})
   8.208 +
   8.209 +#define ia64_cmpxchg4_rel(ptr, new, old)						\
   8.210 +({											\
   8.211 +	uint64_t ia64_intri_res;							\
   8.212 +	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
   8.213 +	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv":					\
   8.214 +			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
   8.215 +	ia64_intri_res;									\
   8.216 +})
   8.217 +
   8.218 +#define ia64_cmpxchg8_acq(ptr, new, old)						\
   8.219 +({											\
   8.220 +	uint64_t ia64_intri_res;							\
   8.221 +	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
   8.222 +	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv":					\
   8.223 +			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
   8.224 +	ia64_intri_res;									\
   8.225 +})
   8.226 +
   8.227 +#define ia64_cmpxchg8_rel(ptr, new, old)						\
   8.228 +({											\
   8.229 +	uint64_t ia64_intri_res;							\
   8.230 +	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
   8.231 +											\
   8.232 +	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv":					\
   8.233 +			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
   8.234 +	ia64_intri_res;									\
   8.235 +})
   8.236 +
   8.237 +#define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")
    8.238 +#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory")
   8.239 +#define __ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")
   8.240 +#define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")
   8.241 +
   8.242 +#define __ia64_getreg(regnum)							\
   8.243 +({										\
   8.244 +	uint64_t ia64_intri_res;							\
   8.245 +										\
   8.246 +	switch (regnum) {							\
   8.247 +	case _IA64_REG_GP:							\
   8.248 +		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res));		\
   8.249 +		break;								\
   8.250 +	case _IA64_REG_IP:							\
   8.251 +		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res));		\
   8.252 +		break;								\
   8.253 +	case _IA64_REG_PSR:							\
   8.254 +		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res));		\
   8.255 +		break;								\
   8.256 +	case _IA64_REG_TP:	/* for current() */				\
   8.257 +		ia64_intri_res = ia64_r13;					\
   8.258 +		break;								\
   8.259 +	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
   8.260 +		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res)		\
   8.261 +				      : "i"(regnum - _IA64_REG_AR_KR0));	\
   8.262 +		break;								\
   8.263 +	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:				\
   8.264 +		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res)		\
   8.265 +				      : "i" (regnum - _IA64_REG_CR_DCR));	\
   8.266 +		break;								\
   8.267 +	case _IA64_REG_SP:							\
   8.268 +		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res));		\
   8.269 +		break;								\
   8.270 +	default:								\
   8.271 +		ia64_bad_param_for_getreg();					\
   8.272 +		break;								\
   8.273 +	}									\
   8.274 +	ia64_intri_res;								\
   8.275 +})
   8.276 +
   8.277 +#endif /* __INTEL_COMPILER */
   8.278 +#endif /* IA64_INTRINSIC_H */
     9.1 --- a/tools/ioemu/target-i386-dm/helper2.c	Sat Nov 05 11:26:29 2005 +0100
     9.2 +++ b/tools/ioemu/target-i386-dm/helper2.c	Sat Nov 05 11:30:01 2005 +0100
     9.3 @@ -389,14 +389,6 @@ cpu_handle_ioreq(CPUState *env)
     9.4  
     9.5  int xc_handle;
     9.6  
     9.7 -static __inline__ void atomic_set_bit(long nr, volatile void *addr)
     9.8 -{
     9.9 -        __asm__ __volatile__(
    9.10 -                "lock ; bts %1,%0"
    9.11 -                :"=m" (*(volatile long *)addr)
    9.12 -                :"dIr" (nr));
    9.13 -}
    9.14 -
    9.15  void
    9.16  destroy_vmx_domain(void)
    9.17  {
    10.1 --- a/tools/ioemu/vl.c	Sat Nov 05 11:26:29 2005 +0100
    10.2 +++ b/tools/ioemu/vl.c	Sat Nov 05 11:30:01 2005 +0100
    10.3 @@ -22,6 +22,9 @@
    10.4   * THE SOFTWARE.
    10.5   */
    10.6  #include "vl.h"
    10.7 +#ifdef __ia64__
    10.8 +#include <xen/arch-ia64.h>
    10.9 +#endif
   10.10  
   10.11  #include <unistd.h>
   10.12  #include <fcntl.h>
   10.13 @@ -518,6 +521,11 @@ int64_t cpu_get_real_ticks(void)
   10.14      return val;
   10.15  }
   10.16  
   10.17 +#elif defined(__ia64__)
   10.18 +#include "ia64_intrinsic.h"
    10.19 +#define cpu_get_real_ticks()	\
    10.20 +    __ia64_getreg(_IA64_REG_AR_ITC)
   10.21 +
   10.22  #else
   10.23  #error unsupported CPU
   10.24  #endif
   10.25 @@ -2375,6 +2383,7 @@ static uint8_t *signal_stack;
   10.26  
   10.27  #include <xg_private.h>
   10.28  
   10.29 +#if defined(__i386__) || defined (__x86_64__)
   10.30  #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
   10.31  #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
   10.32  
   10.33 @@ -2544,6 +2553,10 @@ void unset_vram_mapping(unsigned long ad
   10.34      /* FIXME Flush the shadow page */
   10.35      unsetup_mapping(xc_handle, domid, toptab, addr, end);
   10.36  }
   10.37 +#elif defined(__ia64__)
   10.38 +void set_vram_mapping(unsigned long addr, unsigned long end) {}
   10.39 +void unset_vram_mapping(unsigned long addr, unsigned long end) {}
   10.40 +#endif
   10.41  
   10.42  int main(int argc, char **argv)
   10.43  {
   10.44 @@ -3018,9 +3031,14 @@ int main(int argc, char **argv)
   10.45      phys_ram_size = ram_size + vga_ram_size + bios_size;
   10.46  
   10.47      ram_pages = ram_size/PAGE_SIZE;
   10.48 +#if defined(__i386__) || defined(__x86_64__)
   10.49      vgaram_pages =  (vga_ram_size -1)/PAGE_SIZE + 1;
   10.50      free_pages = vgaram_pages / L1_PAGETABLE_ENTRIES;
   10.51      extra_pages = vgaram_pages + free_pages;
   10.52 +#else
   10.53 +    /* Test vga acceleration later */
   10.54 +    extra_pages = 0;
   10.55 +#endif
   10.56  
   10.57      xc_handle = xc_interface_open();
   10.58  
   10.59 @@ -3049,6 +3067,7 @@ int main(int argc, char **argv)
   10.60          exit(-1);
   10.61      }
   10.62  
   10.63 +#if defined(__i386__) || defined(__x86_64__)
   10.64      if ( xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) != nr_pages )
   10.65      {
   10.66  	    perror("xc_get_pfn_list");
   10.67 @@ -3077,8 +3096,6 @@ int main(int argc, char **argv)
   10.68   	    exit(-1);
   10.69       }
   10.70  
   10.71 -
   10.72 -
   10.73      memset(shared_vram, 0, vgaram_pages * PAGE_SIZE);
   10.74      toptab = page_array[ram_pages] << PAGE_SHIFT;
   10.75  
   10.76 @@ -3087,7 +3104,31 @@ int main(int argc, char **argv)
   10.77   				       page_array[ram_pages]);
   10.78  
   10.79      freepage_array = &page_array[nr_pages - extra_pages];
   10.80 - 
   10.81 +#elif defined(__ia64__)
   10.82 +    if ( xc_ia64_get_pfn_list(xc_handle, domid, page_array, 0, ram_pages) != ram_pages)
   10.83 +    {
   10.84 +        perror("xc_ia64_get_pfn_list");
   10.85 +        exit(-1);
   10.86 +    }
   10.87 +
   10.88 +    if ((phys_ram_base =  xc_map_foreign_batch(xc_handle, domid,
   10.89 +						 PROT_READ|PROT_WRITE,
   10.90 +						 page_array,
   10.91 +						 ram_pages)) == 0) {
   10.92 +	    perror("xc_map_foreign_batch");
   10.93 +	    exit(-1);
   10.94 +    }
   10.95 +
   10.96 +    if ( xc_ia64_get_pfn_list(xc_handle, domid, page_array, IO_PAGE_START>>PAGE_SHIFT, 1) != 1)
   10.97 +    {
   10.98 +        perror("xc_ia64_get_pfn_list");
   10.99 +        exit(-1);
  10.100 +    }
  10.101 +
  10.102 +    shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
  10.103 +				       PROT_READ|PROT_WRITE,
  10.104 + 				       page_array[0]);
  10.105 +#endif 
  10.106  
  10.107      fprintf(logfile, "shared page at pfn:%lx, mfn: %lx\n", (nr_pages-1), 
  10.108             (page_array[nr_pages - 1]));