direct-io.hg

annotate xen/include/asm-x86/system.h @ 15516:fa9fa5f98c91

Simplify Unisys ES7000 code in light of the fact we do not support
legacy boxes with very non-standard APIC setup.
From: Raj Subrahmanian <raj.subrahmanian@unisys.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Tue Jul 10 10:07:00 2007 +0100 (2007-07-10)
parents 3e2d3d737624
children d7e3224b661a
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <xen/config.h>
#include <xen/types.h>
#include <asm/bitops.h>

#define read_segment_register(name)                                     \
({  u16 __sel;                                                          \
    __asm__ __volatile__ ( "movw %%" STR(name) ",%0" : "=r" (__sel) );  \
    __sel;                                                              \
})

#define wbinvd() \
    __asm__ __volatile__ ("wbinvd": : :"memory");

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 * but strictly speaking the constraints are not quite right: *ptr is
 * really an output argument. --ANK
 */
static always_inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
#if defined(__i386__)
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
#elif defined(__x86_64__)
    case 4:
        __asm__ __volatile__("xchgl %k0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
    case 8:
        __asm__ __volatile__("xchgq %0,%1"
                             :"=r" (x)
                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
                             :"memory");
        break;
#endif
    }
    return x;
}

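/*
 * Usage sketch (not part of the original header): xchg() atomically
 * stores a new value and returns the previous contents of *ptr, so the
 * caller can both update and observe a word in one step.  The helper
 * name below is hypothetical.
 */
static inline unsigned long example_xchg_release(volatile unsigned long *flag)
{
    /* Atomically clear the flag and report what it held before. */
    return xchg(flag, 0UL);
}
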
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

static always_inline unsigned long __cmpxchg(
    volatile void *ptr, unsigned long old, unsigned long new, int size)
{
    unsigned long prev;
    switch (size) {
    case 1:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
    case 2:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
#if defined(__i386__)
    case 4:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
#elif defined(__x86_64__)
    case 4:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
    case 8:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
                             : "memory");
        return prev;
#endif
    }
    return old;
}

#define __HAVE_ARCH_CMPXCHG

#if BITS_PER_LONG == 64

#define cmpxchg(ptr,o,n)                                                \
    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),            \
                                   (unsigned long)(n),sizeof(*(ptr))))
#else

static always_inline unsigned long long __cmpxchg8b(
    volatile void *ptr, unsigned long long old, unsigned long long new)
{
    unsigned long long prev;
    __asm__ __volatile__ (
        LOCK_PREFIX "cmpxchg8b %3"
        : "=A" (prev)
        : "c" ((u32)(new>>32)), "b" ((u32)new),
          "m" (*__xg((volatile void *)ptr)), "0" (old)
        : "memory" );
    return prev;
}

#define cmpxchg(ptr,o,n)                                \
({                                                      \
    __typeof__(*(ptr)) __prev;                          \
    switch ( sizeof(*(ptr)) ) {                         \
    case 8:                                             \
        __prev = ((__typeof__(*(ptr)))__cmpxchg8b(      \
            (ptr),                                      \
            (unsigned long long)(o),                    \
            (unsigned long long)(n)));                  \
        break;                                          \
    default:                                            \
        __prev = ((__typeof__(*(ptr)))__cmpxchg(        \
            (ptr),                                      \
            (unsigned long)(o),                         \
            (unsigned long)(n),                         \
            sizeof(*(ptr))));                           \
        break;                                          \
    }                                                   \
    __prev;                                             \
})

#endif

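/*
 * Usage sketch (not part of the original header): the usual cmpxchg()
 * retry loop.  Success is detected by comparing the value returned by
 * cmpxchg() with the old value the update was based on.  The function
 * name is hypothetical.
 */
static inline void example_atomic_add(volatile unsigned long *counter,
                                      unsigned long delta)
{
    unsigned long old, seen;

    do {
        old  = *counter;
        seen = cmpxchg(counter, old, old + delta);
    } while ( seen != old ); /* another CPU raced with us; retry */
}
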
/*
 * Attempt to change the value at location _p from _o to _n.
 * If the access causes a fault then we return 1, otherwise we return 0.
 * If no fault occurs then _o is updated to the value we saw at _p.  If this
 * is the same as the initial value of _o then _n has been written to
 * location _p.
 */
#ifdef __i386__
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype)                 \
    __asm__ __volatile__ (                                              \
        "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n"             \
        "2:\n"                                                          \
        ".section .fixup,\"ax\"\n"                                      \
        "3:     movl $1,%1\n"                                           \
        "       jmp 2b\n"                                               \
        ".previous\n"                                                   \
        ".section __ex_table,\"a\"\n"                                   \
        "       .align 4\n"                                             \
        "       .long 1b,3b\n"                                          \
        ".previous"                                                     \
        : "=a" (_o), "=r" (_rc)                                         \
        : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
        : "memory");
#define cmpxchg_user(_p,_o,_n)                                          \
({                                                                      \
    int _rc;                                                            \
    switch ( sizeof(*(_p)) ) {                                          \
    case 1:                                                             \
        __cmpxchg_user(_p,_o,_n,"b","b","q");                           \
        break;                                                          \
    case 2:                                                             \
        __cmpxchg_user(_p,_o,_n,"w","w","r");                           \
        break;                                                          \
    case 4:                                                             \
        __cmpxchg_user(_p,_o,_n,"l","","r");                            \
        break;                                                          \
    case 8:                                                             \
        __asm__ __volatile__ (                                          \
            "1: " LOCK_PREFIX "cmpxchg8b %4\n"                          \
            "2:\n"                                                      \
            ".section .fixup,\"ax\"\n"                                  \
            "3:     movl $1,%1\n"                                       \
            "       jmp 2b\n"                                           \
            ".previous\n"                                               \
            ".section __ex_table,\"a\"\n"                               \
            "       .align 4\n"                                         \
            "       .long 1b,3b\n"                                      \
            ".previous"                                                 \
            : "=A" (_o), "=r" (_rc)                                     \
            : "c" ((u32)((u64)(_n)>>32)), "b" ((u32)(_n)),              \
              "m" (*__xg((volatile void *)(_p))), "0" (_o), "1" (0)     \
            : "memory");                                                \
        break;                                                          \
    }                                                                   \
    _rc;                                                                \
})
#else
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype)                 \
    __asm__ __volatile__ (                                              \
        "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n"             \
        "2:\n"                                                          \
        ".section .fixup,\"ax\"\n"                                      \
        "3:     movl $1,%1\n"                                           \
        "       jmp 2b\n"                                               \
        ".previous\n"                                                   \
        ".section __ex_table,\"a\"\n"                                   \
        "       .align 8\n"                                             \
        "       .quad 1b,3b\n"                                          \
        ".previous"                                                     \
        : "=a" (_o), "=r" (_rc)                                         \
        : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
        : "memory");
#define cmpxchg_user(_p,_o,_n)                                          \
({                                                                      \
    int _rc;                                                            \
    switch ( sizeof(*(_p)) ) {                                          \
    case 1:                                                             \
        __cmpxchg_user(_p,_o,_n,"b","b","q");                           \
        break;                                                          \
    case 2:                                                             \
        __cmpxchg_user(_p,_o,_n,"w","w","r");                           \
        break;                                                          \
    case 4:                                                             \
        __cmpxchg_user(_p,_o,_n,"l","k","r");                           \
        break;                                                          \
    case 8:                                                             \
        __cmpxchg_user(_p,_o,_n,"q","","r");                            \
        break;                                                          \
    }                                                                   \
    _rc;                                                                \
})
#endif

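/*
 * Usage sketch (not part of the original header): cmpxchg_user() returns
 * non-zero if the access to guest memory faulted.  On success, the old
 * value argument is overwritten with the value actually observed at the
 * target, so a caller can tell whether its update was applied.  The
 * function and parameter names are hypothetical.
 */
static inline int example_update_guest_word(unsigned long *gptr,
                                            unsigned long old,
                                            unsigned long new)
{
    if ( cmpxchg_user(gptr, old, new) )
        return -1; /* the access faulted */
    /*
     * 'old' now holds the value seen at *gptr; if it still equals the
     * value we passed in, 'new' was written successfully.
     */
    return 0;
}
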
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all Intel CPUs follow
 * what Intel calls a *Processor Order*, in which all writes are seen in
 * program order even outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering, but I'd also
 * expect them to finally get their act together and add some real memory
 * barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */
#if defined(__i386__)
#define mb()  __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#ifdef CONFIG_X86_OOSTORE
#define wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#endif
#elif defined(__x86_64__)
#define mb()  __asm__ __volatile__ ("mfence":::"memory")
#define rmb() __asm__ __volatile__ ("lfence":::"memory")
#ifdef CONFIG_X86_OOSTORE
#define wmb() __asm__ __volatile__ ("sfence":::"memory")
#endif
#endif

#ifndef CONFIG_X86_OOSTORE
#define wmb() __asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()  mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
#define smp_mb()  barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

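/*
 * Usage sketch (not part of the original header): the classic
 * producer/consumer pairing of smp_wmb() and smp_rmb().  The data/flag
 * layout and function names are hypothetical.
 */
static inline void example_publish(volatile unsigned long *data,
                                   volatile unsigned long *ready,
                                   unsigned long value)
{
    *data = value;
    smp_wmb();   /* make the data visible before the flag */
    *ready = 1;
}

static inline unsigned long example_consume(volatile unsigned long *data,
                                            volatile unsigned long *ready)
{
    while ( !*ready )
        ; /* spin; a real caller would back off or yield here */
    smp_rmb();   /* order the flag read before the data read */
    return *data;
}
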
/* interrupt control.. */
#if defined(__i386__)
#define __save_flags(x) __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */)
#define __restore_flags(x) __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc")
#elif defined(__x86_64__)
#define __save_flags(x) do { __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define __restore_flags(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
#endif
#define __cli() __asm__ __volatile__("cli": : :"memory")
#define __sti() __asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")

/* For spinlocks etc */
#if defined(__i386__)
#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
#define local_irq_restore(x) __restore_flags(x)
#elif defined(__x86_64__)
#define local_irq_save(x) do { __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_restore(x) __asm__ __volatile__("# local_irq_restore \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory")
#endif
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()

static inline int local_irq_is_enabled(void)
{
    unsigned long flags;
    __save_flags(flags);
    return !!(flags & (1<<9)); /* EFLAGS_IF */
}

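/*
 * Usage sketch (not part of the original header): the standard pattern
 * for a short critical section that must not be interrupted on the local
 * CPU.  The function name is hypothetical.
 */
static inline void example_irq_safe_update(unsigned long *counter)
{
    unsigned long flags;

    local_irq_save(flags);    /* save EFLAGS and disable interrupts */
    (*counter)++;             /* work that must not race with an IRQ */
    local_irq_restore(flags); /* restore EFLAGS, re-enabling IF if it was set */
}
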
#define BROKEN_ACPI_Sx 0x0001
#define BROKEN_INIT_AFTER_S1 0x0002

#endif