ia64/xen-unstable

xen/include/asm-x86/system.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use vcpu::max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author  Isaku Yamahata <yamahata@valinux.co.jp>
date    Mon Jun 29 11:26:05 2009 +0900

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <xen/lib.h>
#include <asm/bitops.h>

#define read_segment_register(name)                             \
({  u16 __sel;                                                  \
    asm volatile ( "movw %%" STR(name) ",%0" : "=r" (__sel) );  \
    __sel;                                                      \
})

#define wbinvd() \
    asm volatile ( "wbinvd" : : : "memory" )

#define clflush(a) \
    asm volatile ( "clflush (%0)" : : "r"(a) )

#define nop() \
    asm volatile ( "nop" )

#define xchg(ptr,v) \
    ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
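
/*
 * Descriptive note (not in the original header): casting the pointer through
 * __xg() below makes the "m" asm operand refer to a large dummy object, so
 * the compiler assumes the asm may touch the whole target rather than a
 * single known-size word, and will not cache it across the asm statement.
 */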
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#if defined(__i386__)
# include <asm/x86_32/system.h>
#elif defined(__x86_64__)
# include <asm/x86_64/system.h>
#endif

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *   but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static always_inline unsigned long __xchg(
    unsigned long x, volatile void *ptr, int size)
{
    switch ( size )
    {
    case 1:
        asm volatile ( "xchgb %b0,%1"
                       : "=q" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
    case 2:
        asm volatile ( "xchgw %w0,%1"
                       : "=r" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
#if defined(__i386__)
    case 4:
        asm volatile ( "xchgl %0,%1"
                       : "=r" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
#elif defined(__x86_64__)
    case 4:
        asm volatile ( "xchgl %k0,%1"
                       : "=r" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
    case 8:
        asm volatile ( "xchgq %0,%1"
                       : "=r" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
#endif
    }
    return x;
}
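
/*
 * Illustrative sketch (not part of the original header): xchg() used as a
 * bare test-and-set lock.  The lock word and function names are hypothetical;
 * Xen's real spinlocks are implemented elsewhere.
 */
static inline void example_test_and_set_lock(volatile unsigned long *lock_word)
{
    /* Atomically store 1 and fetch the old value; spin while it was taken. */
    while ( xchg(lock_word, 1UL) != 0UL )
        ; /* spin */
}

static inline void example_test_and_set_unlock(volatile unsigned long *lock_word)
{
    /* xchg() is a full barrier, so prior critical-section stores are visible. */
    xchg(lock_word, 0UL);
}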

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

static always_inline unsigned long __cmpxchg(
    volatile void *ptr, unsigned long old, unsigned long new, int size)
{
    unsigned long prev;
    switch ( size )
    {
    case 1:
        asm volatile ( LOCK_PREFIX "cmpxchgb %b1,%2"
                       : "=a" (prev)
                       : "q" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
    case 2:
        asm volatile ( LOCK_PREFIX "cmpxchgw %w1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
#if defined(__i386__)
    case 4:
        asm volatile ( LOCK_PREFIX "cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
#elif defined(__x86_64__)
    case 4:
        asm volatile ( LOCK_PREFIX "cmpxchgl %k1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
    case 8:
        asm volatile ( LOCK_PREFIX "cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
#endif
    }
    return old;
}

#define __HAVE_ARCH_CMPXCHG
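
/*
 * Illustrative sketch (not part of the original header): a lock-free add
 * built directly on __cmpxchg().  The counter and function names are
 * hypothetical; callers normally go through the type-generic cmpxchg()
 * wrapper provided by the arch-specific headers included above.
 */
static inline void example_cmpxchg_add(volatile unsigned long *counter,
                                       unsigned long delta)
{
    unsigned long old, prev;

    do {
        old  = *counter;
        prev = __cmpxchg(counter, old, old + delta, sizeof(*counter));
    } while ( prev != old ); /* another CPU raced with us; retry */
}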

/*
 * Both Intel and AMD agree that, from a programmer's viewpoint:
 *  Loads cannot be reordered relative to other loads.
 *  Stores cannot be reordered relative to other stores.
 *
 * Intel64 Architecture Memory Ordering White Paper
 * <http://developer.intel.com/products/processor/manuals/318147.pdf>
 *
 * AMD64 Architecture Programmer's Manual, Volume 2: System Programming
 * <http://www.amd.com/us-en/assets/content_type/\
 *  white_papers_and_tech_docs/24593.pdf>
 */
#define rmb()           barrier()
#define wmb()           barrier()

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#endif
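
/*
 * Illustrative sketch (not part of the original header): the usual
 * producer/consumer pairing of smp_wmb()/smp_rmb().  All names here are
 * hypothetical.
 */
static inline void example_publish(unsigned long *payload,
                                   volatile unsigned long *ready,
                                   unsigned long value)
{
    *payload = value;
    smp_wmb();      /* order the payload store before the flag store */
    *ready = 1;
}

static inline unsigned long example_consume(const unsigned long *payload,
                                            const volatile unsigned long *ready)
{
    while ( *ready == 0 )
        ; /* wait for the producer */
    smp_rmb();      /* order the flag read before the payload read */
    return *payload;
}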

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define local_irq_disable()     asm volatile ( "cli" : : : "memory" )
#define local_irq_enable()      asm volatile ( "sti" : : : "memory" )

/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt()     asm volatile ( "sti; hlt" : : : "memory" )
/* used when interrupts are already enabled or to shutdown the processor */
#define halt()          asm volatile ( "hlt" : : : "memory" )
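
/*
 * Illustrative sketch (not part of the original header): why safe_halt()
 * exists.  Interrupts are only recognised after the instruction following
 * "sti", so the "sti; hlt" pair cannot lose a wakeup interrupt that arrives
 * in between.  The function name is hypothetical.
 */
static inline void example_idle_wait(void)
{
    local_irq_disable();    /* close the race window before checking for work */
    /* ... check for pending work here and return early if there is any ... */
    safe_halt();            /* atomically re-enable interrupts and halt */
}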

#define local_save_flags(x)                                      \
({                                                               \
    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
    asm volatile ( "pushf" __OS " ; pop" __OS " %0" : "=g" (x)); \
})
#define local_irq_save(x)                                        \
({                                                               \
    local_save_flags(x);                                         \
    local_irq_disable();                                         \
})
#define local_irq_restore(x)                                     \
({                                                               \
    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
    asm volatile ( "push" __OS " %0 ; popf" __OS                 \
                   : : "g" (x) : "memory", "cc" );               \
})
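
/*
 * Illustrative sketch (not part of the original header): the usual
 * save/disable/restore pattern.  local_irq_restore() only re-enables
 * interrupts if they were enabled when the flags were saved, so the
 * pattern nests safely.  Names are hypothetical.
 */
static inline void example_irq_safe_increment(unsigned long *counter)
{
    unsigned long flags;

    local_irq_save(flags);      /* save EFLAGS and disable interrupts */
    (*counter)++;               /* must not race with this CPU's interrupt handlers */
    local_irq_restore(flags);   /* restore the previous interrupt state */
}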

static inline int local_irq_is_enabled(void)
{
    unsigned long flags;
    local_save_flags(flags);
    return !!(flags & (1<<9)); /* EFLAGS_IF */
}
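
/*
 * Illustrative sketch (not part of the original header): a typical caller-side
 * sanity check built on local_irq_is_enabled().  ASSERT() comes from
 * <xen/lib.h>, included above; the function name is hypothetical.
 */
static inline void example_assert_irqs_disabled(void)
{
    ASSERT(!local_irq_is_enabled());    /* EFLAGS.IF must be clear here */
}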

#define BROKEN_ACPI_Sx          0x0001
#define BROKEN_INIT_AFTER_S1    0x0002

void trap_init(void);
void percpu_traps_init(void);
void subarch_percpu_traps_init(void);

#endif