ia64/linux-2.6.18-xen.hg
drivers/xen/sfc_netback/ci/tools/platform/gcc_x86.h @ 847:ad4d307bf9ce

net sfc: Update sfc and sfc_resource driver to latest release

...and update sfc_netfront, sfc_netback, sfc_netutil for any API changes

sfc_netback: Fix asymmetric use of SFC buffer table alloc and free
sfc_netback: Clean up if no SFC accel device found
sfc_netback: Gracefully handle case where page grant fails
sfc_netback: Disable net acceleration if the physical link goes down
sfc_netfront: Less verbose error messages, more verbose counters for
rx discard errors
sfc_netfront: Gracefully handle case where SFC netfront fails during
initialisation

Signed-off-by: Kieran Mansley <kmansley@solarflare.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Mar 31 11:59:10 2009 +0100 (2009-03-31)
/****************************************************************************
 * Copyright 2002-2005: Level 5 Networks Inc.
 * Copyright 2005-2008: Solarflare Communications Inc,
 *                      9501 Jeronimo Road, Suite 250,
 *                      Irvine, CA 92618, USA
 *
 * Maintained by Solarflare Communications
 *  <linux-xen-drivers@solarflare.com>
 *  <onload-dev@solarflare.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 ****************************************************************************
 */

/*! \cidoxg_include_ci_tools_platform */

#ifndef __CI_TOOLS_GCC_X86_H__
#define __CI_TOOLS_GCC_X86_H__


/**********************************************************************
 * Free-running cycle counters.
 */

#define CI_HAVE_FRC64
#define CI_HAVE_FRC32

#define ci_frc32(pval)  __asm__ __volatile__("rdtsc" : "=a" (*pval) : : "edx")

#if defined(__x86_64__)
ci_inline void ci_frc64(ci_uint64* pval) {
  /* temp fix until we figure how to get this out in one bite */
  ci_uint64 low, high;
  __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high));
  *pval = (high << 32) | low;
}

#else
#define ci_frc64(pval)  __asm__ __volatile__("rdtsc" : "=A" (*pval))
#endif

#define ci_frc_flush()  /* ?? Need a pipeline barrier. */
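
/* Illustrative sketch (not part of the original header): a caller can
 * sample the free-running cycle counter before and after a code region
 * and subtract the two samples to get an approximate cycle count.  The
 * helper name below is hypothetical.
 */
ci_inline ci_uint64 ci_example_frc64_delta(void) {
  ci_uint64 start, end;
  ci_frc64(&start);
  /* ... code being timed would go here ... */
  ci_frc64(&end);
  return end - start;
}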

/**********************************************************************
 * Atomic integer.
 */

/*
** int  ci_atomic_read(a)         { return a->n;        }
** void ci_atomic_set(a, v)       { a->n = v;           }
** void ci_atomic_inc(a)          { ++a->n;             }
** void ci_atomic_dec(a)          { --a->n;             }
** int  ci_atomic_inc_and_test(a) { return ++a->n == 0; }
** int  ci_atomic_dec_and_test(a) { return --a->n == 0; }
** void ci_atomic_and(a, v)       { a->n &= v;          }
** void ci_atomic_or(a, v)        { a->n |= v;          }
*/

typedef struct { volatile ci_int32 n; } ci_atomic_t;

#define CI_ATOMIC_INITIALISER(i)  {(i)}

static inline ci_int32 ci_atomic_read(const ci_atomic_t* a) { return a->n; }
static inline void ci_atomic_set(ci_atomic_t* a, int v) { a->n = v; ci_wmb(); }

static inline void ci_atomic_inc(ci_atomic_t* a)
{ __asm__ __volatile__("lock; incl %0" : "+m" (a->n)); }

static inline void ci_atomic_dec(ci_atomic_t* a)
{ __asm__ __volatile__("lock; decl %0" : "+m" (a->n)); }

static inline int ci_atomic_inc_and_test(ci_atomic_t* a) {
  char r;
  __asm__ __volatile__("lock; incl %0; sete %1"
                       : "+m" (a->n), "=qm" (r));
  return r;
}

static inline int ci_atomic_dec_and_test(ci_atomic_t* a) {
  char r;
  __asm__ __volatile__("lock; decl %0; sete %1"
                       : "+m" (a->n), "=qm" (r));
  return r;
}

ci_inline int
ci_atomic_xadd (ci_atomic_t *a, int v) {
  __asm__ ("lock xadd %0, %1" : "=r" (v), "+m" (a->n) : "0" (v));
  return v;
}

ci_inline int
ci_atomic_xchg (ci_atomic_t *a, int v) {
  __asm__ ("lock xchg %0, %1" : "=r" (v), "+m" (a->n) : "0" (v));
  return v;
}

ci_inline void ci_atomic32_or(volatile ci_uint32* p, ci_uint32 mask)
{ __asm__ __volatile__("lock; orl %1, %0" : "+m" (*p) : "ir" (mask)); }

ci_inline void ci_atomic32_and(volatile ci_uint32* p, ci_uint32 mask)
{ __asm__ __volatile__("lock; andl %1, %0" : "+m" (*p) : "ir" (mask)); }

ci_inline void ci_atomic32_add(volatile ci_uint32* p, ci_uint32 v)
{ __asm__ __volatile__("lock; addl %1, %0" : "+m" (*p) : "ir" (v)); }

ci_inline void ci_atomic32_inc(volatile ci_uint32* p)
{ __asm__ __volatile__("lock; incl %0" : "+m" (*p)); }

ci_inline int ci_atomic32_dec_and_test(volatile ci_uint32* p) {
  char r;
  __asm__ __volatile__("lock; decl %0; sete %1" : "+m" (*p), "=qm" (r));
  return r;
}

#define ci_atomic_or(a, v)   ci_atomic32_or ((ci_uint32*) &(a)->n, (v))
#define ci_atomic_and(a, v)  ci_atomic32_and((ci_uint32*) &(a)->n, (v))
#define ci_atomic_add(a, v)  ci_atomic32_add((ci_uint32*) &(a)->n, (v))
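
/* Illustrative sketch (not part of the original header): a simple
 * reference count built on ci_atomic_t, showing the intended calling
 * pattern of the operations above.  The helper names are hypothetical.
 */
typedef struct { ci_atomic_t refs; } ci_example_refcounted_t;

ci_inline void ci_example_ref_init(ci_example_refcounted_t* o)
{ ci_atomic_set(&o->refs, 1); }

ci_inline void ci_example_ref_get(ci_example_refcounted_t* o)
{ ci_atomic_inc(&o->refs); }

/* Returns non-zero when the last reference has been dropped and the
 * object may be freed by the caller. */
ci_inline int ci_example_ref_put(ci_example_refcounted_t* o)
{ return ci_atomic_dec_and_test(&o->refs); }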

extern int ci_glibc_uses_nptl (void) CI_HF;
extern int ci_glibc_nptl_broken(void) CI_HF;
extern int ci_glibc_gs_get_is_multihreaded_offset (void) CI_HF;
extern int ci_glibc_gs_is_multihreaded_offset CI_HV;

#if !defined(__x86_64__)
#ifdef __GLIBC__

/* Returns non-zero if the calling process might be multithreaded, returns 0
 * if it definitely isn't (i.e. if reimplementing this function for other
 * architectures and platforms, you can safely just return 1).
 */
static inline int ci_is_multithreaded (void) {

  while (1) {
    if (ci_glibc_gs_is_multihreaded_offset >= 0) {
      /* NPTL keeps a variable that tells us this hanging off gs (i.e. in
       * thread-local storage); just return this
       */
      int r;
      __asm__ __volatile__ ("movl %%gs:(%1), %0"
                            : "=r" (r)
                            : "r" (ci_glibc_gs_is_multihreaded_offset));
      return r;
    }

    if (ci_glibc_gs_is_multihreaded_offset == -2) {
      /* This means we've already determined that the libc version is NOT
       * good for our funky "is multithreaded" hack
       */
      return 1;
    }

    /* If we get here, it means this is the first time the function has been
     * called -- detect the libc version and go around again.
     */
    ci_glibc_gs_is_multihreaded_offset = ci_glibc_gs_get_is_multihreaded_offset ();

    /* Go around again.  We do the test here rather than at the top so that
     * we go quicker in the common case.
     */
  }
}

#else /* def __GLIBC__ */

#define ci_is_multithreaded() 1  /* ?? Is there a POSIX way of finding out */
                                 /*    whether the application is single-  */
                                 /*    threaded?                           */

#endif /* def __GLIBC__ */

#else /* defined __x86_64__ */

static inline int ci_is_multithreaded (void) {
  /* No easy way to tell on x86_64; so assume we're multithreaded */
  return 1;
}

#endif /* defined __x86_64__ */
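
/* Illustrative sketch (not part of the original header): one plausible use
 * of ci_is_multithreaded() is to let a hot path skip lock-prefixed
 * operations when the process is known to be single-threaded.  The helper
 * name is hypothetical.
 */
ci_inline void ci_example_counter_inc(ci_atomic_t* counter) {
  if( ci_is_multithreaded() )
    ci_atomic_inc(counter);   /* locked increment: safe against races */
  else
    ci_atomic_set(counter, ci_atomic_read(counter) + 1);  /* no lock needed */
}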


/**********************************************************************
 * Compare and swap.
 */

#define CI_HAVE_COMPARE_AND_SWAP

ci_inline int ci_cas32_succeed(volatile ci_int32* p, ci_int32 oldval,
                               ci_int32 newval) {
  char ret;
  ci_int32 prevval;
  __asm__ __volatile__("lock; cmpxchgl %3, %1; sete %0"
                       : "=q"(ret), "+m"(*p), "=a"(prevval)
                       : "r"(newval), "a"(oldval));
  return ret;
}

ci_inline int ci_cas32_fail(volatile ci_int32* p, ci_int32 oldval,
                            ci_int32 newval) {
  char ret;
  ci_int32 prevval;
  __asm__ __volatile__("lock; cmpxchgl %3, %1; setne %0"
                       : "=q"(ret), "+m"(*p), "=a"(prevval)
                       : "r"(newval), "a"(oldval));
  return ret;
}

#ifdef __x86_64__
ci_inline int ci_cas64_succeed(volatile ci_int64* p, ci_int64 oldval,
                               ci_int64 newval) {
  char ret;
  ci_int64 prevval;
  __asm__ __volatile__("lock; cmpxchgq %3, %1; sete %0"
                       : "=q"(ret), "+m"(*p), "=a"(prevval)
                       : "r"(newval), "a"(oldval));
  return ret;
}

ci_inline int ci_cas64_fail(volatile ci_int64* p, ci_int64 oldval,
                            ci_int64 newval) {
  char ret;
  ci_int64 prevval;
  __asm__ __volatile__("lock; cmpxchgq %3, %1; setne %0"
                       : "=q"(ret), "+m"(*p), "=a"(prevval)
                       : "r"(newval), "a"(oldval));
  return ret;
}
#endif

ci_inline int ci_cas32u_succeed(volatile ci_uint32* p, ci_uint32 oldval, ci_uint32 newval) {
  char ret;
  ci_uint32 prevval;
  __asm__ __volatile__("lock; cmpxchgl %3, %1; sete %0"
                       : "=q"(ret), "+m"(*p), "=a"(prevval)
                       : "r"(newval), "a"(oldval));
  return ret;
}

ci_inline int ci_cas32u_fail(volatile ci_uint32* p, ci_uint32 oldval, ci_uint32 newval) {
  char ret;
  ci_uint32 prevval;
  __asm__ __volatile__("lock; cmpxchgl %3, %1; setne %0"
                       : "=q"(ret), "+m"(*p), "=a"(prevval)
                       : "r"(newval), "a"(oldval));
  return ret;
}

ci_inline int ci_cas64u_succeed(volatile ci_uint64* p, ci_uint64 oldval,
                                ci_uint64 newval) {
  char ret;
  ci_uint64 prevval;
  __asm__ __volatile__("lock; cmpxchgq %3, %1; sete %0"
                       : "=q"(ret), "+m"(*p), "=a"(prevval)
                       : "r"(newval), "a"(oldval));
  return ret;
}

ci_inline int ci_cas64u_fail(volatile ci_uint64* p, ci_uint64 oldval,
                             ci_uint64 newval) {
  char ret;
  ci_uint64 prevval;
  __asm__ __volatile__("lock; cmpxchgq %3, %1; setne %0"
                       : "=q"(ret), "+m"(*p), "=a"(prevval)
                       : "r"(newval), "a"(oldval));
  return ret;
}

#ifdef __x86_64__

# define ci_cas_uintptr_succeed(p,o,n)                          \
    ci_cas64u_succeed((volatile ci_uint64*) (p), (o), (n))
# define ci_cas_uintptr_fail(p,o,n)                             \
    ci_cas64u_fail((volatile ci_uint64*) (p), (o), (n))

#else

# define ci_cas_uintptr_succeed(p,o,n)                          \
    ci_cas32u_succeed((volatile ci_uint32*) (p), (o), (n))
# define ci_cas_uintptr_fail(p,o,n)                             \
    ci_cas32u_fail((volatile ci_uint32*) (p), (o), (n))

#endif
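
/* Illustrative sketch (not part of the original header): the classic
 * compare-and-swap retry loop, here adding a value to a 32-bit word
 * without any lock.  ci_cas32_fail() returns non-zero when another thread
 * changed the word between the read and the cmpxchg, in which case we
 * re-read and retry.  The helper name is hypothetical.
 */
ci_inline void ci_example_cas_add32(volatile ci_int32* p, ci_int32 v) {
  ci_int32 old;
  do
    old = *p;
  while( ci_cas32_fail(p, old, old + v) );
}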


/**********************************************************************
 * Atomic bit field.
 */

typedef ci_uint32  ci_bits;
#define CI_BITS_N  32u

#define CI_BITS_DECLARE(name, n)                        \
  ci_bits name[((n) + CI_BITS_N - 1u) / CI_BITS_N]

ci_inline void ci_bits_clear_all(volatile ci_bits* b, int n_bits)
{ memset((void*) b, 0, (n_bits+CI_BITS_N-1u) / CI_BITS_N * sizeof(ci_bits)); }

ci_inline void ci_bit_set(volatile ci_bits* b, int i) {
  __asm__ __volatile__("lock; btsl %1, %0"
                       : "+m" (*b)
                       : "Ir" (i));
}

ci_inline void ci_bit_clear(volatile ci_bits* b, int i) {
  __asm__ __volatile__("lock; btrl %1, %0"
                       : "+m" (*b)
                       : "Ir" (i));
}

ci_inline int ci_bit_test(volatile ci_bits* b, int i) {
  char rc;
  __asm__("btl %2, %1; setc %0"
          : "=q" (rc)
          : "m" (*b), "Ir" (i));
  return rc;
}

ci_inline int ci_bit_test_and_set(volatile ci_bits* b, int i) {
  char rc;
  __asm__ __volatile__("lock; btsl %2, %1; setc %0"
                       : "=q" (rc), "+m" (*b)
                       : "Ir" (i));
  return rc;
}

ci_inline int ci_bit_test_and_clear(volatile ci_bits* b, int i) {
  char rc;
  __asm__ __volatile__("lock; btrl %2, %1; setc %0"
                       : "=q" (rc), "+m" (*b)
                       : "Ir" (i));
  return rc;
}

/* These mask ops only work within a single ci_bits word. */
#define ci_bit_mask_set(b,m)    ci_atomic32_or((b), (m))
#define ci_bit_mask_clear(b,m)  ci_atomic32_and((b), ~(m))
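
/* Illustrative sketch (not part of the original header): using the bit
 * field primitives to implement a tiny slot allocator.  A set bit means
 * the slot is in use; ci_bit_test_and_set() returns the previous value,
 * so a zero return means the caller has claimed the slot.  All names
 * other than the CI_BITS_* / ci_bit_* primitives are hypothetical.
 */
#define CI_EXAMPLE_N_SLOTS  64

typedef struct {
  CI_BITS_DECLARE(in_use, CI_EXAMPLE_N_SLOTS);
} ci_example_slot_pool_t;

ci_inline void ci_example_slot_pool_init(ci_example_slot_pool_t* pool)
{ ci_bits_clear_all(pool->in_use, CI_EXAMPLE_N_SLOTS); }

/* Returns a claimed slot index, or -1 if every slot is busy. */
ci_inline int ci_example_slot_alloc(ci_example_slot_pool_t* pool) {
  int i;
  for( i = 0; i < CI_EXAMPLE_N_SLOTS; ++i )
    if( ! ci_bit_test_and_set(pool->in_use, i) )
      return i;
  return -1;
}

ci_inline void ci_example_slot_free(ci_example_slot_pool_t* pool, int i)
{ ci_bit_clear(pool->in_use, i); }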


/**********************************************************************
 * Misc.
 */

#if __GNUC__ >= 3
# define ci_spinloop_pause()  __asm__("pause")
#else
# define ci_spinloop_pause()  __asm__(".byte 0xf3, 0x90")
#endif


#define CI_HAVE_ADDC32
#define ci_add_carry32(sum, v)  __asm__("addl %1, %0 ;"                 \
                                        "adcl $0, %0 ;"                 \
                                        : "=r" (sum)                    \
                                        : "g" ((ci_uint32) v), "0" (sum))

#endif  /* __CI_TOOLS_GCC_X86_H__ */

/*! \cidoxg_end */