patches/linux-2.6.11/smp-alts.patch @ 5517:10e9028c8e3d

bitkeeper revision 1.1718.1.10 (42b7b19aqOS_1M8I4pIOFjiTPYWV-g)

Merge bk://xenbits.xensource.com/xen-unstable.bk
into spot.cl.cam.ac.uk:C:/Documents and Settings/iap10/xen-unstable.bk
author iap10@spot.cl.cam.ac.uk
date Tue Jun 21 06:20:10 2005 +0000 (2005-06-21)
parents 999293916aa7
children 5ead5ffa04c2 bd1642e8599e 1c119c875f21 32fb371cc283
diff -Naur linux-2.6.11/arch/i386/Kconfig linux-2.6.11.post/arch/i386/Kconfig
--- linux-2.6.11/arch/i386/Kconfig 2005-03-02 07:37:49.000000000 +0000
+++ linux-2.6.11.post/arch/i386/Kconfig 2005-06-10 13:42:35.000000000 +0100
@@ -481,6 +481,19 @@

If you don't know what to do here, say N.

+config SMP_ALTERNATIVES
+ bool "SMP alternatives support (EXPERIMENTAL)"
+ depends on SMP && EXPERIMENTAL
+ help
+ Try to reduce the overhead of running an SMP kernel on a uniprocessor
+ host slightly by replacing certain key instruction sequences
+ according to whether we currently have more than one CPU available.
+ This should provide a noticeable boost to performance when
+ running SMP kernels on UP machines, and have negligible impact
+ when running on a true SMP host.
+
+ If unsure, say N.
+
config NR_CPUS
int "Maximum number of CPUs (2-255)"
range 2 255
diff -Naur linux-2.6.11/arch/i386/kernel/Makefile linux-2.6.11.post/arch/i386/kernel/Makefile
--- linux-2.6.11/arch/i386/kernel/Makefile 2005-03-02 07:37:49.000000000 +0000
+++ linux-2.6.11.post/arch/i386/kernel/Makefile 2005-06-16 11:16:18.555332435 +0100
@@ -32,6 +32,7 @@
obj-$(CONFIG_HPET_TIMER) += time_hpet.o
obj-$(CONFIG_EFI) += efi.o efi_stub.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_SMP_ALTERNATIVES) += smpalts.o

EXTRA_AFLAGS := -traditional

diff -Naur linux-2.6.11/arch/i386/kernel/smpalts.c linux-2.6.11.post/arch/i386/kernel/smpalts.c
--- linux-2.6.11/arch/i386/kernel/smpalts.c 1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.11.post/arch/i386/kernel/smpalts.c 2005-06-16 11:23:39.300902424 +0100
@@ -0,0 +1,76 @@
+#include <linux/kernel.h>
+#include <asm/system.h>
+#include <asm/smp_alt.h>
+#include <asm/processor.h>
+#include <asm/string.h>
+
+struct smp_replacement_record {
+ unsigned char targ_size;
+ unsigned char smp1_size;
+ unsigned char smp2_size;
+ unsigned char up_size;
+ unsigned char feature;
+ unsigned char data[0];
+};
+
+struct smp_alternative_record {
+ void *targ_start;
+ struct smp_replacement_record *repl;
+};
+
+extern struct smp_alternative_record __start_smp_alternatives_table,
+ __stop_smp_alternatives_table;
+
+void prepare_for_smp(void)
+{
+ struct smp_alternative_record *r;
+ printk(KERN_INFO "Enabling SMP...\n");
+ for (r = &__start_smp_alternatives_table;
+ r != &__stop_smp_alternatives_table;
+ r++) {
+ BUG_ON(r->repl->targ_size < r->repl->smp1_size);
+ BUG_ON(r->repl->targ_size < r->repl->smp2_size);
+ BUG_ON(r->repl->targ_size < r->repl->up_size);
+ if (r->repl->feature != (unsigned char)-1 &&
+ boot_cpu_has(r->repl->feature)) {
+ memcpy(r->targ_start,
+ r->repl->data + r->repl->smp1_size,
+ r->repl->smp2_size);
+ memset(r->targ_start + r->repl->smp2_size,
+ 0x90,
+ r->repl->targ_size - r->repl->smp2_size);
+ } else {
+ memcpy(r->targ_start,
+ r->repl->data,
+ r->repl->smp1_size);
+ memset(r->targ_start + r->repl->smp1_size,
+ 0x90,
+ r->repl->targ_size - r->repl->smp1_size);
+ }
+ }
+ /* Paranoia */
+ asm volatile ("jmp 1f\n1:");
+ mb();
+}
+
+void unprepare_for_smp(void)
+{
+ struct smp_alternative_record *r;
+ printk(KERN_INFO "Disabling SMP...\n");
+ for (r = &__start_smp_alternatives_table;
+ r != &__stop_smp_alternatives_table;
+ r++) {
+ BUG_ON(r->repl->targ_size < r->repl->smp1_size);
+ BUG_ON(r->repl->targ_size < r->repl->smp2_size);
+ BUG_ON(r->repl->targ_size < r->repl->up_size);
+ memcpy(r->targ_start,
+ r->repl->data + r->repl->smp1_size + r->repl->smp2_size,
+ r->repl->up_size);
+ memset(r->targ_start + r->repl->up_size,
+ 0x90,
+ r->repl->targ_size - r->repl->up_size);
+ }
+ /* Paranoia */
+ asm volatile ("jmp 1f\n1:");
+ mb();
+}
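
Concretely, each record in the table above pairs a patch site (targ_start) with up to three byte sequences laid out back to back in data[]: a generic SMP form (smp1), an optional feature-dependent SMP form (smp2, chosen when feature != -1 and boot_cpu_has(feature) is true), and a uniprocessor form (up). The chosen form is copied over the site and the leftover bytes are padded with 0x90 (one-byte nop). The following user-space sketch replays that copy-and-pad logic on a plain byte buffer, using the LOCK record from smp_alt.h as the worked example; struct repl and patch_site() are illustrative stand-ins, not kernel API.

#include <stdio.h>
#include <string.h>

struct repl {
    unsigned char targ_size;  /* bytes available at the patch site */
    unsigned char smp1_size;  /* generic SMP form */
    unsigned char smp2_size;  /* feature-dependent SMP form */
    unsigned char up_size;    /* uniprocessor form */
    unsigned char feature;    /* -1: no feature-dependent form */
    unsigned char data[8];    /* smp1 bytes, then smp2, then up */
};

/* Copy the chosen form over the site and pad the rest with nops,
 * exactly as prepare_for_smp()/unprepare_for_smp() do. */
static void patch_site(unsigned char *targ, const struct repl *r,
                       unsigned off, unsigned len)
{
    memcpy(targ, r->data + off, len);
    memset(targ + len, 0x90, r->targ_size - len);
}

int main(void)
{
    /* The LOCK record: 1-byte site, smp1 = 0xf0 (lock prefix),
     * no smp2, up = 0x90 (nop), no feature test. */
    struct repl r = { 1, 1, 0, 1, (unsigned char)-1, { 0xf0, 0x90 } };
    unsigned char site[1] = { 0x90 };

    patch_site(site, &r, 0, r.smp1_size);                        /* going SMP */
    printf("SMP byte: %#x\n", site[0]);                          /* 0xf0 */
    patch_site(site, &r, r.smp1_size + r.smp2_size, r.up_size);  /* back to UP */
    printf("UP byte:  %#x\n", site[0]);                          /* 0x90 */
    return 0;
}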
diff -Naur linux-2.6.11/arch/i386/kernel/smpboot.c linux-2.6.11.post/arch/i386/kernel/smpboot.c
--- linux-2.6.11/arch/i386/kernel/smpboot.c 2005-03-02 07:38:09.000000000 +0000
+++ linux-2.6.11.post/arch/i386/kernel/smpboot.c 2005-06-16 11:17:09.287064617 +0100
@@ -1003,6 +1003,11 @@
if (max_cpus <= cpucount+1)
continue;

+#ifdef CONFIG_SMP_ALTERNATIVES
+ if (kicked == 1)
+ prepare_for_smp();
+#endif
+
if (do_boot_cpu(apicid))
printk("CPU #%d not responding - cannot use it.\n",
apicid);
@@ -1118,6 +1123,11 @@
return -EIO;
}

+#ifdef CONFIG_SMP_ALTERNATIVES
+ if (num_online_cpus() == 1)
+ prepare_for_smp();
+#endif
+
local_irq_enable();
/* Unleash the CPU! */
cpu_set(cpu, smp_commenced_mask);
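
Both hooks guard the rewrite so it runs exactly once, on the transition from one online CPU to two: kicked == 1 in the boot-time loop (only the boot CPU counted so far) and num_online_cpus() == 1 on the hotplug path. The matching unprepare_for_smp() call on removal of the last extra CPU is not part of this excerpt. A minimal sketch of that guard, with an illustrative counter standing in for the kernel's CPU accounting:

#include <stdio.h>

static int online_cpus = 1;            /* boot CPU only */

static void cpu_up_sketch(void)
{
    if (online_cpus == 1)              /* cf. num_online_cpus() == 1 */
        printf("prepare_for_smp()\n"); /* patch text before CPU #2 runs */
    online_cpus++;
}

static void cpu_down_sketch(void)
{
    if (--online_cpus == 1)            /* back to a single CPU */
        printf("unprepare_for_smp()\n");
}

int main(void)
{
    cpu_up_sketch();    /* 1 -> 2: rewrite to SMP forms */
    cpu_up_sketch();    /* 2 -> 3: nothing to do */
    cpu_down_sketch();  /* 3 -> 2: nothing to do */
    cpu_down_sketch();  /* 2 -> 1: rewrite back to UP forms */
    return 0;
}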
diff -Naur linux-2.6.11/arch/i386/kernel/vmlinux.lds.S linux-2.6.11.post/arch/i386/kernel/vmlinux.lds.S
--- linux-2.6.11/arch/i386/kernel/vmlinux.lds.S 2005-03-02 07:38:37.000000000 +0000
+++ linux-2.6.11.post/arch/i386/kernel/vmlinux.lds.S 2005-06-10 11:14:14.000000000 +0100
@@ -30,6 +30,13 @@
__ex_table : { *(__ex_table) }
__stop___ex_table = .;

+ . = ALIGN(16);
+ __start_smp_alternatives_table = .;
+ __smp_alternatives : { *(__smp_alternatives) }
+ __stop_smp_alternatives_table = .;
+
+ __smp_replacements : { *(__smp_replacements) }
+
RODATA

/* writeable */
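
The linker-script change gathers every record emitted into the __smp_alternatives section between the two marker symbols that smpalts.c declares extern, so the scattered per-site records become one contiguous, walkable table (the replacement bodies land in the adjacent __smp_replacements section). A user-space sketch of the same linker-table idiom, relying on GNU ld's automatically generated __start_<section>/__stop_<section> symbols rather than explicit script edits:

#include <stdio.h>

struct entry { int value; };

/* Drop an entry into the "mytable" section; "used" keeps the
 * otherwise-unreferenced statics alive. */
#define TABLE_ENTRY(v) \
    static const struct entry entry_##v \
    __attribute__((section("mytable"), used)) = { v }

TABLE_ENTRY(1);
TABLE_ENTRY(2);
TABLE_ENTRY(3);

/* GNU ld synthesizes these for section names that are valid C
 * identifiers; the kernel script above defines them explicitly. */
extern const struct entry __start_mytable[], __stop_mytable[];

int main(void)
{
    const struct entry *e;
    for (e = __start_mytable; e != __stop_mytable; e++)
        printf("entry: %d\n", e->value);
    return 0;
}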
diff -Naur linux-2.6.11/include/asm-i386/atomic.h linux-2.6.11.post/include/asm-i386/atomic.h
--- linux-2.6.11/include/asm-i386/atomic.h 2005-03-02 07:37:51.000000000 +0000
+++ linux-2.6.11.post/include/asm-i386/atomic.h 2005-06-13 10:10:39.000000000 +0100
@@ -4,18 +4,13 @@
#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/processor.h>
+#include <asm/smp_alt.h>

/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/

-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
/*
* Make sure gcc doesn't try to be clever and move things around
* on us. We need to use _exactly_ the address the user gave us,
diff -Naur linux-2.6.11/include/asm-i386/bitops.h linux-2.6.11.post/include/asm-i386/bitops.h
--- linux-2.6.11/include/asm-i386/bitops.h 2005-03-02 07:38:12.000000000 +0000
+++ linux-2.6.11.post/include/asm-i386/bitops.h 2005-06-13 10:11:54.000000000 +0100
@@ -7,6 +7,7 @@

#include <linux/config.h>
#include <linux/compiler.h>
+#include <asm/smp_alt.h>

/*
* These have to be done with inline assembly: that way the bit-setting
@@ -16,12 +17,6 @@
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/

-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
-
#define ADDR (*(volatile long *) addr)

/**
@@ -41,7 +36,7 @@
*/
static inline void set_bit(int nr, volatile unsigned long * addr)
{
- __asm__ __volatile__( LOCK_PREFIX
+ __asm__ __volatile__( LOCK
"btsl %1,%0"
:"=m" (ADDR)
:"Ir" (nr));
@@ -76,7 +71,7 @@
*/
static inline void clear_bit(int nr, volatile unsigned long * addr)
{
- __asm__ __volatile__( LOCK_PREFIX
+ __asm__ __volatile__( LOCK
"btrl %1,%0"
:"=m" (ADDR)
:"Ir" (nr));
@@ -121,7 +116,7 @@
*/
static inline void change_bit(int nr, volatile unsigned long * addr)
{
- __asm__ __volatile__( LOCK_PREFIX
+ __asm__ __volatile__( LOCK
"btcl %1,%0"
:"=m" (ADDR)
:"Ir" (nr));
@@ -140,7 +135,7 @@
{
int oldbit;

- __asm__ __volatile__( LOCK_PREFIX
+ __asm__ __volatile__( LOCK
"btsl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"Ir" (nr) : "memory");
@@ -180,7 +175,7 @@
{
int oldbit;

- __asm__ __volatile__( LOCK_PREFIX
+ __asm__ __volatile__( LOCK
"btrl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"Ir" (nr) : "memory");
@@ -231,7 +226,7 @@
{
int oldbit;

- __asm__ __volatile__( LOCK_PREFIX
+ __asm__ __volatile__( LOCK
"btcl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"Ir" (nr) : "memory");
diff -Naur linux-2.6.11/include/asm-i386/rwsem.h linux-2.6.11.post/include/asm-i386/rwsem.h
--- linux-2.6.11/include/asm-i386/rwsem.h 2005-03-02 07:38:08.000000000 +0000
+++ linux-2.6.11.post/include/asm-i386/rwsem.h 2005-06-13 10:13:06.000000000 +0100
@@ -40,6 +40,7 @@

#include <linux/list.h>
#include <linux/spinlock.h>
+#include <asm/smp_alt.h>

struct rwsem_waiter;

@@ -99,7 +100,7 @@
{
__asm__ __volatile__(
"# beginning down_read\n\t"
-LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */
+LOCK " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */
" js 2f\n\t" /* jump if we weren't granted the lock */
"1:\n\t"
LOCK_SECTION_START("")
@@ -130,7 +131,7 @@
" movl %1,%2\n\t"
" addl %3,%2\n\t"
" jle 2f\n\t"
-LOCK_PREFIX " cmpxchgl %2,%0\n\t"
+LOCK " cmpxchgl %2,%0\n\t"
" jnz 1b\n\t"
"2:\n\t"
"# ending __down_read_trylock\n\t"
@@ -150,7 +151,7 @@
tmp = RWSEM_ACTIVE_WRITE_BIAS;
__asm__ __volatile__(
"# beginning down_write\n\t"
-LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
+LOCK " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
" testl %%edx,%%edx\n\t" /* was the count 0 before? */
" jnz 2f\n\t" /* jump if we weren't granted the lock */
"1:\n\t"
@@ -188,7 +189,7 @@
__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
__asm__ __volatile__(
"# beginning __up_read\n\t"
-LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
+LOCK " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
" js 2f\n\t" /* jump if the lock is being waited upon */
"1:\n\t"
LOCK_SECTION_START("")
@@ -214,7 +215,7 @@
__asm__ __volatile__(
"# beginning __up_write\n\t"
" movl %2,%%edx\n\t"
-LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
+LOCK " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
" jnz 2f\n\t" /* jump if the lock is being waited upon */
"1:\n\t"
LOCK_SECTION_START("")
@@ -239,7 +240,7 @@
{
__asm__ __volatile__(
"# beginning __downgrade_write\n\t"
-LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+LOCK " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
" js 2f\n\t" /* jump if the lock is being waited upon */
"1:\n\t"
LOCK_SECTION_START("")
@@ -263,7 +264,7 @@
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
__asm__ __volatile__(
-LOCK_PREFIX "addl %1,%0"
+LOCK "addl %1,%0"
: "=m"(sem->count)
: "ir"(delta), "m"(sem->count));
}
@@ -276,7 +277,7 @@
int tmp = delta;

__asm__ __volatile__(
-LOCK_PREFIX "xadd %0,(%2)"
+LOCK "xadd %0,(%2)"
: "+r"(tmp), "=m"(sem->count)
: "r"(sem), "m"(sem->count)
: "memory");
diff -Naur linux-2.6.11/include/asm-i386/smp_alt.h linux-2.6.11.post/include/asm-i386/smp_alt.h
--- linux-2.6.11/include/asm-i386/smp_alt.h 1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.11.post/include/asm-i386/smp_alt.h 2005-06-16 11:16:50.109433206 +0100
@@ -0,0 +1,32 @@
+#ifndef __ASM_SMP_ALT_H__
+#define __ASM_SMP_ALT_H__
+
+#include <linux/config.h>
+
+#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
+#define LOCK \
+ "6677: nop\n" \
+ ".section __smp_alternatives,\"a\"\n" \
+ ".long 6677b\n" \
+ ".long 6678f\n" \
+ ".previous\n" \
+ ".section __smp_replacements,\"a\"\n" \
+ "6678: .byte 1\n" \
+ ".byte 1\n" \
+ ".byte 0\n" \
+ ".byte 1\n" \
+ ".byte -1\n" \
+ "lock\n" \
+ "nop\n" \
+ ".previous\n"
+void prepare_for_smp(void);
+void unprepare_for_smp(void);
+#else
+#define LOCK "lock ; "
+#endif
+#else
+#define LOCK ""
+#endif
+
+#endif /* __ASM_SMP_ALT_H__ */
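
Decoding the record this macro emits: the patch site (label 6677) is a single nop, and the five .byte directives give targ_size = 1, smp1_size = 1, smp2_size = 0, up_size = 1, and feature = -1 (no feature-dependent form), followed by the data bytes — the lock prefix (0xf0) as the SMP form and a nop (0x90) as the UP form. On !CONFIG_SMP_ALTERNATIVES kernels and in modules, LOCK falls back to the plain strings and composes with the following instruction by string-literal concatenation. A self-contained sketch of that composition using the simple fallback, so it links without the section machinery (x86 assumed):

#include <stdio.h>

#define LOCK "lock ; "    /* the non-alternatives fallback above */

static int atomic_inc_demo(int *v)
{
    /* String concatenation yields "lock ; incl %0". */
    __asm__ __volatile__(LOCK "incl %0" : "+m" (*v));
    return *v;
}

int main(void)
{
    int v = 41;
    printf("%d\n", atomic_inc_demo(&v));   /* prints 42 */
    return 0;
}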
diff -Naur linux-2.6.11/include/asm-i386/spinlock.h linux-2.6.11.post/include/asm-i386/spinlock.h
--- linux-2.6.11/include/asm-i386/spinlock.h 2005-03-02 07:37:50.000000000 +0000
+++ linux-2.6.11.post/include/asm-i386/spinlock.h 2005-06-13 14:13:52.000000000 +0100
@@ -6,6 +6,7 @@
#include <asm/page.h>
#include <linux/config.h>
#include <linux/compiler.h>
+#include <asm/smp_alt.h>

asmlinkage int printk(const char * fmt, ...)
__attribute__ ((format (printf, 1, 2)));
@@ -47,8 +48,9 @@
#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))

#define spin_lock_string \
- "\n1:\t" \
- "lock ; decb %0\n\t" \
+ "1:\n" \
+ LOCK \
+ "decb %0\n\t" \
"jns 3f\n" \
"2:\t" \
"rep;nop\n\t" \
@@ -58,8 +60,9 @@
"3:\n\t"

#define spin_lock_string_flags \
- "\n1:\t" \
- "lock ; decb %0\n\t" \
+ "1:\n" \
+ LOCK \
+ "decb %0\n\t" \
"jns 4f\n\t" \
"2:\t" \
"testl $0x200, %1\n\t" \
@@ -121,10 +124,34 @@
static inline int _raw_spin_trylock(spinlock_t *lock)
{
char oldval;
+#ifdef CONFIG_SMP_ALTERNATIVES
__asm__ __volatile__(
- "xchgb %b0,%1"
+ "1:movb %1,%b0\n"
+ "movb $0,%1\n"
+ "2:"
+ ".section __smp_alternatives,\"a\"\n"
+ ".long 1b\n"
+ ".long 3f\n"
+ ".previous\n"
+ ".section __smp_replacements,\"a\"\n"
+ "3: .byte 2b - 1b\n"
+ ".byte 5f-4f\n"
+ ".byte 0\n"
+ ".byte 6f-5f\n"
+ ".byte -1\n"
+ "4: xchgb %b0,%1\n"
+ "5: movb %1,%b0\n"
+ "movb $0,%1\n"
+ "6:\n"
+ ".previous\n"
:"=q" (oldval), "=m" (lock->slock)
:"0" (0) : "memory");
+#else
+ __asm__ __volatile__(
+ "xchgb %b0,%1\n"
+ :"=q" (oldval), "=m" (lock->slock)
+ :"0" (0) : "memory");
+#endif
return oldval > 0;
}

@@ -225,8 +252,8 @@
__build_write_lock(rw, "__write_lock_failed");
}

-#define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
+#define _raw_read_unlock(rw) asm volatile(LOCK "incl %0" :"=m" ((rw)->lock) : : "memory")
+#define _raw_write_unlock(rw) asm volatile(LOCK "addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")

static inline int _raw_read_trylock(rwlock_t *lock)
{
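
The rewritten trylock keeps xchgb as the SMP form but substitutes a non-atomic movb load/store pair as the UP form. On a single CPU that is equivalent: preemption is disabled across the raw operation, and any interrupt handler that takes the lock must release it again before the interrupted code resumes, so the lock byte cannot change between the load and the store. A user-space sketch of the two variants (x86 assumed; the slock byte is an illustrative stand-in for lock->slock):

#include <stdio.h>

static unsigned char slock = 1;      /* 1 = unlocked, as in 2.6.11 */

static int trylock_smp(void)         /* the xchgb form */
{
    unsigned char oldval = 0;
    __asm__ __volatile__("xchgb %b0,%1"
                         : "+q" (oldval), "+m" (slock) : : "memory");
    return oldval > 0;
}

static int trylock_up(void)          /* the movb/movb form */
{
    unsigned char oldval;
    __asm__ __volatile__("movb %1,%b0\n\t"
                         "movb $0,%1"
                         : "=q" (oldval), "+m" (slock) : : "memory");
    return oldval > 0;
}

int main(void)
{
    printf("first try:  %d\n", trylock_up());   /* 1: lock acquired */
    printf("second try: %d\n", trylock_smp());  /* 0: already held */
    return 0;
}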
diff -Naur linux-2.6.11/include/asm-i386/system.h linux-2.6.11.post/include/asm-i386/system.h
--- linux-2.6.11/include/asm-i386/system.h 2005-03-02 07:37:30.000000000 +0000
+++ linux-2.6.11.post/include/asm-i386/system.h 2005-06-15 13:21:40.000000000 +0100
@@ -5,7 +5,7 @@
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
-#include <linux/bitops.h> /* for LOCK_PREFIX */
+#include <asm/smp_alt.h>

#ifdef __KERNEL__

@@ -249,19 +249,19 @@
unsigned long prev;
switch (size) {
case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+ __asm__ __volatile__(LOCK "cmpxchgb %b1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+ __asm__ __volatile__(LOCK "cmpxchgw %w1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+ __asm__ __volatile__(LOCK "cmpxchgl %1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
@@ -425,11 +425,55 @@
#endif

#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
+#define smp_alt_mb(instr) \
+__asm__ __volatile__("6667:\nnop\nnop\nnop\nnop\nnop\nnop\n6668:\n" \
+ ".section __smp_alternatives,\"a\"\n" \
+ ".long 6667b\n" \
+ ".long 6673f\n" \
+ ".previous\n" \
+ ".section __smp_replacements,\"a\"\n" \
+ "6673:.byte 6668b-6667b\n" \
+ ".byte 6670f-6669f\n" \
+ ".byte 6671f-6670f\n" \
+ ".byte 0\n" \
+ ".byte %c0\n" \
+ "6669:lock;addl $0,0(%%esp)\n" \
+ "6670:" instr "\n" \
+ "6671:\n" \
+ ".previous\n" \
+ : \
+ : "i" (X86_FEATURE_XMM2) \
+ : "memory")
+#define smp_rmb() smp_alt_mb("lfence")
+#define smp_mb() smp_alt_mb("mfence")
+#define set_mb(var, value) do { \
+unsigned long __set_mb_temp; \
+__asm__ __volatile__("6667:movl %1, %0\n6668:\n" \
+ ".section __smp_alternatives,\"a\"\n" \
+ ".long 6667b\n" \
+ ".long 6673f\n" \
+ ".previous\n" \
+ ".section __smp_replacements,\"a\"\n" \
+ "6673: .byte 6668b-6667b\n" \
+ ".byte 6670f-6669f\n" \
+ ".byte 0\n" \
+ ".byte 6671f-6670f\n" \
+ ".byte -1\n" \
+ "6669: xchg %1, %0\n" \
+ "6670:movl %1, %0\n" \
+ "6671:\n" \
+ ".previous\n" \
+ : "=m" (var), "=r" (__set_mb_temp) \
+ : "1" (value) \
+ : "memory"); } while (0)
+#else
+#define smp_rmb() rmb()
+#define smp_mb() mb()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#endif
+#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
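
Each smp_mb()/smp_rmb() site above starts life as six nops. On SMP it is patched to the smp2 form — mfence or lfence — when the CPU has SSE2 (the feature byte is X86_FEATURE_XMM2), otherwise to the smp1 form, lock; addl $0,0(%esp); with up_size = 0 the site reverts to pure nops on UP. set_mb() likewise alternates between an implicitly locked xchg and a plain movl. A user-space sketch of the two full-barrier encodings (x86 assumed; a local variable stands in for the kernel's top-of-stack operand, illustrative only):

#include <stdio.h>

static void mb_mfence(void)          /* SSE2 form (smp2 slot) */
{
    __asm__ __volatile__("mfence" ::: "memory");
}

static void mb_locked_add(void)      /* pre-SSE2 form (smp1 slot) */
{
    int dummy = 0;
    /* A locked read-modify-write drains the store buffer, acting
     * as a full barrier on parts without mfence. */
    __asm__ __volatile__("lock; addl $0,%0" : "+m" (dummy) :: "memory");
}

int main(void)
{
    mb_mfence();
    mb_locked_add();
    puts("both barriers executed");
    return 0;
}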