patch-2.4.0-test9 linux/include/asm-alpha/atomic.h

diff -u --recursive --new-file v2.4.0-test8/linux/include/asm-alpha/atomic.h linux/include/asm-alpha/atomic.h
@@ -1,8 +1,6 @@
 #ifndef _ALPHA_ATOMIC_H
 #define _ALPHA_ATOMIC_H
 
-#include <linux/config.h>
-
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc...
@@ -11,11 +9,13 @@
  * than regular operations.
  */
 
-#ifdef CONFIG_SMP
+
+/*
+ * Counter is volatile to make sure gcc doesn't try to be clever
+ * and move things around on us. We need to use _exactly_ the address
+ * the user gave us, not some alias that contains the same information.
+ */
 typedef struct { volatile int counter; } atomic_t;
-#else
-typedef struct { int counter; } atomic_t;
-#endif
 
 #define ATOMIC_INIT(i)	( (atomic_t) { (i) } )
 
@@ -23,19 +23,12 @@
 #define atomic_set(v,i)		((v)->counter = (i))
 
 /*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
-
-/*
  * To get proper branch prediction for the main line, we must branch
  * forward to code at the end of this object's .text section, then
  * branch back to restart the operation.
  */
 
-extern __inline__ void atomic_add(int i, atomic_t * v)
+static __inline__ void atomic_add(int i, atomic_t * v)
 {
 	unsigned long temp;
 	__asm__ __volatile__(
@@ -46,11 +39,11 @@
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
-	:"=&r" (temp), "=m" (__atomic_fool_gcc(v))
-	:"Ir" (i), "m" (__atomic_fool_gcc(v)));
+	:"=&r" (temp), "=m" (v->counter)
+	:"Ir" (i), "m" (v->counter));
 }
 
-extern __inline__ void atomic_sub(int i, atomic_t * v)
+static __inline__ void atomic_sub(int i, atomic_t * v)
 {
 	unsigned long temp;
 	__asm__ __volatile__(
@@ -61,14 +54,14 @@
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
-	:"=&r" (temp), "=m" (__atomic_fool_gcc(v))
-	:"Ir" (i), "m" (__atomic_fool_gcc(v)));
+	:"=&r" (temp), "=m" (v->counter)
+	:"Ir" (i), "m" (v->counter));
 }
 
 /*
  * Same as above, but return the result value
  */
-extern __inline__ long atomic_add_return(int i, atomic_t * v)
+static __inline__ long atomic_add_return(int i, atomic_t * v)
 {
 	long temp, result;
 	__asm__ __volatile__(
@@ -81,12 +74,12 @@
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
-	:"=&r" (temp), "=m" (__atomic_fool_gcc(v)), "=&r" (result)
-	:"Ir" (i), "m" (__atomic_fool_gcc(v)));
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
+	:"Ir" (i), "m" (v->counter) : "memory");
 	return result;
 }
 
-extern __inline__ long atomic_sub_return(int i, atomic_t * v)
+static __inline__ long atomic_sub_return(int i, atomic_t * v)
 {
 	long temp, result;
 	__asm__ __volatile__(
@@ -99,8 +92,8 @@
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
-	:"=&r" (temp), "=m" (__atomic_fool_gcc(v)), "=&r" (result)
-	:"Ir" (i), "m" (__atomic_fool_gcc(v)));
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
+	:"Ir" (i), "m" (v->counter) : "memory");
 	return result;
 }
 

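For readers skimming the hunks: the interface this header exports is small. Below is a minimal, hypothetical usage sketch; get_object() and put_object() are illustrative names, not part of the patch, and only ATOMIC_INIT, atomic_add and atomic_sub_return come from the header being patched.

	#include <asm/atomic.h>

	/* Hypothetical reference count built on the primitives above. */
	static atomic_t refcount = ATOMIC_INIT(1);

	static void get_object(void)
	{
		atomic_add(1, &refcount);	/* take a reference */
	}

	static int put_object(void)
	{
		/*
		 * atomic_sub_return() yields the post-decrement value,
		 * so exactly one caller sees it reach zero and may free
		 * the object.
		 */
		return atomic_sub_return(1, &refcount) == 0;
	}
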
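The hunks show only the tail of each locked sequence; the diff context elides the load-locked/store-conditional body. As a sketch of the technique (the usual Alpha LL/SC idiom, not quoted verbatim from the kernel source), the loop in atomic_add has this shape:

	1:	ldl_l	%0,%1		# load counter, set the lock flag
		addl	%0,%2,%0	# add the increment in a register
		stl_c	%0,%1		# store-conditional: %0 is 0 on failure
		beq	%0,2f		# lost the reservation - retry
	.subsection 2
	2:	br	1b		# retry lives out of line, so the
	.previous			# hot path falls straight through

Keeping the retry branch in .subsection 2 is what the "proper branch prediction" comment refers to: the forward branch to the cold section is statically predicted not-taken, so the common (successful) case runs straight-line code. Note also that this patch adds a "memory" clobber to the value-returning variants (atomic_add_return, atomic_sub_return) but not to the void ones, so gcc may not cache memory contents across the returning forms.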