patch-2.4.0-test12 linux/include/asm-arm/proc-armv/system.h

diff -u --recursive --new-file v2.4.0-test11/linux/include/asm-arm/proc-armv/system.h linux/include/asm-arm/proc-armv/system.h
@@ -10,19 +10,7 @@
 #ifndef __ASM_PROC_SYSTEM_H
 #define __ASM_PROC_SYSTEM_H
 
-extern __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
-{
-	extern void __bad_xchg(volatile void *, int);
-
-	switch (size) {
-		case 1:	__asm__ __volatile__ ("swpb %0, %1, [%2]" : "=r" (x) : "r" (x), "r" (ptr) : "memory");
-			break;
-		case 4:	__asm__ __volatile__ ("swp %0, %1, [%2]" : "=r" (x) : "r" (x), "r" (ptr) : "memory");
-			break;
-		default: __bad_xchg(ptr, size);
-	}
-	return x;
-}
+#include <linux/config.h>
 
 #define set_cr(x)					\
 	__asm__ __volatile__(				\
@@ -130,5 +118,62 @@
 	:							\
 	: "r" (x)						\
 	: "memory")
+
+#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
+/*
+ * On the StrongARM, "swp" is terminally broken since it bypasses the
+ * cache totally.  This means that the cache becomes inconsistent, and,
+ * since we use normal loads/stores as well, this is really bad.
+ * Typically, this causes oopsen in filp_close, but could have other,
+ * more disastrous effects.  There are two work-arounds:
+ *  1. Disable interrupts and emulate the atomic swap
+ *  2. Clean the cache, perform atomic swap, flush the cache
+ *
+ * We choose (1) since it's the "easiest" to achieve here and is not
+ * dependent on the processor type.
+ */
+#define swp_is_buggy
+#endif
+
+extern __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+	extern void __bad_xchg(volatile void *, int);
+	unsigned long ret;
+#ifdef swp_is_buggy
+	unsigned long flags;
+#endif
+
+	switch (size) {
+#ifdef swp_is_buggy
+		case 1:
+			__save_flags_cli(flags);
+			ret = *(volatile unsigned char *)ptr;
+			*(volatile unsigned char *)ptr = x;
+			__restore_flags(flags);
+			break;
+
+		case 4:
+			__save_flags_cli(flags);
+			ret = *(volatile unsigned long *)ptr;
+			*(volatile unsigned long *)ptr = x;
+			__restore_flags(flags);
+			break;
+#else
+		case 1:	__asm__ __volatile__ ("swpb %0, %1, [%2]"
+					: "=r" (ret)
+					: "r" (x), "r" (ptr)
+					: "memory");
+			break;
+		case 4:	__asm__ __volatile__ ("swp %0, %1, [%2]"
+					: "=r" (ret)
+					: "r" (x), "r" (ptr)
+					: "memory");
+			break;
+#endif
+		default: __bad_xchg(ptr, size);
+	}
+
+	return ret;
+}
 
 #endif
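
A note on the emulation path: SA-110 and SA-1100 are uniprocessor cores, so
disabling IRQs around the load/store pair is enough to make the swap atomic;
nothing else can run between the two accesses.  __save_flags_cli() saves the
CPSR and sets the I bit, and __restore_flags() writes the saved word back,
re-enabling IRQs only if they were enabled before.  As a minimal sketch of
the mechanism on 32-bit ARM (the kernel's real macros live in the ARM
headers; the sketch_ names here are illustrative, not from the patch):

	/* Save the CPSR into x, then disable IRQs by setting the I bit (0x80). */
	#define sketch_save_flags_cli(x)			\
		({						\
			unsigned long temp;			\
		__asm__ __volatile__(				\
		"mrs	%0, cpsr	@ save flags\n"		\
	"	orr	%1, %0, #128	@ set I bit\n"		\
	"	msr	cpsr_c, %1	@ IRQs off"		\
		: "=r" (x), "=r" (temp)			\
		:						\
		: "memory");					\
		})

	/* Write the saved word back, restoring the previous I-bit state. */
	#define sketch_restore_flags(x)				\
		__asm__ __volatile__(				\
		"msr	cpsr_c, %0	@ restore flags"	\
		:						\
		: "r" (x)					\
		: "memory")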

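For completeness, callers never invoke __xchg() directly.  The xchg() wrapper
in <asm-arm/system.h> passes sizeof(*(ptr)) as the size argument, so the
switch above is folded at compile time; __bad_xchg() is declared but never
defined, so an unsupported operand size survives as an undefined reference
and fails at link time rather than silently misbehaving at run time.  A
hedged sketch of the wrapper plus a typical test-and-set use (the lock,
acquire and release names are illustrative, not from the patch):

	/* The wrapper, roughly as it appears in this era's <asm-arm/system.h>:
	 * the cast and sizeof let one macro serve byte and word operands. */
	#define xchg(ptr,x) \
		((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

	/* Illustrative test-and-set spin: atomically store 1 and examine the
	 * old value; reading back 0 means we took the lock. */
	static volatile unsigned long lock;

	static void acquire(void)
	{
		while (xchg(&lock, 1) != 0)
			/* spin until the holder stores 0 in release() */;
	}

	static void release(void)
	{
		lock = 0;
	}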