patch-2.4.0-test11 linux/arch/i386/lib/mmx.c

diff -u --recursive --new-file v2.4.0-test10/linux/arch/i386/lib/mmx.c linux/arch/i386/lib/mmx.c
@@ -16,6 +16,9 @@
  *	Add *user handling. Checksums are not a win with MMX on any CPU
  *	tested so far for any MMX solution figured.
  *
+ *	22/09/2000 - Arjan van de Ven
+ *		Improved for non-engineering-sample Athlons
+ *
  */
  
 void *_mmx_memcpy(void *to, const void *from, size_t len)
@@ -104,28 +107,26 @@
 		"  pxor %%mm0, %%mm0\n" : :
 	);
 
-	for(i=0;i<4096/128;i++)
+	for(i=0;i<4096/64;i++)
 	{
 		__asm__ __volatile__ (
-		"  movq %%mm0, (%0)\n"
-		"  movq %%mm0, 8(%0)\n"
-		"  movq %%mm0, 16(%0)\n"
-		"  movq %%mm0, 24(%0)\n"
-		"  movq %%mm0, 32(%0)\n"
-		"  movq %%mm0, 40(%0)\n"
-		"  movq %%mm0, 48(%0)\n"
-		"  movq %%mm0, 56(%0)\n"
-		"  movq %%mm0, 64(%0)\n"
-		"  movq %%mm0, 72(%0)\n"
-		"  movq %%mm0, 80(%0)\n"
-		"  movq %%mm0, 88(%0)\n"
-		"  movq %%mm0, 96(%0)\n"
-		"  movq %%mm0, 104(%0)\n"
-		"  movq %%mm0, 112(%0)\n"
-		"  movq %%mm0, 120(%0)\n"
+		"  movntq %%mm0, (%0)\n"
+		"  movntq %%mm0, 8(%0)\n"
+		"  movntq %%mm0, 16(%0)\n"
+		"  movntq %%mm0, 24(%0)\n"
+		"  movntq %%mm0, 32(%0)\n"
+		"  movntq %%mm0, 40(%0)\n"
+		"  movntq %%mm0, 48(%0)\n"
+		"  movntq %%mm0, 56(%0)\n"
 		: : "r" (page) : "memory");
-		page+=128;
+		page+=64;
 	}
+	/* since movntq is weakly-ordered, a "sfence" is needed to become
+	 * ordered again.
+	 */
+	__asm__ __volatile__ (
+		"  sfence \n" : :
+	);
 	stts();
 }
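
(Not part of the patch.) The comment in the hunk above notes that movntq stores are
weakly ordered, which is why the sfence was added before stts(). As a rough,
hypothetical user-space sketch of the same pattern, assuming an SSE-capable CPU, a
4096-byte buffer and a made-up name nt_clear_page(), the cleared-page loop looks
roughly like this; the kernel's FPU save/restore is replaced by a trailing emms:

static void nt_clear_page(void *page)
{
	int i;

	/* Zero mm0; every store below copies this register to memory. */
	__asm__ __volatile__ ("  pxor %%mm0, %%mm0\n" : : );

	for (i = 0; i < 4096/64; i++) {
		/* Eight movntq stores = 64 bytes per iteration, written
		 * non-temporally (around the cache). */
		__asm__ __volatile__ (
		"  movntq %%mm0, (%0)\n"
		"  movntq %%mm0, 8(%0)\n"
		"  movntq %%mm0, 16(%0)\n"
		"  movntq %%mm0, 24(%0)\n"
		"  movntq %%mm0, 32(%0)\n"
		"  movntq %%mm0, 40(%0)\n"
		"  movntq %%mm0, 48(%0)\n"
		"  movntq %%mm0, 56(%0)\n"
		: : "r" (page) : "memory");
		page = (char *)page + 64;
	}
	/* movntq is weakly ordered: sfence makes the stores globally
	 * visible before anything that follows. */
	__asm__ __volatile__ ("  sfence\n" : : );
	/* User space has no kernel_fpu_begin()/stts(); clear MMX state. */
	__asm__ __volatile__ ("  emms\n" : : );
}

nt_clear_page() and the emms call are illustration-only; the real fast_clear_page()
runs with the FPU state already saved by the caller and ends with stts(), exactly as
in the hunk above.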
 
@@ -140,6 +141,9 @@
 		current->flags &= ~PF_USEDFPU;
 	}
 
+	/* maybe the prefetch stuff can go before the expensive fnsave...
+	 * but that is for later. -AV
+	 */
 	__asm__ __volatile__ (
 		"1: prefetch (%0)\n"
 		"   prefetch 64(%0)\n"
@@ -162,21 +166,21 @@
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
 		"2: movq (%0), %%mm0\n"
+		"   movntq %%mm0, (%1)\n"
 		"   movq 8(%0), %%mm1\n"
+		"   movntq %%mm1, 8(%1)\n"
 		"   movq 16(%0), %%mm2\n"
+		"   movntq %%mm2, 16(%1)\n"
 		"   movq 24(%0), %%mm3\n"
-		"   movq %%mm0, (%1)\n"
-		"   movq %%mm1, 8(%1)\n"
-		"   movq %%mm2, 16(%1)\n"
-		"   movq %%mm3, 24(%1)\n"
-		"   movq 32(%0), %%mm0\n"
-		"   movq 40(%0), %%mm1\n"
-		"   movq 48(%0), %%mm2\n"
-		"   movq 56(%0), %%mm3\n"
-		"   movq %%mm0, 32(%1)\n"
-		"   movq %%mm1, 40(%1)\n"
-		"   movq %%mm2, 48(%1)\n"
-		"   movq %%mm3, 56(%1)\n"
+		"   movntq %%mm3, 24(%1)\n"
+		"   movq 32(%0), %%mm4\n"
+		"   movntq %%mm4, 32(%1)\n"
+		"   movq 40(%0), %%mm5\n"
+		"   movntq %%mm5, 40(%1)\n"
+		"   movq 48(%0), %%mm6\n"
+		"   movntq %%mm6, 48(%1)\n"
+		"   movq 56(%0), %%mm7\n"
+		"   movntq %%mm7, 56(%1)\n"
 		".section .fixup, \"ax\"\n"
 		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
 		"   jmp 2b\n"
@@ -189,6 +193,12 @@
 		from+=64;
 		to+=64;
 	}
+	/* since movntq is weakly-ordered, a "sfence" is needed to become
+	 * ordered again.
+	 */
+	__asm__ __volatile__ (
+		"  sfence \n" : :
+	);
 	stts();
 }
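
(Not part of the patch.) For the copy path, each cached movq load from the source is
paired with a movntq store to the destination, so the destination page is written
around the cache; a single sfence at the end restores ordering. A comparable
hypothetical user-space sketch, with a made-up name nt_copy_page() and without the
kernel's FPU handling or the .fixup exception handling for the prefetch, might read:

static void nt_copy_page(void *to, const void *from)
{
	int i;

	for (i = 0; i < 4096/64; i++) {
		__asm__ __volatile__ (
		/* 3DNow! prefetch of data five iterations ahead, as in the
		 * patched loop (assumes an AMD CPU). */
		"  prefetch 320(%0)\n"
		/* Load 64 bytes through the cache, store them
		 * non-temporally into the destination. */
		"  movq (%0), %%mm0\n"
		"  movntq %%mm0, (%1)\n"
		"  movq 8(%0), %%mm1\n"
		"  movntq %%mm1, 8(%1)\n"
		"  movq 16(%0), %%mm2\n"
		"  movntq %%mm2, 16(%1)\n"
		"  movq 24(%0), %%mm3\n"
		"  movntq %%mm3, 24(%1)\n"
		"  movq 32(%0), %%mm4\n"
		"  movntq %%mm4, 32(%1)\n"
		"  movq 40(%0), %%mm5\n"
		"  movntq %%mm5, 40(%1)\n"
		"  movq 48(%0), %%mm6\n"
		"  movntq %%mm6, 48(%1)\n"
		"  movq 56(%0), %%mm7\n"
		"  movntq %%mm7, 56(%1)\n"
		: : "r" (from), "r" (to) : "memory");
		from = (const char *)from + 64;
		to = (char *)to + 64;
	}
	/* Same weak-ordering caveat as the clear path. */
	__asm__ __volatile__ ("  sfence\n" : : );
	__asm__ __volatile__ ("  emms\n" : : );
}

The patched loop also interleaves each load with its store and uses all eight MMX
registers, rather than batching four loads and four stores through mm0-mm3; the
non-temporal stores keep a page-sized clear or copy from evicting the caller's
working set from the cache.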
 
