patch-2.4.0-test9 linux/include/asm-arm/proc-armv/cache.h


diff -u --recursive --new-file v2.4.0-test8/linux/include/asm-arm/proc-armv/cache.h linux/include/asm-arm/proc-armv/cache.h
@@ -1,57 +1,98 @@
+/*
+ *  linux/include/asm-arm/proc-armv/cache.h
+ *
+ *  Copyright (C) 1999-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
 #include <asm/mman.h>
 
 /*
- * Cache flushing...
+ * Cache handling for 32-bit ARM processors.
+ *
+ * Note that on ARM, we have a more accurate specification than
+ * Linux's "flush".  We therefore do not use "flush" here, but instead
+ * use:
+ *
+ * clean:      the act of pushing dirty cache entries out to memory.
+ * invalidate: the act of discarding data held within the cache,
+ *             whether it is dirty or not.
+ */
+
+/*
+ * Generic I + D cache
  */
 #define flush_cache_all()						\
-	cpu_flush_cache_all()
+	do {								\
+		cpu_cache_clean_invalidate_all();			\
+	} while (0)
 
+/* This is always called for current->mm */
 #define flush_cache_mm(_mm)						\
 	do {								\
-		if ((_mm) == current->mm)				\
-			cpu_flush_cache_all();				\
+		if ((_mm) == current->active_mm)			\
+			cpu_cache_clean_invalidate_all();		\
 	} while (0)
 
 #define flush_cache_range(_mm,_start,_end)				\
 	do {								\
 		if ((_mm) == current->mm)				\
-			cpu_flush_cache_area((_start), (_end), 1);	\
+			cpu_cache_clean_invalidate_range((_start), (_end), 1); \
 	} while (0)
 
 #define flush_cache_page(_vma,_vmaddr)					\
 	do {								\
-		if ((_vma)->vm_mm == current->mm)			\
-			cpu_flush_cache_area((_vmaddr),			\
+		if ((_vma)->vm_mm == current->mm) {			\
+			cpu_cache_clean_invalidate_range((_vmaddr),	\
 				(_vmaddr) + PAGE_SIZE,			\
 				((_vma)->vm_flags & VM_EXEC));		\
+		}						\
 	} while (0)
 
-#define clean_cache_range(_start,_end)					\
-	do {								\
-		unsigned long _s, _sz;					\
-		_s = (unsigned long)_start;				\
-		_sz = (unsigned long)_end - _s;				\
-		cpu_clean_cache_area(_s, _sz);				\
-	} while (0)
+/*
+ * This flushes back any buffered write data.  We have to clean the entries
+ * in the cache for this page.  This does not invalidate either I or D caches.
+ */
+static __inline__ void flush_page_to_ram(struct page *page)
+{
+	cpu_flush_ram_page(page_address(page));
+}
 
-#define clean_cache_area(_start,_size)					\
+/*
+ * D cache only
+ */
+
+#define invalidate_dcache_range(_s,_e)	cpu_dcache_invalidate_range((_s),(_e))
+#define clean_dcache_range(_s,_e)	cpu_dcache_clean_range((_s),(_e))
+#define flush_dcache_range(_s,_e)	cpu_cache_clean_invalidate_range((_s),(_e),0)
+
+/*
+ * FIXME: We currently clean the dcache for this page.  Should we
+ * also invalidate the Dcache?  And what about the Icache? -- rmk
+ */
+#define flush_dcache_page(page)		cpu_dcache_clean_page(page_address(page))
+
+#define clean_dcache_entry(_s)		cpu_dcache_clean_entry((unsigned long)(_s))
+
+/*
+ * I cache only
+ */
+#define flush_icache_range(_s,_e)					\
 	do {								\
-		unsigned long _s;					\
-		_s = (unsigned long)_start;				\
-		cpu_clean_cache_area(_s, _size);			\
+		cpu_icache_invalidate_range((_s), (_e));		\
 	} while (0)
 
-#define flush_icache_range(_start,_end)					\
-	cpu_flush_icache_area((_start), (_end) - (_start))
-
 #define flush_icache_page(vma,pg)					\
 	do {								\
 		if ((vma)->vm_flags & PROT_EXEC)			\
-			cpu_flush_icache_page((unsigned long) page_address(pg));	\
+			cpu_icache_invalidate_page(page_address(pg));	\
 	} while (0)
 
 /*
- * We don't have a MEMC chip...
+ * Old ARM MEMC stuff.  This supports the reversed mapping handling that
+ * we have on the older 26-bit machines.  We don't have a MEMC chip, so...
  */
 #define memc_update_all()		do { } while (0)
 #define memc_update_mm(mm)		do { } while (0)
@@ -59,48 +100,57 @@
 #define memc_clear(mm,physaddr)		do { } while (0)
 
 /*
- * This flushes back any buffered write data.  We have to clean the entries
- * in the cache for this page.  This does not invalidate either I or D caches.
- */
-static __inline__ void flush_page_to_ram(struct page *page)
-{
-	cpu_flush_ram_page((unsigned long) page_address(page));
-}
-
-/* You guys might need to do something here. -DaveM */
-#define flush_dcache_page(page)			do { } while (0)
-
-/*
- * TLB flushing:
+ * TLB flushing.
  *
- *  - flush_tlb_all() flushes all processes TLBs
- *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
- *  - flush_tlb_page(vma, vmaddr) flushes one page
- *  - flush_tlb_range(mm, start, end) flushes a range of pages
+ *  - flush_tlb_all()			flushes all processes' TLBs
+ *  - flush_tlb_mm(mm)			flushes the specified mm context TLBs
+ *  - flush_tlb_page(vma, vmaddr)	flushes TLB for specified page
+ *  - flush_tlb_range(mm, start, end)	flushes TLB for specified range of pages
  *
  * We drain the write buffer in here to ensure that the page tables in ram
  * are really up to date.  It is more efficient to do this here...
  */
-#define flush_tlb_all()								\
-	cpu_flush_tlb_all()
 
-#define flush_tlb_mm(_mm)							\
-	do {									\
-		if ((_mm) == current->mm)					\
-			cpu_flush_tlb_all();					\
-	} while (0)
-
-#define flush_tlb_range(_mm,_start,_end)					\
-	do {									\
-		if ((_mm) == current->mm)					\
-			cpu_flush_tlb_area((_start), (_end), 1);		\
-	} while (0)
-
-#define flush_tlb_page(_vma,_vmaddr)						\
-	do {									\
-		if ((_vma)->vm_mm == current->mm)				\
-			cpu_flush_tlb_page((_vmaddr),				\
-				 ((_vma)->vm_flags & VM_EXEC));			\
+/*
+ * Notes:
+ *  current->active_mm is the currently active memory descriptor.
+ *  current->mm == NULL iff we are lazy.
+ */
+#define flush_tlb_all()							\
+	do {								\
+		cpu_tlb_invalidate_all();				\
 	} while (0)
 
+/*
+ * Flush all user virtual address space translations described by `_mm'.
+ *
+ * Currently, this is always called for current->mm, which should be
+ * the same as current->active_mm.  It is not called for the lazy
+ * TLB case.
+ */
+#define flush_tlb_mm(_mm)						\
+	do {								\
+		if ((_mm) == current->active_mm)			\
+			cpu_tlb_invalidate_all();			\
+	} while (0)
+
+/*
+ * Flush the specified range of user virtual address space translations.
+ *
+ * _mm need not be current->active_mm, but must not be NULL.
+ */
+#define flush_tlb_range(_mm,_start,_end)				\
+	do {								\
+		if ((_mm) == current->active_mm)			\
+			cpu_tlb_invalidate_range((_start), (_end));	\
+	} while (0)
 
+/*
+ * Flush the specified user virtual address space translation.
+ */
+#define flush_tlb_page(_vma,_page)					\
+	do {								\
+		if ((_vma)->vm_mm == current->active_mm)		\
+			cpu_tlb_invalidate_page((_page),		\
+				 ((_vma)->vm_flags & VM_EXEC));		\
+	} while (0)
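The clean/invalidate split that this patch introduces at the top of cache.h
can be illustrated outside the kernel.  The sketch below is a minimal
user-space C program, not kernel code: the cpu_* function bodies are
hypothetical stubs standing in for the per-processor routines this header
dispatches to.  Only the names and the two-step semantics (clean the
D-cache, then invalidate the I-cache, as a JIT or module loader must do
after writing instructions) come from the patch itself.

/*
 * clean vs. invalidate, per the comment at the top of cache.h.
 * Stub implementations only; a real ARM port would issue CP15
 * cache operations here.
 */
#include <stdio.h>

/* clean: push dirty D-cache entries out to memory */
static void cpu_dcache_clean_range(unsigned long s, unsigned long e)
{
	printf("clean D-cache      %#lx-%#lx (write back dirty lines)\n", s, e);
}

/* invalidate: discard cached data, dirty or not */
static void cpu_icache_invalidate_range(unsigned long s, unsigned long e)
{
	printf("invalidate I-cache %#lx-%#lx (discard stale lines)\n", s, e);
}

int main(void)
{
	unsigned char code[64];	/* pretend instructions were written here */
	unsigned long s = (unsigned long)code;
	unsigned long e = s + sizeof(code);

	/* Order matters: new instructions must reach memory before the
	 * I-cache refetches them.  flush_icache_range() in the patch
	 * performs the invalidate half; the cpu_cache_clean_invalidate_*
	 * entry points do both in one call. */
	cpu_dcache_clean_range(s, e);
	cpu_icache_invalidate_range(s, e);
	return 0;
}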

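The switch from current->mm to current->active_mm in the TLB macros is the
subtle part of this patch: under lazy TLB switching a kernel thread has
current->mm == NULL while still running on a borrowed address space
recorded in current->active_mm.  The sketch below uses simplified
stand-ins for the kernel's task and mm structures (nothing here is the
real definition) to show why the old mm-based test could skip a flush that
the new test performs.  The comment in the diff does note that
flush_tlb_mm() is not currently reached in the lazy case; comparing
against active_mm keeps the test correct if it ever is.

#include <stdio.h>
#include <stddef.h>

struct mm { const char *name; };

struct task {
	struct mm *mm;		/* NULL for kernel threads (lazy TLB) */
	struct mm *active_mm;	/* address space actually loaded */
};

static void cpu_tlb_invalidate_all(void) { puts("TLB invalidated"); }

/* Mirrors the patched flush_tlb_mm(): the loaded space is what counts. */
static void flush_tlb_mm(struct task *current, struct mm *mm)
{
	if (mm == current->active_mm)
		cpu_tlb_invalidate_all();
}

int main(void)
{
	struct mm user = { "user" };
	/* A lazy kernel thread: mm is NULL, but user's page tables
	 * (and TLB entries) are still live via active_mm. */
	struct task kthread = { NULL, &user };

	/* The pre-patch test (mm == current->mm) would compare against
	 * NULL and skip the flush; the active_mm test performs it. */
	flush_tlb_mm(&kthread, &user);
	return 0;
}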