patch-2.4.0-test6 linux/include/asm-mips64/pgtable.h
- Lines: 108
- Date: Mon Aug 7 21:02:27 2000
- Orig file: v2.4.0-test5/linux/include/asm-mips64/pgtable.h
- Orig date: Mon Jul 10 16:47:26 2000
diff -u --recursive --new-file v2.4.0-test5/linux/include/asm-mips64/pgtable.h linux/include/asm-mips64/pgtable.h
@@ -27,29 +27,63 @@
* - flush_cache_range(mm, start, end) flushes a range of pages
* - flush_page_to_ram(page) write back kernel page to ram
*/
-extern void (*_flush_cache_all)(void);
extern void (*_flush_cache_mm)(struct mm_struct *mm);
extern void (*_flush_cache_range)(struct mm_struct *mm, unsigned long start,
unsigned long end);
extern void (*_flush_cache_page)(struct vm_area_struct *vma, unsigned long page);
-extern void (*_flush_cache_sigtramp)(unsigned long addr);
extern void (*_flush_page_to_ram)(struct page * page);
-#define flush_cache_all() _flush_cache_all()
+#define flush_cache_all() do { } while(0)
+#define flush_dcache_page(page) do { } while (0)
+
+#ifndef CONFIG_CPU_R10000
#define flush_cache_mm(mm) _flush_cache_mm(mm)
#define flush_cache_range(mm,start,end) _flush_cache_range(mm,start,end)
#define flush_cache_page(vma,page) _flush_cache_page(vma, page)
-#define flush_cache_sigtramp(addr) _flush_cache_sigtramp(addr)
#define flush_page_to_ram(page) _flush_page_to_ram(page)
-#define flush_icache_range(start, end) flush_cache_all()
+#define flush_icache_range(start, end) _flush_cache_l1()
#define flush_icache_page(vma, page) \
do { \
unsigned long addr; \
- addr = page_address(page); \
+ addr = (unsigned long) page_address(page); \
_flush_cache_page(vma, addr); \
} while (0)
+#else /* !CONFIG_CPU_R10000 */
+/*
+ * Since the r10k handles VCEs in hardware, most of the flush cache
+ * routines are not needed. Only the icache on a processor is not
+ * coherent with the dcache of the _same_ processor, so we must flush
+ * the icache so that it does not contain stale contents of physical
+ * memory. No flushes are needed for dma coherency, since the o200s
+ * are io coherent. The only place where we might be overoptimizing
+ * out icache flushes is from mprotect (when PROT_EXEC is added).
+ */
+extern void andes_flush_icache_page(unsigned long);
+#define flush_cache_mm(mm) do { } while(0)
+#define flush_cache_range(mm,start,end) do { } while(0)
+#define flush_cache_page(vma,page) do { } while(0)
+#define flush_page_to_ram(page) do { } while(0)
+#define flush_icache_range(start, end) _flush_cache_l1()
+#define flush_icache_page(vma, page) \
+do { \
+ if ((vma)->vm_flags & VM_EXEC) \
+ andes_flush_icache_page(page_address(page)); \
+} while (0)
+#endif /* !CONFIG_CPU_R10000 */
+
+/*
+ * The following cache flushing routines are MIPS specific.
+ * flush_cache_l2 is needed only during initialization.
+ */
+extern void (*_flush_cache_sigtramp)(unsigned long addr);
+extern void (*_flush_cache_l2)(void);
+extern void (*_flush_cache_l1)(void);
+
+#define flush_cache_sigtramp(addr) _flush_cache_sigtramp(addr)
+#define flush_cache_l2() _flush_cache_l2()
+#define flush_cache_l1() _flush_cache_l1()
/*
* Each address space has 2 4K pages as its page directory, giving 1024
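
To see how the reworked flush macros are consumed, the sketch below (not part of the patch; map_new_page() is a hypothetical helper) shows the usual pattern when generic mm code installs a page into a user mapping: on pre-R10000 configurations flush_icache_page() goes through the _flush_cache_page() function pointer, while the R10000 build only calls andes_flush_icache_page() for executable VMAs.

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Hypothetical caller, for illustration only: how flush_icache_page()
 * is used once a new PTE has been set up for a mapping that may be
 * executed. */
static void map_new_page(struct vm_area_struct *vma, unsigned long address,
                         struct page *page, pte_t *ptep, pgprot_t prot)
{
        set_pte(ptep, mk_pte(page, prot));
        /* Pre-R10000: indirect call through _flush_cache_page().
         * R10000: andes_flush_icache_page(), guarded by VM_EXEC. */
        flush_icache_page(vma, page);
        update_mmu_cache(vma, address, *ptep);
}
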
@@ -207,7 +241,7 @@
#define BAD_PMDTABLE __bad_pmd_table()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) \
- (mem_map + MAP_NR(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
+ (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))
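
The ZERO_PAGE() hunk replaces indexing mem_map by hand with MAP_NR() by the virt_to_page() helper, which also yields the right struct page * on discontiguous-memory configurations. A rough equivalent, written as a function for clarity (zero_page_of() is illustrative, not a real kernel symbol):

/* Illustration of the ZERO_PAGE() conversion: both the old and the
 * new form resolve the zero-page slice picked by vaddr to its
 * struct page *. */
static struct page *zero_page_of(unsigned long vaddr)
{
        unsigned long kaddr = empty_zero_page + (vaddr & zero_page_mask);

        /* old: return mem_map + MAP_NR(kaddr); */
        return virt_to_page(kaddr);
}
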
@@ -233,11 +267,6 @@
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-extern inline unsigned long pte_page(pte_t pte)
-{
- return PAGE_OFFSET + (pte_val(pte) & PAGE_MASK);
-}
-
extern inline unsigned long pmd_page(pmd_t pmd)
{
return pmd_val(pmd);
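
The deleted inline reflects the old convention in which pte_page() returned a kernel virtual address (PAGE_OFFSET plus the frame's physical address); after this patch pte_page() returns a struct page * instead (see the last hunk below). A caller that still needs the virtual address would now go through page_address(), roughly as in this illustrative helper:

/* Hypothetical helper: the new idiom for recovering the kernel
 * virtual address that the removed pte_page() inline used to
 * return directly. */
static unsigned long pte_to_vaddr(pte_t pte)
{
        return (unsigned long) page_address(pte_page(pte));
}
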
@@ -325,13 +354,13 @@
*/
#define page_address(page) ((page)->virtual)
#ifndef CONFIG_DISCONTIGMEM
-#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PAGE_SHIFT)))
+#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#else
-#define pte_pagenr(x) \
+#define mips64_pte_pagenr(x) \
(PLAT_NODE_DATA_STARTNR(PHYSADDR_TO_NID(pte_val(x))) + \
PLAT_NODE_DATA_LOCALNR(pte_val(x), PHYSADDR_TO_NID(pte_val(x))))
+#define pte_page(x) (mem_map+mips64_pte_pagenr(x))
#endif
-#define pte_page(x) (mem_map+pte_pagenr(x))
/*
* The following only work if pte_present() is true.
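
In the final hunk, the CONFIG_DISCONTIGMEM variant of pte_page() goes through mips64_pte_pagenr(), which adds the node's starting index in mem_map to the page's node-local index. Written out as a function, the computation looks roughly like this (pte_page_discontig() is illustrative; the patch keeps it as macros):

/* Illustrative expansion of pte_page() under CONFIG_DISCONTIGMEM:
 * map the physical address held in the PTE to its node, then to the
 * node's base entry in mem_map plus the node-local page number. */
static struct page *pte_page_discontig(pte_t pte)
{
        int nid = PHYSADDR_TO_NID(pte_val(pte));
        unsigned long pagenr = PLAT_NODE_DATA_STARTNR(nid) +
                               PLAT_NODE_DATA_LOCALNR(pte_val(pte), nid);

        return mem_map + pagenr;
}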