patch-2.4.0-test6 linux/include/asm-ia64/pgtable.h
- Lines: 50
- Date: Mon Aug 7 21:02:27 2000
- Orig file: v2.4.0-test5/linux/include/asm-ia64/pgtable.h
- Orig date: Thu Jul 27 17:38:02 2000
diff -u --recursive --new-file v2.4.0-test5/linux/include/asm-ia64/pgtable.h linux/include/asm-ia64/pgtable.h
@@ -166,13 +166,7 @@
* Given a pointer to an mem_map[] entry, return the kernel virtual
* address corresponding to that page.
*/
-#define page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-
-/*
- * Given a PTE, return the index of the mem_map[] entry corresponding
- * to the page frame the PTE.
- */
-#define pte_pagenr(x) ((unsigned long) ((pte_val(x) & _PFN_MASK) >> PAGE_SHIFT))
+#define page_address(page) ((void *) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)))

/*
* Now for some cache flushing routines. This is the kind of stuff
@@ -185,6 +179,7 @@
#define flush_cache_range(mm, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
+#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)

extern void ia64_flush_icache_page (unsigned long addr);
@@ -192,7 +187,7 @@
#define flush_icache_page(vma,pg) \
do { \
if ((vma)->vm_flags & PROT_EXEC) \
- ia64_flush_icache_page(page_address(pg)); \
+ ia64_flush_icache_page((unsigned long) page_address(pg)); \
} while (0)

/*
@@ -249,7 +244,7 @@
#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(pte) (pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
-#define pte_page(pte) (mem_map + pte_pagenr(pte))
+#define pte_page(pte) (mem_map + (unsigned long) ((pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT))

#define pmd_set(pmdp, ptep) (pmd_val(*(pmdp)) = __pa(ptep))
#define pmd_none(pmd) (!pmd_val(pmd))
@@ -417,7 +412,7 @@
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

# endif /* !__ASSEMBLY__ */
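The net effect of these hunks: page_address() now yields a void * (so callers such as flush_icache_page() must cast explicitly), pte_page() folds the old pte_pagenr() frame-index calculation inline, a no-op flush_dcache_page() joins the other cache-flush stubs, and ZERO_PAGE() goes through virt_to_page() instead of MAP_NR(). The user-space sketch below only models the pointer arithmetic behind the new page_address() and pte_page(); the PAGE_SHIFT, PAGE_OFFSET and _PFN_MASK values, the eight-entry mem_map[], and the use of a bare unsigned long in place of pte_t are illustrative stand-ins, not the real ia64 definitions.

/* User-space model of the patched macros; all constants are stand-ins only. */
#include <stdio.h>

#define PAGE_SHIFT   12UL                    /* stand-in: 4KB pages */
#define PAGE_OFFSET  0xc0000000UL            /* stand-in kernel offset */
#define _PFN_MASK    0xfffff000UL            /* stand-in PFN field of a PTE */

struct page { int _unused; };                /* stand-in for struct page */
static struct page mem_map[8];               /* stand-in mem_map[] */

/* New page_address(): mem_map index scaled back up to a kernel virtual
 * address, now returning void * instead of a bare integer. */
#define page_address(page) \
        ((void *) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)))

/* New pte_page(): the frame number is extracted from the PTE inline,
 * replacing the removed pte_pagenr() helper. */
#define pte_page(pte) \
        (mem_map + (unsigned long) (((pte) & _PFN_MASK) >> PAGE_SHIFT))

int main(void)
{
        unsigned long pte = 3UL << PAGE_SHIFT;   /* PTE naming frame 3 */
        struct page *pg = pte_page(pte);

        /* page_address() is now void *, so an integer address requires an
         * explicit cast, as in the flush_icache_page() hunk above. */
        unsigned long vaddr = (unsigned long) page_address(pg);

        printf("frame %ld maps to virtual address %#lx\n",
               (long) (pg - mem_map), vaddr);
        return 0;
}

ZERO_PAGE() is not modeled here; virt_to_page() performs the inverse mapping of page_address(), taking a kernel virtual address back to its mem_map[] entry.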