patch-2.4.0-test10 linux/mm/memory.c
- Lines: 109
- Date: Mon Oct 30 14:32:57 2000
- Orig file: v2.4.0-test9/linux/mm/memory.c
- Orig date: Sun Oct 8 10:50:38 2000
diff -u --recursive --new-file v2.4.0-test9/linux/mm/memory.c linux/mm/memory.c
@@ -215,30 +215,30 @@
 				/* copy_one_pte */
 				if (pte_none(pte))
-					goto cont_copy_pte_range;
+					goto cont_copy_pte_range_noset;
 				if (!pte_present(pte)) {
 					swap_duplicate(pte_to_swp_entry(pte));
-					set_pte(dst_pte, pte);
 					goto cont_copy_pte_range;
 				}
 				ptepage = pte_page(pte);
 				if ((!VALID_PAGE(ptepage)) ||
-				    PageReserved(ptepage)) {
-					set_pte(dst_pte, pte);
+				    PageReserved(ptepage))
 					goto cont_copy_pte_range;
-				}
+
 				/* If it's a COW mapping, write protect it both in the parent and the child */
 				if (cow) {
-					pte = pte_wrprotect(pte);
-					set_pte(src_pte, pte);
+					ptep_clear_wrprotect(src_pte);
+					pte = *src_pte;
 				}
+
 				/* If it's a shared mapping, mark it clean in the child */
 				if (vma->vm_flags & VM_SHARED)
 					pte = pte_mkclean(pte);
-				set_pte(dst_pte, pte_mkold(pte));
+				pte = pte_mkold(pte);
 				get_page(ptepage);
-
-cont_copy_pte_range:		address += PAGE_SIZE;
+
+cont_copy_pte_range:		set_pte(dst_pte, pte);
+cont_copy_pte_range_noset:	address += PAGE_SIZE;
 				if (address >= end)
 					goto out;
 				src_pte++;
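
This hunk collapses the three duplicated set_pte(dst_pte, ...) calls into a single write-back site: every branch that has an entry to copy now falls through (or jumps) to the one set_pte() at cont_copy_pte_range, while empty entries skip it via the new cont_copy_pte_range_noset label. The COW path also switches to write-protecting the source entry in place and re-reading it, instead of writing back a modified local copy. A minimal userspace sketch of that control flow, with stand-in types and flag bits in place of the kernel's:

#include <stdio.h>

typedef unsigned long pte_t;			/* stand-in, not the kernel type */
#define PTE_PRESENT	0x1UL
#define PTE_WRITE	0x2UL

static void set_pte(pte_t *dst, pte_t val) { *dst = val; }

static void copy_one_pte(pte_t *dst, pte_t *src, int cow)
{
	pte_t pte = *src;

	if (pte == 0)				/* pte_none(): nothing to copy  */
		goto cont_copy_pte_range_noset;
	if (!(pte & PTE_PRESENT))		/* swap entry: copy it verbatim */
		goto cont_copy_pte_range;
	if (cow) {
		*src &= ~PTE_WRITE;		/* write-protect the source...  */
		pte = *src;			/* ...then re-read it, as the   */
	}					/* hunk does via ptep_clear_wrprotect */
cont_copy_pte_range:
	set_pte(dst, pte);			/* the single set_pte() site    */
cont_copy_pte_range_noset:
	return;
}

int main(void)
{
	pte_t src = PTE_PRESENT | PTE_WRITE, dst = 0;
	copy_one_pte(&dst, &src, 1);
	printf("src=%#lx dst=%#lx\n", src, dst);	/* both end up read-only */
	return 0;
}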
@@ -306,10 +306,9 @@
 		pte_t page;
 		if (!size)
 			break;
-		page = *pte;
+		page = ptep_get_and_clear(pte);
 		pte++;
 		size--;
-		pte_clear(pte-1);
 		if (pte_none(page))
 			continue;
 		freed += free_pte(page);
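
This hunk, and the two that follow it, fold the read-the-old-entry-then-pte_clear() sequence into the new ptep_get_and_clear() primitive. With the split sequence, another CPU's hardware page-table walker can set the accessed or dirty bit between the read and the clear, and that update is silently lost; an atomic exchange closes the window. A rough userspace analogue using C11 atomics (get_and_clear() is a stand-in name, not a kernel interface):

#include <stdatomic.h>
#include <stdio.h>

/* Userspace analogue of ptep_get_and_clear(): the old value is
 * returned and the entry zeroed in one atomic exchange, so a bit set
 * by another CPU between "read" and "clear" cannot be dropped. */
static unsigned long get_and_clear(_Atomic unsigned long *pte)
{
	return atomic_exchange(pte, 0);
}

int main(void)
{
	_Atomic unsigned long pte = 0x1067;	/* page-frame bits + flags */
	unsigned long old = get_and_clear(&pte);
	printf("old=%#lx now=%#lx\n", old, atomic_load(&pte));
	return 0;
}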
@@ -642,7 +641,7 @@
 		end = PMD_SIZE;
 	do {
 		pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
-		pte_t oldpage = *pte;
+		pte_t oldpage = ptep_get_and_clear(pte);
 		set_pte(pte, zero_pte);
 		forget_pte(oldpage);
 		address += PAGE_SIZE;
@@ -712,8 +711,8 @@
 		end = PMD_SIZE;
 	do {
 		struct page *page;
-		pte_t oldpage = *pte;
-		pte_clear(pte);
+		pte_t oldpage;
+		oldpage = ptep_get_and_clear(pte);
 		page = virt_to_page(__va(phys_addr));
 		if ((!VALID_PAGE(page)) || PageReserved(page))
@@ -746,6 +745,7 @@
 	return 0;
 }
 
+/* Note: this is only safe if the mm semaphore is held when called. */
 int remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
 	int error = 0;
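
The added comment documents a precondition rather than a behaviour change: remap_page_range() walks and populates page tables, so its caller must already hold the mm semaphore. In 2.4 the typical caller is a driver's ->mmap() handler, which do_mmap_pgoff() invokes with mmap_sem held. A hedged usage sketch (mydrv_mmap() and MYDRV_PHYS_BASE are made up for illustration):

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>

#define MYDRV_PHYS_BASE	0xfd000000UL	/* hypothetical device memory */

/* A driver's ->mmap() runs with current->mm's semaphore already taken
 * by the caller, which is what makes remap_page_range() safe here. */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (remap_page_range(vma->vm_start, MYDRV_PHYS_BASE,
			     size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}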
@@ -781,8 +781,8 @@
  */
 static inline void establish_pte(struct vm_area_struct * vma, unsigned long address, pte_t *page_table, pte_t entry)
 {
-	flush_tlb_page(vma, address);
 	set_pte(page_table, entry);
+	flush_tlb_page(vma, address);
 	update_mmu_cache(vma, address, entry);
 }
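
Swapping the order of set_pte() and flush_tlb_page() matters on SMP. With the old flush-first order, another CPU could refill its TLB from the still-visible old entry in the window between the flush and the write, then keep using the stale translation indefinitely. Writing the entry first means a refill in that window can only pick up the new mapping. A minimal sketch of the corrected ordering, with stand-in types and a stubbed flush:

typedef unsigned long pte_t;			/* stand-in type */

static void set_pte(pte_t *p, pte_t v) { *p = v; }
static void flush_tlb_page_stub(void) { /* IPI + invlpg in reality */ }

/* Publish the new entry, then shoot down stale TLB copies: a
 * concurrent TLB refill now loads the new entry at worst. */
static void establish_pte_sketch(pte_t *page_table, pte_t entry)
{
	set_pte(page_table, entry);
	flush_tlb_page_stub();
}

int main(void)
{
	pte_t pte = 0;
	establish_pte_sketch(&pte, 0x1067);
	return 0;
}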
@@ -867,7 +867,7 @@
 	/*
 	 * Re-check the pte - we dropped the lock
 	 */
-	if (pte_val(*page_table) == pte_val(pte)) {
+	if (pte_same(*page_table, pte)) {
 		if (PageReserved(old_page))
 			++mm->rss;
 		break_cow(vma, old_page, new_page, address, page_table);
@@ -1214,7 +1214,7 @@
 	 * didn't change from under us..
 	 */
 	spin_lock(&mm->page_table_lock);
-	if (pte_val(entry) == pte_val(*pte)) {
+	if (pte_same(entry, *pte)) {
 		if (write_access) {
 			if (!pte_write(entry))
 				return do_wp_page(mm, vma, address, pte, entry);
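
The last two hunks replace raw pte_val() comparisons with pte_same() in the re-check-after-reacquiring-the-lock paths (do_wp_page() above and handle_pte_fault() here). The question asked is the same, but routing it through an arch-defined helper lets ports whose pte_t spans more than one word, or whose pte_val() does not expose every bit, compare whole entries correctly. A hypothetical two-word layout illustrates the difference (this pte_t is invented for illustration, not any real architecture's):

#include <stdio.h>

/* Hypothetical two-word PTE where pte_val() only exposes the low
 * word; a raw pte_val() comparison would call these two equal. */
typedef struct { unsigned long pte_low, pte_high; } pte_t;

#define pte_val(x)	((x).pte_low)

static int pte_same(pte_t a, pte_t b)
{
	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

int main(void)
{
	pte_t a = { 0x1067, 0x0 }, b = { 0x1067, 0x1 };
	printf("pte_val match: %d, pte_same: %d\n",
	       pte_val(a) == pte_val(b), pte_same(a, b));	/* 1, 0 */
	return 0;
}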