patch-2.4.0-test9 linux/mm/filemap.c
- Lines: 274
- Date: Mon Oct 2 11:28:38 2000
- Orig file: v2.4.0-test8/linux/mm/filemap.c
- Orig date: Fri Sep 8 12:37:34 2000
diff -u --recursive --new-file v2.4.0-test8/linux/mm/filemap.c linux/mm/filemap.c
@@ -44,9 +44,8 @@
atomic_t page_cache_size = ATOMIC_INIT(0);
unsigned int page_hash_bits;
struct page **page_hash_table;
-struct list_head lru_cache;
-static spinlock_t pagecache_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t pagecache_lock = SPIN_LOCK_UNLOCKED;
/*
* NOTE: to avoid deadlocking you must never acquire the pagecache_lock with
* the pagemap_lru_lock held.
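The NOTE above fixes a global lock order: pagecache_lock first, pagemap_lru_lock second, never the reverse. A minimal user-space sketch of that discipline, using pthreads rather than kernel spinlocks (the names merely mirror the kernel's for readability):

/*
 * Two paths that respect the same global order can never deadlock
 * against each other; broken_reclaim() below violates the rule and
 * can ABBA-deadlock against the first function.
 */
#include <pthread.h>

static pthread_mutex_t pagecache_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pagemap_lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* OK: pagecache_lock taken first, pagemap_lru_lock nested inside. */
static void add_page_to_cache_and_lru(void)
{
	pthread_mutex_lock(&pagecache_lock);
	pthread_mutex_lock(&pagemap_lru_lock);
	/* ... insert into hash and LRU ... */
	pthread_mutex_unlock(&pagemap_lru_lock);
	pthread_mutex_unlock(&pagecache_lock);
}

/* Forbidden by the NOTE: pagecache_lock while pagemap_lru_lock is held. */
static void broken_reclaim(void)
{
	pthread_mutex_lock(&pagemap_lru_lock);
	pthread_mutex_lock(&pagecache_lock);	/* ABBA deadlock risk */
	pthread_mutex_unlock(&pagecache_lock);
	pthread_mutex_unlock(&pagemap_lru_lock);
}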
@@ -92,7 +91,7 @@
* sure the page is locked and that nobody else uses it - or that usage
* is safe.
*/
-static inline void __remove_inode_page(struct page *page)
+void __remove_inode_page(struct page *page)
{
remove_page_from_inode_queue(page);
remove_page_from_hash_queue(page);
@@ -235,141 +234,6 @@
spin_unlock(&pagecache_lock);
}
-/*
- * nr_dirty represents the number of dirty pages that we will write async
- * before doing sync writes. We can only do sync writes if we can
- * wait for IO (__GFP_IO set).
- */
-int shrink_mmap(int priority, int gfp_mask)
-{
- int ret = 0, count, nr_dirty;
- struct list_head * page_lru;
- struct page * page = NULL;
-
- count = nr_lru_pages / (priority + 1);
- nr_dirty = priority;
-
- /* we need pagemap_lru_lock for list_del() ... subtle code below */
- spin_lock(&pagemap_lru_lock);
- while (count > 0 && (page_lru = lru_cache.prev) != &lru_cache) {
- page = list_entry(page_lru, struct page, lru);
- list_del(page_lru);
-
- if (PageTestandClearReferenced(page))
- goto dispose_continue;
-
- count--;
- /*
- * Avoid unscalable SMP locking for pages we can
- * immediate tell are untouchable..
- */
- if (!page->buffers && page_count(page) > 1)
- goto dispose_continue;
-
- if (TryLockPage(page))
- goto dispose_continue;
-
- /* Release the pagemap_lru lock even if the page is not yet
- queued in any lru queue since we have just locked down
- the page so nobody else may SMP race with us running
- a lru_cache_del() (lru_cache_del() always run with the
- page locked down ;). */
- spin_unlock(&pagemap_lru_lock);
-
- /* avoid freeing the page while it's locked */
- page_cache_get(page);
-
- /*
- * Is it a buffer page? Try to clean it up regardless
- * of zone - it's old.
- */
- if (page->buffers) {
- int wait;
- /*
- * 0 - free it if can do so without IO
- * 1 - start write-out of dirty buffers
- * 2 - wait for locked buffers
- */
- wait = (gfp_mask & __GFP_IO) ? (nr_dirty-- < 0) ? 2 : 1 : 0;
- if (!try_to_free_buffers(page, wait))
- goto unlock_continue;
- /* page was locked, inode can't go away under us */
- if (!page->mapping) {
- atomic_dec(&buffermem_pages);
- goto made_buffer_progress;
- }
- }
-
- /* Take the pagecache_lock spinlock held to avoid
- other tasks to notice the page while we are looking at its
- page count. If it's a pagecache-page we'll free it
- in one atomic transaction after checking its page count. */
- spin_lock(&pagecache_lock);
-
- /*
- * We can't free pages unless there's just one user
- * (count == 2 because we added one ourselves above).
- */
- if (page_count(page) != 2)
- goto cache_unlock_continue;
-
- /*
- * Is it a page swap page? If so, we want to
- * drop it if it is no longer used, even if it
- * were to be marked referenced..
- */
- if (PageSwapCache(page)) {
- spin_unlock(&pagecache_lock);
- __delete_from_swap_cache(page);
- goto made_inode_progress;
- }
-
- /*
- * Page is from a zone we don't care about.
- * Don't drop page cache entries in vain.
- */
- if (page->zone->free_pages > page->zone->pages_high)
- goto cache_unlock_continue;
-
- /* is it a page-cache page? */
- if (page->mapping) {
- if (!PageDirty(page) && !pgcache_under_min()) {
- __remove_inode_page(page);
- spin_unlock(&pagecache_lock);
- goto made_inode_progress;
- }
- goto cache_unlock_continue;
- }
-
- printk(KERN_ERR "shrink_mmap: unknown LRU page!\n");
-
-cache_unlock_continue:
- spin_unlock(&pagecache_lock);
-unlock_continue:
- spin_lock(&pagemap_lru_lock);
- UnlockPage(page);
- page_cache_release(page);
-dispose_continue:
- list_add(page_lru, &lru_cache);
- }
- goto out;
-
-made_inode_progress:
- page_cache_release(page);
-made_buffer_progress:
- UnlockPage(page);
- page_cache_release(page);
- ret = 1;
- spin_lock(&pagemap_lru_lock);
- /* nr_lru_pages needs the spinlock */
- nr_lru_pages--;
-
-out:
- spin_unlock(&pagemap_lru_lock);
-
- return ret;
-}
-
static inline struct page * __find_page_nolock(struct address_space *mapping, unsigned long offset, struct page *page)
{
goto inside;
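The removed shrink_mmap() compressed its whole write-out policy into the nested ternary "wait = (gfp_mask & __GFP_IO) ? (nr_dirty-- < 0) ? 2 : 1 : 0;". A standalone restatement of just that expression; buffer_wait_level() is a name invented for this sketch, and the __GFP_IO value is illustrative, not the kernel's:

#include <stdio.h>

#define __GFP_IO 0x01	/* illustrative bit, not the kernel's value */

static int buffer_wait_level(int gfp_mask, int *nr_dirty)
{
	if (!(gfp_mask & __GFP_IO))
		return 0;	/* may not do I/O: free only if clean */
	if ((*nr_dirty)-- < 0)
		return 2;	/* budget spent: wait for locked buffers */
	return 1;		/* start async write-out of dirty buffers */
}

int main(void)
{
	int nr_dirty = 2;	/* corresponds to priority == 2 */

	for (int i = 0; i < 5; i++)
		printf("pass %d: wait level %d\n", i,
		       buffer_wait_level(__GFP_IO, &nr_dirty));
	printf("no-IO case: wait level %d\n",
	       buffer_wait_level(0, &nr_dirty));
	return 0;
}

Because nr_dirty is post-decremented, write-out stays asynchronous (level 1) for as long as the budget is non-negative and only then falls back to synchronous waiting (level 2), matching the "write async before doing sync writes" comment above.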
@@ -384,7 +248,14 @@
if (page->index == offset)
break;
}
- SetPageReferenced(page);
+ /*
+ * Touching the page may move it to the active list.
+ * If we end up with too few inactive pages, we wake
+ * up kswapd.
+ */
+ age_page_up(page);
+ if (inactive_shortage() > inactive_target / 2 && free_shortage())
+ wakeup_kswapd(0);
not_found:
return page;
}
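The lookup path now promotes pages on access (age_page_up()) and nudges kswapd when those promotions have drained the inactive list too far. A rough user-space model of just the wakeup condition; every variable and helper below is an illustrative stand-in, not the kernel's definition:

static unsigned long nr_inactive_pages;	/* pages on the inactive lists */
static unsigned long inactive_target;	/* desired inactive page count */
static unsigned long nr_free_pages;	/* free pages in the system */
static unsigned long freepages_low;	/* low watermark for free pages */

/* How many pages short of the inactive target are we? */
static unsigned long inactive_shortage_model(void)
{
	if (nr_inactive_pages < inactive_target)
		return inactive_target - nr_inactive_pages;
	return 0;
}

/* Are we short on free pages as well? */
static int free_shortage_model(void)
{
	return nr_free_pages < freepages_low;
}

/* Wake background reclaim only when the inactive list has fallen
 * below half its target AND free memory is short, so a single page
 * promotion does not ping kswapd on every cache hit. */
static int should_wakeup_kswapd(void)
{
	return inactive_shortage_model() > inactive_target / 2 &&
	       free_shortage_model();
}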
@@ -616,6 +487,7 @@
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
if (!PageLocked(page))
break;
+ run_task_queue(&tq_disk);
schedule();
} while (PageLocked(page));
tsk->state = TASK_RUNNING;
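The run_task_queue(&tq_disk) call is added before schedule() so that disk requests still sitting "plugged" in driver queues are submitted before the task sleeps; without it, the task could wait on a page whose I/O was queued but never actually started. A stand-in model of draining such a queue (the kernel's struct tq_struct differs in detail):

#include <stddef.h>

struct tq_entry {
	struct tq_entry *next;
	void (*routine)(void *data);	/* e.g. a driver's unplug function */
	void *data;
};

static struct tq_entry *tq_disk_model;	/* stand-in for tq_disk */

/* Drain the queue, running each deferred routine once, in the spirit
 * of run_task_queue(): afterwards, all plugged I/O has been kicked. */
static void run_task_queue_model(struct tq_entry **q)
{
	struct tq_entry *entry = *q;

	*q = NULL;
	while (entry) {
		struct tq_entry *next = entry->next;
		entry->routine(entry->data);
		entry = next;
	}
}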
@@ -739,6 +611,53 @@
#endif
/*
+ * We combine this with read-ahead to deactivate pages when we
+ * think there's sequential IO going on. Note that this is
+ * harmless since we don't actually evict the pages from memory
+ * but just move them to the inactive list.
+ *
+ * TODO:
+ * - make the readahead code smarter
+ * - move readahead to the VMA level so we can do the same
+ * trick with mmap()
+ *
+ * Rik van Riel, 2000
+ */
+static void drop_behind(struct file * file, unsigned long index)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct address_space *mapping = inode->i_mapping;
+ struct page **hash;
+ struct page *page;
+ unsigned long start;
+
+ /* Nothing to drop-behind if we're on the first page. */
+ if (!index)
+ return;
+
+ if (index > file->f_rawin)
+ start = index - file->f_rawin;
+ else
+ start = 0;
+
+ /*
+ * Go backwards from index-1 and drop all pages in the
+ * readahead window. Since the readahead window may have
+ * been increased since the last time we were called, we
+ * stop when the page isn't there.
+ */
+ spin_lock(&pagecache_lock);
+ while (--index >= start) {
+ hash = page_hash(mapping, index);
+ page = __find_page_nolock(mapping, index, *hash);
+ if (!page)
+ break;
+ deactivate_page(page);
+ }
+ spin_unlock(&pagecache_lock);
+}
+
+/*
* Read-ahead profiling information
* --------------------------------
* Every PROFILE_MAXREADCOUNT, the following information is written
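As its comment stresses, drop_behind() only moves pages to the inactive list; nothing is evicted outright. The closest user-space analogue today is hinting the kernel about bytes already consumed, sketched below (posix_fadvise() is the standard interface; the 1 MB window is an arbitrary choice for the example):

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <unistd.h>

#define DROP_WINDOW	(1 << 20)	/* keep the last 1 MB cached */

/* Sequential read that hints the kernel to reclaim pages far behind
 * the read cursor, in the spirit of the kernel-side drop_behind(). */
static ssize_t read_drop_behind(int fd, void *buf, size_t len, off_t *pos)
{
	ssize_t n = pread(fd, buf, len, *pos);

	if (n > 0) {
		*pos += n;
		if (*pos > DROP_WINDOW)
			posix_fadvise(fd, 0, *pos - DROP_WINDOW,
				      POSIX_FADV_DONTNEED);
	}
	return n;
}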
@@ -961,6 +880,12 @@
if (filp->f_ramax > max_readahead)
filp->f_ramax = max_readahead;
+ /*
+ * Move the pages that have already been passed
+ * to the inactive list.
+ */
+ drop_behind(filp, index);
+
#ifdef PROFILE_READAHEAD
profile_readahead((reada_ok == 2), filp);
#endif
@@ -1999,10 +1924,10 @@
* Application no longer needs these pages. If the pages are dirty,
* it's OK to just throw them away. The app will be more careful about
* data it wants to keep. Be sure to free swap resources too. The
- * zap_page_range call sets things up for shrink_mmap to actually free
+ * zap_page_range call sets things up for refill_inactive to actually free
* these pages later if no one else has touched them in the meantime,
* although we could add these pages to a global reuse list for
- * shrink_mmap to pick up before reclaiming other pages.
+ * refill_inactive to pick up before reclaiming other pages.
*
* NB: This interface discards data rather than pushes it out to swap,
* as some implementations do. This has performance implications for
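The comment being updated here documents the MADV_DONTNEED semantics (the handler itself lies outside this hunk). A minimal caller showing the documented behavior, that dirty data is discarded rather than pushed to swap; after the call the anonymous pages read back as zeroes:

#define _DEFAULT_SOURCE
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	memset(p, 0xab, len);		/* dirty the anonymous pages */
	madvise(p, len, MADV_DONTNEED);	/* "no longer needs these pages" */

	/* On Linux the old contents are gone: p[0] reads back as 0,
	 * so the program exits 0 only if the data was discarded. */
	return p[0];
}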
@@ -2527,6 +2452,7 @@
unlock:
/* Mark it unlocked again and drop the page.. */
UnlockPage(page);
+ deactivate_page(page);
page_cache_release(page);
if (status < 0)