patch-2.4.0-test9 linux/fs/buffer.c
- Lines: 223
- Date: Mon Oct 2 12:03:34 2000
- Orig file: v2.4.0-test8/linux/fs/buffer.c
- Orig date: Wed Sep 6 08:29:45 2000
diff -u --recursive --new-file v2.4.0-test8/linux/fs/buffer.c linux/fs/buffer.c
@@ -35,6 +35,7 @@
#include <linux/locks.h>
#include <linux/errno.h>
#include <linux/swap.h>
+#include <linux/swapctl.h>
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
@@ -409,8 +410,9 @@
*/
#define _hashfn(dev,block) \
((((dev)<<(bh_hash_shift - 6)) ^ ((dev)<<(bh_hash_shift - 9))) ^ \
- (((block)<<(bh_hash_shift - 6)) ^ ((block) >> 13) ^ ((block) << (bh_hash_shift - 12))))
-#define hash(dev,block) hash_table[(_hashfn(dev,block) & bh_hash_mask)]
+ (((block)<<(bh_hash_shift - 6)) ^ ((block) >> 13) ^ \
+ ((block) << (bh_hash_shift - 12))))
+#define hash(dev,block) hash_table[(_hashfn(HASHDEV(dev),block) & bh_hash_mask)]
static __inline__ void __hash_link(struct buffer_head *bh, struct buffer_head **head)
{
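
The new hash() macro passes the device number through HASHDEV() before mixing it with the block number. For illustration, here is a standalone user-space model of the shift-and-xor hash; the table-size constant (bh_hash_shift = 15) and the HASHDEV() stub are assumptions, since their real definitions sit outside this hunk:

#include <stdio.h>

#define BH_HASH_SHIFT 15                        /* assumed table size: 2^15 buckets */
#define BH_HASH_MASK  ((1u << BH_HASH_SHIFT) - 1)
#define HASHDEV(dev)  ((unsigned int)(dev))     /* stub for the kernel macro */

static unsigned int bufhash(unsigned int dev, unsigned long block)
{
	dev = HASHDEV(dev);
	return (((dev << (BH_HASH_SHIFT - 6)) ^ (dev << (BH_HASH_SHIFT - 9))) ^
		((block << (BH_HASH_SHIFT - 6)) ^ (block >> 13) ^
		 (block << (BH_HASH_SHIFT - 12)))) & BH_HASH_MASK;
}

int main(void)
{
	/* Consecutive blocks on the same device should scatter across buckets. */
	printf("%u %u %u\n", bufhash(0x0301, 0), bufhash(0x0301, 1), bufhash(0x0301, 2));
	return 0;
}
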
@@ -856,23 +858,35 @@
/* -1 -> no need to flush
0 -> async flush
1 -> sync flush (wait for I/O completation) */
-static int balance_dirty_state(kdev_t dev)
+int balance_dirty_state(kdev_t dev)
{
unsigned long dirty, tot, hard_dirty_limit, soft_dirty_limit;
+ int shortage;
dirty = size_buffers_type[BUF_DIRTY] >> PAGE_SHIFT;
tot = nr_free_buffer_pages();
- tot -= size_buffers_type[BUF_PROTECTED] >> PAGE_SHIFT;
dirty *= 200;
soft_dirty_limit = tot * bdf_prm.b_un.nfract;
hard_dirty_limit = soft_dirty_limit * 2;
+ /* First, check for the "real" dirty limit. */
if (dirty > soft_dirty_limit) {
if (dirty > hard_dirty_limit)
return 1;
return 0;
}
+
+ /*
+ * If we are about to get low on free pages and
+ * cleaning the inactive_dirty pages would help
+ * fix this, wake up bdflush.
+ */
+ shortage = free_shortage();
+ if (shortage && nr_inactive_dirty_pages > shortage &&
+ nr_inactive_dirty_pages > freepages.high)
+ return 0;
+
return -1;
}
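
balance_dirty_state() is no longer static, and now returns 0 (async flush) not only when the soft dirty limit is exceeded but also when free memory is short and there are enough inactive-dirty pages for a flush to help. A user-space sketch of the decision, with the kernel's accounting (free_shortage(), nr_inactive_dirty_pages, freepages.high, bdf_prm) stubbed into a plain struct:

#include <stdio.h>

/* Stub inputs; in the kernel these come from the buffer and VM accounting. */
struct vm_state {
	unsigned long dirty_pages;       /* size_buffers_type[BUF_DIRTY] in pages */
	unsigned long free_buffer_pages; /* nr_free_buffer_pages() */
	unsigned long shortage;          /* free_shortage() */
	unsigned long inactive_dirty;    /* nr_inactive_dirty_pages */
	unsigned long freepages_high;    /* freepages.high watermark */
	unsigned int  nfract;            /* bdf_prm.b_un.nfract */
};

/* -1 -> no flush needed, 0 -> async flush, 1 -> sync flush */
static int balance_dirty_state_model(const struct vm_state *vm)
{
	unsigned long dirty = vm->dirty_pages * 200;
	unsigned long soft = vm->free_buffer_pages * vm->nfract;
	unsigned long hard = soft * 2;

	/* First, the "real" dirty limits. */
	if (dirty > soft)
		return dirty > hard ? 1 : 0;

	/* New in this patch: if free pages are short and cleaning
	 * inactive-dirty pages would help, ask for an async flush. */
	if (vm->shortage && vm->inactive_dirty > vm->shortage &&
	    vm->inactive_dirty > vm->freepages_high)
		return 0;

	return -1;
}

int main(void)
{
	struct vm_state vm = { 100, 10000, 50, 200, 128, 40 };
	printf("decision: %d\n", balance_dirty_state_model(&vm));
	return 0;
}
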
@@ -1380,6 +1394,19 @@
}
/*
+ * NOTE! All mapped/uptodate combinations are valid:
+ *
+ * Mapped Uptodate Meaning
+ *
+ * No No "unknown" - must do get_block()
+ * No Yes "hole" - zero-filled
+ * Yes No "allocated" - allocated on disk, not read in
+ * Yes Yes "valid" - allocated and up-to-date in memory.
+ *
+ * "Dirty" is valid only with the last case (mapped+uptodate).
+ */
+
+/*
* block_write_full_page() is SMP-safe - currently it's still
* being called with the kernel lock held, but the code is ready.
*/
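
The table above maps directly onto the BH_Mapped and BH_Uptodate bits. A toy classifier, with illustrative bit positions and a stripped-down buffer_head so it compiles outside the kernel:

#include <stdio.h>

/* Illustrative bit positions; the real ones live in <linux/fs.h>. */
enum { BH_Uptodate = 0, BH_Mapped = 4 };

struct buffer_head { unsigned long b_state; };

static const char *classify(const struct buffer_head *bh)
{
	int mapped   = !!(bh->b_state & (1UL << BH_Mapped));
	int uptodate = !!(bh->b_state & (1UL << BH_Uptodate));

	if (!mapped && !uptodate) return "unknown - must do get_block()";
	if (!mapped &&  uptodate) return "hole - zero-filled";
	if ( mapped && !uptodate) return "allocated on disk, not read in";
	return "valid - allocated and up-to-date in memory";
}

int main(void)
{
	struct buffer_head bh = { (1UL << BH_Mapped) | (1UL << BH_Uptodate) };
	printf("%s\n", classify(&bh));
	return 0;
}
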
@@ -1471,6 +1498,10 @@
goto out;
if (buffer_new(bh)) {
unmap_underlying_metadata(bh);
+ if (Page_Uptodate(page)) {
+ set_bit(BH_Uptodate, &bh->b_state);
+ continue;
+ }
if (block_end > to)
memset(kaddr+to, 0, block_end-to);
if (block_start < from)
@@ -1480,6 +1511,10 @@
continue;
}
}
+ if (Page_Uptodate(page)) {
+ set_bit(BH_Uptodate, &bh->b_state);
+ continue;
+ }
if (!buffer_uptodate(bh) &&
(block_start < from || block_end > to)) {
ll_rw_block(READ, 1, &bh);
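
Both new Page_Uptodate() checks in __block_prepare_write() implement the same shortcut: if the whole page is already up to date, each buffer on it can simply be marked up to date instead of being zero-filled at the edges or read back from disk. A simplified per-buffer model of that decision, with the page and buffer flags reduced to plain ints:

#include <string.h>

struct page_model   { int uptodate; char data[4096]; };
struct buffer_model { int uptodate; int mapped; int is_new; };

/* Decision for one buffer covering [block_start, block_end) of a page,
 * where the write itself spans [from, to). */
static void prepare_buffer(struct page_model *page, struct buffer_model *bh,
			   unsigned block_start, unsigned block_end,
			   unsigned from, unsigned to)
{
	if (bh->is_new) {
		if (page->uptodate) {		/* new check in this patch */
			bh->uptodate = 1;
			return;
		}
		/* Page not up to date: zero the parts outside the write. */
		if (block_end > to)
			memset(page->data + to, 0, block_end - to);
		if (block_start < from)
			memset(page->data + block_start, 0, from - block_start);
		return;
	}
	if (page->uptodate) {			/* new check in this patch */
		bh->uptodate = 1;
		return;
	}
	if (!bh->uptodate && (block_start < from || block_end > to)) {
		/* The kernel would ll_rw_block(READ, 1, &bh) and wait here. */
	}
}

int main(void)
{
	struct page_model pg = { 1, { 0 } };
	struct buffer_model bh = { 0, 1, 0 };

	prepare_buffer(&pg, &bh, 0, 1024, 100, 900);
	return bh.uptodate ? 0 : 1;	/* page was up to date, so the buffer is now too */
}
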
@@ -1574,8 +1609,10 @@
continue;
if (!buffer_mapped(bh)) {
- if (iblock < lblock)
- get_block(inode, iblock, bh, 0);
+ if (iblock < lblock) {
+ if (get_block(inode, iblock, bh, 0))
+ continue;
+ }
if (!buffer_mapped(bh)) {
if (!kaddr)
kaddr = kmap(page);
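
In block_read_full_page(), a failing get_block() now skips the block instead of falling through to the hole-handling code, so an unmappable block is left !uptodate rather than being zero-filled and presented as a hole. A small standalone model of the per-block decision (get_block_stub() and the block numbers used in main() are invented for the example):

#include <string.h>

struct rd_buf { int mapped; int uptodate; char data[512]; };

/* Stub for get_block(): block 7 fails, block 3 is a hole, the rest map. */
static int get_block_stub(unsigned long iblock, struct rd_buf *bh)
{
	if (iblock == 7)
		return -5;			/* pretend -EIO */
	if (iblock != 3)
		bh->mapped = 1;
	return 0;
}

/* One iteration of the per-block loop in a readpage, after the patch. */
static void read_one_block(unsigned long iblock, unsigned long lblock,
			   struct rd_buf *bh)
{
	if (bh->mapped)
		return;				/* queued for ll_rw_block() later */
	if (iblock < lblock) {
		if (get_block_stub(iblock, bh))
			return;			/* error: skip, leave !uptodate */
	}
	if (!bh->mapped) {			/* a real hole: zero-fill it */
		memset(bh->data, 0, sizeof(bh->data));
		bh->uptodate = 1;
	}
}

int main(void)
{
	struct rd_buf hole = { 0 }, bad = { 0 };

	read_one_block(3, 100, &hole);		/* hole inside i_size */
	read_one_block(7, 100, &bad);		/* mapping failure */
	return (hole.uptodate && !bad.uptodate) ? 0 : 1;
}
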
@@ -1758,17 +1795,27 @@
pos += blocksize;
}
+ err = 0;
+ if (!buffer_mapped(bh)) {
+ /* Hole? Nothing to do */
+ if (buffer_uptodate(bh))
+ goto unlock;
+ get_block(inode, iblock, bh, 0);
+ /* Still unmapped? Nothing to do */
+ if (!buffer_mapped(bh))
+ goto unlock;
+ }
+
+ /* Ok, it's mapped. Make sure it's up-to-date */
+ if (Page_Uptodate(page))
+ set_bit(BH_Uptodate, &bh->b_state);
+
+ bh->b_end_io = end_buffer_io_sync;
if (!buffer_uptodate(bh)) {
- err = 0;
- if (!buffer_mapped(bh)) {
- get_block(inode, iblock, bh, 0);
- if (!buffer_mapped(bh))
- goto unlock;
- }
err = -EIO;
- bh->b_end_io = end_buffer_io_sync;
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
+ /* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
goto unlock;
}
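
block_truncate_page() is restructured so that holes bail out early, an up-to-date page marks the buffer up to date, and only a genuinely stale mapped buffer is read synchronously before the tail of the block is zeroed. A compact model of that control flow, with get_block() and the synchronous read replaced by stubs:

#include <stdio.h>

struct tr_buf { int mapped; int uptodate; };

/* Stubs: get_block() that finds a hole, and a synchronous read that works. */
static void get_block_stub(struct tr_buf *bh) { (void)bh; }
static void read_block_stub(struct tr_buf *bh) { bh->uptodate = 1; }

/* Returns 1 if the partial block must be zeroed and marked dirty,
 * 0 if there is nothing to do (a hole), negative on a read error. */
static int truncate_block_model(struct tr_buf *bh, int page_uptodate)
{
	if (!bh->mapped) {
		if (bh->uptodate)		/* hole, already up to date */
			return 0;
		get_block_stub(bh);		/* get_block(..., create = 0) */
		if (!bh->mapped)		/* still a hole: nothing to do */
			return 0;
	}

	/* Mapped: an up-to-date page makes this buffer up to date as well. */
	if (page_uptodate)
		bh->uptodate = 1;

	if (!bh->uptodate) {
		read_block_stub(bh);		/* ll_rw_block(READ) + wait */
		if (!bh->uptodate)
			return -5;		/* read error: complain and punt */
	}
	return 1;				/* zero the tail of the block */
}

int main(void)
{
	struct tr_buf hole = { 0, 0 }, mapped = { 1, 0 };

	printf("hole=%d mapped=%d\n",
	       truncate_block_model(&hole, 0), truncate_block_model(&mapped, 1));
	return 0;
}
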
@@ -2152,6 +2199,7 @@
page = alloc_page(GFP_BUFFER);
if (!page)
goto out;
+ LockPage(page);
bh = create_buffers(page, size, 0);
if (!bh)
goto no_buffer_head;
@@ -2184,10 +2232,12 @@
page->buffers = bh;
page->flags &= ~(1 << PG_referenced);
lru_cache_add(page);
+ UnlockPage(page);
atomic_inc(&buffermem_pages);
return 1;
no_buffer_head:
+ UnlockPage(page);
page_cache_release(page);
out:
return 0;
@@ -2244,7 +2294,9 @@
{
struct buffer_head * tmp, * bh = page->buffers;
int index = BUFSIZE_INDEX(bh->b_size);
+ int loop = 0;
+cleaned_buffers_try_again:
spin_lock(&lru_list_lock);
write_lock(&hash_table_lock);
spin_lock(&free_list[index].lock);
@@ -2290,8 +2342,14 @@
spin_unlock(&free_list[index].lock);
write_unlock(&hash_table_lock);
spin_unlock(&lru_list_lock);
- if (wait)
+ if (wait) {
sync_page_buffers(bh, wait);
+ /* We waited synchronously, so we can free the buffers. */
+ if (wait > 1 && !loop) {
+ loop = 1;
+ goto cleaned_buffers_try_again;
+ }
+ }
return 0;
}
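
try_to_free_buffers() now retries once after a synchronous wait: having written the buffers back with wait > 1, the freshly cleaned buffers can usually be freed on the second pass. A sketch of the retry pattern, with the locking and the buffer-ring walk abstracted away:

#include <stdio.h>

struct fb_page { int busy_buffers; int dirty_buffers; };

/* Stub: write the page's buffers back and wait for the I/O. */
static void sync_page_buffers_stub(struct fb_page *page)
{
	page->dirty_buffers = 0;
	page->busy_buffers = 0;
}

/* Returns 1 if the buffers could be freed, 0 otherwise. */
static int try_to_free_buffers_model(struct fb_page *page, int wait)
{
	int loop = 0;

try_again:
	if (!page->busy_buffers && !page->dirty_buffers)
		return 1;			/* everything idle: free them */

	if (wait) {
		sync_page_buffers_stub(page);
		/* We waited synchronously, so the buffers should now be
		 * freeable: go around exactly once more. */
		if (wait > 1 && !loop) {
			loop = 1;
			goto try_again;
		}
	}
	return 0;
}

int main(void)
{
	struct fb_page page = { 1, 1 };

	printf("wait=2: %d\n", try_to_free_buffers_model(&page, 2));
	return 0;
}
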
@@ -2609,6 +2667,8 @@
CHECK_EMERGENCY_SYNC
flushed = flush_dirty_buffers(0);
+ if (free_shortage())
+ flushed += page_launder(GFP_BUFFER, 0);
/* If wakeup_bdflush will wakeup us
after our bdflush_done wakeup, then
@@ -2619,14 +2679,16 @@
(as we would be sleeping) and so it would
deadlock in SMP. */
__set_current_state(TASK_INTERRUPTIBLE);
- wake_up(&bdflush_done);
+ wake_up_all(&bdflush_done);
/*
* If there are still a lot of dirty buffers around,
* skip the sleep and flush some more. Otherwise, we
* go to sleep waiting a wakeup.
*/
- if (!flushed || balance_dirty_state(NODEV) < 0)
+ if (!flushed || balance_dirty_state(NODEV) < 0) {
+ run_task_queue(&tq_disk);
schedule();
+ }
/* Remember to mark us as running otherwise
the next schedule will block. */
__set_current_state(TASK_RUNNING);
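
Taken together, the bdflush changes make the daemon launder inactive-dirty pages when memory is short, wake all waiters rather than just one, and kick the disk task queue before sleeping so queued writes are actually submitted. A stubbed single pass of the loop's decision logic:

#include <stdio.h>

/* Stub inputs standing in for the kernel's accounting. */
static int flush_dirty_buffers_stub(void) { return 0; }   /* nothing flushed */
static int free_shortage_stub(void)       { return 1; }   /* memory is short */
static int page_launder_stub(void)        { return 8; }   /* cleaned 8 pages */
static int balance_dirty_state_stub(void) { return -1; }  /* no flush needed */

/* One pass of the bdflush main loop after the patch. */
static void bdflush_pass(void)
{
	int flushed = flush_dirty_buffers_stub();

	if (free_shortage_stub())
		flushed += page_launder_stub();	/* new: help the VM directly */

	/* wake_up_all(&bdflush_done) would go here, waking every waiter */

	if (!flushed || balance_dirty_state_stub() < 0) {
		/* run_task_queue(&tq_disk): start the queued I/O, then sleep */
		printf("kick the disk queue, then schedule()\n");
	} else {
		printf("still too dirty, flush again without sleeping\n");
	}
}

int main(void)
{
	bdflush_pass();
	return 0;
}
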