patch-2.4.0-test6 linux/fs/buffer.c
- Lines: 213
- Date: Sun Aug 6 11:43:18 2000
- Orig file: v2.4.0-test5/linux/fs/buffer.c
- Orig date: Thu Jul 27 17:38:01 2000
diff -u --recursive --new-file v2.4.0-test5/linux/fs/buffer.c linux/fs/buffer.c
@@ -482,16 +482,12 @@
__remove_from_lru_list(bh, bh->b_list);
}
-static void insert_into_queues(struct buffer_head *bh)
+static void __insert_into_queues(struct buffer_head *bh)
{
struct buffer_head **head = &hash(bh->b_dev, bh->b_blocknr);
- spin_lock(&lru_list_lock);
- write_lock(&hash_table_lock);
__hash_link(bh, head);
__insert_into_lru_list(bh, bh->b_list);
- write_unlock(&hash_table_lock);
- spin_unlock(&lru_list_lock);
}
/* This function must only run if there are no other
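The locking that insert_into_queues() used to take by itself moves out to the caller; the double-underscore rename follows the usual kernel convention that __foo() expects its caller to already hold the locks that foo() would take. As the getblk() hunks further down show, the caller now wraps both lru_list_lock and hash_table_lock around the lookup and the insertion. A minimal sketch of the convention, with made-up names (not from buffer.c):

    #include <linux/spinlock.h>

    struct entry { struct entry *next; };
    static struct entry *list_head;
    static spinlock_t list_lock = SPIN_LOCK_UNLOCKED;

    /* Double underscore: caller must already hold list_lock. */
    static void __add_entry(struct entry *e)
    {
            e->next = list_head;
            list_head = e;
    }

    /* Public wrapper: takes and drops the lock itself. */
    static void add_entry(struct entry *e)
    {
            spin_lock(&list_lock);
            __add_entry(e);
            spin_unlock(&list_lock);
    }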
@@ -524,19 +520,27 @@
* will force it bad). This shouldn't really happen currently, but
* the code is ready.
*/
-struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
+static inline struct buffer_head * __get_hash_table(kdev_t dev, int block, int size)
{
- struct buffer_head **head = &hash(dev, block);
- struct buffer_head *bh;
+ struct buffer_head *bh = hash(dev, block);
- read_lock(&hash_table_lock);
- for(bh = *head; bh; bh = bh->b_next)
+ for (; bh; bh = bh->b_next)
if (bh->b_blocknr == block &&
bh->b_size == size &&
bh->b_dev == dev)
break;
if (bh)
atomic_inc(&bh->b_count);
+
+ return bh;
+}
+
+struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
+{
+ struct buffer_head *bh;
+
+ read_lock(&hash_table_lock);
+ bh = __get_hash_table(dev, block, size);
read_unlock(&hash_table_lock);
return bh;
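Pieced together, the post-patch lookup reads as below. Everything is taken from the hunk above; only the layout the diff elides (blank lines and the closing brace) is filled in, so treat the exact formatting as a reconstruction. The unlocked __get_hash_table() is what getblk() will call while already holding the locks:

    static inline struct buffer_head * __get_hash_table(kdev_t dev, int block, int size)
    {
            struct buffer_head *bh = hash(dev, block);

            for (; bh; bh = bh->b_next)
                    if (bh->b_blocknr == block &&
                        bh->b_size == size &&
                        bh->b_dev == dev)
                            break;
            if (bh)
                    atomic_inc(&bh->b_count);
            return bh;
    }

    struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
    {
            struct buffer_head *bh;

            read_lock(&hash_table_lock);
            bh = __get_hash_table(dev, block, size);
            read_unlock(&hash_table_lock);
            return bh;
    }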
@@ -804,7 +808,9 @@
int isize;
repeat:
- bh = get_hash_table(dev, block, size);
+ spin_lock(&lru_list_lock);
+ write_lock(&hash_table_lock);
+ bh = __get_hash_table(dev, block, size);
if (bh)
goto out;
@@ -829,8 +835,10 @@
bh->b_state = 1 << BH_Mapped;
/* Insert the buffer into the regular lists */
- insert_into_queues(bh);
+ __insert_into_queues(bh);
out:
+ write_unlock(&hash_table_lock);
+ spin_unlock(&lru_list_lock);
touch_buffer(bh);
return bh;
}
@@ -839,6 +847,8 @@
* If we block while refilling the free list, somebody may
* create the buffer first ... search the hashes again.
*/
+ write_unlock(&hash_table_lock);
+ spin_unlock(&lru_list_lock);
refill_freelist(size);
goto repeat;
}
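These three getblk() hunks are the point of the whole locking rework: the hash lookup, the free-buffer grab and the insertion now happen inside a single lru_list_lock/hash_table_lock critical section, so no other CPU can slip an identical (dev, block) buffer into the hash between our failed lookup and our insertion. When the free list is empty, both locks must be dropped before refill_freelist(), which can sleep, and the whole search restarts. A condensed reconstruction of the resulting control flow (the free-list scan is elided; free_buffers_left() is a hypothetical stand-in for it):

    struct buffer_head * getblk(kdev_t dev, int block, int size)
    {
            struct buffer_head *bh;

    repeat:
            spin_lock(&lru_list_lock);
            write_lock(&hash_table_lock);
            bh = __get_hash_table(dev, block, size);
            if (bh)
                    goto out;

            if (!free_buffers_left(size)) {
                    /* refill_freelist() may block: never sleep holding spinlocks */
                    write_unlock(&hash_table_lock);
                    spin_unlock(&lru_list_lock);
                    refill_freelist(size);
                    goto repeat;    /* somebody may have created it meanwhile */
            }

            /* ... take a buffer off the free list into bh, set it up ... */
            bh->b_state = 1 << BH_Mapped;
            __insert_into_queues(bh);
    out:
            write_unlock(&hash_table_lock);
            spin_unlock(&lru_list_lock);
            touch_buffer(bh);
            return bh;
    }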
@@ -1139,7 +1149,7 @@
*/
bh->b_data = (char *)(0 + offset);
else
- bh->b_data = (char *)(page_address(page) + offset);
+ bh->b_data = page_address(page) + offset;
}
/*
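Only a cast disappears here, but it reflects a test6-wide change: page_address() now evaluates to a pointer rather than an unsigned long, so its result converts to char * implicitly, and the added line leans on GCC's void * arithmetic extension, as kernel code routinely does. The same cast removal recurs in several hunks below. In miniature (a fragment; the prototype shown is only shorthand for whatever macro the headers actually use):

    void *page_address(struct page *page);       /* test6: a pointer, not a long */

    char *kaddr = page_address(page);            /* implicit void * -> char *    */
    bh->b_data = page_address(page) + offset;    /* void * arithmetic (GCC)      */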
@@ -1341,13 +1351,27 @@
page_cache_get(page);
}
+/*
+ * We are taking a block for data and we don't want any output from any
+ * buffer-cache aliases starting from return from that function and
+ * until the moment when something will explicitly mark the buffer
+ * dirty (hopefully that will not happen until we will free that block ;-)
+ * We don't even need to mark it not-uptodate - nobody can expect
+ * anything from a newly allocated buffer anyway. We used to use
+ * unmap_buffer() for such invalidation, but that was wrong. We definitely
+ * don't want to mark the alias unmapped, for example - it would confuse
+ * anyone who might pick it with bread() afterwards...
+ */
+
static void unmap_underlying_metadata(struct buffer_head * bh)
{
struct buffer_head *old_bh;
old_bh = get_hash_table(bh->b_dev, bh->b_blocknr, bh->b_size);
if (old_bh) {
- unmap_buffer(old_bh);
+ mark_buffer_clean(old_bh);
+ wait_on_buffer(old_bh);
+ clear_bit(BH_Req, &old_bh->b_state);
/* Here we could run brelse or bforget. We use
bforget because it will try to put the buffer
in the freelist. */
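The replacement invalidation is deliberately gentler than unmap_buffer(): the alias stays mapped and uptodate, so anyone who later picks it up with bread() still sees a sane buffer; it is merely marked clean (so stale data can never be written back over the new block), any I/O already in flight is drained, and BH_Req is cleared. Assembled from the hunk and its surrounding context, the function now reads roughly as follows; the trailing bforget call is inferred from the comment above and the pre-patch code, so treat it as a reconstruction:

    static void unmap_underlying_metadata(struct buffer_head * bh)
    {
            struct buffer_head *old_bh;

            old_bh = get_hash_table(bh->b_dev, bh->b_blocknr, bh->b_size);
            if (old_bh) {
                    mark_buffer_clean(old_bh);      /* never write the alias back */
                    wait_on_buffer(old_bh);         /* drain in-flight I/O */
                    clear_bit(BH_Req, &old_bh->b_state);
                    /* bforget rather than brelse: tries to put the
                       buffer straight back on the free list */
                    __bforget(old_bh);
            }
    }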
@@ -1451,6 +1475,8 @@
memset(kaddr+to, 0, block_end-to);
if (block_start < from)
memset(kaddr+block_start, 0, from-block_start);
+ if (block_end > to || block_start < from)
+ flush_dcache_page(page);
continue;
}
}
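This hunk and most of the ones below apply a single pattern: wherever buffer.c zeroes or copies pagecache data through the kernel-side mapping, flush_dcache_page() now follows the memset()/memcpy(). On CPUs with virtually indexed data caches, the kernel mapping and a user mapping of the same page can land in different cache lines, so the freshly written bytes must be pushed out of the cache before user space reads them through its own mapping; on x86 the call compiles to nothing. The pattern in isolation (a sketch; offset and len are stand-ins):

    char *kaddr = kmap(page);        /* map the pagecache page          */
    memset(kaddr + offset, 0, len);  /* write via the kernel mapping    */
    flush_dcache_page(page);         /* make the write visible through
                                        user mappings; no-op on x86     */
    kunmap(page);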
@@ -1554,6 +1580,7 @@
if (!kaddr)
kaddr = kmap(page);
memset((char *)(kaddr + i*blocksize), 0, blocksize);
+ flush_dcache_page(page);
set_bit(BH_Uptodate, &bh->b_state);
continue;
}
@@ -1618,8 +1645,9 @@
PAGE_CACHE_SIZE, get_block);
if (status)
goto out_unmap;
- kaddr = (char*)page_address(new_page);
+ kaddr = page_address(new_page);
memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
+ flush_dcache_page(new_page);
__block_commit_write(inode, new_page, zerofrom, PAGE_CACHE_SIZE);
kunmap(new_page);
UnlockPage(new_page);
@@ -1646,9 +1674,10 @@
status = __block_prepare_write(inode, page, zerofrom, to, get_block);
if (status)
goto out1;
- kaddr = (char*)page_address(page);
+ kaddr = page_address(page);
if (zerofrom < offset) {
memset(kaddr+zerofrom, 0, offset-zerofrom);
+ flush_dcache_page(page);
__block_commit_write(inode, page, zerofrom, offset);
}
return 0;
@@ -1709,7 +1738,8 @@
/* Sigh... will have to work, then... */
err = __block_prepare_write(inode, page, 0, offset, get_block);
if (!err) {
- memset((char *)page_address(page)+offset, 0, PAGE_CACHE_SIZE-offset);
+ memset(page_address(page) + offset, 0, PAGE_CACHE_SIZE - offset);
+ flush_dcache_page(page);
__block_commit_write(inode,page,0,offset);
done:
kunmap(page);
@@ -2011,7 +2041,7 @@
err = mapping->a_ops->prepare_write(NULL, page, 0, len-1);
if (err)
goto fail_map;
- kaddr = (char*)page_address(page);
+ kaddr = page_address(page);
memcpy(kaddr, symname, len-1);
mapping->a_ops->commit_write(NULL, page, 0, len-1);
/*
@@ -2104,6 +2134,11 @@
*
* This all is required so that we can free up memory
* later.
+ *
+ * Wait:
+ * 0 - no wait (this does not get called - see try_to_free_buffers below)
+ * 1 - start IO for dirty buffers
+ * 2 - wait for completion of locked buffers
*/
static void sync_page_buffers(struct buffer_head *bh, int wait)
{
@@ -2113,7 +2148,7 @@
struct buffer_head *p = tmp;
tmp = tmp->b_this_page;
if (buffer_locked(p)) {
- if (wait)
+ if (wait > 1)
__wait_on_buffer(p);
} else if (buffer_dirty(p))
ll_rw_block(WRITE, 1, &p);
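Together with the documentation hunk above, this decides what each wait level does per buffer: dirty, unlocked buffers get I/O started at wait >= 1, but only wait == 2 blocks on buffers already under I/O. Reconstructed around the two hunks (the do/while frame over the page's buffer ring is filled in from the surrounding source, so treat it as an assumption):

    static void sync_page_buffers(struct buffer_head *bh, int wait)
    {
            struct buffer_head *tmp = bh;

            do {
                    struct buffer_head *p = tmp;
                    tmp = tmp->b_this_page;
                    if (buffer_locked(p)) {
                            if (wait > 1)               /* 2: synchronous */
                                    __wait_on_buffer(p);
                    } else if (buffer_dirty(p))
                            ll_rw_block(WRITE, 1, &p);  /* 1 or 2: start I/O */
            } while (tmp != bh);
    }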
@@ -2186,8 +2221,9 @@
/* Uhhuh, start writeback so that we don't end up with all dirty pages */
spin_unlock(&free_list[index].lock);
write_unlock(&hash_table_lock);
- spin_unlock(&lru_list_lock);
- sync_page_buffers(bh, wait);
+ spin_unlock(&lru_list_lock);
+ if (wait)
+ sync_page_buffers(bh, wait);
return 0;
}
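The new guard is why the earlier comment can promise that sync_page_buffers() never sees wait == 0: a wait == 0 caller simply gives up without issuing any I/O. For callers, wait becomes an escalation knob under memory pressure; a hedged illustration of the semantics (these are not call sites from the patch):

    if (try_to_free_buffers(page, 0))       /* free only if already clean     */
            goto freed;
    if (try_to_free_buffers(page, 1))       /* also start async writeback     */
            goto freed;
    if (try_to_free_buffers(page, 2))       /* also wait on locked buffers    */
            goto freed;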