patch-2.4.0-test8 linux/fs/buffer.c
Next file: linux/fs/coda/upcall.c
Previous file: linux/fs/block_dev.c
Back to the patch index
Back to the overall index
- Lines: 143
- Date: Wed Sep 6 08:29:45 2000
- Orig file: v2.4.0-test7/linux/fs/buffer.c
- Orig date: Wed Aug 23 18:36:38 2000
diff -u --recursive --new-file v2.4.0-test7/linux/fs/buffer.c linux/fs/buffer.c
@@ -892,7 +892,7 @@
wakeup_bdflush(state);
}
-static __inline__ void __mark_dirty(struct buffer_head *bh, int flag)
+static __inline__ void __mark_dirty(struct buffer_head *bh)
{
bh->b_flushtime = jiffies + bdf_prm.b_un.age_buffer;
refile_buffer(bh);
@@ -900,15 +900,15 @@
/* atomic version, the user must call balance_dirty() by hand
as soon as it become possible to block */
-void __mark_buffer_dirty(struct buffer_head *bh, int flag)
+void __mark_buffer_dirty(struct buffer_head *bh)
{
if (!atomic_set_buffer_dirty(bh))
- __mark_dirty(bh, flag);
+ __mark_dirty(bh);
}
-void mark_buffer_dirty(struct buffer_head *bh, int flag)
+void mark_buffer_dirty(struct buffer_head *bh)
{
- __mark_buffer_dirty(bh, flag);
+ __mark_buffer_dirty(bh);
balance_dirty(bh->b_dev);
}
@@ -1419,7 +1419,7 @@
}
set_bit(BH_Uptodate, &bh->b_state);
if (!atomic_set_buffer_dirty(bh)) {
- __mark_dirty(bh, 0);
+ __mark_dirty(bh);
need_balance_dirty = 1;
}
@@ -1520,7 +1520,7 @@
} else {
set_bit(BH_Uptodate, &bh->b_state);
if (!atomic_set_buffer_dirty(bh)) {
- __mark_dirty(bh, 0);
+ __mark_dirty(bh);
need_balance_dirty = 1;
}
}
@@ -1721,6 +1721,72 @@
return 0;
}
+int block_truncate_page(struct address_space *mapping, loff_t from, get_block_t *get_block)
+{
+ unsigned long index = from >> PAGE_CACHE_SHIFT;
+ unsigned offset = from & (PAGE_CACHE_SIZE-1);
+ unsigned blocksize, iblock, length, pos;
+ struct inode *inode = (struct inode *)mapping->host;
+ struct page *page;
+ struct buffer_head *bh;
+ int err;
+
+ blocksize = inode->i_sb->s_blocksize;
+ length = offset & (blocksize - 1);
+
+ /* Block boundary? Nothing to do */
+ if (!length)
+ return 0;
+
+ length = blocksize - length;
+ iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+
+ page = grab_cache_page(mapping, index);
+ err = PTR_ERR(page);
+ if (IS_ERR(page))
+ goto out;
+
+ if (!page->buffers)
+ create_empty_buffers(page, inode, blocksize);
+
+ /* Find the buffer that contains "offset" */
+ bh = page->buffers;
+ pos = blocksize;
+ while (offset >= pos) {
+ bh = bh->b_this_page;
+ iblock++;
+ pos += blocksize;
+ }
+
+ if (!buffer_uptodate(bh)) {
+ err = 0;
+ if (!buffer_mapped(bh)) {
+ get_block(inode, iblock, bh, 0);
+ if (!buffer_mapped(bh))
+ goto unlock;
+ }
+ err = -EIO;
+ bh->b_end_io = end_buffer_io_sync;
+ ll_rw_block(READ, 1, &bh);
+ wait_on_buffer(bh);
+ if (!buffer_uptodate(bh))
+ goto unlock;
+ }
+
+ memset((char *) kmap(page) + offset, 0, length);
+ flush_dcache_page(page);
+ kunmap(page);
+
+ mark_buffer_dirty(bh);
+ err = 0;
+
+unlock:
+ UnlockPage(page);
+ page_cache_release(page);
+out:
+ return err;
+}
+
int block_write_full_page(struct page *page, get_block_t *get_block)
{
struct inode *inode = (struct inode*)page->mapping->host;
@@ -2606,8 +2672,8 @@
if (signal_pending(tsk)) {
int stopped = 0;
spin_lock_irq(&tsk->sigmask_lock);
- if (sigismember(&tsk->signal, SIGSTOP)) {
- sigdelset(&tsk->signal, SIGSTOP);
+ if (sigismember(&tsk->pending.signal, SIGSTOP)) {
+ sigdelset(&tsk->pending.signal, SIGSTOP);
stopped = 1;
}
recalc_sigpending(tsk);
@@ -2625,9 +2691,9 @@
static int __init bdflush_init(void)
{
DECLARE_MUTEX_LOCKED(sem);
- kernel_thread(bdflush, &sem, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
+ kernel_thread(bdflush, &sem, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
down(&sem);
- kernel_thread(kupdate, &sem, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
+ kernel_thread(kupdate, &sem, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
down(&sem);
return 0;
}
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)