int ret;
spin_lock(&inode->i_bufferlist_lock);
- ret = !list_empty(&inode->i_dirty_buffers) ||
- !list_empty(&inode->i_dirty_data_buffers);
+ ret = !list_empty(&inode->i_dirty_buffers);
spin_unlock(&inode->i_bufferlist_lock);
return ret;
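
With the data list gone, inode_has_buffers() reduces to a single list check; stitched together, the post-patch function reads:

	int inode_has_buffers(struct inode *inode)
	{
		int ret;

		spin_lock(&inode->i_bufferlist_lock);
		ret = !list_empty(&inode->i_dirty_buffers);
		spin_unlock(&inode->i_bufferlist_lock);
		return ret;
	}
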
while ((entry = inode->i_dirty_buffers.next) !=
&inode->i_dirty_buffers)
__remove_inode_queue(BH_ENTRY(entry));
- while ((entry = inode->i_dirty_data_buffers.next) !=
- &inode->i_dirty_data_buffers)
- __remove_inode_queue(BH_ENTRY(entry));
spin_unlock(&inode->i_bufferlist_lock);
}
* block_read_full_page() against that page will discover all the uptodate
* buffers, will set the page uptodate and will perform no I/O.
*/
-static inline void __mark_dirty(struct buffer_head *bh)
-{
- __set_page_dirty_nobuffers(bh->b_page);
-}
/**
* mark_buffer_dirty - mark a buffer_head as needing writeout
void mark_buffer_dirty(struct buffer_head *bh)
{
if (!atomic_set_buffer_dirty(bh))
- __mark_dirty(bh);
+ __set_page_dirty_nobuffers(bh->b_page);
}
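
__set_page_dirty_nobuffers() dirties the page at the address_space level without touching the page's other buffers, so once a buffer is dirtied the data is visible to the mapping-based writeback paths; the separate per-inode i_dirty_data_buffers bookkeeping becomes redundant, and the one-line __mark_dirty() wrapper can be folded into its only caller.
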
/*
partial = 1;
} else {
mark_buffer_uptodate(bh, 1);
- if (!atomic_set_buffer_dirty(bh)) {
- __mark_dirty(bh);
- buffer_insert_inode_data_queue(bh, inode);
- }
+ mark_buffer_dirty(bh);
}
}
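
In the commit_write path above, the open-coded dirty sequence (test-and-set the dirty bit, dirty the page, queue the buffer on i_dirty_data_buffers) collapses into a plain mark_buffer_dirty() call; the only dropped step is the data-queue insertion, which is exactly what this patch removes.
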
int err;
err = fsync_inode_buffers(inode);
- err |= fsync_inode_data_buffers(inode);
if (!(inode->i_state & I_DIRTY))
return err;
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
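
Dropping fsync_inode_data_buffers() from the per-filesystem fsync methods is safe because, in this kernel generation, the VFS already brackets the ->fsync call with a page-level flush of the same mapping. A sketch of the caller's pattern (from memory, not the exact source):

	/* sys_fsync-style caller, sketched: */
	filemap_fdatasync(inode->i_mapping);		/* start writeback of dirty pages */
	err = file->f_op->fsync(file, dentry, 0);	/* filesystem syncs its metadata */
	filemap_fdatawait(inode->i_mapping);		/* wait for the data I/O */
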
* we'll end up waiting on them in commit.
*/
ret = fsync_inode_buffers(inode);
- ret |= fsync_inode_data_buffers(inode);
-
ext3_force_commit(inode->i_sb);
return ret;
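
ext3 needs no replacement call at all: in ordered and journalled modes the dirty data buffers ride along with the transaction, so the ext3_force_commit() above already writes and waits for them, as the comment notes.
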
INIT_LIST_HEAD(&inode->i_data.io_pages);
INIT_LIST_HEAD(&inode->i_dentry);
INIT_LIST_HEAD(&inode->i_dirty_buffers);
- INIT_LIST_HEAD(&inode->i_dirty_data_buffers);
INIT_LIST_HEAD(&inode->i_devices);
sema_init(&inode->i_sem, 1);
INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
struct inode *inode = dentry->d_inode;
int rc = 0;
- rc = fsync_inode_data_buffers(inode);
-
if (!(inode->i_state & I_DIRTY))
return rc;
if (datasync || !(inode->i_state & I_DIRTY_DATASYNC))
/*
* write out dirty pages of bmap
*/
- fsync_inode_data_buffers(ipbmap);
+ filemap_fdatasync(ipbmap->i_mapping);
+ filemap_fdatawait(ipbmap->i_mapping);
ipbmap->i_state |= I_DIRTY;
diWriteSpecial(ipbmap);
/*
* write out dirty pages of imap
*/
- fsync_inode_data_buffers(ipimap);
+ filemap_fdatasync(ipimap->i_mapping);
+ filemap_fdatawait(ipimap->i_mapping);
diWriteSpecial(ipimap);
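
filemap_fdatasync() starts writeback of a mapping's dirty pages and filemap_fdatawait() blocks until the in-flight writes finish, so the back-to-back pair is the page-level equivalent of the removed fsync_inode_data_buffers(). Where the pair recurs, it could be folded into a helper along these lines (hypothetical, not part of this patch):

	/* Hypothetical helper: write out and wait on one mapping. */
	static int jfs_sync_mapping(struct address_space *mapping)
	{
		int err = filemap_fdatasync(mapping);
		int err2 = filemap_fdatawait(mapping);

		return err ? err : err2;
	}
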
jERROR(1, ("diFreeSpecial called with NULL ip!\n"));
return;
}
- fsync_inode_data_buffers(ip);
+ filemap_fdatasync(ip->i_mapping);
+ filemap_fdatawait(ip->i_mapping);
truncate_inode_pages(ip->i_mapping, 0);
iput(ip);
}
* We need to make sure all of the "written" metapages
* actually make it to disk
*/
- fsync_inode_data_buffers(sbi->ipbmap);
- fsync_inode_data_buffers(sbi->ipimap);
- fsync_inode_data_buffers(sbi->direct_inode);
+ filemap_fdatasync(sbi->ipbmap->i_mapping);
+ filemap_fdatasync(sbi->ipimap->i_mapping);
+ filemap_fdatasync(sbi->direct_inode->i_mapping);
+ filemap_fdatawait(sbi->ipbmap->i_mapping);
+ filemap_fdatawait(sbi->ipimap->i_mapping);
+ filemap_fdatawait(sbi->direct_inode->i_mapping);
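
Note the ordering: all three mappings are pushed with filemap_fdatasync() before any filemap_fdatawait(), so writeback for the bmap, imap and direct inode can proceed in parallel rather than serializing a full sync per inode.
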
lrd.logtid = 0;
lrd.backchain = 0;
page_index = lblock >> l2BlocksPerPage;
page_offset = (lblock - (page_index << l2BlocksPerPage)) <<
l2bsize;
- if ((page_offset + size) > PAGE_SIZE) {
+ if ((page_offset + size) > PAGE_CACHE_SIZE) {
spin_unlock(&meta_lock);
jERROR(1, ("MetaData crosses page boundary!!\n"));
return NULL;
__free_metapage(mp);
spin_unlock(&meta_lock);
return NULL;
- } else
+ } else {
INCREMENT(mpStat.pagealloc);
+ unlock_page(mp->page);
+ }
} else {
jFYI(1,
("__get_metapage: Calling read_cache_page\n"));
return NULL;
} else
INCREMENT(mpStat.pagealloc);
- lock_page(mp->page);
}
mp->data = (void *) (kmap(mp->page) + page_offset);
}
page_offset =
(mp->index - (page_index << l2BlocksPerPage)) << l2bsize;
+ lock_page(mp->page);
rc = mp->mapping->a_ops->prepare_write(NULL, mp->page, page_offset,
page_offset +
mp->logical_size);
jERROR(1, ("prepare_write return %d!\n", rc));
ClearPageUptodate(mp->page);
kunmap(mp->page);
+ unlock_page(mp->page);
clear_bit(META_dirty, &mp->flag);
return;
}
jERROR(1, ("commit_write returned %d\n", rc));
}
+ unlock_page(mp->page);
clear_bit(META_dirty, &mp->flag);
jFYI(1, ("__write_metapage done\n"));
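
The metapage locking protocol changes across these hunks: a cached metapage's page is no longer held locked for its whole lifetime; the lock is taken only around the address_space operations, giving __write_metapage() the shape:

	lock_page(mp->page);
	rc = mp->mapping->a_ops->prepare_write(NULL, mp->page, page_offset,
					       page_offset + mp->logical_size);
	/* ... on success, copy the data and call commit_write ... */
	unlock_page(mp->page);
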
mp->data = 0;
if (test_bit(META_dirty, &mp->flag))
__write_metapage(mp);
- unlock_page(mp->page);
if (test_bit(META_sync, &mp->flag)) {
sync_metapage(mp);
clear_bit(META_sync, &mp->flag);
/*
* If in the metapage cache, we've got the page locked
*/
+ lock_page(mp->page);
block_flushpage(mp->page, 0);
+ unlock_page(mp->page);
} else {
spin_unlock(&meta_lock);
page = find_lock_page(mapping, lblock>>l2BlocksPerPage);
clear_bit(META_dirty, &mp->flag);
set_bit(META_discard, &mp->flag);
kunmap(mp->page);
- unlock_page(mp->page);
page_cache_release(mp->page);
INCREMENT(mpStat.pagefree);
mp->data = 0;
* committing transactions and use i_sem instead.
*/
if ((!S_ISDIR(ip->i_mode))
- && (tblk->flag & COMMIT_DELETE) == 0)
- fsync_inode_data_buffers(ip);
+ && (tblk->flag & COMMIT_DELETE) == 0) {
+ filemap_fdatasync(ip->i_mapping);
+ filemap_fdatawait(ip->i_mapping);
+ }
/*
* Mark inode as not dirty. It will still be on the dirty
* We need to clean out the direct_inode pages since this inode
* is not in the inode hash.
*/
- fsync_inode_data_buffers(sbi->direct_inode);
+ filemap_fdatasync(sbi->direct_inode->i_mapping);
+ filemap_fdatawait(sbi->direct_inode->i_mapping);
truncate_inode_pages(sbi->direct_mapping, 0);
iput(sbi->direct_inode);
sbi->direct_inode = NULL;
jERROR(1, ("jfs_umount failed with return code %d\n", rc));
}
out_mount_failed:
- fsync_inode_data_buffers(sbi->direct_inode);
+ filemap_fdatasync(sbi->direct_inode->i_mapping);
+ filemap_fdatawait(sbi->direct_inode->i_mapping);
truncate_inode_pages(sbi->direct_mapping, 0);
make_bad_inode(sbi->direct_inode);
iput(sbi->direct_inode);
int err;
err = fsync_inode_buffers(inode);
- err |= fsync_inode_data_buffers(inode);
if (!(inode->i_state & I_DIRTY))
return err;
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
BUG ();
n_err = fsync_inode_buffers(p_s_inode) ;
- n_err |= fsync_inode_data_buffers(p_s_inode);
reiserfs_commit_for_inode(p_s_inode) ;
unlock_kernel() ;
return ( n_err < 0 ) ? -EIO : 0;
int err;
err = fsync_inode_buffers(inode);
- err |= fsync_inode_data_buffers(inode);
if (!(inode->i_state & I_DIRTY))
return err;
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
int err;
err = fsync_inode_buffers(inode);
- err |= fsync_inode_data_buffers(inode);
if (!(inode->i_state & I_DIRTY))
return err;
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
struct list_head i_dentry;
struct list_head i_dirty_buffers; /* uses i_bufferlist_lock */
- struct list_head i_dirty_data_buffers;
spinlock_t i_bufferlist_lock;
unsigned long i_ino;
bh, &inode->i_dirty_buffers);
}
-static inline void
-buffer_insert_inode_data_queue(struct buffer_head *bh, struct inode *inode)
-{
- buffer_insert_list(&inode->i_bufferlist_lock,
- bh, &inode->i_dirty_data_buffers);
-}
-
#define atomic_set_buffer_dirty(bh) test_and_set_bit(BH_Dirty, &(bh)->b_state)
static inline void mark_buffer_async(struct buffer_head * bh, int on)
return fsync_buffers_list(&inode->i_bufferlist_lock,
&inode->i_dirty_buffers);
}
-static inline int fsync_inode_data_buffers(struct inode *inode)
-{
- return fsync_buffers_list(&inode->i_bufferlist_lock,
- &inode->i_dirty_data_buffers);
-}
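
With buffer_insert_inode_data_queue() and fsync_inode_data_buffers() removed from the header, flushing an inode's data now goes through its mapping, via the filemap_fdatasync()/filemap_fdatawait() pair declared just below.
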
extern int inode_has_buffers(struct inode *);
extern int filemap_fdatasync(struct address_space *);
extern int filemap_fdatawait(struct address_space *);
* completely asynchronous or performance will go to /dev/null.
*/
retval = filemap_fdatasync(mapping);
- if (retval == 0)
- retval = fsync_inode_data_buffers(inode);
if (retval == 0)
retval = filemap_fdatawait(mapping);
if (retval < 0)
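
After the change, the O_DIRECT flush above is simply filemap_fdatasync() followed by filemap_fdatawait() on the mapping; as the comment says, metadata is deliberately left asynchronous.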