}
}
+/*
+ * For a data-integrity writeout, we need to wait upon any in-progress I/O,
+ * then start new I/O, and then wait upon that too.
+ */
+void sync_dirty_buffer(struct buffer_head *bh)
+{
+	WARN_ON(atomic_read(&bh->b_count) < 1);
+	lock_buffer(bh);
+	if (test_clear_buffer_dirty(bh)) {
+		get_bh(bh);
+		bh->b_end_io = end_buffer_io_sync;
+		submit_bh(WRITE, bh);
+		wait_on_buffer(bh);
+	} else {
+		unlock_buffer(bh);
+	}
+}
+
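The helper exists because the open-coded pair it replaces was never
data-integrity safe: ll_rw_block() only submits a buffer it can trylock, so a
buffer already locked for unrelated I/O is silently skipped, and the
wait_on_buffer() which follows returns as soon as that unrelated I/O finishes,
with the dirty data still unwritten. A sketch of the hole (annotation, not
part of the patch):

	/* the racy idiom being replaced */
	ll_rw_block(WRITE, 1, &bh);	/* skipped if bh is already locked */
	wait_on_buffer(bh);		/* may just wait out someone else's I/O */

sync_dirty_buffer() closes the hole: lock_buffer() sleeps until any in-flight
I/O completes, test_clear_buffer_dirty() decides under the lock whether there
is anything to write (a clean buffer is simply unlocked, matching what
ll_rw_block() would have done), and the final wait_on_buffer() waits on I/O
this caller itself started.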
/*
* Sanity checks for try_to_free_buffers.
*/
}
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS) {
- ll_rw_block(WRITE, 1, &bitmap_bh);
- wait_on_buffer(bitmap_bh);
- }
+ if (sb->s_flags & MS_SYNCHRONOUS)
+ sync_dirty_buffer(bitmap_bh);
group_release_blocks(desc, bh2, group_freed);
freed += group_freed;
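From here on the patch is largely mechanical: each open-coded write-and-wait
on a single dirty buffer collapses to one call. The shape, repeated across
ext2, ext3, jbd, jfs, minix, qnx4, reiserfs, sysv, udf, ufs and hfs below
(only the guard varies: MS_SYNCHRONOUS, IS_SYNC(), IS_DIRSYNC(), do_sync,
wait):

	mark_buffer_dirty(bh);
	if (sb->s_flags & MS_SYNCHRONOUS)
		sync_dirty_buffer(bh);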
write_unlock(&EXT2_I(inode)->i_meta_lock);
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS) {
- ll_rw_block(WRITE, 1, &bitmap_bh);
- wait_on_buffer(bitmap_bh);
- }
+ if (sb->s_flags & MS_SYNCHRONOUS)
+ sync_dirty_buffer(bitmap_bh);
ext2_debug ("allocating block %d. ", block);
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS) {
- ll_rw_block(WRITE, 1, &bitmap_bh);
- wait_on_buffer(bitmap_bh);
- }
+ if (sb->s_flags & MS_SYNCHRONOUS)
+ sync_dirty_buffer(bitmap_bh);
sb->s_dirt = 1;
error_return:
brelse(bitmap_bh);
ext2_set_bit(i, bitmap_bh->b_data);
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS) {
- ll_rw_block(WRITE, 1, &bitmap_bh);
- wait_on_buffer(bitmap_bh);
- }
+ if (sb->s_flags & MS_SYNCHRONOUS)
+ sync_dirty_buffer(bitmap_bh);
brelse(bitmap_bh);
ino = group * EXT2_INODES_PER_GROUP(sb) + i + 1;
* But we now rely upon generic_osync_inode()
* and b_inode_buffers. But not for directories.
*/
- if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) {
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
- }
+ if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
+ sync_dirty_buffer(bh);
parent = nr;
}
if (n == num)
raw_inode->i_block[n] = ei->i_data[n];
mark_buffer_dirty(bh);
if (do_sync) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer (bh);
+ sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh)) {
printk ("IO error syncing ext2 inode [%s:%08lx]\n",
sb->s_id, (unsigned long) ino);
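Converters which reported write errors before still can: sync_dirty_buffer()
returns with the buffer unlocked but its flags intact, so the old post-write
check survives unchanged:

	sync_dirty_buffer(bh);
	if (buffer_req(bh) && !buffer_uptodate(bh))	/* submitted, yet not uptodate: the write failed */
		printk ("IO error syncing ext2 inode [%s:%08lx]\n",
			sb->s_id, (unsigned long) ino);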
{
es->s_wtime = cpu_to_le32(get_seconds());
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
- ll_rw_block(WRITE, 1, &EXT2_SB(sb)->s_sbh);
- wait_on_buffer(EXT2_SB(sb)->s_sbh);
+ sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
sb->s_dirt = 0;
}
}
mark_buffer_dirty(new_bh);
if (IS_SYNC(inode)) {
- ll_rw_block(WRITE, 1, &new_bh);
- wait_on_buffer(new_bh);
+ sync_dirty_buffer(new_bh);
error = -EIO;
if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
goto cleanup;
HDR(bh)->h_refcount = cpu_to_le32(
le32_to_cpu(HDR(bh)->h_refcount) - 1);
mark_buffer_dirty(bh);
- if (IS_SYNC(inode)) {
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
- }
+ if (IS_SYNC(inode))
+ sync_dirty_buffer(bh);
DQUOT_FREE_BLOCK(inode, 1);
}
EXT2_I(inode)->i_file_acl = 0;
es->s_wtime = cpu_to_le32(get_seconds());
BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "marking dirty");
mark_buffer_dirty(EXT3_SB(sb)->s_sbh);
- if (sync) {
- ll_rw_block(WRITE, 1, &EXT3_SB(sb)->s_sbh);
- wait_on_buffer(EXT3_SB(sb)->s_sbh);
- }
+ if (sync)
+ sync_dirty_buffer(EXT3_SB(sb)->s_sbh);
}
{
struct buffer_head *bh = jh2bh(descriptor);
set_buffer_uptodate(bh);
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
__brelse(bh); /* One for getblk() */
journal_unlock_journal_head(descriptor);
}
BUFFER_TRACE(bh, "marking dirty");
mark_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
if (wait)
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
+ else
+ ll_rw_block(WRITE, 1, &bh);
/* If we have just flushed the log (by marking s_start==0), then
* any future commit will have to be careful to update the
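journal_update_superblock() is the one conversion here that keeps
ll_rw_block(): sync_dirty_buffer() always blocks, so the no-wait case still
needs a fire-and-forget write. The resulting shape:

	mark_buffer_dirty(bh);
	if (wait)
		sync_dirty_buffer(bh);		/* data-integrity path */
	else
		ll_rw_block(WRITE, 1, &bh);	/* best-effort, asynchronous */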
bh = journal->j_sb_buffer;
BUFFER_TRACE(bh, "marking dirty");
mark_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
return 0;
}
atomic_inc(&bh->b_count);
spin_unlock(&journal_datalist_lock);
need_brelse = 1;
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
spin_lock(&journal_datalist_lock);
/* The buffer may become locked again at any
time if it is redirtied */
}
atomic_inc(&bh->b_count);
spin_unlock(&journal_datalist_lock);
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
__brelse(bh);
goto out;
}
j_sb->s_flag |= JFS_BAD_SAIT;
mark_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
brelse(bh);
return;
}
}
mark_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
brelse(bh);
return 0;
#if 0
set_buffer_uptodate(bp);
mark_buffer_dirty(bp, 1);
- if (IS_SYNC(dip)) {
- ll_rw_block(WRITE, 1, &bp);
- wait_on_buffer(bp);
- }
+ if (IS_SYNC(dip))
+ sync_dirty_buffer(bp);
brelse(bp);
#endif /* 0 */
ssize -= copy_size;
/* synchronously update superblock */
mark_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
brelse(bh);
/*
memcpy(j_sb2, j_sb, sizeof (struct jfs_superblock));
mark_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh2);
- wait_on_buffer(bh2);
+ sync_dirty_buffer(bh2);
brelse(bh2);
}
/* write primary superblock */
mark_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
brelse(bh);
goto resume;
bh = minix_update_inode(inode);
if (bh && buffer_dirty(bh))
{
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh))
{
printk ("IO error syncing minix inode [%s:%08lx]\n",
memcpy(bh_primary->b_data, bh_backup->b_data,
sb->s_blocksize);
mark_buffer_dirty(bh_primary);
- ll_rw_block(WRITE, 1, &bh_primary);
- wait_on_buffer(bh_primary);
+ sync_dirty_buffer(bh_primary);
if (buffer_uptodate(bh_primary)) {
brelse(bh_backup);
return bh_primary;
bh = qnx4_update_inode(inode);
if (bh && buffer_dirty(bh))
{
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh))
{
printk ("IO error syncing qnx4 inode [%s:%08lx]\n",
}
mark_buffer_dirty(jl->j_commit_bh) ;
- ll_rw_block(WRITE, 1, &(jl->j_commit_bh)) ;
- wait_on_buffer(jl->j_commit_bh) ;
+ sync_dirty_buffer(jl->j_commit_bh) ;
if (!buffer_uptodate(jl->j_commit_bh)) {
reiserfs_panic(s, "journal-615: buffer write failed\n") ;
}
jh->j_first_unflushed_offset = cpu_to_le32(offset) ;
jh->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ;
set_buffer_dirty(SB_JOURNAL(p_s_sb)->j_header_bh) ;
- ll_rw_block(WRITE, 1, &(SB_JOURNAL(p_s_sb)->j_header_bh)) ;
- wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ;
+ sync_dirty_buffer(SB_JOURNAL(p_s_sb)->j_header_bh) ;
if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
printk( "reiserfs: journal-837: IO error during journal replay\n" );
return -EIO ;
mark_buffer_dirty(bitmap[i].bh) ;
set_buffer_uptodate(bitmap[i].bh);
- ll_rw_block(WRITE, 1, &bitmap[i].bh);
- wait_on_buffer(bitmap[i].bh);
+ sync_dirty_buffer(bitmap[i].bh);
// update bitmap_info stuff
bitmap[i].first_zero_hint=1;
bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
bh = sysv_update_inode(inode);
if (bh && buffer_dirty(bh)) {
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh)) {
printk ("IO error syncing sysv inode [%s:%08lx]\n",
inode->i_sb->s_id, inode->i_ino);
static inline void dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
mark_buffer_dirty_inode(bh, inode);
- if (IS_SYNC(inode)) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer (bh);
- }
+ if (IS_SYNC(inode))
+ sync_dirty_buffer(bh);
}
static int block_to_path(struct inode *inode, long block, int offsets[DEPTH])
mark_buffer_dirty(bh);
if (do_sync)
{
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh))
{
printk("IO error syncing udf inode [%s:%08lx]\n",
ubh_mark_buffer_dirty (USPI_UBH);
ubh_mark_buffer_dirty (UCPI_UBH);
if (sb->s_flags & MS_SYNCHRONOUS) {
+ ubh_wait_on_buffer (UCPI_UBH);
ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
ubh_wait_on_buffer (UCPI_UBH);
}
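The ufs cylinder-group sites cannot use the new helper: the ubh_* operations
act on a ufs_buffer_head, which wraps what may be several buffer_heads, so
ubh_ll_rw_block() stays. Instead the same race is closed by hand, on the
assumption that ubh_ll_rw_block(), like ll_rw_block(), skips buffers it
cannot lock:

	ubh_wait_on_buffer (UCPI_UBH);	/* drain any in-flight I/O first... */
	ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
	ubh_wait_on_buffer (UCPI_UBH);	/* ...then wait on the write just issued */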
ubh_mark_buffer_dirty (USPI_UBH);
ubh_mark_buffer_dirty (UCPI_UBH);
if (sb->s_flags & MS_SYNCHRONOUS) {
+ ubh_wait_on_buffer (UCPI_UBH);
ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
ubh_wait_on_buffer (UCPI_UBH);
}
memset (bh->b_data, 0, sb->s_blocksize); \
set_buffer_uptodate(bh); \
mark_buffer_dirty (bh); \
- if (IS_SYNC(inode)) { \
- ll_rw_block (WRITE, 1, &bh); \
- wait_on_buffer (bh); \
- } \
+ if (IS_SYNC(inode)) \
+ sync_dirty_buffer(bh); \
brelse (bh); \
}
clear_buffer_dirty(bh);
bh->b_blocknr = result + i;
mark_buffer_dirty (bh);
- if (IS_SYNC(inode)) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer (bh);
- }
+ if (IS_SYNC(inode))
+ sync_dirty_buffer(bh);
brelse (bh);
}
else
ubh_mark_buffer_dirty (USPI_UBH);
ubh_mark_buffer_dirty (UCPI_UBH);
if (sb->s_flags & MS_SYNCHRONOUS) {
+ ubh_wait_on_buffer (UCPI_UBH);
ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
ubh_wait_on_buffer (UCPI_UBH);
}
ubh_mark_buffer_dirty (USPI_UBH);
ubh_mark_buffer_dirty (UCPI_UBH);
if (sb->s_flags & MS_SYNCHRONOUS) {
+ ubh_wait_on_buffer (UCPI_UBH);
ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
ubh_wait_on_buffer (UCPI_UBH);
}
dir->i_version++;
de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
mark_buffer_dirty(bh);
- if (IS_DIRSYNC(dir)) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer(bh);
- }
+ if (IS_DIRSYNC(dir))
+ sync_dirty_buffer(bh);
brelse (bh);
}
de->d_ino = cpu_to_fs32(sb, inode->i_ino);
ufs_set_de_type(sb, de, inode->i_mode);
mark_buffer_dirty(bh);
- if (IS_DIRSYNC(dir)) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer (bh);
- }
+ if (IS_DIRSYNC(dir))
+ sync_dirty_buffer(bh);
brelse (bh);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
dir->i_version++;
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
mark_inode_dirty(inode);
mark_buffer_dirty(bh);
- if (IS_DIRSYNC(inode)) {
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
- }
+ if (IS_DIRSYNC(inode))
+ sync_dirty_buffer(bh);
brelse(bh);
UFSD(("EXIT\n"))
return 0;
ubh_mark_buffer_dirty (USPI_UBH);
ubh_mark_buffer_dirty (UCPI_UBH);
if (sb->s_flags & MS_SYNCHRONOUS) {
+ ubh_wait_on_buffer (UCPI_UBH);
ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
ubh_wait_on_buffer (UCPI_UBH);
}
ubh_mark_buffer_dirty (USPI_UBH);
ubh_mark_buffer_dirty (UCPI_UBH);
if (sb->s_flags & MS_SYNCHRONOUS) {
+ ubh_wait_on_buffer (UCPI_UBH);
ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
ubh_wait_on_buffer (UCPI_UBH);
}
}
mark_buffer_dirty(bh);
- if (IS_SYNC(inode)) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer (bh);
- }
+ if (IS_SYNC(inode))
+ sync_dirty_buffer(bh);
inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
out:
memset (ufs_inode, 0, sizeof(struct ufs_inode));
mark_buffer_dirty(bh);
- if (do_sync) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer (bh);
- }
+ if (do_sync)
+ sync_dirty_buffer(bh);
brelse (bh);
UFSD(("EXIT\n"))
}
}
if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
+ ubh_wait_on_buffer (ind_ubh);
ubh_ll_rw_block (WRITE, 1, &ind_ubh);
ubh_wait_on_buffer (ind_ubh);
}
}
}
if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
+ ubh_wait_on_buffer (dind_bh);
ubh_ll_rw_block (WRITE, 1, &dind_bh);
ubh_wait_on_buffer (dind_bh);
}
}
}
if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
+ ubh_wait_on_buffer (tind_bh);
ubh_ll_rw_block (WRITE, 1, &tind_bh);
ubh_wait_on_buffer (tind_bh);
}
void free_buffer_head(struct buffer_head * bh);
void FASTCALL(unlock_buffer(struct buffer_head *bh));
void ll_rw_block(int, int, struct buffer_head * bh[]);
+void sync_dirty_buffer(struct buffer_head *bh);
int submit_bh(int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
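The declaration sits in buffer_head.h next to ll_rw_block() and submit_bh();
the EXPORT_SYMBOL further down is needed because modular filesystems (jfs,
reiserfs and the like, presumably) call the helper directly.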
}
static inline void hfs_buffer_sync(hfs_buffer buffer) {
- while (buffer_locked(buffer)) {
- wait_on_buffer(buffer);
- }
- if (buffer_dirty(buffer)) {
- ll_rw_block(WRITE, 1, &buffer);
- wait_on_buffer(buffer);
- }
+ if (buffer_dirty(buffer))
+ sync_dirty_buffer(buffer);
}
static inline void *hfs_buffer_data(const hfs_buffer buffer) {
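hfs_buffer_sync() sheds its leading wait loop along with the write-and-wait
pair: lock_buffer() inside sync_dirty_buffer() already sleeps until any
in-progress I/O completes, and test_clear_buffer_dirty() re-checks dirtiness
under the lock, so the unlocked buffer_dirty() test that remains is only a
cheap shortcut:

	if (buffer_dirty(buffer))	/* unlocked peek; rechecked under the lock */
		sync_dirty_buffer(buffer);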
EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__bforget);
EXPORT_SYMBOL(ll_rw_block);
+EXPORT_SYMBOL(sync_dirty_buffer);
EXPORT_SYMBOL(submit_bh);
EXPORT_SYMBOL(unlock_buffer);
EXPORT_SYMBOL(__wait_on_buffer);