return __block_write_full_page(inode, page, get_block);
}
-/*
- * Commence writeout of all the buffers against a page. The
- * page must be locked. Returns zero on success or a negative
- * errno.
- */
-int writeout_one_page(struct page *page)
-{
- struct buffer_head * const head = page_buffers(page);
- struct buffer_head *arr[MAX_BUF_PER_PAGE];
- struct buffer_head *bh;
- int nr = 0;
- BUG_ON(!PageLocked(page));
- bh = head;
- do {
- if (!buffer_locked(bh) && buffer_dirty(bh) &&
- buffer_mapped(bh) && buffer_uptodate(bh))
- arr[nr++] = bh;
- } while ((bh = bh->b_this_page) != head);
- if (nr)
- ll_rw_block(WRITE, nr, arr);
- return 0;
-}
-EXPORT_SYMBOL(writeout_one_page);
-
-/*
- * Wait for completion of I/O of all buffers against a page. The page
- * must be locked. Returns zero on success or a negative errno.
- */
-int waitfor_one_page(struct page *page)
-{
- int error = 0;
- struct buffer_head *bh, *head = page_buffers(page);
-
- bh = head;
- do {
- wait_on_buffer(bh);
- if (buffer_req(bh) && !buffer_uptodate(bh))
- error = -EIO;
- } while ((bh = bh->b_this_page) != head);
- return error;
-}
-EXPORT_SYMBOL(waitfor_one_page);
-
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
get_block_t *get_block)
{
int err = 0;
dir->i_version = ++event;
page->mapping->a_ops->commit_write(NULL, page, from, to);
- if (IS_SYNC(dir)) {
- int err2;
- err = writeout_one_page(page);
- err2 = waitfor_one_page(page);
- if (err == 0)
- err = err2;
- }
+ if (IS_SYNC(dir))
+ err = write_one_page(page, 1);
+ else
+ unlock_page(page);
return err;
}
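
The same conversion repeats for minix and sysv below: the commit helper now always consumes the page lock, either inside write_one_page() (which unlocks the page as writeout completes) or explicitly on the asynchronous path. Reassembled from the ext2 hunk above, the post-patch helper looks roughly like this (a sketch; the surrounding declarations are assumed):

static int ext2_commit_chunk(struct page *page, unsigned from, unsigned to)
{
	struct inode *dir = page->mapping->host;
	int err = 0;

	dir->i_version = ++event;
	page->mapping->a_ops->commit_write(NULL, page, from, to);
	if (IS_SYNC(dir))
		err = write_one_page(page, 1);	/* writes, waits, unlocks */
	else
		unlock_page(page);		/* async path drops the lock itself */
	return err;
}

Callers therefore must not unlock the page after a commit; the later hunks delete exactly those unlock_page() calls.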
de->inode = cpu_to_le32(inode->i_ino);
ext2_set_de_type (de, inode);
err = ext2_commit_chunk(page, from, to);
- unlock_page(page);
ext2_put_page(page);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
mark_inode_dirty(dir);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
mark_inode_dirty(dir);
/* OFFSET_CACHE */
-out_unlock:
- unlock_page(page);
+out_put:
ext2_put_page(page);
out:
return err;
+out_unlock:
+ unlock_page(page);
+ goto out_put;
}
/*
pde->rec_len = cpu_to_le16(to-from);
dir->inode = 0;
err = ext2_commit_chunk(page, from, to);
- unlock_page(page);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
mark_inode_dirty(inode);
out:
if (!page)
return -ENOMEM;
err = mapping->a_ops->prepare_write(NULL, page, 0, chunk_size);
- if (err)
+ if (err) {
+ unlock_page(page);
goto fail;
-
+ }
base = page_address(page);
de = (struct ext2_dir_entry_2 *) base;
err = ext2_commit_chunk(page, 0, chunk_size);
fail:
- unlock_page(page);
page_cache_release(page);
return err;
}
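
The prepare_write() error handling is reshaped for the same reason: the shared fail: label used to drop the page lock, but the commit path now consumes it, so only the early-failure branch unlocks. The minix and sysv make-empty hunks below repeat this. In sketch form, following the ext2 hunk above:

	err = mapping->a_ops->prepare_write(NULL, page, 0, chunk_size);
	if (err) {
		unlock_page(page);	/* commit never ran; drop the lock here */
		goto fail;
	}
	/* ... build the empty directory chunk ... */
	err = ext2_commit_chunk(page, 0, chunk_size);	/* unlocks the page */
fail:
	page_cache_release(page);	/* lock already gone on both paths */
	return err;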
lock_page(page);
/* we're done with this page - no need to check for errors */
- if (page_has_buffers(page)) {
- writeout_one_page(page);
- waitfor_one_page(page);
- }
-
- unlock_page(page);
+ if (page_has_buffers(page))
+ write_one_page(page, 1);
+ else
+ unlock_page(page);
page_cache_release(page);
}
struct inode *dir = (struct inode *)page->mapping->host;
int err = 0;
page->mapping->a_ops->commit_write(NULL, page, from, to);
- if (IS_SYNC(dir)) {
- int err2;
- err = writeout_one_page(page);
- err2 = waitfor_one_page(page);
- if (err == 0)
- err = err2;
- }
+ if (IS_SYNC(dir))
+ err = write_one_page(page, 1);
+ else
+ unlock_page(page);
return err;
}
err = dir_commit_chunk(page, from, to);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
mark_inode_dirty(dir);
-out_unlock:
- unlock_page(page);
+out_put:
dir_put_page(page);
out:
return err;
+out_unlock:
+ unlock_page(page);
+ goto out_put;
}
int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
if (err == 0) {
de->inode = 0;
err = dir_commit_chunk(page, from, to);
+ } else {
+ unlock_page(page);
}
- unlock_page(page);
dir_put_page(page);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
mark_inode_dirty(inode);
if (!page)
return -ENOMEM;
err = mapping->a_ops->prepare_write(NULL, page, 0, 2 * sbi->s_dirsize);
- if (err)
+ if (err) {
+ unlock_page(page);
goto fail;
+ }
base = (char*)page_address(page);
memset(base, 0, PAGE_CACHE_SIZE);
err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
fail:
- unlock_page(page);
page_cache_release(page);
return err;
}
if (err == 0) {
de->inode = inode->i_ino;
err = dir_commit_chunk(page, from, to);
+ } else {
+ unlock_page(page);
}
- unlock_page(page);
dir_put_page(page);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
mark_inode_dirty(dir);
int err = 0;
page->mapping->a_ops->commit_write(NULL, page, from, to);
- if (IS_SYNC(dir)) {
- int err2;
- err = writeout_one_page(page);
- err2 = waitfor_one_page(page);
- if (err == 0)
- err = err2;
- }
+ if (IS_SYNC(dir))
+ err = write_one_page(page, 1);
+ else
+ unlock_page(page);
return err;
}
err = dir_commit_chunk(page, from, to);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
mark_inode_dirty(dir);
-out_unlock:
- unlock_page(page);
out_page:
dir_put_page(page);
out:
return err;
+out_unlock:
+ unlock_page(page);
+ goto out_page;
}
int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
BUG();
de->inode = 0;
err = dir_commit_chunk(page, from, to);
- unlock_page(page);
dir_put_page(page);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
mark_inode_dirty(inode);
if (!page)
return -ENOMEM;
err = mapping->a_ops->prepare_write(NULL, page, 0, 2 * SYSV_DIRSIZE);
- if (err)
+ if (err) {
+ unlock_page(page);
goto fail;
+ }
base = (char*)page_address(page);
memset(base, 0, PAGE_CACHE_SIZE);
err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
fail:
- unlock_page(page);
page_cache_release(page);
return err;
}
BUG();
de->inode = cpu_to_fs16(inode->i_sb, inode->i_ino);
err = dir_commit_chunk(page, from, to);
- unlock_page(page);
dir_put_page(page);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
mark_inode_dirty(dir);
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
extern int generic_direct_IO(int, struct inode *, struct kiobuf *, unsigned long, int, get_block_t *);
-extern int waitfor_one_page(struct page *);
-extern int writeout_one_page(struct page *);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
extern ssize_t block_write(struct file *, const char *, size_t, loff_t *);
extern int file_fsync(struct file *, struct dentry *, int);
-extern int generic_buffer_fdatasync(struct inode *inode, unsigned long start_idx, unsigned long end_idx);
extern int generic_osync_inode(struct inode *, int);
#define OSYNC_METADATA (1<<0)
#define OSYNC_DATA (1<<1)
/* mm/page-writeback.c */
int generic_writeback_mapping(struct address_space *mapping, int *nr_to_write);
+int write_one_page(struct page *page, int wait);
/* readahead.c */
#define VM_MAX_READAHEAD 128 /* kbytes */
EXPORT_SYMBOL(generic_file_write);
EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_ro_fops);
-EXPORT_SYMBOL(generic_buffer_fdatasync);
EXPORT_SYMBOL(file_lock_list);
EXPORT_SYMBOL(locks_init_lock);
EXPORT_SYMBOL(locks_copy_lock);
write_unlock(&mapping->page_lock);
}
-static int do_buffer_fdatasync(struct address_space *mapping,
- struct list_head *head, unsigned long start,
- unsigned long end, int (*fn)(struct page *))
-{
- struct list_head *curr;
- struct page *page;
- int retval = 0;
-
- write_lock(&mapping->page_lock);
- curr = head->next;
- while (curr != head) {
- page = list_entry(curr, struct page, list);
- curr = curr->next;
- if (!page_has_buffers(page))
- continue;
- if (page->index >= end)
- continue;
- if (page->index < start)
- continue;
-
- page_cache_get(page);
- write_unlock(&mapping->page_lock);
- lock_page(page);
-
- /* The buffers could have been free'd while we waited for the page lock */
- if (page_has_buffers(page))
- retval |= fn(page);
-
- unlock_page(page);
- write_lock(&mapping->page_lock);
- curr = page->list.next;
- page_cache_release(page);
- }
- write_unlock(&mapping->page_lock);
-
- return retval;
-}
-
-/*
- * Two-stage data sync: first start the IO, then go back and
- * collect the information..
- */
-int generic_buffer_fdatasync(struct inode *inode, unsigned long start_idx, unsigned long end_idx)
-{
- struct address_space *mapping = inode->i_mapping;
- int retval;
-
- /* writeout dirty buffers on pages from both clean and dirty lists */
- retval = do_buffer_fdatasync(mapping, &mapping->dirty_pages,
- start_idx, end_idx, writeout_one_page);
- retval = do_buffer_fdatasync(mapping, &mapping->io_pages,
- start_idx, end_idx, writeout_one_page);
- retval |= do_buffer_fdatasync(mapping, &mapping->clean_pages,
- start_idx, end_idx, writeout_one_page);
- retval |= do_buffer_fdatasync(mapping, &mapping->locked_pages,
- start_idx, end_idx, writeout_one_page);
-
- /* now wait for locked buffers on pages from both clean and dirty lists */
- retval |= do_buffer_fdatasync(mapping, &mapping->dirty_pages,
- start_idx, end_idx, waitfor_one_page);
- retval |= do_buffer_fdatasync(mapping, &mapping->io_pages,
- start_idx, end_idx, waitfor_one_page);
- retval |= do_buffer_fdatasync(mapping, &mapping->clean_pages,
- start_idx, end_idx, waitfor_one_page);
- retval |= do_buffer_fdatasync(mapping, &mapping->locked_pages,
- start_idx, end_idx, waitfor_one_page);
-
- return retval;
-}
-
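
Callers of the removed generic_buffer_fdatasync() can get the same start-I/O-then-wait behaviour from the mapping-level helpers, at the cost of the start_idx/end_idx range (the mapping-wide calls cover the whole file). A minimal sketch, assuming the existing filemap_fdatasync()/filemap_fdatawait() interfaces, which are not part of this patch:

static int sync_whole_mapping(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	int err, err2;

	err = filemap_fdatasync(mapping);	/* start writeout */
	err2 = filemap_fdatawait(mapping);	/* wait and collect errors */
	return err ? err : err2;
}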
/*
* In-memory filesystems have to fail their
* writepage function - and this has to be
}
EXPORT_SYMBOL(generic_writeback_mapping);
+/**
+ * write_one_page - write out a single page and optionally wait on I/O
+ *
+ * @page: the page to write
+ * @wait: if true, wait on writeout
+ *
+ * The page must be locked by the caller and will come unlocked when I/O
+ * completes.
+ *
+ * write_one_page() returns a negative error code if I/O failed.
+ */
+int write_one_page(struct page *page, int wait)
+{
+ struct address_space *mapping = page->mapping;
+ int ret = 0;
+
+ BUG_ON(!PageLocked(page));
+
+ write_lock(&mapping->page_lock);
+ list_del(&page->list);
+ list_add(&page->list, &mapping->locked_pages);
+ write_unlock(&mapping->page_lock);
+
+ if (TestClearPageDirty(page)) {
+ page_cache_get(page);
+ ret = mapping->a_ops->writepage(page);
+ if (ret == 0 && wait) {
+ wait_on_page(page);
+ if (PageError(page))
+ ret = -EIO;
+ }
+ page_cache_release(page);
+ } else {
+ unlock_page(page);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(write_one_page);
+
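
Typical use mirrors the directory-commit hunks above: the caller locks the page, dirties it, and hands the lock to write_one_page(). A sketch (helper name hypothetical; the page is assumed to already sit in the page cache):

static int commit_page_sync(struct page *page)
{
	lock_page(page);
	set_page_dirty(page);
	/* writes the page back and waits; the page comes back unlocked */
	return write_one_page(page, 1);
}

Passing wait == 0 just starts the I/O; either way the caller no longer owns the page lock on return.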
/*
* Add a page to the dirty page list.
*