--------------------------- address_space_operations --------------------------
prototypes:
- int (*writepage)(struct page *);
+ int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*readpage)(struct file *, struct page *);
int (*sync_page)(struct page *);
- int (*writepages)(struct address_space *, int *nr_to_write);
+ int (*writepages)(struct address_space *, struct writeback_control *);
int (*set_page_dirty)(struct page *page);
int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
->writepage() is used for two purposes: for "memory cleansing" and for
"sync". These are quite different operations and the behaviour may differ
-depending upon the mode. (Yes, there should be two a_ops for this, or
-writepage should take a writeback_control*)
+depending upon the mode.
-If writepage is called for sync (current->flags & PF_SYNC) then it *must*
-write the page, even if that would involve blocking on in-progress I/O.
+If writepage is called for sync (wbc->sync_mode != WB_SYNC_NONE) then
+it *must* start I/O against the page, even if that would involve
+blocking on in-progress I/O.
-If writepage is called for memory cleansing (!(current->flags & PF_SYNC))
-then its role is to get as much writeout underway as possible. So writepage
-should try to avoid blocking against currently-in-progress I/O.
+If writepage is called for memory cleansing (sync_mode ==
+WB_SYNC_NONE) then its role is to get as much writeout underway as
+possible. So writepage should try to avoid blocking against
+currently-in-progress I/O.
If the filesystem is not called for "sync" and it determines that it
would need to block against in-progress I/O to be able to start new I/O
int (*readlink) (struct dentry *, char *,int);
struct dentry * (*follow_link) (struct dentry *, struct dentry *);
int (*readpage) (struct file *, struct page *);
- int (*writepage) (struct file *, struct page *);
+ int (*writepage) (struct page *page, struct writeback_control *wbc);
int (*bmap) (struct inode *,int);
void (*truncate) (struct inode *);
int (*permission) (struct inode *, int);
/* Page cache stuff */
/* writepage() - should never be called - catch it anyway */
-static int blkmtd_writepage(struct page *page)
+static int blkmtd_writepage(struct page *page, struct writeback_control *wbc)
{
printk("blkmtd: writepage called!!!\n");
+ unlock_page(page);
return -EIO;
}
return 0;
}
-static int adfs_writepage(struct page *page)
+static int adfs_writepage(struct page *page, struct writeback_control *wbc)
{
- return block_write_full_page(page, adfs_get_block);
+ return block_write_full_page(page, adfs_get_block, wbc);
}
static int adfs_readpage(struct file *file, struct page *page)
return -ENOSPC;
}
-static int affs_writepage(struct page *page)
+static int affs_writepage(struct page *page, struct writeback_control *wbc)
{
- return block_write_full_page(page, affs_get_block);
+ return block_write_full_page(page, affs_get_block, wbc);
}
static int affs_readpage(struct file *file, struct page *page)
{
return err;
}
-static int bfs_writepage(struct page *page)
+static int bfs_writepage(struct page *page, struct writeback_control *wbc)
{
- return block_write_full_page(page, bfs_get_block);
+ return block_write_full_page(page, bfs_get_block, wbc);
}
static int bfs_readpage(struct file *file, struct page *page)
nr_segs, blkdev_get_blocks);
}
-static int blkdev_writepage(struct page * page)
+static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
- return block_write_full_page(page, blkdev_get_block);
+ return block_write_full_page(page, blkdev_get_block, wbc);
}
static int blkdev_readpage(struct file * file, struct page * page)
* with submit_bh(). At the address_space level PageWriteback prevents this
* contention from occurring.
*/
-static int __block_write_full_page(struct inode *inode,
- struct page *page, get_block_t *get_block)
+static int __block_write_full_page(struct inode *inode, struct page *page,
+ get_block_t *get_block, struct writeback_control *wbc)
{
int err;
unsigned long block;
do {
get_bh(bh);
if (buffer_mapped(bh) && buffer_dirty(bh)) {
- if (called_for_sync()) {
+ if (wbc->sync_mode != WB_SYNC_NONE) {
lock_buffer(bh);
} else {
if (test_set_buffer_locked(bh)) {
/*
* The generic ->writepage function for buffer-backed address_spaces
*/
-int block_write_full_page(struct page *page, get_block_t *get_block)
+int block_write_full_page(struct page *page, get_block_t *get_block,
+ struct writeback_control *wbc)
{
struct inode * const inode = page->mapping->host;
const unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
/* Is the page fully inside i_size? */
if (page->index < end_index)
- return __block_write_full_page(inode, page, get_block);
+ return __block_write_full_page(inode, page, get_block, wbc);
/* Is the page fully outside i_size? (truncate in progress) */
offset = inode->i_size & (PAGE_CACHE_SIZE-1);
memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
- return __block_write_full_page(inode, page, get_block);
+ return __block_write_full_page(inode, page, get_block, wbc);
}
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
}
static int
-cifs_writepage(struct page* page)
+cifs_writepage(struct page* page, struct writeback_control *wbc)
{
int rc = -EFAULT;
int xid;
goto reread;
}
-static int ext2_writepage(struct page *page)
+static int ext2_writepage(struct page *page, struct writeback_control *wbc)
{
- return block_write_full_page(page,ext2_get_block);
+ return block_write_full_page(page, ext2_get_block, wbc);
}
static int ext2_readpage(struct file *file, struct page *page)
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
+#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include "xattr.h"
* disastrous. Any write() or metadata operation will sync the fs for
* us.
*/
-static int ext3_writepage(struct page *page)
+static int ext3_writepage(struct page *page, struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
struct buffer_head *page_bufs;
goto out_fail;
needed = ext3_writepage_trans_blocks(inode);
- if (current->flags & PF_MEMALLOC)
+ if (wbc->for_reclaim)
handle = ext3_journal_try_start(inode, needed);
else
handle = ext3_journal_start(inode, needed);
PAGE_CACHE_SIZE, NULL, bget_one);
}
- ret = block_write_full_page(page, ext3_get_block);
+ ret = block_write_full_page(page, ext3_get_block, wbc);
/*
* The page can become unlocked at any point now, and
return 0;
}
-static int fat_writepage(struct page *page)
+static int fat_writepage(struct page *page, struct writeback_control *wbc)
{
- return block_write_full_page(page,fat_get_block);
+ return block_write_full_page(page,fat_get_block, wbc);
}
static int fat_readpage(struct file *file, struct page *page)
{
return __hfs_notify_change(dentry, attr, HFS_HDR);
}
-static int hfs_writepage(struct page *page)
+static int hfs_writepage(struct page *page, struct writeback_control *wbc)
{
- return block_write_full_page(page,hfs_get_block);
+ return block_write_full_page(page,hfs_get_block, wbc);
}
static int hfs_readpage(struct file *file, struct page *page)
{
return 0;
}
-static int hpfs_writepage(struct page *page)
+static int hpfs_writepage(struct page *page, struct writeback_control *wbc)
{
- return block_write_full_page(page,hpfs_get_block);
+ return block_write_full_page(page,hpfs_get_block, wbc);
}
static int hpfs_readpage(struct file *file, struct page *page)
{
return jfs_get_blocks(ip, lblock, 1, bh_result, create);
}
-static int jfs_writepage(struct page *page)
+static int jfs_writepage(struct page *page, struct writeback_control *wbc)
{
- return block_write_full_page(page, jfs_get_block);
+ return block_write_full_page(page, jfs_get_block, wbc);
}
static int jfs_writepages(struct address_space *mapping,
return V2_minix_get_block(inode, block, bh_result, create);
}
-static int minix_writepage(struct page *page)
+static int minix_writepage(struct page *page, struct writeback_control *wbc)
{
- return block_write_full_page(page,minix_get_block);
+ return block_write_full_page(page, minix_get_block, wbc);
}
static int minix_readpage(struct file *file, struct page *page)
{
*/
static struct bio *
mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
- sector_t *last_block_in_bio, int *ret)
+ sector_t *last_block_in_bio, int *ret, struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
const unsigned blkbits = inode->i_blkbits;
confused:
if (bio)
bio = mpage_bio_submit(WRITE, bio);
- *ret = page->mapping->a_ops->writepage(page);
+ *ret = page->mapping->a_ops->writepage(page, wbc);
out:
return bio;
}
sector_t last_block_in_bio = 0;
int ret = 0;
int done = 0;
- int sync = called_for_sync();
struct pagevec pvec;
- int (*writepage)(struct page *);
+ int (*writepage)(struct page *page, struct writeback_control *wbc);
if (wbc->nonblocking && bdi_write_congested(bdi)) {
blk_run_queues();
struct page *page = list_entry(mapping->io_pages.prev,
struct page, list);
list_del(&page->list);
- if (PageWriteback(page) && !sync) {
+ if (PageWriteback(page) && wbc->sync_mode == WB_SYNC_NONE) {
if (PageDirty(page)) {
list_add(&page->list, &mapping->dirty_pages);
continue;
lock_page(page);
- if (sync)
+ if (wbc->sync_mode != WB_SYNC_NONE)
wait_on_page_writeback(page);
if (page->mapping == mapping && !PageWriteback(page) &&
test_clear_page_dirty(page)) {
if (writepage) {
- ret = (*writepage)(page);
+ ret = (*writepage)(page, wbc);
} else {
bio = mpage_writepage(bio, page, get_block,
- &last_block_in_bio, &ret);
+ &last_block_in_bio, &ret, wbc);
}
if (ret || (--(wbc->nr_to_write) <= 0))
done = 1;
* Write an mmapped page to the server.
*/
int
-nfs_writepage(struct page *page)
+nfs_writepage(struct page *page, struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
unsigned long end_index;
*
* Return 0 on success and -errno on error.
*/
-static int ntfs_writepage(struct page *page)
+static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
{
s64 attr_pos;
struct inode *vi;
return;
}
-static int qnx4_writepage(struct page *page)
+static int qnx4_writepage(struct page *page, struct writeback_control *wbc)
{
- return block_write_full_page(page,qnx4_get_block);
+ return block_write_full_page(page,qnx4_get_block, wbc);
}
static int qnx4_readpage(struct file *file, struct page *page)
{
}
}
-static int reiserfs_write_full_page(struct page *page) {
+static int reiserfs_write_full_page(struct page *page, struct writeback_control *wbc) {
struct inode *inode = page->mapping->host ;
unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT ;
unsigned last_offset = PAGE_CACHE_SIZE;
}
-static int reiserfs_writepage (struct page * page)
+static int reiserfs_writepage (struct page * page, struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host ;
reiserfs_wait_on_write_block(inode->i_sb) ;
- return reiserfs_write_full_page(page) ;
+ return reiserfs_write_full_page(page, wbc) ;
}
* We are called with the page locked and we unlock it when done.
*/
static int
-smb_writepage(struct page *page)
+smb_writepage(struct page *page, struct writeback_control *wbc)
{
struct address_space *mapping = page->mapping;
struct inode *inode;
return 0;
}
-static int sysv_writepage(struct page *page)
+static int sysv_writepage(struct page *page, struct writeback_control *wbc)
{
- return block_write_full_page(page,get_block);
+ return block_write_full_page(page,get_block,wbc);
}
static int sysv_readpage(struct file *file, struct page *page)
{
return 0;
}
-static int udf_adinicb_writepage(struct page *page)
+static int udf_adinicb_writepage(struct page *page, struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
char *kaddr;
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
+#include <linux/writeback.h>
#include <linux/slab.h>
#include "udf_i.h"
}
}
-static int udf_writepage(struct page *page)
+static int udf_writepage(struct page *page, struct writeback_control *wbc)
{
- return block_write_full_page(page, udf_get_block);
+ return block_write_full_page(page, udf_get_block, wbc);
}
static int udf_readpage(struct file *file, struct page *page)
{
struct page *page;
char *kaddr;
+ struct writeback_control udf_wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .nr_to_write = 1,
+ };
/* from now on we have normal address_space methods */
inode->i_data.a_ops = &udf_aops;
else
UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
- inode->i_data.a_ops->writepage(page);
+ inode->i_data.a_ops->writepage(page, &udf_wbc);
page_cache_release(page);
mark_inode_dirty(inode);
return NULL;
}
-static int ufs_writepage(struct page *page)
+static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
- return block_write_full_page(page,ufs_getfrag_block);
+ return block_write_full_page(page,ufs_getfrag_block,wbc);
}
static int ufs_readpage(struct file *file, struct page *page)
{
STATIC int
linvfs_writepage(
- struct page *page)
+ struct page *page,
+ struct writeback_control *wbc)
{
int error;
int need_trans = 1;
*/
int try_to_release_page(struct page * page, int gfp_mask);
int block_invalidatepage(struct page *page, unsigned long offset);
-int block_write_full_page(struct page*, get_block_t*);
+int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc);
int block_read_full_page(struct page*, get_block_t*);
int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*,
struct writeback_control;
struct address_space_operations {
- int (*writepage)(struct page *);
+ int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*readpage)(struct file *, struct page *);
int (*sync_page)(struct page *);
/*
* linux/fs/nfs/write.c
*/
-extern int nfs_writepage(struct page *);
+extern int nfs_writepage(struct page *page, struct writeback_control *wbc);
extern int nfs_writepages(struct address_space *, struct writeback_control *);
extern int nfs_flush_incompatible(struct file *file, struct page *page);
extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
#define PF_FREEZE 0x00008000 /* this task should be frozen for suspend */
#define PF_IOTHREAD 0x00010000 /* this thread is needed for doing I/O to swap */
#define PF_FROZEN 0x00020000 /* frozen for system suspend */
-#define PF_SYNC 0x00040000 /* performing fsync(), etc */
-#define PF_FSTRANS 0x00080000 /* inside a filesystem transaction */
-#define PF_KSWAPD 0x00100000 /* I am kswapd */
+#define PF_FSTRANS 0x00040000 /* inside a filesystem transaction */
+#define PF_KSWAPD 0x00080000 /* I am kswapd */
/*
* Ptrace flags
struct sysinfo;
struct address_space;
struct zone;
+struct writeback_control;
/*
* A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct file *, struct page *);
-extern int swap_writepage(struct page *);
+extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern int rw_swap_page_sync(int, swp_entry_t, struct page *);
/* linux/mm/swap_state.c */
int nonblocking; /* Don't get stuck on request queues */
int encountered_congestion; /* An output: a queue is full */
int for_kupdate; /* A kupdate writeback */
+ int for_reclaim; /* Invoked from the page allocator */
};
/*
read-only. */
-/*
- * Tell the writeback paths that they are being called for a "data integrity"
- * operation such as fsync().
- */
-static inline int called_for_sync(void)
-{
- return current->flags & PF_SYNC;
-}
-
#endif /* WRITEBACK_H */
* cleansing writeback. The difference between these two operations is that
* if a dirty page/buffer is encountered, it must be waited upon, and not just
* skipped over.
- *
- * The PF_SYNC flag is set across this operation and the various functions
- * which care about this distinction must use called_for_sync() to find out
- * which behaviour they should implement.
*/
int filemap_fdatawrite(struct address_space *mapping)
{
if (mapping->backing_dev_info->memory_backed)
return 0;
- current->flags |= PF_SYNC;
write_lock(&mapping->page_lock);
list_splice_init(&mapping->dirty_pages, &mapping->io_pages);
write_unlock(&mapping->page_lock);
ret = do_writepages(mapping, &wbc);
- current->flags &= ~PF_SYNC;
return ret;
}
{
struct address_space *mapping = page->mapping;
int ret = 0;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ };
BUG_ON(!PageLocked(page));
list_add(&page->list, &mapping->locked_pages);
page_cache_get(page);
write_unlock(&mapping->page_lock);
- ret = mapping->a_ops->writepage(page);
+ ret = mapping->a_ops->writepage(page, &wbc);
if (ret == 0 && wait) {
wait_on_page_writeback(page);
if (PageError(page))
#include <linux/swapops.h>
#include <linux/buffer_head.h> /* for block_sync_page() */
#include <linux/mpage.h>
+#include <linux/writeback.h>
#include <asm/pgtable.h>
static struct bio *
* We may have stale swap cache pages in memory: notice
* them here and get rid of the unnecessary final write.
*/
-int swap_writepage(struct page *page)
+int swap_writepage(struct page *page, struct writeback_control *wbc)
{
struct bio *bio;
int ret = 0;
int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page)
{
int ret;
+ struct writeback_control swap_wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ };
lock_page(page);
ret = swap_readpage(NULL, page);
wait_on_page_locked(page);
} else {
- ret = swap_writepage(page);
+ ret = swap_writepage(page, &swap_wbc);
wait_on_page_writeback(page);
}
page->mapping = NULL;
/*
* Move the page from the page cache to the swap cache.
*/
-static int shmem_writepage(struct page *page)
+static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
struct shmem_inode_info *info;
swp_entry_t *entry, swap;
#include <linux/shm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
+#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
* and now we must reincrement count to try again later.
*/
if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
- swap_writepage(page);
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ };
+
+ swap_writepage(page, &wbc);
lock_page(page);
wait_on_page_writeback(page);
}
goto keep_locked;
if (test_clear_page_dirty(page)) {
int res;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .nr_to_write = SWAP_CLUSTER_MAX,
+ .nonblocking = 1,
+ .for_reclaim = 1,
+ };
write_lock(&mapping->page_lock);
list_move(&page->list, &mapping->locked_pages);
write_unlock(&mapping->page_lock);
SetPageReclaim(page);
- res = mapping->a_ops->writepage(page);
+ res = mapping->a_ops->writepage(page, &wbc);
if (res == WRITEPAGE_ACTIVATE) {
ClearPageReclaim(page);