static void
__writeback_single_inode(struct inode *inode, int sync, int *nr_to_write)
{
+ if (current_is_pdflush() && (inode->i_state & I_LOCK))
+ return;
+
while (inode->i_state & I_LOCK) {
__iget(inode);
spin_unlock(&inode_lock);
* had their first dirtying at a time earlier than *older_than_this.
*
* Called under inode_lock.
+ *
+ * If we're a pdflush thread, then implement pdflush collision avoidance
+ * against the entire list.
*/
static void __sync_list(struct list_head *head, int sync_mode,
int *nr_to_write, unsigned long *older_than_this)
while ((tmp = head->prev) != head) {
struct inode *inode = list_entry(tmp, struct inode, i_list);
struct address_space *mapping = inode->i_mapping;
+ struct backing_dev_info *bdi;
+
int really_sync;
/* Was this inode dirtied after __sync_list was called? */
time_after(mapping->dirtied_when, *older_than_this))
break;
+ bdi = mapping->backing_dev_info;
+ if (current_is_pdflush() && !writeback_acquire(bdi))
+ break;
+
really_sync = (sync_mode == WB_SYNC_ALL);
if ((sync_mode == WB_SYNC_LAST) && (head->prev == head))
really_sync = 1;
__writeback_single_inode(inode, really_sync, nr_to_write);
+
+ if (current_is_pdflush())
+ writeback_release(bdi);
+
if (nr_to_write && *nr_to_write == 0)
break;
}
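
(The helpers used in this hunk are not shown in the patch. Purely as an
assumption about their shape: writeback_acquire()/writeback_release() can be
a test-and-set on a per-backing_dev_info state bit, and current_is_pdflush()
a check of the PF_FLUSHER task flag. A minimal sketch, with the BDI_pdflush
bit name assumed:)

	/* Sketch only - assumes a BDI_pdflush bit in bdi->state. */
	static inline int writeback_acquire(struct backing_dev_info *bdi)
	{
		/* Nonzero return: this thread now owns writeback for @bdi */
		return !test_and_set_bit(BDI_pdflush, &bdi->state);
	}

	static inline void writeback_release(struct backing_dev_info *bdi)
	{
		clear_bit(BDI_pdflush, &bdi->state);
	}

	/* pdflush worker threads run with PF_FLUSHER set in current->flags */
	static inline int current_is_pdflush(void)
	{
		return current->flags & PF_FLUSHER;
	}
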
*
* If `older_than_this' is non-zero then only flush inodes which have a
* flushtime older than *older_than_this.
+ *
+ * This is a "memory cleansing" operation, not a "data integrity" operation.
*/
void writeback_unlocked_inodes(int *nr_to_write, int sync_mode,
unsigned long *older_than_this)
if (sb->s_writeback_gen == writeback_gen)
continue;
sb->s_writeback_gen = writeback_gen;
-
- if (current->flags & PF_FLUSHER) {
- if (sb->s_flags & MS_FLUSHING) {
- /*
- * There's no point in two pdflush threads
- * flushing the same device. But for other
- * callers, we want to perform the flush
- * because the fdatasync is how we implement
- * writer throttling.
- */
- continue;
- }
- sb->s_flags |= MS_FLUSHING;
- }
-
if (!list_empty(&sb->s_dirty)) {
spin_unlock(&sb_lock);
__sync_list(&sb->s_dirty, sync_mode,
nr_to_write, older_than_this);
spin_lock(&sb_lock);
}
- if (current->flags & PF_FLUSHER)
- sb->s_flags &= ~MS_FLUSHING;
if (nr_to_write && *nr_to_write == 0)
break;
}
}
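
(A note on the s_writeback_gen test in the loop above, with the counter
itself assumed rather than shown - it is the usual generation-counter idiom
for list walks that must drop their lock:)

	/*
	 * Sketch (assumption, not in this hunk): bumped once per writeback
	 * pass, under sb_lock.  Because each superblock records the pass it
	 * was last visited in, the loop above skips rather than revisits a
	 * superblock - and so cannot livelock - even though sb_lock is
	 * dropped around __sync_list() and the super_blocks list may be
	 * reordered underneath us.
	 */
	static unsigned int writeback_gen;	/* protected by sb_lock */
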
/*
- * Called under inode_lock
+ * Called under inode_lock.
*/
static int __try_to_writeback_unused_list(struct list_head *head, int nr_inodes)
{
inode = list_entry(tmp, struct inode, i_list);
if (!atomic_read(&inode->i_count)) {
+ struct backing_dev_info *bdi;
+
+ bdi = inode->i_mapping->backing_dev_info;
+ if (current_is_pdflush() && !writeback_acquire(bdi))
+ goto out;
+
__sync_single_inode(inode, 0, NULL);
+
+ if (current_is_pdflush())
+ writeback_release(bdi);
+
nr_inodes--;
/*
tmp = head;
}
}
-
+out:
return nr_inodes;
}
}
}
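
(Same collision-avoidance pattern as in __sync_list(). To see why a single
test-and-set per device suffices, here is a standalone userspace sketch -
the names and pthread scaffolding are illustrative only, not kernel code:)

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	/* Stands in for the per-device flag on one backing_dev_info */
	static atomic_flag dev_flushing = ATOMIC_FLAG_INIT;

	static void *flusher(void *name)
	{
		/* "writeback_acquire": succeeds for exactly one caller */
		if (!atomic_flag_test_and_set(&dev_flushing))
			printf("%s: acquired device, writing back\n", (char *)name);
		else
			printf("%s: device busy, moving on\n", (char *)name);
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, flusher, "pdflush-0");
		pthread_create(&b, NULL, flusher, "pdflush-1");
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		/* "writeback_release" */
		atomic_flag_clear(&dev_flushing);
		return 0;
	}

Built with `cc -pthread', exactly one of the two threads reports acquiring
the device regardless of scheduling, because the flag is not cleared until
both have run.
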
-void try_to_writeback_unused_inodes(unsigned long pexclusive)
+/*
+ * FIXME: the try_to_writeback_unused functions look dreadfully similar to
+ * writeback_unlocked_inodes...
+ */
+void try_to_writeback_unused_inodes(unsigned long unused)
{
struct super_block * sb;
int nr_inodes = inodes_stat.nr_unused;
}
spin_unlock(&sb_lock);
spin_unlock(&inode_lock);
- clear_bit(0, (unsigned long *)pexclusive);
}
/**
dispose_list(freeable);
/*
- * If we didn't freed enough clean inodes schedule
- * a sync of the dirty inodes, we cannot do it
- * from here or we're either synchronously dogslow
- * or we deadlock with oom.
+ * If we didn't free enough clean inodes then schedule writeback of
+ * the dirty inodes. We cannot do it from here or we're either
+ * synchronously dogslow or we deadlock with oom.
*/
- if (goal) {
- static unsigned long exclusive;
-
- if (!test_and_set_bit(0, &exclusive)) {
- if (pdflush_operation(try_to_writeback_unused_inodes,
- (unsigned long)&exclusive))
- clear_bit(0, &exclusive);
- }
- }
+ if (goal)
+ pdflush_operation(try_to_writeback_unused_inodes, 0);
}
+
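
(The old hand-rolled `exclusive' bit guarded against queueing this work
twice; with per-bdi collision avoidance inside the worker, a duplicate
dispatch is now harmless, so the guard can go. The contract of
pdflush_operation() as assumed from its use here, not quoted from the
source:)

	/*
	 * Assumed contract, not part of this patch: hand fn(arg0) to an
	 * idle pdflush worker thread.  Returns 0 if a worker was woken,
	 * nonzero if every pdflush thread is busy - in which case the
	 * request is simply dropped, which is acceptable for an
	 * opportunistic memory-cleansing operation like the one above.
	 */
	int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
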
/*
* This is called from kswapd when we think we need some
* more memory, but aren't really sure how much. So we