<funcprototype>
<funcdef>struct pardevice *<function>parport_open</function></funcdef>
<paramdef>int <parameter>devnum</parameter></paramdef>
+ <paramdef>const char *<parameter>name</parameter></paramdef>
<paramdef>int <parameter>(*pf)</parameter>
<funcparams>void *</funcparams></paramdef>
<paramdef>int <parameter>(*kf)</parameter>
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 8
-EXTRAVERSION =-pre7
+EXTRAVERSION =-pre8
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
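With these values, KERNELRELEASE expands to 2.4.8-pre8.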
lock_kernel();
s = get_super(to_kdev_t(dev));
+ unlock_kernel();
if (s == NULL)
goto out;
err = vfs_statfs(s, &sbuf);
+ drop_super(s);
if (err)
goto out;
/* Changed to hpux_ustat: */
err = copy_to_user(ubuf,&tmp,sizeof(struct hpux_ustat)) ? -EFAULT : 0;
out:
- unlock_kernel();
return err;
}
serverworks_free_gatt_pages();
serverworks_free_page_map(&page_dir);
+ serverworks_free_page_map(&serverworks_private.scratch_dir);
return 0;
}
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
O_TARGET := drm.o
-export-objs := gamma_drv.o tdfx_drv.o r128_drv.o mga_drv.o i810_drv.o \
- ffb_drv.o
-list-multi := gamma.o tdfx.o r128.o mga.o i810.o ffb.o
+list-multi := gamma.o tdfx.o r128.o mga.o i810.o radeon.o ffb.o
gamma-objs := gamma_drv.o gamma_dma.o
tdfx-objs := tdfx_drv.o
down(&bdev->bd_sem);
/* syncing will go here */
lock_kernel();
- if (kind == BDEV_FILE || kind == BDEV_FS)
+ if (kind == BDEV_FILE)
fsync_dev(rdev);
+ else if (kind == BDEV_FS)
+ fsync_no_super(rdev);
if (atomic_dec_and_test(&bdev->bd_openers)) {
/* invalidating buffers will go here */
invalidate_buffers(rdev);
return sync_buffers(dev, 1);
}
+int fsync_no_super(kdev_t dev)
+{
+ sync_buffers(dev, 0);
+ return sync_buffers(dev, 1);
+}
+
int fsync_dev(kdev_t dev)
{
sync_buffers(dev, 0);
if (sb && sb_has_quota_enabled(sb, type))
ret = set_dqblk(sb, id, type, flags, (struct dqblk *) addr);
out:
+ if (sb)
+ drop_super(sb);
unlock_kernel();
return ret;
}
__sync_one(list_entry(tmp, struct inode, i_list), 0);
}
-static inline int wait_on_dirty(struct list_head *head)
-{
- struct list_head * tmp;
- list_for_each(tmp, head) {
- struct inode *inode = list_entry(tmp, struct inode, i_list);
- if (!inode->i_state & I_DIRTY)
- continue;
- __iget(inode);
- spin_unlock(&inode_lock);
- __wait_on_inode(inode);
- iput(inode);
- spin_lock(&inode_lock);
- return 1;
- }
- return 0;
-}
-
static inline void wait_on_locked(struct list_head *head)
{
struct list_head * tmp;
return 1;
}
-/**
- * sync_inodes
- * @dev: device to sync the inodes from.
- *
- * sync_inodes goes through the super block's dirty list,
- * writes them out, and puts them back on the normal list.
- */
-
-/*
- * caller holds exclusive lock on sb->s_umount
- */
-
void sync_inodes_sb(struct super_block *sb)
{
spin_lock(&inode_lock);
- sync_list(&sb->s_dirty);
- wait_on_locked(&sb->s_locked_inodes);
+ while (!list_empty(&sb->s_dirty)||!list_empty(&sb->s_locked_inodes)) {
+ sync_list(&sb->s_dirty);
+ wait_on_locked(&sb->s_locked_inodes);
+ }
spin_unlock(&inode_lock);
}
+/*
+ * Note:
+ * We don't need to grab a reference to superblock here. If it has non-empty
+ * ->s_dirty it hasn't been killed yet, and kill_super() won't proceed
+ * past sync_inodes_sb() until both ->s_dirty and ->s_locked_inodes are
+ * empty. Since __sync_one() regains inode_lock before it finally moves
+ * the inode off the superblock lists, we are OK.
+ */
+
void sync_unlocked_inodes(void)
{
- struct super_block * sb = sb_entry(super_blocks.next);
+ struct super_block * sb;
+ spin_lock(&inode_lock);
+ spin_lock(&sb_lock);
+ sb = sb_entry(super_blocks.next);
for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
if (!list_empty(&sb->s_dirty)) {
- spin_lock(&inode_lock);
+ spin_unlock(&sb_lock);
sync_list(&sb->s_dirty);
- spin_unlock(&inode_lock);
+ spin_lock(&sb_lock);
+ }
+ }
+ spin_unlock(&sb_lock);
+ spin_unlock(&inode_lock);
+}
+
+/*
+ * Find a superblock with inodes that need to be synced
+ */
+
+static struct super_block *get_super_to_sync(void)
+{
+ struct list_head *p;
+restart:
+ spin_lock(&inode_lock);
+ spin_lock(&sb_lock);
+ list_for_each(p, &super_blocks) {
+ struct super_block *s = list_entry(p,struct super_block,s_list);
+ if (list_empty(&s->s_dirty) && list_empty(&s->s_locked_inodes))
+ continue;
+ s->s_count++;
+ spin_unlock(&sb_lock);
+ spin_unlock(&inode_lock);
+ down_read(&s->s_umount);
+ if (!s->s_root) {
+ drop_super(s);
+ goto restart;
}
+ return s;
}
+ spin_unlock(&sb_lock);
+ spin_unlock(&inode_lock);
+ return NULL;
}
+/**
+ * sync_inodes
+ * @dev: device to sync the inodes from.
+ *
+ * sync_inodes goes through the super block's dirty list,
+ * writes them out, and puts them back on the normal list.
+ */
+
void sync_inodes(kdev_t dev)
{
- struct super_block * sb = sb_entry(super_blocks.next);
+ struct super_block * s;
/*
* Search the super_blocks array for the device(s) to sync.
*/
- for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
- if (!sb->s_dev)
- continue;
- if (dev && sb->s_dev != dev)
- continue;
- down_read(&sb->s_umount);
- if (sb->s_dev && (sb->s_dev == dev || !dev)) {
- spin_lock(&inode_lock);
- do {
- sync_list(&sb->s_dirty);
- } while (wait_on_dirty(&sb->s_locked_inodes));
- spin_unlock(&inode_lock);
+ if (dev) {
+ if ((s = get_super(dev)) != NULL) {
+ sync_inodes_sb(s);
+ drop_super(s);
+ }
+ } else {
+ while ((s = get_super_to_sync()) != NULL) {
+ sync_inodes_sb(s);
+ drop_super(s);
}
- up_read(&sb->s_umount);
- if (dev)
- break;
}
}
*/
static void try_to_sync_unused_inodes(void)
{
- struct super_block * sb = sb_entry(super_blocks.next);
+ struct super_block * sb;
+
+ spin_lock(&sb_lock);
+ sb = sb_entry(super_blocks.next);
for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
if (!sb->s_dev)
continue;
+ spin_unlock(&sb_lock);
if (!try_to_sync_unused_list(&sb->s_dirty))
- break;
+ return;
+ spin_lock(&sb_lock);
}
+ spin_unlock(&sb_lock);
}
/**
int invalidate_device(kdev_t dev, int do_sync)
{
- struct super_block *sb = get_super(dev);
+ struct super_block *sb;
int res;
if (do_sync)
fsync_dev(dev);
res = 0;
- if (sb)
+ sb = get_super(dev);
+ if (sb) {
res = invalidate_inodes(sb);
+ drop_super(sb);
+ }
invalidate_buffers(dev);
return res;
}
/* this is initialized in init/main.c */
kdev_t ROOT_DEV;
-int nr_super_blocks;
-int max_super_blocks = NR_SUPER;
LIST_HEAD(super_blocks);
+spinlock_t sb_lock = SPIN_LOCK_UNLOCKED;
/*
* Handling of filesystem drivers list.
mnt->mnt_parent = mnt;
spin_lock(&dcache_lock);
- list_add(&mnt->mnt_instances, &sb->s_mounts);
list_add(&mnt->mnt_list, vfsmntlist.prev);
spin_unlock(&dcache_lock);
if (sb->s_type->fs_flags & FS_SINGLE)
return mnt;
}
-static struct vfsmount *clone_mnt(struct vfsmount *old_mnt, struct dentry *root)
+static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root)
{
- char *name = old_mnt->mnt_devname;
+ char *name = old->mnt_devname;
struct vfsmount *mnt = alloc_vfsmnt();
+ struct super_block *sb = old->mnt_sb;
if (!mnt)
goto out;
if (mnt->mnt_devname)
strcpy(mnt->mnt_devname, name);
}
- mnt->mnt_sb = old_mnt->mnt_sb;
+ mnt->mnt_sb = sb;
mnt->mnt_root = dget(root);
mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
- spin_lock(&dcache_lock);
- list_add(&mnt->mnt_instances, &old_mnt->mnt_instances);
- spin_unlock(&dcache_lock);
+ atomic_inc(&sb->s_active);
out:
return mnt;
}
struct super_block *sb = mnt->mnt_sb;
dput(mnt->mnt_root);
- spin_lock(&dcache_lock);
- list_del(&mnt->mnt_instances);
- spin_unlock(&dcache_lock);
if (mnt->mnt_devname)
kfree(mnt->mnt_devname);
kmem_cache_free(mnt_cache, mnt);
kill_super(sb);
}
-
/* Use octal escapes, like mount does, for embedded spaces etc. */
static unsigned char need_escaping[] = { ' ', '\t', '\n', '\\' };
#undef MANGLE
#undef FREEROOM
}
+
+static inline void __put_super(struct super_block *sb)
+{
+ spin_lock(&sb_lock);
+ if (!--sb->s_count)
+ kfree(sb);
+ spin_unlock(&sb_lock);
+}
+
+static inline struct super_block * find_super(kdev_t dev)
+{
+ struct list_head *p;
+
+ list_for_each(p, &super_blocks) {
+ struct super_block * s = sb_entry(p);
+ if (s->s_dev == dev) {
+ s->s_count++;
+ return s;
+ }
+ }
+ return NULL;
+}
+
+void drop_super(struct super_block *sb)
+{
+ up_read(&sb->s_umount);
+ __put_super(sb);
+}
+
+static void put_super(struct super_block *sb)
+{
+ up_write(&sb->s_umount);
+ __put_super(sb);
+}
+
+static inline void write_super(struct super_block *sb)
+{
+ lock_super(sb);
+ if (sb->s_root && sb->s_dirt)
+ if (sb->s_op && sb->s_op->write_super)
+ sb->s_op->write_super(sb);
+ unlock_super(sb);
+}
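Under the new scheme drop_super() undoes what get_super() does: it releases the read lock on ->s_umount and lets __put_super() drop the ->s_count reference, freeing the superblock once the count reaches zero. A minimal caller sketch of that contract (report_blocksize() is a hypothetical helper, not part of the patch):

static int report_blocksize(kdev_t dev)
{
	/* Illustrative only: get_super() returns with ->s_umount held
	 * for read and ->s_count raised, so the superblock cannot be
	 * unmounted or freed while we look at it. */
	struct super_block *sb = get_super(dev);
	int size = -1;

	if (sb) {
		size = sb->s_blocksize;
		drop_super(sb);	/* drops ->s_umount and the ->s_count reference */
	}
	return size;
}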
/*
* Note: check the dirty flag before waiting, so we don't
{
struct super_block * sb;
- for (sb = sb_entry(super_blocks.next);
- sb != sb_entry(&super_blocks);
- sb = sb_entry(sb->s_list.next)) {
- if (!sb->s_dev)
- continue;
- if (dev && sb->s_dev != dev)
- continue;
- if (!sb->s_dirt)
- continue;
- lock_super(sb);
- if (sb->s_dev && sb->s_dirt && (!dev || dev == sb->s_dev))
- if (sb->s_op && sb->s_op->write_super)
- sb->s_op->write_super(sb);
- unlock_super(sb);
+ if (dev) {
+ sb = get_super(dev);
+ if (sb) {
+ if (sb->s_dirt)
+ write_super(sb);
+ drop_super(sb);
+ }
+ return;
}
+restart:
+ spin_lock(&sb_lock);
+ sb = sb_entry(super_blocks.next);
+ while (sb != sb_entry(&super_blocks))
+ if (sb->s_dirt) {
+ sb->s_count++;
+ spin_unlock(&sb_lock);
+ down_read(&sb->s_umount);
+ write_super(sb);
+ drop_super(sb);
+ goto restart;
+ } else
+ sb = sb_entry(sb->s_list.next);
+ spin_unlock(&sb_lock);
}
/**
if (!dev)
return NULL;
restart:
- s = sb_entry(super_blocks.next);
- while (s != sb_entry(&super_blocks))
- if (s->s_dev == dev) {
- /* Yes, it sucks. As soon as we get refcounting... */
- lock_super(s);
- unlock_super(s);
- if (s->s_dev == dev)
- return s;
- goto restart;
- } else
- s = sb_entry(s->s_list.next);
+ spin_lock(&sb_lock);
+ s = find_super(dev);
+ if (s) {
+ spin_unlock(&sb_lock);
+ /* Yes, it sucks. As soon as we get refcounting... */
+ /* Almost there - next two lines will go away RSN */
+ lock_super(s);
+ unlock_super(s);
+ down_read(&s->s_umount);
+ if (s->s_root)
+ return s;
+ drop_super(s);
+ goto restart;
+ }
+ spin_unlock(&sb_lock);
return NULL;
}
if (s == NULL)
goto out;
err = vfs_statfs(s, &sbuf);
+ drop_super(s);
if (err)
goto out;
* the request.
*/
-static struct super_block *get_empty_super(void)
+static struct super_block *alloc_super(void)
{
- struct super_block *s;
-
- for (s = sb_entry(super_blocks.next);
- s != sb_entry(&super_blocks);
- s = sb_entry(s->s_list.next)) {
- if (s->s_dev)
- continue;
- return s;
- }
- /* Need a new one... */
- if (nr_super_blocks >= max_super_blocks)
- return NULL;
- s = kmalloc(sizeof(struct super_block), GFP_USER);
+ struct super_block *s = kmalloc(sizeof(struct super_block), GFP_USER);
if (s) {
- nr_super_blocks++;
memset(s, 0, sizeof(struct super_block));
INIT_LIST_HEAD(&s->s_dirty);
INIT_LIST_HEAD(&s->s_locked_inodes);
- list_add (&s->s_list, super_blocks.prev);
INIT_LIST_HEAD(&s->s_files);
- INIT_LIST_HEAD(&s->s_mounts);
init_rwsem(&s->s_umount);
sema_init(&s->s_lock, 1);
+ s->s_count = 1;
+ atomic_set(&s->s_active, 1);
sema_init(&s->s_vfs_rename_sem,1);
sema_init(&s->s_nfsd_free_path_sem,1);
sema_init(&s->s_dquot.dqio_sem, 1);
sema_init(&s->s_dquot.dqoff_sem, 1);
+ s->s_maxbytes = MAX_NON_LFS;
}
return s;
}
void *data, int silent)
{
struct super_block * s;
- s = get_empty_super();
+ s = alloc_super();
if (!s)
goto out;
s->s_dev = dev;
s->s_bdev = bdev;
s->s_flags = flags;
- s->s_dirt = 0;
s->s_type = type;
- s->s_dquot.flags = 0;
- s->s_maxbytes = MAX_NON_LFS;
+ spin_lock(&sb_lock);
+ list_add (&s->s_list, super_blocks.prev);
+ spin_unlock(&sb_lock);
lock_super(s);
if (!type->read_super(s, data, silent))
goto out_fail;
s->s_bdev = 0;
s->s_type = NULL;
unlock_super(s);
+ atomic_dec(&s->s_active);
+ spin_lock(&sb_lock);
+ list_del(&s->s_list);
+ spin_unlock(&sb_lock);
+ __put_super(s);
return NULL;
}
if (sb) {
if (fs_type == sb->s_type &&
((flags ^ sb->s_flags) & MS_RDONLY) == 0) {
+/*
+ * We are heavily relying on mount_sem here. We _will_ get rid of that
+ * ugliness RSN (and then atomicity of ->s_active will play), but first
+ * we need to get rid of the "reuse" branch of get_empty_super(), and that
+ * requires reference counters. It's a chicken-and-egg problem, but fortunately
+ * we can use the fact that right now all accesses to ->s_active are
+ * under mount_sem.
+ */
+ if (atomic_read(&sb->s_active)) {
+ spin_lock(&sb_lock);
+ sb->s_count--;
+ spin_unlock(&sb_lock);
+ }
+ atomic_inc(&sb->s_active);
+ up_read(&sb->s_umount);
path_release(&nd);
return sb;
}
+ drop_super(sb);
} else {
mode_t mode = FMODE_READ; /* we always need it ;-) */
if (!(flags & MS_RDONLY))
sb = fs_type->kern_mnt->mnt_sb;
if (!sb)
BUG();
+ atomic_inc(&sb->s_active);
do_remount_sb(sb, flags, data);
return sb;
}
struct file_system_type *fs = sb->s_type;
struct super_operations *sop = sb->s_op;
- spin_lock(&dcache_lock);
- if (!list_empty(&sb->s_mounts)) {
- spin_unlock(&dcache_lock);
+ if (!atomic_dec_and_test(&sb->s_active))
return;
- }
- spin_unlock(&dcache_lock);
down_write(&sb->s_umount);
lock_kernel();
sb->s_root = NULL;
sb->s_type = NULL;
unlock_super(sb);
unlock_kernel();
- up_write(&sb->s_umount);
if (bdev) {
blkdev_put(bdev, BDEV_FS);
bdput(bdev);
} else
put_unnamed_dev(dev);
+ spin_lock(&sb_lock);
+ list_del(&sb->s_list);
+ spin_unlock(&sb_lock);
+ put_super(sb);
}
/*
mnt->mnt_root = dget(sb->s_root);
mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
- spin_lock(&dcache_lock);
- list_add(&mnt->mnt_instances, &sb->s_mounts);
- spin_unlock(&dcache_lock);
type->kern_mnt = mnt;
return mnt;
}
spin_lock(&dcache_lock);
- if (mnt->mnt_instances.next != mnt->mnt_instances.prev) {
+ if (atomic_read(&sb->s_active) > 1) {
if (atomic_read(&mnt->mnt_count) > 2) {
spin_unlock(&dcache_lock);
return -EBUSY;
mnt->mnt_root = dget(sb->s_root);
mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
- spin_lock(&dcache_lock);
- list_add(&mnt->mnt_instances, &sb->s_mounts);
- spin_unlock(&dcache_lock);
/* Something was mounted here while we slept */
while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
check_disk_change(ROOT_DEV);
sb = get_super(ROOT_DEV);
if (sb) {
+ /* FIXME */
fs_type = sb->s_type;
+ atomic_inc(&sb->s_active);
+ up_read(&sb->s_umount);
goto mount_it;
}
#define INIT_THREAD { \
INIT_SP, /* ksp */ \
0, /* wchan */ \
- (struct pt_regs *)INIT_SP - 1, /* regs */ \
+ 0, /* regs */ \
KERNEL_DS, /*fs*/ \
swapper_pg_dir, /* pgdir */ \
0, /* last_syscall */ \
unsigned long get_wchan(struct task_struct *p);
-#define KSTK_EIP(tsk) ((tsk)->thread.regs->nip)
-#define KSTK_ESP(tsk) ((tsk)->thread.regs->gpr[1])
+#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
+#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
/*
* NOTE! The task struct and the stack go together
};
extern struct inodes_stat_t inodes_stat;
-extern int max_super_blocks, nr_super_blocks;
extern int leases_enable, dir_notify_enable, lease_break_time;
#define NR_FILE 8192 /* this can well be larger on a larger system */
#include <linux/cramfs_fs_sb.h>
extern struct list_head super_blocks;
+extern spinlock_t sb_lock;
#define sb_entry(list) list_entry((list), struct super_block, s_list)
struct super_block {
struct dentry *s_root;
struct rw_semaphore s_umount;
struct semaphore s_lock;
+ int s_count;
+ atomic_t s_active;
struct list_head s_dirty; /* dirty inodes */
struct list_head s_locked_inodes;/* inodes being synced */
struct list_head s_files;
struct block_device *s_bdev;
- struct list_head s_mounts; /* vfsmount(s) of this one */
struct quota_mount_options s_dquot; /* Diskquota specific options */
union {
extern void sync_dev(kdev_t);
extern int fsync_dev(kdev_t);
extern int fsync_super(struct super_block *);
+extern int fsync_no_super(kdev_t);
extern void sync_inodes_sb(struct super_block *);
extern int fsync_inode_buffers(struct inode *);
extern int osync_inode_buffers(struct inode *);
extern struct file_system_type *get_fs_type(const char *name);
extern struct super_block *get_super(kdev_t);
+extern void drop_super(struct super_block *sb);
static inline int is_mounted(kdev_t dev)
{
struct super_block *sb = get_super(dev);
if (sb) {
- /* drop_super(sb); will go here */
+ drop_super(sb);
return 1;
}
return 0;
struct vfsmount *mnt_parent; /* fs we are mounted on */
struct dentry *mnt_mountpoint; /* dentry of mountpoint */
struct dentry *mnt_root; /* root of the mounted tree */
- struct list_head mnt_instances; /* other vfsmounts of the same fs */
struct super_block *mnt_sb; /* pointer to superblock */
struct list_head mnt_mounts; /* list of children, anchored here */
struct list_head mnt_child; /* and going through their mnt_child */
#define page_hash(mapping,index) (page_hash_table+_page_hashfn(mapping,index))
extern struct page * __find_get_page(struct address_space *mapping,
- unsigned long offset, struct page **hash);
+ unsigned long index, struct page **hash);
+#define find_get_page(mapping, index) \
+ __find_get_page(mapping, index, page_hash(mapping, index))
extern struct page * __find_lock_page (struct address_space * mapping,
unsigned long index, struct page **hash);
extern void lock_page(struct page *page);
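find_get_page() wraps __find_get_page() with the page_hash() lookup, and the page it returns carries an extra reference that the caller must drop. A rough usage sketch (index_is_cached() is hypothetical, for illustration only):

static int index_is_cached(struct address_space *mapping, unsigned long index)
{
	/* Illustrative only: look the page up in the page cache and
	 * balance the reference that find_get_page() took. */
	struct page *page = find_get_page(mapping, index);

	if (!page)
		return 0;
	page_cache_release(page);
	return 1;
}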
/* linux/mm/swap.c */
extern int memory_pressure;
-extern void age_page_down(struct page *);
-extern void age_page_down_nolock(struct page *);
-extern void age_page_down_ageonly(struct page *);
extern void deactivate_page(struct page *);
extern void deactivate_page_nolock(struct page *);
extern void activate_page(struct page *);
EXPORT_SYMBOL(update_atime);
EXPORT_SYMBOL(get_fs_type);
EXPORT_SYMBOL(get_super);
+EXPORT_SYMBOL(drop_super);
EXPORT_SYMBOL(getname);
EXPORT_SYMBOL(names_cachep);
EXPORT_SYMBOL(fput);
EXPORT_SYMBOL(__pollwait);
EXPORT_SYMBOL(poll_freewait);
EXPORT_SYMBOL(ROOT_DEV);
+EXPORT_SYMBOL(__find_get_page);
EXPORT_SYMBOL(__find_lock_page);
EXPORT_SYMBOL(grab_cache_page);
EXPORT_SYMBOL(read_cache_page);
0444, NULL, &proc_dointvec},
{FS_MAXFILE, "file-max", &files_stat.max_files, sizeof(int),
0644, NULL, &proc_dointvec},
- {FS_NRSUPER, "super-nr", &nr_super_blocks, sizeof(int),
- 0444, NULL, &proc_dointvec},
- {FS_MAXSUPER, "super-max", &max_super_blocks, sizeof(int),
- 0644, NULL, &proc_dointvec},
{FS_NRDQUOT, "dquot-nr", &nr_dquots, 2*sizeof(int),
0444, NULL, &proc_dointvec},
{FS_MAXDQUOT, "dquot-max", &max_dquots, sizeof(int),
int error;
down (&inode->i_sem);
- if (inode->i_size < (loff_t) idx * PAGE_CACHE_SIZE)
+ if (inode->i_size <= (loff_t) idx * PAGE_CACHE_SIZE)
goto sigbus;
*ptr = shmem_getpage_locked(inode, idx);
if (IS_ERR (*ptr))
8, /* do swap I/O in clusters of this size */
};
-/*
- * We use this (minimal) function in the case where we
- * know we can't deactivate the page (yet).
- */
-void age_page_down_ageonly(struct page * page)
-{
- page->age /= 2;
-}
-
-void age_page_down_nolock(struct page * page)
-{
- /* The actual page aging bit */
- page->age /= 2;
-
- /*
- * The page is now an old page. Move to the inactive
- * list (if possible ... see below).
- */
- if (!page->age)
- deactivate_page_nolock(page);
-}
-
-void age_page_down(struct page * page)
-{
- /* The actual page aging bit */
- page->age /= 2;
-
- /*
- * The page is now an old page. Move to the inactive
- * list (if possible ... see below).
- */
- if (!page->age)
- deactivate_page(page);
-}
-
-
/**
* (de)activate_page - move pages from/to active and inactive lists
* @page: the page we want to move
#define MAX(a,b) ((a) > (b) ? (a) : (b))
+static inline void age_page_up(struct page *page)
+{
+ unsigned age = page->age + PAGE_AGE_ADV;
+ if (age > PAGE_AGE_MAX)
+ age = PAGE_AGE_MAX;
+ page->age = age;
+}
+
+static inline void age_page_down(struct page * page)
+{
+ page->age /= 2;
+}
+
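age_page_up() advances a page's age linearly and caps it at PAGE_AGE_MAX, while age_page_down() halves it, so a page that stops being referenced loses its age quickly. A small illustration; the numbers in the comments assume PAGE_AGE_ADV == 3, which is only an assumption about the swap.h defaults, and age_example() is a hypothetical function, not part of the patch:

static void age_example(struct page *page)
{
	/* Illustrative only: the asymmetry between up- and down-aging. */
	page->age = 0;
	age_page_up(page);	/* referenced:     0 -> 3 */
	age_page_up(page);	/* referenced:     3 -> 6 */
	age_page_down(page);	/* not referenced: 6 -> 3 */
	age_page_down(page);	/* not referenced: 3 -> 1 */
	age_page_down(page);	/* not referenced: 1 -> 0 */
}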
/*
* The swap-out function returns 1 if it successfully
* scanned all the pages it was asked to (`count').
static unsigned int zone_inactive_shortage(zone_t *zone)
{
- unsigned int inactive;
+ unsigned int sum;
if (!zone->size)
return 0;
- inactive = zone->inactive_dirty_pages;
- inactive += zone->inactive_clean_pages;
- inactive += zone->free_pages;
-
- return inactive < zone->pages_high;
+ sum = zone->pages_high;
+ sum -= zone->inactive_dirty_pages;
+ sum -= zone->inactive_clean_pages;
+ sum -= zone->free_pages;
+
+ if (sum > 0)
+ return sum;
+ return 0;
}
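zone_inactive_shortage() now reports how many pages the zone is short of its pages_high target rather than a yes/no answer, so callers can add the deficits up. A sketch of such a caller, assuming it sits in the same file since the helper is static; node_inactive_shortage() is hypothetical, and pg_data_t, node_zones and MAX_NR_ZONES are the standard 2.4 mmzone.h definitions:

static unsigned int node_inactive_shortage(pg_data_t *pgdat)
{
	/* Illustrative only: total the per-zone deficits for one node. */
	unsigned int total = 0;
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		total += zone_inactive_shortage(pgdat->node_zones + i);
	return total;
}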
static unsigned int zone_free_plenty(zone_t *zone)
/* Don't look at this pte if it's been accessed recently. */
if (ptep_test_and_clear_young(page_table)) {
- page->age += PAGE_AGE_ADV;
- if (page->age > PAGE_AGE_MAX)
- page->age = PAGE_AGE_MAX;
+ age_page_up(page);
return;
}
return cleaned_pages;
}
-static inline void age_page_up(struct page *page)
-{
- unsigned age = page->age + PAGE_AGE_ADV;
- if (age > PAGE_AGE_MAX)
- age = PAGE_AGE_MAX;
- page->age = age;
-}
-
-
/**
* refill_inactive_scan - scan the active list and find pages to deactivate
* @priority: the priority at which to scan
age_page_up(page);
page_active = 1;
} else {
- age_page_down_ageonly(page);
+ age_page_down(page);
/*
* Since we don't hold a reference on the page
* ourselves, we have to do our test a bit more