static unsigned int d_hash_mask;
static unsigned int d_hash_shift;
-static struct list_head *dentry_hashtable;
+static struct hlist_head *dentry_hashtable;
static LIST_HEAD(dentry_unused);
-static int max_dentries;
-static void * hashtable_end;
-
-static inline int is_bucket(void * addr)
-{
- return ((addr < (void *)dentry_hashtable)
- || (addr > hashtable_end) ? 0 : 1);
-}
/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
while (next != head) {
tmp = next;
next = tmp->next;
+ prefetch(next);
alias = list_entry(tmp, struct dentry, d_alias);
if (!d_unhashed(alias)) {
if (alias->d_flags & DCACHE_DISCONNECTED)
if (tmp == &dentry_unused)
break;
list_del_init(tmp);
+ prefetch(dentry_unused.prev);
dentry_stat.nr_unused--;
dentry = list_entry(tmp, struct dentry, d_lru);
* done under dcache_lock.
*
*/
-void shrink_dcache_anon(struct list_head *head)
+void shrink_dcache_anon(struct hlist_head *head)
{
- struct list_head *lp;
+ struct hlist_node *lp;
int found;
do {
found = 0;
spin_lock(&dcache_lock);
- list_for_each(lp, head) {
- struct dentry *this = list_entry(lp, struct dentry, d_hash);
+ hlist_for_each(lp, head) {
+ struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
list_del(&this->d_lru);
/* don't add non zero d_count dentries
dentry->d_mounted = 0;
dentry->d_cookie = NULL;
dentry->d_bucket = NULL;
- INIT_LIST_HEAD(&dentry->d_hash);
+ INIT_HLIST_NODE(&dentry->d_hash);
INIT_LIST_HEAD(&dentry->d_lru);
INIT_LIST_HEAD(&dentry->d_subdirs);
INIT_LIST_HEAD(&dentry->d_alias);
return res;
}
-static inline struct list_head * d_hash(struct dentry * parent, unsigned long hash)
+static inline struct hlist_head * d_hash(struct dentry * parent, unsigned long hash)
{
hash += (unsigned long) parent / L1_CACHE_BYTES;
hash = hash ^ (hash >> D_HASHBITS);
res->d_flags |= DCACHE_DISCONNECTED;
res->d_vfs_flags &= ~DCACHE_UNHASHED;
list_add(&res->d_alias, &inode->i_dentry);
- list_add(&res->d_hash, &inode->i_sb->s_anon);
+ hlist_add_head(&res->d_hash, &inode->i_sb->s_anon);
spin_unlock(&res->d_lock);
}
inode = NULL; /* don't drop reference */
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
- struct list_head *head = d_hash(parent,hash);
+ struct hlist_head *head = d_hash(parent,hash);
struct dentry *found = NULL;
- struct list_head *tmp;
- int lookup_count = 0;
+ struct hlist_node *node;
rcu_read_lock();
- /* lookup is terminated when flow reaches any bucket head */
- for(tmp = head->next; !is_bucket(tmp); tmp = tmp->next) {
+ hlist_for_each(node, head) {
struct dentry *dentry;
unsigned long move_count;
struct qstr * qstr;
+ prefetch(node->next);
+
smp_read_barrier_depends();
- dentry = list_entry(tmp, struct dentry, d_hash);
+ dentry = hlist_entry(node, struct dentry, d_hash);
/* if lookup ends up in a different bucket
* due to concurrent rename, fail it
if (unlikely(dentry->d_bucket != head))
break;
- /* to avoid race if dentry keep coming back to original
- * bucket due to double moves
- */
- if (unlikely(++lookup_count > max_dentries))
- break;
-
/*
* We must take a snapshot of d_move_count followed by
* read memory barrier before any search key comparison
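[ The d_move_count protocol the comment above describes is a sequence-count
  style check: the reader snapshots the counter, orders that read before the
  search-key comparison with a read barrier, and distrusts any match if the
  counter has moved in the meantime (a concurrent d_move() bumps it).  The
  following is an editorial sketch of the reader side only, not part of the
  patch; key_matches() is a hypothetical stand-in for the qstr comparison
  done in the real d_lookup(): ]

	/* Sketch only: reader-side d_move_count validation. */
	static struct dentry *check_candidate(struct dentry *dentry,
					      const struct qstr *name)
	{
		unsigned long move_count = dentry->d_move_count; /* snapshot */

		smp_rmb();		/* snapshot before reading the keys */
		if (!key_matches(dentry, name))
			return NULL;
		smp_rmb();		/* key reads before the recheck */
		if (dentry->d_move_count != move_count)
			return NULL;	/* raced with d_move(); caller retries */
		return dentry;		/* match taken from a stable window */
	}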
unsigned long dent_addr = (unsigned long) dentry;
unsigned long min_addr = PAGE_OFFSET;
unsigned long align_mask = 0x0F;
- struct list_head *base, *lhp;
+ struct hlist_head *base;
+ struct hlist_node *lhp;
if (dent_addr < min_addr)
goto out;
goto out;
spin_lock(&dcache_lock);
- lhp = base = d_hash(dparent, dentry->d_name.hash);
- while ((lhp = lhp->next) != base) {
+ base = d_hash(dparent, dentry->d_name.hash);
+ hlist_for_each(lhp, base) {
+ prefetch(lhp->next);
/* read_barrier_depends() not required for d_hash list
* as it is parsed under dcache_lock
*/
- if (dentry == list_entry(lhp, struct dentry, d_hash)) {
+ if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
dget(dentry);
spin_unlock(&dcache_lock);
return 1;
void d_rehash(struct dentry * entry)
{
- struct list_head *list = d_hash(entry->d_parent, entry->d_name.hash);
+ struct hlist_head *list = d_hash(entry->d_parent, entry->d_name.hash);
spin_lock(&dcache_lock);
- if (!list_empty(&entry->d_hash) && !d_unhashed(entry)) BUG();
entry->d_vfs_flags &= ~DCACHE_UNHASHED;
entry->d_bucket = list;
- list_add_rcu(&entry->d_hash, list);
+ hlist_add_head_rcu(&entry->d_hash, list);
spin_unlock(&dcache_lock);
}
* We could be nicer about the deleted file, and let it show
* up under the name it got deleted rather than the name that
* deleted it.
- *
- * Careful with the hash switch. The hash switch depends on
- * the fact that any list-entry can be a head of the list.
- * Think about it.
*/
/**
/* Move the dentry to the target hash queue, if on different bucket */
if (dentry->d_bucket != target->d_bucket) {
dentry->d_bucket = target->d_bucket;
- list_del_rcu(&dentry->d_hash);
- list_add_rcu(&dentry->d_hash, &target->d_hash);
+ hlist_del_rcu(&dentry->d_hash);
+ hlist_add_head_rcu(&dentry->d_hash, target->d_bucket);
}
/* Unhash the target: dput() will then get rid of it */
continue;
}
parent = dentry->d_parent;
+ prefetch(parent);
namelen = dentry->d_name.len;
buflen -= namelen + 1;
if (buflen < 0)
static void __init dcache_init(unsigned long mempages)
{
- struct list_head *d;
+ struct hlist_head *d;
unsigned long order;
unsigned int nr_hash;
int i;
if (!dentry_cache)
panic("Cannot create dentry cache");
- /* approximate maximum number of dentries in one hash bucket */
- max_dentries = (mempages * (PAGE_SIZE / sizeof(struct dentry)));
-
set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
#if PAGE_SHIFT < 13
mempages >>= (13 - PAGE_SHIFT);
#endif
- mempages *= sizeof(struct list_head);
+ mempages *= sizeof(struct hlist_head);
for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
;
unsigned long tmp;
nr_hash = (1UL << order) * PAGE_SIZE /
- sizeof(struct list_head);
+ sizeof(struct hlist_head);
d_hash_mask = (nr_hash - 1);
tmp = nr_hash;
while ((tmp >>= 1UL) != 0UL)
d_hash_shift++;
- dentry_hashtable = (struct list_head *)
+ dentry_hashtable = (struct hlist_head *)
__get_free_pages(GFP_ATOMIC, order);
} while (dentry_hashtable == NULL && --order >= 0);
if (!dentry_hashtable)
panic("Failed to allocate dcache hash table\n");
- hashtable_end = dentry_hashtable + nr_hash;
-
d = dentry_hashtable;
i = nr_hash;
do {
- INIT_LIST_HEAD(d);
+ INIT_HLIST_HEAD(d);
d++;
i--;
} while (i);
* Only add valid (hashed) inodes to the superblock's
* dirty list. Add blockdev inodes as well.
*/
- if (list_empty(&inode->i_hash) && !S_ISBLK(inode->i_mode))
+ if (hlist_unhashed(&inode->i_hash) && !S_ISBLK(inode->i_mode))
goto out;
/*
static void hugetlbfs_delete_inode(struct inode *inode)
{
- list_del_init(&inode->i_hash);
+ hlist_del_init(&inode->i_hash);
list_del_init(&inode->i_list);
inode->i_state |= I_FREEING;
inodes_stat.nr_inodes--;
{
struct super_block *super_block = inode->i_sb;
- if (list_empty(&inode->i_hash))
+ if (hlist_unhashed(&inode->i_hash))
goto out_truncate;
if (!(inode->i_state & (I_DIRTY|I_LOCK))) {
/* write_inode_now() ? */
inodes_stat.nr_unused--;
- list_del_init(&inode->i_hash);
+ hlist_del_init(&inode->i_hash);
out_truncate:
list_del_init(&inode->i_list);
inode->i_state |= I_FREEING;
LIST_HEAD(inode_in_use);
LIST_HEAD(inode_unused);
-static struct list_head *inode_hashtable;
-static LIST_HEAD(anon_hash_chain); /* for inodes with NULL i_sb */
+static struct hlist_head *inode_hashtable;
+static HLIST_HEAD(anon_hash_chain); /* for inodes with NULL i_sb */
/*
* A simple spinlock to protect the list manipulations.
void inode_init_once(struct inode *inode)
{
memset(inode, 0, sizeof(*inode));
- INIT_LIST_HEAD(&inode->i_hash);
+ INIT_HLIST_NODE(&inode->i_hash);
INIT_LIST_HEAD(&inode->i_data.clean_pages);
INIT_LIST_HEAD(&inode->i_data.dirty_pages);
INIT_LIST_HEAD(&inode->i_data.locked_pages);
continue;
invalidate_inode_buffers(inode);
if (!atomic_read(&inode->i_count)) {
- list_del_init(&inode->i_hash);
+ hlist_del_init(&inode->i_hash);
list_del(&inode->i_list);
list_add(&inode->i_list, dispose);
inode->i_state |= I_FREEING;
if (!can_unuse(inode))
continue;
}
- list_del_init(&inode->i_hash);
+ hlist_del_init(&inode->i_hash);
list_move(&inode->i_list, &freeable);
inode->i_state |= I_FREEING;
nr_pruned++;
* by hand after calling find_inode now! This simplifies iunique and won't
* add any additional branch in the common code.
*/
-static struct inode * find_inode(struct super_block * sb, struct list_head *head, int (*test)(struct inode *, void *), void *data)
+static struct inode * find_inode(struct super_block * sb, struct hlist_head *head, int (*test)(struct inode *, void *), void *data)
{
- struct list_head *tmp;
- struct inode * inode;
+ struct hlist_node *node;
+ struct inode * inode = NULL;
- tmp = head;
- for (;;) {
- tmp = tmp->next;
- inode = NULL;
- if (tmp == head)
- break;
- inode = list_entry(tmp, struct inode, i_hash);
+ hlist_for_each(node, head) {
+ prefetch(node->next);
+ inode = hlist_entry(node, struct inode, i_hash);
if (inode->i_sb != sb)
continue;
if (!test(inode, data))
continue;
break;
}
- return inode;
+ return node ? inode : NULL;
}
/*
* find_inode_fast is the fast path version of find_inode, see the comment at
* iget_locked for details.
*/
-static struct inode * find_inode_fast(struct super_block * sb, struct list_head *head, unsigned long ino)
+static struct inode * find_inode_fast(struct super_block * sb, struct hlist_head *head, unsigned long ino)
{
- struct list_head *tmp;
- struct inode * inode;
+ struct hlist_node *node;
+ struct inode * inode = NULL;
- tmp = head;
- for (;;) {
- tmp = tmp->next;
- inode = NULL;
- if (tmp == head)
- break;
- inode = list_entry(tmp, struct inode, i_hash);
+ hlist_for_each(node, head) {
+ prefetch(node->next);
+ inode = hlist_entry(node, struct inode, i_hash);
if (inode->i_ino != ino)
continue;
if (inode->i_sb != sb)
continue;
break;
}
- return inode;
+ return node ? inode : NULL;
}
/**
* We no longer cache the sb_flags in i_flags - see fs.h
* -- rmk@arm.uk.linux.org
*/
-static struct inode * get_new_inode(struct super_block *sb, struct list_head *head, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *data)
+static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *head, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *data)
{
struct inode * inode;
inodes_stat.nr_inodes++;
list_add(&inode->i_list, &inode_in_use);
- list_add(&inode->i_hash, head);
+ hlist_add_head(&inode->i_hash, head);
inode->i_state = I_LOCK|I_NEW;
spin_unlock(&inode_lock);
* get_new_inode_fast is the fast path version of get_new_inode, see the
* comment at iget_locked for details.
*/
-static struct inode * get_new_inode_fast(struct super_block *sb, struct list_head *head, unsigned long ino)
+static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_head *head, unsigned long ino)
{
struct inode * inode;
inode->i_ino = ino;
inodes_stat.nr_inodes++;
list_add(&inode->i_list, &inode_in_use);
- list_add(&inode->i_hash, head);
+ hlist_add_head(&inode->i_hash, head);
inode->i_state = I_LOCK|I_NEW;
spin_unlock(&inode_lock);
{
static ino_t counter = 0;
struct inode *inode;
- struct list_head * head;
+ struct hlist_head * head;
ino_t res;
spin_lock(&inode_lock);
retry:
* Note, @test is called with the inode_lock held, so can't sleep.
*/
static inline struct inode *ifind(struct super_block *sb,
- struct list_head *head, int (*test)(struct inode *, void *),
+ struct hlist_head *head, int (*test)(struct inode *, void *),
void *data)
{
struct inode *inode;
* Otherwise NULL is returned.
*/
static inline struct inode *ifind_fast(struct super_block *sb,
- struct list_head *head, unsigned long ino)
+ struct hlist_head *head, unsigned long ino)
{
struct inode *inode;
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
- struct list_head *head = inode_hashtable + hash(sb, hashval);
+ struct hlist_head *head = inode_hashtable + hash(sb, hashval);
return ifind(sb, head, test, data);
}
*/
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
- struct list_head *head = inode_hashtable + hash(sb, ino);
+ struct hlist_head *head = inode_hashtable + hash(sb, ino);
return ifind_fast(sb, head, ino);
}
int (*test)(struct inode *, void *),
int (*set)(struct inode *, void *), void *data)
{
- struct list_head *head = inode_hashtable + hash(sb, hashval);
+ struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode;
inode = ifind(sb, head, test, data);
*/
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
- struct list_head *head = inode_hashtable + hash(sb, ino);
+ struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
inode = ifind_fast(sb, head, ino);
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
- struct list_head *head = &anon_hash_chain;
+ struct hlist_head *head = &anon_hash_chain;
if (inode->i_sb)
head = inode_hashtable + hash(inode->i_sb, hashval);
spin_lock(&inode_lock);
- list_add(&inode->i_hash, head);
+ hlist_add_head(&inode->i_hash, head);
spin_unlock(&inode_lock);
}
void remove_inode_hash(struct inode *inode)
{
spin_lock(&inode_lock);
- list_del_init(&inode->i_hash);
+ hlist_del_init(&inode->i_hash);
spin_unlock(&inode_lock);
}
{
struct super_operations *op = inode->i_sb->s_op;
- list_del_init(&inode->i_hash);
+ hlist_del_init(&inode->i_hash);
list_del_init(&inode->i_list);
inode->i_state|=I_FREEING;
inodes_stat.nr_inodes--;
{
struct super_block *sb = inode->i_sb;
- if (!list_empty(&inode->i_hash)) {
+ if (!hlist_unhashed(&inode->i_hash)) {
if (!(inode->i_state & (I_DIRTY|I_LOCK))) {
list_del(&inode->i_list);
list_add(&inode->i_list, &inode_unused);
write_inode_now(inode, 1);
spin_lock(&inode_lock);
inodes_stat.nr_unused--;
- list_del_init(&inode->i_hash);
+ hlist_del_init(&inode->i_hash);
}
list_del_init(&inode->i_list);
inode->i_state|=I_FREEING;
*/
void __init inode_init(unsigned long mempages)
{
- struct list_head *head;
+ struct hlist_head *head;
unsigned long order;
unsigned int nr_hash;
int i;
unsigned long tmp;
nr_hash = (1UL << order) * PAGE_SIZE /
- sizeof(struct list_head);
+ sizeof(struct hlist_head);
i_hash_mask = (nr_hash - 1);
tmp = nr_hash;
while ((tmp >>= 1UL) != 0UL)
i_hash_shift++;
- inode_hashtable = (struct list_head *)
+ inode_hashtable = (struct hlist_head *)
__get_free_pages(GFP_ATOMIC, order);
} while (inode_hashtable == NULL && --order >= 0);
head = inode_hashtable;
i = nr_hash;
do {
- INIT_LIST_HEAD(head);
+ INIT_HLIST_HEAD(head);
head++;
i--;
} while (i);
INIT_LIST_HEAD(&s->s_io);
INIT_LIST_HEAD(&s->s_files);
INIT_LIST_HEAD(&s->s_instances);
- INIT_LIST_HEAD(&s->s_anon);
+ INIT_HLIST_HEAD(&s->s_anon);
init_rwsem(&s->s_umount);
sema_init(&s->s_lock, 1);
down_write(&s->s_umount);
atomic_t d_count;
unsigned long d_vfs_flags; /* moved here to be on same cacheline */
spinlock_t d_lock; /* per dentry lock */
- unsigned int d_flags;
- unsigned long d_move_count; /* to indicated moved dentry while lockless lookup */
struct inode * d_inode; /* Where the name belongs to - NULL is negative */
- struct dentry * d_parent; /* parent directory */
- struct list_head * d_bucket; /* lookup hash bucket */
- struct list_head d_hash; /* lookup hash list */
struct list_head d_lru; /* LRU list */
struct list_head d_child; /* child of parent list */
struct list_head d_subdirs; /* our children */
struct list_head d_alias; /* inode alias list */
- int d_mounted;
- struct qstr d_name;
- struct qstr * d_qstr; /* quick str ptr used in lockless lookup and concurrent d_move */
unsigned long d_time; /* used by d_revalidate */
struct dentry_operations *d_op;
struct super_block * d_sb; /* The root of the dentry tree */
+ unsigned int d_flags;
+ int d_mounted;
void * d_fsdata; /* fs-specific data */
struct rcu_head d_rcu;
struct dcookie_struct * d_cookie; /* cookie, if any */
+ unsigned long d_move_count; /* to indicate a moved dentry during lockless lookup */
+ struct qstr * d_qstr; /* quick str ptr used in lockless lookup and concurrent d_move */
+ struct dentry * d_parent; /* parent directory */
+ struct qstr d_name;
+ struct hlist_node d_hash; /* lookup hash list */
+ struct hlist_head * d_bucket; /* lookup hash bucket */
unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
} ____cacheline_aligned;
static __inline__ void __d_drop(struct dentry * dentry)
{
dentry->d_vfs_flags |= DCACHE_UNHASHED;
- list_del_rcu(&dentry->d_hash);
+ hlist_del_rcu(&dentry->d_hash);
}
static __inline__ void d_drop(struct dentry * dentry)
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
-extern void shrink_dcache_anon(struct list_head *);
+extern void shrink_dcache_anon(struct hlist_head *);
extern int d_invalidate(struct dentry *);
/* only used at mount-time */
};
struct inode {
- struct list_head i_hash;
+ struct hlist_node i_hash;
struct list_head i_list;
struct list_head i_dentry;
unsigned long i_ino;
struct list_head s_dirty; /* dirty inodes */
struct list_head s_io; /* parked for writeback */
- struct list_head s_anon; /* anonymous dentries for (nfs) exporting */
+ struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_files;
struct block_device *s_bdev;
for (pos = (head)->next, n = pos->next; pos != (head); \
pos = n, ({ read_barrier_depends(); 0;}), n = pos->next)
+/*
+ * Double linked lists with a single pointer list head.
+ * Mostly useful for hash tables where the two pointer list head is
+ * too wasteful.
+ * You lose the ability to access the tail in O(1).
+ */
+
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)
+
+static __inline__ int hlist_unhashed(struct hlist_node *h)
+{
+ return !h->pprev;
+}
+
+static __inline__ int hlist_empty(struct hlist_head *h)
+{
+ return !h->first;
+}
+
+static __inline__ void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static __inline__ void hlist_del(struct hlist_node *n)
+{
+ if (n->pprev)
+ __hlist_del(n);
+}
+
+/* No separate RCU variant is needed: hlist_del() leaves n->next intact,
+ * so a concurrent lockless reader can still step off a deleted node. */
+#define hlist_del_rcu hlist_del
+
+static __inline__ void hlist_del_init(struct hlist_node *n)
+{
+ if (n->pprev) {
+ __hlist_del(n);
+ INIT_HLIST_NODE(n);
+ }
+}
+
+static __inline__ void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+static __inline__ void hlist_add_head_rcu(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ n->pprev = &h->first;
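+ /* make sure n is fully initialised before it becomes visible to
+ * lockless readers; pairs with the read barrier on the lookup side */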
+ smp_wmb();
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+}
+
+/* next must be != NULL */
+static __inline__ void hlist_add_before(struct hlist_node *n, struct hlist_node *next)
+{
+ n->pprev = next->pprev;
+ n->next = next;
+ next->pprev = &n->next;
+ *(n->pprev) = n;
+}
+
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+/* prefetch() cannot easily be folded into the iterator itself, so
+ * callers that care (like the dcache lookup above) prefetch by hand. */
+#define hlist_for_each(pos, head) \
+ for (pos = (head)->first; pos; \
+ pos = pos->next)
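
[ Since the rest of the patch leans on these primitives, a toy example may
  help; this is editorial illustration, not part of the patch, and the table
  size, item type, and helper names are invented.  It shows the point of the
  conversion: a bucket head is a single pointer, so the hash array is half
  the size of a list_head table; chains are NULL-terminated, so lookup needs
  no is_bucket()-style end test or max_dentries cap; and pprev still gives
  O(1) unlinking without knowing the bucket head: ]

	#define MY_HASH_SIZE 16

	/* static storage is zeroed, so every .first starts out NULL;
	 * otherwise each head would need INIT_HLIST_HEAD() */
	static struct hlist_head my_table[MY_HASH_SIZE];

	struct my_item {
		unsigned long key;
		struct hlist_node hash;	/* chains the item into a bucket */
	};

	static void my_insert(struct my_item *item)
	{
		struct hlist_head *bucket = &my_table[item->key % MY_HASH_SIZE];
		hlist_add_head(&item->hash, bucket);
	}

	static struct my_item *my_find(unsigned long key)
	{
		struct hlist_head *bucket = &my_table[key % MY_HASH_SIZE];
		struct hlist_node *node;

		/* terminates at NULL -- no bucket-boundary test needed */
		hlist_for_each(node, bucket) {
			struct my_item *item =
				hlist_entry(node, struct my_item, hash);
			if (item->key == key)
				return item;
		}
		return NULL;
	}

	static void my_remove(struct my_item *item)
	{
		/* pprev makes this O(1); safe even if already unhashed */
		hlist_del_init(&item->hash);
	}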
+
#else
#warning "don't include kernel headers in userspace"
#endif /* __KERNEL__ */