mm/list_lru.c: fix list_lru_count_node() to be race free
author    Sahitya Tummala <stummala@codeaurora.org>
          Mon, 10 Jul 2017 22:49:57 +0000 (15:49 -0700)
committer Sasha Levin <alexander.levin@verizon.com>
          Mon, 31 Jul 2017 17:37:54 +0000 (13:37 -0400)
[ Upstream commit 2c80cd57c74339889a8752b20862a16c28929c3a ]

list_lru_count_node() iterates over all memcgs to get the total number of
entries on the node, but it can race with memcg_drain_all_list_lrus(),
which migrates the entries from a dead cgroup to another.  As a result,
list_lru_count_node() can return an incorrect number of entries.

Fix this by keeping track of the number of entries per node and simply
returning it in list_lru_count_node().
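
For illustration only (not part of the commit): a minimal user-space C
sketch of the idea, using hypothetical stand-in names such as
fake_lru_node, per_memcg_items and count_node_*() rather than the real
<linux/list_lru.h> structures.  Summing per-memcg sub-counts can observe
a migrating entry twice or not at all, while a per-node total updated
under the node lock is a single consistent value to read.

    /*
     * Illustration only -- simplified stand-in types, not the kernel API.
     */
    #include <pthread.h>

    #define NR_MEMCGS 8

    struct fake_lru_node {
            pthread_mutex_t lock;                   /* protects the fields below */
            long per_memcg_items[NR_MEMCGS];        /* per-memcg sub-list lengths */
            long nr_items;                          /* per-node total (the fix) */
    };

    /*
     * Old scheme: sum the per-memcg counts.  If entries migrate between
     * sub-lists concurrently (as memcg_drain_all_list_lrus() does), the
     * sum can count an entry twice or miss it entirely.
     */
    static long count_node_racy(struct fake_lru_node *nlru)
    {
            long count = 0;

            for (int i = 0; i < NR_MEMCGS; i++)
                    count += nlru->per_memcg_items[i];
            return count;
    }

    /*
     * New scheme: every add/del updates nr_items under nlru->lock, so the
     * node total is read as one consistent value.
     */
    static long count_node_fixed(struct fake_lru_node *nlru)
    {
            return nlru->nr_items;
    }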

Link: http://lkml.kernel.org/r/1498707555-30525-1-git-send-email-stummala@codeaurora.org
Signed-off-by: Sahitya Tummala <stummala@codeaurora.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Alexander Polakov <apolyakov@beget.ru>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
include/linux/list_lru.h
mm/list_lru.c

diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 2a6b9947aaa3191e7f24708dcd8726ccb2418b9f..743b34f56f2be813371a4c55f1e8c36578bd3d2f 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -44,6 +44,7 @@ struct list_lru_node {
        /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
        struct list_lru_memcg   *memcg_lrus;
 #endif
+       long nr_items;
 } ____cacheline_aligned_in_smp;
 
 struct list_lru {
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 84b4c21d78d757725d970167f07cf3e62be7308f..2a6a2e4b64ba3c498dbb62e6eba7336cd3980a7e 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -103,6 +103,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
        if (list_empty(item)) {
                list_add_tail(item, &l->list);
                l->nr_items++;
+               nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
@@ -122,6 +123,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
        if (!list_empty(item)) {
                list_del_init(item);
                l->nr_items--;
+               nlru->nr_items--;
                spin_unlock(&nlru->lock);
                return true;
        }
@@ -169,15 +171,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one);
 
 unsigned long list_lru_count_node(struct list_lru *lru, int nid)
 {
-       long count = 0;
-       int memcg_idx;
+       struct list_lru_node *nlru;
 
-       count += __list_lru_count_one(lru, nid, -1);
-       if (list_lru_memcg_aware(lru)) {
-               for_each_memcg_cache_index(memcg_idx)
-                       count += __list_lru_count_one(lru, nid, memcg_idx);
-       }
-       return count;
+       nlru = &lru->node[nid];
+       return nlru->nr_items;
 }
 EXPORT_SYMBOL_GPL(list_lru_count_node);
 
@@ -212,6 +209,7 @@ restart:
                        assert_spin_locked(&nlru->lock);
                case LRU_REMOVED:
                        isolated++;
+                       nlru->nr_items--;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to