mm: get rid of vmacache_flush_all() entirely
author     Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 13 Sep 2018 09:57:48 +0000 (23:57 -1000)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 19 Sep 2018 20:49:00 +0000 (22:49 +0200)
commit 7a9cdebdcc17e426fb5287e4a82db1dfe86339b2 upstream.

Jann Horn points out that the vmacache_flush_all() function is not only
potentially expensive, it's buggy too.  It also happens to be entirely
unnecessary, because the sequence number overflow case can be avoided by
simply making the sequence number be 64-bit.  That doesn't even grow the
data structures in question, because the other adjacent fields are
already 64-bit.

So simplify the whole thing by just making the sequence number overflow
case go away entirely, which gets rid of all the complications and makes
the code faster too.  Win-win.

[ Oleg Nesterov points out that the VMACACHE_FULL_FLUSHES statistic
  also just goes away entirely with this ]

Reported-by: Jann Horn <jannh@google.com>
Suggested-by: Will Deacon <will.deacon@arm.com>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
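
To put a number on why the overflow case can simply be dropped: even at an absurd rate of one invalidation per nanosecond, a 64-bit sequence number takes centuries to wrap. A minimal standalone userspace sketch of that arithmetic (illustrative only, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* One seqnum bump per nanosecond, i.e. a sustained 1 GHz rate. */
	const double ns_per_year = 365.25 * 24 * 3600 * 1e9;
	double years_to_wrap = (double)UINT64_MAX / ns_per_year;

	printf("64-bit seqnum wraps after ~%.0f years at 1 GHz\n",
	       years_to_wrap);
	return 0;
}
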
include/linux/mm_types.h
include/linux/sched.h
include/linux/vm_event_item.h
include/linux/vmacache.h
mm/debug.c
mm/vmacache.c

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 36f4695aa60448c994ba4a27405c7deb50249d78..ad2a081bac6674f04a03bfb4aa113a814ba5dffa 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -392,7 +392,7 @@ struct kioctx_table;
 struct mm_struct {
        struct vm_area_struct *mmap;            /* list of VMAs */
        struct rb_root mm_rb;
-       u32 vmacache_seqnum;                   /* per-thread vmacache */
+       u64 vmacache_seqnum;                   /* per-thread vmacache */
 #ifdef CONFIG_MMU
        unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
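
The "doesn't even grow the data structures" point is an alignment argument: on 64-bit builds the old u32 sat between pointer-sized fields, so the compiler was already padding it out to 8 bytes, and the new u64 simply reuses that padding. A standalone sketch with simplified stand-in fields (not the real struct mm_struct) showing both layouts coming out the same size:

#include <stdio.h>
#include <stdint.h>

struct layout_before {
	void *mm_rb;			/* stand-in for struct rb_root (one pointer) */
	uint32_t vmacache_seqnum;	/* old u32: 4 bytes of padding follow on 64-bit */
	void *get_unmapped_area;	/* next pointer-sized field */
};

struct layout_after {
	void *mm_rb;
	uint64_t vmacache_seqnum;	/* new u64: fills the former padding */
	void *get_unmapped_area;
};

int main(void)
{
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct layout_before), sizeof(struct layout_after));
	return 0;
}

On a typical 64-bit target both structs come out at 24 bytes.
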
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 725498cc5d301dfc378184e1243bd4a8e3385d7b..b30540d6d1253aec74816153fed803283daf8c38 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1454,7 +1454,7 @@ struct task_struct {
 
        struct mm_struct *mm, *active_mm;
        /* per-thread vma caching */
-       u32 vmacache_seqnum;
+       u64 vmacache_seqnum;
        struct vm_area_struct *vmacache[VMACACHE_SIZE];
 #if defined(SPLIT_RSS_COUNTING)
        struct task_rss_stat    rss_stat;
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 8ef3a61fdc74c3f45cda7cf46f3bcfe6fe14ed51..fdac5800872d73dd0468f5bd038adf430c4a612b 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -88,7 +88,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_DEBUG_VM_VMACACHE
                VMACACHE_FIND_CALLS,
                VMACACHE_FIND_HITS,
-               VMACACHE_FULL_FLUSHES,
 #endif
                NR_VM_EVENT_ITEMS
 };
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index c3fa0fd43949952957603b35e28b26ddc53fb0d3..4f58ff2dacd6985e441e61180e52984f059f4f92 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -15,7 +15,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
        memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
 }
 
-extern void vmacache_flush_all(struct mm_struct *mm);
 extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
 extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
                                                    unsigned long addr);
@@ -29,10 +28,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
 static inline void vmacache_invalidate(struct mm_struct *mm)
 {
        mm->vmacache_seqnum++;
-
-       /* deal with overflows */
-       if (unlikely(mm->vmacache_seqnum == 0))
-               vmacache_flush_all(mm);
 }
 
 #endif /* __LINUX_VMACACHE_H */
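
The reason a bare increment is now sufficient: the lookup side compares the task's private vmacache_seqnum against mm->vmacache_seqnum and lazily flushes the per-thread cache on a mismatch, so bumping the mm-wide counter invalidates every thread's cache at its next lookup. A rough sketch of that check, modeled on the validation done in mm/vmacache.c (simplified and renamed, not the literal kernel function):

#include <linux/sched.h>
#include <linux/vmacache.h>

static bool vmacache_seqnum_current(struct mm_struct *mm)
{
	struct task_struct *curr = current;

	if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
		/* Stale: resync the per-task seqnum and drop cached VMAs. */
		curr->vmacache_seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}
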
diff --git a/mm/debug.c b/mm/debug.c
index 668aa35191ca1243d9be055b5e03ed9b17e17976..689b6e911cae7815efaa7d4dbfdc6a8fdb56490f 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -168,7 +168,7 @@ EXPORT_SYMBOL(dump_vma);
 
 void dump_mm(const struct mm_struct *mm)
 {
-       pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+       pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
 #ifdef CONFIG_MMU
                "get_unmapped_area %p\n"
 #endif
@@ -198,7 +198,7 @@ void dump_mm(const struct mm_struct *mm)
 #endif
                "%s",   /* This is here to hold the comma */
 
-               mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+               mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
 #ifdef CONFIG_MMU
                mm->get_unmapped_area,
 #endif
diff --git a/mm/vmacache.c b/mm/vmacache.c
index fd09dc9c6812bb86d483b1638d519b565958049a..9c8ff3d4eda92c0f2f90f2bada5ae3a30e925d96 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -5,44 +5,6 @@
 #include <linux/mm.h>
 #include <linux/vmacache.h>
 
-/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
-       struct task_struct *g, *p;
-
-       count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
-       /*
-        * Single threaded tasks need not iterate the entire
-        * list of process. We can avoid the flushing as well
-        * since the mm's seqnum was increased and don't have
-        * to worry about other threads' seqnum. Current's
-        * flush will occur upon the next lookup.
-        */
-       if (atomic_read(&mm->mm_users) == 1)
-               return;
-
-       rcu_read_lock();
-       for_each_process_thread(g, p) {
-               /*
-                * Only flush the vmacache pointers as the
-                * mm seqnum is already set and curr's will
-                * be set upon invalidation when the next
-                * lookup is done.
-                */
-               if (mm == p->mm)
-                       vmacache_flush(p);
-       }
-       rcu_read_unlock();
-}
-
 /*
  * This task may be accessing a foreign mm via (for example)
  * get_user_pages()->find_vma().  The vmacache is task-local and this