git.hungrycats.org Git - linux/commitdiff
mm: make find_extend_vma() fail if write lock not held
authorLiam R. Howlett <Liam.Howlett@oracle.com>
Fri, 16 Jun 2023 22:58:54 +0000 (15:58 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 1 Jul 2023 11:14:46 +0000 (13:14 +0200)
commit f440fa1ac955e2898893f9301568435eb5cdfc4b upstream.

Make calls to extend_vma() and find_extend_vma() fail if the write lock
is required.

To avoid making this a flag-day event, this still allows the old
read-locking case for the trivial situations, and passes in a flag to
say "is it write-locked".  That way write-lockers can say "yes, I'm
being careful", and legacy users will continue to work in all the common
cases until they have been fully converted to the new world order.

Co-Developed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
fs/binfmt_elf.c
fs/exec.c
include/linux/mm.h
mm/memory.c
mm/mmap.c
mm/nommu.c

index 8a884e795f6a7b63756dfd31c13f7154c66e6462..61004b7bba24af4aeea307cb7fbf5f602b1e75a2 100644 (file)
@@ -320,10 +320,10 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
         * Grow the stack manually; some architectures have a limit on how
         * far ahead a user-space access may be in order to grow the stack.
         */
-       if (mmap_read_lock_killable(mm))
+       if (mmap_write_lock_killable(mm))
                return -EINTR;
-       vma = find_extend_vma(mm, bprm->p);
-       mmap_read_unlock(mm);
+       vma = find_extend_vma_locked(mm, bprm->p, true);
+       mmap_write_unlock(mm);
        if (!vma)
                return -EFAULT;
 
index 7c44d0c65b1b4c7bcb91905110afc330d52a2bc8..c727092a11e2134e10b4dc61708b45d6b774b68c 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -204,7 +204,8 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
 #ifdef CONFIG_STACK_GROWSUP
        if (write) {
-               ret = expand_downwards(bprm->vma, pos);
+               /* We claim to hold the lock - nobody to race with */
+               ret = expand_downwards(bprm->vma, pos, true);
                if (ret < 0)
                        return NULL;
        }
@@ -852,7 +853,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
        stack_base = vma->vm_end - stack_expand;
 #endif
        current->mm->start_stack = bprm->p;
-       ret = expand_stack(vma, stack_base);
+       ret = expand_stack_locked(vma, stack_base, true);
        if (ret)
                ret = -EFAULT;
 
index 872fc849c9faccbe537cb8c103510130536e10c6..48433d8f5f23ddbafedccbf2a2f3ec0d283415ec 100644 (file)
@@ -3065,11 +3065,13 @@ extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
 
 extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
-extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+int expand_stack_locked(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked);
+#define expand_stack(vma,addr) expand_stack_locked(vma,addr,false)
 
 /* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
-extern int expand_downwards(struct vm_area_struct *vma,
-               unsigned long address);
+int expand_downwards(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked);
 #if VM_GROWSUP
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #else
@@ -3170,6 +3172,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 #endif
 
 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
+               unsigned long addr, bool write_locked);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
                        unsigned long pfn, unsigned long size, pgprot_t);
 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
index 54d5e7217d4dd3f278b40918db2b1e036a8739dc..43807daba12399f31920bf84dfa4e56d8648edb8 100644 (file)
@@ -5336,7 +5336,7 @@ struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
                        goto fail;
        }
 
-       if (expand_stack(vma, addr))
+       if (expand_stack_locked(vma, addr, true))
                goto fail;
 
 success:
index bf54576e05ef4b4ebbbb2ee87ce7d0a9a9d4d0bd..01af54e16b7da8230bbfca2cfdd42120281875d1 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1898,7 +1898,8 @@ static int acct_stack_growth(struct vm_area_struct *vma,
  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
  * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
-int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+int expand_upwards(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *next;
@@ -1922,6 +1923,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        if (gap_addr < address || gap_addr > TASK_SIZE)
                gap_addr = TASK_SIZE;
 
+       if (!write_locked)
+               return -EAGAIN;
        next = find_vma_intersection(mm, vma->vm_end, gap_addr);
        if (next && vma_is_accessible(next)) {
                if (!(next->vm_flags & VM_GROWSUP))
@@ -1991,7 +1994,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 /*
  * vma is the first one with address < vma->vm_start.  Have to extend vma.
  */
-int expand_downwards(struct vm_area_struct *vma, unsigned long address)
+int expand_downwards(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked)
 {
        struct mm_struct *mm = vma->vm_mm;
        MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start);
@@ -2005,10 +2009,13 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
        /* Enforce stack_guard_gap */
        prev = mas_prev(&mas, 0);
        /* Check that both stack segments have the same anon_vma? */
-       if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
-                       vma_is_accessible(prev)) {
-               if (address - prev->vm_end < stack_guard_gap)
+       if (prev) {
+               if (!(prev->vm_flags & VM_GROWSDOWN) &&
+                   vma_is_accessible(prev) &&
+                   (address - prev->vm_end < stack_guard_gap))
                        return -ENOMEM;
+               if (!write_locked && (prev->vm_end == address))
+                       return -EAGAIN;
        }
 
        if (mas_preallocate(&mas, GFP_KERNEL))
@@ -2087,13 +2094,14 @@ static int __init cmdline_parse_stack_guard_gap(char *p)
 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
 
 #ifdef CONFIG_STACK_GROWSUP
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
+int expand_stack_locked(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked)
 {
-       return expand_upwards(vma, address);
+       return expand_upwards(vma, address, write_locked);
 }
 
-struct vm_area_struct *
-find_extend_vma(struct mm_struct *mm, unsigned long addr)
+struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm,
+               unsigned long addr, bool write_locked)
 {
        struct vm_area_struct *vma, *prev;
 
@@ -2101,20 +2109,25 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
        vma = find_vma_prev(mm, addr, &prev);
        if (vma && (vma->vm_start <= addr))
                return vma;
-       if (!prev || expand_stack(prev, addr))
+       if (!prev)
+               return NULL;
+       if (expand_stack_locked(prev, addr, write_locked))
                return NULL;
        if (prev->vm_flags & VM_LOCKED)
                populate_vma_page_range(prev, addr, prev->vm_end, NULL);
        return prev;
 }
 #else
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
+int expand_stack_locked(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked)
 {
-       return expand_downwards(vma, address);
+       if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
+               return -EINVAL;
+       return expand_downwards(vma, address, write_locked);
 }
 
-struct vm_area_struct *
-find_extend_vma(struct mm_struct *mm, unsigned long addr)
+struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm,
+               unsigned long addr, bool write_locked)
 {
        struct vm_area_struct *vma;
        unsigned long start;
@@ -2125,10 +2138,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
                return NULL;
        if (vma->vm_start <= addr)
                return vma;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               return NULL;
        start = vma->vm_start;
-       if (expand_stack(vma, addr))
+       if (expand_stack_locked(vma, addr, write_locked))
                return NULL;
        if (vma->vm_flags & VM_LOCKED)
                populate_vma_page_range(vma, addr, start, NULL);
@@ -2136,6 +2147,11 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 }
 #endif
 
+struct vm_area_struct *find_extend_vma(struct mm_struct *mm,
+               unsigned long addr)
+{
+       return find_extend_vma_locked(mm, addr, false);
+}
 EXPORT_SYMBOL_GPL(find_extend_vma);
 
 /*
index 57ba243c6a37f466962891f6d71b7dc0910ffce7..d671042fabd69f9b4aa73d361aa8c8945f7cd74e 100644 (file)
@@ -643,7 +643,8 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
  * expand a stack to a given address
  * - not supported under NOMMU conditions
  */
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
+int expand_stack_locked(struct vm_area_struct *vma, unsigned long address,
+               bool write_locked)
 {
        return -ENOMEM;
 }