git.hungrycats.org Git - linux/commitdiff
mm/hugetlb.c: fix UAF of vma in hugetlb fault pathway
Author:     Vishal Moola (Oracle) <vishal.moola@gmail.com>
AuthorDate: Sat, 14 Sep 2024 19:41:19 +0000 (12:41 -0700)
Committer:  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Fri, 4 Oct 2024 14:33:48 +0000 (16:33 +0200)
commit 98b74bb4d7e96b4da5ef3126511febe55b76b807 upstream.

Syzbot reports a UAF in hugetlb_fault().  This happens because
vmf_anon_prepare() could drop the per-VMA lock and allow the current VMA
to be freed before hugetlb_vma_unlock_read() is called.

We can fix this by using a modified version of vmf_anon_prepare() that
doesn't release the VMA lock on failure, and then release it ourselves
after hugetlb_vma_unlock_read().

Link: https://lkml.kernel.org/r/20240914194243.245-2-vishal.moola@gmail.com
Fixes: 9acad7ba3e25 ("hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()")
Reported-by: syzbot+2dab93857ee95f2eeb08@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/linux-mm/00000000000067c20b06219fbc26@google.com/
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/hugetlb.c

index be77b409de93f7b2952fd3c53dfb21c98765a4bd..423d20453b90c948bdd2feb0ba50af579e3494f1 100644 (file)
@@ -6075,7 +6075,7 @@ retry_avoidcopy:
         * When the original hugepage is shared one, it does not have
         * anon_vma prepared.
         */
-       ret = vmf_anon_prepare(vmf);
+       ret = __vmf_anon_prepare(vmf);
        if (unlikely(ret))
                goto out_release_all;
 
@@ -6274,7 +6274,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                }
 
                if (!(vma->vm_flags & VM_MAYSHARE)) {
-                       ret = vmf_anon_prepare(vmf);
+                       ret = __vmf_anon_prepare(vmf);
                        if (unlikely(ret))
                                goto out;
                }
@@ -6406,6 +6406,14 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
        folio_unlock(folio);
 out:
        hugetlb_vma_unlock_read(vma);
+
+       /*
+        * We must check to release the per-VMA lock. __vmf_anon_prepare() is
+        * the only way ret can be set to VM_FAULT_RETRY.
+        */
+       if (unlikely(ret & VM_FAULT_RETRY))
+               vma_end_read(vma);
+
        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
        return ret;
 
@@ -6627,6 +6635,14 @@ out_ptl:
        }
 out_mutex:
        hugetlb_vma_unlock_read(vma);
+
+       /*
+        * We must check to release the per-VMA lock. __vmf_anon_prepare() in
+        * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
+        */
+       if (unlikely(ret & VM_FAULT_RETRY))
+               vma_end_read(vma);
+
        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
        /*
         * Generally it's safe to hold refcount during waiting page lock. But