Btrfs: fix scrub preventing unused block groups from being deleted
author     Filipe Manana <fdmanana@suse.com>
           Thu, 19 Nov 2015 11:45:48 +0000 (11:45 +0000)
committer  Zygo Blaxell <zblaxell@serenity.furryterror.org>
           Tue, 19 Jan 2016 05:13:14 +0000 (00:13 -0500)
Currently scrub can race with the cleaner kthread when the latter attempts
to delete an unused block group, and the result is that the cleaner kthread
is prevented from ever deleting the block group later - unless the block
group becomes used and unused again. The following diagram illustrates that
race:

              CPU 1                                 CPU 2

 cleaner kthread
   btrfs_delete_unused_bgs()

     gets block group X from
     fs_info->unused_bgs and
     removes it from that list

                                             scrub_enumerate_chunks()

                                               searches device tree using
                                               its commit root

                                               finds device extent for
                                               block group X

                                               gets block group X from the tree
                                               fs_info->block_group_cache_tree
                                               (via btrfs_lookup_block_group())

                                               sets bg X to RO

     sees the block group is
     already RO and therefore
     neither deletes it nor adds
     it back to the unused list
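
The cleaner-side check that makes the block group get skipped is not visible
in the patch below, so here is a simplified, approximate sketch of the
relevant logic in btrfs_delete_unused_bgs() (not part of this patch; exact
field and function names may differ slightly between kernel versions). The
key point is that the block group has already been taken off
fs_info->unused_bgs before the check runs, so once scrub has set it to RO
the cleaner simply skips it and nothing ever re-queues it:

    /*
     * Approximate sketch of the cleaner-side logic in
     * btrfs_delete_unused_bgs(); not part of this patch.
     */
    spin_lock(&fs_info->unused_bgs_lock);
    while (!list_empty(&fs_info->unused_bgs)) {
            block_group = list_first_entry(&fs_info->unused_bgs,
                                           struct btrfs_block_group_cache,
                                           bg_list);
            /* Taken off the unused list before it is examined. */
            list_del_init(&block_group->bg_list);
            spin_unlock(&fs_info->unused_bgs_lock);

            spin_lock(&block_group->lock);
            if (block_group->reserved || block_group->ro ||
                btrfs_block_group_used(&block_group->item)) {
                    /*
                     * The group became reserved, used or RO (e.g. a racing
                     * scrub set it RO).  It is skipped here and nothing puts
                     * it back on the unused list, which is why scrub has to
                     * re-queue it once it is done.
                     */
                    spin_unlock(&block_group->lock);
                    goto next;
            }
            spin_unlock(&block_group->lock);

            /* ... actually delete the confirmed-unused block group ... */
    next:
            spin_lock(&fs_info->unused_bgs_lock);
    }
    spin_unlock(&fs_info->unused_bgs_lock);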

So fix this by making scrub add the block group back to the list of unused
block groups if the block group is still unused when scrub finishes with it
and it hasn't already been removed.

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Chris Mason <clm@fb.com>
(cherry picked from commit 758f2dfcf8a249b1f1510aa32e625c2ec20642a3)
(cherry picked from commit 3587aca48159078c69d21bc7280c2f35732a3e40)

fs/btrfs/scrub.c

index 053bf76c54bce64e8615b31917db2b8f3a0b0262..339be4acafa330308bdbe8c290b5dcf621f88999 100644 (file)
@@ -3493,6 +3493,28 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                        spin_unlock(&cache->lock);
                }
 
+               /*
+                * We might have prevented the cleaner kthread from deleting
+                * this block group if it was already unused because we raced
+                * and set it to RO mode first. So add it back to the unused
+                * list, otherwise it might not ever be deleted unless a manual
+                * balance is triggered or it becomes used and unused again.
+                */
+               spin_lock(&cache->lock);
+               if (!cache->removed && !cache->ro && cache->reserved == 0 &&
+                   btrfs_block_group_used(&cache->item) == 0) {
+                       spin_unlock(&cache->lock);
+                       spin_lock(&fs_info->unused_bgs_lock);
+                       if (list_empty(&cache->bg_list)) {
+                               btrfs_get_block_group(cache);
+                               list_add_tail(&cache->bg_list,
+                                             &fs_info->unused_bgs);
+                       }
+                       spin_unlock(&fs_info->unused_bgs_lock);
+               } else {
+                       spin_unlock(&cache->lock);
+               }
+
                btrfs_put_block_group(cache);
                if (ret)
                        break;