btrfs_run_defrag_inodes(root->fs_info);
/*
- * Acquires fs_info->bg_delete_sem to avoid racing with
- * relocation (btrfs_relocate_chunk) and relocation acquires
- * fs_info->cleaner_mutex (btrfs_relocate_block_group) after
- * acquiring fs_info->bg_delete_sem. So we can't hold, nor need
- * to, fs_info->cleaner_mutex when deleting unused block groups.
+ * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
+ * with relocation (btrfs_relocate_chunk), and relocation
+ * acquires fs_info->cleaner_mutex (in btrfs_relocate_block_group)
+ * only after taking fs_info->delete_unused_bgs_mutex. So we
+ * can't hold, nor need to hold, fs_info->cleaner_mutex when
+ * deleting unused block groups.
*/
btrfs_delete_unused_bgs(root->fs_info);
sleep:
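The restored comment above encodes a lock-ordering rule: relocation takes fs_info->delete_unused_bgs_mutex first and fs_info->cleaner_mutex second (btrfs_relocate_chunk -> btrfs_relocate_block_group), so the cleaner must not hold cleaner_mutex when it calls btrfs_delete_unused_bgs(). Below is a minimal userspace model of that discipline, with pthread mutexes standing in for the kernel ones; all names are illustrative and none of this is btrfs code. Because both threads agree that delete_unused_bgs_mutex ranks before cleaner_mutex, the ABBA deadlock that calling the deletion path under cleaner_mutex would create cannot occur.

    #include <pthread.h>
    #include <stdio.h>

    /* Stand-ins for the kernel mutexes; "A" ranks before "B". */
    static pthread_mutex_t delete_unused_bgs_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t cleaner_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Models btrfs_relocate_chunk() -> btrfs_relocate_block_group(): A, then B. */
    static void *relocation_thread(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&delete_unused_bgs_mutex);
            pthread_mutex_lock(&cleaner_mutex);
            puts("relocation: holds A, then B");
            pthread_mutex_unlock(&cleaner_mutex);
            pthread_mutex_unlock(&delete_unused_bgs_mutex);
            return NULL;
    }

    /* Models the cleaner: it takes A only while NOT holding B. */
    static void *cleaner_thread(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&delete_unused_bgs_mutex);
            puts("cleaner: deletes unused block groups under A, without B");
            pthread_mutex_unlock(&delete_unused_bgs_mutex);
            return NULL;
    }

    int main(void)
    {
            pthread_t t1, t2;

            pthread_create(&t1, NULL, relocation_thread, NULL);
            pthread_create(&t2, NULL, cleaner_thread, NULL);
            pthread_join(t1, NULL);
            pthread_join(t2, NULL);
            return 0;
    }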
spin_lock_init(&fs_info->unused_bgs_lock);
rwlock_init(&fs_info->tree_mod_log_lock);
mutex_init(&fs_info->unused_bg_unpin_mutex);
+ mutex_init(&fs_info->delete_unused_bgs_mutex);
mutex_init(&fs_info->reloc_mutex);
mutex_init(&fs_info->delalloc_root_mutex);
mutex_init(&fs_info->cleaner_delayed_iput_mutex);
init_rwsem(&fs_info->commit_root_sem);
init_rwsem(&fs_info->cleanup_work_sem);
init_rwsem(&fs_info->subvol_sem);
- init_rwsem(&fs_info->bg_delete_sem);
sema_init(&fs_info->uuid_tree_rescan_sem, 1);
btrfs_init_dev_replace_locks(fs_info);
int ret = 0;
int need_commit = 2;
int have_pinned_space;
- int have_bg_delete_sem = 0;
/* make sure bytes are sectorsize aligned */
bytes = ALIGN(bytes, root->sectorsize);
}
data_sinfo = fs_info->data_sinfo;
- if (!data_sinfo) {
- down_read(&root->fs_info->bg_delete_sem);
- have_bg_delete_sem = 1;
+ if (!data_sinfo)
goto alloc;
- }
again:
/* make sure we have enough space to handle the data first */
if (used + bytes > data_sinfo->total_bytes) {
struct btrfs_trans_handle *trans;
- /*
- * We may need to allocate new chunk, so we should block
- * btrfs_delete_unused_bgs()
- */
- if (!have_bg_delete_sem) {
- spin_unlock(&data_sinfo->lock);
- down_read(&root->fs_info->bg_delete_sem);
- have_bg_delete_sem = 1;
- goto again;
- }
-
/*
* if we don't have enough free bytes in this space then we need
* to alloc a new chunk.
* the fs.
*/
trans = btrfs_join_transaction(root);
- if (IS_ERR(trans)) {
- up_read(&root->fs_info->bg_delete_sem);
+ if (IS_ERR(trans))
return PTR_ERR(trans);
- }
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
alloc_target,
CHUNK_ALLOC_NO_FORCE);
btrfs_end_transaction(trans, root);
if (ret < 0) {
- if (ret != -ENOSPC) {
- up_read(&root->fs_info->bg_delete_sem);
+ if (ret != -ENOSPC)
return ret;
- } else {
+ else {
have_pinned_space = 1;
goto commit_trans;
}
}
trans = btrfs_join_transaction(root);
- if (IS_ERR(trans)) {
- up_read(&root->fs_info->bg_delete_sem);
+ if (IS_ERR(trans))
return PTR_ERR(trans);
- }
if (have_pinned_space >= 0 ||
test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
&trans->transaction->flags) ||
need_commit > 0) {
ret = btrfs_commit_transaction(trans, root);
- if (ret) {
- up_read(&root->fs_info->bg_delete_sem);
+ if (ret)
return ret;
- }
/*
* The cleaner kthread might still be doing iput
* operations. Wait for it to finish so that
trace_btrfs_space_reservation(root->fs_info,
"space_info:enospc",
data_sinfo->flags, bytes, 1);
- up_read(&root->fs_info->bg_delete_sem);
return -ENOSPC;
}
data_sinfo->bytes_may_use += bytes;
data_sinfo->flags, bytes, 1);
spin_unlock(&data_sinfo->lock);
- if (have_bg_delete_sem)
- up_read(&root->fs_info->bg_delete_sem);
-
return ret;
}
}
spin_unlock(&fs_info->unused_bgs_lock);
- down_write(&root->fs_info->bg_delete_sem);
+ mutex_lock(&fs_info->delete_unused_bgs_mutex);
/* Don't want to race with allocators so take the groups_sem */
down_write(&space_info->groups_sem);
end_trans:
btrfs_end_transaction(trans, root);
next:
- up_write(&root->fs_info->bg_delete_sem);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
btrfs_put_block_group(block_group);
spin_lock(&fs_info->unused_bgs_lock);
}
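A detail worth calling out in the loop above: fs_info->unused_bgs_lock is a spinlock, and mutex_lock() may sleep, which is forbidden while a spinlock is held. Hence the spinlock is dropped before delete_unused_bgs_mutex is taken, then reacquired to continue walking the list. A condensed sketch of that shape follows, assuming the surrounding function has the usual btrfs_delete_unused_bgs() structure of this kernel era (the struct btrfs_block_group_cache type name is an assumption, and the deletion work itself is elided):

    spin_lock(&fs_info->unused_bgs_lock);
    while (!list_empty(&fs_info->unused_bgs)) {
            block_group = list_first_entry(&fs_info->unused_bgs,
                                           struct btrfs_block_group_cache,
                                           bg_list);
            list_del_init(&block_group->bg_list);
            spin_unlock(&fs_info->unused_bgs_lock);

            /* Sleeping lock: only taken once the spinlock is dropped. */
            mutex_lock(&fs_info->delete_unused_bgs_mutex);
            /* ... verify the group is still unused and delete it ... */
            mutex_unlock(&fs_info->delete_unused_bgs_mutex);

            btrfs_put_block_group(block_group);
            spin_lock(&fs_info->unused_bgs_lock);
    }
    spin_unlock(&fs_info->unused_bgs_lock);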
* we release the path used to search the chunk/dev tree and before
* the current task acquires this mutex and calls us.
*/
- ASSERT(rwsem_is_locked(&root->fs_info->bg_delete_sem));
+ ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
ret = btrfs_can_relocate(extent_root, chunk_offset);
if (ret)
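Note that the assertion above does not take the lock; it documents a caller contract: every path into btrfs_relocate_chunk() must already hold delete_unused_bgs_mutex, exactly as the balance and shrink loops later in this patch do. A generic sketch of the idiom, with hypothetical names (ASSERT is btrfs's own macro; mutex_is_locked() is the real kernel helper):

    static DEFINE_MUTEX(bg_mutex);  /* hypothetical lock */

    /* Must be called with bg_mutex held; assert rather than lock. */
    static int relocate_one(u64 offset)
    {
            ASSERT(mutex_is_locked(&bg_mutex));
            /* ... work that must not race with unused-bg deletion ... */
            return 0;
    }

    static int walk_and_relocate(void)
    {
            int ret;

            mutex_lock(&bg_mutex);
            ret = relocate_one(0);  /* contract satisfied at the call site */
            mutex_unlock(&bg_mutex);
            return ret;
    }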
key.type = BTRFS_CHUNK_ITEM_KEY;
while (1) {
- down_read(&root->fs_info->bg_delete_sem);
+ mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
if (ret < 0) {
- up_read(&root->fs_info->bg_delete_sem);
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
goto error;
}
BUG_ON(ret == 0); /* Corruption */
ret = btrfs_previous_item(chunk_root, path, key.objectid,
key.type);
if (ret)
- up_read(&root->fs_info->bg_delete_sem);
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
if (ret < 0)
goto error;
if (ret > 0)
else
BUG_ON(ret);
}
- up_read(&root->fs_info->bg_delete_sem);
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
if (found_key.offset == 0)
break;
goto error;
}
- down_read(&fs_info->bg_delete_sem);
+ mutex_lock(&fs_info->delete_unused_bgs_mutex);
ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
if (ret < 0) {
- up_read(&fs_info->bg_delete_sem);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
goto error;
}
ret = btrfs_previous_item(chunk_root, path, 0,
BTRFS_CHUNK_ITEM_KEY);
if (ret) {
- up_read(&fs_info->bg_delete_sem);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
ret = 0;
break;
}
btrfs_item_key_to_cpu(leaf, &found_key, slot);
if (found_key.objectid != key.objectid) {
- up_read(&fs_info->bg_delete_sem);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
break;
}
btrfs_release_path(path);
if (!ret) {
- up_read(&fs_info->bg_delete_sem);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
goto loop;
}
if (counting) {
- up_read(&fs_info->bg_delete_sem);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
spin_lock(&fs_info->balance_lock);
bctl->stat.expected++;
spin_unlock(&fs_info->balance_lock);
count_meta < bctl->meta.limit_min)
|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
count_sys < bctl->sys.limit_min)) {
- up_read(&fs_info->bg_delete_sem);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
goto loop;
}
!chunk_reserved && !bytes_used) {
trans = btrfs_start_transaction(chunk_root, 0);
if (IS_ERR(trans)) {
- up_read(&fs_info->bg_delete_sem);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
ret = PTR_ERR(trans);
goto error;
}
BTRFS_BLOCK_GROUP_DATA);
btrfs_end_transaction(trans, chunk_root);
if (ret < 0) {
- up_read(&fs_info->bg_delete_sem);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
goto error;
}
chunk_reserved = 1;
ret = btrfs_relocate_chunk(chunk_root,
found_key.offset);
- up_read(&fs_info->bg_delete_sem);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
if (ret && ret != -ENOSPC)
goto error;
if (ret == -ENOSPC) {
key.type = BTRFS_DEV_EXTENT_KEY;
do {
- down_read(&root->fs_info->bg_delete_sem);
+ mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
- up_read(&root->fs_info->bg_delete_sem);
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
goto done;
}
ret = btrfs_previous_item(root, path, 0, key.type);
if (ret)
- up_read(&root->fs_info->bg_delete_sem);
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
if (ret < 0)
goto done;
if (ret) {
btrfs_item_key_to_cpu(l, &key, path->slots[0]);
if (key.objectid != device->devid) {
- up_read(&root->fs_info->bg_delete_sem);
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
btrfs_release_path(path);
break;
}
length = btrfs_dev_extent_length(l, dev_extent);
if (key.offset + length <= new_size) {
- up_read(&root->fs_info->bg_delete_sem);
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
btrfs_release_path(path);
break;
}
btrfs_release_path(path);
ret = btrfs_relocate_chunk(root, chunk_offset);
- up_read(&root->fs_info->bg_delete_sem);
+ mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
if (ret && ret != -ENOSPC)
goto done;
if (ret == -ENOSPC)