There are two problems in qgroup:
a). PAGE_CACHE_SIZE is 4K, so even when we are writing only 1K of data,
qgroup reserves a full 4K. This leaves the last 3K of a qgroup
unavailable to the user.
b). When the user writes inline data, qgroup does not reserve it at all,
which opens a window in which the qgroup limit can be exceeded.
The main idea of this patch is to reserve write_bytes rather than
reserve_bytes. That is, qgroup no longer cares about how much space
btrfs reserves internally on the user's behalf, only about how much
data the user is actually going to write. The space is reserved when
the user starts to write and released when the transaction is committed.
In this way, qgroup is decoupled from the complex reservation procedure
inside btrfs: it only reserves at write time and accounts the data when
it is written back in commit_transaction().
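As an illustration only (not part of the patch), here is a minimal
userspace sketch of the accounting model this change moves to. The
names qgroup_sketch, qgroup_reserve() and qgroup_commit() are invented
for the example and are not the kernel API; the point is just that
write_bytes is checked against max_rfer at write time and folded into
rfer at commit.

/* Standalone sketch (not kernel code): models the idea of reserving
 * only write_bytes against a qgroup limit and accounting it at commit. */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

struct qgroup_sketch {
	int64_t max_rfer;   /* configured referenced-bytes limit */
	int64_t rfer;       /* bytes already accounted (written) */
	int64_t reserved;   /* bytes reserved for in-flight writes */
};

/* Reserve only what the user will actually write, not the rounded-up
 * page-cache reservation; fail with -EDQUOT when the limit is hit. */
static int qgroup_reserve(struct qgroup_sketch *qg, int64_t write_bytes)
{
	if (qg->reserved + qg->rfer + write_bytes > qg->max_rfer)
		return -EDQUOT;
	qg->reserved += write_bytes;
	return 0;
}

/* At transaction commit the written data is accounted in rfer and the
 * corresponding reservation is dropped. */
static void qgroup_commit(struct qgroup_sketch *qg, int64_t written_bytes)
{
	qg->reserved -= written_bytes;
	qg->rfer += written_bytes;
}

int main(void)
{
	struct qgroup_sketch qg = { .max_rfer = 4096, .rfer = 0, .reserved = 0 };

	/* A 1K write reserves 1K (write_bytes), not a whole 4K page. */
	if (qgroup_reserve(&qg, 1024) == 0)
		printf("reserved 1K, reserved=%lld\n", (long long)qg.reserved);

	qgroup_commit(&qg, 1024);
	printf("after commit: rfer=%lld reserved=%lld\n",
	       (long long)qg.rfer, (long long)qg.reserved);
	return 0;
}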
Signed-off-by: Dongsheng Yang <yangds.fnst@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
(cherry picked from commit e2d1f92399afb6ec518b68867ed10db2585b283a)
BTRFS_RESERVE_FLUSH_ALL,
};
-int btrfs_check_data_free_space(struct inode *inode, u64 bytes);
+int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes);
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
num_pages *= 16;
num_pages *= PAGE_CACHE_SIZE;
- ret = btrfs_check_data_free_space(inode, num_pages);
+ ret = btrfs_check_data_free_space(inode, num_pages, num_pages);
if (ret)
goto out_put;
* This will check the space that the inode allocates from to make sure we have
* enough space for bytes.
*/
-int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
+int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
{
struct btrfs_space_info *data_sinfo;
struct btrfs_root *root = BTRFS_I(inode)->root;
data_sinfo->flags, bytes, 1);
return -ENOSPC;
}
- ret = btrfs_qgroup_reserve(root, bytes);
+ ret = btrfs_qgroup_reserve(root, write_bytes);
if (ret)
goto out;
data_sinfo->bytes_may_use += bytes;
data_sinfo = root->fs_info->data_sinfo;
spin_lock(&data_sinfo->lock);
WARN_ON(data_sinfo->bytes_may_use < bytes);
- btrfs_qgroup_free(root, bytes);
data_sinfo->bytes_may_use -= bytes;
trace_btrfs_space_reservation(root->fs_info, "space_info",
data_sinfo->flags, bytes, 0);
u64 qgroup_reserved)
{
btrfs_block_rsv_release(root, rsv, (u64)-1);
- if (qgroup_reserved)
- btrfs_qgroup_free(root, qgroup_reserved);
}
/**
to_free = 0;
}
spin_unlock(&BTRFS_I(inode)->lock);
- if (dropped) {
- if (root->fs_info->quota_enabled)
- btrfs_qgroup_free(root, dropped * root->nodesize);
+ if (dropped)
to_free += btrfs_calc_trans_metadata_size(root, dropped);
- }
if (to_free) {
btrfs_block_rsv_release(root, block_rsv, to_free);
trace_btrfs_space_reservation(root->fs_info, "delalloc",
btrfs_ino(inode), to_free, 0);
- if (root->fs_info->quota_enabled) {
- btrfs_qgroup_free(root, dropped * root->nodesize);
- }
btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
to_free);
{
int ret;
- ret = btrfs_check_data_free_space(inode, num_bytes);
+ ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
if (ret)
return ret;
set_extent_dirty(root->fs_info->pinned_extents, bytenr,
bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
- if (reserved) {
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- num_bytes, -1);
+ if (reserved)
trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
- }
return 0;
}
btrfs_put_block_group(cache);
trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
pin = 0;
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- buf->len, -1);
}
out:
if (pin)
ret = btrfs_discard_extent(root, start, len, NULL);
btrfs_add_free_space(cache, start, len);
btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- len, -1);
}
btrfs_put_block_group(cache);
BUG_ON(ret); /* logic error */
ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
0, owner, offset, ins, 1);
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- ins->offset, 1);
btrfs_put_block_group(block_group);
return ret;
}
return ERR_PTR(ret);
}
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root_objectid,
- ins.offset, 1);
-
buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
BUG_ON(IS_ERR(buf)); /* -ENOMEM */
}
reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
- ret = btrfs_check_data_free_space(inode, reserve_bytes);
+ ret = btrfs_check_data_free_space(inode, reserve_bytes, write_bytes);
if (ret == -ENOSPC &&
(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
BTRFS_INODE_PREALLOC))) {
* Make sure we have enough space before we do the
* allocation.
*/
- ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
+ ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start, alloc_end - alloc_start);
if (ret)
return ret;
}
goto out_free;
}
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- ins.offset, 1);
/*
* here we're doing allocation and writeback of the
* compressed pages
if (ret < 0)
goto out_unlock;
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- ins.offset, 1);
-
em = alloc_extent_map();
if (!em) {
ret = -ENOMEM;
return ERR_PTR(ret);
}
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- ins.offset, 1);
-
return em;
}
break;
}
- btrfs_qgroup_update_reserved_bytes(root->fs_info,
- root->root_key.objectid,
- ins.offset, 1);
-
btrfs_drop_extent_cache(inode, cur_offset,
cur_offset + ins.offset -1, 0);
/*
* reservation tracking
*/
- u64 may_use;
u64 reserved;
/*
return ret;
}
-int btrfs_qgroup_update_reserved_bytes(struct btrfs_fs_info *fs_info,
- u64 ref_root,
- u64 num_bytes,
- int sign)
-{
- struct btrfs_root *quota_root;
- struct btrfs_qgroup *qgroup;
- int ret = 0;
- struct ulist_node *unode;
- struct ulist_iterator uiter;
-
- if (!is_fstree(ref_root) || !fs_info->quota_enabled)
- return 0;
-
- if (num_bytes == 0)
- return 0;
-
- spin_lock(&fs_info->qgroup_lock);
- quota_root = fs_info->quota_root;
- if (!quota_root)
- goto out;
-
- qgroup = find_qgroup_rb(fs_info, ref_root);
- if (!qgroup)
- goto out;
-
- ulist_reinit(fs_info->qgroup_ulist);
- ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
- (uintptr_t)qgroup, GFP_ATOMIC);
- if (ret < 0)
- goto out;
-
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
- struct btrfs_qgroup *qg;
- struct btrfs_qgroup_list *glist;
-
- qg = u64_to_ptr(unode->aux);
-
- qg->reserved += sign * num_bytes;
-
- list_for_each_entry(glist, &qg->groups, next_group) {
- ret = ulist_add(fs_info->qgroup_ulist,
- glist->group->qgroupid,
- (uintptr_t)glist->group, GFP_ATOMIC);
- if (ret < 0)
- goto out;
- }
- }
-
-out:
- spin_unlock(&fs_info->qgroup_lock);
- return ret;
-}
-
-/*
- * reserve some space for a qgroup and all its parents. The reservation takes
- * place with start_transaction or dealloc_reserve, similar to ENOSPC
- * accounting. If not enough space is available, EDQUOT is returned.
- * We assume that the requested space is new for all qgroups.
- */
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
struct btrfs_root *quota_root;
qg = u64_to_ptr(unode->aux);
if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
- qg->reserved + qg->may_use + (s64)qg->rfer + num_bytes >
+ qg->reserved + (s64)qg->rfer + num_bytes >
qg->max_rfer) {
ret = -EDQUOT;
goto out;
}
if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
- qg->reserved + qg->may_use + (s64)qg->excl + num_bytes >
+ qg->reserved + (s64)qg->excl + num_bytes >
qg->max_excl) {
ret = -EDQUOT;
goto out;
qg = u64_to_ptr(unode->aux);
- qg->may_use += num_bytes;
+ qg->reserved += num_bytes;
}
out:
qg = u64_to_ptr(unode->aux);
- qg->may_use -= num_bytes;
+ qg->reserved -= num_bytes;
list_for_each_entry(glist, &qg->groups, next_group) {
ret = ulist_add(fs_info->qgroup_ulist,
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
struct btrfs_qgroup_inherit *inherit);
-int btrfs_qgroup_update_reserved_bytes(struct btrfs_fs_info *fs_info,
- u64 ref_root,
- u64 num_bytes,
- int sign);
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes);
void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes);
mutex_lock(&inode->i_mutex);
ret = btrfs_check_data_free_space(inode, cluster->end +
- 1 - cluster->start);
+ 1 - cluster->start, 0);
if (ret)
goto out;