CHUNK_ALLOC_FORCE = 2,
};
-static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node, u64 parent,
u64 root_objectid, u64 owner_objectid,
u64 owner_offset, int refs_to_drop,
struct btrfs_delayed_extent_op *extra_op);
-static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
+void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
struct extent_buffer *leaf,
struct btrfs_extent_item *ei);
-static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
+int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
struct btrfs_key *ins, int ref_mod);
-static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
+int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op);
-static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
int force);
-static int find_next_key(struct btrfs_path *path, int level,
+int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key);
-static void dump_space_info(struct btrfs_fs_info *fs_info,
+void dump_space_info(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *info, u64 bytes,
int dump_block_groups);
-static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
+int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
-static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
+void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
u64 num_bytes);
-static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
+void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
u64 num_bytes);
-static noinline int
+noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
smp_mb();
return cache->cached == BTRFS_CACHE_DONE ||
	cache->cached == BTRFS_CACHE_ERROR;
}
-static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
+int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
return (cache->flags & bits) == bits;
}
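/*
 * Aside (illustrative sketch, not part of this patch): the masked-equality
 * test above is the standard "all bits present" check -- it succeeds only
 * when every bit in @bits is also set in @flags. A hypothetical caller
 * checking for a mixed block group:
 *
 *	static inline int is_mixed_bg_example(struct btrfs_block_group_cache *cache)
 *	{
 *		// both DATA and METADATA must be set for a mixed group
 *		return block_group_bits(cache, BTRFS_BLOCK_GROUP_DATA |
 *					       BTRFS_BLOCK_GROUP_METADATA);
 *	}
 */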
* this adds the block group to the fs_info rb tree for the block group
* cache
*/
-static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
+int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
struct btrfs_block_group_cache *block_group)
{
struct rb_node **p;
* This will return the block group at or after bytenr if contains is 0, else
* it will return the block group that contains the bytenr
*/
-static struct btrfs_block_group_cache *
+struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
int contains)
{
return ret;
}
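/*
 * Hedged reconstruction of the elided search body above (consistent with
 * mainline; locking and first_logical_byte bookkeeping omitted). The rb tree
 * is keyed by block group start offset: with contains == 1 only a group
 * whose [start, start + offset) range covers @bytenr is accepted; with
 * contains == 0 the best "at or after" candidate is remembered while
 * descending left.
 *
 *	while (n) {
 *		cache = rb_entry(n, struct btrfs_block_group_cache, cache_node);
 *		start = cache->key.objectid;
 *		end = start + cache->key.offset - 1;
 *		if (bytenr < start) {
 *			if (!contains && (!ret || start < ret->key.objectid))
 *				ret = cache;	// best at-or-after so far
 *			n = n->rb_left;
 *		} else if (bytenr > start) {
 *			if (contains && bytenr <= end) {
 *				ret = cache;	// range covers bytenr
 *				break;
 *			}
 *			n = n->rb_right;
 *		} else {
 *			ret = cache;		// exact start match
 *			break;
 *		}
 *	}
 */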
-static int add_excluded_extent(struct btrfs_fs_info *fs_info,
+int add_excluded_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 num_bytes)
{
u64 end = start + num_bytes - 1;
return 0;
}
-static void free_excluded_extents(struct btrfs_block_group_cache *cache)
+void free_excluded_extents(struct btrfs_block_group_cache *cache)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
u64 start, end;
start, end, EXTENT_UPTODATE);
}
-static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
+int exclude_super_stripes(struct btrfs_block_group_cache *cache)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
u64 bytenr;
return 0;
}
-static struct btrfs_caching_control *
+struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
struct btrfs_caching_control *ctl;
return ctl;
}
-static void put_caching_control(struct btrfs_caching_control *ctl)
+void put_caching_control(struct btrfs_caching_control *ctl)
{
if (refcount_dec_and_test(&ctl->count))
kfree(ctl);
}
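/*
 * Aside (illustrative, not part of this patch): this is the usual
 * refcount_t get/put pairing. get_caching_control() above takes a reference
 * under the group's lock (or returns NULL if caching is not in progress);
 * the final put frees the control. A typical caller:
 *
 *	ctl = get_caching_control(cache);	// takes a reference or NULL
 *	if (ctl) {
 *		wait_event(ctl->wait, block_group_cache_done(cache));
 *		put_caching_control(ctl);	// may kfree(ctl)
 *	}
 */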
#ifdef CONFIG_BTRFS_DEBUG
-static void fragment_free_space(struct btrfs_block_group_cache *block_group)
+void fragment_free_space(struct btrfs_block_group_cache *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
u64 start = block_group->key.objectid;
return total_added;
}
-static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
+int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
struct btrfs_fs_info *fs_info = block_group->fs_info;
return ret;
}
-static noinline void caching_thread(struct btrfs_work *work)
+noinline void caching_thread(struct btrfs_work *work)
{
struct btrfs_block_group_cache *block_group;
struct btrfs_fs_info *fs_info;
btrfs_put_block_group(block_group);
}
-static int cache_block_group(struct btrfs_block_group_cache *cache,
+int cache_block_group(struct btrfs_block_group_cache *cache,
int load_cache_only)
{
DEFINE_WAIT(wait);
/*
* return the block group that starts at or after bytenr
*/
-static struct btrfs_block_group_cache *
+struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
return block_group_cache_tree_search(info, bytenr, 0);
return block_group_cache_tree_search(info, bytenr, 1);
}
-static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
+struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
u64 flags)
{
struct list_head *head = &info->space_info;
return NULL;
}
-static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
+void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
bool metadata, u64 root_objectid)
{
struct btrfs_space_info *space_info;
return BTRFS_REF_TYPE_INVALID;
}
-static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
+u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
u32 high_crc = ~(u32)0;
u32 low_crc = ~(u32)0;
return ((u64)high_crc << 31) ^ (u64)low_crc;
}
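/*
 * Hedged reconstruction of the elided body above (consistent with the
 * visible locals and with mainline): root_objectid feeds one crc32c stream,
 * owner and offset feed the other, and the two are folded together.
 *
 *	__le64 lenum;
 *
 *	lenum = cpu_to_le64(root_objectid);
 *	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
 *	lenum = cpu_to_le64(owner);
 *	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
 *	lenum = cpu_to_le64(offset);
 *	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
 *
 * The "<< 31" (not 32) in the return is intentional and kept for on-disk
 * hash compatibility.
 */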
-static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
+u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
struct btrfs_extent_data_ref *ref)
{
return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
btrfs_extent_data_ref_offset(leaf, ref));
}
-static int match_extent_data_ref(struct extent_buffer *leaf,
+int match_extent_data_ref(struct extent_buffer *leaf,
struct btrfs_extent_data_ref *ref,
u64 root_objectid, u64 owner, u64 offset)
{
return 1;
}
-static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
+noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid,
return err;
}
-static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
+noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid, u64 owner,
return ret;
}
-static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
+noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
int refs_to_drop, int *last_ref)
{
return ret;
}
-static noinline u32 extent_data_ref_count(struct btrfs_path *path,
+noinline u32 extent_data_ref_count(struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref)
{
struct btrfs_key key;
return num_refs;
}
-static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
+noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid)
return ret;
}
-static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
+noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid)
return ret;
}
-static inline int extent_ref_type(u64 parent, u64 owner)
+inline int extent_ref_type(u64 parent, u64 owner)
{
int type;
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
return type;
}
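/*
 * Hedged sketch of the elided decision above (matches mainline): tree
 * blocks (owner below BTRFS_FIRST_FREE_OBJECTID) vs. data extents, crossed
 * with shared (parent != 0) vs. keyed (parent == 0) back references:
 *
 *	owner < FIRST_FREE, parent > 0   -> BTRFS_SHARED_BLOCK_REF_KEY
 *	owner < FIRST_FREE, parent == 0  -> BTRFS_TREE_BLOCK_REF_KEY
 *	owner >= FIRST_FREE, parent > 0  -> BTRFS_SHARED_DATA_REF_KEY
 *	owner >= FIRST_FREE, parent == 0 -> BTRFS_EXTENT_DATA_REF_KEY
 */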
-static int find_next_key(struct btrfs_path *path, int level,
+int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key)
{
* NOTE: inline back refs are ordered in the same way that back ref
* items in the tree are ordered.
*/
-static noinline_for_stack
+noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_extent_inline_ref **ref_ret,
/*
* helper to add new inline back ref
*/
-static noinline_for_stack
+noinline_for_stack
void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
btrfs_mark_buffer_dirty(leaf);
}
-static int lookup_extent_backref(struct btrfs_trans_handle *trans,
+int lookup_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_extent_inline_ref **ref_ret,
u64 bytenr, u64 num_bytes, u64 parent,
/*
* helper to update/remove inline back ref
*/
-static noinline_for_stack
+noinline_for_stack
void update_inline_extent_backref(struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
int refs_to_mod,
btrfs_mark_buffer_dirty(leaf);
}
-static noinline_for_stack
+noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
u64 bytenr, u64 num_bytes, u64 parent,
return ret;
}
-static int insert_extent_backref(struct btrfs_trans_handle *trans,
+int insert_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
u64 bytenr, u64 parent, u64 root_objectid,
u64 owner, u64 offset, int refs_to_add)
return ret;
}
-static int remove_extent_backref(struct btrfs_trans_handle *trans,
+int remove_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
int refs_to_drop, int is_data, int *last_ref)
}
#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
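/*
 * Aside: in_range() tests the half-open interval [first, first + len), so
 * in_range(first, first, len) is true while in_range(first + len, first,
 * len) is false. In mainline, btrfs_issue_discard() below uses it to avoid
 * discarding ranges that overlap superblock mirror locations.
 */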
-static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
+int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
u64 *discarded_bytes)
{
int j, ret = 0;
* updating a tree block's flags
*
*/
-static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node,
u64 parent, u64 root_objectid,
u64 owner, u64 offset, int refs_to_add,
return ret;
}
-static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
+int run_delayed_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
return ret;
}
-static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
+void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
struct extent_buffer *leaf,
struct btrfs_extent_item *ei)
{
}
}
-static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
+int run_delayed_extent_op(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_extent_op *extent_op)
{
return err;
}
-static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
+int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
}
/* helper function to actually process a single delayed ref entry */
-static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
+int run_one_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
return ret;
}
-static inline struct btrfs_delayed_ref_node *
+inline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
struct btrfs_delayed_ref_node *ref;
return ref;
}
-static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head)
{
spin_lock(&delayed_refs->lock);
btrfs_delayed_ref_unlock(head);
}
-static int cleanup_extent_op(struct btrfs_trans_handle *trans,
+int cleanup_extent_op(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head)
{
struct btrfs_delayed_extent_op *extent_op = head->extent_op;
return ret ? ret : 1;
}
-static int cleanup_ref_head(struct btrfs_trans_handle *trans,
+int cleanup_ref_head(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head)
{
return 0;
}
-static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
+struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
struct btrfs_trans_handle *trans)
{
struct btrfs_delayed_ref_root *delayed_refs =
return head;
}
-static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
+int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *locked_ref,
unsigned long *run_refs)
{
* Returns 0 on success or if called with an already aborted transaction.
* Returns -ENOMEM or -EIO on failure and will abort the transaction.
*/
-static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
+noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
unsigned long nr)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
* correlates in most cases to the order added. To expose dependencies on this
* order, we start to process the tree in the middle instead of the beginning
*/
-static u64 find_middle(struct rb_root *root)
+u64 find_middle(struct rb_root *root)
{
struct rb_node *n = root->rb_node;
struct btrfs_delayed_ref_node *entry;
}
#endif
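/*
 * Hedged sketch of how find_middle() above lands near the middle (believed
 * to match the mainline debug helper guarded by this #ifdef): from the
 * root, alternate left and right descents; the last bytenr visited is
 * approximately the median key.
 *
 *	int alt = 1;
 *	while (n) {
 *		entry = rb_entry(n, struct btrfs_delayed_ref_node, ref_node);
 *		middle = entry->bytenr;
 *		n = alt ? n->rb_left : n->rb_right;
 *		alt = 1 - alt;
 *	}
 *	return middle;
 */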
-static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
+inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
{
u64 num_bytes;
struct btrfs_work work;
};
-static inline struct async_delayed_refs *
+inline struct async_delayed_refs *
to_async_delayed_refs(struct btrfs_work *work)
{
return container_of(work, struct async_delayed_refs, work);
}
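/*
 * Aside (illustrative, not part of this patch): container_of() recovers the
 * enclosing structure from a pointer to one of its members, so a btrfs_work
 * callback can find its async_delayed_refs context without any extra
 * argument:
 *
 *	void cb(struct btrfs_work *work)
 *	{
 *		struct async_delayed_refs *async = to_async_delayed_refs(work);
 *		...
 *	}
 *
 * delayed_ref_async_start() below is exactly this pattern.
 */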
-static void delayed_ref_async_start(struct btrfs_work *work)
+void delayed_ref_async_start(struct btrfs_work *work)
{
struct async_delayed_refs *async = to_async_delayed_refs(work);
struct btrfs_trans_handle *trans;
return ret;
}
-static noinline int check_delayed_ref(struct btrfs_root *root,
+noinline int check_delayed_ref(struct btrfs_root *root,
struct btrfs_path *path,
u64 objectid, u64 offset, u64 bytenr)
{
return ret;
}
-static noinline int check_committed_ref(struct btrfs_root *root,
+noinline int check_committed_ref(struct btrfs_root *root,
struct btrfs_path *path,
u64 objectid, u64 offset, u64 bytenr)
{
return ret;
}
-static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
int full_backref, int inc)
return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
-static int write_one_cache_group(struct btrfs_trans_handle *trans,
+int write_one_cache_group(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_block_group_cache *cache)
}
-static struct btrfs_block_group_cache *
+struct btrfs_block_group_cache *
next_block_group(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache)
{
return cache;
}
-static int cache_save_setup(struct btrfs_block_group_cache *block_group,
+int cache_save_setup(struct btrfs_block_group_cache *block_group,
struct btrfs_trans_handle *trans,
struct btrfs_path *path)
{
wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}
-static const char *alloc_name(u64 flags)
+const char *alloc_name(u64 flags)
{
switch (flags) {
case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
};
}
-static int create_space_info(struct btrfs_fs_info *info, u64 flags)
+int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
struct btrfs_space_info *space_info;
return ret;
}
-static void update_space_info(struct btrfs_fs_info *info, u64 flags,
+void update_space_info(struct btrfs_fs_info *info, u64 flags,
u64 total_bytes, u64 bytes_used,
u64 bytes_readonly,
struct btrfs_space_info **space_info)
*space_info = found;
}
-static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
+void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
u64 extra_flags = chunk_to_extended(flags) &
BTRFS_EXTENDED_PROFILE_MASK;
*
* should be called with balance_lock held
*/
-static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
+u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
struct btrfs_balance_control *bctl = fs_info->balance_ctl;
u64 target = 0;
* progress (either running or paused) picks the target profile (if it's
* already available), otherwise falls back to plain reducing.
*/
-static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
+u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
u64 num_devices = fs_info->fs_devices->rw_devices;
u64 target;
return extended_to_chunk(flags | allowed);
}
-static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
+u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
unsigned seq;
u64 flags;
return btrfs_reduce_alloc_profile(fs_info, flags);
}
-static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
+u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 flags;
return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}
-static u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
+u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
bool may_use_included)
{
ASSERT(s_info);
btrfs_qgroup_free_data(inode, reserved, start, len);
}
-static void force_metadata_allocation(struct btrfs_fs_info *info)
+void force_metadata_allocation(struct btrfs_fs_info *info)
{
struct list_head *head = &info->space_info;
struct btrfs_space_info *found;
rcu_read_unlock();
}
-static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
+inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
return (global->size << 1);
}
-static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
+int should_alloc_chunk(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *sinfo, int force)
{
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
return 1;
}
-static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
+u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
{
u64 num_dev;
* - return 1 if it successfully allocates a chunk,
* - return errors including -ENOSPC otherwise.
*/
-static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
int force)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
return ret;
}
-static int can_overcommit(struct btrfs_fs_info *fs_info,
+int can_overcommit(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush,
bool system_chunk)
return 0;
}
-static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
+void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
unsigned long nr_pages, int nr_items)
{
struct super_block *sb = fs_info->sb;
}
}
-static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
+inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
u64 to_reclaim)
{
u64 bytes;
/*
* shrink metadata reservation for delalloc
*/
-static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
+void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
u64 orig, bool wait_ordered)
{
struct btrfs_space_info *space_info;
* get us somewhere and then commit the transaction if it does. Otherwise it
* will return -ENOSPC.
*/
-static int may_commit_transaction(struct btrfs_fs_info *fs_info,
+int may_commit_transaction(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info)
{
struct reserve_ticket *ticket = NULL;
* and may fail for various reasons. The caller is supposed to examine the
* state of @space_info to detect the outcome.
*/
-static void flush_space(struct btrfs_fs_info *fs_info,
+void flush_space(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, u64 num_bytes,
int state)
{
return;
}
-static inline u64
+inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
bool system_chunk)
return to_reclaim;
}
-static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
+inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
u64 used, bool system_chunk)
{
!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
-static void wake_all_tickets(struct list_head *head)
+void wake_all_tickets(struct list_head *head)
{
struct reserve_ticket *ticket;
* will loop and continuously try to flush as long as we are making progress.
* We count progress as clearing off tickets each time we have to loop.
*/
-static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
+void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
struct btrfs_fs_info *fs_info;
struct btrfs_space_info *space_info;
INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}
-static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
+void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
struct reserve_ticket *ticket)
{
} while (flush_state < COMMIT_TRANS);
}
-static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
+int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
struct reserve_ticket *ticket, u64 orig_bytes)
* regain reservations will be made and this will fail if there is not enough
* space already.
*/
-static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
+int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
u64 orig_bytes,
enum btrfs_reserve_flush_enum flush,
* regain reservations will be made and this will fail if there is not enough
* space already.
*/
-static int reserve_metadata_bytes(struct btrfs_root *root,
+int reserve_metadata_bytes(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
u64 orig_bytes,
enum btrfs_reserve_flush_enum flush)
return ret;
}
-static struct btrfs_block_rsv *get_block_rsv(
+struct btrfs_block_rsv *get_block_rsv(
const struct btrfs_trans_handle *trans,
const struct btrfs_root *root)
{
return block_rsv;
}
-static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
+int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
u64 num_bytes)
{
int ret = -ENOSPC;
return ret;
}
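/*
 * Hedged reconstruction of the elided body above (consistent with
 * mainline): take @num_bytes from the rsv only if it is fully available,
 * all under the rsv spinlock.
 *
 *	spin_lock(&block_rsv->lock);
 *	if (block_rsv->reserved >= num_bytes) {
 *		block_rsv->reserved -= num_bytes;
 *		if (block_rsv->reserved < block_rsv->size)
 *			block_rsv->full = 0;
 *		ret = 0;
 *	}
 *	spin_unlock(&block_rsv->lock);
 *
 * so callers see -ENOSPC on a partial reserve rather than a short grant.
 */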
-static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
+void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
u64 num_bytes, bool update_size)
{
spin_lock(&block_rsv->lock);
* This is for space we already have accounted in space_info->bytes_may_use, so
* basically when we're returning space from block_rsv's.
*/
-static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
+void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
u64 num_bytes)
{
* space_info->bytes_may_use yet. So if we allocate a chunk or unpin an extent
* we use this helper.
*/
-static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
+void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
u64 num_bytes)
{
}
}
-static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
+u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
struct btrfs_block_rsv *dest, u64 num_bytes,
u64 *qgroup_to_release_ret)
* or return if we already have enough space. This will also handle the reserve
* tracepoint for the reserved amount.
*/
-static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
+int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
enum btrfs_reserve_flush_enum flush)
{
struct btrfs_root *root = inode->root;
* This is the same as btrfs_block_rsv_release, except that it handles the
* tracepoint for the reservation.
*/
-static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free)
+void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes, NULL);
}
-static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
+void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
struct btrfs_space_info *sinfo = block_rsv->space_info;
spin_unlock(&sinfo->lock);
}
-static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
+void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
struct btrfs_space_info *space_info;
update_global_block_rsv(fs_info);
}
-static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
+void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
(u64)-1, NULL);
btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
}
-static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
+void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
struct btrfs_inode *inode)
{
struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
btrfs_free_reserved_data_space(inode, reserved, start, len);
}
-static int update_block_group(struct btrfs_trans_handle *trans,
+int update_block_group(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *info, u64 bytenr,
u64 num_bytes, int alloc)
{
return 0;
}
-static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
+u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
{
struct btrfs_block_group_cache *cache;
u64 bytenr;
return bytenr;
}
-static int pin_down_extent(struct btrfs_fs_info *fs_info,
+int pin_down_extent(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache,
u64 bytenr, u64 num_bytes, int reserved)
{
return ret;
}
-static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
+int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 num_bytes)
{
int ret;
return ret;
}
-static void
+void
btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
{
atomic_inc(&bg->reservations);
* reservation and the block group has become read only we cannot make the
* reservation and return -EAGAIN, otherwise this function always succeeds.
*/
-static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
+int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
u64 ram_bytes, u64 num_bytes, int delalloc)
{
struct btrfs_space_info *space_info = cache->space_info;
* reserve set to 0 in order to clear the reservation.
*/
-static void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
+void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
u64 num_bytes, int delalloc)
{
struct btrfs_space_info *space_info = cache->space_info;
* Returns the free cluster for the given space info and sets empty_cluster to
* what it should be based on the mount options.
*/
-static struct btrfs_free_cluster *
+struct btrfs_free_cluster *
fetch_cluster_info(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, u64 *empty_cluster)
{
return ret;
}
-static int unpin_extent_range(struct btrfs_fs_info *fs_info,
+int unpin_extent_range(struct btrfs_fs_info *fs_info,
u64 start, u64 end,
const bool return_free_space)
{
return 0;
}
-static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node, u64 parent,
u64 root_objectid, u64 owner_objectid,
u64 owner_offset, int refs_to_drop,
* a given extent, and if there are no other delayed refs to be processed, it
* removes it from the tree.
*/
-static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
+noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
u64 bytenr)
{
struct btrfs_delayed_ref_head *head;
* Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
* any of the information in this block group.
*/
-static noinline void
+noinline void
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
u64 num_bytes)
{
put_caching_control(caching_ctl);
}
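/*
 * Hedged reconstruction of the elided wait above (consistent with
 * mainline): block until either caching has finished (or errored) or
 * enough free space has been loaded to satisfy the allocation.
 *
 *	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
 *		   (cache->free_space_ctl->free_space >= num_bytes));
 */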
-static noinline int
+noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
struct btrfs_caching_control *caching_ctl;
LOOP_NO_EMPTY_SIZE = 3,
};
-static inline void
+inline void
btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
int delalloc)
{
down_read(&cache->data_rwsem);
}
-static inline void
+inline void
btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
int delalloc)
{
down_read(&cache->data_rwsem);
}
-static struct btrfs_block_group_cache *
+struct btrfs_block_group_cache *
btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
int delalloc)
}
}
-static inline void
+inline void
btrfs_release_block_group(struct btrfs_block_group_cache *cache,
int delalloc)
{
* If there is no suitable free space, we will record the max size of
* the free space extent currently.
*/
-static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
+noinline int find_free_extent(struct btrfs_fs_info *fs_info,
u64 ram_bytes, u64 num_bytes, u64 empty_size,
u64 hint_byte, struct btrfs_key *ins,
u64 flags, int delalloc)
return ret;
}
-static void dump_space_info(struct btrfs_fs_info *fs_info,
+void dump_space_info(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *info, u64 bytes,
int dump_block_groups)
{
return ret;
}
-static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
+int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len,
int pin, int delalloc)
{
return __btrfs_free_reserved_extent(fs_info, start, len, 1, 0);
}
-static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
+int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
struct btrfs_key *ins, int ref_mod)
return ret;
}
-static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
+int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op)
{
return ret;
}
-static struct extent_buffer *
+struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
u64 bytenr, int level, u64 owner)
{
return buf;
}
-static struct btrfs_block_rsv *
+struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u32 blocksize)
{
return ERR_PTR(ret);
}
-static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
+void unuse_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
block_rsv_add_bytes(block_rsv, blocksize, false);
#define DROP_REFERENCE 1
#define UPDATE_BACKREF 2
-static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
+noinline void reada_walk_down(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct walk_control *wc,
struct btrfs_path *path)
*
* NOTE: return value 1 means we should stop walking down.
*/
-static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
+noinline int walk_down_proc(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct walk_control *wc, int lookup_info)
*
* NOTE: return value 1 means we should stop walking down.
*/
-static noinline int do_walk_down(struct btrfs_trans_handle *trans,
+noinline int do_walk_down(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct walk_control *wc, int *lookup_info)
*
* NOTE: return value 1 means we should stop walking up.
*/
-static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
+noinline int walk_up_proc(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct walk_control *wc)
return -EUCLEAN;
}
-static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
+noinline int walk_down_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct walk_control *wc)
return 0;
}
-static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
+noinline int walk_up_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct walk_control *wc, int max_level)
return ret;
}
-static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
+u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
{
u64 num_devices;
u64 stripped;
return flags;
}
-static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
+int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
struct btrfs_space_info *sinfo = cache->space_info;
u64 num_bytes;
return ret;
}
-static int find_first_block_group(struct btrfs_fs_info *fs_info,
+int find_first_block_group(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_key *key)
{
"failed to add kobject for block cache, ignoring");
}
-static void link_block_group(struct btrfs_block_group_cache *cache)
+void link_block_group(struct btrfs_block_group_cache *cache)
{
struct btrfs_space_info *space_info = cache->space_info;
struct btrfs_fs_info *fs_info = cache->fs_info;
}
}
-static struct btrfs_block_group_cache *
+struct btrfs_block_group_cache *
btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
u64 start, u64 size)
{
* Iterate all chunks and verify that each of them has the corresponding block
* group
*/
-static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
+int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
{
struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct extent_map *em;
return 0;
}
-static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
+void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
u64 extra_flags = chunk_to_extended(flags) &
BTRFS_EXTENDED_PROFILE_MASK;
* it while performing the free space search since we have already
* held back allocations.
*/
-static int btrfs_trim_free_extents(struct btrfs_device *device,
+int btrfs_trim_free_extents(struct btrfs_device *device,
u64 minlen, u64 *trimmed)
{
u64 start = 0, len = 0;