f2fs: add write priority option based on zone UFS
author    Liao Yuanhong <liaoyuanhong@vivo.com>
Mon, 15 Jul 2024 12:34:51 +0000 (20:34 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 10 Oct 2024 10:00:44 +0000 (12:00 +0200)
[ Upstream commit 8444ce524947daf441546b5b3a0c418706dade35 ]

Currently we use a mix of traditional UFS and zoned UFS to support
functionality that cannot be achieved on zoned UFS alone. This approach has a
drawback: there is a significant performance difference between traditional
UFS and zoned UFS. Under normal usage we prioritize writes to zoned UFS, but
under critical conditions (for example when the whole device is almost full)
it is unpredictable whether data ends up on traditional or zoned UFS. The
resulting performance fluctuations make development and testing difficult. To
address this, add a sysfs knob, blkzone_alloc_policy, under
/sys/fs/f2fs/<disk>/ with the following three modes (a user-space sketch of
switching the mode follows the list):
1) blkzone_alloc_policy == 0: normal mode, prioritize writing to zoned UFS;
2) blkzone_alloc_policy == 1: zoned-UFS-only mode, allow writes to zoned UFS
   only;
3) blkzone_alloc_policy == 2: traditional-UFS-priority mode, prioritize
   writing to traditional UFS.
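
A rough user-space illustration (not part of the patch; the helper program is
hypothetical, only the sysfs path comes from the ABI entry added below):

/*
 * setpolicy.c - hypothetical helper: write a blkzone_alloc_policy value
 * for a given f2fs device, e.g. "./setpolicy sda 2".
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char path[256];
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <disk> <policy 0|1|2>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path),
		 "/sys/fs/f2fs/%s/blkzone_alloc_policy", argv[1]);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The store handler accepts 0, 1 or 2; other values yield EINVAL. */
	if (write(fd, argv[2], strlen(argv[2])) < 0)
		perror("write");
	close(fd);
	return 0;
}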

Signed-off-by: Liao Yuanhong <liaoyuanhong@vivo.com>
Signed-off-by: Wu Bo <bo.wu@vivo.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Stable-dep-of: 65a6ce4726c2 ("f2fs: fix to don't panic system for no free segment fault injection")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Documentation/ABI/testing/sysfs-fs-f2fs
fs/f2fs/f2fs.h
fs/f2fs/segment.c
fs/f2fs/super.c
fs/f2fs/sysfs.c

diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index cad6c3dc1f9c1f4f93c1b845865a605e66ca59d1..3500920ab7ce020687f47bf2e7afe33547cbfc61 100644
@@ -763,3 +763,17 @@ Date:              November 2023
 Contact:       "Chao Yu" <chao@kernel.org>
 Description:   It controls to enable/disable IO aware feature for background discard.
                By default, the value is 1 which indicates IO aware is on.
+
+What:          /sys/fs/f2fs/<disk>/blkzone_alloc_policy
+Date:          July 2024
+Contact:       "Yuanhong Liao" <liaoyuanhong@vivo.com>
+Description:   The zoned UFS device in use consists of two parts:
+               conventional zones and sequential zones. This knob selects which
+               part is prioritized for writes. The default value is 0.
+
+               ========================  =========================================
+               value                     description
+               blkzone_alloc_policy = 0  Prioritize writing to sequential zones
+               blkzone_alloc_policy = 1  Only allow writing to sequential zones
+               blkzone_alloc_policy = 2  Prioritize writing to conventional zones
+               ========================  =========================================
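
A hypothetical read-back counterpart (again not part of the patch; the path
placeholder and program name are illustrative) that maps the value to the
meanings in the table above:

/* readpolicy.c - hypothetical: print the current policy and its meaning. */
#include <stdio.h>

int main(void)
{
	static const char * const desc[] = {
		"prioritize sequential zones",		/* 0 */
		"only sequential zones",		/* 1 */
		"prioritize conventional zones",	/* 2 */
	};
	/* "<disk>" is a placeholder, as in the ABI entry above. */
	FILE *f = fopen("/sys/fs/f2fs/<disk>/blkzone_alloc_policy", "r");
	int v;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &v) == 1 && v >= 0 && v <= 2)
		printf("blkzone_alloc_policy = %d (%s)\n", v, desc[v]);
	fclose(f);
	return 0;
}
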
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 9c8acb98f4dbf6125687b7ef25e805d892888c17..140f82886837abfe56554b20cf51104730870ec2 100644
@@ -134,6 +134,12 @@ typedef u32 nid_t;
 
 #define COMPRESS_EXT_NUM               16
 
+enum blkzone_allocation_policy {
+       BLKZONE_ALLOC_PRIOR_SEQ,        /* Prioritize writing to sequential zones */
+       BLKZONE_ALLOC_ONLY_SEQ,         /* Only allow writing to sequential zones */
+       BLKZONE_ALLOC_PRIOR_CONV,       /* Prioritize writing to conventional zones */
+};
+
 /*
  * An implementation of an rwsem that is explicitly unfair to readers. This
  * prevents priority inversion when a low-priority reader acquires the read lock
@@ -1563,6 +1569,8 @@ struct f2fs_sb_info {
 #ifdef CONFIG_BLK_DEV_ZONED
        unsigned int blocks_per_blkz;           /* F2FS blocks per zone */
        unsigned int max_open_zones;            /* max open zone resources of the zoned device */
+       /* For adjusting the preferred write position of data on zoned UFS */
+       unsigned int blkzone_alloc_policy;
 #endif
 
        /* for node-related operations */
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 425479d7692167fa05b91ad095997d1eb0675b36..5646ffed70f7798dee2f2941e7ccb27b2b545614 100644
@@ -2701,17 +2701,40 @@ static int get_new_segment(struct f2fs_sb_info *sbi,
                        goto got_it;
        }
 
+#ifdef CONFIG_BLK_DEV_ZONED
        /*
         * If we format f2fs on zoned storage, let's try to get pinned sections
         * from beginning of the storage, which should be a conventional one.
         */
        if (f2fs_sb_has_blkzoned(sbi)) {
-               segno = pinning ? 0 : max(first_zoned_segno(sbi), *newseg);
+               /* Prioritize writing to conventional zones */
+               if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_PRIOR_CONV || pinning)
+                       segno = 0;
+               else
+                       segno = max(first_zoned_segno(sbi), *newseg);
                hint = GET_SEC_FROM_SEG(sbi, segno);
        }
+#endif
 
 find_other_zone:
        secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+       if (secno >= MAIN_SECS(sbi) && f2fs_sb_has_blkzoned(sbi)) {
+               /* Write only to sequential zones */
+               if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_ONLY_SEQ) {
+                       hint = GET_SEC_FROM_SEG(sbi, first_zoned_segno(sbi));
+                       secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
+               } else
+                       secno = find_first_zero_bit(free_i->free_secmap,
+                                                               MAIN_SECS(sbi));
+               if (secno >= MAIN_SECS(sbi)) {
+                       ret = -ENOSPC;
+                       goto out_unlock;
+               }
+       }
+#endif
+
        if (secno >= MAIN_SECS(sbi)) {
                secno = find_first_zero_bit(free_i->free_secmap,
                                                        MAIN_SECS(sbi));
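
Taken together, the two hunks above give get_new_segment() the following
search order on zoned devices. The sketch below is a simplified stand-alone
model; the names (pick_section, find_zero, first_seq) are illustrative and
not the real f2fs helpers or bitmaps:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum policy { PRIOR_SEQ, ONLY_SEQ, PRIOR_CONV };  /* mirrors blkzone_allocation_policy */

/* Return the first free section at or after @from, or @n if none. */
static size_t find_zero(const bool *bitmap, size_t n, size_t from)
{
	for (size_t i = from; i < n; i++)
		if (!bitmap[i])
			return i;
	return n;
}

/*
 * Model of the selection order: sections below @first_seq stand for
 * conventional zones, the rest for sequential zones.  Returns the chosen
 * section, or @n when no section satisfies the policy (-ENOSPC in f2fs).
 */
static size_t pick_section(const bool *bitmap, size_t n, size_t first_seq,
			   size_t hint, enum policy p, bool pinning)
{
	size_t secno;

	/* PRIOR_CONV (and pinned files) start at section 0, i.e. in the
	 * conventional zones; otherwise skip ahead to the sequential zones
	 * unless the caller's hint already lies beyond them. */
	if (p == PRIOR_CONV || pinning)
		hint = 0;
	else if (hint < first_seq)
		hint = first_seq;

	secno = find_zero(bitmap, n, hint);
	if (secno < n)
		return secno;

	/* Nothing free past the hint: ONLY_SEQ retries from the first
	 * sequential zone and then gives up; the other two policies fall
	 * back to any free section, conventional zones included. */
	if (p == ONLY_SEQ)
		return find_zero(bitmap, n, first_seq);
	return find_zero(bitmap, n, 0);
}

int main(void)
{
	/* Eight sections: the first two model conventional zones (free),
	 * the remaining six model sequential zones (all in use). */
	bool secmap[8] = { false, false, true, true, true, true, true, true };

	/* Sequential zones are full, so PRIOR_SEQ falls back to section 0,
	 * while ONLY_SEQ reports "no space" (8 == number of sections). */
	printf("PRIOR_SEQ -> %zu\n", pick_section(secmap, 8, 2, 0, PRIOR_SEQ, false));
	printf("ONLY_SEQ  -> %zu\n", pick_section(secmap, 8, 2, 0, ONLY_SEQ, false));
	return 0;
}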
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index b4c8ac6c0859897f0cf097bf8d2e9f389fe74032..f7eeea30278ea766ff281fb115d9efc873d64cba 100644
@@ -4231,6 +4231,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
        sbi->aligned_blksize = true;
 #ifdef CONFIG_BLK_DEV_ZONED
        sbi->max_open_zones = UINT_MAX;
+       sbi->blkzone_alloc_policy = BLKZONE_ALLOC_PRIOR_SEQ;
 #endif
 
        for (i = 0; i < max_devices; i++) {
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 09d3ecfaa4f1a88028f7aa7ccd10748137f07800..15c8678581afcfb105dd2bed2745047e5e5ab325 100644
@@ -627,6 +627,15 @@ out:
        }
 #endif
 
+#ifdef CONFIG_BLK_DEV_ZONED
+       if (!strcmp(a->attr.name, "blkzone_alloc_policy")) {
+               if (t < BLKZONE_ALLOC_PRIOR_SEQ || t > BLKZONE_ALLOC_PRIOR_CONV)
+                       return -EINVAL;
+               sbi->blkzone_alloc_policy = t;
+               return count;
+       }
+#endif
+
 #ifdef CONFIG_F2FS_FS_COMPRESSION
        if (!strcmp(a->attr.name, "compr_written_block") ||
                !strcmp(a->attr.name, "compr_saved_block")) {
@@ -1033,6 +1042,7 @@ F2FS_SBI_GENERAL_RW_ATTR(warm_data_age_threshold);
 F2FS_SBI_GENERAL_RW_ATTR(last_age_weight);
 #ifdef CONFIG_BLK_DEV_ZONED
 F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
+F2FS_SBI_GENERAL_RW_ATTR(blkzone_alloc_policy);
 #endif
 
 /* STAT_INFO ATTR */
@@ -1187,6 +1197,7 @@ static struct attribute *f2fs_attrs[] = {
 #endif
 #ifdef CONFIG_BLK_DEV_ZONED
        ATTR_LIST(unusable_blocks_per_sec),
+       ATTR_LIST(blkzone_alloc_policy),
 #endif
 #ifdef CONFIG_F2FS_FS_COMPRESSION
        ATTR_LIST(compr_written_block),