btrfs: correct typos in multiple comments across various files
author Shen Lichuan <shenlichuan@vivo.com>
Tue, 24 Sep 2024 03:09:44 +0000 (11:09 +0800)
committer David Sterba <dsterba@suse.com>
Mon, 11 Nov 2024 13:34:14 +0000 (14:34 +0100)
Fix some confusing spelling errors that have been identified in comments.
The details are as follows:

block-group.c: 2800: uncompressible ==> incompressible
extent-tree.c: 3131: EXTEMT ==> EXTENT
extent_io.c: 3124: utlizing ==> utilizing
extent_map.c: 1323: ealier ==> earlier
extent_map.c: 1325: possiblity ==> possibility
fiemap.c: 189: emmitted ==> emitted
fiemap.c: 197: emmitted ==> emitted
fiemap.c: 203: emmitted ==> emitted
transaction.h: 36: trasaction ==> transaction
volumes.c: 5312: filesysmte ==> filesystem
zoned.c: 1977: trasnsaction ==> transaction

Signed-off-by: Shen Lichuan <shenlichuan@vivo.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
13 files changed:
fs/btrfs/block-group.c
fs/btrfs/dev-replace.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_map.c
fs/btrfs/fiemap.c
fs/btrfs/inode.c
fs/btrfs/qgroup.c
fs/btrfs/scrub.c
fs/btrfs/space-info.c
fs/btrfs/transaction.h
fs/btrfs/volumes.c
fs/btrfs/zoned.c

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 4423d8b716a58fa0ce1098efff83c0d741e52491..4427c1b835e8b24670d8244c2c40e9f84c8a56e5 100644
@@ -2797,7 +2797,7 @@ next:
                 * uncompressed data size, because the compression is only done
                 * when writeback triggered and we don't know how much space we
                 * are actually going to need, so we reserve the uncompressed
-                * size because the data may be uncompressible in the worst case.
+                * size because the data may be incompressible in the worst case.
                 */
                if (ret == 0) {
                        bool used;
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 604399e59a3d107e8dd089aa3e6d2bddc44a97d0..ac8e97ed13f7510d2b7f4449938e7aac3befc1c4 100644
@@ -45,7 +45,7 @@
  *
  * - Copy existing extents
  *
- *   This happens by re-using scrub facility, as scrub also iterates through
+ *   This happens by reusing scrub facility, as scrub also iterates through
  *   existing extents from commit root.
  *
  *   Location:         scrub_write_block_to_dev_replace() from
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d9f511babd89ab5636e4aa1101c5ac7ad9dac1d9..79373f0ab6cee339c1223cf6cf7e49dc648b9fe9 100644
@@ -3144,7 +3144,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                break;
                        }
 
-                       /* Quick path didn't find the EXTEMT/METADATA_ITEM */
+                       /* Quick path didn't find the EXTENT/METADATA_ITEM */
                        if (path->slots[0] - extent_slot > 5)
                                break;
                        extent_slot--;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 89a7e85f2b3878bca68d1462744e14dd7fbf9267..6aa39e0be2e8ea681961efad24ab79095241716a 100644
@@ -3186,7 +3186,7 @@ out:
        }
        /*
         * Now all pages of that extent buffer is unmapped, set UNMAPPED flag,
-        * so it can be cleaned up without utlizing page->mapping.
+        * so it can be cleaned up without utilizing page->mapping.
         */
        set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
 
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 1d93e1202c33944bffcb659bbc85ae649eb0d739..a8b86f12b00dcea0d6858cd42257d4f0ca2a3b15 100644
@@ -1326,9 +1326,9 @@ long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
         * not possible to know which task made more progress because we can
         * cycle back to the first root and first inode if it's not the first
         * time the shrinker ran, see the above logic. Also a task that started
-        * later may finish ealier than another task and made less progress. So
+        * later may finish earlier than another task and made less progress. So
         * make this simple and update to the progress of the last task that
-        * finished, with the occasional possiblity of having two consecutive
+        * finished, with the occasional possibility of having two consecutive
         * runs of the shrinker process the same inodes.
         */
        spin_lock(&fs_info->extent_map_shrinker_lock);
diff --git a/fs/btrfs/fiemap.c b/fs/btrfs/fiemap.c
index df7f09f3b02e06a8465a7ff8776aeab0ef9ea787..b80c07ad8c5e71b0e85a7e8ea29a0a2c031cf63c 100644
@@ -186,7 +186,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
                         * we have in the cache is the last delalloc range we
                         * found while the file extent item we found can be
                         * either for a whole delalloc range we previously
-                        * emmitted or only a part of that range.
+                        * emitted or only a part of that range.
                         *
                         * We have two cases here:
                         *
@@ -194,13 +194,13 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
                         *    cached extent's end. In this case just ignore the
                         *    current file extent item because we don't want to
                         *    overlap with previous ranges that may have been
-                        *    emmitted already;
+                        *    emitted already;
                         *
                         * 2) The file extent item starts behind the currently
                         *    cached extent but its end offset goes beyond the
                         *    end offset of the cached extent. We don't want to
                         *    overlap with a previous range that may have been
-                        *    emmitted already, so we emit the currently cached
+                        *    emitted already, so we emit the currently cached
                         *    extent and then partially store the current file
                         *    extent item's range in the cache, for the subrange
                         *    going the cached extent's end to the end of the
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 20336b1bf4a5b0e972ccbc5cb5f85dd3ba5265b2..5d8da882d4871955c7219e9dbe9265ed915307bc 100644
@@ -5987,7 +5987,7 @@ again:
         * offset.  This means that new entries created during readdir
         * are *guaranteed* to be seen in the future by that readdir.
         * This has broken buggy programs which operate on names as
-        * they're returned by readdir.  Until we re-use freed offsets
+        * they're returned by readdir.  Until we reuse freed offsets
         * we have this hack to stop new entries from being returned
         * under the assumption that they'll never reach this huge
         * offset.
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 8773f989c3be24734933462b1c2cd7a5ef585a98..4276c4607c56843ee7fde734e51433042abbe729 100644
@@ -469,7 +469,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
                        /*
                         * If a qgroup exists for a subvolume ID, it is possible
                         * that subvolume has been deleted, in which case
-                        * re-using that ID would lead to incorrect accounting.
+                        * reusing that ID would lead to incorrect accounting.
                         *
                         * Ensure that we skip any such subvol ids.
                         *
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 43431065d9818910cd557536cacc1a9ffadfe954..e141132b5c8dfa91187768a6937371484ce59a84 100644
@@ -1954,7 +1954,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
        ASSERT(sctx->raid56_data_stripes);
 
        /*
-        * For data stripe search, we cannot re-use the same extent/csum paths,
+        * For data stripe search, we cannot reuse the same extent/csum paths,
         * as the data stripe bytenr may be smaller than previous extent.  Thus
         * we have to use our own extent/csum paths.
         */
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index d5a9cd8a4fd8d7d5913ace27eb18ba07fbb328bc..ee23fae73f47364858e25d5fc2d213ec547f5562 100644
@@ -1279,7 +1279,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
  *   If we are freeing inodes, we want to make sure all delayed iputs have
  *   completed, because they could have been on an inode with i_nlink == 0, and
  *   thus have been truncated and freed up space.  But again this space is not
- *   immediately re-usable, it comes in the form of a delayed ref, which must be
+ *   immediately reusable, it comes in the form of a delayed ref, which must be
  *   run and then the transaction must be committed.
  *
  * COMMIT_TRANS
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index dd9ce9b9f69e38730025e54ecb3fab0ce89c3ff1..184fa5c0062abf2ab802e0789ca75b098284ad18 100644
@@ -33,7 +33,7 @@ struct btrfs_path;
  */
 #define BTRFS_TRANS_DIO_WRITE_STUB     ((void *) 1)
 
-/* Radix-tree tag for roots that are part of the trasaction. */
+/* Radix-tree tag for roots that are part of the transaction. */
 #define BTRFS_ROOT_TRANS_TAG                   0
 
 enum btrfs_trans_state {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5895397364aacc192b07762f2b0224b2c820b3dc..82f3a2ed2d9ce8567604a8b1a3e7b781bf04a9b4 100644
@@ -5431,7 +5431,7 @@ static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
        ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
        data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
 
-       /* stripe_size is fixed in zoned filesysmte. Reduce ndevs instead. */
+       /* stripe_size is fixed in zoned filesystem. Reduce ndevs instead. */
        if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
                ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
                                             ctl->stripe_size) + ctl->nparity,
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 69d03feea4e0ecfc84784943950b2bd39178bed8..dbcbf754d2846d25f95955e168ea407dd7022484 100644
@@ -1973,7 +1973,7 @@ int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
        if (block_group->meta_write_pointer > eb->start)
                return -EBUSY;
 
-       /* If for_sync, this hole will be filled with trasnsaction commit. */
+       /* If for_sync, this hole will be filled with transaction commit. */
        if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
                return -EAGAIN;
        return -EBUSY;