www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/zswap: fix typos: s/zwap/zswap/
author SeongJae Park <sj@kernel.org>
Fri, 3 Oct 2025 20:38:49 +0000 (13:38 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 22 Oct 2025 01:51:22 +0000 (18:51 -0700)
As the subject says.

Link: https://lkml.kernel.org/r/20251003203851.43128-3-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Acked-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Acked-by: Nhat Pham <nphamcs@gmail.com>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chris Li <chrisl@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memcontrol.c
mm/zswap.c

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4deda33625f41aa81db0276454f56b826a033146..3ae5cbcaed752b57fb97e979cb022628873a5df1 100644
@@ -5443,7 +5443,7 @@ bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
  * @size: size of compressed object
  *
  * This forces the charge after obj_cgroup_may_zswap() allowed
- * compression and storage in zwap for this cgroup to go ahead.
+ * compression and storage in zswap for this cgroup to go ahead.
  */
 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
 {
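The kerneldoc fixed in this hunk describes a two-step charging pattern: obj_cgroup_may_zswap() is consulted before compression, and obj_cgroup_charge_zswap() then forces the charge for the actual compressed size. A minimal userspace C sketch of that check-then-charge ordering is below; the struct, function names, and sizes are invented stand-ins for illustration, not the kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the cgroup/zswap accounting, not a kernel type. */
struct fake_objcg {
	size_t zswap_charged;	/* bytes already charged to this "cgroup" */
	size_t zswap_limit;	/* per-"cgroup" zswap limit */
};

/* Pre-check: may this "cgroup" store more data in zswap at all? */
static bool may_zswap(const struct fake_objcg *objcg)
{
	return objcg->zswap_charged < objcg->zswap_limit;
}

/* Post-compression: force the charge for the real compressed size. */
static void charge_zswap(struct fake_objcg *objcg, size_t size)
{
	objcg->zswap_charged += size;
}

/* Placeholder "compression": pretend a 4 KiB page shrank to 1 KiB. */
static size_t compress_page(const void *page)
{
	(void)page;
	return 1024;
}

int main(void)
{
	struct fake_objcg objcg = { .zswap_charged = 0, .zswap_limit = 8192 };
	char page[4096] = { 0 };

	/* 1. Ask first, before doing any compression work. */
	if (!may_zswap(&objcg)) {
		puts("zswap store rejected by cgroup limit");
		return 0;
	}

	/* 2. Compress, then charge the size that actually got stored. */
	size_t compressed = compress_page(page);
	charge_zswap(&objcg, compressed);

	printf("charged %zu bytes, total %zu/%zu\n",
	       compressed, objcg.zswap_charged, objcg.zswap_limit);
	return 0;
}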
diff --git a/mm/zswap.c b/mm/zswap.c
index 80619c8589a74f86fd256ddf72bb51e1fb12b251..f6b1c8832a4ff65a6adb4d584ab9f5e08add56f3 100644
@@ -879,7 +879,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
         * acomp instance, then get those requests done simultaneously. but in this
         * case, zswap actually does store and load page by page, there is no
         * existing method to send the second page before the first page is done
-        * in one thread doing zwap.
+        * in one thread doing zswap.
         * but in different threads running on different cpu, we have different
         * acomp instance, so multiple threads can do (de)compression in parallel.
         */
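The comment in this hunk explains why zswap keeps one acomp instance per CPU: within a single thread, pages are compressed one at a time, but different threads on different CPUs each own their own instance and can (de)compress in parallel. A rough userspace analogue of that per-thread-context pattern, using pthreads and a dummy compress step (all names here are made up for illustration), looks like this:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NR_WORKERS 4
#define NR_PAGES   8

/* Stand-in for a per-CPU acomp context: scratch space owned by one worker. */
struct fake_acomp_ctx {
	int id;
	unsigned char scratch[4096];
};

/* Dummy "compression": a worker only ever touches its own context. */
static void fake_compress(struct fake_acomp_ctx *ctx, int page)
{
	memset(ctx->scratch, page & 0xff, sizeof(ctx->scratch));
	printf("worker %d compressed page %d\n", ctx->id, page);
}

static void *worker(void *arg)
{
	struct fake_acomp_ctx *ctx = arg;

	/* Serial within one worker: the next page waits for the previous one. */
	for (int page = 0; page < NR_PAGES; page++)
		fake_compress(ctx, page);
	return NULL;
}

int main(void)
{
	pthread_t threads[NR_WORKERS];
	struct fake_acomp_ctx ctxs[NR_WORKERS];

	/* Parallel across workers: each has a private context, nothing shared. */
	for (int i = 0; i < NR_WORKERS; i++) {
		ctxs[i].id = i;
		pthread_create(&threads[i], NULL, worker, &ctxs[i]);
	}
	for (int i = 0; i < NR_WORKERS; i++)
		pthread_join(threads[i], NULL);
	return 0;
}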
@@ -1128,7 +1128,7 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
         *
         * 1. We extract the swp_entry_t to the stack, allowing
         *    zswap_writeback_entry() to pin the swap entry and
-        *    then validate the zwap entry against that swap entry's
+        *    then validate the zswap entry against that swap entry's
         *    tree using pointer value comparison. Only when that
         *    is successful can the entry be dereferenced.
         *
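The comment in this last hunk (shown only up to its first item) describes the writeback validation protocol: the swp_entry_t is copied to the stack, the swap entry is pinned, and the zswap entry is then revalidated against that swap entry's tree by comparing pointer values before it is dereferenced. A simplified userspace sketch of that "recheck the mapping still points at the same object before dereferencing" pattern follows; the types, the lock, and the array standing in for the tree are invented for illustration and the pinning step is not modeled.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 16

/* Stand-in for a zswap entry keyed by a swap slot. */
struct fake_entry {
	int slot;
	int data;
};

/* Stand-in for the per-tree mapping from swap slot to entry. */
static struct fake_entry *tree[NR_SLOTS];
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Revalidate-and-use: only dereference the entry if the tree still maps
 * the recorded slot to the exact same pointer we saw earlier.
 */
static bool use_entry_if_still_valid(int slot, struct fake_entry *expected)
{
	bool ok = false;

	pthread_mutex_lock(&tree_lock);
	if (tree[slot] == expected) {	/* pointer value comparison */
		printf("entry for slot %d still valid, data=%d\n",
		       slot, expected->data);
		ok = true;
	}
	pthread_mutex_unlock(&tree_lock);
	return ok;
}

int main(void)
{
	static struct fake_entry e = { .slot = 3, .data = 42 };
	struct fake_entry *snapshot;
	int slot;

	pthread_mutex_lock(&tree_lock);
	tree[e.slot] = &e;
	/* 1. Copy the key and the pointer to the stack under the lock. */
	slot = e.slot;
	snapshot = tree[slot];
	pthread_mutex_unlock(&tree_lock);

	/* ... another thread could remove or replace the entry here ... */

	/* 2. Recheck the tree before trusting the saved pointer. */
	if (!use_entry_if_still_valid(slot, snapshot))
		puts("entry went away, skip writeback");
	return 0;
}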