From: jianyun.gao <jianyungao89@gmail.com>
Date: Mon, 29 Sep 2025 00:26:08 +0000 (+0800)
Subject: mm: fix some typos in mm module
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=cfe28757e77d610ad25fbcb8096301a8ec4a3b38;p=users%2Fjedix%2Flinux-maple.git

mm: fix some typos in mm module

Below are some typos in the code comments:

  intevals ==> intervals
  addesses ==> addresses
  unavaliable ==> unavailable
  facor ==> factor
  droping ==> dropping
  exlusive ==> exclusive
  decription ==> description
  confict ==> conflict
  desriptions ==> descriptions
  otherwize ==> otherwise
  vlaue ==> value
  cheching ==> checking
  exisitng ==> existing
  modifed ==> modified
  differenciate ==> differentiate
  refernece ==> reference
  permissons ==> permissions
  indepdenent ==> independent
  spliting ==> splitting

Just fix it.

Link: https://lkml.kernel.org/r/20250929002608.1633825-1-jianyungao89@gmail.com
Signed-off-by: jianyun.gao <jianyungao89@gmail.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Acked-by: Chris Li <chrisl@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 2fc722f998f8..a212877ed240 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1264,7 +1264,7 @@ enum damon_sysfs_cmd {
 	DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS,
 	/*
 	 * @DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: Update the tuned monitoring
-	 * intevals.
+	 * intervals.
 	 */
 	DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS,
 	/*
diff --git a/mm/gup.c b/mm/gup.c
index a8ba5112e4d0..d2524fe09338 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2710,7 +2710,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  *
  *  *) ptes can be read atomically by the architecture.
  *
- *  *) valid user addesses are below TASK_MAX_SIZE
+ *  *) valid user addresses are below TASK_MAX_SIZE
  *
  * The last two assumptions can be relaxed by the addition of helper functions.
  *
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index fe6606d91b31..c07b7192aff2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2954,7 +2954,7 @@ typedef enum {
 	 * NOTE: This is mostly identical to MAP_CHG_NEEDED, except
 	 * that currently vma_needs_reservation() has an unwanted side
 	 * effect to either use end() or commit() to complete the
-	 * transaction. Hence it needs to differenciate from NEEDED.
+	 * transaction. Hence it needs to differentiate from NEEDED.
 	 */
 	MAP_CHG_ENFORCED = 2,
 } map_chg_state;
@@ -6027,7 +6027,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	/*
 	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
 	 * could defer the flush until now, since by holding i_mmap_rwsem we
-	 * guaranteed that the last refernece would not be dropped. But we must
+	 * guaranteed that the last reference would not be dropped. But we must
 	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
 	 * dropped and the last reference to the shared PMDs page might be
 	 * dropped as well.
@@ -7213,7 +7213,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 		} else if (unlikely(is_pte_marker(pte))) {
 			/*
 			 * Do nothing on a poison marker; page is
-			 * corrupted, permissons do not apply. Here
+			 * corrupted, permissions do not apply. Here
 			 * pte_marker_uffd_wp()==true implies !poison
 			 * because they're mutual exclusive.
 			 */
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index ba0fb1b6a5a8..96ee2bd16ee1 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -75,7 +75,7 @@ static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start,
 	if (likely(pmd_leaf(*pmd))) {
 		/*
 		 * Higher order allocations from buddy allocator must be able to
-		 * be treated as indepdenent small pages (as they can be freed
+		 * be treated as independent small pages (as they can be freed
 		 * individually).
 		 */
 		if (!PageReserved(head))
@@ -684,7 +684,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
 		ret = hugetlb_vmemmap_split_folio(h, folio);
 
 		/*
-		 * Spliting the PMD requires allocating a page, thus lets fail
+		 * Splitting the PMD requires allocating a page, thus let's fail
 		 * early once we encounter the first OOM. No point in retrying
 		 * as it can be dynamically done on remap with the memory
 		 * we get back from the vmemmap deduplication.
@@ -715,7 +715,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
 		/*
 		 * Pages to be freed may have been accumulated. If we
 		 * encounter an ENOMEM, free what we have and try again.
-		 * This can occur in the case that both spliting fails
+		 * This can occur in the case that both splitting fails
 		 * halfway and head page allocation also failed. In this
 		 * case __hugetlb_vmemmap_optimize_folio() would free memory
 		 * allowing more vmemmap remaps to occur.
diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index 8bca7fece47f..bd612e5aa7b4 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -33,7 +33,7 @@ bool kmsan_enabled __read_mostly;
 
 /*
  * Per-CPU KMSAN context to be used in interrupts, where current->kmsan is
- * unavaliable.
+ * unavailable.
  */
 DEFINE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);
 
diff --git a/mm/ksm.c b/mm/ksm.c
index 7bc726b50b2f..1a5c897b40b1 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -389,7 +389,7 @@ static unsigned long ewma(unsigned long prev, unsigned long curr)
  * exponentially weighted moving average. The new pages_to_scan value is
  * multiplied with that change factor:
  *
- *	new_pages_to_scan *= change facor
+ *	new_pages_to_scan *= change factor
  *
  * The new_pages_to_scan value is limited by the cpu min and max values. It
  * calculates the cpu percent for the last scan and calculates the new
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index 0ea5c13f10a2..864811fff409 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -519,7 +519,7 @@ static inline void __init_node_memory_type(int node, struct memory_dev_type *mem
 		 * for each device getting added in the same NUMA node
 		 * with this specific memtype, bump the map count. We
 		 * Only take memtype device reference once, so that
-		 * changing a node memtype can be done by droping the
+		 * changing a node memtype can be done by dropping the
 		 * only reference count taken here.
 		 */
 
diff --git a/mm/memory.c b/mm/memory.c
index 7e32eb79ba99..d350c7a6301b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4371,7 +4371,7 @@ static inline bool should_try_to_free_swap(struct folio *folio,
 	 * If we want to map a page that's in the swapcache writable, we
 	 * have to detect via the refcount if we're really the exclusive
 	 * user. Try freeing the swapcache to get rid of the swapcache
-	 * reference only in case it's likely that we'll be the exlusive user.
+	 * reference only in case it's likely that we'll be the exclusive user.
 	 */
 	return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
 		folio_ref_count(folio) == (1 + folio_nr_pages(folio));
@@ -5448,7 +5448,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *pa
 
 /**
  * set_pte_range - Set a range of PTEs to point to pages in a folio.
- * @vmf: Fault decription.
+ * @vmf: Fault description.
  * @folio: The folio that contains @page.
  * @page: The first page to create a PTE for.
  * @nr: The number of PTEs to create.
diff --git a/mm/secretmem.c b/mm/secretmem.c
index 62066ddb1e9c..c1bd9a4b663d 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -227,7 +227,7 @@ SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
 	struct file *file;
 	int fd, err;
 
-	/* make sure local flags do not confict with global fcntl.h */
+	/* make sure local flags do not conflict with global fcntl.h */
 	BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC);
 
 	if (!secretmem_enable || !can_set_direct_map())
diff --git a/mm/slab_common.c b/mm/slab_common.c
index bfe7c40eeee1..9ab116156444 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -256,7 +256,7 @@ out:
 * @object_size: The size of objects to be created in this cache.
 * @args: Additional arguments for the cache creation (see
 *        &struct kmem_cache_args).
- * @flags: See the desriptions of individual flags. The common ones are listed
+ * @flags: See the descriptions of individual flags. The common ones are listed
 * in the description below.
 *
 * Not to be called directly, use the kmem_cache_create() wrapper with the same
diff --git a/mm/slub.c b/mm/slub.c
index 9d73cca9f1de..08a4e482192b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2409,7 +2409,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
 			memset((char *)kasan_reset_tag(x) + inuse, 0,
 			       s->size - inuse - rsize);
 		/*
-		 * Restore orig_size, otherwize kmalloc redzone overwritten
+		 * Restore orig_size, otherwise kmalloc redzone overwritten
 		 * would be reported
 		 */
 		set_orig_size(s, x, orig_size);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 10760240a3a2..cb2392ed8e0e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1677,7 +1677,7 @@ static bool swap_entries_put_map_nr(struct swap_info_struct *si,
 
 /*
  * Check if it's the last ref of swap entry in the freeing path.
- * Qualified vlaue includes 1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM.
+ * Qualified value includes 1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM.
  */
 static inline bool __maybe_unused swap_is_last_ref(unsigned char count)
 {
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index af61b95c89e4..0630f188c847 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1578,7 +1578,7 @@ static int validate_move_areas(struct userfaultfd_ctx *ctx,
 
 	/*
 	 * For now, we keep it simple and only move between writable VMAs.
-	 * Access flags are equal, therefore cheching only the source is enough.
+	 * Access flags are equal, therefore checking only the source is enough.
 	 */
 	if (!(src_vma->vm_flags & VM_WRITE))
 		return -EINVAL;
diff --git a/mm/vma.c b/mm/vma.c
index a1ec405bda25..a2e1ae954662 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -109,7 +109,7 @@ static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_nex
 static bool is_mergeable_anon_vma(struct vma_merge_struct *vmg, bool merge_next)
 {
 	struct vm_area_struct *tgt = merge_next ? vmg->next : vmg->prev;
-	struct vm_area_struct *src = vmg->middle; /* exisitng merge case. */
+	struct vm_area_struct *src = vmg->middle; /* existing merge case. */
 	struct anon_vma *tgt_anon = tgt->anon_vma;
 	struct anon_vma *src_anon = vmg->anon_vma;
 
@@ -798,7 +798,7 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma)
 * Returns: The merged VMA if merge succeeds, or NULL otherwise.
 *
 * ASSUMPTIONS:
- * - The caller must assign the VMA to be modifed to @vmg->middle.
+ * - The caller must assign the VMA to be modified to @vmg->middle.
 * - The caller must have set @vmg->prev to the previous VMA, if there is one.
 * - The caller must not set @vmg->next, as we determine this.
 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.