From: Zhen Lei
Date: Wed, 2 Jun 2021 03:53:03 +0000 (+1000)
Subject: mm: fix spelling mistakes
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=b9c895e2d87ecee6eb9e9369b84345f1d3ecdd39;p=users%2Fjedix%2Flinux-maple.git

mm: fix spelling mistakes

Fix some spelling mistakes in comments:
each having differents usage ==> each has a different usage
statments ==> statements
adresses ==> addresses
aggresive ==> aggressive
datas ==> data
posion ==> poison
higer ==> higher
precisly ==> precisely
wont ==> won't
We moves tha ==> We move the
endianess ==> endianness

Link: https://lkml.kernel.org/r/20210519065853.7723-2-thunder.leizhen@huawei.com
Signed-off-by: Zhen Lei
Reviewed-by: Souptick Joarder
Signed-off-by: Andrew Morton
Signed-off-by: Stephen Rothwell
---

diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index b46f63dcaed31..119f130ef8f10 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -26,7 +26,7 @@ struct vmem_altmap {
 };
 
 /*
- * Specialize ZONE_DEVICE memory into multiple types each having differents
+ * Specialize ZONE_DEVICE memory into multiple types each has a different
  * usage.
  *
  * MEMORY_DEVICE_PRIVATE:
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index a5869050ad289..38a516ec7d98c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -399,7 +399,7 @@ struct mm_struct {
 		unsigned long mmap_base;	/* base of mmap area */
 		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
 #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
-		/* Base adresses for compatible mmap() */
+		/* Base addresses for compatible mmap() */
 		unsigned long mmap_compat_base;
 		unsigned long mmap_compat_legacy_base;
 #endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 11f6499165aa5..7b1a6482c7bf5 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -114,7 +114,7 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
 struct pglist_data;
 
 /*
- * Add a wild amount of padding here to ensure datas fall into separate
+ * Add a wild amount of padding here to ensure data fall into separate
  * cachelines. There are very few zone structures in the machine, so space
  * consumption is not a concern here.
  */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 8a3ce8dad2bac..57501c406f1d6 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1277,7 +1277,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 		 * could potentially call huge_pmd_unshare. Because of
 		 * this, take semaphore in write mode here and set
 		 * TTU_RMAP_LOCKED to indicate we have taken the lock
-		 * at this higer level.
+		 * at this higher level.
 		 */
 		mapping = hugetlb_page_mapping_lock_write(hpage);
 		if (mapping) {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e136013128b09..f27d386db3d10 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -783,7 +783,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
 
 	/*
 	 * {on,off}lining is constrained to full memory sections (or more
-	 * precisly to memory blocks from the user space POV).
+	 * precisely to memory blocks from the user space POV).
 	 * memmap_on_memory is an exception because it reserves initial part
 	 * of the physical memory space for vmemmaps. That space is pageblock
 	 * aligned.
@@ -1580,7 +1580,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 
 	/*
 	 * {on,off}lining is constrained to full memory sections (or more
-	 * precisly to memory blocks from the user space POV).
+	 * precisely to memory blocks from the user space POV).
 	 * memmap_on_memory is an exception because it reserves initial part
 	 * of the physical memory space for vmemmaps. That space is pageblock
 	 * aligned.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 032c5b4ff020a..f60a2e7e3f416 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3128,7 +3128,7 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
 	int cpu;
 
 	/*
-	 * Allocate in the BSS so we wont require allocation in
+	 * Allocate in the BSS so we won't require allocation in
 	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
 	 */
 	static cpumask_t cpus_with_pcps;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 24b61f44a8cb4..6212bc0336021 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2967,7 +2967,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
 		return 0;
 	}
 
-	/* swap partition endianess hack... */
+	/* swap partition endianness hack... */
 	if (swab32(swap_header->info.version) == 1) {
 		swab32s(&swap_header->info.version);
 		swab32s(&swap_header->info.last_page);