mm: fixup documentation regarding pte_numa() and PROT_NUMA
author	David Hildenbrand <david@redhat.com>
	Thu, 25 Aug 2022 16:46:59 +0000 (18:46 +0200)
committer	Andrew Morton <akpm@linux-foundation.org>
	Fri, 26 Aug 2022 05:03:30 +0000 (22:03 -0700)
pte_numa() no longer exists -- replaced by pte_protnone() -- and PROT_NUMA
probably never existed: MM_CP_PROT_NUMA also ends up using PROT_NONE.

Let's fix up the doc.
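
For context, a minimal sketch of the mechanism the corrected comments
describe. The names used below -- pte_protnone(), vma_is_accessible(),
do_numa_page(), change_prot_numa() -- are real kernel symbols, but the
two wrapper functions are hypothetical, the control flow is condensed,
and exact signatures vary by kernel version:

	/*
	 * Scanner side: task_numa_work() periodically remaps a range.
	 * change_prot_numa() applies PAGE_NONE via change_protection()
	 * with MM_CP_PROT_NUMA -- there is no dedicated "NUMA" PTE bit.
	 */
	static void numa_scan_sketch(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
	{
		change_prot_numa(vma, start, end);
	}

	/*
	 * Fault side: handle_pte_fault() tells a NUMA hinting fault
	 * apart from a real protection fault by checking whether the
	 * VMA itself permits the access.
	 */
	static vm_fault_t numa_fault_sketch(struct vm_fault *vmf)
	{
		if (pte_protnone(vmf->orig_pte) &&
		    vma_is_accessible(vmf->vma))
			return do_numa_page(vmf); /* stats + migration */
		return VM_FAULT_SIGSEGV; /* genuine PROT_NONE access error */
	}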

Link: https://lkml.kernel.org/r/20220825164659.89824-4-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d6ec33438dc118de2b404044cd40626f74846305..fb53717d571c0a51444708dc94783037b1dfac7f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -616,22 +616,22 @@ struct mm_struct {
 #endif
 #ifdef CONFIG_NUMA_BALANCING
                /*
-                * numa_next_scan is the next time that the PTEs will be marked
-                * pte_numa. NUMA hinting faults will gather statistics and
-                * migrate pages to new nodes if necessary.
+                * numa_next_scan is the next time that PTEs will be remapped
+                * PROT_NONE to trigger NUMA hinting faults; such faults gather
+                * statistics and migrate pages to new nodes if necessary.
                 */
                unsigned long numa_next_scan;
 
-               /* Restart point for scanning and setting pte_numa */
+               /* Restart point for scanning and remapping PTEs. */
                unsigned long numa_scan_offset;
 
-               /* numa_scan_seq prevents two threads setting pte_numa */
+               /* numa_scan_seq prevents two threads remapping PTEs. */
                int numa_scan_seq;
 #endif
                /*
                 * An operation with batched TLB flushing is going on. Anything
                 * that can move process memory needs to flush the TLB when
-                * moving a PROT_NONE or PROT_NUMA mapped page.
+                * moving a PROT_NONE mapped page.
                 */
                atomic_t tlb_flush_pending;
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
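
The tlb_flush_pending comment touched by the last hunk pairs with
helpers declared further down in the same header. A rough sketch of the
protocol it describes (inc_tlb_flush_pending(), dec_tlb_flush_pending(),
mm_tlb_flush_pending(), flush_tlb_mm() and flush_tlb_range() are real
interfaces; the two wrapper functions are hypothetical):

	/*
	 * Batcher: publish the pending flush before changing any PTEs,
	 * issue one TLB flush for the whole batch, then clear the flag.
	 */
	static void batched_unmap_sketch(struct mm_struct *mm)
	{
		inc_tlb_flush_pending(mm);
		/* ... unmap or write-protect many PTEs ... */
		flush_tlb_mm(mm);
		dec_tlb_flush_pending(mm);
	}

	/*
	 * Mover: before migrating a PROT_NONE mapped page, flush any
	 * stale TLB entries the batcher has not flushed yet.
	 */
	static void move_mapped_page_sketch(struct mm_struct *mm,
					    struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long end)
	{
		if (mm_tlb_flush_pending(mm))
			flush_tlb_range(vma, start, end);
		/* ... the page can be moved safely now ... */
	}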