mm: convert core mm to mm_flags_*() accessors
author Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Tue, 12 Aug 2025 15:44:11 +0000 (16:44 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:24:51 +0000 (17:24 -0700)
As part of the effort to convert mm->flags into a bitmap field, switch
existing users over to the mm_flags_*() accessors, which will, once the
conversion is complete, be the only means of accessing mm_struct flags.
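
For reference, a minimal sketch of the accessor pattern being converted
to, assuming mm->flags becomes a fixed-size bitmap.  The names mirror the
mm_flags_test()/set()/clear()/test_and_set() and __mm_flags_get_bitmap()
helpers used in the diff below, but the example struct, constant and
helper bodies are illustrative assumptions, not the kernel's actual
definitions:

  /*
   * Sketch only -- not part of this patch.  Flag storage is modelled as
   * a bitmap reached exclusively through small wrappers over the usual
   * bitop helpers, which is the shape the conversion below relies on.
   */
  #include <linux/bitmap.h>
  #include <linux/bitops.h>

  #define EXAMPLE_NUM_MM_FLAG_BITS  BITS_PER_LONG   /* placeholder size */

  struct example_mm_flags {
  	DECLARE_BITMAP(__mm_flags, EXAMPLE_NUM_MM_FLAG_BITS);
  };

  static inline bool example_mm_flags_test(int flag,
  					  const struct example_mm_flags *mm)
  {
  	return test_bit(flag, mm->__mm_flags);
  }

  static inline void example_mm_flags_set(int flag, struct example_mm_flags *mm)
  {
  	set_bit(flag, mm->__mm_flags);
  }

  static inline void example_mm_flags_clear(int flag, struct example_mm_flags *mm)
  {
  	clear_bit(flag, mm->__mm_flags);
  }

  static inline bool example_mm_flags_test_and_set(int flag,
  						  struct example_mm_flags *mm)
  {
  	return test_and_set_bit(flag, mm->__mm_flags);
  }

  static inline const unsigned long *
  example_mm_flags_get_bitmap(const struct example_mm_flags *mm)
  {
  	return mm->__mm_flags;
  }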

With this change, the dump_mm() debug output prints the flags field as a
bitmap rather than a raw value.  This is a minor change in output format,
but since it is debug-only it should have no practical impact.
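
As an aside, a hedged standalone illustration of the "%*pb" printk format
that the dump_mm() change in mm/debug.c switches to (the bit numbers and
output string here are made up):

  /* Illustration only -- not taken from this patch. */
  #include <linux/bitmap.h>
  #include <linux/bitops.h>
  #include <linux/printk.h>

  static void example_dump_flags(void)
  {
  	DECLARE_BITMAP(flags, 64);

  	bitmap_zero(flags, 64);
  	__set_bit(2, flags);		/* arbitrary bits for the demo */
  	__set_bit(17, flags);

  	/*
  	 * "%lx" printed the raw word; "%*pb" consumes (nr_bits, bitmap)
  	 * and prints a hex mask, here something like
  	 * "flags 00000000,00020004".
  	 */
  	pr_info("flags %*pb\n", 64, flags);
  }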

Otherwise, no functional changes intended.

Link: https://lkml.kernel.org/r/1eb2266f4408798a55bda00cb04545a3203aa572.1755012943.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Barry Song <baohua@kernel.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Kees Cook <kees@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: "Masami Hiramatsu (Google)" <mhiramat@kernel.org>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
14 files changed:
include/linux/huge_mm.h
include/linux/khugepaged.h
include/linux/ksm.h
include/linux/mm.h
include/linux/mman.h
include/linux/oom.h
mm/debug.c
mm/gup.c
mm/huge_memory.c
mm/khugepaged.c
mm/ksm.c
mm/mmap.c
mm/oom_kill.c
mm/util.c

index 14d424830fa88d5a977e59704febf35362af6ef1..84b7eebe0d68576745cc18fdd0bf1cf9baf470fd 100644 (file)
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -327,7 +327,7 @@ static inline bool vma_thp_disabled(struct vm_area_struct *vma,
         * example, s390 kvm.
         */
        return (vm_flags & VM_NOHUGEPAGE) ||
-              test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags);
+              mm_flags_test(MMF_DISABLE_THP, vma->vm_mm);
 }
 
 static inline bool thp_disabled_by_hw(void)
index ff612046374503c533cd9302b9de4131d9daffed..eb1946a70cff78bb6fdf946dab6633cbdf38f9cd 100644 (file)
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -2,6 +2,8 @@
 #ifndef _LINUX_KHUGEPAGED_H
 #define _LINUX_KHUGEPAGED_H
 
+#include <linux/mm.h>
+
 extern unsigned int khugepaged_max_ptes_none __read_mostly;
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern struct attribute_group khugepaged_attr_group;
@@ -20,13 +22,13 @@ extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 
 static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-       if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
+       if (mm_flags_test(MMF_VM_HUGEPAGE, oldmm))
                __khugepaged_enter(mm);
 }
 
 static inline void khugepaged_exit(struct mm_struct *mm)
 {
-       if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
+       if (mm_flags_test(MMF_VM_HUGEPAGE, mm))
                __khugepaged_exit(mm);
 }
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
index c17b955e7b0b0e25048fa08f6cb0474a6162ebe9..22e67ca7cba3a797a82de11c29fd14e3375e2aa2 100644 (file)
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -56,13 +56,13 @@ static inline long mm_ksm_zero_pages(struct mm_struct *mm)
 static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
        /* Adding mm to ksm is best effort on fork. */
-       if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
+       if (mm_flags_test(MMF_VM_MERGEABLE, oldmm))
                __ksm_enter(mm);
 }
 
 static inline int ksm_execve(struct mm_struct *mm)
 {
-       if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+       if (mm_flags_test(MMF_VM_MERGE_ANY, mm))
                return __ksm_enter(mm);
 
        return 0;
@@ -70,7 +70,7 @@ static inline int ksm_execve(struct mm_struct *mm)
 
 static inline void ksm_exit(struct mm_struct *mm)
 {
-       if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
+       if (mm_flags_test(MMF_VM_MERGEABLE, mm))
                __ksm_exit(mm);
 }
 
index 4ed4a0b9dad61b8a100fc33c9cdb9b101751ba4d..34311ebe62cc32380f3ece1a8a8db3bff4369c53 100644 (file)
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1949,7 +1949,7 @@ static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
 {
        VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
 
-       if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
+       if (!mm_flags_test(MMF_HAS_PINNED, vma->vm_mm))
                return false;
 
        return folio_maybe_dma_pinned(folio);
index de9e8e6229a44374f23a9efbe30cd6918226b2dd..0ba8a7e8b90aedeb503894536e979d79ddabe820 100644 (file)
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -201,7 +201,7 @@ static inline bool arch_memory_deny_write_exec_supported(void)
 static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
 {
        /* If MDWE is disabled, we have nothing to deny. */
-       if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
+       if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
                return false;
 
        /* If the new VMA is not executable, we have nothing to deny. */
index 1e0fc6931ce9675f61be4e49f65607b18a6fb43c..7b02bc1d0a7eaedde6f5f07fdfd0de4cc2c39e12 100644 (file)
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -91,7 +91,7 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
  */
 static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
 {
-       if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
+       if (unlikely(mm_flags_test(MMF_UNSTABLE, mm)))
                return VM_FAULT_SIGBUS;
        return 0;
 }
index b4388f4dcd4d214f92f2fffeaafa7b6ac8aee383..64ddb0c4b4be43ecb93fa1a5495a9426524e9a47 100644 (file)
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -182,7 +182,7 @@ void dump_mm(const struct mm_struct *mm)
                "start_code %lx end_code %lx start_data %lx end_data %lx\n"
                "start_brk %lx brk %lx start_stack %lx\n"
                "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
-               "binfmt %px flags %lx\n"
+               "binfmt %px flags %*pb\n"
 #ifdef CONFIG_AIO
                "ioctx_table %px\n"
 #endif
@@ -211,7 +211,7 @@ void dump_mm(const struct mm_struct *mm)
                mm->start_code, mm->end_code, mm->start_data, mm->end_data,
                mm->start_brk, mm->brk, mm->start_stack,
                mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
-               mm->binfmt, mm->flags,
+               mm->binfmt, NUM_MM_FLAG_BITS, __mm_flags_get_bitmap(mm),
 #ifdef CONFIG_AIO
                mm->ioctx_table,
 #endif
index 0bc4d140fc07fb28d9b1841aa09010c16a374f5b..134a2c86f4402daad8e610006f43b5ce8af2d587 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -475,10 +475,10 @@ EXPORT_SYMBOL_GPL(unpin_folios);
  * lifecycle.  Avoid setting the bit unless necessary, or it might cause write
  * cache bouncing on large SMP machines for concurrent pinned gups.
  */
-static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
+static inline void mm_set_has_pinned_flag(struct mm_struct *mm)
 {
-       if (!test_bit(MMF_HAS_PINNED, mm_flags))
-               set_bit(MMF_HAS_PINNED, mm_flags);
+       if (!mm_flags_test(MMF_HAS_PINNED, mm))
+               mm_flags_set(MMF_HAS_PINNED, mm);
 }
 
 #ifdef CONFIG_MMU
@@ -1693,7 +1693,7 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
                mmap_assert_locked(mm);
 
        if (flags & FOLL_PIN)
-               mm_set_has_pinned_flag(&mm->flags);
+               mm_set_has_pinned_flag(mm);
 
        /*
         * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
@@ -3218,7 +3218,7 @@ static int gup_fast_fallback(unsigned long start, unsigned long nr_pages,
                return -EINVAL;
 
        if (gup_flags & FOLL_PIN)
-               mm_set_has_pinned_flag(&current->mm->flags);
+               mm_set_has_pinned_flag(current->mm);
 
        if (!(gup_flags & FOLL_FAST_ONLY))
                might_lock_read(&current->mm->mmap_lock);
index b8bb078a1a348b9a9a57929d847db396189dd808..a2f476e7419a39fdd34144778178f9c20e105654 100644 (file)
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -251,13 +251,13 @@ struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
        if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
                return huge_zero_folio;
 
-       if (test_bit(MMF_HUGE_ZERO_FOLIO, &mm->flags))
+       if (mm_flags_test(MMF_HUGE_ZERO_FOLIO, mm))
                return READ_ONCE(huge_zero_folio);
 
        if (!get_huge_zero_folio())
                return NULL;
 
-       if (test_and_set_bit(MMF_HUGE_ZERO_FOLIO, &mm->flags))
+       if (mm_flags_test_and_set(MMF_HUGE_ZERO_FOLIO, mm))
                put_huge_zero_folio();
 
        return READ_ONCE(huge_zero_folio);
@@ -268,7 +268,7 @@ void mm_put_huge_zero_folio(struct mm_struct *mm)
        if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
                return;
 
-       if (test_bit(MMF_HUGE_ZERO_FOLIO, &mm->flags))
+       if (mm_flags_test(MMF_HUGE_ZERO_FOLIO, mm))
                put_huge_zero_folio();
 }
 
@@ -1145,7 +1145,7 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
 
        off_sub = (off - ret) & (size - 1);
 
-       if (test_bit(MMF_TOPDOWN, &current->mm->flags) && !off_sub)
+       if (mm_flags_test(MMF_TOPDOWN, current->mm) && !off_sub)
                return ret + size;
 
        ret += off_sub;
index b486c1d19b2dd2630e498c3643f6b31adcca5251..573cb9696b7a2f4dcef12fc7023beabd2cd1fe07 100644 (file)
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -410,7 +410,7 @@ static inline int hpage_collapse_test_exit(struct mm_struct *mm)
 static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
 {
        return hpage_collapse_test_exit(mm) ||
-              test_bit(MMF_DISABLE_THP, &mm->flags);
+               mm_flags_test(MMF_DISABLE_THP, mm);
 }
 
 static bool hugepage_pmd_enabled(void)
@@ -445,7 +445,7 @@ void __khugepaged_enter(struct mm_struct *mm)
 
        /* __khugepaged_exit() must not run from under us */
        VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
-       if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
+       if (unlikely(mm_flags_test_and_set(MMF_VM_HUGEPAGE, mm)))
                return;
 
        mm_slot = mm_slot_alloc(mm_slot_cache);
@@ -472,7 +472,7 @@ void __khugepaged_enter(struct mm_struct *mm)
 void khugepaged_enter_vma(struct vm_area_struct *vma,
                          vm_flags_t vm_flags)
 {
-       if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
+       if (!mm_flags_test(MMF_VM_HUGEPAGE, vma->vm_mm) &&
            hugepage_pmd_enabled()) {
                if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
                                            PMD_ORDER))
@@ -497,7 +497,7 @@ void __khugepaged_exit(struct mm_struct *mm)
        spin_unlock(&khugepaged_mm_lock);
 
        if (free) {
-               clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
+               mm_flags_clear(MMF_VM_HUGEPAGE, mm);
                mm_slot_free(mm_slot_cache, mm_slot);
                mmdrop(mm);
        } else if (mm_slot) {
@@ -1459,7 +1459,7 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
                /*
                 * Not strictly needed because the mm exited already.
                 *
-                * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
+                * mm_flags_clear(MMF_VM_HUGEPAGE, mm);
                 */
 
                /* khugepaged_mm_lock actually not necessary for the below */
index 160787bb121cc0f718f871b40f53de38741aabc9..2ef29802a49b9e6cb996d76634291155e8456f7e 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1217,8 +1217,8 @@ mm_exiting:
                        spin_unlock(&ksm_mmlist_lock);
 
                        mm_slot_free(mm_slot_cache, mm_slot);
-                       clear_bit(MMF_VM_MERGEABLE, &mm->flags);
-                       clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
+                       mm_flags_clear(MMF_VM_MERGEABLE, mm);
+                       mm_flags_clear(MMF_VM_MERGE_ANY, mm);
                        mmdrop(mm);
                } else
                        spin_unlock(&ksm_mmlist_lock);
@@ -2620,8 +2620,8 @@ no_vmas:
                spin_unlock(&ksm_mmlist_lock);
 
                mm_slot_free(mm_slot_cache, mm_slot);
-               clear_bit(MMF_VM_MERGEABLE, &mm->flags);
-               clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
+               mm_flags_clear(MMF_VM_MERGEABLE, mm);
+               mm_flags_clear(MMF_VM_MERGE_ANY, mm);
                mmap_read_unlock(mm);
                mmdrop(mm);
        } else {
@@ -2742,7 +2742,7 @@ static int __ksm_del_vma(struct vm_area_struct *vma)
 vm_flags_t ksm_vma_flags(const struct mm_struct *mm, const struct file *file,
                         vm_flags_t vm_flags)
 {
-       if (test_bit(MMF_VM_MERGE_ANY, &mm->flags) &&
+       if (mm_flags_test(MMF_VM_MERGE_ANY, mm) &&
            __ksm_should_add_vma(file, vm_flags))
                vm_flags |= VM_MERGEABLE;
 
@@ -2784,16 +2784,16 @@ int ksm_enable_merge_any(struct mm_struct *mm)
 {
        int err;
 
-       if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+       if (mm_flags_test(MMF_VM_MERGE_ANY, mm))
                return 0;
 
-       if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
+       if (!mm_flags_test(MMF_VM_MERGEABLE, mm)) {
                err = __ksm_enter(mm);
                if (err)
                        return err;
        }
 
-       set_bit(MMF_VM_MERGE_ANY, &mm->flags);
+       mm_flags_set(MMF_VM_MERGE_ANY, mm);
        ksm_add_vmas(mm);
 
        return 0;
@@ -2815,7 +2815,7 @@ int ksm_disable_merge_any(struct mm_struct *mm)
 {
        int err;
 
-       if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+       if (!mm_flags_test(MMF_VM_MERGE_ANY, mm))
                return 0;
 
        err = ksm_del_vmas(mm);
@@ -2824,7 +2824,7 @@ int ksm_disable_merge_any(struct mm_struct *mm)
                return err;
        }
 
-       clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
+       mm_flags_clear(MMF_VM_MERGE_ANY, mm);
        return 0;
 }
 
@@ -2832,9 +2832,9 @@ int ksm_disable(struct mm_struct *mm)
 {
        mmap_assert_write_locked(mm);
 
-       if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
+       if (!mm_flags_test(MMF_VM_MERGEABLE, mm))
                return 0;
-       if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+       if (mm_flags_test(MMF_VM_MERGE_ANY, mm))
                return ksm_disable_merge_any(mm);
        return ksm_del_vmas(mm);
 }
@@ -2852,7 +2852,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                if (!vma_ksm_compatible(vma))
                        return 0;
 
-               if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
+               if (!mm_flags_test(MMF_VM_MERGEABLE, mm)) {
                        err = __ksm_enter(mm);
                        if (err)
                                return err;
@@ -2912,7 +2912,7 @@ int __ksm_enter(struct mm_struct *mm)
                list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node);
        spin_unlock(&ksm_mmlist_lock);
 
-       set_bit(MMF_VM_MERGEABLE, &mm->flags);
+       mm_flags_set(MMF_VM_MERGEABLE, mm);
        mmgrab(mm);
 
        if (needs_wakeup)
@@ -2954,8 +2954,8 @@ void __ksm_exit(struct mm_struct *mm)
 
        if (easy_to_free) {
                mm_slot_free(mm_slot_cache, mm_slot);
-               clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
-               clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+               mm_flags_clear(MMF_VM_MERGE_ANY, mm);
+               mm_flags_clear(MMF_VM_MERGEABLE, mm);
                mmdrop(mm);
        } else if (mm_slot) {
                mmap_write_lock(mm);
index 7306253cc3b57d70ded7e1864baeb8045650c751..7a057e0e8da923bc45f3e9d02f35d2b1fc6ac200 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -802,7 +802,7 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *fi
                                           unsigned long pgoff, unsigned long flags,
                                           vm_flags_t vm_flags)
 {
-       if (test_bit(MMF_TOPDOWN, &mm->flags))
+       if (mm_flags_test(MMF_TOPDOWN, mm))
                return arch_get_unmapped_area_topdown(filp, addr, len, pgoff,
                                                      flags, vm_flags);
        return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
@@ -1284,7 +1284,7 @@ void exit_mmap(struct mm_struct *mm)
         * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
         * because the memory has been already freed.
         */
-       set_bit(MMF_OOM_SKIP, &mm->flags);
+       mm_flags_set(MMF_OOM_SKIP, mm);
        mmap_write_lock(mm);
        mt_clear_in_rcu(&mm->mm_mt);
        vma_iter_set(&vmi, vma->vm_end);
@@ -1859,14 +1859,14 @@ loop_out:
                        mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
                        mas_store(&vmi.mas, XA_ZERO_ENTRY);
                        /* Avoid OOM iterating a broken tree */
-                       set_bit(MMF_OOM_SKIP, &mm->flags);
+                       mm_flags_set(MMF_OOM_SKIP, mm);
                }
                /*
                 * The mm_struct is going to exit, but the locks will be dropped
                 * first.  Set the mm_struct as unstable is advisable as it is
                 * not fully initialised.
                 */
-               set_bit(MMF_UNSTABLE, &mm->flags);
+               mm_flags_set(MMF_UNSTABLE, mm);
        }
 out:
        mmap_write_unlock(mm);
index 25923cfec9c6036c6c08b600dc2776aa2ea6df56..17650f0b516e86e9b020b9d777c3b85cdb1d0863 100644 (file)
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/mm/oom_kill.c
- * 
+ *
  *  Copyright (C)  1998,2000  Rik van Riel
  *     Thanks go out to Claus Fischer for some serious inspiration and
  *     for goading me into coding this file...
@@ -218,7 +218,7 @@ long oom_badness(struct task_struct *p, unsigned long totalpages)
         */
        adj = (long)p->signal->oom_score_adj;
        if (adj == OOM_SCORE_ADJ_MIN ||
-                       test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
+                       mm_flags_test(MMF_OOM_SKIP, p->mm) ||
                        in_vfork(p)) {
                task_unlock(p);
                return LONG_MIN;
@@ -325,7 +325,7 @@ static int oom_evaluate_task(struct task_struct *task, void *arg)
         * any memory is quite low.
         */
        if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
-               if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
+               if (mm_flags_test(MMF_OOM_SKIP, task->signal->oom_mm))
                        goto next;
                goto abort;
        }
@@ -524,7 +524,7 @@ static bool __oom_reap_task_mm(struct mm_struct *mm)
         * should imply barriers already and the reader would hit a page fault
         * if it stumbled over a reaped memory.
         */
-       set_bit(MMF_UNSTABLE, &mm->flags);
+       mm_flags_set(MMF_UNSTABLE, mm);
 
        for_each_vma(vmi, vma) {
                if (vma->vm_flags & (VM_HUGETLB|VM_PFNMAP))
@@ -583,7 +583,7 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
         * under mmap_lock for reading because it serializes against the
         * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
         */
-       if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
+       if (mm_flags_test(MMF_OOM_SKIP, mm)) {
                trace_skip_task_reaping(tsk->pid);
                goto out_unlock;
        }
@@ -619,7 +619,7 @@ static void oom_reap_task(struct task_struct *tsk)
                schedule_timeout_idle(HZ/10);
 
        if (attempts <= MAX_OOM_REAP_RETRIES ||
-           test_bit(MMF_OOM_SKIP, &mm->flags))
+           mm_flags_test(MMF_OOM_SKIP, mm))
                goto done;
 
        pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
@@ -634,7 +634,7 @@ done:
         * Hide this mm from OOM killer because it has been either reaped or
         * somebody can't call mmap_write_unlock(mm).
         */
-       set_bit(MMF_OOM_SKIP, &mm->flags);
+       mm_flags_set(MMF_OOM_SKIP, mm);
 
        /* Drop a reference taken by queue_oom_reaper */
        put_task_struct(tsk);
@@ -670,7 +670,7 @@ static void wake_oom_reaper(struct timer_list *timer)
        unsigned long flags;
 
        /* The victim managed to terminate on its own - see exit_mmap */
-       if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
+       if (mm_flags_test(MMF_OOM_SKIP, mm)) {
                put_task_struct(tsk);
                return;
        }
@@ -695,7 +695,7 @@ static void wake_oom_reaper(struct timer_list *timer)
 static void queue_oom_reaper(struct task_struct *tsk)
 {
        /* mm is already queued? */
-       if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
+       if (mm_flags_test_and_set(MMF_OOM_REAP_QUEUED, tsk->signal->oom_mm))
                return;
 
        get_task_struct(tsk);
@@ -892,7 +892,7 @@ static bool task_will_free_mem(struct task_struct *task)
         * This task has already been drained by the oom reaper so there are
         * only small chances it will free some more
         */
-       if (test_bit(MMF_OOM_SKIP, &mm->flags))
+       if (mm_flags_test(MMF_OOM_SKIP, mm))
                return false;
 
        if (atomic_read(&mm->mm_users) <= 1)
@@ -977,7 +977,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
                        continue;
                if (is_global_init(p)) {
                        can_oom_reap = false;
-                       set_bit(MMF_OOM_SKIP, &mm->flags);
+                       mm_flags_set(MMF_OOM_SKIP, mm);
                        pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
                                        task_pid_nr(victim), victim->comm,
                                        task_pid_nr(p), p->comm);
@@ -1235,7 +1235,7 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
                reap = true;
        else {
                /* Error only if the work has not been done already */
-               if (!test_bit(MMF_OOM_SKIP, &mm->flags))
+               if (!mm_flags_test(MMF_OOM_SKIP, mm))
                        ret = -EINVAL;
        }
        task_unlock(p);
@@ -1251,7 +1251,7 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
         * Check MMF_OOM_SKIP again under mmap_read_lock protection to ensure
         * possible change in exit_mmap is seen
         */
-       if (!test_bit(MMF_OOM_SKIP, &mm->flags) && !__oom_reap_task_mm(mm))
+       if (!mm_flags_test(MMF_OOM_SKIP, mm) && !__oom_reap_task_mm(mm))
                ret = -EAGAIN;
        mmap_read_unlock(mm);
 
index f814e6a59ab1d354b8cd04ebf3903626f6b23a6c..d235b74f7aff78721af5f31fed4b47e912a71a2a 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -471,17 +471,17 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 
        if (mmap_is_legacy(rlim_stack)) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-               clear_bit(MMF_TOPDOWN, &mm->flags);
+               mm_flags_clear(MMF_TOPDOWN, mm);
        } else {
                mm->mmap_base = mmap_base(random_factor, rlim_stack);
-               set_bit(MMF_TOPDOWN, &mm->flags);
+               mm_flags_set(MMF_TOPDOWN, mm);
        }
 }
 #elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 {
        mm->mmap_base = TASK_UNMAPPED_BASE;
-       clear_bit(MMF_TOPDOWN, &mm->flags);
+       mm_flags_clear(MMF_TOPDOWN, mm);
 }
 #endif
 #ifdef CONFIG_MMU