www.infradead.org Git - users/dwmw2/linux.git/commitdiff
mm: compaction: clean up comment for sched contention
author: Miaohe Lin <linmiaohe@huawei.com>
Fri, 29 Apr 2022 06:16:17 +0000 (23:16 -0700)
committer: akpm <akpm@linux-foundation.org>
Fri, 29 Apr 2022 06:16:17 +0000 (23:16 -0700)
Since commit cf66f0700c8f ("mm, compaction: do not consider a need to
reschedule as contention"), async compaction won't abort when scheduling
is needed.  Correct the relevant comment accordingly.

Link: https://lkml.kernel.org/r/20220418141253.24298-5-linmiaohe@huawei.com
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Charan Teja Kalla <charante@codeaurora.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Pintu Kumar <pintu@codeaurora.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/compaction.c
mm/internal.h

index ee2ddf77191f52caa0c1af129d12a40307bc020b..e839b26fb3d8f76c82b4b8b64dad660574ee11b3 100644 (file)
@@ -513,15 +513,12 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
  * very heavily contended. The lock should be periodically unlocked to avoid
  * having disabled IRQs for a long time, even when there is nobody waiting on
  * the lock. It might also be that allowing the IRQs will result in
- * need_resched() becoming true. If scheduling is needed, async compaction
- * aborts. Sync compaction schedules.
+ * need_resched() becoming true. If scheduling is needed, compaction schedules.
  * Either compaction type will also abort if a fatal signal is pending.
  * In either case if the lock was locked, it is dropped and not regained.
  *
- * Returns true if compaction should abort due to fatal signal pending, or
- *             async compaction due to need_resched()
- * Returns false when compaction can continue (sync compaction might have
- *             scheduled)
+ * Returns true if compaction should abort due to fatal signal pending.
+ * Returns false when compaction can continue.
  */
 static bool compact_unlock_should_abort(spinlock_t *lock,
                unsigned long flags, bool *locked, struct compact_control *cc)
@@ -574,7 +571,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                /*
                 * Periodically drop the lock (if held) regardless of its
                 * contention, to give chance to IRQs. Abort if fatal signal
-                * pending or async compaction detects need_resched()
+                * pending.
                 */
                if (!(blockpfn % SWAP_CLUSTER_MAX)
                    && compact_unlock_should_abort(&cc->zone->lock, flags,
index 69a5eabe0943fdeab9e346bb20f7fe5afb3fbf0f..ddd09245a6db0f02ccd7b6b88f6e00de2a5d93c7 100644 (file)
@@ -402,7 +402,7 @@ struct compact_control {
        bool direct_compaction;         /* False from kcompactd or /proc/... */
        bool proactive_compaction;      /* kcompactd proactive compaction */
        bool whole_zone;                /* Whole zone should/has been scanned */
-       bool contended;                 /* Signal lock or sched contention */
+       bool contended;                 /* Signal lock contention */
        bool rescan;                    /* Rescanning the same pageblock */
        bool alloc_contig;              /* alloc_contig_range allocation */
 };