www.infradead.org Git - users/dwmw2/linux.git/commitdiff
mm: page_alloc: move mark_free_page() into snapshot.c
author Kefeng Wang <wangkefeng.wang@huawei.com>
Tue, 16 May 2023 06:38:17 +0000 (14:38 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 9 Jun 2023 23:25:24 +0000 (16:25 -0700)
The mark_free_page() is only used in kernel/power/snapshot.c, move it out
to reduce a bit of page_alloc.c

Link: https://lkml.kernel.org/r/20230516063821.121844-10-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Iurii Zaikin <yzaikin@google.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Len Brown <len.brown@intel.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/suspend.h
kernel/power/snapshot.c
mm/page_alloc.c

index d0d4598a7b3f6f71a433f12a8e6b5e15f263f9ff..3950a7bf33aef0733c1b055124d359baa29f26c4 100644 (file)
@@ -364,9 +364,6 @@ struct pbe {
        struct pbe *next;
 };
 
-/* mm/page_alloc.c */
-extern void mark_free_pages(struct zone *zone);
-
 /**
  * struct platform_hibernation_ops - hibernation platform support
  *
index cd8b7b35f1e8bb9589b3d819f809b56a7a46daf1..45ef0bf81c8573dfb5340c25955cac0b2877845d 100644 (file)
@@ -1228,6 +1228,58 @@ unsigned int snapshot_additional_pages(struct zone *zone)
        return 2 * rtree;
 }
 
+/*
+ * Touch the watchdog for every WD_PAGE_COUNT pages.
+ */
+#define WD_PAGE_COUNT  (128*1024)
+
+static void mark_free_pages(struct zone *zone)
+{
+       unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
+       unsigned long flags;
+       unsigned int order, t;
+       struct page *page;
+
+       if (zone_is_empty(zone))
+               return;
+
+       spin_lock_irqsave(&zone->lock, flags);
+
+       max_zone_pfn = zone_end_pfn(zone);
+       for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
+               if (pfn_valid(pfn)) {
+                       page = pfn_to_page(pfn);
+
+                       if (!--page_count) {
+                               touch_nmi_watchdog();
+                               page_count = WD_PAGE_COUNT;
+                       }
+
+                       if (page_zone(page) != zone)
+                               continue;
+
+                       if (!swsusp_page_is_forbidden(page))
+                               swsusp_unset_page_free(page);
+               }
+
+       for_each_migratetype_order(order, t) {
+               list_for_each_entry(page,
+                               &zone->free_area[order].free_list[t], buddy_list) {
+                       unsigned long i;
+
+                       pfn = page_to_pfn(page);
+                       for (i = 0; i < (1UL << order); i++) {
+                               if (!--page_count) {
+                                       touch_nmi_watchdog();
+                                       page_count = WD_PAGE_COUNT;
+                               }
+                               swsusp_set_page_free(pfn_to_page(pfn + i));
+                       }
+               }
+       }
+       spin_unlock_irqrestore(&zone->lock, flags);
+}
+
 #ifdef CONFIG_HIGHMEM
 /**
  * count_free_highmem_pages - Compute the total number of free highmem pages.
index 40fa763c507480c6ea261970b25f28db16804efb..8d306203e5556060ad1dbbafdd16a3c45b99601e 100644 (file)
@@ -2365,61 +2365,6 @@ void drain_all_pages(struct zone *zone)
        __drain_all_pages(zone, false);
 }
 
-#ifdef CONFIG_HIBERNATION
-
-/*
- * Touch the watchdog for every WD_PAGE_COUNT pages.
- */
-#define WD_PAGE_COUNT  (128*1024)
-
-void mark_free_pages(struct zone *zone)
-{
-       unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
-       unsigned long flags;
-       unsigned int order, t;
-       struct page *page;
-
-       if (zone_is_empty(zone))
-               return;
-
-       spin_lock_irqsave(&zone->lock, flags);
-
-       max_zone_pfn = zone_end_pfn(zone);
-       for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-               if (pfn_valid(pfn)) {
-                       page = pfn_to_page(pfn);
-
-                       if (!--page_count) {
-                               touch_nmi_watchdog();
-                               page_count = WD_PAGE_COUNT;
-                       }
-
-                       if (page_zone(page) != zone)
-                               continue;
-
-                       if (!swsusp_page_is_forbidden(page))
-                               swsusp_unset_page_free(page);
-               }
-
-       for_each_migratetype_order(order, t) {
-               list_for_each_entry(page,
-                               &zone->free_area[order].free_list[t], buddy_list) {
-                       unsigned long i;
-
-                       pfn = page_to_pfn(page);
-                       for (i = 0; i < (1UL << order); i++) {
-                               if (!--page_count) {
-                                       touch_nmi_watchdog();
-                                       page_count = WD_PAGE_COUNT;
-                               }
-                               swsusp_set_page_free(pfn_to_page(pfn + i));
-                       }
-               }
-       }
-       spin_unlock_irqrestore(&zone->lock, flags);
-}
-#endif /* CONFIG_PM */
-
 static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
                                                        unsigned int order)
 {