mm/khugepaged: recover from poisoned file-backed memory
author Jiaqi Yan <jiaqiyan@google.com>
Mon, 5 Dec 2022 23:40:59 +0000 (15:40 -0800)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 10 Feb 2023 23:36:01 +0000 (15:36 -0800)
Make collapse_file roll back when copying pages fails (a simplified sketch of the resulting flow follows the list below). More concretely:
- extract copying operations into a separate loop
- postpone the updates for nr_none until both scanning and copying
  succeeded
- postpone joining small xarray entries until both scanning and copying
  succeeded
- postpone the update operations to NR_XXX_THPS until both scanning and
  copying succeeded
- for non-SHMEM file, roll back filemap_nr_thps_inc if scan succeeded but
  copying failed

Tested manually (a rough userspace outline follows the steps):
0. Enable khugepaged on system under test. Mount tmpfs at /mnt/ramdisk.
1. Start a two-thread application. Each thread allocates a chunk of
   non-huge memory buffer from /mnt/ramdisk.
2. Pick 4 random buffer addresses (2 in each thread) and inject
   uncorrectable memory errors at the corresponding physical addresses.
3. Signal both threads to make their memory buffers collapsible, i.e. by
   calling madvise(MADV_HUGEPAGE).
4. Wait and then check kernel log: khugepaged is able to recover from
   poisoned pages by skipping them.
5. Signal both threads to inspect their buffer contents and make sure
   there is no data corruption.

Link: https://lkml.kernel.org/r/20221205234059.42971-3-jiaqiyan@google.com
Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: <tongtiangen@huawei.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/khugepaged.c

index ee9ec1f1a6a1da4494efabd71dfa121c782045a1..1face2ae587728306624df4da681284fae6e89ae 100644
@@ -1862,6 +1862,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 {
        struct address_space *mapping = file->f_mapping;
        struct page *hpage;
+       struct page *page;
+       struct page *tmp;
+       struct folio *folio;
        pgoff_t index = 0, end = start + HPAGE_PMD_NR;
        LIST_HEAD(pagelist);
        XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
@@ -1906,8 +1909,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 
        xas_set(&xas, start);
        for (index = start; index < end; index++) {
-               struct page *page = xas_next(&xas);
-               struct folio *folio;
+               page = xas_next(&xas);
 
                VM_BUG_ON(index != xas.xa_index);
                if (is_shmem) {
@@ -2089,10 +2091,7 @@ out_unlock:
        }
        nr = thp_nr_pages(hpage);
 
-       if (is_shmem)
-               __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
-       else {
-               __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
+       if (!is_shmem) {
                filemap_nr_thps_inc(mapping);
                /*
                 * Paired with smp_mb() in do_dentry_open() to ensure
@@ -2103,21 +2102,10 @@ out_unlock:
                smp_mb();
                if (inode_is_open_for_write(mapping->host)) {
                        result = SCAN_FAIL;
-                       __mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
                        filemap_nr_thps_dec(mapping);
                        goto xa_locked;
                }
        }
-
-       if (nr_none) {
-               __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
-               /* nr_none is always 0 for non-shmem. */
-               __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
-       }
-
-       /* Join all the small entries into a single multi-index entry */
-       xas_set_order(&xas, start, HPAGE_PMD_ORDER);
-       xas_store(&xas, hpage);
 xa_locked:
        xas_unlock_irq(&xas);
 xa_unlocked:
@@ -2130,21 +2118,35 @@ xa_unlocked:
        try_to_unmap_flush();
 
        if (result == SCAN_SUCCEED) {
-               struct page *page, *tmp;
-               struct folio *folio;
-
                /*
                 * Replacing old pages with new one has succeeded, now we
-                * need to copy the content and free the old pages.
+                * attempt to copy the contents.
                 */
                index = start;
-               list_for_each_entry_safe(page, tmp, &pagelist, lru) {
+               list_for_each_entry(page, &pagelist, lru) {
                        while (index < page->index) {
                                clear_highpage(hpage + (index % HPAGE_PMD_NR));
                                index++;
                        }
-                       copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
-                                     page);
+                       if (copy_mc_page(hpage + (page->index % HPAGE_PMD_NR),
+                                        page) > 0) {
+                               result = SCAN_COPY_MC;
+                               break;
+                       }
+                       index++;
+               }
+               while (result == SCAN_SUCCEED && index < end) {
+                       clear_highpage(hpage + (index % HPAGE_PMD_NR));
+                       index++;
+               }
+       }
+
+       if (result == SCAN_SUCCEED) {
+               /*
+                * Copying old pages to huge one has succeeded, now we
+                * need to free the old pages.
+                */
+               list_for_each_entry_safe(page, tmp, &pagelist, lru) {
                        list_del(&page->lru);
                        page->mapping = NULL;
                        page_ref_unfreeze(page, 1);
@@ -2152,12 +2154,23 @@ xa_unlocked:
                        ClearPageUnevictable(page);
                        unlock_page(page);
                        put_page(page);
-                       index++;
                }
-               while (index < end) {
-                       clear_highpage(hpage + (index % HPAGE_PMD_NR));
-                       index++;
+
+               xas_lock_irq(&xas);
+               if (is_shmem)
+                       __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
+               else
+                       __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
+
+               if (nr_none) {
+                       __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
+                       /* nr_none is always 0 for non-shmem. */
+                       __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
                }
+               /* Join all the small entries into a single multi-index entry. */
+               xas_set_order(&xas, start, HPAGE_PMD_ORDER);
+               xas_store(&xas, hpage);
+               xas_unlock_irq(&xas);
 
                folio = page_folio(hpage);
                folio_mark_uptodate(folio);
@@ -2175,8 +2188,6 @@ xa_unlocked:
                unlock_page(hpage);
                hpage = NULL;
        } else {
-               struct page *page;
-
                /* Something went wrong: roll back page cache changes */
                xas_lock_irq(&xas);
                if (nr_none) {
@@ -2210,6 +2221,13 @@ xa_unlocked:
                        xas_lock_irq(&xas);
                }
                VM_BUG_ON(nr_none);
+               /*
+                * Undo the updates of filemap_nr_thps_inc for non-SHMEM file only.
+                * This undo is not needed unless failure is due to SCAN_COPY_MC.
+                */
+               if (!is_shmem && result == SCAN_COPY_MC)
+                       filemap_nr_thps_dec(mapping);
+
                xas_unlock_irq(&xas);
 
                hpage->mapping = NULL;