From: Kefeng Wang <wangkefeng.wang@huawei.com>
Date: Fri, 16 Aug 2024 09:04:32 +0000 (+0800)
Subject: mm: memory-failure: add unmap_posioned_folio()
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=e8c250e12cc69276039cdc6daa36de5cac03fc6a;p=users%2Fjedix%2Flinux-maple.git

mm: memory-failure: add unmap_posioned_folio()

Add an unmap_posioned_folio() helper, which will be reused by
do_migrate_range() in memory hotplug soon.

Link: https://lkml.kernel.org/r/20240816090435.888946-3-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

diff --git a/mm/internal.h b/mm/internal.h
index cbe4849f6e73..cde62b16b71c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1048,6 +1048,8 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
 /*
  * mm/memory-failure.c
  */
+#ifdef CONFIG_MEMORY_FAILURE
+int unmap_posioned_folio(struct folio *folio, enum ttu_flags ttu);
 void shake_folio(struct folio *folio);
 extern int hwpoison_filter(struct page *p);
 
@@ -1068,6 +1070,13 @@ void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
 		unsigned long ksm_addr);
 unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
+#else
+static inline int unmap_posioned_folio(struct folio *folio, enum ttu_flags ttu)
+{
+	return 0;
+}
+#endif
+
 extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
         unsigned long, unsigned long,
         unsigned long, unsigned long);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7066fc84f351..44a3fb43bf8c 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1554,6 +1554,30 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
 	return ret;
 }
 
+int unmap_posioned_folio(struct folio *folio, enum ttu_flags ttu)
+{
+	if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
+		struct address_space *mapping;
+		/*
+		 * For hugetlb pages in shared mappings, try_to_unmap
+		 * could potentially call huge_pmd_unshare.  Because of
+		 * this, take semaphore in write mode here and set
+		 * TTU_RMAP_LOCKED to indicate we have taken the lock
+		 * at this higher level.
+		 */
+		mapping = hugetlb_folio_mapping_lock_write(folio);
+		if (!mapping)
+			return -EAGAIN;
+
+		try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
+		i_mmap_unlock_write(mapping);
+	} else {
+		try_to_unmap(folio, ttu);
+	}
+
+	return 0;
+}
+
 /*
  * Do all that is necessary to remove user space mappings. Unmap
  * the pages and send SIGBUS to the processes if the data was dirty.
@@ -1615,23 +1639,8 @@ static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
 	 */
 	collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
 
-	if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
-		/*
-		 * For hugetlb pages in shared mappings, try_to_unmap
-		 * could potentially call huge_pmd_unshare.  Because of
-		 * this, take semaphore in write mode here and set
-		 * TTU_RMAP_LOCKED to indicate we have taken the lock
-		 * at this higher level.
-		 */
-		mapping = hugetlb_folio_mapping_lock_write(folio);
-		if (mapping) {
-			try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
-			i_mmap_unlock_write(mapping);
-		} else
-			pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
-	} else {
-		try_to_unmap(folio, ttu);
-	}
+	if (unmap_posioned_folio(folio, ttu))
+		pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
 
 	unmap_success = !folio_mapped(folio);
 	if (!unmap_success)
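
For illustration, here is a minimal sketch of how a later caller such as
do_migrate_range() might reuse the helper once it is visible through
mm/internal.h. The wrapper function name, the TTU_IGNORE_MLOCK flag choice,
and the pr_info() message are assumptions made for this example; only
unmap_posioned_folio() itself comes from the patch above.

/*
 * Hypothetical caller sketch (not part of this patch): unmap a folio
 * that is already marked hwpoisoned before its memory goes offline.
 */
static void offline_poisoned_folio_example(struct folio *folio)
{
	/* Nothing to do unless the poisoned folio is still mapped. */
	if (!folio_test_hwpoison(folio) || !folio_mapped(folio))
		return;

	/*
	 * A nonzero return (-EAGAIN) means the hugetlb i_mmap lock
	 * could not be taken; the folio stays mapped and the caller
	 * can retry or skip it.
	 */
	if (unmap_posioned_folio(folio, TTU_IGNORE_MLOCK))
		pr_info("%#lx: could not unmap poisoned folio\n",
			folio_pfn(folio));
}

Note how centralizing the hugetlb shared-mapping locking dance inside
unmap_posioned_folio() keeps such a caller oblivious to that special case,
while the pfn-specific pr_info() stays in hwpoison_user_mappings(), the
only caller with the context for that message.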