ret = PTR_ERR(new_folio);
                                        break;
                                }
-                               copy_user_large_folio(new_folio,
-                                                     page_folio(ptepage),
-                                                     addr, dst_vma);
+                               ret = copy_user_large_folio(new_folio,
+                                                           page_folio(ptepage),
+                                                           addr, dst_vma);
                                put_page(ptepage);
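+                                       /* The copy hit a poisoned source page; drop the new folio and stop. */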
+                               if (ret) {
+                                       folio_put(new_folio);
+                                       break;
+                               }
 
                                /* Install the new hugetlb folio if src pte stable */
                                dst_ptl = huge_pte_lock(h, dst, dst_pte);
                goto out_release_all;
        }
 
-       copy_user_large_folio(new_folio, page_folio(old_page), address, vma);
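+       /* A machine check while copying the old page is reported as a hwpoison fault. */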
+       if (copy_user_large_folio(new_folio, page_folio(old_page), address, vma)) {
+               ret = VM_FAULT_HWPOISON_LARGE;
+               goto out_release_all;
+       }
        __folio_mark_uptodate(new_folio);
 
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
                        *foliop = NULL;
                        goto out;
                }
-               copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
+               ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
                folio_put(*foliop);
                *foliop = NULL;
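+                       /* The source folio is released either way; on failure free the copy too. */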
+               if (ret) {
+                       folio_put(folio);
+                       goto out;
+               }
        }
 
        /*
 
  * operation.  The target subpage will be processed last to keep its
  * cache lines hot.
  */
-static inline void process_huge_page(
+static inline int process_huge_page(
        unsigned long addr_hint, unsigned int pages_per_huge_page,
-       void (*process_subpage)(unsigned long addr, int idx, void *arg),
+       int (*process_subpage)(unsigned long addr, int idx, void *arg),
        void *arg)
 {
-       int i, n, base, l;
+       int i, n, base, l, ret;
        unsigned long addr = addr_hint &
                ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
 
                /* Process subpages at the end of huge page */
                for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
                        cond_resched();
-                       process_subpage(addr + i * PAGE_SIZE, i, arg);
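+                       /* Bail out on the first subpage that fails to copy. */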
+                       ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
+                       if (ret)
+                               return ret;
                }
        } else {
                /* If target subpage in second half of huge page */
                /* Process subpages at the begin of huge page */
                for (i = 0; i < base; i++) {
                        cond_resched();
-                       process_subpage(addr + i * PAGE_SIZE, i, arg);
+                       ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
+                       if (ret)
+                               return ret;
                }
        }
        /*
                int right_idx = base + 2 * l - 1 - i;
 
                cond_resched();
-               process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
+               ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
+               if (ret)
+                       return ret;
                cond_resched();
-               process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
+               ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
+               if (ret)
+                       return ret;
        }
+       return 0;
 }
 
 static void clear_gigantic_page(struct page *page,
        }
 }
 
-static void clear_subpage(unsigned long addr, int idx, void *arg)
+static int clear_subpage(unsigned long addr, int idx, void *arg)
 {
        struct page *page = arg;
 
        clear_user_highpage(page + idx, addr);
+       return 0;
 }
 
 void clear_huge_page(struct page *page,
        process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
 }
 
-static void copy_user_gigantic_page(struct folio *dst, struct folio *src,
-                                    unsigned long addr,
-                                    struct vm_area_struct *vma,
-                                    unsigned int pages_per_huge_page)
+static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
+                                   unsigned long addr,
+                                   struct vm_area_struct *vma,
+                                   unsigned int pages_per_huge_page)
                src_page = folio_page(src, i);
 
                cond_resched();
-               copy_user_highpage(dst_page, src_page, addr + i*PAGE_SIZE, vma);
+               if (copy_mc_user_highpage(dst_page, src_page,
+                                         addr + i*PAGE_SIZE, vma)) {
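+                                       /* Queue the poisoned source page for memory_failure() handling. */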
+                       memory_failure_queue(page_to_pfn(src_page), 0);
+                       return -EHWPOISON;
+               }
        }
+       return 0;
 }
 
 struct copy_subpage_arg {
        struct vm_area_struct *vma;
 };
 
-static void copy_subpage(unsigned long addr, int idx, void *arg)
+static int copy_subpage(unsigned long addr, int idx, void *arg)
 {
        struct copy_subpage_arg *copy_arg = arg;
 
-       copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
-                          addr, copy_arg->vma);
+       if (copy_mc_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
+                                 addr, copy_arg->vma)) {
+               memory_failure_queue(page_to_pfn(copy_arg->src + idx), 0);
+               return -EHWPOISON;
+       }
+       return 0;
 }
 
-void copy_user_large_folio(struct folio *dst, struct folio *src,
-                          unsigned long addr_hint, struct vm_area_struct *vma)
+int copy_user_large_folio(struct folio *dst, struct folio *src,
+                         unsigned long addr_hint, struct vm_area_struct *vma)
 {
        unsigned int pages_per_huge_page = folio_nr_pages(dst);
        unsigned long addr = addr_hint &
                .vma = vma,
        };
 
-       if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
-               copy_user_gigantic_page(dst, src, addr, vma,
-                                       pages_per_huge_page);
-               return;
-       }
+       if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES))
+               return copy_user_gigantic_page(dst, src, addr, vma,
+                                              pages_per_huge_page);
 
-       process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
+       return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
 }
 
 long copy_folio_from_user(struct folio *dst_folio,