PageAnonExclusive(subpage);
 
                if (folio_test_hugetlb(folio)) {
+                       /*
+                        * try_to_unmap() is only passed a hugetlb page
+                        * when the hugetlb page is poisoned.
+                        */
+                       VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
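
The new assertion documents the call-site invariant this patch relies on: the
only path that hands a hugetlb folio to try_to_unmap() is the memory-failure
(hwpoison) handler, while ordinary hugetlb teardown goes through
__unmap_hugepage_range() instead. A paraphrased sketch of that caller, with
names from mm/memory-failure.c but the exact flag set assumed rather than
quoted:

	/* Sketch, not verbatim kernel code: the page is marked poisoned
	 * before the rmap walk tears its mappings down. */
	SetPageHWPoison(page);
	i_mmap_lock_write(mapping);
	try_to_unmap(folio, TTU_IGNORE_MLOCK | TTU_RMAP_LOCKED);
	i_mmap_unlock_write(mapping);
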
                        /*
                         * huge_pmd_unshare may unmap an entire PMD page.
                         * There is no way of knowing exactly which PMDs may
                         * be cached for this mm, so we must flush them all.
                         * start/end were already adjusted above to cover this
                         * range.
                         */
                        flush_cache_range(vma, range.start, range.end);

                        if (!folio_test_anon(folio)) {
                                /*
                                 * To call huge_pmd_unshare, i_mmap_rwsem must be
                                 * held in write mode.  Caller needs to explicitly
                                 * do this outside rmap routines.
                                 */
                                VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));

                                if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
                                        flush_tlb_range(vma, range.start, range.end);
                                        mmu_notifier_invalidate_range(mm, range.start,
                                                                      range.end);
                                        page_vma_mapped_walk_done(&pvmw);
                                        break;
                                }
                        }
+                       pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
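
This added call is the core of the fix: on architectures such as arm64, a
hugetlb page can be mapped by a block of contiguous PTEs (CONT-PTE) or
contiguous PMDs (CONT-PMD), and clearing a single entry with the generic
ptep_clear_flush() would leave the other entries of the block live, so the
poisoned page could still be reached through them. huge_ptep_clear_flush()
lets the architecture clear and flush the whole block. A minimal sketch of
the idea, assuming a hypothetical NCONTIG contiguous-entry count (the real
arm64 implementation lives in arch/arm64/mm/hugetlbpage.c):

	/* Sketch only: tear down every entry backing the huge page and
	 * flush the full range so no stale translation survives. */
	static pte_t sketch_huge_ptep_clear_flush(struct vm_area_struct *vma,
						  unsigned long addr,
						  pte_t *ptep)
	{
		pte_t orig = *ptep;	/* entry value returned to the caller */
		int i;

		for (i = 0; i < NCONTIG; i++)	/* NCONTIG: assumed constant */
			pte_clear(vma->vm_mm, addr + i * PAGE_SIZE, ptep + i);
		flush_tlb_range(vma, addr, addr + NCONTIG * PAGE_SIZE);
		return orig;
	}
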
                } else {
                        flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
-               }
-
-               /*
-                * Nuke the page table entry. When having to clear
-                * PageAnonExclusive(), we always have to flush.
-                */
-               if (should_defer_flush(mm, flags) && !anon_exclusive) {
                        /*
-                        * We clear the PTE but do not flush so potentially
-                        * a remote CPU could still be writing to the folio.
-                        * If the entry was previously clean then the
-                        * architecture must guarantee that a clear->dirty
-                        * transition on a cached TLB entry is written through
-                        * and traps if the PTE is unmapped.
+                        * Nuke the page table entry. When having to clear
+                        * PageAnonExclusive(), we always have to flush.
                         */
-                       pteval = ptep_get_and_clear(mm, address, pvmw.pte);
+                       if (should_defer_flush(mm, flags) && !anon_exclusive) {
+                               /*
+                                * We clear the PTE but do not flush so potentially
+                                * a remote CPU could still be writing to the folio.
+                                * If the entry was previously clean then the
+                                * architecture must guarantee that a clear->dirty
+                                * transition on a cached TLB entry is written through
+                                * and traps if the PTE is unmapped.
+                                */
+                               pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 
-                       set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
-               } else {
-                       pteval = ptep_clear_flush(vma, address, pvmw.pte);
+                               set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
+                       } else {
+                               pteval = ptep_clear_flush(vma, address, pvmw.pte);
+                       }
                }
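
With the restructuring above, the deferred-flush branch is reachable only for
non-hugetlb pages, which is the point of moving it inside the else arm. The
deferral itself is the usual TLB-flush-batching pattern: the PTE is cleared
without a flush, set_tlb_ubc_flush_pending() records the mm as having pending
flushes, and the reclaim caller later issues one batched flush for many PTEs
via try_to_unmap_flush() (declared in mm/internal.h). The anon_exclusive case
cannot defer, the idea being that clearing PageAnonExclusive() must be
serialized against concurrent GUP-fast, and only an actually issued flush
provides that. A rough usage shape of the batching (paraphrased, not a
verbatim call sequence):

	pteval = ptep_get_and_clear(mm, address, pvmw.pte); /* no flush yet */
	set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));   /* record pending flush */
	/* ... after the rmap walk finishes, the caller runs: */
	try_to_unmap_flush();	/* one flush covers every deferred PTE */
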
 
                /*