* @end:       end address of the vmemmap virtual address range that we want to
  *             remap.
  * @reuse:     reuse address.
+ * @flags:     vmemmap_remap_walk flags controlling the remap behaviour
  *
  * Return: %0 on success, negative error code otherwise.
  */
 static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
-                              unsigned long reuse)
+                              unsigned long reuse, unsigned long flags)
 {
        LIST_HEAD(vmemmap_pages);
        struct vmemmap_remap_walk walk = {
                .remap_pte      = vmemmap_restore_pte,
                .reuse_addr     = reuse,
                .vmemmap_pages  = &vmemmap_pages,
-               .flags          = 0,
+               .flags          = flags,
        };
 
        /* See the comment in the vmemmap_remap_free(). */
 static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
 core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
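/*
 * A minimal sketch of the walk-flag bit this patch threads through; the
 * definition lives outside this hunk. The BIT() position and the flush
 * site in vmemmap_remap_range() are assumptions here; only the
 * VMEMMAP_REMAP_NO_TLB_FLUSH name is taken from the code below.
 */
/* Skip the TLB flush when the PTEs are remapped. */
#define VMEMMAP_REMAP_NO_TLB_FLUSH	BIT(1)

	/* At the end of a walk: flush unless the caller batches the flush. */
	if (walk->remap_pte && !(walk->flags & VMEMMAP_REMAP_NO_TLB_FLUSH))
		flush_tlb_kernel_range(start, end);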
 
-/**
- * hugetlb_vmemmap_restore - restore previously optimized (by
- *                          hugetlb_vmemmap_optimize()) vmemmap pages which
- *                          will be reallocated and remapped.
- * @h:         struct hstate.
- * @head:      the head page whose vmemmap pages will be restored.
- *
- * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
- * negative error code otherwise.
- */
-int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
+static int __hugetlb_vmemmap_restore(const struct hstate *h, struct page *head,
+                                     unsigned long flags)
 {
        int ret;
        unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
         * When a HugeTLB page is freed to the buddy allocator, previously
         * discarded vmemmap pages must be allocated and remapped.
         */
-       ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse);
+       ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse, flags);
        if (!ret) {
                ClearHPageVmemmapOptimized(head);
                static_branch_dec(&hugetlb_optimize_vmemmap_key);
        return ret;
 }
 
+/**
+ * hugetlb_vmemmap_restore - restore previously optimized (by
+ *                          hugetlb_vmemmap_optimize()) vmemmap pages which
+ *                          will be reallocated and remapped.
+ * @h:         struct hstate.
+ * @head:      the head page whose vmemmap pages will be restored.
+ *
+ * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
+ * negative error code otherwise.
+ */
+int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
+{
+       return __hugetlb_vmemmap_restore(h, head, 0);
+}
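/*
 * With @flags == 0 the TLB flush still happens inside
 * vmemmap_remap_alloc(), so single-page callers keep their current
 * behaviour. A hedged caller sketch (names assumed, not from this patch):
 */
	if (HPageVmemmapOptimized(head)) {
		int rc = hugetlb_vmemmap_restore(h, head);

		if (rc)
			return rc;	/* vmemmap pages remain discarded */
	}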
+
 /**
  * hugetlb_vmemmap_restore_folios - restore vmemmap for every folio on the list.
  * @h:                 hstate.
 
        list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
                if (folio_test_hugetlb_vmemmap_optimized(folio)) {
-                       ret = hugetlb_vmemmap_restore(h, &folio->page);
+                       ret = __hugetlb_vmemmap_restore(h, &folio->page,
+                                               VMEMMAP_REMAP_NO_TLB_FLUSH);
                        if (ret)
                                break;
                        restored++;
                list_move(&folio->lru, non_hvo_folios);
        }
 
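+       /*
+        * Every folio above was restored with VMEMMAP_REMAP_NO_TLB_FLUSH,
+        * so one batched flush here covers them all.
+        */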
+       if (restored)
+               flush_tlb_all();
        if (!ret)
                ret = restored;
        return ret;