 void folio_prep_large_rmappable(struct folio *folio);
 bool can_split_folio(struct folio *folio, int *pextra_pins);
-int split_huge_page_to_list(struct page *page, struct list_head *list);
+int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+               unsigned int new_order);
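+/* Split @page's folio to base (order-0) pages, preserving the old behavior. */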
 static inline int split_huge_page(struct page *page)
 {
-       return split_huge_page_to_list(page, NULL);
+       return split_huge_page_to_list_to_order(page, NULL, 0);
 }
 void deferred_split_folio(struct folio *folio);
 
        return false;
 }
 static inline int
-split_huge_page_to_list(struct page *page, struct list_head *list)
+split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+               unsigned int new_order)
 {
        return 0;
 }
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static inline int split_folio_to_list(struct folio *folio,
-               struct list_head *list)
+static inline int split_folio_to_list_to_order(struct folio *folio,
+               struct list_head *list, int new_order)
 {
-       return split_huge_page_to_list(&folio->page, list);
+       return split_huge_page_to_list_to_order(&folio->page, list, new_order);
 }
 
-static inline int split_folio(struct folio *folio)
+static inline int split_folio_to_order(struct folio *folio, int new_order)
 {
-       return split_folio_to_list(folio, NULL);
+       return split_folio_to_list_to_order(folio, NULL, new_order);
 }
 
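+/*
+ * Convenience wrappers that keep the historical order-0 behavior for
+ * existing callers, i.e. splitting all the way down to base pages.
+ */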
+#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
+#define split_folio(f) split_folio_to_order(f, 0)
+
 /*
  * archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to
  * limitations in the implementation like arm64 MTE can override this to
 
                struct lruvec *lruvec, struct list_head *list)
 {
        VM_BUG_ON_PAGE(!PageHead(head), head);
-       VM_BUG_ON_PAGE(PageCompound(tail), head);
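+       /* when splitting to a non-zero order, the tail may itself be compound */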
        VM_BUG_ON_PAGE(PageLRU(tail), head);
        lockdep_assert_held(&lruvec->lru_lock);
 
 }
 
 static void __split_huge_page_tail(struct folio *folio, int tail,
-               struct lruvec *lruvec, struct list_head *list)
+               struct lruvec *lruvec, struct list_head *list,
+               unsigned int new_order)
 {
        struct page *head = &folio->page;
        struct page *page_tail = head + tail;
         * which needs correct compound_head().
         */
        clear_compound_head(page_tail);
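+       /*
+        * When splitting to a non-zero order, each tail chunk becomes an
+        * order-@new_order compound page (folio) in its own right
+        * (new_folio is the folio view of page_tail).
+        */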
+       if (new_order) {
+               prep_compound_page(page_tail, new_order);
+               folio_prep_large_rmappable(new_folio);
+       }
 
        /* Finally unfreeze refcount. Additional reference from page cache. */
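+       /*
+        * A folio in the page or swap cache holds one reference per base
+        * page, so each new folio is unfrozen with folio_nr_pages() cache
+        * references instead of a single one.
+        */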
-       page_ref_unfreeze(page_tail, 1 + (!folio_test_anon(folio) ||
-                                         folio_test_swapcache(folio)));
+       page_ref_unfreeze(page_tail,
+               1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ?
+                            folio_nr_pages(new_folio) : 0));
 
        if (folio_test_young(folio))
                folio_set_young(new_folio);
 }
 
 static void __split_huge_page(struct page *page, struct list_head *list,
-               pgoff_t end)
+               pgoff_t end, unsigned int new_order)
 {
        struct folio *folio = page_folio(page);
        struct page *head = &folio->page;
        struct address_space *swap_cache = NULL;
        unsigned long offset = 0;
        int i, nr_dropped = 0;
+       unsigned int new_nr = 1 << new_order;
        int order = folio_order(folio);
        unsigned int nr = 1 << order;
 
        /* complete memcg works before add pages to LRU */
-       split_page_memcg(head, order, 0);
+       split_page_memcg(head, order, new_order);
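+       /*
+        * The memcg association is copied to each of the
+        * 1 << (order - new_order) new folios.
+        */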
 
        if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
                offset = swp_offset(folio->swap);
 
        ClearPageHasHWPoisoned(head);
 
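+       /*
+        * Split out each trailing chunk of new_nr base pages as its own
+        * folio, working from the end of the old folio towards its head.
+        */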
-       for (i = nr - 1; i >= 1; i--) {
-               __split_huge_page_tail(folio, i, lruvec, list);
+       for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
+               __split_huge_page_tail(folio, i, lruvec, list, new_order);
                /* Some pages can be beyond EOF: drop them from page cache */
                if (head[i].index >= end) {
                        struct folio *tail = page_folio(head + i);
                }
        }
 
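+       /*
+        * An order-0 split leaves the head as a plain page; otherwise the
+        * head becomes the first of the new order-@new_order folios.
+        */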
-       ClearPageCompound(head);
+       if (!new_order) {
+               ClearPageCompound(head);
+       } else {
+               struct folio *new_folio = (struct folio *)head;
+
+               folio_set_order(new_folio, new_order);
+       }
        unlock_page_lruvec(lruvec);
        /* Caller disabled irqs, so they are still disabled here */
 
-       split_page_owner(head, order, 0);
+       split_page_owner(head, order, new_order);
 
        /* See comment in __split_huge_page_tail() */
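+       /*
+        * As with the tails, cache references scale with the number of base
+        * pages remaining in the head, hence 1 + new_nr rather than 2 below.
+        */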
        if (PageAnon(head)) {
                /* Additional pin to swap cache */
                if (PageSwapCache(head)) {
-                       page_ref_add(head, 2);
+                       page_ref_add(head, 1 + new_nr);
                        xa_unlock(&swap_cache->i_pages);
                } else {
                        page_ref_inc(head);
                }
        } else {
                /* Additional pin to page cache */
-               page_ref_add(head, 2);
+               page_ref_add(head, 1 + new_nr);
                xa_unlock(&head->mapping->i_pages);
        }
        local_irq_enable();
        if (folio_test_swapcache(folio))
                split_swap_cluster(folio->swap);
 
-       for (i = 0; i < nr; i++) {
+       /*
+        * Set @page to its compound_head when splitting to non order-0
+        * pages: PG_locked is transferred to the compound_head of the page
+        * and the caller will unlock it, so we can skip unlocking it below.
+        */
+       if (new_order)
+               page = compound_head(page);
+
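+       /* step by new_nr: each iteration handles one new folio's head page */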
+       for (i = 0; i < nr; i += new_nr) {
                struct page *subpage = head + i;
                if (subpage == page)
                        continue;
 }
 
 /*
- * This function splits huge page into normal pages. @page can point to any
- * subpage of huge page to split. Split doesn't change the position of @page.
+ * This function splits a huge page into pages of @new_order. @page can point
+ * to any subpage of the huge page to split. Split doesn't change the position
+ * of @page.
+ *
+ * NOTE: order-1 anonymous folios are not supported because _deferred_list,
+ * which is used by partially mapped folios, is stored in subpage 2, and an
+ * order-1 folio only has subpages 0 and 1. File-backed order-1 folios are
+ * supported, since they do not use _deferred_list.
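+ *
+ * For example, with 4KB base pages, splitting an order-9 (2MB) huge page
+ * with @new_order = 4 produces 1 << (9 - 4) = 32 order-4 (64KB) folios.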
  *
  * Only caller must hold pin on the @page, otherwise split fails with -EBUSY.
  * The huge page must be locked.
  *
  * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
  *
- * Both head page and tail pages will inherit mapping, flags, and so on from
- * the hugepage.
+ * Pages of @new_order will inherit the mapping, flags, and so on from the
+ * huge page.
  *
- * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if
- * they are not mapped.
+ * GUP pin and PG_locked are transferred to @page or the compound page @page
+ * belongs to. The rest of the subpages can be freed if they are not mapped.
  *
  * Returns 0 if the hugepage is split successfully.
  * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
  * us.
  */
-int split_huge_page_to_list(struct page *page, struct list_head *list)
+int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+                                    unsigned int new_order)
 {
        struct folio *folio = page_folio(page);
        struct deferred_split *ds_queue = get_deferred_split_queue(folio);
-       XA_STATE(xas, &folio->mapping->i_pages, folio->index);
+       /* the xarray entry is split down to @new_order by xas_split() below */
+       XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;
        int extra_pins, ret;
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
 
+       /* Cannot split anonymous THP to order-1 */
+       if (new_order == 1 && folio_test_anon(folio)) {
+               VM_WARN_ONCE(1, "Cannot split to order-1 folio");
+               return -EINVAL;
+       }
+
+       if (new_order) {
+               /* Only swapping a whole PMD-mapped folio is supported */
+               if (folio_test_swapcache(folio))
+                       return -EINVAL;
+               /* Splitting shmem folios to a non-zero order is not supported */
+               if (shmem_mapping(folio->mapping)) {
+                       VM_WARN_ONCE(1,
+                               "Cannot split shmem folio to non-0 order");
+                       return -EINVAL;
+               }
+               /* No split if the file system does not support large folios */
+               if (!mapping_large_folio_support(folio->mapping)) {
+                       VM_WARN_ONCE(1,
+                               "Cannot split file folio to non-0 order");
+                       return -EINVAL;
+               }
+       }
+
        is_hzp = is_huge_zero_page(&folio->page);
        if (is_hzp) {
                pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
                if (folio_order(folio) > 1 &&
                    !list_empty(&folio->_deferred_list)) {
                        ds_queue->split_queue_len--;
-                       list_del(&folio->_deferred_list);
+                       /*
+                        * Reinitialize page_deferred_list after removing the
+                        * page from the split_queue, otherwise a subsequent
+                        * split will see list corruption when checking the
+                        * page_deferred_list.
+                        */
+                       list_del_init(&folio->_deferred_list);
                }
                spin_unlock(&ds_queue->split_queue_lock);
                if (mapping) {
                        int nr = folio_nr_pages(folio);
 
                        xas_split(&xas, folio, folio_order(folio));
-                       if (folio_test_pmd_mappable(folio)) {
+                       if (folio_test_pmd_mappable(folio) &&
+                           new_order < HPAGE_PMD_ORDER) {
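+                               /*
+                                * THP counters are only decremented when the
+                                * split result is no longer PMD-mappable.
+                                */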
                                if (folio_test_swapbacked(folio)) {
                                        __lruvec_stat_mod_folio(folio,
                                                        NR_SHMEM_THPS, -nr);
                        }
                }
 
-               __split_huge_page(page, list, end);
+               __split_huge_page(page, list, end, new_order);
                ret = 0;
        } else {
                spin_unlock(&ds_queue->split_queue_lock);