 DEFINE_MTHP_STAT_ATTR(file_alloc, MTHP_STAT_FILE_ALLOC);
 DEFINE_MTHP_STAT_ATTR(file_fallback, MTHP_STAT_FILE_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(file_fallback_charge, MTHP_STAT_FILE_FALLBACK_CHARGE);
+DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
+DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
+DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
 
 static struct attribute *stats_attrs[] = {
        &anon_fault_alloc_attr.attr,
        &file_alloc_attr.attr,
        &file_fallback_attr.attr,
        &file_fallback_charge_attr.attr,
+       &split_attr.attr,
+       &split_failed_attr.attr,
+       &split_deferred_attr.attr,
        NULL,
 };
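
For context, DEFINE_MTHP_STAT_ATTR() is the existing helper in mm/huge_memory.c that generates one read-only sysfs attribute per stat item, so each of the three added lines becomes a new file under /sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/stats/. An abbreviated sketch of what DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT) expands to (for reference, not part of this diff):

	/*
	 * Abbreviated sketch of the macro expansion: a show() method that
	 * sums the per-CPU counters for this kobject's folio order and the
	 * MTHP_STAT_SPLIT item, plus the read-only kobj_attribute that the
	 * stats_attrs[] array above points at.
	 */
	static ssize_t split_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
	{
		int order = to_thpsize(kobj)->order;

		return sysfs_emit(buf, "%lu\n",
				  sum_mthp_stat(order, MTHP_STAT_SPLIT));
	}
	static struct kobj_attribute split_attr = __ATTR_RO(split);
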
 
        XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;
-       bool is_thp = folio_test_pmd_mappable(folio);
+       int order = folio_order(folio);
        int extra_pins, ret;
        pgoff_t end;
        bool is_hzp;
                i_mmap_unlock_read(mapping);
 out:
        xas_destroy(&xas);
-       if (is_thp)
+       if (order == HPAGE_PMD_ORDER)
                count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
+       count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
        return ret;
 }
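
The new count_mthp_stat() call records the split outcome for every folio order, while the existing THP_SPLIT_PAGE vm events remain PMD-order only. Note that the order is captured at the top of the function rather than at the counting site: on a successful split the folio has already been reduced to new_order, so folio_order(folio) here would no longer report the original size. For reference, count_mthp_stat() in include/linux/huge_mm.h is approximately:

	/*
	 * Approximate shape of count_mthp_stat() (for reference, not part
	 * of this diff): bump the per-CPU counter for the given folio
	 * order and stat item; orders outside the mTHP range are ignored.
	 */
	static inline void count_mthp_stat(int order, enum mthp_stat_item item)
	{
		if (order <= 0 || order > PMD_ORDER)
			return;

		this_cpu_inc(mthp_stats.stats[order][item]);
	}
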
 
        if (list_empty(&folio->_deferred_list)) {
                if (folio_test_pmd_mappable(folio))
                        count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+               count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
                list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
                ds_queue->split_queue_len++;
 #ifdef CONFIG_MEMCG
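
With all three counters wired up, each enabled mTHP size exposes split, split_failed, and split_deferred in its stats directory. A minimal userspace sketch that reads one of the new files (hypothetical helper, not part of the kernel tree; assumes a 64kB mTHP size is enabled):

	#include <stdio.h>

	/* Read one per-order mTHP stat, e.g.
	 * /sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/split_deferred
	 * Returns the counter value, or -1 on error.
	 */
	static long read_mthp_stat(const char *size_kb, const char *stat)
	{
		char path[256];
		long val = -1;
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/kernel/mm/transparent_hugepage/hugepages-%skB/stats/%s",
			 size_kb, stat);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
		return val;
	}

	int main(void)
	{
		printf("64kB split_deferred: %ld\n",
		       read_mthp_stat("64", "split_deferred"));
		return 0;
	}
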