struct kmem_cache *s;
 };
 
+/*
+ * Free an object that was allocated straight from the page allocator
+ * by a large kmalloc() rather than from a slab cache (callers reach
+ * here only when !PageSlab(page)): run the kfree hook on the mapping,
+ * decrement the NR_SLAB_UNRECLAIMABLE_B vmstat counter by the
+ * allocation size, then return the pages to the page allocator.
+ */
+static inline void free_nonslab_page(struct page *page)
+{
+       unsigned int order = compound_order(page);
+
+       /* Large kmalloc allocations are compound pages; anything else is a bug. */
+       VM_BUG_ON_PAGE(!PageCompound(page), page);
+       kfree_hook(page_address(page));
+       mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
+       __free_pages(page, order);
+}
+
 /*
  * This function progressively scans the array with free objects (with
  * a limited look ahead) and extract objects belonging to the same
        if (!s) {
                /* Handle kalloc'ed objects */
                if (unlikely(!PageSlab(page))) {
-                       BUG_ON(!PageCompound(page));
-                       kfree_hook(object);
-                       __free_pages(page, compound_order(page));
+                       free_nonslab_page(page);
                        p[size] = NULL; /* mark object processed */
                        return size;
                }
 
        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
-               unsigned int order = compound_order(page);
-
-               BUG_ON(!PageCompound(page));
-               kfree_hook(object);
-               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
-                                     -(PAGE_SIZE << order));
-               __free_pages(page, order);
+               free_nonslab_page(page);
                return;
        }
        slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);