spin_unlock(&n->list_lock);
 }
 
+static inline void __remove_partial(struct kmem_cache_node *n,
+                                       struct page *page)
+{
+       list_del(&page->lru);
+       n->nr_partial--;
+}
+
 static void remove_partial(struct kmem_cache *s, struct page *page)
 {
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
        spin_lock(&n->list_lock);
-       list_del(&page->lru);
-       n->nr_partial--;
+       __remove_partial(n, page);
        spin_unlock(&n->list_lock);
 }
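
The new __remove_partial() assumes its caller already holds n->list_lock;
remove_partial() keeps taking the lock itself, while the other call sites
converted below are all reached with the lock held. If that discipline ever
needed enforcing, a debug variant could assert it. A minimal sketch, not
part of this patch, assuming a lockdep-enabled build:

	static inline void __remove_partial(struct kmem_cache_node *n,
						struct page *page)
	{
		/* hypothetical check, not in the patch: caller must hold list_lock */
		lockdep_assert_held(&n->list_lock);
		list_del(&page->lru);
		n->nr_partial--;
	}
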
 
 static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
                                                        struct page *page)
 {
        if (slab_trylock(page)) {
-               list_del(&page->lru);
-               n->nr_partial--;
+               __remove_partial(n, page);
                __SetPageSlubFrozen(page);
                return 1;
        }
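
lock_and_freeze_slab() can drop the open-coded removal because its caller
already holds n->list_lock around the partial-list walk. For reference, a
simplified sketch of that calling pattern, modeled on get_partial_node()
in mainline slub.c of this era (not part of the patch):

	static struct page *get_partial_node(struct kmem_cache_node *n)
	{
		struct page *page;

		if (!n || !n->nr_partial)
			return NULL;

		spin_lock(&n->list_lock);	/* list_lock held here... */
		list_for_each_entry(page, &n->partial, lru)
			/* ...so __remove_partial() inside is safe */
			if (lock_and_freeze_slab(n, page))
				goto out;
		page = NULL;
	out:
		spin_unlock(&n->list_lock);
		return page;
	}
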
        spin_lock_irqsave(&n->list_lock, flags);
        list_for_each_entry_safe(page, h, &n->partial, lru) {
                if (!page->inuse) {
-                       list_del(&page->lru);
+                       __remove_partial(n, page);
                        discard_slab(s, page);
-                       n->nr_partial--;
                } else {
                        list_slab_objects(s, page,
                                "Objects remaining on kmem_cache_close()");
                        if (!page->inuse && slab_trylock(page)) {
                                /*
                                 * Must hold slab lock here because slab_free
                                 * may have freed the last object and be
                                 * waiting to release the slab.
                                 */
-                               list_del(&page->lru);
-                               n->nr_partial--;
+                               __remove_partial(n, page);
                                slab_unlock(page);
                                discard_slab(s, page);
                        } else {
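
With all four sites converted, every removal from n->partial funnels
through one helper, so n->nr_partial can no longer drift out of sync with
the list itself. For reference, the locking context at each caller (as I
read mainline slub.c around this series; worth re-checking against the
tree the patch applies to):

	remove_partial()        takes n->list_lock around the helper call
	lock_and_freeze_slab()  runs under n->list_lock taken by its caller
	free_partial()          runs under spin_lock_irqsave(&n->list_lock, ...)
	kmem_cache_shrink()     runs under spin_lock_irqsave(&n->list_lock, ...)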