After moving the lockless_freelist to kmem_cache_cpu, we no longer need
page->lockless_freelist. Restructure the use of the struct page fields in
such a way that we never touch the mapping field.

This in turn allows us to remove the special casing of SLUB when determining
the mapping of a page (needed for corner cases on machines with virtually
indexed caches that must flush the caches of processors mapping a page).
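
To illustrate why the new layout is safe, here is a minimal, self-contained
sketch (hypothetical names, not kernel code), assuming the usual Linux ABIs
where sizeof(void *) == sizeof(unsigned long): a lone pointer member in the
union overlays only the first word of the private/mapping pair, so storing
the slab pointer can never clobber mapping.

	#include <assert.h>

	/* Miniature of the struct page union after this patch. */
	struct mini_page {
		union {
			struct {
				unsigned long private;	/* overlaid by 'slab' */
				void *mapping;		/* never overlaid */
			};
			void *slab;	/* SLUB: aliases 'private' only */
		};
	};

	int main(void)
	{
		struct mini_page p = { .mapping = (void *)0x1 };

		p.slab = 0;	/* writes the first word only */
		assert(p.mapping == (void *)0x1);
		return 0;
	}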
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
        VM_BUG_ON(PageSlab(page));
        if (unlikely(PageSwapCache(page)))
                mapping = &swapper_space;
-#ifdef CONFIG_SLUB
-       else if (unlikely(PageSlab(page)))
-               mapping = NULL;
-#endif
        else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
                mapping = NULL;
        return mapping;
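
For reference, the whole helper as it reads after this hunk (a
reconstruction: the signature and the initial assignment from page->mapping
follow the mainline page_mapping() of that era and are not part of the hunk
above):

	static inline struct address_space *page_mapping(struct page *page)
	{
		struct address_space *mapping = page->mapping;

		VM_BUG_ON(PageSlab(page));
		if (unlikely(PageSwapCache(page)))
			mapping = &swapper_space;
		else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
			mapping = NULL;
		return mapping;
	}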
 
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
            spinlock_t ptl;
 #endif
-           struct {                    /* SLUB uses */
-               void **lockless_freelist;
-               struct kmem_cache *slab;        /* Pointer to slab */
-           };
-           struct {
-               struct page *first_page;        /* Compound pages */
-           };
+           struct kmem_cache *slab;    /* SLUB: Pointer to slab */
+           struct page *first_page;    /* Compound tail pages */
        };
        union {
                pgoff_t index;          /* Our offset within mapping. */
 
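Because the slab pointer now lives directly in the union (overlaying
private, never mapping), resolving the cache that owns an object stays a
plain field read. A hedged sketch, assuming the virt_to_head_page() helper
already in mainline at the time; slab_of() is a hypothetical name:

	/* Find the kmem_cache backing an object, without reading page->mapping. */
	static struct kmem_cache *slab_of(const void *object)
	{
		struct page *page = virt_to_head_page(object);

		return page->slab;
	}
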
        set_freepointer(s, last, NULL);
 
        page->freelist = start;
-       page->lockless_freelist = NULL;
        page->inuse = 0;
 out:
        if (flags & __GFP_WAIT)
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
                - pages);
 
-       page->mapping = NULL;
        __free_pages(page, s->order);
 }