};
 
 void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
-void __init page_cgroup_init(void);
+
+#ifdef CONFIG_SPARSEMEM
+static inline void __init page_cgroup_init_flatmem(void)
+{
+}
+extern void __init page_cgroup_init(void);
+#else
+void __init page_cgroup_init_flatmem(void);
+static inline void __init page_cgroup_init(void)
+{
+}
+#endif
+
 struct page_cgroup *lookup_page_cgroup(struct page *page);
 
 enum {
@@ -87,6 +99,10 @@ static inline void page_cgroup_init(void)
 {
 }
 
+static inline void __init page_cgroup_init_flatmem(void)
+{
+}
+
 #endif
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -47,8 +47,6 @@ static int __init alloc_node_page_cgroup(int nid)
        struct page_cgroup *base, *pc;
        unsigned long table_size;
        unsigned long start_pfn, nr_pages, index;
-       struct page *page;
-       unsigned int order;
 
        start_pfn = NODE_DATA(nid)->node_start_pfn;
        nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -57,13 +55,11 @@ static int __init alloc_node_page_cgroup(int nid)
 		return 0;
 
        table_size = sizeof(struct page_cgroup) * nr_pages;
-       order = get_order(table_size);
-       page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
-       if (!page)
-               page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
-       if (!page)
+
+       base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+                       table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+       if (!base)
                return -ENOMEM;
-       base = page_address(page);
        for (index = 0; index < nr_pages; index++) {
                pc = base + index;
                __init_page_cgroup(pc, start_pfn + index);
@@ -73,7 +69,7 @@ static int __init alloc_node_page_cgroup(int nid)
 	return 0;
 }
 
-void __init page_cgroup_init(void)
+void __init page_cgroup_init_flatmem(void)
 {
 
        int nid, fail;
@@ -117,16 +113,11 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
 	if (!section->page_cgroup) {
                nid = page_to_nid(pfn_to_page(pfn));
                table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-               if (slab_is_available()) {
-                       base = kmalloc_node(table_size,
-                                       GFP_KERNEL | __GFP_NOWARN, nid);
-                       if (!base)
-                               base = vmalloc_node(table_size, nid);
-               } else {
-                       base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-                               table_size,
-                               PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-               }
+               VM_BUG_ON(!slab_is_available());
+               base = kmalloc_node(table_size,
+                               GFP_KERNEL | __GFP_NOWARN, nid);
+               if (!base)
+                       base = vmalloc_node(table_size, nid);
        } else {
                /*
                 * We don't have to allocate page_cgroup again, but