The stack-usage code in fs/proc/array.c needs walk_page_range(), which relies on
MMU page tables; stub it out on nommu and build pagewalk.o only when CONFIG_MMU
is set.
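
For reference, a minimal sketch of the kind of walk_page_range() use the stack
accounting depends on.  The callback, the count_stack_usage() helper and the
usage arithmetic below are illustrative only, not copied from fs/proc/array.c;
they assume <linux/mm.h> and a struct stack_stats like the one in the hunk
below:

	/* Sketch only: callback and helper names are illustrative. */
	static int stack_usage_pmd_range(pmd_t *pmd, unsigned long addr,
					 unsigned long end, struct mm_walk *walk)
	{
		struct stack_stats *ss = walk->private;

		/* remember how far into the stack VMA mappings extend */
		ss->usage = addr - ss->startpage + PAGE_SIZE;
		return 0;
	}

	static unsigned long count_stack_usage(struct vm_area_struct *vma,
					       struct stack_stats *ss)
	{
		struct mm_walk stack_walk = {
			.pmd_entry	= stack_usage_pmd_range,
			.mm		= vma->vm_mm,
			.private	= ss,
		};

		/*
		 * walk_page_range() iterates the pgd/pud/pmd/pte levels of
		 * vma->vm_mm, which only exist when the kernel has an MMU.
		 */
		walk_page_range(vma->vm_start, vma->vm_end, &stack_walk);
		return ss->usage;
	}

On nommu there are no page tables to walk, so this machinery cannot be built
there, hence the CONFIG_MMU stub and the Makefile change below.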
Reported-by: Michal Simek <monstr@monstr.eu>
Tested-by: Michal Simek <monstr@monstr.eu>
Cc: Stefani Seibold <stefani@seibold.net>
Cc: David Howells <dhowells@redhat.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greg Ungerer <gerg@snapgear.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
                        p->nivcsw);
 }
 
+#ifdef CONFIG_MMU
+
 struct stack_stats {
        struct vm_area_struct *vma;
        unsigned long   startpage;
                mmput(mm);
        }
 }
+#else
+static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
+{
+}
+#endif         /* CONFIG_MMU */
 
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task)
 
 mmu-y                  := nommu.o
 mmu-$(CONFIG_MMU)      := fremap.o highmem.o madvise.o memory.o mincore.o \
                           mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
-                          vmalloc.o
+                          vmalloc.o pagewalk.o
 
 obj-y                  := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
                           maccess.o page_alloc.o page-writeback.o \
                           readahead.o swap.o truncate.o vmscan.o shmem.o \
                           prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
                           page_isolation.o mm_init.o mmu_context.o \
-                          pagewalk.o $(mmu-y)
+                          $(mmu-y)
 obj-y += init-mm.o
 
 obj-$(CONFIG_BOUNCE)   += bounce.o