extern void thread_stack_cache_init(void);
 
 #ifdef CONFIG_DEBUG_STACK_USAGE
+/*
+ * Out-of-line when stack-usage debugging is enabled (the definition
+ * lives in the scheduler core); otherwise the inline stub below
+ * compiles to a constant 0, so callers no longer need their own
+ * #ifdef CONFIG_DEBUG_STACK_USAGE guards.
+ */
+unsigned long stack_not_used(struct task_struct *p);
+#else
 static inline unsigned long stack_not_used(struct task_struct *p)
 {
-       unsigned long *n = end_of_stack(p);
-
-       do {    /* Skip over canary */
-# ifdef CONFIG_STACK_GROWSUP
-               n--;
-# else
-               n++;
-# endif
-       } while (!*n);
-
-# ifdef CONFIG_STACK_GROWSUP
-       return (unsigned long)end_of_stack(p) - (unsigned long)n;
-# else
-       return (unsigned long)n - (unsigned long)end_of_stack(p);
-# endif
+       /* Stack-usage accounting compiled out: report zero unused bytes. */
+       return 0;
 }
 #endif
 extern void set_task_stack_end_magic(struct task_struct *tsk);
 
 }
 
 #ifdef CONFIG_DEBUG_STACK_USAGE
+/*
+ * stack_not_used - number of bytes of @p's kernel stack that were
+ * never touched.
+ *
+ * Scans from end_of_stack(@p) toward the in-use end, skipping the
+ * canary word, until the first non-zero word is found; the byte
+ * distance covered is returned.  This relies on the unused portion
+ * of the stack still being zero-filled.  Scan direction depends on
+ * CONFIG_STACK_GROWSUP.
+ */
+unsigned long stack_not_used(struct task_struct *p)
+{
+       unsigned long *n = end_of_stack(p);
+
+       do {    /* Skip over canary */
+# ifdef CONFIG_STACK_GROWSUP
+               n--;
+# else
+               n++;
+# endif
+       } while (!*n);
+
+       /* Byte distance between the stack end and the first used word. */
+# ifdef CONFIG_STACK_GROWSUP
+       return (unsigned long)end_of_stack(p) - (unsigned long)n;
+# else
+       return (unsigned long)n - (unsigned long)end_of_stack(p);
+# endif
+}
+
 /* Count the maximum pages reached in kernel stacks */
 static inline void kstack_histogram(unsigned long used_stack)
 {
 
 
 void sched_show_task(struct task_struct *p)
 {
-       unsigned long free = 0;
+       unsigned long free;
        int ppid;
 
        if (!try_get_task_stack(p))
 
        if (task_is_running(p))
                pr_cont("  running task    ");
-#ifdef CONFIG_DEBUG_STACK_USAGE
        free = stack_not_used(p);
-#endif
        ppid = 0;
        rcu_read_lock();
        if (pid_alive(p))