 struct inodes_stat_t inodes_stat;
 
 static DEFINE_PER_CPU(unsigned int, nr_inodes);
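+/*
+ * Give nr_unused the same treatment as nr_inodes: each CPU counts
+ * locally and a global total is only summed on demand, keeping the
+ * inode LRU hot paths off a shared counter cacheline.
+ */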
+static DEFINE_PER_CPU(unsigned int, nr_unused);
 
 static struct kmem_cache *inode_cachep __read_mostly;
 
 
 static inline int get_nr_inodes_unused(void)
 {
-       return inodes_stat.nr_unused;
+       int i;
+       int sum = 0;
+       for_each_possible_cpu(i)
+               sum += per_cpu(nr_unused, i);
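+       /*
+        * An inode can go onto the LRU on one CPU and come off on
+        * another, so a single per-cpu delta may be negative and the
+        * summed total can transiently dip below zero: clamp it.
+        */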
+       return sum < 0 ? 0 : sum;
 }
 
 int get_nr_dirty_inodes(void)
@@ ... @@ int proc_nr_inodes(ctl_table *table, int write,
                   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
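+       /*
+        * The sysctl still reads the legacy inodes_stat struct, so
+        * refresh its fields from the per-cpu sums before handing
+        * off to proc_dointvec().
+        */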
        inodes_stat.nr_inodes = get_nr_inodes();
+       inodes_stat.nr_unused = get_nr_inodes_unused();
        return proc_dointvec(table, write, buffer, lenp, ppos);
 }
 #endif
@@ ... @@ static void inode_lru_list_add(struct inode *inode)
        spin_lock(&inode_lru_lock);
        if (list_empty(&inode->i_lru)) {
                list_add(&inode->i_lru, &inode_lru);
-               inodes_stat.nr_unused++;
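+               /* first time on the LRU: account it on the local CPU */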
+               this_cpu_inc(nr_unused);
        }
        spin_unlock(&inode_lru_lock);
 }
@@ ... @@ static void inode_lru_list_del(struct inode *inode)
        spin_lock(&inode_lru_lock);
        if (!list_empty(&inode->i_lru)) {
                list_del_init(&inode->i_lru);
-               inodes_stat.nr_unused--;
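+               /*
+                * This dec may land on a different CPU than the inc
+                * that paired with it; only the summed total matters.
+                */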
+               this_cpu_dec(nr_unused);
        }
        spin_unlock(&inode_lru_lock);
 }
@@ ... @@ static void prune_icache(int nr_to_scan)
                    (inode->i_state & ~I_REFERENCED)) {
                        list_del_init(&inode->i_lru);
                        spin_unlock(&inode->i_lock);
-                       inodes_stat.nr_unused--;
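+                       /* busy inode was taken off the LRU: uncount it */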
+                       this_cpu_dec(nr_unused);
                        continue;
                }
 
                spin_unlock(&inode->i_lock);
 
                list_move(&inode->i_lru, &freeable);
-               inodes_stat.nr_unused--;
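+               /* moved to the local freeable list, hence off the LRU */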
+               this_cpu_dec(nr_unused);
        }
        if (current_is_kswapd())
                __count_vm_events(KSWAPD_INODESTEAL, reap);