*
  * inode->i_lock protects:
  *   inode->i_state, inode->i_hash, __iget()
+ * inode_lru_lock protects:
+ *   inode_lru, inode->i_lru
  *
  * Lock ordering:
  * inode_lock
  *   inode->i_lock
+ *     inode_lru_lock
  */
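For illustration, a minimal sketch (not part of the patch) of the nesting order documented above; example_nested_locking() is a hypothetical helper, not kernel code:

	/* Hypothetical: take the three locks in the documented order. */
	static void example_nested_locking(struct inode *inode)
	{
		spin_lock(&inode_lock);		/* outermost */
		spin_lock(&inode->i_lock);	/* protects i_state, i_hash */
		spin_lock(&inode_lru_lock);	/* innermost: protects i_lru */
		/* ... manipulate inode state and LRU linkage here ... */
		spin_unlock(&inode_lru_lock);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_lock);
	}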
 
 /*
  */
 
 static LIST_HEAD(inode_lru);
+static DEFINE_SPINLOCK(inode_lru_lock);
 static struct hlist_head *inode_hashtable __read_mostly;
 
 /*
 
 static void inode_lru_list_add(struct inode *inode)
 {
+       spin_lock(&inode_lru_lock);
        if (list_empty(&inode->i_lru)) {
                list_add(&inode->i_lru, &inode_lru);
                inodes_stat.nr_unused++;
        }
+       spin_unlock(&inode_lru_lock);
 }
 
 static void inode_lru_list_del(struct inode *inode)
 {
+       spin_lock(&inode_lru_lock);
        if (!list_empty(&inode->i_lru)) {
                list_del_init(&inode->i_lru);
                inodes_stat.nr_unused--;
        }
+       spin_unlock(&inode_lru_lock);
 }
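A note on the two helpers above: list_del_init() re-links the entry to itself, so list_empty(&inode->i_lru) doubles as an "is this inode on the LRU?" test, which is what makes both the add and the delete idempotent under inode_lru_lock. A minimal sketch of that idiom (illustrative only; mirrors the kernel's circular-list semantics):

	struct list_head { struct list_head *next, *prev; };

	static inline int list_empty(const struct list_head *head)
	{
		return head->next == head;	/* self-linked == on no list */
	}

	static inline void list_del_init(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
		entry->next = entry;		/* re-initialise ... */
		entry->prev = entry;		/* ... so list_empty() is true */
	}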
 
 static inline void __inode_sb_list_add(struct inode *inode)
                }
 
                inode->i_state |= I_FREEING;
-               if (!(inode->i_state & (I_DIRTY | I_SYNC)))
-                       inodes_stat.nr_unused--;
+               inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
-               list_move(&inode->i_lru, &dispose);
+               list_add(&inode->i_lru, &dispose);
        }
        spin_unlock(&inode_lock);
 
                }
 
                inode->i_state |= I_FREEING;
-               if (!(inode->i_state & (I_DIRTY | I_SYNC)))
-                       inodes_stat.nr_unused--;
+               inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
-               list_move(&inode->i_lru, &dispose);
+               list_add(&inode->i_lru, &dispose);
        }
        spin_unlock(&inode_lock);
 
 
 /*
  * Scan `goal' inodes on the unused list for freeable ones. They are moved to a
- * temporary list and then are freed outside inode_lock by dispose_list().
+ * temporary list and then are freed outside inode_lru_lock by dispose_list().
  *
  * Any inodes which are pinned purely because of attached pagecache have their
  * pagecache removed.  If the inode has metadata buffers attached to
 
        down_read(&iprune_sem);
        spin_lock(&inode_lock);
+       spin_lock(&inode_lru_lock);
        for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
                struct inode *inode;
 
 
                inode = list_entry(inode_lru.prev, struct inode, i_lru);
 
+               /*
+                * we are inverting the inode_lru_lock/inode->i_lock order here,
+                * so use a trylock. If we fail to get the lock, just move the
+                * inode to the back of the list so we don't spin on it.
+                */
+               if (!spin_trylock(&inode->i_lock)) {
+                       list_move(&inode->i_lru, &inode_lru);
+                       continue;
+               }
+
                /*
                 * Referenced or dirty inodes are still in use. Give them
                 * another pass through the LRU as we cannot reclaim them now.
                 */
-               spin_lock(&inode->i_lock);
                if (atomic_read(&inode->i_count) ||
                    (inode->i_state & ~I_REFERENCED)) {
                        spin_unlock(&inode->i_lock);
                if (inode_has_buffers(inode) || inode->i_data.nrpages) {
                        __iget(inode);
                        spin_unlock(&inode->i_lock);
+                       spin_unlock(&inode_lru_lock);
                        spin_unlock(&inode_lock);
                        if (remove_inode_buffers(inode))
                                reap += invalidate_mapping_pages(&inode->i_data,
                                                                0, -1);
                        iput(inode);
                        spin_lock(&inode_lock);
+                       spin_lock(&inode_lru_lock);
 
                        if (inode != list_entry(inode_lru.next,
                                                struct inode, i_lru))
                                continue;       /* wrong inode or list_empty */
-                       spin_lock(&inode->i_lock);
+                       /* avoid lock inversions with trylock */
+                       if (!spin_trylock(&inode->i_lock))
+                               continue;
                        if (!can_unuse(inode)) {
                                spin_unlock(&inode->i_lock);
                                continue;
                __count_vm_events(KSWAPD_INODESTEAL, reap);
        else
                __count_vm_events(PGINODESTEAL, reap);
+       spin_unlock(&inode_lru_lock);
        spin_unlock(&inode_lock);
 
        dispose_list(&freeable);
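
One more pattern worth calling out from the scan loop above: prune_icache() holds inode_lru_lock and then wants inode->i_lock, the reverse of the i_lock -> inode_lru_lock order documented at the top, so both acquisitions use spin_trylock() and defer the inode on contention instead of blocking. A generic sketch of that trylock-and-defer idiom (hypothetical types; struct item and scan_tail() are not kernel code):

	struct item {
		spinlock_t	lock;
		struct list_head node;
	};

	/* Caller holds the list lock; item locks normally nest outside it. */
	static void scan_tail(struct list_head *lru)
	{
		struct item *it = list_entry(lru->prev, struct item, node);

		if (!spin_trylock(&it->lock)) {
			/*
			 * The holder of it->lock may itself be spinning on
			 * the list lock, so blocking here could deadlock.
			 * Rotate the item away and retry it later.
			 */
			list_move(&it->node, lru);
			return;
		}
		/* ... it->lock taken against the usual order, safely ... */
		spin_unlock(&it->lock);
	}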