--- a/fs/dcache.c
+++ b/fs/dcache.c
                if (dentry->d_op->d_delete(dentry))
                        return false;
        }
+
+       if (unlikely(dentry->d_flags & DCACHE_DONTCACHE))
+               return false;
+
        /* retain; LRU fodder */
        dentry->d_lockref.count--;
        if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
                d_lru_add(dentry);
        else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
                dentry->d_flags |= DCACHE_REFERENCED;
        return true;
 }
 
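+/*
+ * Mark an inode and every dentry aliasing it as not worth caching:
+ * DCACHE_DONTCACHE makes retain_dentry() above refuse to keep the
+ * dentry on the LRU at the final dput(), and I_DONTCACHE makes
+ * generic_drop_inode() evict the inode once its refcount hits zero.
+ * Takes i_lock and each alias's d_lock itself, so the caller must
+ * not hold them.
+ */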
+void d_mark_dontcache(struct inode *inode)
+{
+       struct dentry *de;
+
+       spin_lock(&inode->i_lock);
+       hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
+               spin_lock(&de->d_lock);
+               de->d_flags |= DCACHE_DONTCACHE;
+               spin_unlock(&de->d_lock);
+       }
+       inode->i_state |= I_DONTCACHE;
+       spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL(d_mark_dontcache);
+
 /*
  * Finish off a dentry we've decided to kill.
  * dentry->d_lock must be held, returns with it unlocked.
 
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
         */
        iflags = XFS_INEW;
        if (flags & XFS_IGET_DONTCACHE)
-               VFS_I(ip)->i_state |= I_DONTCACHE;
+               d_mark_dontcache(VFS_I(ip));
        ip->i_udquot = NULL;
        ip->i_gdquot = NULL;
        ip->i_pdquot = NULL;
 
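The XFS hunk above is the only caller converted here; a filesystem wanting
the same semantics would follow the same pattern. A minimal sketch, assuming
a hypothetical lookup helper (foo_iget() and FOO_IGET_DONTCACHE are
illustrative names, not part of this patch):

/* Hypothetical caller: inode lookup with "use once, do not cache"
 * semantics.  Only the d_mark_dontcache() call comes from this patch;
 * the rest is the usual iget_locked() boilerplate. */
static struct inode *foo_iget(struct super_block *sb, unsigned long ino,
                              unsigned int flags)
{
        struct inode *inode = iget_locked(sb, ino);

        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (inode->i_state & I_NEW) {
                /* ... read the on-disk inode and fill in @inode ... */
                unlock_new_inode(inode);
        }
        /*
         * Kill any aliasing dentries and evict the inode on the final
         * dput()/iput() rather than leaving them on the LRU lists.
         */
        if (flags & FOO_IGET_DONTCACHE)
                d_mark_dontcache(inode);
        return inode;
}

Unlike setting I_DONTCACHE directly (the removed line in the XFS hunk), the
helper also flags dentries that already alias the inode, so a cached path
cannot keep the inode live past its last user.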

--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
 #define DCACHE_REFERENCED              0x00000040 /* Recently used, don't discard. */
 
+#define DCACHE_DONTCACHE               0x00000080 /* Purge from memory on final dput() */
+
 #define DCACHE_CANT_MOUNT              0x00000100
 #define DCACHE_GENOCIDE                0x00000200
 #define DCACHE_SHRINK_LIST             0x00000400

--- a/include/linux/fs.h
+++ b/include/linux/fs.h
 static inline int generic_drop_inode(struct inode *inode)
 {
        return !inode->i_nlink || inode_unhashed(inode) ||
                (inode->i_state & I_DONTCACHE);
 }
+extern void d_mark_dontcache(struct inode *inode);
 
 extern struct inode *ilookup5_nowait(struct super_block *sb,
                unsigned long hashval, int (*test)(struct inode *, void *),