*/
 void inode_wb_list_del(struct inode *inode)
 {
-       spin_lock(&inode_wb_list_lock);
+       struct backing_dev_info *bdi = inode_to_bdi(inode);
+
+       spin_lock(&bdi->wb.list_lock);
        list_del_init(&inode->i_wb_list);
-       spin_unlock(&inode_wb_list_lock);
+       spin_unlock(&bdi->wb.list_lock);
 }
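
Aside: the per-bdi lock is reached through inode_to_bdi(), which no hunk in this patch touches. For orientation, a sketch of that helper as it reads in the fs/fs-writeback.c of this era (reference only, not part of the diff; block-device inodes are special-cased because they sit on the internal bdev superblock but must write through the underlying device's bdi):

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	/* bdev inodes write through the device's own bdi, not sb->s_bdi */
	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}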
 
-
 /*
  * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
  * furthest end of its superblock's dirty-inode list.
  *
  * Before stamping the inode's ->dirtied_when, we check to see whether it is
  * already the most-recently-dirtied inode on the b_dirty list.  If that is
  * the case then the inode must have been redirtied while it was being written
  * out and we don't reset its dirtied_when.
  */
-static void redirty_tail(struct inode *inode)
+static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
 {
-       struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
-
-       assert_spin_locked(&inode_wb_list_lock);
+       assert_spin_locked(&wb->list_lock);
        if (!list_empty(&wb->b_dirty)) {
                struct inode *tail;
 
 /*
  * requeue inode for re-scanning after bdi->b_io list is exhausted.
  */
-static void requeue_io(struct inode *inode)
+static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
 {
-       struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
-
-       assert_spin_locked(&inode_wb_list_lock);
+       assert_spin_locked(&wb->list_lock);
        list_move(&inode->i_wb_list, &wb->b_more_io);
 }
 
 {
        /*
         * Prevent speculative execution through
-        * spin_unlock(&inode_wb_list_lock);
+        * spin_unlock(&wb->list_lock);
         */
 
        smp_mb();
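
Aside: this fragment is the top of inode_sync_complete(). The line that follows the barrier in the tree is the wake-up half of the I_SYNC handshake; smp_mb() orders the caller's earlier I_SYNC clear against the waitqueue check inside wake_up_bit(), and the waiter side is inode_wait_for_writeback() below:

	wake_up_bit(&inode->i_state, __I_SYNC);
}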
  */
 static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
 {
-       assert_spin_locked(&inode_wb_list_lock);
+       assert_spin_locked(&wb->list_lock);
        list_splice_init(&wb->b_more_io, &wb->b_io);
        move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
 }
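
Aside: queue_io() leans on move_expired_inodes(), which this excerpt omits. A simplified sketch of its core, assuming the wb_inode() and inode_dirtied_after() helpers from the same file and ignoring the per-superblock grouping the real function also performs:

static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	/* b_dirty is reverse time ordered: the oldest inodes sit at the tail */
	while (!list_empty(delaying_queue)) {
		struct inode *inode = wb_inode(delaying_queue->prev);

		/* stop at the first inode dirtied after the cutoff */
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_wb_list, dispatch_queue);
	}
}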
 /*
  * Wait for writeback on an inode to complete.
  */
-static void inode_wait_for_writeback(struct inode *inode)
+static void inode_wait_for_writeback(struct inode *inode,
+                                    struct bdi_writeback *wb)
 {
        DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
        wait_queue_head_t *wqh;
        wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
        while (inode->i_state & I_SYNC) {
                spin_unlock(&inode->i_lock);
-               spin_unlock(&inode_wb_list_lock);
+               spin_unlock(&wb->list_lock);
                __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
-               spin_lock(&inode_wb_list_lock);
+               spin_lock(&wb->list_lock);
                spin_lock(&inode->i_lock);
        }
 }
 
 /*
- * Write out an inode's dirty pages.  Called under inode_wb_list_lock and
+ * Write out an inode's dirty pages.  Called under wb->list_lock and
  * inode->i_lock.  Either the caller has an active reference on the inode or
  * the inode has I_WILL_FREE set.
  *
  * livelocks, etc.
  */
 static int
-writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
+writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+                      struct writeback_control *wbc)
 {
        struct address_space *mapping = inode->i_mapping;
        unsigned dirty;
        int ret;
 
-       assert_spin_locked(&inode_wb_list_lock);
+       assert_spin_locked(&wb->list_lock);
        assert_spin_locked(&inode->i_lock);
 
        if (!atomic_read(&inode->i_count))
                 * completed a full scan of b_io.
                 */
                if (wbc->sync_mode != WB_SYNC_ALL) {
-                       requeue_io(inode);
+                       requeue_io(inode, wb);
                        return 0;
                }
 
                /*
                 * It's a data-integrity sync.  We must wait.
                 */
-               inode_wait_for_writeback(inode);
+               inode_wait_for_writeback(inode, wb);
        }
 
        BUG_ON(inode->i_state & I_SYNC);
        inode->i_state |= I_SYNC;
        inode->i_state &= ~I_DIRTY_PAGES;
        spin_unlock(&inode->i_lock);
-       spin_unlock(&inode_wb_list_lock);
+       spin_unlock(&wb->list_lock);
 
        ret = do_writepages(mapping, wbc);
 
                        ret = err;
        }
 
-       spin_lock(&inode_wb_list_lock);
+       spin_lock(&wb->list_lock);
        spin_lock(&inode->i_lock);
        inode->i_state &= ~I_SYNC;
        if (!(inode->i_state & I_FREEING)) {
                                /*
                                 * slice used up: queue for next turn
                                 */
-                               requeue_io(inode);
+                               requeue_io(inode, wb);
                        } else {
                                /*
                                 * Writeback blocked by something other than
                                 * retrying writeback of the dirty page/inode
                                 * that cannot be performed immediately.
                                 */
-                               redirty_tail(inode);
+                               redirty_tail(inode, wb);
                        }
                } else if (inode->i_state & I_DIRTY) {
                        /*
                         * submission or metadata updates after data IO
                         * completion.
                         */
-                       redirty_tail(inode);
+                       redirty_tail(inode, wb);
                } else {
                        /*
                         * The inode is clean.  At this point we either have
                                 * superblock, move all inodes not belonging
                                 * to it back onto the dirty list.
                                 */
-                               redirty_tail(inode);
+                               redirty_tail(inode, wb);
                                continue;
                        }
 
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
-                       requeue_io(inode);
+                       requeue_io(inode, wb);
                        continue;
                }
 
                __iget(inode);
 
                pages_skipped = wbc->pages_skipped;
-               writeback_single_inode(inode, wbc);
+               writeback_single_inode(inode, wb, wbc);
                if (wbc->pages_skipped != pages_skipped) {
                        /*
                         * writeback is not making progress due to locked
                         * buffers.  Skip this inode for now.
                         */
-                       redirty_tail(inode);
+                       redirty_tail(inode, wb);
                }
                spin_unlock(&inode->i_lock);
-               spin_unlock(&inode_wb_list_lock);
+               spin_unlock(&wb->list_lock);
                iput(inode);
                cond_resched();
-               spin_lock(&inode_wb_list_lock);
+               spin_lock(&wb->list_lock);
                if (wbc->nr_to_write <= 0) {
                        wbc->more_io = 1;
                        return 1;
 
        if (!wbc->wb_start)
                wbc->wb_start = jiffies; /* livelock avoidance */
-       spin_lock(&inode_wb_list_lock);
+       spin_lock(&wb->list_lock);
 
        if (list_empty(&wb->b_io))
                queue_io(wb, wbc->older_than_this);
                struct super_block *sb = inode->i_sb;
 
                if (!pin_sb_for_writeback(sb)) {
-                       requeue_io(inode);
+                       requeue_io(inode, wb);
                        continue;
                }
                ret = writeback_sb_inodes(sb, wb, wbc, false);
                if (ret)
                        break;
        }
-       spin_unlock(&inode_wb_list_lock);
+       spin_unlock(&wb->list_lock);
        /* Leave any unwritten inodes on b_io */
 }
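
Aside: the loop above skips any inode whose superblock cannot be pinned, and requeue_io() keeps it on b_more_io for a later pass. pin_sb_for_writeback() is not part of this excerpt; roughly, in the tree this patch applies to, it takes a passive superblock reference and then try-locks s_umount so writeback never races with a concurrent umount:

static bool pin_sb_for_writeback(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;		/* passive reference */
	spin_unlock(&sb_lock);

	/* exclude a concurrent umount without blocking writeback */
	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}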
 
 {
        WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
-       spin_lock(&inode_wb_list_lock);
+       spin_lock(&wb->list_lock);
        if (list_empty(&wb->b_io))
                queue_io(wb, wbc->older_than_this);
        writeback_sb_inodes(sb, wb, wbc, true);
-       spin_unlock(&inode_wb_list_lock);
+       spin_unlock(&wb->list_lock);
 }
 
 /*
                 * become available for writeback. Otherwise
                 * we'll just busyloop.
                 */
-               spin_lock(&inode_wb_list_lock);
+               spin_lock(&wb->list_lock);
                if (!list_empty(&wb->b_more_io))  {
                        inode = wb_inode(wb->b_more_io.prev);
                        trace_wbc_writeback_wait(&wbc, wb->bdi);
                        spin_lock(&inode->i_lock);
-                       inode_wait_for_writeback(inode);
+                       inode_wait_for_writeback(inode, wb);
                        spin_unlock(&inode->i_lock);
                }
-               spin_unlock(&inode_wb_list_lock);
+               spin_unlock(&wb->list_lock);
        }
 
        return wrote;
                        }
 
                        spin_unlock(&inode->i_lock);
-                       spin_lock(&inode_wb_list_lock);
+                       spin_lock(&bdi->wb.list_lock);
                        inode->dirtied_when = jiffies;
                        list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
-                       spin_unlock(&inode_wb_list_lock);
+                       spin_unlock(&bdi->wb.list_lock);
 
                        if (wakeup_bdi)
                                bdi_wakeup_thread_delayed(bdi);
  */
 int write_inode_now(struct inode *inode, int sync)
 {
+       struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
        int ret;
        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                wbc.nr_to_write = 0;
 
        might_sleep();
-       spin_lock(&inode_wb_list_lock);
+       spin_lock(&wb->list_lock);
        spin_lock(&inode->i_lock);
-       ret = writeback_single_inode(inode, &wbc);
+       ret = writeback_single_inode(inode, wb, &wbc);
        spin_unlock(&inode->i_lock);
-       spin_unlock(&inode_wb_list_lock);
+       spin_unlock(&wb->list_lock);
        if (sync)
                inode_sync_wait(inode);
        return ret;
  */
 int sync_inode(struct inode *inode, struct writeback_control *wbc)
 {
+       struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
        int ret;
 
-       spin_lock(&inode_wb_list_lock);
+       spin_lock(&wb->list_lock);
        spin_lock(&inode->i_lock);
-       ret = writeback_single_inode(inode, wbc);
+       ret = writeback_single_inode(inode, wb, wbc);
        spin_unlock(&inode->i_lock);
-       spin_unlock(&inode_wb_list_lock);
+       spin_unlock(&wb->list_lock);
        return ret;
 }
 EXPORT_SYMBOL(sync_inode);
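
Aside: sync_inode() with nr_to_write == 0 is the standard way for a filesystem to push just an inode's metadata from its fsync path; the tree of this era wraps exactly that pattern in sync_inode_metadata(), which sits right after sync_inode() in fs/fs-writeback.c and is shown here as a usage reference:

int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0,	/* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);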
 
 static int bdi_sync_supers(void *);
 static void sync_supers_timer_fn(unsigned long);
 
+/*
+ * Take a pair of wb->list_locks in address order so that all lockers
+ * nest them consistently; the nested annotation keeps lockdep from
+ * flagging the second acquisition as recursive.
+ */
+void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
+{
+       if (wb1 < wb2) {
+               spin_lock(&wb1->list_lock);
+               spin_lock_nested(&wb2->list_lock, 1);
+       } else {
+               spin_lock(&wb2->list_lock);
+               spin_lock_nested(&wb1->list_lock, 1);
+       }
+}
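
Aside: bdi_lock_two() fixes the nesting order by comparing the wb pointers, so any two CPUs taking the same pair of locks always take them in the same order. Without that fixed order, the list splice in the bdi teardown path below could ABBA-deadlock against another task locking the same pair (illustrative, not from the patch):

/*
 *   CPU0                            CPU1
 *   spin_lock(&a->list_lock);       spin_lock(&b->list_lock);
 *   spin_lock(&b->list_lock);  <->  spin_lock(&a->list_lock);
 *
 * Both CPUs now wait on a lock the other holds.  Sorting on the pointer
 * value makes both take min(a, b) first, so this cannot happen.  The
 * unlock order, by contrast, is irrelevant for deadlock avoidance.
 */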
+
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
        struct inode *inode;
 
        nr_dirty = nr_io = nr_more_io = 0;
-       spin_lock(&inode_wb_list_lock);
+       spin_lock(&wb->list_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_wb_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
                nr_more_io++;
-       spin_unlock(&inode_wb_list_lock);
+       spin_unlock(&wb->list_lock);
 
        global_dirty_limits(&background_thresh, &dirty_thresh);
        bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
+       spin_lock_init(&wb->list_lock);
        setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
 }
 
        if (bdi_has_dirty_io(bdi)) {
                struct bdi_writeback *dst = &default_backing_dev_info.wb;
 
-               spin_lock(&inode_wb_list_lock);
+               bdi_lock_two(&bdi->wb, dst);
                list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
                list_splice(&bdi->wb.b_io, &dst->b_io);
                list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
-               spin_unlock(&inode_wb_list_lock);
+               spin_unlock(&bdi->wb.list_lock);
+               spin_unlock(&dst->list_lock);
        }
 
        bdi_unregister(bdi);