return false;
 }
 
-/**
- * inode_io_list_del_locked - remove an inode from its bdi_writeback IO list
- * @inode: inode to be removed
- * @wb: bdi_writeback @inode is being removed from
- *
- * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
- * clear %WB_has_dirty_io if all are empty afterwards.
- */
-static void inode_io_list_del_locked(struct inode *inode,
-                                    struct bdi_writeback *wb)
-{
-       assert_spin_locked(&wb->list_lock);
-       assert_spin_locked(&inode->i_lock);
-
-       inode->i_state &= ~I_SYNC_QUEUED;
-       list_del_init(&inode->i_io_list);
-       wb_io_lists_depopulated(wb);
-}
-
 static void wb_wakeup(struct bdi_writeback *wb)
 {
        spin_lock_bh(&wb->work_lock);
 }
 EXPORT_SYMBOL_GPL(__inode_attach_wb);
 
+/**
+ * inode_cgwb_move_to_attached - put the inode onto wb->b_attached list
+ * @inode: inode of interest with i_lock held
+ * @wb: target bdi_writeback
+ *
+ * Remove the inode from wb's io lists and, if necessary, put it onto the
+ * b_attached list.  Only inodes attached to cgwb's are kept on this list.
+ */
+static void inode_cgwb_move_to_attached(struct inode *inode,
+                                       struct bdi_writeback *wb)
+{
+       assert_spin_locked(&wb->list_lock);
+       assert_spin_locked(&inode->i_lock);
+
+       inode->i_state &= ~I_SYNC_QUEUED;
+       if (wb != &wb->bdi->wb)
+               list_move(&inode->i_io_list, &wb->b_attached);
+       else
+               list_del_init(&inode->i_io_list);
+       wb_io_lists_depopulated(wb);
+}
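/*
 * Illustrative caller sketch only: example_mark_clean() is hypothetical;
 * the real call sites are requeue_inode() and writeback_single_inode()
 * further down in this patch.  Lock ordering follows the rest of this
 * file: wb->list_lock first, then inode->i_lock.
 */
static void example_mark_clean(struct inode *inode, struct bdi_writeback *wb)
{
	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/* Clean inodes leave the dirty lists but stay attached to cgwbs. */
	if (!(inode->i_state & I_DIRTY_ALL))
		inode_cgwb_move_to_attached(inode, wb);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
}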
+
 /**
  * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
  * @inode: inode of interest with i_lock held
        wb_get(new_wb);
 
        /*
-        * Transfer to @new_wb's IO list if necessary.  The specific list
-        * @inode was on is ignored and the inode is put on ->b_dirty which
-        * is always correct including from ->b_dirty_time.  The transfer
-        * preserves @inode->dirtied_when ordering.
+        * Transfer to @new_wb's IO list if necessary.  If the @inode is dirty,
+        * the specific list @inode was on is ignored and the @inode is put on
+        * ->b_dirty which is always correct including from ->b_dirty_time.
+        * The transfer preserves @inode->dirtied_when ordering.  If the @inode
+        * is clean, it was on @old_wb's b_attached list, so move it onto the
+        * b_attached list of @new_wb.
         */
        if (!list_empty(&inode->i_io_list)) {
-               struct inode *pos;
-
-               inode_io_list_del_locked(inode, old_wb);
                inode->i_wb = new_wb;
-               list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
-                       if (time_after_eq(inode->dirtied_when,
-                                         pos->dirtied_when))
-                               break;
-               inode_io_list_move_locked(inode, new_wb, pos->i_io_list.prev);
+
+               if (inode->i_state & I_DIRTY_ALL) {
+                       struct inode *pos;
+
+                       list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
+                               if (time_after_eq(inode->dirtied_when,
+                                                 pos->dirtied_when))
+                                       break;
+                       inode_io_list_move_locked(inode, new_wb,
+                                                 pos->i_io_list.prev);
+               } else {
+                       inode_cgwb_move_to_attached(inode, new_wb);
+               }
        } else {
                inode->i_wb = new_wb;
        }
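/*
 * A minimal userspace model of the ordered insert above (toy list and
 * names are illustrative, not kernel API).  b_dirty keeps inodes in
 * descending dirtied_when order, most recently dirtied at the head; the
 * walk stops at the first entry at least as old as the new one and
 * inserts in front of it.  Plain >= stands in for time_after_eq(),
 * which additionally copes with jiffies wraparound.
 */
#include <stdio.h>

struct tnode {
	struct tnode *prev, *next;
	unsigned long dirtied_when;
};

static void tlist_init(struct tnode *head)
{
	head->prev = head->next = head;
}

static void insert_before(struct tnode *pos, struct tnode *n)
{
	n->prev = pos->prev;
	n->next = pos;
	pos->prev->next = n;
	pos->prev = n;
}

/* Mirrors the list_for_each_entry() walk in the hunk above. */
static void dirty_list_insert(struct tnode *head, struct tnode *n)
{
	struct tnode *pos;

	for (pos = head->next; pos != head; pos = pos->next)
		if (n->dirtied_when >= pos->dirtied_when)
			break;
	insert_before(pos, n);	/* pos == head means: append at the tail */
}

int main(void)
{
	struct tnode head, a = { .dirtied_when = 30 },
		b = { .dirtied_when = 10 }, c = { .dirtied_when = 20 };
	struct tnode *pos;

	tlist_init(&head);
	dirty_list_insert(&head, &a);
	dirty_list_insert(&head, &b);
	dirty_list_insert(&head, &c);
	for (pos = head.next; pos != &head; pos = pos->next)
		printf("%lu ", pos->dirtied_when);	/* prints: 30 20 10 */
	printf("\n");
	return 0;
}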
 static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
 static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
 
+static void inode_cgwb_move_to_attached(struct inode *inode,
+                                       struct bdi_writeback *wb)
+{
+       assert_spin_locked(&wb->list_lock);
+       assert_spin_locked(&inode->i_lock);
+
+       inode->i_state &= ~I_SYNC_QUEUED;
+       list_del_init(&inode->i_io_list);
+       wb_io_lists_depopulated(wb);
+}
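/*
 * Without CONFIG_CGROUP_WRITEBACK there is only the bdi-embedded root wb
 * and no b_attached list, so this stub keeps the old
 * inode_io_list_del_locked() behaviour while callers stay config-agnostic.
 * A runnable toy of the same "full vs stub under a compile switch"
 * pattern (all names below are made up for illustration):
 */
#include <stdio.h>

struct toy_wb { int attached_cnt; };

#ifdef CONFIG_TOY_CGWB
static void toy_move_to_attached(struct toy_wb *wb)
{
	wb->attached_cnt++;		/* parked on an attached list */
}
#else
static void toy_move_to_attached(struct toy_wb *wb)
{
	(void)wb;			/* plain removal, nothing retained */
}
#endif

int main(void)
{
	struct toy_wb wb = { 0 };

	toy_move_to_attached(&wb);
	printf("attached: %d\n", wb.attached_cnt);	/* 1 with -DCONFIG_TOY_CGWB, else 0 */
	return 0;
}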
+
 static struct bdi_writeback *
 locked_inode_to_wb_and_lock_list(struct inode *inode)
        __releases(&inode->i_lock)
 
        wb = inode_to_wb_and_lock_list(inode);
        spin_lock(&inode->i_lock);
-       inode_io_list_del_locked(inode, wb);
+
+       inode->i_state &= ~I_SYNC_QUEUED;
+       list_del_init(&inode->i_io_list);
+       wb_io_lists_depopulated(wb);
+
        spin_unlock(&inode->i_lock);
        spin_unlock(&wb->list_lock);
 }
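/*
 * Why list_del_init() rather than list_del(): the switch path above keys
 * off list_empty(&inode->i_io_list), which only works if deletion leaves
 * the node self-linked.  A runnable userspace model (toy list, not the
 * kernel implementation):
 */
#include <assert.h>
#include <stdbool.h>

struct tlist { struct tlist *prev, *next; };

static void tlist_init(struct tlist *n) { n->prev = n->next = n; }

static bool tlist_empty(const struct tlist *n) { return n->next == n; }

static void tlist_add(struct tlist *n, struct tlist *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void tlist_del_init(struct tlist *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	tlist_init(n);	/* self-linked again, so tlist_empty() holds */
}

int main(void)
{
	struct tlist head, node;

	tlist_init(&head);
	tlist_init(&node);
	tlist_add(&node, &head);
	assert(!tlist_empty(&node));
	tlist_del_init(&node);
	assert(tlist_empty(&node));	/* safe to re-add or test later */
	return 0;
}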
                inode->i_state &= ~I_SYNC_QUEUED;
        } else {
                /* The inode is clean. Remove from writeback lists. */
-               inode_io_list_del_locked(inode, wb);
+               inode_cgwb_move_to_attached(inode, wb);
        }
 }
 
         * responsible for the writeback lists.
         */
        if (!(inode->i_state & I_DIRTY_ALL))
-               inode_io_list_del_locked(inode, wb);
+               inode_cgwb_move_to_attached(inode, wb);
        spin_unlock(&wb->list_lock);
        inode_sync_complete(inode);
 out: