#include <trace/events/block.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static int submit_bh_blkcg(int rw, struct buffer_head *bh,
-                          unsigned long bio_flags,
-                          struct cgroup_subsys_state *blkcg_css);
+static int submit_bh_wbc(int rw, struct buffer_head *bh,
+                        unsigned long bio_flags,
+                        struct writeback_control *wbc);
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
        unsigned int blocksize, bbits;
        int nr_underway = 0;
        int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
-       struct cgroup_subsys_state *blkcg_css = inode_to_wb_blkcg_css(inode);
 
        head = create_page_buffers(page, inode,
                                        (1 << BH_Dirty)|(1 << BH_Uptodate));
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
-                       submit_bh_blkcg(write_op, bh, 0, blkcg_css);
+                       submit_bh_wbc(write_op, bh, 0, wbc);
                        nr_underway++;
                }
                bh = next;
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        clear_buffer_dirty(bh);
-                       submit_bh_blkcg(write_op, bh, 0, blkcg_css);
+                       submit_bh_wbc(write_op, bh, 0, wbc);
                        nr_underway++;
                }
                bh = next;
        }
 }
 
-static int submit_bh_blkcg(int rw, struct buffer_head *bh,
-                          unsigned long bio_flags,
-                          struct cgroup_subsys_state *blkcg_css)
+static int submit_bh_wbc(int rw, struct buffer_head *bh,
+                        unsigned long bio_flags, struct writeback_control *wbc)
 {
        struct bio *bio;
+       int ret = 0;
 
        BUG_ON(!buffer_locked(bh));
        BUG_ON(!buffer_mapped(bh));
         */
        bio = bio_alloc(GFP_NOIO, 1);
 
-       if (blkcg_css)
-               bio_associate_blkcg(bio, blkcg_css);
+       if (wbc)
+               wbc_init_bio(wbc, bio);
 
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
 
 int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
 {
-       return submit_bh_blkcg(rw, bh, bio_flags, NULL);
+       return submit_bh_wbc(rw, bh, bio_flags, NULL);
 }
 EXPORT_SYMBOL_GPL(_submit_bh);
 
 int submit_bh(int rw, struct buffer_head *bh)
 {
-       return submit_bh_blkcg(rw, bh, 0, NULL);
+       return submit_bh_wbc(rw, bh, 0, NULL);
 }
 EXPORT_SYMBOL(submit_bh);
 
 
                wb_put(wb);
 }
 
+/**
+ * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
+ * @wbc: writeback_control of interest
+ * @inode: target inode
+ *
+ * @inode is locked and about to be written back under the control of @wbc.
+ * Record @inode's writeback context into @wbc and unlock the i_lock.  On
+ * writeback completion, wbc_detach_inode() should be called.  This is used
+ * to track the cgroup writeback context.
+ */
+void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+                                struct inode *inode)
+{
+       wbc->wb = inode_to_wb(inode);
+       wb_get(wbc->wb);
+       spin_unlock(&inode->i_lock);
+}
+
+/**
+ * wbc_detach_inode - disassociate wbc from its target inode
+ * @wbc: writeback_control of interest
+ *
+ * To be called after a writeback attempt on an inode finishes.  Undoes
+ * wbc_attach_and_unlock_inode() and releases the wb reference it acquired.
+ * Can be called from any context.
+ */
+void wbc_detach_inode(struct writeback_control *wbc)
+{
+       wb_put(wbc->wb);
+       wbc->wb = NULL;
+}
+
 /**
  * inode_congested - test whether an inode is congested
  * @inode: inode to test for congestion
             !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
                goto out;
        inode->i_state |= I_SYNC;
-       spin_unlock(&inode->i_lock);
+       wbc_attach_and_unlock_inode(wbc, inode);
 
        ret = __writeback_single_inode(inode, wbc);
 
+       wbc_detach_inode(wbc);
        spin_lock(&wb->list_lock);
        spin_lock(&inode->i_lock);
        /*
                        continue;
                }
                inode->i_state |= I_SYNC;
-               spin_unlock(&inode->i_lock);
+               wbc_attach_and_unlock_inode(&wbc, inode);
 
                write_chunk = writeback_chunk_size(wb, work);
                wbc.nr_to_write = write_chunk;
                 */
                __writeback_single_inode(inode, &wbc);
 
+               wbc_detach_inode(&wbc);
                work->nr_pages -= write_chunk - wbc.nr_to_write;
                wrote += write_chunk - wbc.nr_to_write;
                spin_lock(&wb->list_lock);
 
                if (bio == NULL)
                        goto confused;
 
-               bio_associate_blkcg(bio, inode_to_wb_blkcg_css(inode));
+               wbc_init_bio(wbc, bio);
        }
 
        /*
 
        return inode->i_wb;
 }
 
-static inline struct cgroup_subsys_state *
-inode_to_wb_blkcg_css(struct inode *inode)
-{
-       return inode_to_wb(inode)->blkcg_css;
-}
-
 struct wb_iter {
        int                     start_blkcg_id;
        struct radix_tree_iter  tree_iter;
 {
 }
 
-static inline struct cgroup_subsys_state *
-inode_to_wb_blkcg_css(struct inode *inode)
-{
-       return blkcg_root_css;
-}
-
 struct wb_iter {
        int             next_id;
 };
 
        unsigned for_reclaim:1;         /* Invoked from the page allocator */
        unsigned range_cyclic:1;        /* range_start is cyclic */
        unsigned for_sync:1;            /* sync(2) WB_SYNC_ALL writeback */
+#ifdef CONFIG_CGROUP_WRITEBACK
+       struct bdi_writeback *wb;       /* wb this writeback is issued under */
+#endif
 };
 
 /*
 
 #ifdef CONFIG_CGROUP_WRITEBACK
 
+#include <linux/cgroup.h>
+#include <linux/bio.h>
+
 void __inode_attach_wb(struct inode *inode, struct page *page);
+void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+                                struct inode *inode)
+       __releases(&inode->i_lock);
+void wbc_detach_inode(struct writeback_control *wbc);
 
 /**
  * inode_attach_wb - associate an inode with its wb
        }
 }
 
+/**
+ * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
+ * @wbc: writeback_control of interest
+ * @inode: target inode
+ *
+ * This function is to be used by __filemap_fdatawrite_range(), which is an
+ * alternative entry point into the writeback code.  It first ensures that
+ * @inode is associated with a bdi_writeback and then attaches that wb to
+ * @wbc.
+ */
+static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+                                              struct inode *inode)
+{
+       spin_lock(&inode->i_lock);
+       inode_attach_wb(inode, NULL);
+       wbc_attach_and_unlock_inode(wbc, inode);
+}
+
+/**
+ * wbc_init_bio - writeback specific initialization of bio
+ * @wbc: writeback_control for the writeback in progress
+ * @bio: bio to be initialized
+ *
+ * @bio is a part of the writeback in progress controlled by @wbc.  Perform
+ * writeback specific initialization.  This is used to apply the cgroup
+ * writeback context.
+ */
+static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
+{
+       /*
+        * pageout() path doesn't attach @wbc to the inode being written
+        * out.  This is intentional as we don't want the function to block
+        * behind a slow cgroup.  Ultimately, we want pageout() to kick off
+        * regular writeback instead of writing things out itself.
+        */
+       if (wbc->wb)
+               bio_associate_blkcg(bio, wbc->wb->blkcg_css);
+}
+
 #else  /* CONFIG_CGROUP_WRITEBACK */
 
 static inline void inode_attach_wb(struct inode *inode, struct page *page)
 {
 }
 
+static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+                                              struct inode *inode)
+       __releases(&inode->i_lock)
+{
+       spin_unlock(&inode->i_lock);
+}
+
+static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+                                              struct inode *inode)
+{
+}
+
+static inline void wbc_detach_inode(struct writeback_control *wbc)
+{
+}
+
+static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
+{
+}
+
 #endif /* CONFIG_CGROUP_WRITEBACK */
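
/*
 * Illustrative sketch only, not part of the patch above: one way a
 * filesystem's own bio submission path could use wbc_init_bio() so that
 * its writeback IOs are attributed to the cgroup recorded in the wbc.
 * example_submit_page() and its end_io parameter are hypothetical names,
 * the 1:1 page-index-to-sector mapping is a simplification, and the usual
 * headers (<linux/bio.h>, <linux/fs.h>, <linux/pagemap.h>,
 * <linux/writeback.h>) are assumed.
 */
static void example_submit_page(int rw, struct page *page,
				struct writeback_control *wbc,
				bio_end_io_t *end_io)
{
	struct inode *inode = page->mapping->host;
	struct bio *bio = bio_alloc(GFP_NOFS, 1);

	/* tags the bio with wbc->wb->blkcg_css; a no-op when wb is NULL */
	wbc_init_bio(wbc, bio);

	bio->bi_bdev = inode->i_sb->s_bdev;
	bio->bi_iter.bi_sector = (sector_t)page->index << (PAGE_SHIFT - 9);
	bio->bi_end_io = end_io;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	set_page_writeback(page);
	unlock_page(page);
	submit_bio(rw, bio);
}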
 
 /*
 
        if (!mapping_cap_writeback_dirty(mapping))
                return 0;
 
+       wbc_attach_fdatawrite_inode(&wbc, mapping->host);
        ret = do_writepages(mapping, &wbc);
+       wbc_detach_inode(&wbc);
        return ret;
 }
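
/*
 * Illustrative sketch only, not part of the patch above: the intended
 * lifecycle of the new wbc->wb tracking, condensed from the hunks above.
 * example_write_one_inode() is a hypothetical wrapper; I_SYNC handling,
 * wb->list_lock juggling and error handling from the real writeback paths
 * are omitted.
 */
static void example_write_one_inode(struct inode *inode,
				    struct writeback_control *wbc)
{
	spin_lock(&inode->i_lock);
	/* records inode_to_wb(inode) in wbc->wb, grabs a ref, drops i_lock */
	wbc_attach_and_unlock_inode(wbc, inode);

	/*
	 * Every bio issued under this wbc (via ->writepages() or
	 * submit_bh_wbc()) is tagged with wbc->wb->blkcg_css through
	 * wbc_init_bio().
	 */
	do_writepages(inode->i_mapping, wbc);

	/* releases the wb reference taken at attach time */
	wbc_detach_inode(wbc);
}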