static size_t huge_class_size;
 
 static void zram_free_page(struct zram *zram, size_t index);
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+                               u32 index, int offset, struct bio *bio);
+
 
 static int zram_slot_trylock(struct zram *zram, u32 index)
 {
        return zram->disksize;
 }
 
-static inline bool zram_allocated(struct zram *zram, u32 index)
-{
-
-       return (zram->table[index].flags >> (ZRAM_FLAG_SHIFT + 1)) ||
-                                       zram->table[index].handle;
-}
-
 static inline struct zram *dev_to_zram(struct device *dev)
 {
        return (struct zram *)dev_to_disk(dev)->private_data;
        zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
 }
 
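+/*
+ * A slot is allocated if it holds a compressed object (non-zero size),
+ * a same-filled page (ZRAM_SAME) or was written back (ZRAM_WB).
+ */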
+static inline bool zram_allocated(struct zram *zram, u32 index)
+{
+       return zram_get_obj_size(zram, index) ||
+                       zram_test_flag(zram, index, ZRAM_SAME) ||
+                       zram_test_flag(zram, index, ZRAM_WB);
+}
+
 #if PAGE_SIZE != 4096
 static inline bool is_partial_io(struct bio_vec *bvec)
 {
        }
 
        for (index = 0; index < nr_pages; index++) {
+               /*
+                * Do not mark ZRAM_UNDER_WB slots as ZRAM_IDLE, to close the
+                * writeback race; see the comment in writeback_store().
+                */
                zram_slot_lock(zram, index);
-               if (!zram_allocated(zram, index))
+               if (!zram_allocated(zram, index) ||
+                               zram_test_flag(zram, index, ZRAM_UNDER_WB))
                        goto next;
-
                zram_set_flag(zram, index, ZRAM_IDLE);
 next:
                zram_slot_unlock(zram, index);
        return 1;
 }
 
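+/* Page selection modes for the "writeback" sysfs knob */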
+#define HUGE_WRITEBACK 0x1
+#define IDLE_WRITEBACK 0x2
+
+static ssize_t writeback_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t len)
+{
+       struct zram *zram = dev_to_zram(dev);
+       unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+       unsigned long index;
+       struct bio bio;
+       struct bio_vec bio_vec;
+       struct page *page;
+       ssize_t ret, sz;
+       char mode_buf[8];
+       unsigned long mode = -1UL;
+       unsigned long blk_idx = 0;
+
+       sz = strscpy(mode_buf, buf, sizeof(mode_buf));
+       if (sz <= 0)
+               return -EINVAL;
+
+       /* ignore trailing newline */
+       if (mode_buf[sz - 1] == '\n')
+               mode_buf[sz - 1] = 0x00;
+
+       if (!strcmp(mode_buf, "idle"))
+               mode = IDLE_WRITEBACK;
+       else if (!strcmp(mode_buf, "huge"))
+               mode = HUGE_WRITEBACK;
+
+       if (mode == -1UL)
+               return -EINVAL;
+
+       down_read(&zram->init_lock);
+       if (!init_done(zram)) {
+               ret = -EINVAL;
+               goto release_init_lock;
+       }
+
+       if (!zram->backing_dev) {
+               ret = -ENODEV;
+               goto release_init_lock;
+       }
+
+       page = alloc_page(GFP_KERNEL);
+       if (!page) {
+               ret = -ENOMEM;
+               goto release_init_lock;
+       }
+
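+       /*
+        * Walk every slot; each eligible page is decompressed into @page and
+        * written synchronously to a freshly allocated backing-device block.
+        */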
+       for (index = 0; index < nr_pages; index++) {
+               struct bio_vec bvec;
+
+               bvec.bv_page = page;
+               bvec.bv_len = PAGE_SIZE;
+               bvec.bv_offset = 0;
+
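+               /*
+                * Allocate a backing-device block lazily; a block left unused
+                * by a previously skipped slot is reused.
+                */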
+               if (!blk_idx) {
+                       blk_idx = alloc_block_bdev(zram);
+                       if (!blk_idx) {
+                               ret = -ENOSPC;
+                               break;
+                       }
+               }
+
+               zram_slot_lock(zram, index);
+               if (!zram_allocated(zram, index))
+                       goto next;
+
+               if (zram_test_flag(zram, index, ZRAM_WB) ||
+                               zram_test_flag(zram, index, ZRAM_SAME) ||
+                               zram_test_flag(zram, index, ZRAM_UNDER_WB))
+                       goto next;
+
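+               /* Skip slots that do not match the requested writeback mode */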
+               if ((mode & IDLE_WRITEBACK &&
+                         !zram_test_flag(zram, index, ZRAM_IDLE)) ||
+                   (mode & HUGE_WRITEBACK &&
+                         !zram_test_flag(zram, index, ZRAM_HUGE)))
+                       goto next;
+               /*
+                * Clearing ZRAM_UNDER_WB is the caller's duty;
+                * IOW, zram_free_page() never clears it.
+                */
+               zram_set_flag(zram, index, ZRAM_UNDER_WB);
+               /* Needed so the reuse check below works for huge pages too */
+               zram_set_flag(zram, index, ZRAM_IDLE);
+               zram_slot_unlock(zram, index);
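+               /*
+                * The slot lock is not held across the read and the
+                * synchronous I/O below; the flags set above are used to
+                * detect concurrent changes once the lock is retaken.
+                */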
+               if (zram_bvec_read(zram, &bvec, index, 0, NULL)) {
+                       zram_slot_lock(zram, index);
+                       zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+                       zram_clear_flag(zram, index, ZRAM_IDLE);
+                       zram_slot_unlock(zram, index);
+                       continue;
+               }
+
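+               /* Write the page synchronously to the reserved block */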
+               bio_init(&bio, &bio_vec, 1);
+               bio_set_dev(&bio, zram->bdev);
+               bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
+               bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;
+
+               bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
+                               bvec.bv_offset);
+               /*
+                * XXX: Single-page I/O is inefficient for writes, but it is
+                * good enough as a starting point.
+                */
+               ret = submit_bio_wait(&bio);
+               if (ret) {
+                       zram_slot_lock(zram, index);
+                       zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+                       zram_clear_flag(zram, index, ZRAM_IDLE);
+                       zram_slot_unlock(zram, index);
+                       continue;
+               }
+
+               /*
+                * We released zram_slot_lock, so we must check whether the
+                * slot has changed. If the slot was freed, zram_allocated
+                * catches it easily.
+                * A subtle case is the slot being freed, reallocated and
+                * marked ZRAM_IDLE again. To close that race, idle_store does
+                * not mark a slot ZRAM_IDLE once it finds it ZRAM_UNDER_WB,
+                * so checking the ZRAM_IDLE bit here closes the race.
+                */
+               zram_slot_lock(zram, index);
+               if (!zram_allocated(zram, index) ||
+                         !zram_test_flag(zram, index, ZRAM_IDLE)) {
+                       zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+                       zram_clear_flag(zram, index, ZRAM_IDLE);
+                       goto next;
+               }
+
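+               /*
+                * Convert the slot into a ZRAM_WB entry: drop the compressed
+                * copy and record the backing-device block index in the
+                * element field.
+                */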
+               zram_free_page(zram, index);
+               zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+               zram_set_flag(zram, index, ZRAM_WB);
+               zram_set_element(zram, index, blk_idx);
+               blk_idx = 0;
+               atomic64_inc(&zram->stats.pages_stored);
+next:
+               zram_slot_unlock(zram, index);
+       }
+
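+       /* Release a block that was allocated but never used */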
+       if (blk_idx)
+               free_block_bdev(zram, blk_idx);
+       ret = len;
+       __free_page(page);
+release_init_lock:
+       up_read(&zram->init_lock);
+
+       return ret;
+}
+
 struct zram_work {
        struct work_struct work;
        struct zram *zram;
        else
                return read_from_bdev_async(zram, bvec, entry, parent);
 }
-
-static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
-                                       u32 index, struct bio *parent,
-                                       unsigned long *pentry)
-{
-       struct bio *bio;
-       unsigned long entry;
-
-       bio = bio_alloc(GFP_ATOMIC, 1);
-       if (!bio)
-               return -ENOMEM;
-
-       entry = alloc_block_bdev(zram);
-       if (!entry) {
-               bio_put(bio);
-               return -ENOSPC;
-       }
-
-       bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
-       bio_set_dev(bio, zram->bdev);
-       if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
-                                       bvec->bv_offset)) {
-               bio_put(bio);
-               free_block_bdev(zram, entry);
-               return -EIO;
-       }
-
-       if (!parent) {
-               bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
-               bio->bi_end_io = zram_page_end_io;
-       } else {
-               bio->bi_opf = parent->bi_opf;
-               bio_chain(bio, parent);
-       }
-
-       submit_bio(bio);
-       *pentry = entry;
-
-       return 0;
-}
-
 #else
 static inline void reset_bdev(struct zram *zram) {};
-static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
-                                       u32 index, struct bio *parent,
-                                       unsigned long *pentry)
-
-{
-       return -EIO;
-}
-
 static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
                        unsigned long entry, struct bio *parent, bool sync)
 {
        atomic64_dec(&zram->stats.pages_stored);
        zram_set_handle(zram, index, 0);
        zram_set_obj_size(zram, index, 0);
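+       /* After a free, only ZRAM_LOCK and ZRAM_UNDER_WB may remain set */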
-       WARN_ON_ONCE(zram->table[index].flags & ~(1UL << ZRAM_LOCK));
+       WARN_ON_ONCE(zram->table[index].flags &
+               ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
 }
 
 static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
        struct page *page = bvec->bv_page;
        unsigned long element = 0;
        enum zram_pageflags flags = 0;
-       bool allow_wb = true;
 
        mem = kmap_atomic(page);
        if (page_same_filled(mem, &element)) {
                return ret;
        }
 
-       if (unlikely(comp_len >= huge_class_size)) {
+       if (comp_len >= huge_class_size)
                comp_len = PAGE_SIZE;
-               if (zram->backing_dev && allow_wb) {
-                       zcomp_stream_put(zram->comp);
-                       ret = write_to_bdev(zram, bvec, index, bio, &element);
-                       if (!ret) {
-                               flags = ZRAM_WB;
-                               ret = 1;
-                               goto out;
-                       }
-                       allow_wb = false;
-                       goto compress_again;
-               }
-       }
-
        /*
         * handle allocation has 2 paths:
         * a) fast path is executed with preemption disabled (for
 static DEVICE_ATTR_RW(comp_algorithm);
 #ifdef CONFIG_ZRAM_WRITEBACK
 static DEVICE_ATTR_RW(backing_dev);
+static DEVICE_ATTR_WO(writeback);
 #endif
 
 static struct attribute *zram_disk_attrs[] = {
        &dev_attr_comp_algorithm.attr,
 #ifdef CONFIG_ZRAM_WRITEBACK
        &dev_attr_backing_dev.attr,
+       &dev_attr_writeback.attr,
 #endif
        &dev_attr_io_stat.attr,
        &dev_attr_mm_stat.attr,