atomic64_inc(&zram->stats.notify_free);
 }
 
+/*
+ * Synchronous single-page read/write entry point (->rw_page).  Lets the
+ * MM/swap layers do page I/O without allocating a bio.  Returns 0 on
+ * success (after completing the page via page_endio) or a negative errno;
+ * on error the caller resubmits the I/O as a normal bio request.
+ */
+static int zram_rw_page(struct block_device *bdev, sector_t sector,
+                      struct page *page, int rw)
+{
+       int offset, err;
+       u32 index;
+       struct zram *zram;
+       struct bio_vec bv;
+
+       zram = bdev->bd_disk->private_data;
+       if (!valid_io_request(zram, sector, PAGE_SIZE)) {
+               atomic64_inc(&zram->stats.invalid_io);
+               return -EINVAL;
+       }
+
+       /* init_lock guards against concurrent reset/disksize changes */
+       down_read(&zram->init_lock);
+       if (unlikely(!init_done(zram))) {
+               err = -EIO;
+               goto out_unlock;
+       }
+
+       index = sector >> SECTORS_PER_PAGE_SHIFT;
+       /*
+        * '<<' binds tighter than '&': the sector-within-page bits must be
+        * masked off first, then scaled to a byte offset.  Without the
+        * parentheses this masked the wrong bits of 'sector'.
+        */
+       offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+
+       bv.bv_page = page;
+       bv.bv_len = PAGE_SIZE;
+       bv.bv_offset = 0;
+
+       err = zram_bvec_rw(zram, &bv, index, offset, rw);
+out_unlock:
+       up_read(&zram->init_lock);
+       /*
+        * If I/O fails, just return error(ie, non-zero) without
+        * calling page_endio.
+        * It causes resubmit the I/O with bio request by upper functions
+        * of rw_page(e.g., swap_readpage, __swap_writepage) and
+        * bio->bi_end_io does things to handle the error
+        * (e.g., SetPageError, set_page_dirty and extra works).
+        */
+       if (err == 0)
+               page_endio(page, rw, 0);
+       return err;
+}
+
+
 static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
+       /* synchronous bio-less page I/O path for swap/MM callers */
+       .rw_page = zram_rw_page,
        .owner = THIS_MODULE
 };