#include <linux/mtd/blktrans.h>
 #include <linux/mtd/mtd.h>
 #include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/blkpg.h>
 #include <linux/spinlock.h>
 #include <linux/hdreg.h>
 
        dev->disk->private_data = NULL;
        blk_cleanup_queue(dev->rq);
+       blk_mq_free_tag_set(dev->tag_set);
+       kfree(dev->tag_set);
        put_disk(dev->disk);
        list_del(&dev->list);
        kfree(dev);
 }
 EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
 
-static void mtd_blktrans_work(struct work_struct *work)
+/*
+ * Pop the next queued request, if any, off dev->rq_list and tell blk-mq
+ * it has started.  Caller must hold dev->queue_lock (the list is only
+ * manipulated under that lock — see mtd_queue_rq()).
+ */
+static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
+{
+       struct request *rq;
+
+       rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
+       if (rq) {
+               list_del_init(&rq->queuelist);
+               blk_mq_start_request(rq);
+               return rq;
+       }
+
+       return NULL;
+}
+
+/*
+ * Dispatch loop: drain dev->rq_list, handing each request to
+ * do_blktrans_request() with dev->lock held.  Entered with
+ * dev->queue_lock held; the lock is dropped around the actual transfer
+ * and around tr->background(), and is held again on return — hence the
+ * sparse __releases/__acquires annotations.  Formerly a workqueue
+ * callback; now called synchronously from mtd_queue_rq().
+ */
+static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
+       __releases(&dev->queue_lock)
+       __acquires(&dev->queue_lock)
 {
-       struct mtd_blktrans_dev *dev =
-               container_of(work, struct mtd_blktrans_dev, work);
        struct mtd_blktrans_ops *tr = dev->tr;
-       struct request_queue *rq = dev->rq;
        struct request *req = NULL;
        int background_done = 0;
 
-       spin_lock_irq(rq->queue_lock);
-
        while (1) {
                blk_status_t res;
 
                dev->bg_stop = false;
-               if (!req && !(req = blk_fetch_request(rq))) {
+               if (!req && !(req = mtd_next_request(dev))) {
                        if (tr->background && !background_done) {
-                               spin_unlock_irq(rq->queue_lock);
+                               spin_unlock_irq(&dev->queue_lock);
                                mutex_lock(&dev->lock);
                                tr->background(dev);
                                mutex_unlock(&dev->lock);
-                               spin_lock_irq(rq->queue_lock);
+                               spin_lock_irq(&dev->queue_lock);
                                /*
                                 * Do background processing just once per idle
                                 * period.
                        break;
                }
 
-               spin_unlock_irq(rq->queue_lock);
+               spin_unlock_irq(&dev->queue_lock);
 
                mutex_lock(&dev->lock);
                res = do_blktrans_request(dev->tr, dev, req);
                mutex_unlock(&dev->lock);
 
-               spin_lock_irq(rq->queue_lock);
-
-               if (!__blk_end_request_cur(req, res))
+               /*
+                * Complete only the current segment's bytes; keep @req
+                * (non-NULL) when more segments remain so the next loop
+                * iteration continues the same request.
+                */
+               if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
+                       __blk_mq_end_request(req, res);
                        req = NULL;
+               }
 
                background_done = 0;
+               spin_lock_irq(&dev->queue_lock);
        }
-
-       spin_unlock_irq(rq->queue_lock);
 }
 
-static void mtd_blktrans_request(struct request_queue *rq)
+/*
+ * blk-mq ->queue_rq handler: append the request to dev->rq_list and run
+ * the dispatch loop inline.  The tag set is created with
+ * BLK_MQ_F_BLOCKING (see add_mtd_blktrans_dev()), so taking dev->lock
+ * (a mutex) inside mtd_blktrans_work() is permitted here.
+ */
+static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
+                                const struct blk_mq_queue_data *bd)
 {
        struct mtd_blktrans_dev *dev;
-       struct request *req = NULL;
 
-       dev = rq->queuedata;
+       dev = hctx->queue->queuedata;
+       /*
+        * queuedata is cleared when the device is being torn down; fail
+        * the request rather than touching a dying device.
+        */
+       if (!dev) {
+               blk_mq_start_request(bd->rq);
+               return BLK_STS_IOERR;
+       }
+
+       spin_lock_irq(&dev->queue_lock);
+       list_add_tail(&bd->rq->queuelist, &dev->rq_list);
+       mtd_blktrans_work(dev);
+       spin_unlock_irq(&dev->queue_lock);
 
-       if (!dev)
-               while ((req = blk_fetch_request(rq)) != NULL)
-                       __blk_end_request_all(req, BLK_STS_IOERR);
-       else
-               queue_work(dev->wq, &dev->work);
+       return BLK_STS_OK;
 }
 
 static int blktrans_open(struct block_device *bdev, fmode_t mode)
        .getgeo         = blktrans_getgeo,
 };
 
+/* blk-mq operations: all request processing funnels through mtd_queue_rq(). */
+static const struct blk_mq_ops mtd_mq_ops = {
+       .queue_rq       = mtd_queue_rq,
+};
+
 int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 {
        struct mtd_blktrans_ops *tr = new->tr;
 
        /* Create the request queue */
        spin_lock_init(&new->queue_lock);
-       new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);
+       INIT_LIST_HEAD(&new->rq_list);
 
-       if (!new->rq)
+       new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
+       if (!new->tag_set)
                goto error3;
 
+       new->rq = blk_mq_init_sq_queue(new->tag_set, &mtd_mq_ops, 2,
+                               BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+       if (IS_ERR(new->rq)) {
+               ret = PTR_ERR(new->rq);
+               new->rq = NULL;
+               goto error4;
+       }
+
        if (tr->flush)
                blk_queue_write_cache(new->rq, true, false);
 
 
        gd->queue = new->rq;
 
-       /* Create processing workqueue */
-       new->wq = alloc_workqueue("%s%d", 0, 0,
-                                 tr->name, new->mtd->index);
-       if (!new->wq)
-               goto error4;
-       INIT_WORK(&new->work, mtd_blktrans_work);
-
        if (new->readonly)
                set_disk_ro(gd, 1);
 
        }
        return 0;
 error4:
-       blk_cleanup_queue(new->rq);
+       kfree(new->tag_set);
 error3:
        put_disk(new->disk);
 error2:
        /* Stop new requests to arrive */
        del_gendisk(old->disk);
 
-       /* Stop workqueue. This will perform any pending request. */
-       destroy_workqueue(old->wq);
-
        /* Kill current requests */
        spin_lock_irqsave(&old->queue_lock, flags);
        old->rq->queuedata = NULL;
-       blk_start_queue(old->rq);
        spin_unlock_irqrestore(&old->queue_lock, flags);
 
+       /* freeze+quiesce queue to ensure all requests are flushed */
+       blk_mq_freeze_queue(old->rq);
+       blk_mq_quiesce_queue(old->rq);
+       blk_mq_unquiesce_queue(old->rq);
+       blk_mq_unfreeze_queue(old->rq);
+
        /* If the device is currently open, tell trans driver to close it,
                then put mtd device, and don't touch it again */
        mutex_lock(&old->lock);