}
 EXPORT_SYMBOL(blk_get_queue);
 
-/**
- * blk_get_request - allocate a request
- * @q: request queue to allocate a request for
- * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
- * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
- */
-struct request *blk_get_request(struct request_queue *q, unsigned int op,
-                               blk_mq_req_flags_t flags)
-{
-       WARN_ON_ONCE(op & REQ_NOWAIT);
-       WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
-       return blk_mq_alloc_request(q, op, flags);
-}
-EXPORT_SYMBOL(blk_get_request);
-
-void blk_put_request(struct request *req)
-{
-       blk_mq_free_request(req);
-}
-EXPORT_SYMBOL(blk_put_request);
-
 static void handle_bad_sector(struct bio *bio, sector_t maxsector)
 {
        char b[BDEVNAME_SIZE];
 
        struct request *rq;
        struct pd_req *req;
 
-       rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
+       rq = blk_mq_alloc_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        req = blk_mq_rq_to_pdu(rq);
 
        req->func = func;
        blk_execute_rq(disk->gd, rq, 0);
-       blk_put_request(rq);
+       blk_mq_free_request(rq);
        return 0;
 }
 
 
        if (scsi_req(rq)->result)
                ret = -EIO;
 out:
-       blk_put_request(rq);
+       blk_mq_free_request(rq);
        return ret;
 }
 
 
        struct request *req;
        int err;
 
-       req = blk_get_request(q, REQ_OP_DRV_IN, 0);
+       req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);
 
        blk_execute_rq(vblk->disk, req, false);
        err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
 out:
-       blk_put_request(req);
+       blk_mq_free_request(req);
        return err;
 }
 
 
 
        bdev = pgpath->path.dev->bdev;
        q = bdev_get_queue(bdev);
-       clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
+       clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
                        BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(clone)) {
                /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
                                                    clone->io_start_time_ns);
        }
 
-       blk_put_request(clone);
+       blk_mq_free_request(clone);
 }
 
 /*
 
        mq = &md->queue;
 
        /* Dispatch locking to the block layer */
-       req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, 0);
+       req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
        if (IS_ERR(req)) {
                count = PTR_ERR(req);
                goto out_put;
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
        blk_execute_rq(NULL, req, 0);
        ret = req_to_mmc_queue_req(req)->drv_op_result;
-       blk_put_request(req);
+       blk_mq_free_request(req);
 
        if (!ret) {
                pr_info("%s: Locking boot partition ro until next power on\n",
         * Dispatch the ioctl() into the block request queue.
         */
        mq = &md->queue;
-       req = blk_get_request(mq->queue,
+       req = blk_mq_alloc_request(mq->queue,
                idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
        blk_execute_rq(NULL, req, 0);
        ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
        err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
-       blk_put_request(req);
+       blk_mq_free_request(req);
 
 cmd_done:
        kfree(idata->buf);
         * Dispatch the ioctl()s into the block request queue.
         */
        mq = &md->queue;
-       req = blk_get_request(mq->queue,
+       req = blk_mq_alloc_request(mq->queue,
                idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
        for (i = 0; i < num_of_cmds && !err; i++)
                err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
 
-       blk_put_request(req);
+       blk_mq_free_request(req);
 
 cmd_err:
        for (i = 0; i < num_of_cmds; i++) {
        int ret;
 
        /* Ask the block layer about the card status */
-       req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
+       req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
                *val = ret;
                ret = 0;
        }
-       blk_put_request(req);
+       blk_mq_free_request(req);
 
        return ret;
 }
                return -ENOMEM;
 
        /* Ask the block layer for the EXT CSD */
-       req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
+       req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out_free;
        req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
        blk_execute_rq(NULL, req, 0);
        err = req_to_mmc_queue_req(req)->drv_op_result;
-       blk_put_request(req);
+       blk_mq_free_request(req);
        if (err) {
                pr_err("FAILED %d\n", err);
                goto out_free;
 
 out_free_cmd:
        scsi_req_free_cmd(scsi_req(rq));
 out_put_request:
-       blk_put_request(rq);
+       blk_mq_free_request(rq);
        return ret;
 }
 
 
 
 static void eh_lock_door_done(struct request *req, blk_status_t status)
 {
-       blk_put_request(req);
+       blk_mq_free_request(req);
 }
 
 /**
 
 out_free_cdb:
        scsi_req_free_cmd(req);
 out_put_request:
-       blk_put_request(rq);
+       blk_mq_free_request(rq);
        return ret;
 }
 
        }
 
 error:
-       blk_put_request(rq);
+       blk_mq_free_request(rq);
 
 error_free_buffer:
        kfree(buffer);
 
                scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
        ret = rq->result;
  out:
-       blk_put_request(req);
+       blk_mq_free_request(req);
 
        return ret;
 }
 {
        struct request *rq;
 
-       rq = blk_get_request(q, op, flags);
+       rq = blk_mq_alloc_request(q, op, flags);
        if (!IS_ERR(rq))
                scsi_initialize_rq(rq);
        return rq;
 
        if (atomic_read(&sdp->detaching)) {
                if (srp->bio) {
                        scsi_req_free_cmd(scsi_req(srp->rq));
-                       blk_put_request(srp->rq);
+                       blk_mq_free_request(srp->rq);
                        srp->rq = NULL;
                }
 
         */
        srp->rq = NULL;
        scsi_req_free_cmd(scsi_req(rq));
-       blk_put_request(rq);
+       blk_mq_free_request(rq);
 
        write_lock_irqsave(&sfp->rq_list_lock, iflags);
        if (unlikely(srp->orphan)) {
 
        if (srp->rq) {
                scsi_req_free_cmd(scsi_req(srp->rq));
-               blk_put_request(srp->rq);
+               blk_mq_free_request(srp->rq);
        }
 
        if (srp->res_used)
 
        if (blk_rq_unmap_user(bio))
                ret = -EFAULT;
 out_put_request:
-       blk_put_request(rq);
+       blk_mq_free_request(rq);
        return ret;
 }
 
 
                complete(SRpnt->waiting);
 
        blk_rq_unmap_user(tmp);
-       blk_put_request(req);
+       blk_mq_free_request(req);
 }
 
 static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
                err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen,
                                      GFP_KERNEL);
                if (err) {
-                       blk_put_request(req);
+                       blk_mq_free_request(req);
                        return err;
                }
        }
 
         * Even though we use wait_event() which sleeps indefinitely,
         * the maximum wait time is bounded by SCSI request timeout.
         */
-       req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+       req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out_unlock;
                                    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
 
 out:
-       blk_put_request(req);
+       blk_mq_free_request(req);
 out_unlock:
        up_read(&hba->clk_scaling_lock);
        return err;
        int task_tag, err;
 
        /*
-        * blk_get_request() is used here only to get a free tag.
+        * blk_mq_alloc_request() is used here only to get a free tag.
         */
-       req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+       req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);
 
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
        ufshcd_release(hba);
-       blk_put_request(req);
+       blk_mq_free_request(req);
 
        return err;
 }
 
        down_read(&hba->clk_scaling_lock);
 
-       req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+       req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out_unlock;
                                    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
 
 out:
-       blk_put_request(req);
+       blk_mq_free_request(req);
 out_unlock:
        up_read(&hba->clk_scaling_lock);
        return err;
        if (error != BLK_STS_OK)
                pr_err("%s: REQUEST SENSE failed (%d)\n", __func__, error);
        kfree(rq->end_io_data);
-       blk_put_request(rq);
+       blk_mq_free_request(rq);
 }
 
 static int
        if (!buffer)
                return -ENOMEM;
 
-       req = blk_get_request(sdev->request_queue, REQ_OP_DRV_IN,
-                             /*flags=*/BLK_MQ_REQ_PM);
+       req = blk_mq_alloc_request(sdev->request_queue, REQ_OP_DRV_IN,
+                                  /*flags=*/BLK_MQ_REQ_PM);
        if (IS_ERR(req)) {
                ret = PTR_ERR(req);
        return 0;
 
 out_put:
-       blk_put_request(req);
+       blk_mq_free_request(req);
 out_free:
        kfree(buffer);
        return ret;
 
        int _read_id;
        int ret = 0;
 
-       req = blk_get_request(cmd->device->request_queue,
-                             REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
+       req = blk_mq_alloc_request(cmd->device->request_queue,
+                                  REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(req))
                return -EAGAIN;
        ufshpb_put_pre_req(hpb, pre_req);
 unlock_out:
        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-       blk_put_request(req);
+       blk_mq_free_request(req);
        return ret;
 }
 
                return NULL;
 
 retry:
-       req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
-                             BLK_MQ_REQ_NOWAIT);
+       req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir,
+                                  BLK_MQ_REQ_NOWAIT);
 
        if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
 
 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
 {
-       blk_put_request(rq->req);
+       blk_mq_free_request(rq->req);
        kmem_cache_free(hpb->map_req_cache, rq);
 }
 
 
        return 0;
 
 fail_put_request:
-       blk_put_request(req);
+       blk_mq_free_request(req);
 fail:
        kfree(pt);
        return ret;
                break;
        }
 
-       blk_put_request(req);
+       blk_mq_free_request(req);
        kfree(pt);
 }
 
 
 }
 
 void blk_rq_init(struct request_queue *q, struct request *rq);
-void blk_put_request(struct request *rq);
-struct request *blk_get_request(struct request_queue *q, unsigned int op,
-               blk_mq_req_flags_t flags);
 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                struct bio_set *bs, gfp_t gfp_mask,
                int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
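
Note for any remaining callers: blk_get_request() and blk_put_request()
were thin wrappers, so the conversion is purely mechanical.
blk_get_request() becomes blk_mq_alloc_request() with identical
arguments and the same error convention (an ERR_PTR() value on failure,
never NULL), and blk_put_request() becomes blk_mq_free_request(). A
minimal sketch of the before/after pattern; the function name and the
surrounding error handling are illustrative only, not taken from any
one call site above:

        /* Hypothetical caller, shown only to illustrate the rename. */
        static int example_simple_command(struct request_queue *q)
        {
                struct request *rq;

                /* was: rq = blk_get_request(q, REQ_OP_DRV_IN, 0); */
                rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
                if (IS_ERR(rq))
                        return PTR_ERR(rq);

                /* ... set up and execute the request here ... */

                /* was: blk_put_request(rq); */
                blk_mq_free_request(rq);
                return 0;
        }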