        aob->request.data = (u64) aobrq;
        scmrq->bdev = bdev;
        scmrq->retries = 4;
 -      scmrq->error = 0;
 +      scmrq->error = BLK_STS_OK;
        /* We don't use all msbs - place aidaws at the end of the aob page. */
        scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
-       scm_request_cluster_init(scmrq);
  }
  
- static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
- {
-       if (atomic_read(&bdev->queued_reqs)) {
-               /* Queue restart is triggered by the next interrupt. */
-               return;
-       }
-       blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
- }
- 
- void scm_request_requeue(struct scm_request *scmrq)
+ static void scm_request_requeue(struct scm_request *scmrq)
  {
        struct scm_blk_dev *bdev = scmrq->bdev;
        int i;

[...]

                return;
  
  requeue:
-       spin_lock_irqsave(&bdev->rq_lock, flags);
        scm_request_requeue(scmrq);
-       spin_unlock_irqrestore(&bdev->rq_lock, flags);
  }
  
- static void scm_blk_tasklet(struct scm_blk_dev *bdev)
 -void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
++void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
  {
-       struct scm_request *scmrq;
-       unsigned long flags;
- 
-       spin_lock_irqsave(&bdev->lock, flags);
-       while (!list_empty(&bdev->finished_requests)) {
-               scmrq = list_first_entry(&bdev->finished_requests,
-                                        struct scm_request, list);
-               list_del(&scmrq->list);
-               spin_unlock_irqrestore(&bdev->lock, flags);
+       struct scm_request *scmrq = data;
  
-               if (scmrq->error && scmrq->retries-- > 0) {
+       scmrq->error = error;
+       if (error) {
+               __scmrq_log_error(scmrq);
+               if (scmrq->retries-- > 0) {
                        scm_blk_handle_error(scmrq);
- 
-                       /* Request restarted or requeued, handle next. */
-                       spin_lock_irqsave(&bdev->lock, flags);
-                       continue;
+                       return;
                }
+       }
  
-               if (scm_test_cluster_request(scmrq)) {
-                       scm_cluster_request_irq(scmrq);
-                       spin_lock_irqsave(&bdev->lock, flags);
-                       continue;
-               }
+       scm_request_finish(scmrq);
+ }
  
-               scm_request_finish(scmrq);
-               spin_lock_irqsave(&bdev->lock, flags);
-       }
-       spin_unlock_irqrestore(&bdev->lock, flags);
-       /* Look out for more requests. */
-       blk_run_queue(bdev->rq);
+ static void scm_blk_request_done(struct request *req)
+ {
+       blk_mq_end_request(req, 0);
  }
  
  static const struct block_device_operations scm_blk_devops = {

[...]

diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h

        struct aob *aob;
        struct list_head list;
        u8 retries;
 -      int error;
 +      blk_status_t error;
- #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
-       struct {
-               enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
-               struct list_head list;
-               void **buf;
-       } cluster;
- #endif
  };
  
  #define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
  int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
  void scm_blk_dev_cleanup(struct scm_blk_dev *);
  void scm_blk_set_available(struct scm_blk_dev *);
 -void scm_blk_irq(struct scm_device *, void *, int);
 +void scm_blk_irq(struct scm_device *, void *, blk_status_t);
  
- void scm_request_finish(struct scm_request *);
- void scm_request_requeue(struct scm_request *);
- 
  struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);
  
  int scm_drv_init(void);
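
The retry logic introduced above is compact enough to model outside the kernel. The following stand-alone C sketch mirrors the control flow of the reworked scm_blk_irq(): record the status, retry on error while retries remain, otherwise finish the request directly, with no tasklet, finished-request list, or locking in between. All types and helpers here (blk_status_t, __scmrq_log_error(), scm_blk_handle_error(), scm_request_finish(), the BLK_STS_* values) are simplified user-space stand-ins for illustration, not the kernel implementations.

#include <stdio.h>

typedef int blk_status_t;          /* stand-in for the kernel type */
#define BLK_STS_OK    0
#define BLK_STS_IOERR 10           /* illustrative value only */

struct scm_request {
	unsigned int retries;      /* u8 in the driver; starts at 4 */
	blk_status_t error;
};

static void __scmrq_log_error(struct scm_request *scmrq)
{
	printf("I/O error, status %d\n", scmrq->error);
}

static void scm_blk_handle_error(struct scm_request *scmrq)
{
	printf("restarting/requeueing, %u retries left\n", scmrq->retries);
}

static void scm_request_finish(struct scm_request *scmrq)
{
	printf("finished with status %d\n", scmrq->error);
}

/* Mirrors the reworked scm_blk_irq(): the interrupt handler either
 * retries the failed request or completes it on the spot. */
static void scm_blk_irq(struct scm_request *scmrq, blk_status_t error)
{
	scmrq->error = error;
	if (error) {
		__scmrq_log_error(scmrq);
		if (scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);
			return;
		}
	}
	scm_request_finish(scmrq);
}

int main(void)
{
	struct scm_request rq = { .retries = 4, .error = BLK_STS_OK };

	scm_blk_irq(&rq, BLK_STS_IOERR); /* error path: retried */
	scm_blk_irq(&rq, BLK_STS_OK);    /* success path: finished */
	return 0;
}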