/* bfq_queue in service */
        struct bfq_queue *in_service_queue;
-       /* bfq_io_cq (bic) associated with the @in_service_queue */
-       struct bfq_io_cq *in_service_bic;
 
        /* on-disk position of the last served request */
        sector_t last_position;
        struct bfq_io_cq *bio_bic;
        /* bfqq associated with the task issuing current bio for merging */
        struct bfq_queue *bio_bfqq;
-
-       /*
-        * io context to put right after bfqd->lock is released. This
-        * filed is used to perform put_io_context, when needed, to
-        * after the scheduler lock has been released, and thus
-        * prevent an ioc->lock from being possibly taken while the
-        * scheduler lock is being held.
-        */
-       struct io_context *ioc_to_put;
 };
 
 enum bfqq_state_flags {
        }
 }
 
-/*
- * Next two functions release bfqd->lock and put the io context
- * pointed by bfqd->ioc_to_put. This delayed put is used to not risk
- * to take an ioc->lock while the scheduler lock is being held.
- */
-static void bfq_unlock_put_ioc(struct bfq_data *bfqd)
-{
-       struct io_context *ioc_to_put = bfqd->ioc_to_put;
-
-       bfqd->ioc_to_put = NULL;
-       spin_unlock_irq(&bfqd->lock);
-
-       if (ioc_to_put)
-               put_io_context(ioc_to_put);
-}
-
-static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd,
-                                      unsigned long flags)
-{
-       struct io_context *ioc_to_put = bfqd->ioc_to_put;
-
-       bfqd->ioc_to_put = NULL;
-       spin_unlock_irqrestore(&bfqd->lock, flags);
-
-       if (ioc_to_put)
-               put_io_context(ioc_to_put);
-}
-
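
For context: the two helpers removed above implemented a deferred-put
pattern. The io_context to release was stashed in bfqd->ioc_to_put while
bfqd->lock was held, and put_io_context() ran only after the unlock, so
that ioc->lock could never be taken under the scheduler lock. A minimal
sketch of that pattern, condensed from the removed code (the function
name is illustrative):

	static void example_unlock_and_put(struct bfq_data *bfqd)
	{
		struct io_context *ioc = bfqd->ioc_to_put;

		/* clear the stash while still holding the scheduler lock */
		bfqd->ioc_to_put = NULL;
		spin_unlock_irq(&bfqd->lock);

		/*
		 * Only now may put_io_context() run: if it needs ioc->lock,
		 * it takes it without bfqd->lock held, so the two locks are
		 * never nested in this direction.
		 */
		if (ioc)
			put_io_context(ioc);
	}

After this patch no io_context reference is ever put while bfqd->lock is
held, so a plain spin_unlock_irq()/spin_unlock_irqrestore() suffices at
every call site below.
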
 /**
  * bfq_gt - compare two timestamps.
  * @a: first ts.
        struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
        struct bfq_entity *entity = in_serv_entity;
 
-       if (bfqd->in_service_bic) {
-               /*
-                * Schedule the release of a reference to
-                * bfqd->in_service_bic->icq.ioc to right after the
-                * scheduler lock is released. This ioc is not
-                * released immediately, to not risk to possibly take
-                * an ioc->lock while holding the scheduler lock.
-                */
-               bfqd->ioc_to_put = bfqd->in_service_bic->icq.ioc;
-               bfqd->in_service_bic = NULL;
-       }
-
        bfq_clear_bfqq_wait_request(in_serv_bfqq);
        hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
        bfqd->in_service_queue = NULL;
        __bfq_deactivate_entity(entity, false);
        bfq_put_async_queues(bfqd, bfqg);
 
-       bfq_unlock_put_ioc_restore(bfqd, flags);
+       spin_unlock_irqrestore(&bfqd->lock, flags);
        /*
         * @blkg is going offline and will be ignored by
         * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
         * first time that the requests of some process are redirected to
         * it.
         *
-        * We redirect bfqq to new_bfqq and not the opposite, because we
-        * are in the context of the process owning bfqq, hence we have
-        * the io_cq of this process. So we can immediately configure this
-        * io_cq to redirect the requests of the process to new_bfqq.
+        * We redirect bfqq to new_bfqq and not the opposite, because
+        * we are in the context of the process owning bfqq, and thus
+        * have the io_cq of this process. So we can immediately
+        * configure this io_cq to redirect the requests of the
+        * process to new_bfqq. In contrast, the io_cq of new_bfqq is
+        * not available any more (new_bfqq->bic == NULL).
         *
-        * NOTE, even if new_bfqq coincides with the in-service queue, the
-        * io_cq of new_bfqq is not available, because, if the in-service
-        * queue is shared, bfqd->in_service_bic may not point to the
-        * io_cq of the in-service queue.
-        * Redirecting the requests of the process owning bfqq to the
-        * currently in-service queue is in any case the best option, as
-        * we feed the in-service queue with new requests close to the
-        * last request served and, by doing so, hopefully increase the
-        * throughput.
+        * Anyway, even if new_bfqq coincides with the in-service
+        * queue, redirecting requests to the in-service queue is the
+        * best option, as we feed the in-service queue with new
+        * requests close to the last request served and, by doing so,
+        * are likely to increase the throughput.
         */
        bfqq->new_bfqq = new_bfqq;
        new_bfqq->ref += process_refs;
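
As a side note on the chain being set up here: before installing
bfqq->new_bfqq, bfq walks any pre-existing chain of scheduled merges to
find the final destination queue and to refuse cycles. A condensed
sketch of that walk, modeled on the loop earlier in bfq_setup_merge()
(the function name example_chain_tail is illustrative, not part of the
patched sources):

	static struct bfq_queue *
	example_chain_tail(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
	{
		struct bfq_queue *__bfqq;

		/*
		 * Follow the chain of scheduled merges hanging off
		 * new_bfqq. If the chain loops back to bfqq, merging
		 * would create a cycle, so give up; otherwise merge
		 * onto the tail of the chain.
		 */
		while ((__bfqq = new_bfqq->new_bfqq)) {
			if (__bfqq == bfqq)
				return NULL;	/* cycle: refuse to merge */
			new_bfqq = __bfqq;
		}
		return new_bfqq;
	}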
 
        in_service_bfqq = bfqd->in_service_queue;
 
-       if (!in_service_bfqq || in_service_bfqq == bfqq ||
-           !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) ||
+       if (!in_service_bfqq || in_service_bfqq == bfqq ||
+           wr_from_too_long(in_service_bfqq) ||
            unlikely(in_service_bfqq == &bfqd->oom_bfqq))
                goto check_scheduled;
 
        bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
 }
 
-static void bfq_get_bic_reference(struct bfq_queue *bfqq)
-{
-       /*
-        * If bfqq->bic has a non-NULL value, the bic to which it belongs
-        * is about to begin using a shared bfq_queue.
-        */
-       if (bfqq->bic)
-               atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
-}
-
 static void
 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
                struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
        bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
                     bfqd->wr_busy_queues);
 
-       /*
-        * Grab a reference to the bic, to prevent it from being destroyed
-        * before being possibly touched by a bfq_split_bfqq().
-        */
-       bfq_get_bic_reference(bfqq);
-       bfq_get_bic_reference(new_bfqq);
        /*
         * Merge queues (that is, let bic redirect its requests to new_bfqq)
         */
 static void bfq_arm_slice_timer(struct bfq_data *bfqd)
 {
        struct bfq_queue *bfqq = bfqd->in_service_queue;
-       struct bfq_io_cq *bic;
        u32 sl;
 
-       /* Processes have exited, don't wait. */
-       bic = bfqd->in_service_bic;
-       if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0)
-               return;
-
        bfq_mark_bfqq_wait_request(bfqq);
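
After marking the queue as waiting, bfq_arm_slice_timer() computes the
idling duration and arms the high-resolution idle timer. A condensed
sketch of the arming step, based on bfq's use of idle_slice_timer
(simplified; the real function may also scale sl before arming):

	u32 sl = bfqd->bfq_slice_idle;

	/*
	 * Arm the timer: if no new request from bfqq shows up within
	 * sl nanoseconds, the handler fires and the queue is expired.
	 */
	bfqd->last_idling_start = ktime_get();
	hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
		      HRTIMER_MODE_REL);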
 
        /*
         */
        bfq_update_wr_data(bfqd, bfqq);
 
-       if (!bfqd->in_service_bic) {
-               atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
-               bfqd->in_service_bic = RQ_BIC(rq);
-       }
-
        /*
         * Expire bfqq, pretending that its budget expired, if bfqq
         * belongs to CLASS_IDLE and other queues are waiting for
        spin_lock_irq(&bfqd->lock);
 
        rq = __bfq_dispatch_request(hctx);
-       bfq_unlock_put_ioc(bfqd);
+       spin_unlock_irq(&bfqd->lock);
 
        return rq;
 }
                unsigned long flags;
 
                spin_lock_irqsave(&bfqd->lock, flags);
-               /*
-                * If the bic is using a shared queue, put the
-                * reference taken on the io_context when the bic
-                * started using a shared bfq_queue. This put cannot
-                * make ioc->ref_count reach 0, then no ioc->lock
-                * risks to be taken (leading to possible deadlock
-                * scenarios).
-                */
-               if (is_sync && bfq_bfqq_coop(bfqq))
-                       put_io_context(bic->icq.ioc);
-
                bfq_exit_bfqq(bfqd, bfqq);
                bic_set_bfqq(bic, NULL, is_sync);
-               bfq_unlock_put_ioc_restore(bfqd, flags);
+               spin_unlock_irqrestore(&bfqd->lock, flags);
        }
 }
 
                }
        }
 
-       bfq_unlock_put_ioc(bfqd);
+       spin_unlock_irq(&bfqd->lock);
 }
 
 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
                bfq_completed_request(bfqq, bfqd);
                bfq_put_rq_priv_body(bfqq);
 
-               bfq_unlock_put_ioc_restore(bfqd, flags);
+               spin_unlock_irqrestore(&bfqd->lock, flags);
        } else {
                /*
                 * Request rq may be still/already in the scheduler,
        const int is_sync = rq_is_sync(rq);
        struct bfq_queue *bfqq;
        bool new_queue = false;
+       bool split = false;
 
        spin_lock_irq(&bfqd->lock);
 
                                bic->saved_in_large_burst = true;
 
                        bfqq = bfq_split_bfqq(bic, bfqq);
-                       /*
-                        * A reference to bic->icq.ioc needs to be
-                        * released after a queue split. Do not do it
-                        * immediately, to not risk to possibly take
-                        * an ioc->lock while holding the scheduler
-                        * lock.
-                        */
-                       bfqd->ioc_to_put = bic->icq.ioc;
+                       split = true;
 
                        if (!bfqq)
                                bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
         */
        if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
                bfqq->bic = bic;
-               if (bfqd->ioc_to_put) { /* if true, there has been a split */
+               if (split) {
                        /*
                         * The queue has just been split from a shared
                         * queue: restore the idle window and the
        if (unlikely(bfq_bfqq_just_created(bfqq)))
                bfq_handle_burst(bfqd, bfqq);
 
-       bfq_unlock_put_ioc(bfqd);
+       spin_unlock_irq(&bfqd->lock);
 
        return 0;
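
The split bookkeeping above now lives entirely in the local split flag,
instead of being signaled through bfqd->ioc_to_put as before. A
condensed sketch of the resulting control flow (the function name and
the must_split parameter are illustrative; the real code derives the
split condition from the bic and burst state):

	static struct bfq_queue *
	example_setup_queue(struct bfq_data *bfqd, struct bfq_io_cq *bic,
			    struct bio *bio, bool is_sync, bool must_split)
	{
		struct bfq_queue *bfqq =
			bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync);
		bool split = false;

		if (must_split) {
			bfqq = bfq_split_bfqq(bic, bfqq);
			split = true;	/* purely local; no bfqd field */
			if (!bfqq)	/* split failed: get a fresh queue */
				bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
								 true, is_sync);
		}

		if (split) {
			/* queue just un-shared: restore state saved in bic */
		}

		return bfqq;
	}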
 
        bfq_bfqq_expire(bfqd, bfqq, true, reason);
 
 schedule_dispatch:
-       bfq_unlock_put_ioc_restore(bfqd, flags);
+       spin_unlock_irqrestore(&bfqd->lock, flags);
        bfq_schedule_dispatch(bfqd);
 }