PAGE_SIZE);
 }
 
-static inline bool ublk_queue_can_use_recovery_reissue(
-               struct ublk_queue *ubq)
+/*
+ * When the ublk server exits, should I/O outstanding to it be
+ * reissued? If not, outstanding I/O will get errors.
+ */
+static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub)
 {
-       return (ubq->flags & UBLK_F_USER_RECOVERY) &&
-                       (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
+       return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
+              (ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE);
 }
 
-static inline bool ublk_queue_can_use_recovery(
-               struct ublk_queue *ubq)
+/*
+ * Should I/O issued while there is no ublk server be queued? If not,
+ * I/O issued while there is no ublk server will get errors.
+ */
+static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub)
+{
+       return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
+}
+
+/*
+ * Same as ublk_nosrv_dev_should_queue_io, but uses a queue-local copy
+ * of the device flags for a smaller cache footprint - better for fast
+ * paths.
+ */
+static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq)
 {
        return ubq->flags & UBLK_F_USER_RECOVERY;
 }
 
-static inline bool ublk_can_use_recovery(struct ublk_device *ub)
+/*
+ * Should ublk devices be stopped (i.e. no recovery possible) when the
+ * ublk server exits? If not, devices can be used again by a future
+ * incarnation of a ublk server via the start_recovery/end_recovery
+ * commands.
+ */
+static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub)
 {
-       return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
+       return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY);
 }
 
 static void ublk_free_disk(struct gendisk *disk)
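
Taken together, the three helpers above select one of three "nosrv"
policies from the feature flags. As a quick illustration (not part of
the patch), here is how a userspace ublk server might pick its flags at
device-creation time; the helper name is hypothetical, and the flag
constants come from the <linux/ublk_cmd.h> uapi header:

    #include <stdbool.h>
    #include <linux/ublk_cmd.h>

    /*
     * Hypothetical helper mapping a desired no-server policy to flags:
     *   0                            - server exit stops the device,
     *                                  outstanding I/O gets errors
     *   UBLK_F_USER_RECOVERY         - device is quiesced, new I/O is
     *                                  queued for a future server
     *   ...| UBLK_F_USER_RECOVERY_REISSUE - as above, and outstanding
     *                                  I/O is reissued, not errored
     */
    static __u64 ublk_policy_to_flags(bool want_recovery, bool want_reissue)
    {
        __u64 flags = 0;

        if (want_recovery)
            flags |= UBLK_F_USER_RECOVERY;
        if (want_recovery && want_reissue)
            flags |= UBLK_F_USER_RECOVERY_REISSUE;
        return flags;
    }
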
 {
        WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
 
-       if (ublk_queue_can_use_recovery_reissue(ubq))
+       if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
                blk_mq_requeue_request(req, false);
        else
                ublk_put_req_ref(ubq, req);
                struct request *rq)
 {
        /* We cannot process this rq so just requeue it. */
-       if (ublk_queue_can_use_recovery(ubq))
+       if (ublk_nosrv_dev_should_queue_io(ubq->dev))
                blk_mq_requeue_request(rq, false);
        else
                blk_mq_end_request(rq, BLK_STS_IOERR);
                struct ublk_device *ub = ubq->dev;
 
                if (ublk_abort_requests(ub, ubq)) {
-                       if (ublk_can_use_recovery(ub))
-                               schedule_work(&ub->quiesce_work);
-                       else
+                       if (ublk_nosrv_should_stop_dev(ub))
                                schedule_work(&ub->stop_work);
+                       else
+                               schedule_work(&ub->quiesce_work);
                }
                return BLK_EH_DONE;
        }
         * Note: force_abort is guaranteed to be seen because it is set
         * before request queue is unquiesced.
         */
-       if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
+       if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
                return BLK_STS_IOERR;
 
        if (unlikely(ubq->canceling)) {
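
The check just added above is the fast-path variant: it tests the
queue-local ubq->flags snapshot rather than chasing ubq->dev->dev_info,
per the comment on ublk_nosrv_should_queue_io(). For context, a minimal
sketch of where that snapshot comes from (modeled on the queue setup in
drivers/block/ublk_drv.c; the function name is illustrative and all
unrelated initialization is omitted):

    /*
     * At queue init the device flags are copied into the queue, so
     * hot-path checks like ublk_nosrv_should_queue_io() touch only
     * queue-local memory.
     */
    static void ublk_queue_snapshot_flags(struct ublk_device *ub,
                                          struct ublk_queue *ubq)
    {
        ubq->flags = ub->dev_info.flags;
    }
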
        ublk_cancel_cmd(ubq, io, issue_flags);
 
        if (need_schedule) {
-               if (ublk_can_use_recovery(ub))
-                       schedule_work(&ub->quiesce_work);
-               else
+               if (ublk_nosrv_should_stop_dev(ub))
                        schedule_work(&ub->stop_work);
+               else
+                       schedule_work(&ub->quiesce_work);
        }
 }
 
        mutex_lock(&ub->mutex);
        if (ub->dev_info.state == UBLK_S_DEV_DEAD)
                goto unlock;
-       if (ublk_can_use_recovery(ub)) {
+       if (ublk_nosrv_dev_should_queue_io(ub)) {
                if (ub->dev_info.state == UBLK_S_DEV_LIVE)
                        __ublk_quiesce_dev(ub);
                ublk_unquiesce_dev(ub);
        int i;
 
        mutex_lock(&ub->mutex);
-       if (!ublk_can_use_recovery(ub))
+       if (ublk_nosrv_should_stop_dev(ub))
                goto out_unlock;
        if (!ub->nr_queues_ready)
                goto out_unlock;
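
The hunk above and the one below are the control-plane call sites
behind the start_recovery/end_recovery commands mentioned in the
ublk_nosrv_should_stop_dev() comment. For orientation, a hedged sketch
of the userspace side of that handshake; send_ctrl_cmd() is a
hypothetical stand-in for the server's io_uring submission to
/dev/ublk-control, and the command constants are from the
<linux/ublk_cmd.h> uapi header:

    #include <linux/ublk_cmd.h>

    int send_ctrl_cmd(int dev_id, unsigned int cmd_op, __u64 data0);

    /* Re-attach a new server incarnation to a quiesced device. */
    int recover_device(int dev_id, int new_ublksrv_pid)
    {
        int ret;

        /* Tell the kernel a recovery is starting. */
        ret = send_ctrl_cmd(dev_id, UBLK_U_CMD_START_USER_RECOVERY, 0);
        if (ret)
            return ret;

        /* ... re-open /dev/ublkcN and re-fetch all I/O commands ... */

        /* Hand the device to the new server and unquiesce it;
         * data[0] carries the new daemon's pid. */
        return send_ctrl_cmd(dev_id, UBLK_U_CMD_END_USER_RECOVERY,
                             (__u64)new_ublksrv_pid);
    }
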
                        __func__, ub->dev_info.nr_hw_queues, header->dev_id);
 
        mutex_lock(&ub->mutex);
-       if (!ublk_can_use_recovery(ub))
+       if (ublk_nosrv_should_stop_dev(ub))
                goto out_unlock;
 
        if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {