unsigned int            nr_queues_ready;
        unsigned int            nr_privileged_daemon;
 
-       struct work_struct      quiesce_work;
-       struct work_struct      stop_work;
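+       /* stops or quiesces the device when the ublk server goes away */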
+       struct work_struct      nosrv_work;
 };
 
 /* header of ublk_params */
                struct ublk_device *ub = ubq->dev;
 
-               if (ublk_abort_requests(ub, ubq)) {
-                       if (ublk_nosrv_should_stop_dev(ub))
-                               schedule_work(&ub->stop_work);
-                       else
-                               schedule_work(&ub->quiesce_work);
-               }
+               if (ublk_abort_requests(ub, ubq))
+                       schedule_work(&ub->nosrv_work);
                return BLK_EH_DONE;
        }
        ublk_cancel_cmd(ubq, io, issue_flags);
 
-       if (need_schedule) {
-               if (ublk_nosrv_should_stop_dev(ub))
-                       schedule_work(&ub->stop_work);
-               else
-                       schedule_work(&ub->quiesce_work);
-       }
+       if (need_schedule)
+               schedule_work(&ub->nosrv_work);
 }
 
        ub->dev_info.state = UBLK_S_DEV_QUIESCED;
 }
 
-static void ublk_quiesce_work_fn(struct work_struct *work)
-{
-       struct ublk_device *ub =
-               container_of(work, struct ublk_device, quiesce_work);
-
-       mutex_lock(&ub->mutex);
-       if (ub->dev_info.state != UBLK_S_DEV_LIVE)
-               goto unlock;
-       __ublk_quiesce_dev(ub);
- unlock:
-       mutex_unlock(&ub->mutex);
-       ublk_cancel_dev(ub);
-}
-
 static void ublk_unquiesce_dev(struct ublk_device *ub)
 {
        int i;
        ublk_cancel_dev(ub);
 }
 
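+/*
+ * No ublk server is left to serve the device: either stop it outright,
+ * or quiesce it so that a new server can recover it later, depending on
+ * ublk_nosrv_should_stop_dev().
+ */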
+static void ublk_nosrv_work(struct work_struct *work)
+{
+       struct ublk_device *ub =
+               container_of(work, struct ublk_device, nosrv_work);
+
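+       /* not recoverable: stop the device */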
+       if (ublk_nosrv_should_stop_dev(ub)) {
+               ublk_stop_dev(ub);
+               return;
+       }
+
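+       /* recoverable: quiesce the device so a new server can take over */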
+       mutex_lock(&ub->mutex);
+       if (ub->dev_info.state != UBLK_S_DEV_LIVE)
+               goto unlock;
+       __ublk_quiesce_dev(ub);
+ unlock:
+       mutex_unlock(&ub->mutex);
+       ublk_cancel_dev(ub);
+}
+
 /* device can only be started after all IOs are ready */
 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 {
        return ret;
 }
 
-static void ublk_stop_work_fn(struct work_struct *work)
-{
-       struct ublk_device *ub =
-               container_of(work, struct ublk_device, stop_work);
-
-       ublk_stop_dev(ub);
-}
-
 /* align max io buffer size with PAGE_SIZE */
 static void ublk_align_max_io_size(struct ublk_device *ub)
 {
 static void ublk_remove(struct ublk_device *ub)
 {
        ublk_stop_dev(ub);
-       cancel_work_sync(&ub->stop_work);
-       cancel_work_sync(&ub->quiesce_work);
+       cancel_work_sync(&ub->nosrv_work);
        cdev_device_del(&ub->cdev, &ub->cdev_dev);
        ublk_put_device(ub);
        ublks_added--;
                goto out_unlock;
        mutex_init(&ub->mutex);
        spin_lock_init(&ub->lock);
-       INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
-       INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
+       INIT_WORK(&ub->nosrv_work, ublk_nosrv_work);
 
        ret = ublk_alloc_dev_number(ub, header->dev_id);
        if (ret < 0)
 static int ublk_ctrl_stop_dev(struct ublk_device *ub)
 {
        ublk_stop_dev(ub);
-       cancel_work_sync(&ub->stop_work);
-       cancel_work_sync(&ub->quiesce_work);
-
+       cancel_work_sync(&ub->nosrv_work);
        return 0;
 }