        struct nbd_config *config;
        struct mutex config_lock;
        struct gendisk *disk;
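+       /* Per-device receive workqueue; replaces the single global
+        * recv_workqueue that is removed below.
+        */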
+       struct workqueue_struct *recv_workq;
 
        struct list_head list;
        struct task_struct *task_recv;
 
 static unsigned int nbds_max = 16;
 static int max_part = 16;
-static struct workqueue_struct *recv_workqueue;
 static int part_shift;
 
 static int nbd_dev_dbg_init(struct nbd_device *nbd);
                /* We take the tx_mutex in an error path in the recv_work, so we
                 * need to queue_work outside of the tx_mutex.
                 */
-               queue_work(recv_workqueue, &args->work);
+               queue_work(nbd->recv_workq, &args->work);
 
                atomic_inc(&config->live_connections);
                wake_up(&config->conn_wait);
                kfree(nbd->config);
                nbd->config = NULL;
 
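+               /* Every queued recv work holds a config ref (see the
+                * flush_workqueue comment added further down), so once
+                * the last ref is dropped the queue is idle and can be
+                * destroyed.
+                */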
+               if (nbd->recv_workq)
+                       destroy_workqueue(nbd->recv_workq);
+               nbd->recv_workq = NULL;
+
                nbd->tag_set.timeout = 0;
                nbd->disk->queue->limits.discard_granularity = 0;
                nbd->disk->queue->limits.discard_alignment = 0;
                return -EINVAL;
        }
 
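+       /* One recv workqueue per device: alloc_workqueue() takes a
+        * printf-style name, so nbd->index fills the %d in "knbd%d-recv".
+        * The flags match the old global queue's.
+        */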
+       nbd->recv_workq = alloc_workqueue("knbd%d-recv",
+                                         WQ_MEM_RECLAIM | WQ_HIGHPRI |
+                                         WQ_UNBOUND, 0, nbd->index);
+       if (!nbd->recv_workq) {
+               dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
+               return -ENOMEM;
+       }
+
        blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
        nbd->task_recv = current;
 
                INIT_WORK(&args->work, recv_work);
                args->nbd = nbd;
                args->index = i;
-               queue_work(recv_workqueue, &args->work);
+               queue_work(nbd->recv_workq, &args->work);
        }
        nbd_size_update(nbd);
        return error;
        mutex_unlock(&nbd->config_lock);
        ret = wait_event_interruptible(config->recv_wq,
                                         atomic_read(&config->recv_threads) == 0);
-       if (ret)
+       if (ret) {
                sock_shutdown(nbd);
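+               /* The wait was interrupted; recv works may still be
+                * running. sock_shutdown() unblocks them, and the flush
+                * waits for them to finish.
+                */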
+               flush_workqueue(nbd->recv_workq);
+       }
        mutex_lock(&nbd->config_lock);
        nbd_bdev_reset(bdev);
        /* user requested, ignore socket errors */
        nbd_disconnect(nbd);
        nbd_clear_sock(nbd);
        mutex_unlock(&nbd->config_lock);
+       /*
+        * Make sure recv thread has finished, so it does not drop the last
+        * config ref and try to destroy the workqueue from inside the work
+        * queue.
+        */
+       if (nbd->recv_workq)
+               flush_workqueue(nbd->recv_workq);
        if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
                               &nbd->config->runtime_flags))
                nbd_config_put(nbd);
 
        if (nbds_max > 1UL << (MINORBITS - part_shift))
                return -EINVAL;
-       recv_workqueue = alloc_workqueue("knbd-recv",
-                                        WQ_MEM_RECLAIM | WQ_HIGHPRI |
-                                        WQ_UNBOUND, 0);
-       if (!recv_workqueue)
-               return -ENOMEM;
 
-       if (register_blkdev(NBD_MAJOR, "nbd")) {
-               destroy_workqueue(recv_workqueue);
+       if (register_blkdev(NBD_MAJOR, "nbd"))
                return -EIO;
-       }
 
        if (genl_register_family(&nbd_genl_family)) {
                unregister_blkdev(NBD_MAJOR, "nbd");
-               destroy_workqueue(recv_workqueue);
                return -EINVAL;
        }
        nbd_dbg_init();
 
        idr_destroy(&nbd_index_idr);
        genl_unregister_family(&nbd_genl_family);
-       destroy_workqueue(recv_workqueue);
        unregister_blkdev(NBD_MAJOR, "nbd");
 }