static struct class *nvme_class;
 
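+/*
+ * Transition the controller to the RESETTING state and queue
+ * ctrl->reset_work on nvme_wq.  Returns -EBUSY if the state change
+ * fails or a reset is already pending.
+ */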
+int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
+{
+       if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+               return -EBUSY;
+       if (!queue_work(nvme_wq, &ctrl->reset_work))
+               return -EBUSY;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
+
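+/*
+ * As nvme_reset_ctrl(), but flush the queued reset_work so the reset
+ * has completed by the time this returns.
+ */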
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+{
+       int ret;
+
+       ret = nvme_reset_ctrl(ctrl);
+       if (!ret)
+               flush_work(&ctrl->reset_work);
+       return ret;
+}
+
 static blk_status_t nvme_error_status(struct request *req)
 {
        switch (nvme_req(req)->status & 0x7ff) {
        if (nvme_keep_alive(ctrl)) {
                /* allocation failure, reset the controller */
                dev_err(ctrl->device, "keep-alive failed\n");
-               ctrl->ops->reset_ctrl(ctrl);
+               nvme_reset_ctrl_sync(ctrl);
                return;
        }
 }
                return nvme_dev_user_cmd(ctrl, argp);
        case NVME_IOCTL_RESET:
                dev_warn(ctrl->device, "resetting controller\n");
-               return ctrl->ops->reset_ctrl(ctrl);
+               return nvme_reset_ctrl_sync(ctrl);
        case NVME_IOCTL_SUBSYS_RESET:
                return nvme_reset_subsystem(ctrl);
        case NVME_IOCTL_RESCAN:
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
        int ret;
 
-       ret = ctrl->ops->reset_ctrl(ctrl);
+       ret = nvme_reset_ctrl_sync(ctrl);
        if (ret < 0)
                return ret;
        return count;
 
        struct blk_mq_tag_set   tag_set;
 
        struct work_struct      delete_work;
-       struct work_struct      reset_work;
        struct delayed_work     connect_work;
 
        struct kref             ref;
                return;
        }
 
-       if (!queue_work(nvme_wq, &ctrl->reset_work))
-               dev_err(ctrl->ctrl.device,
-                       "NVME-FC{%d}: error_recovery: Failed to schedule "
-                       "reset work\n", ctrl->cnum);
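+       /*
+        * If this fails, a reset is already pending or the controller
+        * is being torn down; either way there is nothing left to do.
+        */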
+       nvme_reset_ctrl(&ctrl->ctrl);
 }
 
 static enum blk_eh_timer_return
        struct nvme_fc_ctrl *ctrl =
                container_of(work, struct nvme_fc_ctrl, delete_work);
 
-       cancel_work_sync(&ctrl->reset_work);
+       cancel_work_sync(&ctrl->ctrl.reset_work);
        cancel_delayed_work_sync(&ctrl->connect_work);
 
        /*
 nvme_fc_reset_ctrl_work(struct work_struct *work)
 {
        struct nvme_fc_ctrl *ctrl =
-                       container_of(work, struct nvme_fc_ctrl, reset_work);
+               container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
        int ret;
 
        /* will block while waiting for io to terminate */
                        "NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
 }
 
-/*
- * called by the nvme core layer, for sysfs interface that requests
- * a reset of the nvme controller
- */
-static int
-nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
-{
-       struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
-
-       dev_info(ctrl->ctrl.device,
-               "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
-
-       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
-               return -EBUSY;
-
-       if (!queue_work(nvme_wq, &ctrl->reset_work))
-               return -EBUSY;
-
-       flush_work(&ctrl->reset_work);
-
-       return 0;
-}
-
 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
        .name                   = "fc",
        .module                 = THIS_MODULE,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
-       .reset_ctrl             = nvme_fc_reset_nvme_ctrl,
        .free_ctrl              = nvme_fc_nvme_ctrl_freed,
        .submit_async_event     = nvme_fc_submit_async_event,
        .delete_ctrl            = nvme_fc_del_nvme_ctrl,
        kref_init(&ctrl->ref);
 
        INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
-       INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
+       INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
        INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
        spin_lock_init(&ctrl->lock);
 
 
        struct device *device;  /* char device */
        struct list_head node;
        struct ida ns_ida;
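+       /* queued by nvme_reset_ctrl(); handler is set per transport */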
+       struct work_struct reset_work;
 
        struct opal_dev *opal_dev;
 
        int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
        int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
        int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
-       int (*reset_ctrl)(struct nvme_ctrl *ctrl);
        void (*free_ctrl)(struct nvme_ctrl *ctrl);
        void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
        int (*delete_ctrl)(struct nvme_ctrl *ctrl);
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
+int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
 
 struct sg_io_hdr;
 
 
 struct nvme_dev;
 struct nvme_queue;
 
-static int nvme_reset(struct nvme_dev *dev);
 static void nvme_process_cq(struct nvme_queue *nvmeq);
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
 
        u32 db_stride;
        void __iomem *bar;
        unsigned long bar_mapped_size;
-       struct work_struct reset_work;
        struct work_struct remove_work;
        struct mutex shutdown_lock;
        bool subsystem;
        if (nvme_should_reset(dev, csts)) {
                nvme_warn_reset(dev, csts);
                nvme_dev_disable(dev, false);
-               nvme_reset(dev);
+               nvme_reset_ctrl(&dev->ctrl);
                return BLK_EH_HANDLED;
        }
 
                         "I/O %d QID %d timeout, reset controller\n",
                         req->tag, nvmeq->qid);
                nvme_dev_disable(dev, false);
-               nvme_reset(dev);
+               nvme_reset_ctrl(&dev->ctrl);
 
                /*
                 * Mark the request as handled, since the inline shutdown
 
 static void nvme_reset_work(struct work_struct *work)
 {
-       struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
+       struct nvme_dev *dev =
+               container_of(work, struct nvme_dev, ctrl.reset_work);
        bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
        int result = -ENODEV;
 
        nvme_put_ctrl(&dev->ctrl);
 }
 
-static int nvme_reset(struct nvme_dev *dev)
-{
-       if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
-               return -ENODEV;
-       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
-               return -EBUSY;
-       if (!queue_work(nvme_wq, &dev->reset_work))
-               return -EBUSY;
-       return 0;
-}
-
 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 {
        *val = readl(to_nvme_dev(ctrl)->bar + off);
        return 0;
 }
 
-static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
-{
-       struct nvme_dev *dev = to_nvme_dev(ctrl);
-       int ret = nvme_reset(dev);
-
-       if (!ret)
-               flush_work(&dev->reset_work);
-       return ret;
-}
-
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
        .name                   = "pcie",
        .module                 = THIS_MODULE,
        .reg_read32             = nvme_pci_reg_read32,
        .reg_write32            = nvme_pci_reg_write32,
        .reg_read64             = nvme_pci_reg_read64,
-       .reset_ctrl             = nvme_pci_reset_ctrl,
        .free_ctrl              = nvme_pci_free_ctrl,
        .submit_async_event     = nvme_pci_submit_async_event,
 };
        if (result)
                goto free;
 
-       INIT_WORK(&dev->reset_work, nvme_reset_work);
+       INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
        INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
        mutex_init(&dev->shutdown_lock);
        init_completion(&dev->ioq_wait);
        nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
        dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
-       queue_work(nvme_wq, &dev->reset_work);
+       queue_work(nvme_wq, &dev->ctrl.reset_work);
        return 0;
 
  release_pools:
        if (prepare)
                nvme_dev_disable(dev, false);
        else
-               nvme_reset(dev);
+               nvme_reset_ctrl(&dev->ctrl);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
 
        nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
 
-       cancel_work_sync(&dev->reset_work);
+       cancel_work_sync(&dev->ctrl.reset_work);
        pci_set_drvdata(pdev, NULL);
 
        if (!pci_device_is_present(pdev)) {
                nvme_dev_disable(dev, false);
        }
 
-       flush_work(&dev->reset_work);
+       flush_work(&dev->ctrl.reset_work);
        nvme_uninit_ctrl(&dev->ctrl);
        nvme_dev_disable(dev, true);
        nvme_free_host_mem(dev);
        struct pci_dev *pdev = to_pci_dev(dev);
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-       nvme_reset(ndev);
+       nvme_reset_ctrl(&ndev->ctrl);
        return 0;
 }
 #endif
 
        dev_info(dev->ctrl.device, "restart after slot reset\n");
        pci_restore_state(pdev);
-       nvme_reset(dev);
+       nvme_reset_ctrl(&dev->ctrl);
        return PCI_ERS_RESULT_RECOVERED;
 }
 
 
        /* other member variables */
        struct blk_mq_tag_set   tag_set;
        struct work_struct      delete_work;
-       struct work_struct      reset_work;
        struct work_struct      err_work;
 
        struct nvme_rdma_qe     async_event_sqe;
 
 static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 {
-       struct nvme_rdma_ctrl *ctrl = container_of(work,
-                                       struct nvme_rdma_ctrl, reset_work);
+       struct nvme_rdma_ctrl *ctrl =
+               container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
        int ret;
        bool changed;
 
        WARN_ON(!queue_work(nvme_wq, &ctrl->delete_work));
 }
 
-static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl)
-{
-       struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-
-       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
-               return -EBUSY;
-
-       if (!queue_work(nvme_wq, &ctrl->reset_work))
-               return -EBUSY;
-
-       flush_work(&ctrl->reset_work);
-
-       return 0;
-}
-
 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
        .name                   = "rdma",
        .module                 = THIS_MODULE,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
-       .reset_ctrl             = nvme_rdma_reset_ctrl,
        .free_ctrl              = nvme_rdma_free_ctrl,
        .submit_async_event     = nvme_rdma_submit_async_event,
        .delete_ctrl            = nvme_rdma_del_ctrl,
                        nvme_rdma_reconnect_ctrl_work);
        INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
        INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
-       INIT_WORK(&ctrl->reset_work, nvme_rdma_reset_ctrl_work);
+       INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
 
        ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
        ctrl->ctrl.sqsize = opts->queue_size - 1;
 
 
        struct nvmet_ctrl       *target_ctrl;
        struct work_struct      delete_work;
-       struct work_struct      reset_work;
 };
 
 static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);
 
        /* queue error recovery */
-       queue_work(nvme_wq, &iod->queue->ctrl->reset_work);
+       nvme_reset_ctrl(&iod->queue->ctrl->ctrl);
 
        /* fail with DNR on admin cmd timeout */
        nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
 
 static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 {
-       struct nvme_loop_ctrl *ctrl = container_of(work,
-                                       struct nvme_loop_ctrl, reset_work);
+       struct nvme_loop_ctrl *ctrl =
+               container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
        bool changed;
        int ret;
 
        nvme_put_ctrl(&ctrl->ctrl);
 }
 
-static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
-{
-       struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
-
-       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
-               return -EBUSY;
-
-       if (!queue_work(nvme_wq, &ctrl->reset_work))
-               return -EBUSY;
-
-       flush_work(&ctrl->reset_work);
-
-       return 0;
-}
-
 static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
        .name                   = "loop",
        .module                 = THIS_MODULE,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
-       .reset_ctrl             = nvme_loop_reset_ctrl,
        .free_ctrl              = nvme_loop_free_ctrl,
        .submit_async_event     = nvme_loop_submit_async_event,
        .delete_ctrl            = nvme_loop_del_ctrl,
        INIT_LIST_HEAD(&ctrl->list);
 
        INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
-       INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);
+       INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);
 
        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
                                0 /* no quirks, we're perfect! */);