}
 CONFIGFS_ATTR(nvmet_passthru_, enable);
 
+static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
+               char *page)
+{
+       return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
+}
+
+static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
+               const char *page, size_t count)
+{
+       struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+       unsigned int timeout;
+
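+       /* base 0: accept decimal, octal, or hex; 0 keeps the default */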
+       if (kstrtouint(page, 0, &timeout))
+               return -EINVAL;
+       subsys->admin_timeout = timeout;
+       return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
+
 static struct configfs_attribute *nvmet_passthru_attrs[] = {
        &nvmet_passthru_attr_device_path,
        &nvmet_passthru_attr_enable,
+       &nvmet_passthru_attr_admin_timeout,
        NULL,
 };
 
 
        struct nvme_ctrl        *passthru_ctrl;
        char                    *passthru_ctrl_path;
        struct config_group     passthru_group;
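+       /* passthru admin command timeout; 0 means use the driver default */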
+       unsigned int            admin_timeout;
 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
 };
 
 
        struct request_queue *q = ctrl->admin_q;
        struct nvme_ns *ns = NULL;
        struct request *rq = NULL;
+       unsigned int timeout = 0;
        u32 effects;
        u16 status;
        int ret;
                }
 
                q = ns->queue;
+       } else {
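+               /* admin command: honor the subsystem's configurable admin timeout */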
+               timeout = req->sq->ctrl->subsys->admin_timeout;
        }
 
        rq = nvme_alloc_request(q, req->cmd, 0, NVME_QID_ANY);
                goto out_put_ns;
        }
 
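+       /* a non-zero timeout overrides the request's default timeout */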
+       if (timeout)
+               rq->timeout = timeout;
+
        if (req->sg_cnt) {
                ret = nvmet_passthru_map_sg(req, rq);
                if (unlikely(ret)) {