#include <asm/unaligned.h>
 
 #include "nvme.h"
+#include "fabrics.h"
 
 #define NVME_MINORS            (1U << MINORBITS)
 
                        result, timeout);
 }
 
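+/*
+ * Completion handler for an asynchronous keep-alive command: free the
+ * request and, on success, re-arm ka_work so that the next keep-alive is
+ * sent one kato period later.  On error, log it and stop re-arming.
+ */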
+static void nvme_keep_alive_end_io(struct request *rq, int error)
+{
+       struct nvme_ctrl *ctrl = rq->end_io_data;
+
+       blk_mq_free_request(rq);
+
+       if (error) {
+               dev_err(ctrl->device,
+                       "keep-alive completion failed, error=%d\n", error);
+               return;
+       }
+
+       schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+}
+
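+/*
+ * Build a keep-alive admin command and execute it asynchronously;
+ * completion is handled by nvme_keep_alive_end_io().  Fails only if
+ * no request could be allocated.
+ */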
+static int nvme_keep_alive(struct nvme_ctrl *ctrl)
+{
+       struct nvme_command c;
+       struct request *rq;
+
+       memset(&c, 0, sizeof(c));
+       c.common.opcode = nvme_admin_keep_alive;
+
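+       /* a reserved tag keeps keep-alives from starving behind admin commands */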
+       rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
+                       NVME_QID_ANY);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+
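+       /* give the controller one kato period to answer before timing out */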
+       rq->timeout = ctrl->kato * HZ;
+       rq->end_io_data = ctrl;
+
+       blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
+
+       return 0;
+}
+
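+/*
+ * Delayed work that sends the periodic keep-alive.  If even the reserved
+ * request cannot be allocated, escalate and reset the controller.
+ */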
+static void nvme_keep_alive_work(struct work_struct *work)
+{
+       struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
+                       struct nvme_ctrl, ka_work);
+
+       if (nvme_keep_alive(ctrl)) {
+               /* allocation failure, reset the controller */
+               dev_err(ctrl->device, "keep-alive failed\n");
+               ctrl->ops->reset_ctrl(ctrl);
+               return;
+       }
+}
+
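+/*
+ * Kick off periodic keep-alives.  A kato of 0 disables keep-alives
+ * entirely, as is the case for discovery controllers.
+ */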
+void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
+{
+       if (unlikely(ctrl->kato == 0))
+               return;
+
+       INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+       schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+}
+EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
+
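+/*
+ * Cancel pending keep-alive work and wait for a running work item to
+ * finish.  A no-op when keep-alives are disabled (kato == 0).
+ */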
+void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
+{
+       if (unlikely(ctrl->kato == 0))
+               return;
+
+       cancel_delayed_work_sync(&ctrl->ka_work);
+}
+EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
+
 int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
 {
        struct nvme_command c = { };
 
        nvme_set_queue_limits(ctrl, ctrl->admin_q);
        ctrl->sgls = le32_to_cpu(id->sgls);
+       ctrl->kas = le16_to_cpu(id->kas);
 
        if (ctrl->ops->is_fabrics) {
                ctrl->icdoff = le16_to_cpu(id->icdoff);
                 */
                if (ctrl->cntlid != le16_to_cpu(id->cntlid))
                        ret = -EINVAL;
+
+               if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
+                       dev_err(ctrl->device,
+                               "keep-alive support is mandatory for fabrics\n");
+                       ret = -EINVAL;
+               }
        } else {
                ctrl->cntlid = le16_to_cpu(id->cntlid);
        }
 
        cmd.connect.fctype = nvme_fabrics_type_connect;
        cmd.connect.qid = 0;
        cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+       /*
+        * The connect command carries kato with millisecond granularity, so
+        * convert from seconds (* 1000) and add a grace period for controller
+        * kato enforcement.  Discovery controllers do not use keep-alives.
+        */
+       cmd.connect.kato = ctrl->opts->discovery_nqn ? 0 :
+               cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000);
 
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
        { NVMF_OPT_NR_IO_QUEUES,        "nr_io_queues=%d"       },
        { NVMF_OPT_TL_RETRY_COUNT,      "tl_retry_count=%d"     },
        { NVMF_OPT_RECONNECT_DELAY,     "reconnect_delay=%d"    },
+       { NVMF_OPT_KATO,                "keep_alive_tmo=%d"     },
        { NVMF_OPT_HOSTNQN,             "hostnqn=%s"            },
        { NVMF_OPT_ERR,                 NULL                    }
 };
                        }
                        opts->tl_retry_count = token;
                        break;
+               case NVMF_OPT_KATO:
+                       if (match_int(args, &token)) {
+                               ret = -EINVAL;
+                               goto out;
+                       }
+
+                       if (opts->discovery_nqn) {
+                               pr_err("Discovery controllers cannot accept keep_alive_tmo\n");
+                               ret = -EINVAL;
+                               goto out;
+                       }
+
+                       if (token < 0) {
+                               pr_err("Invalid keep_alive_tmo %d\n", token);
+                               ret = -EINVAL;
+                               goto out;
+                       } else if (token == 0) {
+                               /* a zero timeout is allowed for debugging */
+                               pr_warn("keep_alive_tmo 0 disables keep-alives\n");
+                       }
+                       opts->kato = token;
+                       break;
                case NVMF_OPT_HOSTNQN:
                        if (opts->host) {
                                pr_err("hostnqn already user-assigned: %s\n",
        }
 
 out:
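+       /* non-discovery controllers default to NVME_DEFAULT_KATO seconds */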
+       if (!opts->discovery_nqn && !opts->kato)
+               opts->kato = NVME_DEFAULT_KATO;
        kfree(options);
        return ret;
 }
 
 #define NVMF_REQUIRED_OPTS     (NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
 #define NVMF_ALLOWED_OPTS      (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
-                                NVMF_OPT_HOSTNQN)
+                                NVMF_OPT_KATO | NVMF_OPT_HOSTNQN)
 
 static struct nvme_ctrl *
 nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 
        NVMF_OPT_QUEUE_SIZE     = 1 << 4,
        NVMF_OPT_NR_IO_QUEUES   = 1 << 5,
        NVMF_OPT_TL_RETRY_COUNT = 1 << 6,
+       NVMF_OPT_KATO           = 1 << 7,
        NVMF_OPT_HOSTNQN        = 1 << 8,
        NVMF_OPT_RECONNECT_DELAY = 1 << 9,
 };
  *                  kicking upper layer(s) error recovery.
  * @reconnect_delay: Time between two consecutive reconnect attempts.
  * @discovery_nqn: indicates if the subsysnqn is the well-known discovery NQN.
+ * @kato:      Keep-alive timeout, in seconds.
  * @host:      Virtual NVMe host, contains the NQN and Host ID.
  */
 struct nvmf_ctrl_options {
        unsigned short          tl_retry_count;
        unsigned int            reconnect_delay;
        bool                    discovery_nqn;
+       unsigned int            kato;
        struct nvmf_host        *host;
 };
 
 
 extern unsigned char shutdown_timeout;
 #define SHUTDOWN_TIMEOUT       (shutdown_timeout * HZ)
 
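+/* default keep-alive timeout and controller-side grace period, in seconds */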
+#define NVME_DEFAULT_KATO      5
+#define NVME_KATO_GRACE        10
+
 enum {
        NVME_NS_LBA             = 0,
        NVME_NS_LIGHTNVM        = 1,
        u8 vwc;
        u32 vs;
        u32 sgls;
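+       /* keep-alive: kas is the KAS field from Identify, kato is in seconds */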
+       u16 kas;
+       unsigned int kato;
        bool subsystem;
        unsigned long quirks;
        struct work_struct scan_work;
        struct work_struct async_event_work;
+       struct delayed_work ka_work;
 
        /* Fabrics only */
        u16 sqsize;
 int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
                        dma_addr_t dma_addr, u32 *result);
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
+void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
+void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 
 struct sg_io_hdr;