nvmet_req_complete(req, status);
 }
 
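+/*
+ * Fill in a single ANA group descriptor for @grpid and return its size.
+ * Unless the host set the RGO bit, the descriptor also lists the NSIDs of
+ * all namespaces that belong to the group.
+ */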
+static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
+               struct nvme_ana_group_desc *desc)
+{
+       struct nvmet_ctrl *ctrl = req->sq->ctrl;
+       struct nvmet_ns *ns;
+       u32 count = 0;
+
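+       /* with RGO (Return Groups Only) set, the host doesn't want NSIDs */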
+       if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
+               rcu_read_lock();
+               list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+                       if (ns->anagrpid == grpid)
+                               desc->nsids[count++] = cpu_to_le32(ns->nsid);
+               rcu_read_unlock();
+       }
+
+       desc->grpid = cpu_to_le32(grpid);
+       desc->nnsids = cpu_to_le32(count);
+       desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
+       desc->state = req->port->ana_state[grpid];
+       memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
+       return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
+}
+
+static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
+{
+       struct nvme_ana_rsp_hdr hdr = { 0, };
+       struct nvme_ana_group_desc *desc;
+       size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
+       size_t len;
+       u32 grpid;
+       u16 ngrps = 0;
+       u16 status;
+
+       status = NVME_SC_INTERNAL;
+       desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
+                       NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
+       if (!desc)
+               goto out;
+
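+       /* hold nvmet_ana_sem so the group states and chgcnt stay consistent */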
+       down_read(&nvmet_ana_sem);
+       for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
+               if (!nvmet_ana_group_enabled[grpid])
+                       continue;
+               len = nvmet_format_ana_group(req, grpid, desc);
+               status = nvmet_copy_to_sgl(req, offset, desc, len);
+               if (status)
+                       break;
+               offset += len;
+               ngrps++;
+       }
+
+       hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
+       hdr.ngrps = cpu_to_le16(ngrps);
+       up_read(&nvmet_ana_sem);
+
+       kfree(desc);
+
+       /* copy the header last once we know the number of groups */
+       status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
+out:
+       nvmet_req_complete(req, status);
+}
+
 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 {
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
         * the safest is to leave it as zeroes.
         */
 
-       /* we support multiple ports and multiples hosts: */
-       id->cmic = (1 << 0) | (1 << 1);
+       /* we support multiple ports, multiple hosts and ANA: */
+       id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
 
        /* no limit on data transfer sizes for now */
        id->mdts = 0;
 
        id->msdbd = ctrl->ops->msdbd;
 
+       id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
+       id->anatt = 10; /* in seconds, arbitrarily chosen */
+       id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
+       id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);
+
        /*
         * Meh, we don't really support any power state.  Fake up the same
         * values that qemu does.
         * nuse = ncap = nsze isn't always true, but we have no way to find
         * that out from the underlying device.
         */
-       id->ncap = id->nuse = id->nsze =
-               cpu_to_le64(ns->size >> ns->blksize_shift);
+       id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
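+       /* don't report utilization for inaccessible or lost ANA states */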
+       switch (req->port->ana_state[ns->anagrpid]) {
+       case NVME_ANA_INACCESSIBLE:
+       case NVME_ANA_PERSISTENT_LOSS:
+               break;
+       default:
+               id->nuse = id->nsze;
+               break;
+       }
 
        /*
         * We just provide a single LBA format that matches what the
         * controllers, but also with any other user of the block device.
         */
        id->nmic = (1 << 0);
+       id->anagrpid = cpu_to_le32(ns->anagrpid);
 
        memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
 
                case NVME_LOG_CMD_EFFECTS:
                        req->execute = nvmet_execute_get_log_cmd_effects_ns;
                        return 0;
+               case NVME_LOG_ANA:
+                       req->execute = nvmet_execute_get_log_page_ana;
+                       return 0;
                }
                break;
        case nvme_admin_identify:
 
  */
 DECLARE_RWSEM(nvmet_config_sem);
 
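+/*
+ * Per-group enable counts indexed by ANA group ID (0 is unused).  Groups
+ * with a zero count are not reported in the ANA log.  The counts and
+ * nvmet_ana_chgcnt are protected by nvmet_ana_sem.
+ */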
+u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
+u64 nvmet_ana_chgcnt;
+DECLARE_RWSEM(nvmet_ana_sem);
+
 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
                const char *subsysnqn);
 
 {
        nvmet_ns_disable(ns);
 
+       down_write(&nvmet_ana_sem);
+       nvmet_ana_group_enabled[ns->anagrpid]--;
+       up_write(&nvmet_ana_sem);
+
        kfree(ns->device_path);
        kfree(ns);
 }
 
        ns->nsid = nsid;
        ns->subsys = subsys;
+
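+       /* new namespaces always start out in the default ANA group */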
+       down_write(&nvmet_ana_sem);
+       ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
+       nvmet_ana_group_enabled[ns->anagrpid]++;
+       up_write(&nvmet_ana_sem);
+
        uuid_gen(&ns->uuid);
        ns->buffered_io = false;
 
 }
 EXPORT_SYMBOL_GPL(nvmet_sq_init);
 
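+/*
+ * Map the ANA state of the namespace's group on this port to an NVMe
+ * status code; a return value of 0 means I/O may proceed.
+ */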
+static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
+               struct nvmet_ns *ns)
+{
+       enum nvme_ana_state state = port->ana_state[ns->anagrpid];
+
+       if (unlikely(state == NVME_ANA_INACCESSIBLE))
+               return NVME_SC_ANA_INACCESSIBLE;
+       if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
+               return NVME_SC_ANA_PERSISTENT_LOSS;
+       if (unlikely(state == NVME_ANA_CHANGE))
+               return NVME_SC_ANA_TRANSITION;
+       return 0;
+}
+
 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
 {
        struct nvme_command *cmd = req->cmd;
        req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
        if (unlikely(!req->ns))
                return NVME_SC_INVALID_NS | NVME_SC_DNR;
+       ret = nvmet_check_ana_state(req->port, req->ns);
+       if (unlikely(ret))
+               return ret;
 
        if (req->ns->file)
                return nvmet_file_parse_io_cmd(req);
 {
        int error;
 
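+       /* the default ANA group is always enabled, even without namespaces */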
+       nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
+
        buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
                        WQ_MEM_RECLAIM, 0);
        if (!buffered_io_wq) {
                error = -ENOMEM;
                goto out;
        }
+
        error = nvmet_init_discovery();
        if (error)
                goto out;
 
        loff_t                  size;
        u8                      nguid[16];
        uuid_t                  uuid;
+       u32                     anagrpid;
 
        bool                    buffered_io;
        bool                    enabled;
        struct list_head                subsystems;
        struct config_group             referrals_group;
        struct list_head                referrals;
+       enum nvme_ana_state             *ana_state;
        void                            *priv;
        bool                            enabled;
        int                             inline_data_size;
  */
 #define NVMET_MAX_NAMESPACES   1024
 
+/*
+ * 0 is not a valid ANA group ID, so we start numbering at 1.
+ *
+ * ANA Group 1 exists without manual intervention, has namespaces assigned to
+ * it by default, and is available in an optimized state through all ports.
+ */
+#define NVMET_MAX_ANAGRPS      1
+#define NVMET_DEFAULT_ANA_GRPID        1
+
 #define NVMET_KAS              10
 #define NVMET_DISC_KATO                120
 
 extern u64 nvmet_genctr;
 extern struct rw_semaphore nvmet_config_sem;
 
+extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
+extern u64 nvmet_ana_chgcnt;
+extern struct rw_semaphore nvmet_ana_sem;
+
 bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
                const char *hostnqn);