--- /dev/null
+a.out
+*.o
+*.ol
+*.os
+*~
+*.swp
+*.a
+*.so.*
+test/test
+
+cscope.*
--- /dev/null
+# Top-level build: compile the test program against the library in lib/.
+CFLAGS = -O2 -g -Wall -Werror -std=gnu99 -D_GNU_SOURCE
+# -Ilib is a preprocessor include path, so it belongs in CPPFLAGS
+# (the link rule already expands $(CPPFLAGS)); LDFLAGS keeps only the
+# linker search path and the library itself.
+CPPFLAGS = -Ilib
+LDFLAGS = -Llib -lnvme
+
+QUIET_CC = @echo ' ' CC $@;
+
+# 'test' is a phony command; the real output file is test/test.
+test: test/test.c libnvme
+	$(QUIET_CC)$(CC) $(CPPFLAGS) $(CFLAGS) $< -o test/test $(LDFLAGS)
+
+# Delegate the library build to lib/Makefile.
+libnvme:
+	@$(MAKE) -C lib/
+
+clean:
+	rm -f test/test
+	$(MAKE) -C lib clean
+
+
+# clean must be phony too, or a stray file named 'clean' would break it.
+.PHONY: libnvme test clean
--- /dev/null
+# Library name and version; VERSION is scraped from the RPM spec file so
+# the version number is maintained in exactly one place.
+NAME=libnvme
+SPECFILE=$(NAME).spec
+VERSION=$(shell awk '/Version:/ { print $$2 }' $(SPECFILE))
+
+# Standard install locations, overridable from the command line.
+prefix ?= /usr
+includedir ?= $(prefix)/include
+libdir ?= $(prefix)/lib
+
+# User CFLAGS may be replaced entirely; -Wall and -fPIC are always
+# enforced via 'override' so even 'make CFLAGS=...' keeps them.
+CFLAGS ?= -g -fomit-frame-pointer -O2 -I/usr/include -Invme/
+override CFLAGS += -Wall -fPIC
+SO_CFLAGS=-shared $(CFLAGS)
+L_CFLAGS=$(CFLAGS)
+LINK_FLAGS= -L /usr/lib64
+LINK_FLAGS+=$(LDFLAGS)
+ENABLE_SHARED ?= 1
+SED ?= sed
+INSTALL ?= install
+
+# Shared object versioning: libnvme.so.1.0.1 with soname libnvme.so.1.
+soname=$(NAME).so.1
+minor=0
+micro=1
+libname=$(soname).$(minor).$(micro)
+# The static archive is always built; the .so is added conditionally below.
+all_targets += $(NAME).a
+
+# Substitute @key@ placeholders when generating a file from its .pc.in
+# template ($< is the template, $@ the generated output).
+SED_PROCESS = \
+	$(SED) -e "s%@prefix@%$(prefix)%g" \
+		-e "s%@libdir@%$(libdir)%g" \
+		-e "s%@includedir@%$(includedir)%g" \
+		-e "s%@NAME@%$(NAME)%g" \
+		-e "s%@VERSION@%$(VERSION)%g" \
+		$< >$@
+
+# Regenerate the pkg-config file whenever the template or this Makefile
+# (which carries the substituted values) changes.
+%.pc: %.pc.in Makefile
+	$(SED_PROCESS)
+
+ifeq ($(ENABLE_SHARED),1)
+all_targets += $(libname)
+endif
+
+# Pretty-print build steps unless V is set or make runs silently (-s).
+# findstring takes (needle,haystack): search for "s" within MAKEFLAGS,
+# not the other way around, so -s is actually detected and the echo
+# lines are suppressed in silent mode.
+ifneq ($(findstring s,$(MAKEFLAGS)),s)
+ifndef V
+	QUIET_CC = @echo ' ' CC $@;
+	QUIET_LINK = @echo ' ' LINK $@;
+	QUIET_AR = @echo ' ' AR $@;
+	QUIET_RANLIB = @echo ' ' RANLIB $@;
+endif
+endif
+
+all: $(all_targets)
+
+# Library sources; each file is compiled twice: .ol objects for the
+# static archive and .os (PIC, shared flags) objects for the .so.
+libnvme_srcs := nvme/ioctl.c nvme/filters.c nvme/fabrics.c nvme/util.c nvme/tree.c
+libnvme_objs := $(patsubst %.c,%.ol,$(libnvme_srcs))
+libnvme_sobjs := $(patsubst %.c,%.os,$(libnvme_srcs))
+
+# Rebuild every object when any public header changes (no per-file
+# dependency generation here, so this is deliberately coarse).
+$(libnvme_objs) $(libnvme_sobjs): libnvme.h nvme/types.h nvme/ioctl.h nvme/filters.h nvme/tree.h nvme/util.h nvme/cmd.h
+
+%.os: %.c
+	$(QUIET_CC)$(CC) $(SO_CFLAGS) -c -o $@ $<
+
+%.ol: %.c
+	$(QUIET_CC)$(CC) $(L_CFLAGS) -c -o $@ $<
+
+AR ?= ar
+RANLIB ?= ranlib
+# Static archive: remove any stale archive first so 'ar r' cannot keep
+# members for sources that no longer exist.
+libnvme.a: $(libnvme_objs)
+	@rm -f libnvme.a
+	$(QUIET_AR)$(AR) r libnvme.a $^
+	$(QUIET_RANLIB)$(RANLIB) libnvme.a
+
+# Shared library: symbol visibility is restricted by libnvme.map and the
+# soname is embedded for runtime version matching.
+$(libname): $(libnvme_sobjs) libnvme.map
+	$(QUIET_CC)$(CC) $(SO_CFLAGS) -Wl,--version-script=libnvme.map -Wl,-soname=$(soname) -o $@ $(libnvme_sobjs) $(LINK_FLAGS)
+
+# Install headers, libraries and the pkg-config file. Every destination
+# honors $(DESTDIR) so staged installs (e.g. the spec file's
+# 'make install DESTDIR=$RPM_BUILD_ROOT') land in the build root instead
+# of the live system. All headers included by libnvme.h are installed,
+# including tree.h, util.h and cmd.h, so the umbrella header compiles
+# for consumers.
+install: $(all_targets) $(NAME).pc
+	$(INSTALL) -D -m 644 nvme/types.h $(DESTDIR)$(includedir)/nvme/types.h
+	$(INSTALL) -D -m 644 nvme/ioctl.h $(DESTDIR)$(includedir)/nvme/ioctl.h
+	$(INSTALL) -D -m 644 nvme/fabrics.h $(DESTDIR)$(includedir)/nvme/fabrics.h
+	$(INSTALL) -D -m 644 nvme/filters.h $(DESTDIR)$(includedir)/nvme/filters.h
+	$(INSTALL) -D -m 644 nvme/tree.h $(DESTDIR)$(includedir)/nvme/tree.h
+	$(INSTALL) -D -m 644 nvme/util.h $(DESTDIR)$(includedir)/nvme/util.h
+	$(INSTALL) -D -m 644 nvme/cmd.h $(DESTDIR)$(includedir)/nvme/cmd.h
+	$(INSTALL) -D -m 644 libnvme.h $(DESTDIR)$(includedir)/libnvme.h
+	$(INSTALL) -D -m 644 libnvme.a $(DESTDIR)$(libdir)/libnvme.a
+	$(INSTALL) -D -m 644 $(NAME).pc $(DESTDIR)$(libdir)/pkgconfig/$(NAME).pc
+ifeq ($(ENABLE_SHARED),1)
+	$(INSTALL) -D -m 755 $(libname) $(DESTDIR)$(libdir)/$(libname)
+	ln -sf $(libname) $(DESTDIR)$(libdir)/$(soname)
+	ln -sf $(libname) $(DESTDIR)$(libdir)/libnvme.so
+endif
+
+# NOTE(review): this dependency is already declared by the header rule
+# above together with the .os objects; kept here as-is.
+$(libnvme_objs): libnvme.h
+
+clean:
+	rm -f $(all_targets) $(libnvme_objs) $(libnvme_sobjs) $(soname).new $(NAME).pc
+	rm -f *.so* *.a *.o
--- /dev/null
+/* Umbrella header: pulls the complete public libnvme API into scope.
+ * Consumers include only this file; each sub-header below must also be
+ * installed for this header to compile.
+ */
+#ifndef _LIBNVME_H
+#define _LIBNVME_H
+
+#include "nvme/types.h"
+#include "nvme/cmd.h"
+#include "nvme/ioctl.h"
+#include "nvme/fabrics.h"
+#include "nvme/filters.h"
+#include "nvme/tree.h"
+#include "nvme/util.h"
+
+#endif /* _LIBNVME_H */
--- /dev/null
+{
+ global:
+ nvme_submit_passthru;
+ local:
+ *;
+};
--- /dev/null
+prefix=@prefix@
+exec_prefix=${prefix}
+libdir=@libdir@
+includedir=@includedir@
+
+Name: @NAME@
+Version: @VERSION@
+Description: Linux-native NVM Express (NVMe) device management library
+URL: http://github.com/linux-nvme/nvme-cli/
+
+Libs: -L${libdir} -lnvme
+Cflags: -I${includedir}
--- /dev/null
+Name: libnvme
+Version: 0.1
+Release: 1
+Summary: Linux-native nvme device management library
+License: LGPLv2+
+Group: System Environment/Libraries
+Source: %{name}-%{version}.tar.gz
+BuildRoot: %{_tmppath}/%{name}-root
+URL: http://github.com/linux-nvme/nvme-cli
+
+%description
+Provides library functions for accessing and managing nvme devices.
+
+%package devel
+Summary: Development files for Linux-native nvme
+Group: Development/System
+Requires: libnvme
+Provides: libnvme.so.1
+
+%description devel
+This package provides header files to include and libraries to link with
+for the Linux-native nvme.
+
+%prep
+%setup
+
+%build
+./configure --prefix=/usr --libdir=/%{_libdir} --mandir=/usr/share/man
+make
+
+%install
+[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT
+
+make install DESTDIR=$RPM_BUILD_ROOT
+
+%clean
+[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT
+
+%post -p /sbin/ldconfig
+
+%postun -p /sbin/ldconfig
+
+%files
+%defattr(-,root,root)
+%attr(0755,root,root) %{_libdir}/libnvme.so.*
+%doc COPYING
+
+%files devel
+%defattr(-,root,root)
+%attr(-,root,root) %{_includedir}/nvme/
+%attr(0644,root,root) %{_includedir}/libnvme.h
+%attr(0755,root,root) %{_libdir}/libnvme.so
+%attr(0644,root,root) %{_libdir}/libnvme.a
+%attr(0644,root,root) %{_libdir}/pkgconfig/*
+%attr(0644,root,root) %{_mandir}/man2/*
+
+%changelog
+* Thu Dec 12 2019 Keith Busch <kbusch@kernel.org> - 0.1
+- Initial version
--- /dev/null
+#ifndef _LIBNVME_CMD_H
+#define _LIBNVME_CMD_H
+
+#include "types.h"
+
+/**
+ * DOC: NVMe Admin command enums
+ */
+
+/**
+ * enum nvme_admin_opcode - Known NVMe admin opcodes
+ * @nvme_admin_delete_sq:
+ * @nvme_admin_create_sq:
+ * @nvme_admin_get_log_page:
+ * @nvme_admin_delete_cq:
+ * @nvme_admin_create_cq:
+ * @nvme_admin_identify:
+ * @nvme_admin_abort_cmd:
+ * @nvme_admin_set_features:
+ * @nvme_admin_get_features:
+ * @nvme_admin_async_event:
+ * @nvme_admin_ns_mgmt:
+ * @nvme_admin_fw_commit:
+ * @nvme_admin_fw_download:
+ * @nvme_admin_dev_self_test:
+ * @nvme_admin_ns_attach:
+ * @nvme_admin_keep_alive:
+ * @nvme_admin_directive_send:
+ * @nvme_admin_directive_recv:
+ * @nvme_admin_virtual_mgmt:
+ * @nvme_admin_nvme_mi_send:
+ * @nvme_admin_nvme_mi_recv:
+ * @nvme_admin_dbbuf:
+ * @nvme_admin_fabrics:
+ * @nvme_admin_format_nvm:
+ * @nvme_admin_security_send:
+ * @nvme_admin_security_recv:
+ * @nvme_admin_sanitize_nvm:
+ * @nvme_admin_get_lba_status:
+ */
+enum nvme_admin_opcode {
+ nvme_admin_delete_sq = 0x00,
+ nvme_admin_create_sq = 0x01,
+ nvme_admin_get_log_page = 0x02,
+ nvme_admin_delete_cq = 0x04,
+ nvme_admin_create_cq = 0x05,
+ nvme_admin_identify = 0x06,
+ nvme_admin_abort_cmd = 0x08,
+ nvme_admin_set_features = 0x09,
+ nvme_admin_get_features = 0x0a,
+ nvme_admin_async_event = 0x0c,
+ nvme_admin_ns_mgmt = 0x0d,
+ nvme_admin_fw_commit = 0x10,
+ nvme_admin_fw_download = 0x11,
+ nvme_admin_dev_self_test = 0x14,
+ nvme_admin_ns_attach = 0x15,
+ nvme_admin_keep_alive = 0x18,
+ nvme_admin_directive_send = 0x19,
+ nvme_admin_directive_recv = 0x1a,
+ nvme_admin_virtual_mgmt = 0x1c,
+ nvme_admin_nvme_mi_send = 0x1d,
+ nvme_admin_nvme_mi_recv = 0x1e,
+ nvme_admin_dbbuf = 0x7c,
+ nvme_admin_fabrics = 0x7f,
+ nvme_admin_format_nvm = 0x80,
+ nvme_admin_security_send = 0x81,
+ nvme_admin_security_recv = 0x82,
+ nvme_admin_sanitize_nvm = 0x84,
+ nvme_admin_get_lba_status = 0x86,
+};
+
+/**
+ * enum nvme_identify_cns -
+ * @NVME_IDENTIFY_CNS_NS:
+ * @NVME_IDENTIFY_CNS_CTRL:
+ * @NVME_IDENTIFY_CNS_NS_ACTIVE_LIST:
+ * @NVME_IDENTIFY_CNS_NS_DESC_LIST:
+ * @NVME_IDENTIFY_CNS_NVMSET_LIST:
+ * @NVME_IDENTIFY_CNS_ALLOCATED_NS_LIST:
+ * @NVME_IDENTIFY_CNS_ALLOCATED_NS:
+ * @NVME_IDENTIFY_CNS_NS_CTRL_LIST:
+ * @NVME_IDENTIFY_CNS_CTRL_LIST:
+ * @NVME_IDENTIFY_CNS_PRIMARY_CTRL_CAP:
+ * @NVME_IDENTIFY_CNS_SECONDARY_CTRL_LIST:
+ * @NVME_IDENTIFY_CNS_NS_GRANULARITY:
+ * @NVME_IDENTIFY_CNS_UUID_LIST:
+ */
+enum nvme_identify_cns {
+ NVME_IDENTIFY_CNS_NS = 0x00,
+ NVME_IDENTIFY_CNS_CTRL = 0x01,
+ NVME_IDENTIFY_CNS_NS_ACTIVE_LIST = 0x02,
+ NVME_IDENTIFY_CNS_NS_DESC_LIST = 0x03,
+ NVME_IDENTIFY_CNS_NVMSET_LIST = 0x04,
+ NVME_IDENTIFY_CNS_ALLOCATED_NS_LIST = 0x10,
+ NVME_IDENTIFY_CNS_ALLOCATED_NS = 0x11,
+ NVME_IDENTIFY_CNS_NS_CTRL_LIST = 0x12,
+ NVME_IDENTIFY_CNS_CTRL_LIST = 0x13,
+ NVME_IDENTIFY_CNS_PRIMARY_CTRL_CAP = 0x14,
+ NVME_IDENTIFY_CNS_SECONDARY_CTRL_LIST = 0x15,
+ NVME_IDENTIFY_CNS_NS_GRANULARITY = 0x16,
+ NVME_IDENTIFY_CNS_UUID_LIST = 0x17,
+};
+
+/**
+ * enum nvme_cmd_get_log_lid -
+ * @NVME_LOG_LID_ERROR:
+ * @NVME_LOG_LID_SMART:
+ * @NVME_LOG_LID_FW_SLOT:
+ * @NVME_LOG_LID_CHANGED_NS:
+ * @NVME_LOG_LID_CMD_EFFECTS:
+ * @NVME_LOG_LID_DEVICE_SELF_TEST:
+ * @NVME_LOG_LID_TELEMETRY_HOST:
+ * @NVME_LOG_LID_TELEMETRY_CTRL:
+ * @NVME_LOG_LID_ENDURANCE_GROUP:
+ * @NVME_LOG_LID_PREDICTABLE_LAT_NVMSET:
+ * @NVME_LOG_LID_PREDICTABLE_LAT_AGG:
+ * @NVME_LOG_LID_ANA:
+ * @NVME_LOG_LID_PERSISTENT_EVENT:
+ * @NVME_LOG_LID_LBA_STATUS:
+ * @NVME_LOG_LID_ENDURANCE_GRP_EVT:
+ * @NVME_LOG_LID_DISC:
+ * @NVME_LOG_LID_RESERVATION:
+ * @NVME_LOG_LID_SANITIZE:
+ */
+enum nvme_cmd_get_log_lid {
+ NVME_LOG_LID_ERROR = 0x01,
+ NVME_LOG_LID_SMART = 0x02,
+ NVME_LOG_LID_FW_SLOT = 0x03,
+ NVME_LOG_LID_CHANGED_NS = 0x04,
+ NVME_LOG_LID_CMD_EFFECTS = 0x05,
+ NVME_LOG_LID_DEVICE_SELF_TEST = 0x06,
+ NVME_LOG_LID_TELEMETRY_HOST = 0x07,
+ NVME_LOG_LID_TELEMETRY_CTRL = 0x08,
+ NVME_LOG_LID_ENDURANCE_GROUP = 0x09,
+ NVME_LOG_LID_PREDICTABLE_LAT_NVMSET = 0x0a,
+ NVME_LOG_LID_PREDICTABLE_LAT_AGG = 0x0b,
+ NVME_LOG_LID_ANA = 0x0c,
+ NVME_LOG_LID_PERSISTENT_EVENT = 0x0d,
+ NVME_LOG_LID_LBA_STATUS = 0x0e,
+ NVME_LOG_LID_ENDURANCE_GRP_EVT = 0x0f,
+ NVME_LOG_LID_DISC = 0x70,
+ NVME_LOG_LID_RESERVATION = 0x80,
+ NVME_LOG_LID_SANITIZE = 0x81,
+};
+
+/**
+ * enum nvme_features_id -
+ * @NVME_FEAT_FID_ARBITRATION:
+ * @NVME_FEAT_FID_POWER_MGMT:
+ * @NVME_FEAT_FID_LBA_RANGE:
+ * @NVME_FEAT_FID_TEMP_THRESH:
+ * @NVME_FEAT_FID_ERR_RECOVERY:
+ * @NVME_FEAT_FID_VOLATILE_WC:
+ * @NVME_FEAT_FID_NUM_QUEUES:
+ * @NVME_FEAT_FID_IRQ_COALESCE:
+ * @NVME_FEAT_FID_IRQ_CONFIG:
+ * @NVME_FEAT_FID_WRITE_ATOMIC:
+ * @NVME_FEAT_FID_ASYNC_EVENT:
+ * @NVME_FEAT_FID_AUTO_PST:
+ * @NVME_FEAT_FID_HOST_MEM_BUF:
+ * @NVME_FEAT_FID_TIMESTAMP:
+ * @NVME_FEAT_FID_KATO:
+ * @NVME_FEAT_FID_HCTM:
+ * @NVME_FEAT_FID_NOPSC:
+ * @NVME_FEAT_FID_RRL:
+ * @NVME_FEAT_FID_PLM_CONFIG:
+ * @NVME_FEAT_FID_PLM_WINDOW:
+ * @NVME_FEAT_FID_LBA_STS_INTERVAL:
+ * @NVME_FEAT_FID_HOST_BEHAVIOR:
+ * @NVME_FEAT_FID_SANITIZE:
+ * @NVME_FEAT_FID_ENDURANCE_EVT_CFG:
+ * @NVME_FEAT_FID_SW_PROGRESS:
+ * @NVME_FEAT_FID_HOST_ID:
+ * @NVME_FEAT_FID_RESV_MASK:
+ * @NVME_FEAT_RESV_PERSIST:
+ * @NVME_FEAT_FID_WRITE_PROTECT:
+ */
+enum nvme_features_id {
+ NVME_FEAT_FID_ARBITRATION = 0x01,
+ NVME_FEAT_FID_POWER_MGMT = 0x02,
+ NVME_FEAT_FID_LBA_RANGE = 0x03,
+ NVME_FEAT_FID_TEMP_THRESH = 0x04,
+ NVME_FEAT_FID_ERR_RECOVERY = 0x05,
+ NVME_FEAT_FID_VOLATILE_WC = 0x06,
+ NVME_FEAT_FID_NUM_QUEUES = 0x07,
+ NVME_FEAT_FID_IRQ_COALESCE = 0x08,
+ NVME_FEAT_FID_IRQ_CONFIG = 0x09,
+ NVME_FEAT_FID_WRITE_ATOMIC = 0x0a,
+ NVME_FEAT_FID_ASYNC_EVENT = 0x0b,
+ NVME_FEAT_FID_AUTO_PST = 0x0c,
+ NVME_FEAT_FID_HOST_MEM_BUF = 0x0d,
+ NVME_FEAT_FID_TIMESTAMP = 0x0e,
+ NVME_FEAT_FID_KATO = 0x0f,
+ NVME_FEAT_FID_HCTM = 0X10,
+ NVME_FEAT_FID_NOPSC = 0X11,
+ NVME_FEAT_FID_RRL = 0x12,
+ NVME_FEAT_FID_PLM_CONFIG = 0x13,
+ NVME_FEAT_FID_PLM_WINDOW = 0x14,
+ NVME_FEAT_FID_LBA_STS_INTERVAL = 0x15,
+ NVME_FEAT_FID_HOST_BEHAVIOR = 0x16,
+ NVME_FEAT_FID_SANITIZE = 0x17,
+ NVME_FEAT_FID_ENDURANCE_EVT_CFG = 0x18,
+ NVME_FEAT_FID_SW_PROGRESS = 0x80,
+ NVME_FEAT_FID_HOST_ID = 0x81,
+ NVME_FEAT_FID_RESV_MASK = 0x82,
+ NVME_FEAT_RESV_PERSIST = 0x83,
+ NVME_FEAT_FID_WRITE_PROTECT = 0x84,
+};
+
+/**
+ * enum nvme_get_features_sel -
+ * @NVME_GET_FEATURES_SEL_CURRENT:
+ * @NVME_GET_FEATURES_SEL_DEFAULT:
+ * @NVME_GET_FEATURES_SEL_SAVED:
+ */
+enum nvme_get_features_sel {
+ NVME_GET_FEATURES_SEL_CURRENT = 0,
+ NVME_GET_FEATURES_SEL_DEFAULT = 1,
+ NVME_GET_FEATURES_SEL_SAVED = 2,
+};
+
+/**
+ * enum nvme_cmd_format_mset -
+ * @NVME_FORMAT_MSET_SEPARATE:
+ * @NVME_FORMAT_MSET_EXTENEDED:
+ */
+enum nvme_cmd_format_mset {
+ NVME_FORMAT_MSET_SEPARATE = 0,
+ NVME_FORMAT_MSET_EXTENEDED = 1,
+};
+
+/**
+ * enum nvme_cmd_format_pi -
+ * @NVME_FORMAT_PI_DISABLE:
+ * @NVME_FORMAT_PI_TYPE1:
+ * @NVME_FORMAT_PI_TYPE2:
+ * @NVME_FORMAT_PI_TYPE3:
+ */
+enum nvme_cmd_format_pi {
+ NVME_FORMAT_PI_DISABLE = 0,
+ NVME_FORMAT_PI_TYPE1 = 1,
+ NVME_FORMAT_PI_TYPE2 = 2,
+ NVME_FORMAT_PI_TYPE3 = 3,
+};
+
+/**
+ * enum nvme_cmd_format_pil -
+ * @NVME_FORMAT_PIL_LAST:
+ * @NVME_FORMAT_PIL_FIRST:
+ */
+enum nvme_cmd_format_pil {
+ NVME_FORMAT_PIL_LAST = 0,
+ NVME_FORMAT_PIL_FIRST = 1,
+};
+
+/**
+ * enum nvme_cmd_format_ses -
+ * @NVME_FORMAT_SES_NONE:
+ * @NVME_FORMAT_SES_USER_DATA_ERASE:
+ * @NVME_FORMAT_SES_CRYPTO_ERASE:
+ */
+enum nvme_cmd_format_ses {
+ NVME_FORMAT_SES_NONE = 0,
+ NVME_FORMAT_SES_USER_DATA_ERASE = 1,
+ NVME_FORMAT_SES_CRYPTO_ERASE = 2,
+};
+
+/**
+ * enum nvme_ns_mgmt_sel -
+ * @NVME_NS_MGMT_SEL_CREATE:
+ * @NVME_NS_MGMT_SEL_DELETE:
+ */
+enum nvme_ns_mgmt_sel {
+ NVME_NS_MGMT_SEL_CREATE = 0,
+ NVME_NS_MGMT_SEL_DELETE = 1,
+};
+
+/**
+ * enum nvme_ns_attach_sel -
+ * @NVME_NS_ATTACH_SEL_CTRL_ATTACH:
+ * @NVME_NS_ATTACH_SEL_CTRL_DEATTACH:
+ */
+enum nvme_ns_attach_sel {
+ NVME_NS_ATTACH_SEL_CTRL_ATTACH = 0,
+ NVME_NS_ATTACH_SEL_CTRL_DEATTACH = 1,
+};
+
+/**
+ * enum nvme_fw_commit_ca -
+ * @NVME_FW_COMMIT_CA_REPLACE:
+ * @NVME_FW_COMMIT_CA_REPLACE_AND_ACTIVATE:
+ * @NVME_FW_COMMIT_CA_SET_ACTIVE:
+ * @NVME_FW_COMMIT_CA_REPLACE_AND_ACTIVATE_IMMEDIATE:
+ * @NVME_FW_COMMIT_CA_REPLACE_BOOT_PARTITION:
+ * @NVME_FW_COMMIT_CA_ACTIVATE_BOOT_PARTITION:
+ */
+enum nvme_fw_commit_ca {
+ NVME_FW_COMMIT_CA_REPLACE = 0,
+ NVME_FW_COMMIT_CA_REPLACE_AND_ACTIVATE = 1,
+ NVME_FW_COMMIT_CA_SET_ACTIVE = 2,
+ NVME_FW_COMMIT_CA_REPLACE_AND_ACTIVATE_IMMEDIATE = 3,
+ NVME_FW_COMMIT_CA_REPLACE_BOOT_PARTITION = 6,
+ NVME_FW_COMMIT_CA_ACTIVATE_BOOT_PARTITION = 7,
+};
+
+/**
+ * enum nvme_directive_dtype -
+ * @NVME_DIRECTIVE_DTYPE_IDENTIFY:
+ * @NVME_DIRECTIVE_DTYPE_STREAMS:
+ */
+enum nvme_directive_dtype {
+ NVME_DIRECTIVE_DTYPE_IDENTIFY = 0,
+ NVME_DIRECTIVE_DTYPE_STREAMS = 1,
+};
+
+/**
+ * enum -
+ */
+enum nvme_cmd_directive_receive_identify_doper {
+ NVME_DIRECTIVE_RECEIVE_IDENTIFY_DOPER_PARAM = 0x01,
+};
+
+/**
+ * enum -
+ */
+enum nvme_cmd_directive_receive_streams_doper {
+ NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_PARAM = 0x01,
+ NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_STATUS = 0x02,
+ NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_RESOURCE = 0x03,
+};
+
+/**
+ * enum -
+ */
+enum nvme_cmd_directive_send_identify_doper {
+ NVME_DIRECTIVE_SEND_IDENTIFY_DOPER_ENDIR = 0x01,
+};
+
+/**
+ * enum -
+ */
+enum nvme_cmd_directive_send_identify_endir {
+ NVME_DIRECTIVE_SEND_IDENTIFY_ENDIR_DISABLE = 0,
+ NVME_DIRECTIVE_SEND_IDENTIFY_ENDIR_ENABLE = 1,
+};
+
+/**
+ * enum -
+ */
+enum nvme_cmd_directive_send_streams_doper {
+ NVME_DIRECTIVE_SEND_STREAMS_DOPER_RELEASE_IDENTIFIER = 0x01,
+ NVME_DIRECTIVE_SEND_STREAMS_DOPER_RELEASE_RESOURCE = 0x02,
+};
+
+/**
+ * enum nvme_sanitize_sanact -
+ * @NVME_SANITIZE_SANACT_EXIT_FAILURE:
+ * @NVME_SANITIZE_SANACT_START_BLOCK_ERASE:
+ * @NVME_SANITIZE_SANACT_START_OVERWRITE:
+ * @NVME_SANITIZE_SANACT_START_CRYPTO_ERASE:
+ */
+enum nvme_sanitize_sanact {
+ NVME_SANITIZE_SANACT_EXIT_FAILURE = 1,
+ NVME_SANITIZE_SANACT_START_BLOCK_ERASE = 2,
+ NVME_SANITIZE_SANACT_START_OVERWRITE = 3,
+ NVME_SANITIZE_SANACT_START_CRYPTO_ERASE = 4,
+};
+
+/**
+ * enum nvme_dst_stc -
+ * @NVME_DST_STC_SHORT:
+ * @NVME_DST_STC_LONG:
+ * @NVME_DST_STC_VS:
+ * @NVME_DST_STC_ABORT:
+ */
+enum nvme_dst_stc {
+ NVME_DST_STC_SHORT = 0x1,
+ NVME_DST_STC_LONG = 0x2,
+ NVME_DST_STC_VS = 0xe,
+ NVME_DST_STC_ABORT = 0xf,
+};
+
+/**
+ * enum nvme_virt_mgmt_act -
+ * @NVME_VIRT_MGMT_ACT_PRIM_CTRL_FLEX_ALLOC:
+ * @NVME_VIRT_MGMT_ACT_OFFLINE_SEC_CTRL:
+ * @NVME_VIRT_MGMT_ACT_ASSIGN_SEC_CTRL:
+ * @NVME_VIRT_MGMT_ACT_ONLINE_SEC_CTRL:
+ */
+enum nvme_virt_mgmt_act {
+ NVME_VIRT_MGMT_ACT_PRIM_CTRL_FLEX_ALLOC = 1,
+ NVME_VIRT_MGMT_ACT_OFFLINE_SEC_CTRL = 7,
+ NVME_VIRT_MGMT_ACT_ASSIGN_SEC_CTRL = 8,
+ NVME_VIRT_MGMT_ACT_ONLINE_SEC_CTRL = 9,
+};
+
+/**
+ * enum nvme_virt_mgmt_rt -
+ * @NVME_VIRT_MGMT_RT_VQ_RESOURCE:
+ * @NVME_VIRT_MGMT_RT_VI_RESOURCE:
+ */
+enum nvme_virt_mgmt_rt {
+ NVME_VIRT_MGMT_RT_VQ_RESOURCE = 0,
+ NVME_VIRT_MGMT_RT_VI_RESOURCE = 1,
+};
+
+/**
+ * nvme_identify() - Send the NVMe Identify command
+ * @fd: File descriptor of nvme device
+ * @cns: The Controller or Namespace structure, see @enum nvme_identify_cns
+ * @nsid: Namespace identifier, if applicable
+ * @cntid: The Controller Identifier, if applicable
+ * @nvmsetid: The NVMe Set ID if CNS is 04h
+ * @uuidx: UUID Index if controller supports this id selection method
+ * @data: User space destination address to transfer the data
+ *
+ * The Identify command returns a data buffer that describes information about
+ * the NVM subsystem, the controller or the namespace(s).
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_identify(int fd, enum nvme_identify_cns cns, __u32 nsid,
+ __u16 cntid, __u16 nvmsetid, __u8 uuidx, void *data);
+
+/**
+ * nvme_identify_ctrl() - Retrieves nvme identify controller
+ * @fd: File descriptor of nvme device
+ * @id: User space destination address to transfer the data
+ *
+ * Sends nvme identify with CNS value %NVME_IDENTIFY_CNS_CTRL.
+ *
+ * See &struct nvme_id_ctrl for details on the data returned.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_identify_ctrl(int fd, struct nvme_id_ctrl *id);
+
+/**
+ * nvme_identify_ns() - Retrieves nvme identify namespace
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace to identify
+ * @ns: User space destination address to transfer the data
+ *
+ * If the Namespace Identifier (NSID) field specifies an active NSID, then the
+ * Identify Namespace data structure is returned to the host for that specified
+ * namespace.
+ *
+ * If the controller supports the Namespace Management capability and the NSID
+ * field is set to %NVME_NSID_ALL, then the controller returns an Identify Namespace
+ * data structure that specifies capabilities that are common across namespaces
+ * for this controller.
+ *
+ * See &struct nvme_id_ns for details on the structure returned.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_identify_ns(int fd, __u32 nsid, struct nvme_id_ns *ns);
+
+/**
+ * nvme_identify_allocated_ns() - Same as nvme_identify_ns, but only for
+ * allocated namespaces
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace to identify
+ * @ns: User space destination address to transfer the data
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_identify_allocated_ns(int fd, __u32 nsid, struct nvme_id_ns *ns);
+
+/**
+ * nvme_identify_active_ns_list() - Retrieves active namespaces id list
+ * @fd: File descriptor of nvme device
+ * @nsid: Return namespaces greater than this identifier
+ * @ns_list: User space destination address to transfer the data
+ *
+ * A list of 1024 namespace IDs is returned to the host containing NSIDs in
+ * increasing order that are greater than the value specified in the Namespace
+ * Identifier (nsid) field of the command.
+ *
+ * See &struct nvme_ns_list for the definition of the returned structure.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_identify_active_ns_list(int fd, __u32 nsid, struct nvme_ns_list *list);
+
+/**
+ * nvme_identify_allocated_ns_list() - Retrieves allocated namespace id list
+ * @fd: File descriptor of nvme device
+ * @nsid: Return namespaces greater than this identifier
+ * @ns_list: User space destination address to transfer the data
+ *
+ * A list of 1024 namespace IDs is returned to the host containing NSIDs in
+ * increasing order that are greater than the value specified in the Namespace
+ * Identifier (nsid) field of the command.
+ *
+ * See &struct nvme_ns_list for the definition of the returned structure.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_identify_allocated_ns_list(int fd, __u32 nsid, struct nvme_ns_list *list);
+
+/**
+ * nvme_identify_ctrl_list() - Retrieves identify controller list
+ * @fd: File descriptor of nvme device
+ * @cntlid: Starting CNTLID to return in the list
+ * @cntlist: User space destination address to transfer the data
+ *
+ * Up to 2047 controller identifiers is returned containing a controller
+ * identifier greater than or equal to the controller identifier specified in
+ * @cntid.
+ *
+ * See &struct nvme_ctrl_list for a definition of the structure returned.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_identify_ctrl_list(int fd, __u16 cntid,
+ struct nvme_ctrl_list *ctrlist);
+
+/**
+ * nvme_identify_nsid_ctrl_list() -
+ * @fd: File descriptor of nvme device
+ * @nsid: Return controllers that are attached to this nsid
+ * @cntlid: Starting CNTLID to return in the list
+ * @cntlist: User space destination address to transfer the data
+ *
+ * Up to 2047 controller identifiers is returned containing a controller
+ * identifier greater than or equal to the controller identifier specified in
+ * @cntid.
+ *
+ * See &struct nvme_ctrl_list for a definition of the structure returned.
+ *
+ * Return: The nvme command status if a response was received or -1
+ */
+int nvme_identify_nsid_ctrl_list(int fd, __u32 nsid, __u16 cntid,
+ struct nvme_ctrl_list *ctrlist);
+
+/**
+ * nvme_identify_ns_descs() - Retrieves namespace descriptor list
+ * @fd: File descriptor of nvme device
+ * @nsid: The namespace id to retrieve destriptors
+ * @descs: User space destination address to transfer the data
+ *
+ * A list of Namespace Identification Descriptor structures is returned to the
+ * host for the namespace specified in the Namespace Identifier (NSID) field if
+ * it is an active NSID.
+ *
+ * The data returned is in the form of an array of 'struct nvme_ns_id_desc'.
+ *
+ * See &struct nvme_ns_id_desc for the definition of the returned structure.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_identify_ns_descs(int fd, __u32 nsid, struct nvme_ns_id_desc *descs);
+
+/**
+ * nvme_identify_nvmset_list() - Retrieves NVM Set List
+ * @fd: File descriptor of nvme device
+ * @nvmeset_id: NVM Set Identifier
+ * @nvmset: User space destination address to transfer the data
+ *
+ * Retrieves an NVM Set List, struct nvme_id_nvmset. The data structure is an
+ * ordered list by NVM Set Identifier, starting with the first NVM Set
+ * Identifier supported by the NVM subsystem that is equal to or greater than
+ * the NVM Set Identifier.
+ *
+ * See &struct nvme_id_nvmset_list for the definition of the returned structure.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_identify_nvmset_list(int fd, __u16 nvmsetid, struct nvme_id_nvmset_list *nvmset);
+
+/**
+ * nvme_identify_primary_ctrl() - Retrieve NVMe Primary Controller
+ * identification
+ * @fd: File descriptor of nvme device
+ * @cntid:
+ * @cap:
+ *
+ * See &struct nvme_primary_ctrl_cap for the definition of the returned structure, @cap.
+ *
+ * Return: The nvme command status if a response was received or -1
+ * with errno set otherwise.
+ */
+int nvme_identify_primary_ctrl(int fd, __u16 cntid, struct nvme_primary_ctrl_cap *cap);
+
+/**
+ * nvme_identify_secondary_ctrl_list() - Retrieves secondary controller list
+ * @fd: File descriptor of nvme device
+ * @cntid: Return controllers starting at this identifier
+ * @sc_list: User space destination address to transfer the data
+ *
+ * A Secondary Controller List is returned to the host for up to 127 secondary
+ * controllers associated with the primary controller processing this command.
+ * The list contains entries for controller identifiers greater than or equal
+ * to the value specified in the Controller Identifier (cntid).
+ *
+ * See &struct nvme_secondary_ctrls_list for a definition of the returned
+ * structure.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_identify_secondary_ctrl_list(int fd, __u16 cntid, struct nvme_secondary_ctrl_list *list);
+
+/**
+ * nvme_identify_ns_granularity() - Retrieves namespace granularity
+ * identification
+ * @fd: File descriptor of nvme device
+ * @gr_list: User space destination address to transfer the data
+ *
+ * If the controller supports reporting of Namespace Granularity, then a
+ * Namespace Granularity List is returned to the host for up to sixteen
+ * namespace granularity descriptors
+ *
+ * See &struct nvme_id_ns_granularity_list for the definition of the returned
+ * structure.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_identify_ns_granularity(int fd, struct nvme_id_ns_granularity_list *list);
+
+/**
+ * nvme_identify_uuid() - Retrieves device's UUIDs
+ * @fd: File descriptor of nvme device
+ * @uuid_list: User space destination address to transfer the data
+ *
+ * Each UUID List entry is either 0h, the NVMe Invalid UUID, or a valid UUID.
+ * Valid UUIDs are those which are non-zero and are not the NVMe Invalid UUID.
+ *
+ * See &struct nvme_id_uuid_list for the definition of the returned structure.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_identify_uuid(int fd, struct nvme_id_uuid_list *list);
+
+/**
+ * nvme_get_log() - NVMe Admin Get Log command
+ * @fd: File descriptor of nvme device
+ * @lid: Log page identifier, see &enum nvme_cmd_get_log_lid for known values
+ * @nsid: Namespace identifier, if applicable
+ * @lpo: Log page offset for partial log transfers
+ * @lsp: Log specific field
+ * @lsi: Endurance group information
+ * @rae: Retain asynchronous events
+ * @uuidx: UUID selection, if supported
+ * @len: Length of provided user buffer to hold the log data in bytes
+ * @log: User space destination address to transfer the data
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_log(int fd, enum nvme_cmd_get_log_lid lid, __u32 nsid, __u64 lpo,
+ __u8 lsp, __u16 lsi, bool rae, __u8 uuidx, __u32 len, void *log);
+
+/**
+ * nvme_get_log_error() - Retrieve nvme error log
+ * @fd: File descriptor of nvme device
+ * @entries: Number of error log entries allocated
+ * @rae: Retain asynchronous events
+ * @err_log: Array of error logs of size 'entries'
+ *
+ * This log page is used to describe extended error information for a command
+ * that completed with error, or may report an error that is not specific to a
+ * particular command.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_log_error(int fd, unsigned nr_entries, bool rae,
+ struct nvme_error_log_page *log);
+
+/**
+ * nvme_get_log_smart() - Retrieve nvme smart log
+ * @fd: File descriptor of nvme device
+ * @nsid: Optional namespace identifier
+ * @rae: Retain asynchronous events
+ * @smart_log: User address to store the smart log
+ *
+ * This log page is used to provide SMART and general health information. The
+ * information provided is over the life of the controller and is retained
+ * across power cycles. To request the controller log page, the namespace
+ * identifier specified is FFFFFFFFh. The controller may also support
+ * requesting the log page on a per namespace basis, as indicated by bit 0 of
+ * the LPA field in the Identify Controller data structure.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_log_smart(int fd, __u32 nsid, bool rae, struct nvme_smart_log *log);
+
+/**
+ * nvme_get_log_fw_slot() - Retrieves the controller firmware log
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @fw_log: User address to store the log page
+ *
+ * This log page is used to describe the firmware revision stored in each
+ * firmware slot supported. The firmware revision is indicated as an ASCII
+ * string. The log page also indicates the active slot number.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_log_fw_slot(int fd, bool rae, struct nvme_firmware_slot *log);
+
+/**
+ * nvme_get_log_changed_ns_list() - Retrieve namespace changed list
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @ns_list: User address to store the log page
+ *
+ * This log page is used to describe namespaces attached to this controller
+ * that have changed since the last time the namespace was identified, been
+ * added, or deleted.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_log_changed_ns_list(int fd, bool rae, struct nvme_ns_list *log);
+
+/**
+ * nvme_get_log_cmd_effects() - Retrieve nvme command effects log
+ * @fd: File descriptor of nvme device
+ * @effects_log:User address to store the effects log
+ *
+ * This log page is used to describe the commands that the controller supports
+ * and the effects of those commands on the state of the NVM subsystem.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_log_cmd_effects(int fd, struct nvme_cmd_effects_log *log);
+
+/**
+ * nvme_get_log_device_self_test() - Retrieve the device self test log
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID being tested
+ * @log: Userspace address of the log payload
+ *
+ * The log page is used to indicate the status of an in progress self test and
+ * the percent complete of that operation, and the results of the previous 20
+ * self-test operations.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_log_device_self_test(int fd, struct nvme_self_test_log *log);
+
+/**
+ * nvme_get_log_create_telemetry_host() -
+ */
+int nvme_get_log_create_telemetry_host(int fd, struct nvme_telemetry_log *log);
+/**
+ * nvme_get_log_telemetry_host() -
+ * @fd: File descriptor of nvme device
+ * @offset: Offset into the telemetry data
+ * @len: Length of provided user buffer to hold the log data in bytes
+ * @log: User address for log page data
+ *
+ * Retrieves the Telemetry Host-Initiated log page at the requested offset
+ * using the previously existing capture.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_log_telemetry_host(int fd, __u64 offset, __u32 len, void *log);
+
+/**
+ * nvme_get_log_telemetry_ctrl() -
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @offset: Offset into the telemetry data
+ * @len: Length of provided user buffer to hold the log data in bytes
+ * @log: User address for log page data
+ */
+int nvme_get_log_telemetry_ctrl(int fd, bool rae, __u64 offset, __u32 len, void *log);
+
+/**
+ * nvme_get_log_endurance_group() -
+ * @fd: File descriptor of nvme device
+ * @endgid: Starting group identifier to return in the list
+ * @log: User address to store the endurance log
+ *
+ * This log page indicates if an Endurance Group Event has occurred for a
+ * particular Endurance Group. If an Endurance Group Event has occurred, the
+ * details of the particular event are included in the Endurance Group
+ * Information log page for that Endurance Group. An asynchronous event is
+ * generated when an entry for an Endurance Group is newly added to this log
+ * page.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_log_endurance_group(int fd, __u16 endgid, struct nvme_endurance_group_log *log);
+
+/**
+ * nvme_get_log_predictable_lat_nvmset() -
+ * @fd:
+ * @nvmsetid:
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_log_predictable_lat_nvmset(int fd, __u16 nvmsetid,
+ struct nvme_nvmset_predictable_lat_log *log);
+
+/**
+ * nvme_get_log_predictable_lat_event() -
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ */
+int nvme_get_log_predictable_lat_event(int fd, bool rae, __u32 offset,
+ __u32 len, void *log);
+
+/**
+ *
+ */
+enum nvme_log_ana_lsp {
+ NVME_LOG_ANA_LSP_RGO_NAMESPACES = 0,
+ NVME_LOG_ANA_LSP_RGO_GROUPS_ONLY = 1,
+};
+
+/**
+ * nvme_get_log_ana() - Retrieve the Asymmetric Namespace Access log page
+ * @fd: File descriptor of nvme device
+ * @lsp: Log specific, see &enum nvme_log_ana_lsp
+ * @rae: Retain asynchronous events
+ * @offset: Offset to the start of the log page
+ * @len: The allocated length of the log page
+ * @log: User address to store the ana log
+ *
+ * This log consists of a header describing the log and descriptors containing
+ * the asymmetric namespace access information for ANA Groups that contain
+ * namespaces that are attached to the controller processing the command.
+ *
+ * See &struct nvme_ana_rsp_hdr for the definition of the returned structure.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_log_ana(int fd, enum nvme_log_ana_lsp lsp, bool rae, __u64 offset,
+ __u32 len, void *log);
+
+/**
+ * nvme_get_log_ana_groups() - Retrieve the ANA groups-only log page
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @len: The allocated length of the log page
+ * @log: User address to store the ana group descriptors
+ *
+ * See &struct nvme_ana_group_desc for the definition of the returned structure.
+ */
+int nvme_get_log_ana_groups(int fd, bool rae, __u32 len,
+ struct nvme_ana_group_desc *log);
+
+/**
+ * nvme_get_log_lba_status() - Retrieve the LBA Status log page
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @offset: Offset into the log page data
+ * @len: Length of provided user buffer to hold the log data in bytes
+ * @log: User address for log page data
+ */
+int nvme_get_log_lba_status(int fd, bool rae, __u64 offset, __u32 len,
+ void *log);
+
+/**
+ * nvme_get_log_endurance_grp_evt() - Retrieve the endurance group event aggregate log
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @offset: Offset into the log page data
+ * @len: Length of provided user buffer to hold the log data in bytes
+ * @log: User address for log page data
+ */
+int nvme_get_log_endurance_grp_evt(int fd, bool rae, __u32 offset, __u32 len,
+ void *log);
+
+/**
+ * nvme_get_log_discovery() - Retrieve the discovery log page
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @offset: Offset of this log to retrieve
+ * @len: The allocated size for this portion of the log
+ * @log: User address to store the discovery log
+ *
+ * Supported only by fabrics discovery controllers, returning discovery
+ * records.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_log_discovery(int fd, bool rae, __u32 offset, __u32 len, void *log);
+
+/**
+ * nvme_get_log_reservation() - Retrieve the reservation notification log
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @log: User address to store the reservation notification log
+ */
+int nvme_get_log_reservation(int fd, bool rae,
+ struct nvme_resv_notification_log *log);
+
+/**
+ * nvme_get_log_sanitize() - Retrieve the sanitize status log
+ * @fd: File descriptor of nvme device
+ * @rae: Retain asynchronous events
+ * @log: User address to store the sanitize log
+ *
+ * The Sanitize Status log page is used to report sanitize operation time
+ * estimates and information about the most recent sanitize operation.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_log_sanitize(int fd, bool rae,
+ struct nvme_sanitize_log_page *log);
+
+/**
+ * nvme_set_features() - Set a feature attribute
+ * @fd: File descriptor of nvme device
+ * @fid: Feature identifier
+ * @nsid: Namespace ID, if applicable
+ * @cdw11: Value to set the feature to
+ * @cdw12: Feature specific command dword12 field
+ * @save: Save value across power states
+ * @uuidx: UUID Index for differentiating vendor specific encoding
+ * @cdw15: Feature specific command dword15 field
+ * @data_len: Length of feature data, if applicable, in bytes
+ * @data: User address of feature data, if applicable
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_set_features(int fd, __u8 fid, __u32 nsid, __u32 cdw11, __u32 cdw12,
+ bool save, __u8 uuidx, __u32 cdw15, __u32 data_len,
+ void *data, __u32 *result);
+
+/**
+ * nvme_set_features_arbitration() - Set the arbitration feature
+ */
+int nvme_set_features_arbitration(int fd, __u8 ab, __u8 lpw, __u8 mpw,
+ __u8 hpw, bool save, __u32 *result);
+
+/**
+ * nvme_set_features_power_mgmt() - Set the power management feature
+ */
+int nvme_set_features_power_mgmt(int fd, __u8 ps, __u8 wh, bool save,
+ __u32 *result);
+
+/**
+ * nvme_set_features_lba_range() - Set the LBA range type feature
+ */
+int nvme_set_features_lba_range(int fd, __u32 nsid, __u32 nr_ranges, bool save,
+ struct nvme_lba_range_type *data, __u32 *result);
+
+
+/**
+ * enum nvme_feat_tmpthresh_thsel - Temperature threshold type select
+ * @NVME_FEATURE_TEMPTHRESH_THSEL_OVER: Select the over temperature threshold
+ * @NVME_FEATURE_TEMPTHRESH_THSEL_UNDER: Select the under temperature threshold
+ */
+enum nvme_feat_tmpthresh_thsel {
+	NVME_FEATURE_TEMPTHRESH_THSEL_OVER = 0,
+	NVME_FEATURE_TEMPTHRESH_THSEL_UNDER = 1,
+	/* Misspelled name kept for backward compatibility; prefer the above */
+	NVME_FEATURETEMPTHRESH__THSEL_UNDER = NVME_FEATURE_TEMPTHRESH_THSEL_UNDER,
+};
+
+/**
+ * nvme_set_features_temp_thresh() - Set the temperature threshold feature
+ */
+int nvme_set_features_temp_thresh(int fd, __u16 tmpth, __u8 tmpsel,
+ enum nvme_feat_tmpthresh_thsel thsel,
+ bool save, __u32 *result);
+
+/**
+ * nvme_set_features_err_recovery() - Set the error recovery feature
+ */
+int nvme_set_features_err_recovery(int fd, __u32 nsid, __u16 tler,
+ bool dulbe, bool save, __u32 *result);
+
+
+/**
+ * nvme_set_features_volatile_wc() - Set the volatile write cache feature
+ */
+int nvme_set_features_volatile_wc(int fd, bool wce, bool save,
+ __u32 *result);
+
+/**
+ * nvme_set_features_irq_coalesce() - Set the interrupt coalescing feature
+ */
+int nvme_set_features_irq_coalesce(int fd, __u8 thr, __u8 time,
+ bool save, __u32 *result);
+
+/**
+ * nvme_set_features_irq_config() - Set the interrupt vector configuration feature
+ */
+int nvme_set_features_irq_config(int fd, __u16 iv, bool cd, bool save,
+ __u32 *result);
+
+
+/**
+ * nvme_set_features_write_atomic() - Set the write atomicity normal feature
+ */
+int nvme_set_features_write_atomic(int fd, bool dn, bool save,
+ __u32 *result);
+
+/**
+ * enum nvme_features_async_event_config_flags - Asynchronous event configuration flags
+ */
+enum nvme_features_async_event_config_flags {
+ NVME_FEATURE_AENCFG_SMART_CRIT_SPARE = 1 << 0,
+ NVME_FEATURE_AENCFG_SMART_CRIT_TEMPERATURE = 1 << 1,
+ NVME_FEATURE_AENCFG_SMART_CRIT_DEGRADED = 1 << 2,
+ NVME_FEATURE_AENCFG_SMART_CRIT_READ_ONLY = 1 << 3,
+ NVME_FEATURE_AENCFG_SMART_CRIT_VOLATILE_BACKUP = 1 << 4,
+ NVME_FEATURE_AENCFG_SMART_CRIT_READ_ONLY_PMR = 1 << 5,
+ NVME_FEATURE_AENCFG_NOTICE_NAMESPACE_ATTRIBUTES = 1 << 8,
+ NVME_FEATURE_AENCFG_NOTICE_FIRMWARE_ACTIVATION = 1 << 9,
+ NVME_FEATURE_AENCFG_NOTICE_TELEMETRY_LOG = 1 << 10,
+ NVME_FEATURE_AENCFG_NOTICE_ANA_CHANGE = 1 << 11,
+ NVME_FEATURE_AENCFG_NOTICE_PL_EVENT = 1 << 12,
+ NVME_FEATURE_AENCFG_NOTICE_LBA_STATUS = 1 << 13,
+ NVME_FEATURE_AENCFG_NOTICE_EG_EVENT = 1 << 14,
+ /* NOTE(review): 1 << 31 overflows signed int (UB in C); consider 1U << 31 */
+ NVME_FEATURE_AENCFG_NOTICE_DISCOVERY_CHANGE = 1 << 31,
+};
+
+/**
+ * nvme_set_features_async_event() - Set the asynchronous event configuration feature
+ */
+int nvme_set_features_async_event(int fd, __u32 events, bool save,
+ __u32 *result);
+
+
+/**
+ * nvme_set_features_auto_pst() - Set the autonomous power state transition feature
+ */
+int nvme_set_features_auto_pst(int fd, bool apste, bool save,
+ struct nvme_feat_auto_pst *apst,
+ __u32 *result);
+
+/**
+ * nvme_set_features_timestamp() - Set the timestamp feature
+ */
+int nvme_set_features_timestamp(int fd, bool save, __u64 timestamp);
+
+
+/**
+ * nvme_set_features_hctm() - Set the host controlled thermal management feature
+ */
+int nvme_set_features_hctm(int fd, __u16 tmt2, __u16 tmt1, bool save,
+ __u32 *result);
+
+/**
+ * nvme_admin_set_features_nopsc() - Set the non-operational power state config feature
+ */
+int nvme_admin_set_features_nopsc(int fd, bool noppme, bool save,
+ __u32 *result);
+
+/**
+ * nvme_set_features_rrl() - Set the read recovery level feature
+ */
+int nvme_set_features_rrl(int fd, __u8 rrl, __u16 nvmsetid, bool save,
+ __u32 *result);
+
+/**
+ * nvme_set_features_plm_config() - Set the predictable latency mode config feature
+ */
+int nvme_set_features_plm_config(int fd, bool enable, __u16 nvmsetid,
+ bool save, struct nvme_plm_config *data,
+ __u32*result);
+
+/**
+ * enum nvme_feat_plm_window_select - Predictable latency mode window select values
+ */
+enum nvme_feat_plm_window_select {
+ NVME_FEATURE_PLM_DTWIN = 1,
+ NVME_FEATURE_PLM_NDWIN = 2,
+};
+
+/**
+ * nvme_set_features_plm_window() - Set the predictable latency mode window feature
+ */
+int nvme_set_features_plm_window(int fd, enum nvme_feat_plm_window_select sel,
+ __u16 nvmsetid, bool save, __u32 *result);
+
+
+/**
+ * nvme_set_features_lba_sts_interval() - Set the LBA status information attributes feature
+ */
+int nvme_set_features_lba_sts_interval(int fd, __u16 lsiri, __u16 lsipi,
+ bool save, __u32 *result);
+
+
+/**
+ * nvme_set_features_host_behavior() - Set the host behavior support feature
+ */
+int nvme_set_features_host_behavior(int fd, bool save,
+ struct nvme_feat_host_behavior *data);
+
+/**
+ * nvme_set_features_sanitize() - Set the sanitize config feature
+ */
+int nvme_set_features_sanitize(int fd, bool nodrm, bool save, __u32 *result);
+
+/**
+ * nvme_set_features_endurance_evt_cfg() - Set the endurance group event configuration feature
+ * @fd: File descriptor of nvme device
+ * @endgid: Endurance group identifier
+ * @egwarn: Flags to enable warning, see &enum nvme_eg_critical_warning_flags
+ */
+int nvme_set_features_endurance_evt_cfg(int fd, __u16 endgid, __u8 egwarn,
+ bool save, __u32 *result);
+
+/**
+ * nvme_set_features_sw_progress() - Set the software progress marker feature
+ */
+int nvme_set_features_sw_progress(int fd, __u8 pbslc, bool save,
+ __u32 *result);
+
+
+/**
+ * nvme_set_features_host_id() - Set the host identifier feature
+ */
+int nvme_set_features_host_id(int fd, bool exhid, bool save, __u8 *hostid);
+
+/**
+ * enum nvme_feat_resv_notify_flags - Reservation notification mask flags
+ */
+enum nvme_feat_resv_notify_flags {
+ NVME_FEAT_RESV_NOTIFY_REGPRE = 1 << 1,
+ NVME_FEAT_RESV_NOTIFY_RESREL = 1 << 2,
+ NVME_FEAT_RESV_NOTIFY_RESPRE = 1 << 3,
+};
+
+/**
+ * nvme_set_features_resv_mask() - Set the reservation notification mask feature
+ */
+int nvme_set_features_resv_mask(int fd, __u32 mask, bool save, __u32 *result);
+
+/**
+ * nvme_set_features_resv_persist() - Set the reservation persistence feature
+ */
+int nvme_set_features_resv_persist(int fd, bool ptpl, bool save, __u32 *result);
+
+/**
+ * enum nvme_feat_nswpcfg_state - Namespace write protection configuration state
+ * @NVME_FEAT_NS_NO_WRITE_PROTECT: Namespace is not write protected
+ * @NVME_FEAT_NS_WRITE_PROTECT: Namespace is write protected
+ * @NVME_FEAT_NS_WRITE_PROTECT_PWR_CYCLE: Write protected until power cycle
+ * @NVME_FEAT_NS_WRITE_PROTECT_PERMANENT: Permanently write protected
+ */
+enum nvme_feat_nswpcfg_state {
+ NVME_FEAT_NS_NO_WRITE_PROTECT = 0,
+ NVME_FEAT_NS_WRITE_PROTECT = 1,
+ NVME_FEAT_NS_WRITE_PROTECT_PWR_CYCLE = 2,
+ NVME_FEAT_NS_WRITE_PROTECT_PERMANENT = 3,
+};
+
+/**
+ * nvme_set_features_write_protect() - Set the namespace write protection feature
+ */
+int nvme_set_features_write_protect(int fd, enum nvme_feat_nswpcfg_state state,
+ bool save, __u32 *result);
+
+/**
+ * nvme_get_features() - Retrieve a feature attribute
+ * @fd: File descriptor of nvme device
+ * @fid: Feature identifier
+ * @nsid: Namespace ID, if applicable
+ * @sel: Select which type of attribute to return, see &enum nvme_get_features_sel
+ * @cdw11: Feature specific command dword11 field
+ * @uuidx: UUID Index for differentiating vendor specific encoding
+ * @data_len: Length of feature data, if applicable, in bytes
+ * @data: User address of feature data, if applicable
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_features(int fd, enum nvme_features_id fid, __u32 nsid,
+ enum nvme_get_features_sel sel, __u32 cdw11, __u8 uuidx,
+ __u32 data_len, void *data, __u32 *result);
+
+/**
+ * nvme_get_features_arbitration() - Get the arbitration feature
+ */
+int nvme_get_features_arbitration(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_power_mgmt() - Get the power management feature
+ */
+int nvme_get_features_power_mgmt(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_lba_range() - Get the LBA range type feature
+ */
+int nvme_get_features_lba_range(int fd, enum nvme_get_features_sel sel,
+ struct nvme_lba_range_type *data,
+ __u32 *result);
+
+/**
+ * nvme_get_features_temp_thresh() - Get the temperature threshold feature
+ */
+int nvme_get_features_temp_thresh(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_err_recovery() - Get the error recovery feature
+ */
+int nvme_get_features_err_recovery(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_volatile_wc() - Get the volatile write cache feature
+ */
+int nvme_get_features_volatile_wc(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_num_queues() - Get the number of queues feature
+ */
+int nvme_get_features_num_queues(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_irq_coalesce() - Get the interrupt coalescing feature
+ */
+int nvme_get_features_irq_coalesce(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_irq_config() - Get the interrupt vector configuration feature
+ */
+int nvme_get_features_irq_config(int fd, enum nvme_get_features_sel sel,
+ __u16 iv, __u32 *result);
+
+/**
+ * nvme_get_features_write_atomic() - Get the write atomicity normal feature
+ */
+int nvme_get_features_write_atomic(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_async_event() - Get the asynchronous event configuration feature
+ */
+int nvme_get_features_async_event(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_auto_pst() - Get the autonomous power state transition feature
+ */
+int nvme_get_features_auto_pst(int fd, enum nvme_get_features_sel sel,
+ struct nvme_feat_auto_pst *apst, __u32 *result);
+
+/**
+ * nvme_get_features_host_mem_buf() - Get the host memory buffer feature
+ */
+int nvme_get_features_host_mem_buf(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_timestamp() - Get the timestamp feature
+ */
+int nvme_get_features_timestamp(int fd, enum nvme_get_features_sel sel,
+ struct nvme_timestamp *ts);
+
+/**
+ * nvme_get_features_kato() - Get the keep alive timeout feature
+ */
+int nvme_get_features_kato(int fd, enum nvme_get_features_sel sel, __u32 *result);
+
+/**
+ * nvme_get_features_hctm() - Get the host controlled thermal management feature
+ */
+int nvme_get_features_hctm(int fd, enum nvme_get_features_sel sel, __u32 *result);
+
+/**
+ * nvme_get_features_nopsc() - Get the non-operational power state config feature
+ */
+int nvme_get_features_nopsc(int fd, enum nvme_get_features_sel sel, __u32 *result);
+
+/**
+ * nvme_get_features_rrl() - Get the read recovery level feature
+ */
+int nvme_get_features_rrl(int fd, enum nvme_get_features_sel sel, __u32 *result);
+
+/**
+ * nvme_get_features_plm_config() - Get the predictable latency mode config feature
+ */
+int nvme_get_features_plm_config(int fd, enum nvme_get_features_sel sel,
+ __u16 nvmsetid, struct nvme_plm_config *data,
+ __u32 *result);
+
+/**
+ * nvme_get_features_plm_window() - Get the predictable latency mode window feature
+ */
+int nvme_get_features_plm_window(int fd, enum nvme_get_features_sel sel,
+ __u16 nvmsetid, __u32 *result);
+
+/**
+ * nvme_get_features_lba_sts_interval() - Get the LBA status information attributes feature
+ */
+int nvme_get_features_lba_sts_interval(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_host_behavior() - Get the host behavior support feature
+ */
+int nvme_get_features_host_behavior(int fd, enum nvme_get_features_sel sel,
+ struct nvme_feat_host_behavior *data,
+ __u32 *result);
+
+/**
+ * nvme_get_features_sanitize() - Get the sanitize config feature
+ */
+int nvme_get_features_sanitize(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_endurance_event_cfg() - Get the endurance group event configuration feature
+ */
+int nvme_get_features_endurance_event_cfg(int fd, enum nvme_get_features_sel sel,
+ __u16 endgid, __u32 *result);
+
+/**
+ * nvme_get_features_sw_progress() - Get the software progress marker feature
+ */
+int nvme_get_features_sw_progress(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_host_id() - Get the host identifier feature
+ */
+int nvme_get_features_host_id(int fd, enum nvme_get_features_sel sel,
+ bool exhid, __u32 len, __u8 *hostid);
+
+/**
+ * nvme_get_features_resv_mask() - Get the reservation notification mask feature
+ */
+int nvme_get_features_resv_mask(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_resv_persist() - Get the reservation persistence feature
+ */
+int nvme_get_features_resv_persist(int fd, enum nvme_get_features_sel sel,
+ __u32 *result);
+
+/**
+ * nvme_get_features_write_protect() - Get the namespace write protection feature
+ */
+int nvme_get_features_write_protect(int fd, __u32 nsid,
+ enum nvme_get_features_sel sel,
+ __u32 *result);
+
+
+/**
+ * nvme_format_nvm() - Format nvme namespace(s)
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID to format
+ * @lbaf: Logical block address format
+ * @mset: Metadata settings (extended or separated), true if extended
+ * @pi: Protection information type
+ * @pil: Protection information location (beginning or end), true if end
+ * @ses: Secure erase settings
+ * @timeout: Set to override default timeout to this value in milliseconds;
+ * useful for long running formats. 0 will use system default.
+ *
+ * The Format NVM command is used to low level format the NVM media. This
+ * command is used by the host to change the LBA data size and/or metadata
+ * size. A low level format may destroy all data and metadata associated with
+ * all namespaces or only the specific namespace associated with the command.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_format_nvm(int fd, __u32 nsid, __u8 lbaf,
+ enum nvme_cmd_format_mset mset,
+ enum nvme_cmd_format_pi pi,
+ enum nvme_cmd_format_pil pil,
+ enum nvme_cmd_format_ses ses,
+ __u32 timeout);
+
+/**
+ * nvme_ns_mgmt() - Issue a Namespace Management command
+ * @fd: File descriptor of nvme device
+ */
+int nvme_ns_mgmt(int fd, __u32 nsid, enum nvme_ns_mgmt_sel sel,
+ struct nvme_id_ns *ns, __u32 *result, __u32 timeout);
+
+/**
+ * nvme_ns_mgmt_create() - Create a namespace
+ * @fd: File descriptor of nvme device
+ * @ns: Namespace identification that defines creation parameters
+ * @nsid: On success, set to the namespace id that was created
+ * @timeout: Override the default timeout to this value in milliseconds;
+ * set to 0 to use the system default.
+ *
+ * On successful creation, the namespace exists in the subsystem, but is not
+ * attached to any controller. Use the nvme_ns_attach_ctrls() to assign the
+ * namespace to one or more controllers.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_ns_mgmt_create(int fd, struct nvme_id_ns *ns, __u32 *nsid,
+ __u32 timeout);
+
+/**
+ * nvme_ns_mgmt_delete() - Delete a namespace
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace identifier to delete
+ *
+ * It is recommended that a namespace being deleted is not attached to any
+ * controller. Use the nvme_ns_detach_ctrls() first if the namespace is still
+ * attached.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_ns_mgmt_delete(int fd, __u32 nsid);
+
+/**
+ * nvme_ns_attach() - Attach or detach namespace to controller(s)
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID to execute attach selection
+ * @sel: Attachment selection, see &enum nvme_ns_attach_sel
+ * @ctrlist: Controller list to modify attachment state of nsid
+ */
+int nvme_ns_attach(int fd, __u32 nsid, enum nvme_ns_attach_sel sel,
+ struct nvme_ctrl_list *ctrlist);
+
+/**
+ * nvme_ns_attach_ctrls() - Attach a namespace to controllers
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID to attach
+ * @ctrlist: Controller list to modify attachment state of nsid
+ */
+int nvme_ns_attach_ctrls(int fd, __u32 nsid, struct nvme_ctrl_list *ctrlist);
+
+/**
+ * nvme_ns_dettach_ctrls() - Detach a namespace from controllers
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID to detach
+ * @ctrlist: Controller list to modify attachment state of nsid
+ *
+ * NOTE(review): "dettach" is misspelled but matches the exported symbol name;
+ * renaming it would break callers.
+ */
+int nvme_ns_dettach_ctrls(int fd, __u32 nsid, struct nvme_ctrl_list *ctrlist);
+
+/**
+ * nvme_fw_download() - Download part or all of a firmware image to the
+ * controller
+ * @fd: File descriptor of nvme device
+ * @offset: Offset in the firmware data
+ * @data_len: Length of data in this command in bytes
+ * @data: Userspace address of the firmware data
+ *
+ * The Firmware Image Download command is used to download all or a portion of
+ * an image for a future update to the controller. The Firmware Image Download
+ * command downloads a new image (in whole or in part) to the controller.
+ *
+ * The image may be constructed of multiple pieces that are individually
+ * downloaded with separate Firmware Image Download commands. Each Firmware
+ * Image Download command includes a Dword Offset and Number of Dwords that
+ * specify a dword range.
+ *
+ * The new firmware image is not activated as part of the Firmware Image
+ * Download command. Use the nvme_fw_commit() to activate a newly downloaded
+ * image.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_fw_download(int fd, __u32 offset, __u32 data_len, void *data);
+
+/**
+ * nvme_fw_commit() - Commit firmware using the specified action
+ * @fd: File descriptor of nvme device
+ * @slot: Firmware slot to commit the downloaded image
+ * @action: Action to use for the firmware image, see &enum nvme_fw_commit_ca
+ * @bpid: Set to true to select the boot partition id
+ *
+ * The Firmware Commit command is used to modify the firmware image or Boot
+ * Partitions.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise. The command status response may specify additional
+ * reset actions required to complete the commit process.
+ */
+int nvme_fw_commit(int fd, __u8 slot, enum nvme_fw_commit_ca action, bool bpid);
+
+/**
+ * nvme_security_send() - Transfer security protocol data to the controller
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID to issue security command on
+ * @nssf: NVMe Security Specific field
+ * @spsp0: Security Protocol Specific field
+ * @spsp1: Security Protocol Specific field
+ * @secp: Security Protocol
+ * @tl: Protocol specific transfer length
+ * @data_len: Data length of the payload in bytes
+ * @data: Security data payload to send
+ * @result: The command completion result from CQE dword0
+ *
+ * The Security Send command is used to transfer security protocol data to the
+ * controller. The data structure transferred to the controller as part of this
+ * command contains security protocol specific commands to be performed by the
+ * controller. The data structure transferred may also contain data or
+ * parameters associated with the security protocol commands.
+ *
+ * The security data is protocol specific and is not defined by the NVMe
+ * specification.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_security_send(int fd, __u32 nsid, __u8 nssf, __u8 spsp0, __u8 spsp1,
+ __u8 secp, __u32 tl, __u32 data_len, void *data,
+ __u32 *result);
+
+/**
+ * nvme_security_receive() - Transfer security protocol data from the controller
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID to issue security command on
+ * @nssf: NVMe Security Specific field
+ * @spsp0: Security Protocol Specific field
+ * @spsp1: Security Protocol Specific field
+ * @secp: Security Protocol
+ * @al: Protocol specific allocation length
+ * @data_len: Data length of the payload in bytes
+ * @data: Security data payload to receive
+ * @result: The command completion result from CQE dword0
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_security_receive(int fd, __u32 nsid, __u8 nssf, __u8 spsp0,
+ __u8 spsp1, __u8 secp, __u32 al, __u32 data_len,
+ void *data, __u32 *result);
+
+/**
+ * nvme_get_lba_status() - Retrieve information on possibly unrecoverable LBAs
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID to retrieve LBA status
+ * @slba: Starting logical block address to check statuses
+ * @mndw: Maximum number of dwords to return
+ * @atype: Action type mechanism to determine LBA status descriptors to
+ * return, see &enum nvme_lba_status_atype
+ * @rl: Range length from slba to perform the action
+ * @lbas: Data payload to return status descriptors
+ *
+ * The Get LBA Status command requests information about Potentially
+ * Unrecoverable LBAs. Refer to the specification for action type descriptions.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_lba_status(int fd, __u32 nsid, __u64 slba, __u32 mndw, __u16 rl,
+ enum nvme_lba_status_atype atype,
+ struct nvme_lba_status *lbas);
+
+/**
+ * nvme_directive_send() - Send directive command
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID, if applicable
+ * @dspec: Directive specific field
+ * @doper: Directive operation
+ * @dtype: Directive type, see &enum nvme_directive_dtype
+ * @cdw12: Directive specific command dword12
+ * @data_len: Length of data payload in bytes
+ * @data: Userspace address of data payload
+ * @result: If successful, the CQE dword0 value
+ *
+ * Directives is a mechanism to enable host and NVM subsystem or controller
+ * information exchange. The Directive Send command is used to transfer data
+ * related to a specific Directive Type from the host to the controller.
+ *
+ * See the NVMe specification for more information.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_directive_send(int fd, __u32 nsid, __u16 dspec, __u8 doper,
+ enum nvme_directive_dtype dtype, __u32 cdw12,
+ __u32 data_len, void *data, __u32 *result);
+
+/**
+ * nvme_directive_send_id_endir() - Directive Send Enable Directive
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID
+ * @endir: Set to enable the directive, clear to disable
+ * @dtype: Directive type to enable or disable
+ * @id: Identify directives payload
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_directive_send_id_endir(int fd, __u32 nsid, bool endir,
+ enum nvme_directive_dtype dtype,
+ struct nvme_id_directives *id);
+
+/**
+ * nvme_directive_send_stream_release_identifier() - Streams directive release identifier
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID
+ * @stream_id: Stream identifier to release
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_directive_send_stream_release_identifier(int fd, __u32 nsid,
+ __u16 stream_id);
+
+/**
+ * nvme_directive_send_stream_release_resource() - Streams directive release resource
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_directive_send_stream_release_resource(int fd, __u32 nsid);
+
+/**
+ * nvme_directive_recv() - Receive directive specific data
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID, if applicable
+ * @dspec: Directive specific field
+ * @doper: Directive operation
+ * @dtype: Directive type, see &enum nvme_directive_dtype
+ * @cdw12: Directive specific command dword12
+ * @data_len: Length of data payload in bytes
+ * @data: Userspace address of data payload
+ * @result: If successful, the CQE dword0 value
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_directive_recv(int fd, __u32 nsid, __u16 dspec, __u8 doper,
+ enum nvme_directive_dtype dtype, __u32 cdw12,
+ __u32 data_len, void *data, __u32 *result);
+
+/**
+ * nvme_directive_recv_identify_parameters() - Identify directive parameters
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID
+ * @id: Identify directives payload to return
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_directive_recv_identify_parameters(int fd, __u32 nsid,
+ struct nvme_id_directives *id);
+
+/**
+ * nvme_directive_recv_stream_parameters() - Streams directive parameters
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID
+ * @parms: Streams directive parameters to return
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_directive_recv_stream_parameters(int fd, __u32 nsid,
+ struct nvme_streams_directive_params *parms);
+
+/**
+ * nvme_directive_recv_stream_status() - Streams directive status
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID
+ * @nr_entries: Number of stream status entries the payload can hold
+ * @id: Streams directive status to return
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_directive_recv_stream_status(int fd, __u32 nsid, unsigned nr_entries,
+ struct nvme_streams_directive_status *id);
+
+/**
+ * nvme_directive_recv_stream_allocate() - Streams directive allocate resource
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID
+ * @nsr: Namespace streams requested
+ * @result: If successful, the CQE dword0 value
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_directive_recv_stream_allocate(int fd, __u32 nsid, __u16 nsr,
+ __u32 *result);
+
+/**
+ * enum nvme_fctype - Fabrics command types
+ * @nvme_fabrics_type_property_set: Property Set command
+ * @nvme_fabrics_type_connect: Connect command
+ * @nvme_fabrics_type_property_get: Property Get command
+ * @nvme_fabrics_type_auth_send: Authentication Send command
+ * @nvme_fabrics_type_auth_receive: Authentication Receive command
+ * @nvme_fabrics_type_disconnect: Disconnect command
+ */
+enum nvme_fctype {
+ nvme_fabrics_type_property_set = 0x00,
+ nvme_fabrics_type_connect = 0x01,
+ nvme_fabrics_type_property_get = 0x04,
+ nvme_fabrics_type_auth_send = 0x05,
+ nvme_fabrics_type_auth_receive = 0x06,
+ nvme_fabrics_type_disconnect = 0x08,
+};
+
+/**
+ * nvme_set_property() - Set controller property
+ * @fd: File descriptor of nvme device
+ * @offset: Property offset from the base to set
+ * @value: The value to set the property
+ *
+ * This is an NVMe-over-Fabrics specific command, not applicable to PCIe. These
+ * properties align to the PCI MMIO controller registers.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_set_property(int fd, int offset, __u64 value);
+
+/**
+ * nvme_get_property() - Get a controller property
+ * @fd: File descriptor of nvme device
+ * @offset: Property offset from the base to retrieve
+ * @value: Where the property's value will be stored on success
+ *
+ * This is an NVMe-over-Fabrics specific command, not applicable to PCIe. These
+ * properties align to the PCI MMIO controller registers.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_get_property(int fd, int offset, __u64 *value);
+
+/**
+ * nvme_sanitize_nvm() - Start a sanitize operation
+ * @fd: File descriptor of nvme device
+ * @sanact: Sanitize action, see &enum nvme_sanitize_sanact
+ * @ause: Set to allow unrestricted sanitize exit
+ * @owpass: Overwrite pass count
+ * @oipbp: Set to overwrite invert pattern between passes
+ * @nodas: Set to not deallocate blocks after sanitizing
+ * @ovrpat: Overwrite pattern
+ *
+ * A sanitize operation alters all user data in the NVM subsystem such that
+ * recovery of any previous user data from any cache, the non-volatile media,
+ * or any Controller Memory Buffer is not possible.
+ *
+ * The Sanitize command is used to start a sanitize operation or to recover
+ * from a previously failed sanitize operation. The sanitize operation types
+ * that may be supported are Block Erase, Crypto Erase, and Overwrite. All
+ * sanitize operations are processed in the background, i.e., completion of the
+ * sanitize command does not indicate completion of the sanitize operation.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_sanitize_nvm(int fd, enum nvme_sanitize_sanact sanact, bool ause,
+ __u8 owpass, bool oipbp, bool nodas, __u32 ovrpat);
+
+/**
+ * nvme_dev_self_test() - Start or abort a self test
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID to test
+ * @stc: Self test code, see &enum nvme_dst_stc
+ *
+ * The Device Self-test command is used to start a device self-test operation
+ * or abort a device self-test operation. A device self-test operation is a
+ * diagnostic testing sequence that tests the integrity and functionality of
+ * the controller and may include testing of the media associated with
+ * namespaces. The controller may return a response to this command immediately
+ * while running the self-test in the background.
+ *
+ * Set the 'nsid' field to 0 to not include namespaces in the test. Set to
+ * 0xffffffff to test all namespaces. All other values tests a specific
+ * namespace, if present.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_dev_self_test(int fd, __u32 nsid, enum nvme_dst_stc stc);
+
+/**
+ * nvme_virtual_mgmt() - Virtualization resource management
+ * @fd: File descriptor of nvme device
+ * @act: Virtual resource action, see &enum nvme_virt_mgmt_act
+ * @rt: Resource type to modify, see &enum nvme_virt_mgmt_rt
+ * @cntlid: Controller id for which resources are being modified
+ * @nr: Number of resources being allocated or assigned
+ * @result: If successful, the CQE dword0
+ *
+ * The Virtualization Management command is supported by primary controllers
+ * that support the Virtualization Enhancements capability. This command is
+ * used for several functions:
+ *
+ * - Modifying Flexible Resource allocation for the primary controller
+ * - Assigning Flexible Resources for secondary controllers
+ * - Setting the Online and Offline state for secondary controllers
+ *
+ * Return: The nvme command status if a response was received or -1
+ * with errno set otherwise.
+ */
+int nvme_virtual_mgmt(int fd, enum nvme_virt_mgmt_act act,
+ enum nvme_virt_mgmt_rt rt, __u16 cntlid, __u16 nr,
+ __u32 *result);
+
+/**
+ * DOC: NVMe IO command enums
+ */
+
+/**
+ * enum nvme_io_opcode - Opcodes for NVM command set I/O commands
+ * @nvme_cmd_flush: Flush
+ * @nvme_cmd_write: Write
+ * @nvme_cmd_read: Read
+ * @nvme_cmd_write_uncor: Write Uncorrectable
+ * @nvme_cmd_compare: Compare
+ * @nvme_cmd_write_zeroes: Write Zeroes
+ * @nvme_cmd_dsm: Dataset Management
+ * @nvme_cmd_verify: Verify
+ * @nvme_cmd_resv_register: Reservation Register
+ * @nvme_cmd_resv_report: Reservation Report
+ * @nvme_cmd_resv_acquire: Reservation Acquire
+ * @nvme_cmd_resv_release: Reservation Release
+ */
+enum nvme_io_opcode {
+	nvme_cmd_flush = 0x00,
+	nvme_cmd_write = 0x01,
+	nvme_cmd_read = 0x02,
+	nvme_cmd_write_uncor = 0x04,
+	nvme_cmd_compare = 0x05,
+	nvme_cmd_write_zeroes = 0x08,
+	nvme_cmd_dsm = 0x09,
+	nvme_cmd_verify = 0x0c,
+	nvme_cmd_resv_register = 0x0d,
+	nvme_cmd_resv_report = 0x0e,
+	nvme_cmd_resv_acquire = 0x11,
+	nvme_cmd_resv_release = 0x15,
+};
+
+/**
+ * nvme_flush() - Send an nvme flush command
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace identifier
+ *
+ * The Flush command is used to request that the contents of volatile write
+ * cache be made non-volatile.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_flush(int fd, __u32 nsid);
+
+/**
+ * enum nvme_io_control_flags - I/O command control field flags
+ * @NVME_IO_DTYPE_STREAMS: Directive type streams; the directive specific
+ *			   field carries a stream identifier
+ * @NVME_IO_DEAC: Deallocate bit, used with the Write Zeroes command
+ * @NVME_IO_PRINFO_PRCHK_REF: Check the protection information reference tag
+ * @NVME_IO_PRINFO_PRCHK_APP: Check the protection information application tag
+ * @NVME_IO_PRINFO_PRCHK_GUARD: Check the protection information guard field
+ * @NVME_IO_PRINFO_PRACT: Protection information action (strip/insert)
+ * @NVME_IO_FUA: Force unit access
+ * @NVME_IO_LR: Limited retry
+ */
+enum nvme_io_control_flags {
+	NVME_IO_DTYPE_STREAMS = 1 << 4,
+	NVME_IO_DEAC = 1 << 9,
+	NVME_IO_PRINFO_PRCHK_REF = 1 << 10,
+	NVME_IO_PRINFO_PRCHK_APP = 1 << 11,
+	NVME_IO_PRINFO_PRCHK_GUARD = 1 << 12,
+	NVME_IO_PRINFO_PRACT = 1 << 13,
+	NVME_IO_FUA = 1 << 14,
+	NVME_IO_LR = 1 << 15,
+};
+
+/**
+ * enum nvme_io_dsm_flags - Dataset management hints for read/write commands
+ * @NVME_IO_DSM_FREQ_UNSPEC: No frequency information provided
+ * @NVME_IO_DSM_FREQ_TYPICAL: Typical number of reads and writes expected
+ * @NVME_IO_DSM_FREQ_RARE: Infrequent writes and infrequent reads
+ * @NVME_IO_DSM_FREQ_READS: Infrequent writes, frequent reads
+ * @NVME_IO_DSM_FREQ_WRITES: Frequent writes, infrequent reads
+ * @NVME_IO_DSM_FREQ_RW: Frequent writes and frequent reads
+ * @NVME_IO_DSM_FREQ_ONCE: One-time write
+ * @NVME_IO_DSM_FREQ_PREFETCH: Speculative read, expected soon
+ * @NVME_IO_DSM_FREQ_TEMP: Will be overwritten in the near future
+ * @NVME_IO_DSM_LATENCY_NONE: No latency information provided
+ * @NVME_IO_DSM_LATENCY_IDLE: Longer latency acceptable
+ * @NVME_IO_DSM_LATENCY_NORM: Typical latency
+ * @NVME_IO_DSM_LATENCY_LOW: Smallest possible latency
+ * @NVME_IO_DSM_SEQ_REQ: Part of a sequential access pattern
+ * @NVME_IO_DSM_COMPRESSED: Compression hint
+ *	(NOTE(review): the spec names this bit "Incompressible" -- verify)
+ */
+enum nvme_io_dsm_flags {
+	NVME_IO_DSM_FREQ_UNSPEC = 0,
+	NVME_IO_DSM_FREQ_TYPICAL = 1,
+	NVME_IO_DSM_FREQ_RARE = 2,
+	NVME_IO_DSM_FREQ_READS = 3,
+	NVME_IO_DSM_FREQ_WRITES = 4,
+	NVME_IO_DSM_FREQ_RW = 5,
+	NVME_IO_DSM_FREQ_ONCE = 6,
+	NVME_IO_DSM_FREQ_PREFETCH = 7,
+	NVME_IO_DSM_FREQ_TEMP = 8,
+	NVME_IO_DSM_LATENCY_NONE = 0 << 4,
+	NVME_IO_DSM_LATENCY_IDLE = 1 << 4,
+	NVME_IO_DSM_LATENCY_NORM = 2 << 4,
+	NVME_IO_DSM_LATENCY_LOW = 3 << 4,
+	NVME_IO_DSM_SEQ_REQ = 1 << 6,
+	NVME_IO_DSM_COMPRESSED = 1 << 7,
+};
+
+/**
+ * nvme_read() - Submit an nvme user read command
+ * @fd: File descriptor of nvme device
+ * @nsid:	Namespace identifier
+ * @slba:	Starting logical block
+ * @nlb:	Number of logical blocks to send (0's based value)
+ * @control:	Command control flags, see &enum nvme_io_control_flags.
+ * @dsm:	Data set management attributes, see &enum nvme_io_dsm_flags
+ * @reftag:	This field specifies the Initial Logical Block Reference Tag
+ * 		expected value. Used only if the namespace is formatted to use
+ * 		end-to-end protection information.
+ * @apptag:	This field specifies the Application Tag expected value. Used
+ * 		only if the namespace is formatted to use end-to-end protection
+ * 		information.
+ * @appmask:	This field specifies the Application Tag Mask expected value.
+ * 		Used only if the namespace is formatted to use end-to-end
+ * 		protection information.
+ * @data_len:	Length of user buffer, @data, in bytes
+ * @data:	Pointer to user address of the data buffer
+ * @metadata_len: Length of user buffer, @metadata, in bytes
+ * @metadata: Pointer to user address of the metadata buffer
+ *
+ * Calls nvme_io() with nvme_cmd_read for the opcode.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_read(int fd, __u32 nsid, __u64 slba, __u16 nlb, __u16 control,
+ __u8 dsm, __u32 reftag, __u16 apptag, __u16 appmask,
+ __u32 data_len, void *data, __u32 metadata_len, void *metadata);
+
+/**
+ * nvme_write() - Submit an nvme user write command
+ * @fd: File descriptor of nvme device
+ * @nsid:	Namespace identifier
+ * @slba:	Starting logical block
+ * @nlb:	Number of logical blocks to send (0's based value)
+ * @control:	Command control flags, see &enum nvme_io_control_flags.
+ * @dsm:	Data set management attributes, see &enum nvme_io_dsm_flags
+ * @dspec:	Directive specific command, eg: stream identifier
+ * @reftag:	This field specifies the Initial Logical Block Reference Tag
+ * 		expected value. Used only if the namespace is formatted to use
+ * 		end-to-end protection information.
+ * @apptag:	This field specifies the Application Tag expected value. Used
+ * 		only if the namespace is formatted to use end-to-end protection
+ * 		information.
+ * @appmask:	This field specifies the Application Tag Mask expected value.
+ * 		Used only if the namespace is formatted to use end-to-end
+ * 		protection information.
+ * @data_len:	Length of user buffer, @data, in bytes
+ * @data:	Pointer to user address of the data buffer
+ * @metadata_len: Length of user buffer, @metadata, in bytes
+ * @metadata: Pointer to user address of the metadata buffer
+ *
+ * Calls nvme_io() with nvme_cmd_write for the opcode.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_write(int fd, __u32 nsid, __u64 slba, __u16 nlb, __u16 control,
+ __u8 dsm, __u16 dspec, __u32 reftag, __u16 apptag,
+ __u16 appmask, __u32 data_len, void *data, __u32 metadata_len,
+ void *metadata);
+
+/**
+ * nvme_compare() - Submit an nvme user compare command
+ * @fd: File descriptor of nvme device
+ * @nsid:	Namespace identifier
+ * @slba:	Starting logical block
+ * @nlb:	Number of logical blocks to send (0's based value)
+ * @control:	Command control flags, see &enum nvme_io_control_flags.
+ * @reftag:	This field specifies the Initial Logical Block Reference Tag
+ * 		expected value. Used only if the namespace is formatted to use
+ * 		end-to-end protection information.
+ * @apptag:	This field specifies the Application Tag expected value. Used
+ * 		only if the namespace is formatted to use end-to-end protection
+ * 		information.
+ * @appmask:	This field specifies the Application Tag Mask expected value.
+ * 		Used only if the namespace is formatted to use end-to-end
+ * 		protection information.
+ * @data_len:	Length of user buffer, @data, in bytes
+ * @data:	Pointer to user address of the data buffer
+ * @metadata_len: Length of user buffer, @metadata, in bytes
+ * @metadata: Pointer to user address of the metadata buffer
+ *
+ * Calls nvme_io() with nvme_cmd_compare for the opcode.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_compare(int fd, __u32 nsid, __u64 slba, __u16 nlb, __u16 control,
+ __u32 reftag, __u16 apptag, __u16 appmask, __u32 data_len,
+ void *data, __u32 metadata_len, void *metadata);
+
+/**
+ * nvme_write_zeros() - Submit an nvme write zeroes command
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace identifier
+ * @slba: Starting logical block
+ * @nlb: Number of logical blocks to clear (0's based value)
+ * @control: Command control flags, see &enum nvme_io_control_flags.
+ * @reftag: This field specifies the Initial Logical Block Reference Tag
+ * expected value. Used only if the namespace is formatted to use
+ * end-to-end protection information.
+ * @apptag:	This field specifies the Application Tag expected value. Used
+ * 		only if the namespace is formatted to use end-to-end protection
+ * 		information.
+ * @appmask:	This field specifies the Application Tag Mask expected value.
+ * 		Used only if the namespace is formatted to use end-to-end
+ * 		protection information.
+ *
+ * The Write Zeroes command is used to set a range of logical blocks to zero.
+ * After successful completion of this command, the value returned by
+ * subsequent reads of logical blocks in this range shall be all bytes cleared
+ * to 0h until a write occurs to this LBA range.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_write_zeros(int fd, __u32 nsid, __u64 slba, __u16 nlb, __u16 control,
+ __u32 reftag, __u16 apptag, __u16 appmask);
+
+/**
+ * nvme_write_uncorrectable() - Submit an nvme write uncorrectable command
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace identifier
+ * @slba: Starting logical block
+ * @nlb: Number of logical blocks to invalidate (0's based value)
+ *
+ * The Write Uncorrectable command is used to mark a range of logical blocks as
+ * invalid. When the specified logical block(s) are read after this operation,
+ * a failure is returned with Unrecovered Read Error status. To clear the
+ * invalid logical block status, a write operation on those logical blocks is
+ * required.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_write_uncorrectable(int fd, __u32 nsid, __u64 slba, __u16 nlb);
+
+/**
+ * nvme_verify() - Send an nvme verify command
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace identifier
+ * @slba: Starting logical block
+ * @nlb: Number of logical blocks to verify (0's based value)
+ * @control: Command control flags, see &enum nvme_io_control_flags.
+ * @reftag: This field specifies the Initial Logical Block Reference Tag
+ * expected value. Used only if the namespace is formatted to use
+ * end-to-end protection information.
+ * @apptag:	This field specifies the Application Tag expected value. Used
+ * 		only if the namespace is formatted to use end-to-end protection
+ * 		information.
+ * @appmask:	This field specifies the Application Tag Mask expected value.
+ * 		Used only if the namespace is formatted to use end-to-end
+ * 		protection information.
+ *
+ * The Verify command verifies integrity of stored information by reading data
+ * and metadata, if applicable, for the LBAs indicated without transferring any
+ * data or metadata to the host.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_verify(int fd, __u32 nsid, __u64 slba, __u16 nlb, __u16 control,
+ __u32 reftag, __u16 apptag, __u16 appmask);
+
+/**
+ * enum nvme_dsm_attributes - Dataset Management range attributes
+ * @NVME_DSMGMT_IDR: Attribute: integral dataset for read
+ * @NVME_DSMGMT_IDW: Attribute: integral dataset for write
+ * @NVME_DSMGMT_AD: Attribute: deallocate the specified ranges
+ */
+enum nvme_dsm_attributes {
+	NVME_DSMGMT_IDR = 1 << 0,
+	NVME_DSMGMT_IDW = 1 << 1,
+	NVME_DSMGMT_AD = 1 << 2,
+};
+
+/**
+ * nvme_dsm() - Send an nvme data set management command
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace identifier
+ * @attrs: DSM attributes, see &enum nvme_dsm_attributes
+ * @nr_ranges: Number of block ranges in the data set management attributes
+ * @dsm: The data set management attributes
+ *
+ * The Dataset Management command is used by the host to indicate attributes
+ * for ranges of logical blocks. This includes attributes like frequency that
+ * data is read or written, access size, and other information that may be used
+ * to optimize performance and reliability, and may be used to
+ * deallocate/unmap/trim those logical blocks.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_dsm(int fd, __u32 nsid, __u32 attrs, __u16 nr_ranges,
+ struct nvme_dsm_range *dsm);
+
+/**
+ * enum nvme_reservation_rtype - Reservation types
+ * @NVME_RESERVATION_RTYPE_WE: Write Exclusive
+ * @NVME_RESERVATION_RTYPE_EA: Exclusive Access
+ * @NVME_RESERVATION_RTYPE_WERO: Write Exclusive - Registrants Only
+ * @NVME_RESERVATION_RTYPE_EARO: Exclusive Access - Registrants Only
+ * @NVME_RESERVATION_RTYPE_WEAR: Write Exclusive - All Registrants
+ * @NVME_RESERVATION_RTYPE_EAAR: Exclusive Access - All Registrants
+ */
+enum nvme_reservation_rtype {
+	NVME_RESERVATION_RTYPE_WE = 1,
+	NVME_RESERVATION_RTYPE_EA = 2,
+	NVME_RESERVATION_RTYPE_WERO = 3,
+	NVME_RESERVATION_RTYPE_EARO = 4,
+	NVME_RESERVATION_RTYPE_WEAR = 5,
+	NVME_RESERVATION_RTYPE_EAAR = 6,
+};
+
+/**
+ * enum nvme_reservation_racqa - Reservation acquire actions
+ * @NVME_RESERVATION_RACQA_ACQUIRE: Acquire a reservation
+ * @NVME_RESERVATION_RACQA_PREEMPT: Preempt an existing reservation
+ * @NVME_RESERVATION_RACQA_PREEMPT_AND_ABORT: Preempt an existing reservation
+ *	and abort commands from the preempted host
+ */
+enum nvme_reservation_racqa {
+	NVME_RESERVATION_RACQA_ACQUIRE = 0,
+	NVME_RESERVATION_RACQA_PREEMPT = 1,
+	NVME_RESERVATION_RACQA_PREEMPT_AND_ABORT = 2,
+};
+
+/**
+ * nvme_resv_acquire() - Send an nvme reservation acquire
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace identifier
+ * @rtype:	The type of reservation to be created, see &enum nvme_reservation_rtype
+ * @racqa: The action that is performed by the command, see &enum nvme_reservation_racqa
+ * @iekey: Set to ignore the existing key
+ * @crkey: The current reservation key associated with the host
+ * @nrkey: The reservation key to be unregistered from the namespace if
+ * the action is preempt
+ *
+ * The Reservation Acquire command is used to acquire a reservation on a
+ * namespace, preempt a reservation held on a namespace, and abort a
+ * reservation held on a namespace.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_resv_acquire(int fd, __u32 nsid, enum nvme_reservation_rtype rtype,
+ enum nvme_reservation_racqa racqa, bool iekey,
+ __u64 crkey, __u64 nrkey);
+
+/**
+ * enum nvme_reservation_rrega - Reservation register actions
+ * @NVME_RESERVATION_RREGA_REGISTER_KEY: Register a reservation key
+ * @NVME_RESERVATION_RREGA_UNREGISTER_KEY: Unregister a reservation key
+ * @NVME_RESERVATION_RREGA_REPLACE_KEY: Replace a reservation key
+ */
+enum nvme_reservation_rrega {
+	NVME_RESERVATION_RREGA_REGISTER_KEY = 0,
+	NVME_RESERVATION_RREGA_UNREGISTER_KEY = 1,
+	NVME_RESERVATION_RREGA_REPLACE_KEY = 2,
+};
+
+/**
+ * enum nvme_reservation_cptpl - Change persist-through-power-loss state
+ * @NVME_RESERVATION_CPTPL_NO_CHANGE: No change to the PTPL state
+ * @NVME_RESERVATION_CPTPL_CLEAR: Reservations are released on power loss
+ * @NVME_RESERVATION_CPTPL_PERSIST: Reservations persist across power loss
+ */
+enum nvme_reservation_cptpl {
+	NVME_RESERVATION_CPTPL_NO_CHANGE = 0,
+	NVME_RESERVATION_CPTPL_CLEAR = 2,
+	NVME_RESERVATION_CPTPL_PERSIST = 3,
+};
+
+/**
+ * nvme_resv_register() - Send an nvme reservation register
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace identifier
+ * @rrega: The registration action, see &enum nvme_reservation_rrega
+ * @cptpl: Change persist through power loss, see &enum nvme_reservation_cptpl
+ * @iekey: Set to ignore the existing key
+ * @crkey: The current reservation key associated with the host
+ * @nrkey: The new reservation key to be register if action is register or
+ * replace
+ *
+ * The Reservation Register command is used to register, unregister, or replace
+ * a reservation key.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_resv_register(int fd, __u32 nsid, enum nvme_reservation_rrega rrega,
+ enum nvme_reservation_cptpl cptpl, bool iekey,
+ __u64 crkey, __u64 nrkey);
+
+/**
+ * enum nvme_reservation_rrela - Reservation release actions
+ * @NVME_RESERVATION_RRELA_RELEASE: Release the held reservation
+ * @NVME_RESERVATION_RRELA_CLEAR: Clear all reservations and registrations
+ */
+enum nvme_reservation_rrela {
+	NVME_RESERVATION_RRELA_RELEASE = 0,
+	NVME_RESERVATION_RRELA_CLEAR = 1
+};
+
+/**
+ * nvme_resv_release() - Send an nvme reservation release
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace identifier
+ * @rtype:	The type of reservation to be created, see &enum nvme_reservation_rtype
+ * @rrela:	Reservation release action, see &enum nvme_reservation_rrela
+ * @iekey: Set to ignore the existing key
+ * @crkey: The current reservation key to release
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_resv_release(int fd, __u32 nsid, enum nvme_reservation_rtype rtype,
+ enum nvme_reservation_rrela rrela, bool iekey,
+ __u64 crkey);
+
+/**
+ * nvme_resv_report() - Send an nvme reservation report
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace identifier
+ * @eds: Request extended Data Structure
+ * @len:	Number of bytes to request transferred with this command
+ * @report: The user space destination address to store the reservation report
+ *
+ * Returns a Reservation Status data structure to memory that describes the
+ * registration and reservation status of a namespace. See the definition for
+ * the returned structure, &struct nvme_reservation_status, for more details.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_resv_report(int fd, __u32 nsid, bool eds, __u32 len,
+ struct nvme_reservation_status *report);
+
+#endif /* _LIBNVME_CMD_H */
--- /dev/null
+#define _GNU_SOURCE
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <systemd/sd-id128.h>
+
+#include "fabrics.h"
+#include "types.h"
+#include "cmd.h"
+#include "util.h"
+
+#define NVMF_HOSTID_SIZE 36
+#define NVME_HOSTNQN_ID SD_ID128_MAKE(c7,f4,61,81,12,be,49,32,8c,83,10,6f,9d,dd,d8,6b)
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof (*x))
+
+const char *nvmf_dev = "/dev/nvme-fabrics";
+const char *nvmf_hostnqn_file = "/etc/nvme/hostnqn";
+const char *nvmf_hostid_file = "/etc/nvme/hostid";
+
+/**
+ * add_bool_argument() - Append ",<tok>" to @argstr when @arg is true
+ * @argstr: Connect option string being built; on append the old string is
+ *	    freed and replaced with the extended copy
+ * @tok: Option name to append
+ * @arg: Append only when true; false is a no-op
+ *
+ * Return: 0 on success, -1 with errno set to ENOMEM on allocation failure.
+ */
+static int add_bool_argument(char **argstr, char *tok, bool arg)
+{
+	char *nstr;
+
+	if (arg) {
+		if (asprintf(&nstr, "%s,%s", *argstr, tok) < 0) {
+			errno = ENOMEM;
+			return -1;
+		}
+		free(*argstr);
+		*argstr = nstr;
+	}
+	return 0;
+}
+
+/**
+ * add_int_argument() - Append ",<tok>=<arg>" to @argstr when @arg is set
+ * @argstr: Connect option string being built; on append the old string is
+ *	    freed and replaced with the extended copy
+ * @tok: Option name to append
+ * @arg: Option value
+ * @allow_zero: When true, -1 is the "unset" sentinel so 0 is a valid value
+ *		(used for tos); when false, 0 means "unset"
+ *
+ * Return: 0 on success, -1 with errno set to ENOMEM on allocation failure.
+ */
+static int add_int_argument(char **argstr, char *tok, int arg,
+		bool allow_zero)
+{
+	char *nstr;
+
+	if ((arg && !allow_zero) || (arg != -1 && allow_zero)) {
+		if (asprintf(&nstr, "%s,%s=%d", *argstr, tok, arg) < 0) {
+			errno = ENOMEM;
+			return -1;
+		}
+		free(*argstr);
+		*argstr = nstr;
+	}
+	return 0;
+}
+
+/**
+ * add_argument() - Append ",<tok>=<arg>" to @argstr for a string option
+ * @argstr: Connect option string being built; on append the old string is
+ *	    freed and replaced with the extended copy
+ * @tok: Option name to append
+ * @arg: Option value; NULL or the literal "none" is skipped
+ *
+ * Return: 0 on success, -1 with errno set to ENOMEM on allocation failure.
+ */
+static int add_argument(char **argstr, const char *tok, const char *arg)
+{
+	char *nstr;
+
+	if (arg && strcmp(arg, "none")) {
+		if (asprintf(&nstr, "%s,%s=%s", *argstr, tok, arg) < 0) {
+			errno = ENOMEM;
+			return -1;
+		}
+		free(*argstr);
+		*argstr = nstr;
+	}
+	return 0;
+}
+
+/**
+ * build_options() - Build the option string written to /dev/nvme-fabrics
+ * @argstr: Receives a malloc()ed "nqn=...,opt=val,..." string on success
+ * @cfg: Connection parameters to encode; cfg->nqn must be set
+ *
+ * Return: 0 on success, -1 with errno set on failure. On failure the
+ * partially built string is freed.
+ */
+static int build_options(char **argstr, struct nvme_fabrics_config *cfg)
+{
+	/* always specify nqn as first arg - this will init the string */
+	if (asprintf(argstr, "nqn=%s", cfg->nqn) < 0) {
+		errno = ENOMEM;
+		return -1;
+	}
+
+	/* || short-circuits on the first helper that fails */
+	if (add_argument(argstr, "transport", cfg->transport) ||
+	    add_argument(argstr, "traddr", cfg->traddr) ||
+	    add_argument(argstr, "host_traddr", cfg->host_traddr) ||
+	    add_argument(argstr, "trsvcid", cfg->trsvcid) ||
+	    add_argument(argstr, "hostnqn", cfg->hostnqn) ||
+	    add_argument(argstr, "hostid", cfg->hostid) ||
+	    add_int_argument(argstr, "nr_write_queues", cfg->nr_write_queues, false) ||
+	    add_int_argument(argstr, "nr_poll_queues", cfg->nr_poll_queues, false) ||
+	    add_int_argument(argstr, "reconnect_delay", cfg->reconnect_delay, false) ||
+	    add_int_argument(argstr, "ctrl_loss_tmo", cfg->ctrl_loss_tmo, false) ||
+	    add_int_argument(argstr, "tos", cfg->tos, true) ||
+	    add_bool_argument(argstr, "duplicate_connect", cfg->duplicate_connect) ||
+	    add_bool_argument(argstr, "disable_sqflow", cfg->disable_sqflow) ||
+	    add_bool_argument(argstr, "hdr_digest", cfg->hdr_digest) ||
+	    add_bool_argument(argstr, "data_digest", cfg->data_digest) ||
+	    add_int_argument(argstr, "queue_size", cfg->queue_size, false) ||
+	    add_int_argument(argstr, "keep_alive_tmo", cfg->keep_alive_tmo, false) ||
+	    add_int_argument(argstr, "nr_io_queues", cfg->nr_io_queues, false)) {
+		free(*argstr);
+		return -1;
+	}
+
+	return 0;
+}
+/**
+ * __nvmf_add_ctrl() - Write connect options to /dev/nvme-fabrics
+ * @argstr: The "nqn=...,opt=val,..." option string
+ *
+ * Writes the option string and parses the kernel's "instance=N" reply.
+ *
+ * Return: The controller instance number, or -1 with errno set otherwise.
+ */
+static int __nvmf_add_ctrl(const char *argstr)
+{
+	int ret, fd, len = strlen(argstr);
+	char buf[0x1000], *options, *p;
+
+	fd = open(nvmf_dev, O_RDWR);
+	if (fd < 0)
+		return -1;
+
+	ret = write(fd, argstr, len);
+	if (ret != len) {
+		ret = -1;
+		goto out_close;
+	}
+
+	/* Read at most sizeof(buf) - 1 bytes: reading the full buffer would
+	 * make the NUL store below write one byte past the end of buf. */
+	len = read(fd, buf, sizeof(buf) - 1);
+	if (len < 0) {
+		ret = -1;
+		goto out_close;
+	}
+
+	buf[len] = '\0';
+	options = buf;
+	while ((p = strsep(&options, ",\n")) != NULL) {
+		if (!*p)
+			continue;
+		if (sscanf(p, "instance=%d", &ret) == 1)
+			goto out_close;
+	}
+
+	errno = EINVAL;
+	ret = -1;
+out_close:
+	close(fd);
+	return ret;
+}
+
+/**
+ * nvmf_add_ctrl_opts() - Create a fabrics controller from connect options
+ * @cfg: Connection parameters; cfg->nqn must be set
+ *
+ * Return: The kernel-assigned controller instance number, or -1 with errno
+ * set otherwise.
+ */
+int nvmf_add_ctrl_opts(struct nvme_fabrics_config *cfg)
+{
+	char *argstr;
+	int ret;
+
+	ret = build_options(&argstr, cfg);
+	if (ret)
+		return ret;
+
+	/* Leftover debug printf() removed: a library must not write the
+	 * connect string to stdout on every call. */
+	ret = __nvmf_add_ctrl(argstr);
+
+	free(argstr);
+	return ret;
+}
+
+/**
+ * nvmf_add_ctrl() - Connect a fabrics controller and scan its topology
+ * @cfg: Connection parameters; cfg->nqn must be set
+ *
+ * Return: The scanned controller handle for the newly created "nvme<N>"
+ * device, or NULL on failure.
+ */
+nvme_ctrl_t nvmf_add_ctrl(struct nvme_fabrics_config *cfg)
+{
+	char d[32];
+	int ret;
+
+	ret = nvmf_add_ctrl_opts(cfg);
+	if (ret < 0)
+		return NULL;
+
+	memset(d, 0, sizeof(d));
+	if (snprintf(d, sizeof(d), "nvme%d", ret) < 0)
+		return NULL;
+
+	return nvme_scan_ctrl(d);
+}
+
+/**
+ * chomp() - Strip trailing spaces and NUL padding from a fixed-size field
+ * @s: Buffer to trim in place
+ * @l: Index of the last byte to examine (i.e. buffer size minus one)
+ *
+ * NOTE(review): s[l] is read before l is decremented, so a caller passing
+ * the full array size (instead of size - 1) indexes one element past the
+ * end of the buffer -- verify the call sites.
+ */
+static void chomp(char *s, int l)
+{
+	while (l && (s[l] == '\0' || s[l] == ' '))
+		s[l--] = '\0';
+}
+
+/**
+ * nvmf_connect_disc_entry() - Connect a controller for a discovery log entry
+ * @e: Discovery log entry describing the subsystem to connect to
+ * @defcfg: Default connection parameters; transport address fields are
+ *	    overridden from @e
+ * @discover: If non-NULL, set to true when @e refers to another discovery
+ *	      subsystem
+ *
+ * Return: The connected controller, or NULL with errno set on failure.
+ */
+nvme_ctrl_t nvmf_connect_disc_entry(struct nvmf_disc_log_entry *e,
+	const struct nvme_fabrics_config *defcfg, bool *discover)
+{
+	struct nvme_fabrics_config cfg = { 0 };
+	nvme_ctrl_t c;
+
+	memcpy(&cfg, defcfg, sizeof(cfg));
+	switch (e->subtype) {
+	case NVME_NQN_DISC:
+		if (discover)
+			*discover = true;
+		break;
+	case NVME_NQN_NVME:
+		break;
+	default:
+		errno = EINVAL;
+		return NULL;
+	}
+
+	switch (e->trtype) {
+	case NVMF_TRTYPE_RDMA:
+	case NVMF_TRTYPE_TCP:
+		switch (e->adrfam) {
+		case NVMF_ADDR_FAMILY_IP4:
+		case NVMF_ADDR_FAMILY_IP6:
+			/* chomp() takes the last valid index, not the size */
+			chomp(e->traddr, NVMF_TRADDR_SIZE - 1);
+			chomp(e->trsvcid, NVMF_TRSVCID_SIZE - 1);
+			cfg.traddr = e->traddr;
+			cfg.trsvcid = e->trsvcid;
+			break;
+		default:
+			errno = EINVAL;
+			return NULL;
+		}
+		break;
+	case NVMF_TRTYPE_FC:
+		switch (e->adrfam) {
+		case NVMF_ADDR_FAMILY_FC:
+			chomp(e->traddr, NVMF_TRADDR_SIZE - 1);
+			cfg.traddr = e->traddr;
+			cfg.trsvcid = NULL;
+			break;
+		default:
+			errno = EINVAL;
+			return NULL;
+		}
+		/* This break was missing: the FC case fell through into the
+		 * default below, failing every FC entry with EINVAL. */
+		break;
+	default:
+		errno = EINVAL;
+		return NULL;
+	}
+	cfg.transport = nvmf_trtype_str(e->trtype);
+
+	cfg.nqn = e->subnqn;
+	if (e->treq & NVMF_TREQ_DISABLE_SQFLOW)
+		cfg.disable_sqflow = true;
+
+	c = nvmf_add_ctrl(&cfg);
+	if (!c && errno == EINVAL && cfg.disable_sqflow) {
+		errno = 0;
+		/* disable_sqflow is unrecognized option on older kernels */
+		cfg.disable_sqflow = false;
+		c = nvmf_add_ctrl(&cfg);
+	}
+
+	return c;
+}
+
+/* Fetch @len bytes of the Discovery log page (retain-async-event set). */
+static int nvme_discovery_log(int fd, __u32 len, struct nvmf_discovery_log *log)
+{
+	return nvme_get_log_page(fd, 0, NVME_LOG_LID_DISC, true, len, log);
+}
+
+/**
+ * nvmf_get_discovery_log() - Retrieve a controller's discovery log page
+ * @c: Discovery controller to query
+ * @logp: On success, set to a malloc()ed log the caller must free()
+ * @max_retries: How often to retry while the generation counter changes
+ *
+ * Reads the log header to learn the record count, then re-reads the full
+ * log until the generation counter is stable across the transfer.
+ *
+ * Return: 0 on success, -1 with errno set otherwise.
+ */
+int nvmf_get_discovery_log(nvme_ctrl_t c, struct nvmf_discovery_log **logp,
+	int max_retries)
+{
+	struct nvmf_discovery_log *log;
+	int hdr, ret, retries = 0;
+	uint64_t genctr, numrec;
+	unsigned int size;
+
+	hdr = sizeof(struct nvmf_discovery_log);
+	log = malloc(hdr);
+	if (!log) {
+		errno = ENOMEM;
+		return -1;
+	}
+	memset(log, 0, hdr);
+
+	ret = nvme_discovery_log(nvme_ctrl_get_fd(c), 0x100, log);
+	if (ret)
+		goto out_free_log;
+
+	do {
+		numrec = le64_to_cpu(log->numrec);
+		genctr = le64_to_cpu(log->genctr);
+
+		/* Hand out the buffer before freeing it: the old code did
+		 * free(log) first and returned a dangling pointer here. */
+		if (numrec == 0) {
+			*logp = log;
+			return 0;
+		}
+		free(log);
+
+		size = sizeof(struct nvmf_discovery_log) +
+			sizeof(struct nvmf_disc_log_entry) * (numrec);
+
+		log = malloc(size);
+		if (!log) {
+			errno = ENOMEM;
+			return -1;
+		}
+		memset(log, 0, size);
+
+		ret = nvme_discovery_log(nvme_ctrl_get_fd(c), size, log);
+		if (ret)
+			goto out_free_log;
+
+		/* Re-read the header: a changed genctr means the log was
+		 * modified while we transferred it, so try again. */
+		genctr = le64_to_cpu(log->genctr);
+		ret = nvme_discovery_log(nvme_ctrl_get_fd(c), hdr, log);
+		if (ret)
+			goto out_free_log;
+	} while (genctr != le64_to_cpu(log->genctr) &&
+		 ++retries < max_retries);
+
+	if (genctr != le64_to_cpu(log->genctr)) {
+		errno = EAGAIN;
+		ret = -1;
+	} else if (numrec != le64_to_cpu(log->numrec)) {
+		errno = EBADSLT;
+		ret = -1;
+	} else {
+		*logp = log;
+		return 0;
+	}
+
+out_free_log:
+	free(log);
+	return ret;
+}
+
+/**
+ * nvmf_hostnqn_generate() - Generate a machine-specific host NQN
+ *
+ * Derives a stable, application-specific uuid from the machine id via
+ * systemd's sd_id128_get_machine_app_specific().
+ *
+ * Return: A malloc()ed NQN string the caller must free(), or NULL on error.
+ *
+ * NOTE(review): the generated string includes a trailing newline; confirm
+ * that consumers expect it (the file-based variants strip theirs).
+ */
+char *nvmf_hostnqn_generate()
+{
+	sd_id128_t id;
+	char *ret = NULL;
+
+	if (sd_id128_get_machine_app_specific(NVME_HOSTNQN_ID, &id) < 0)
+		return NULL;
+
+	if (asprintf(&ret,
+		     "nqn.2014-08.org.nvmexpress:uuid:" SD_ID128_FORMAT_STR "\n",
+		     SD_ID128_FORMAT_VAL(id)) < 0)
+		ret = NULL;
+
+	return ret;
+}
+
+/**
+ * nvmf_read_file() - Read the first line of a small configuration file
+ * @f: Path of the file to read
+ * @len: Buffer size; at most @len - 1 bytes are read
+ *
+ * Return: A malloc()ed copy of the first line (without the newline) that
+ * the caller must free(), or NULL if the file could not be opened or read.
+ */
+static char *nvmf_read_file(const char *f, int len)
+{
+	char buf[len];
+	int ret, fd;
+
+	fd = open(f, O_RDONLY);
+	if (fd < 0)
+		return NULL;
+
+	memset(buf, 0, len);
+	/* Was sizeof(buf - 1) == sizeof(char *), which read only 8 bytes;
+	 * read up to len - 1 so the buffer stays NUL terminated. */
+	ret = read(fd, buf, len - 1);
+	close(fd);
+
+	if (ret < 0)
+		return NULL;
+	return strndup(buf, strcspn(buf, "\n"));
+}
+
+/* Read the host NQN from /etc/nvme/hostnqn; caller frees the result. */
+char *nvmf_hostnqn_from_file()
+{
+	return nvmf_read_file(nvmf_hostnqn_file, NVMF_NQN_SIZE);
+}
+
+/* Read the host ID from /etc/nvme/hostid; caller frees the result. */
+char *nvmf_hostid_from_file()
+{
+	return nvmf_read_file(nvmf_hostid_file, NVMF_HOSTID_SIZE);
+}
--- /dev/null
+#ifndef _LIBNVME_FABRICS_H
+#define _LIBNVME_FABRICS_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "tree.h"
+
+/**
+ * struct nvme_fabrics_config - Parameters for a fabrics connect
+ *
+ * String members set to NULL (or "none") and integer members left 0
+ * (-1 for @tos) are omitted from the connect string.
+ */
+struct nvme_fabrics_config {
+	const char *transport;		/* transport name, e.g. "tcp", "rdma", "fc" */
+	const char *traddr;		/* transport address of the target */
+	const char *trsvcid;		/* transport service id (e.g. port number) */
+	const char *nqn;		/* subsystem NQN to connect to (required) */
+	const char *hostnqn;		/* host NQN to present */
+	const char *host_traddr;	/* host transport address */
+	const char *hostid;		/* host identifier */
+
+	int queue_size;			/* elements in the I/O queues */
+	int nr_io_queues;		/* number of I/O queues */
+	int reconnect_delay;		/* seconds between reconnect attempts */
+	int ctrl_loss_tmo;		/* seconds before giving up reconnects */
+	int keep_alive_tmo;		/* keep-alive timeout in seconds */
+	int nr_write_queues;		/* queues dedicated to writes */
+	int nr_poll_queues;		/* queues dedicated to polling */
+	int tos;			/* type of service; -1 means unset */
+
+	bool duplicate_connect;		/* allow duplicate connections */
+	bool disable_sqflow;		/* disable SQ flow control */
+	bool hdr_digest;		/* enable transport header digest */
+	bool data_digest;		/* enable transport data digest */
+
+	uint8_t rsvd[0x200];		/* reserved for future growth (ABI padding) */
+};
+
+int nvmf_add_ctrl_opts(struct nvme_fabrics_config *cfg);
+nvme_ctrl_t nvmf_add_ctrl(struct nvme_fabrics_config *cfg);
+int nvmf_get_discovery_log(nvme_ctrl_t c, struct nvmf_discovery_log **logp, int max_retries);
+char *nvmf_hostnqn_generate();
+char *nvmf_hostnqn_from_file();
+char *nvmf_hostid_from_file();
+
+
+const char *nvmf_trtype_str(__u8 trtype);
+const char *nvmf_adrfam_str(__u8 adrfam);
+const char *nvmf_subtype_str(__u8 subtype);
+const char *nvmf_treq_str(__u8 treq);
+const char *nvmf_sectype_str(__u8 sectype);
+const char *nvmf_prtype_str(__u8 prtype);
+const char *nvmf_qptype_str(__u8 qptype);
+const char *nvmf_cms_str(__u8 cm);
+
+nvme_ctrl_t nvmf_connect_disc_entry(struct nvmf_disc_log_entry *e,
+ const struct nvme_fabrics_config *defcfg, bool *discover);
+#endif /* _LIBNVME_FABRICS_H */
--- /dev/null
+#define _GNU_SOURCE
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <dirent.h>
+#include <libgen.h>
+
+#include <linux/types.h>
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "filters.h"
+#include "types.h"
+#include "util.h"
+
+const char *nvme_ctrl_sysfs_dir = "/sys/class/nvme";
+const char *nvme_subsys_sysfs_dir = "/sys/class/nvme-subsystem";
+
+/**
+ * nvme_namespace_filter() - scandir() filter matching "nvme<i>n<n>" entries
+ * @d: Directory entry to test
+ *
+ * Return: 1 if @d names an nvme namespace block device, 0 otherwise.
+ */
+int nvme_namespace_filter(const struct dirent *d)
+{
+	int i, n;
+
+	if (d->d_name[0] == '.')
+		return 0;
+
+	if (strstr(d->d_name, "nvme"))
+		if (sscanf(d->d_name, "nvme%dn%d", &i, &n) == 2)
+			return 1;
+
+	return 0;
+}
+
+/**
+ * nvme_paths_filter() - scandir() filter matching "nvme<i>c<c>n<n>" entries
+ * @d: Directory entry to test
+ *
+ * Return: 1 if @d names a per-controller namespace path (multipath), 0
+ * otherwise.
+ */
+int nvme_paths_filter(const struct dirent *d)
+{
+	int i, c, n;
+
+	if (d->d_name[0] == '.')
+		return 0;
+
+	if (strstr(d->d_name, "nvme"))
+		if (sscanf(d->d_name, "nvme%dc%dn%d", &i, &c, &n) == 3)
+			return 1;
+
+	return 0;
+}
+
+/**
+ * nvme_ctrls_filter() - scandir() filter matching bare "nvme<i>" entries
+ * @d: Directory entry to test
+ *
+ * Path ("nvme<i>c<c>n<n>") and namespace ("nvme<i>n<n>") names are
+ * rejected first so only plain controller entries match.
+ *
+ * Return: 1 if @d names a controller, 0 otherwise.
+ */
+int nvme_ctrls_filter(const struct dirent *d)
+{
+	int i, c, n;
+
+	if (d->d_name[0] == '.')
+		return 0;
+
+	if (strstr(d->d_name, "nvme")) {
+		if (sscanf(d->d_name, "nvme%dc%dn%d", &i, &c, &n) == 3)
+			return 0;
+		if (sscanf(d->d_name, "nvme%dn%d", &i, &n) == 2)
+			return 0;
+		if (sscanf(d->d_name, "nvme%d", &i) == 1)
+			return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * nvme_subsys_filter() - scandir() filter matching "nvme-subsys<i>" entries
+ * @d: Directory entry to test
+ *
+ * Return: 1 if @d names an nvme subsystem, 0 otherwise.
+ */
+int nvme_subsys_filter(const struct dirent *d)
+{
+	int i;
+
+	if (d->d_name[0] == '.')
+		return 0;
+
+	if (strstr(d->d_name, "nvme-subsys"))
+		if (sscanf(d->d_name, "nvme-subsys%d", &i) == 1)
+			return 1;
+
+	return 0;
+}
+
+/* List all nvme subsystems in sysfs; returns the scandir() entry count. */
+int nvme_scan_subsystems(struct dirent ***subsys)
+{
+	return scandir(nvme_subsys_sysfs_dir, subsys, nvme_subsys_filter, alphasort);
+}
+
+/* List the controllers of subsystem @s; returns the scandir() entry count. */
+int nvme_scan_subsystem_ctrls(nvme_subsystem_t s, struct dirent ***ctrls)
+{
+	return scandir(nvme_subsystem_get_sysfs_dir(s), ctrls, nvme_ctrls_filter, alphasort);
+}
+
+/* List the namespaces of subsystem @s; returns the scandir() entry count. */
+int nvme_scan_subsystem_namespaces(nvme_subsystem_t s, struct dirent ***namespaces)
+{
+	return scandir(nvme_subsystem_get_sysfs_dir(s), namespaces, nvme_namespace_filter, alphasort);
+}
+
+/* List namespace paths of controller @c; returns the scandir() entry count. */
+int nvme_scan_ctrl_namespace_paths(nvme_ctrl_t c, struct dirent ***namespaces)
+{
+	return scandir(nvme_ctrl_get_sysfs_dir(c), namespaces, nvme_paths_filter, alphasort);
+}
+
+/* List the namespaces of controller @c; returns the scandir() entry count. */
+int nvme_scan_ctrl_namespaces(nvme_ctrl_t c, struct dirent ***namespaces)
+{
+	return scandir(nvme_ctrl_get_sysfs_dir(c), namespaces, nvme_namespace_filter, alphasort);
+}
--- /dev/null
+#ifndef _LIBNVME_FILTERS_H
+#define _LIBNVME_FILTERS_H
+
+#include <dirent.h>
+#include "tree.h"
+
+
+int nvme_namespace_filter(const struct dirent *d);
+int nvme_paths_filter(const struct dirent *d);
+int nvme_ctrls_filter(const struct dirent *d);
+int nvme_subsys_filter(const struct dirent *d);
+
+int nvme_scan_subsystems(struct dirent ***subsys);
+int nvme_scan_subsystem_ctrls(nvme_subsystem_t s, struct dirent ***ctrls);
+int nvme_scan_subsystem_namespaces(nvme_subsystem_t s, struct dirent ***namespaces);
+int nvme_scan_ctrl_namespace_paths(nvme_ctrl_t c, struct dirent ***namespaces);
+int nvme_scan_ctrl_namespaces(nvme_ctrl_t c, struct dirent ***namespaces);
+
+#endif /* _LIBNVME_FILTERS_H */
--- /dev/null
+#include <errno.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+
+#include "ioctl.h"
+#include "cmd.h"
+#include "types.h"
+
+/**
+ * nvme_verify_chr() - Check that @fd refers to a character device
+ * @fd: File descriptor to verify
+ *
+ * Return: 0 if @fd is a character device, -1 with errno set otherwise.
+ */
+static int nvme_verify_chr(int fd)
+{
+	static struct stat nvme_stat;
+	int err = fstat(fd, &nvme_stat);
+
+	if (err < 0)
+		/* errno is already set by fstat(); return -1 instead of the
+		 * positive errno the old code returned, matching the -1/errno
+		 * convention used everywhere else, and don't perror() from a
+		 * library. */
+		return -1;
+	if (!S_ISCHR(nvme_stat.st_mode)) {
+		errno = ENOTBLK;
+		return -1;
+	}
+	return 0;
+}
+
+/* Issue NVME_IOCTL_SUBSYS_RESET on the controller character device @fd. */
+int nvme_subsystem_reset(int fd)
+{
+	int ret;
+
+	ret = nvme_verify_chr(fd);
+	if (ret)
+		return ret;
+	return ioctl(fd, NVME_IOCTL_SUBSYS_RESET);
+}
+
+/* Issue NVME_IOCTL_RESET on the controller character device @fd. */
+int nvme_reset_controller(int fd)
+{
+	int ret;
+
+	ret = nvme_verify_chr(fd);
+	if (ret)
+		return ret;
+	return ioctl(fd, NVME_IOCTL_RESET);
+}
+
+/* Issue NVME_IOCTL_RESCAN on the controller character device @fd. */
+int nvme_ns_rescan(int fd)
+{
+	int ret;
+
+	ret = nvme_verify_chr(fd);
+	if (ret)
+		return ret;
+	return ioctl(fd, NVME_IOCTL_RESCAN);
+}
+
+/**
+ * nvme_get_nsid() - Get the namespace id of a block device
+ * @fd: File descriptor of an nvme namespace block device
+ *
+ * Return: The namespace id from NVME_IOCTL_ID, or -1 with errno set
+ * (ENOTBLK if @fd is not a block device).
+ */
+int nvme_get_nsid(int fd)
+{
+	static struct stat nvme_stat;
+	int err = fstat(fd, &nvme_stat);
+
+	if (err < 0)
+		return -1;
+
+	if (!S_ISBLK(nvme_stat.st_mode)) {
+		errno = ENOTBLK;
+		return -1;
+	}
+	return ioctl(fd, NVME_IOCTL_ID);
+}
+
+/* Submit a 64-bit-result passthru command; on success copy the completion
+ * dword result to @result when the caller asked for it. */
+static int nvme_submit_passthru64(int fd, unsigned long ioctl_cmd,
+	struct nvme_passthru_cmd64 *cmd, __u64 *result)
+{
+	int err = ioctl(fd, ioctl_cmd, cmd);
+
+	if (err >= 0 && result)
+		*result = cmd->result;
+	return err;
+}
+
+/* Submit a 32-bit-result passthru command; on success copy the completion
+ * dword result to @result when the caller asked for it. */
+static int nvme_submit_passthru(int fd, unsigned long ioctl_cmd,
+	struct nvme_passthru_cmd *cmd, __u32 *result)
+{
+	int err = ioctl(fd, ioctl_cmd, cmd);
+
+	if (err >= 0 && result)
+		*result = cmd->result;
+	return err;
+}
+
+/* Pack the caller's fields into an nvme_passthru_cmd64 and submit it via
+ * @ioctl_cmd. Buffer pointers are carried as __u64 kernel addresses. */
+static int nvme_passthru64(int fd, unsigned long ioctl_cmd, __u8 opcode,
+	__u8 flags, __u16 rsvd, __u32 nsid, __u32 cdw2, __u32 cdw3,
+	__u32 cdw10, __u32 cdw11, __u32 cdw12, __u32 cdw13,
+	__u32 cdw14, __u32 cdw15, __u32 data_len, void *data,
+	__u32 metadata_len, void *metadata, __u32 timeout_ms,
+	__u64 *result)
+{
+	struct nvme_passthru_cmd64 cmd = {
+		.opcode = opcode,
+		.flags = flags,
+		.rsvd1 = rsvd,
+		.nsid = nsid,
+		.cdw2 = cdw2,
+		.cdw3 = cdw3,
+		.metadata = (__u64)(uintptr_t)metadata,
+		.addr = (__u64)(uintptr_t)data,
+		.metadata_len = metadata_len,
+		.data_len = data_len,
+		.cdw10 = cdw10,
+		.cdw11 = cdw11,
+		.cdw12 = cdw12,
+		.cdw13 = cdw13,
+		.cdw14 = cdw14,
+		.cdw15 = cdw15,
+		.timeout_ms = timeout_ms,
+	};
+
+	return nvme_submit_passthru64(fd, ioctl_cmd, &cmd, result);
+}
+
+/* Pack the caller's fields into an nvme_passthru_cmd and submit it via
+ * @ioctl_cmd. Buffer pointers are carried as __u64 kernel addresses. */
+static int nvme_passthru(int fd, unsigned long ioctl_cmd, __u8 opcode,
+	__u8 flags, __u16 rsvd, __u32 nsid, __u32 cdw2, __u32 cdw3,
+	__u32 cdw10, __u32 cdw11, __u32 cdw12, __u32 cdw13,
+	__u32 cdw14, __u32 cdw15, __u32 data_len, void *data,
+	__u32 metadata_len, void *metadata, __u32 timeout_ms,
+	__u32 *result)
+{
+	struct nvme_passthru_cmd cmd = {
+		.opcode = opcode,
+		.flags = flags,
+		.rsvd1 = rsvd,
+		.nsid = nsid,
+		.cdw2 = cdw2,
+		.cdw3 = cdw3,
+		.metadata = (__u64)(uintptr_t)metadata,
+		.addr = (__u64)(uintptr_t)data,
+		.metadata_len = metadata_len,
+		.data_len = data_len,
+		.cdw10 = cdw10,
+		.cdw11 = cdw11,
+		.cdw12 = cdw12,
+		.cdw13 = cdw13,
+		.cdw14 = cdw14,
+		.cdw15 = cdw15,
+		.timeout_ms = timeout_ms,
+	};
+
+	return nvme_submit_passthru(fd, ioctl_cmd, &cmd, result);
+}
+
+/* Submit a caller-built admin command using the 64-bit result ioctl. */
+int nvme_submit_admin_passthru64(int fd, struct nvme_passthru_cmd64 *cmd,
+	__u64 *result)
+{
+	return nvme_submit_passthru64(fd, NVME_IOCTL_ADMIN64_CMD, cmd, result);
+}
+
+/* Build an admin command from loose fields and submit it (64-bit result). */
+int nvme_admin_passthru64(int fd, __u8 opcode, __u8 flags, __u16 rsvd,
+	__u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, __u32 cdw11,
+	__u32 cdw12, __u32 cdw13, __u32 cdw14, __u32 cdw15,
+	__u32 data_len, void *data, __u32 metadata_len, void *metadata,
+	__u32 timeout_ms, __u64 *result)
+{
+	return nvme_passthru64(fd, NVME_IOCTL_ADMIN64_CMD, opcode, flags, rsvd,
+		nsid, cdw2, cdw3, cdw10, cdw11, cdw12, cdw13, cdw14, cdw15,
+		data_len, data, metadata_len, metadata, timeout_ms, result);
+}
+
+/* Submit a caller-built admin command using the 32-bit result ioctl. */
+int nvme_submit_admin_passthru(int fd, struct nvme_passthru_cmd *cmd, __u32 *result)
+{
+	return nvme_submit_passthru(fd, NVME_IOCTL_ADMIN_CMD, cmd, result);
+}
+
+/* Build an admin command from loose fields and submit it (32-bit result). */
+int nvme_admin_passthru(int fd, __u8 opcode, __u8 flags, __u16 rsvd,
+	__u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, __u32 cdw11,
+	__u32 cdw12, __u32 cdw13, __u32 cdw14, __u32 cdw15,
+	__u32 data_len, void *data, __u32 metadata_len, void *metadata,
+	__u32 timeout_ms, __u32 *result)
+{
+	return nvme_passthru(fd, NVME_IOCTL_ADMIN_CMD, opcode, flags, rsvd, nsid,
+		cdw2, cdw3, cdw10, cdw11, cdw12, cdw13, cdw14, cdw15, data_len,
+		data, metadata_len, metadata, timeout_ms, result);
+}
+
+/*
+ * Admin command dword field layout: each field is a SHIFT/MASK pair and a
+ * value is placed with ((val & MASK) << SHIFT) — see the DW() macro.
+ * Widths follow the NVMe Base Specification command dword definitions.
+ */
+enum nvme_cmd_dword_fields {
+	NVME_DEVICE_SELF_TEST_CDW10_STC_SHIFT = 0,
+	NVME_DEVICE_SELF_TEST_CDW10_STC_MASK = 0xf,	/* STC is bits 3:0, not 2:0 */
+	NVME_DIRECTIVE_CDW11_DOPER_SHIFT = 0,
+	NVME_DIRECTIVE_CDW11_DTYPE_SHIFT = 8,
+	NVME_DIRECTIVE_CDW11_DPSEC_SHIFT = 16,
+	NVME_DIRECTIVE_CDW11_DOPER_MASK = 0xff,
+	NVME_DIRECTIVE_CDW11_DTYPE_MASK = 0xff,
+	NVME_DIRECTIVE_CDW11_DPSEC_MASK = 0xffff,
+	NVME_DIRECTIVE_SEND_IDENTIFY_CDW12_ENDIR_SHIFT = 0,
+	NVME_DIRECTIVE_SEND_IDENTIFY_CDW12_DTYPE_SHIFT = 1,
+	NVME_DIRECTIVE_SEND_IDENTIFY_CDW12_ENDIR_MASK = 0x1,
+	NVME_DIRECTIVE_SEND_IDENTIFY_CDW12_DTYPE_MASK = 0x1,
+	NVME_FW_COMMIT_CDW10_FS_SHIFT = 0,
+	NVME_FW_COMMIT_CDW10_CA_SHIFT = 3,
+	NVME_FW_COMMIT_CDW10_BPID_SHIFT = 31,
+	NVME_FW_COMMIT_CDW10_FS_MASK = 0x7,
+	NVME_FW_COMMIT_CDW10_CA_MASK = 0x7,
+	NVME_FW_COMMIT_CDW10_BPID_MASK = 0x1,
+	NVME_GET_FEATURES_CDW10_SEL_SHIFT = 8,
+	NVME_GET_FEATURES_CDW10_SEL_MASK = 0x7,
+	NVME_SET_FEATURES_CDW10_SAVE_SHIFT = 31,
+	NVME_SET_FEATURES_CDW10_SAVE_MASK = 0x1,
+	NVME_FEATURES_CDW10_FID_SHIFT = 0,
+	NVME_FEATURES_CDW14_UUID_SHIFT = 0,
+	NVME_FEATURES_CDW10_FID_MASK = 0xff,
+	NVME_FEATURES_CDW14_UUID_MASK = 0x7f,
+	NVME_LOG_CDW10_LID_SHIFT = 0,
+	NVME_LOG_CDW10_LSP_SHIFT = 8,
+	NVME_LOG_CDW10_RAE_SHIFT = 15,
+	NVME_LOG_CDW10_NUMDL_SHIFT = 16,
+	NVME_LOG_CDW11_NUMDU_SHIFT = 0,
+	NVME_LOG_CDW11_LSI_SHIFT = 16,
+	NVME_LOG_CDW14_UUID_SHIFT = 0,
+	NVME_LOG_CDW10_LID_MASK = 0xff,
+	NVME_LOG_CDW10_LSP_MASK = 0xf,
+	NVME_LOG_CDW10_RAE_MASK = 0x1,
+	NVME_LOG_CDW10_NUMDL_MASK = 0xffff,	/* NUMDL/NUMDU/LSI are 16-bit; 0xff truncated them */
+	NVME_LOG_CDW11_NUMDU_MASK = 0xffff,
+	NVME_LOG_CDW11_LSI_MASK = 0xffff,
+	NVME_LOG_CDW14_UUID_MASK = 0x7f,
+	NVME_IDENTIFY_CDW10_CNS_SHIFT = 0,
+	NVME_IDENTIFY_CDW10_CNTID_SHIFT = 16,
+	NVME_IDENTIFY_CDW11_NVMSETID_SHIFT = 0,
+	NVME_IDENTIFY_CDW14_UUID_SHIFT = 0,
+	NVME_IDENTIFY_CDW10_CNS_MASK = 0xff,
+	NVME_IDENTIFY_CDW10_CNTID_MASK = 0xffff,
+	NVME_IDENTIFY_CDW11_NVMSETID_MASK = 0xffff,
+	NVME_IDENTIFY_CDW14_UUID_MASK = 0x7f,
+	NVME_NAMESPACE_ATTACH_CDW10_SEL_SHIFT = 0,
+	NVME_NAMESPACE_ATTACH_CDW10_SEL_MASK = 0xf,
+	NVME_NAMESPACE_MGMT_CDW10_SEL_SHIFT = 0,
+	NVME_NAMESPACE_MGMT_CDW10_SEL_MASK = 0xf,
+	NVME_VIRT_MGMT_CDW10_ACT_SHIFT = 0,
+	NVME_VIRT_MGMT_CDW10_RT_SHIFT = 8,
+	NVME_VIRT_MGMT_CDW10_CNTLID_SHIFT = 16,
+	NVME_VIRT_MGMT_CDW11_NR_SHIFT = 0,
+	NVME_VIRT_MGMT_CDW10_ACT_MASK = 0xf,	/* these three held shift values (0/8/16), zeroing or corrupting the fields */
+	NVME_VIRT_MGMT_CDW10_RT_MASK = 0x7,
+	NVME_VIRT_MGMT_CDW10_CNTLID_MASK = 0xffff,
+	NVME_VIRT_MGMT_CDW11_NR_MASK = 0xffff,
+	NVME_FORMAT_CDW10_LBAF_SHIFT = 0,
+	NVME_FORMAT_CDW10_MSET_SHIFT = 4,
+	NVME_FORMAT_CDW10_PI_SHIFT = 5,
+	NVME_FORMAT_CDW10_PIL_SHIFT = 8,
+	NVME_FORMAT_CDW10_SES_SHIFT = 9,
+	NVME_FORMAT_CDW10_LBAF_MASK = 0xf,
+	NVME_FORMAT_CDW10_MSET_MASK = 0x1,
+	NVME_FORMAT_CDW10_PI_MASK = 0x7,
+	NVME_FORMAT_CDW10_PIL_MASK = 0x1,
+	NVME_FORMAT_CDW10_SES_MASK = 0x7,
+	NVME_SANITIZE_CDW10_SANACT_SHIFT = 0,
+	NVME_SANITIZE_CDW10_AUSE_SHIFT = 3,
+	NVME_SANITIZE_CDW10_OWPASS_SHIFT = 4,
+	NVME_SANITIZE_CDW10_OIPBP_SHIFT = 8,
+	NVME_SANITIZE_CDW10_NODAS_SHIFT = 9,
+	NVME_SANITIZE_CDW10_SANACT_MASK = 0x7,
+	NVME_SANITIZE_CDW10_AUSE_MASK = 0x1,
+	NVME_SANITIZE_CDW10_OWPASS_MASK = 0xf,
+	NVME_SANITIZE_CDW10_OIPBP_MASK = 0x1,
+	NVME_SANITIZE_CDW10_NODAS_MASK = 0x1,
+	NVME_SECURITY_NSSF_SHIFT = 0,
+	NVME_SECURITY_SPSP0_SHIFT = 8,
+	NVME_SECURITY_SPSP1_SHIFT = 16,
+	NVME_SECURITY_SECP_SHIFT = 24,
+	NVME_SECURITY_NSSF_MASK = 0xff,
+	NVME_SECURITY_SPSP0_MASK = 0xff,
+	NVME_SECURITY_SPSP1_MASK = 0xff,
+	NVME_SECURITY_SECP_MASK = 0xff,	/* SECP is 8 bits (31:24); 0xffff would overflow the dword */
+	NVME_GET_LBA_STATUS_CDW13_RL_SHIFT = 0,
+	NVME_GET_LBA_STATUS_CDW13_ATYPE_SHIFT = 24,
+	NVME_GET_LBA_STATUS_CDW13_RL_MASK = 0xffff,
+	NVME_GET_LBA_STATUS_CDW13_ATYPE_MASK = 0xff,
+};
+
+/*
+ * Feature value (cdw11/cdw12) field layout, SHIFT/MASK pairs placed with
+ * the DW() macro. Widths follow the NVMe Base Specification feature
+ * definitions.
+ */
+enum features {
+	NVME_FEATURES_ARBITRATION_BURST_SHIFT = 0,
+	NVME_FEATURES_ARBITRATION_LPW_SHIFT = 8,
+	NVME_FEATURES_ARBITRATION_MPW_SHIFT = 16,
+	NVME_FEATURES_ARBITRATION_HPW_SHIFT = 24,
+	NVME_FEATURES_ARBITRATION_BURST_MASK = 0x7,
+	NVME_FEATURES_ARBITRATION_LPW_MASK = 0xff,
+	NVME_FEATURES_ARBITRATION_MPW_MASK = 0xff,
+	NVME_FEATURES_ARBITRATION_HPW_MASK = 0xff,
+	NVME_FEATURES_PWRMGMT_PS_SHIFT = 0,
+	NVME_FEATURES_PWRMGMT_WH_SHIFT = 5,
+	NVME_FEATURES_PWRMGMT_PS_MASK = 0x1f,
+	NVME_FEATURES_PWRMGMT_WH_MASK = 0x7,
+	NVME_FEATURES_TMPTH_SHIFT = 0,
+	NVME_FEATURES_TMPSEL_SHIFT = 16,
+	NVME_FEATURES_THSEL_SHIFT = 20,
+	NVME_FEATURES_TMPTH_MASK = 0xffff,	/* TMPTH is cdw11 bits 15:0; 0xff truncated it */
+	NVME_FEATURES_TMPSEL_MASK = 0xf,
+	NVME_FEATURES_THSEL_MASK = 0x3,
+	NVME_FEATURES_ERROR_RECOVERY_TLER_SHIFT = 0,
+	NVME_FEATURES_ERROR_RECOVERY_DULBE_SHIFT = 16,
+	NVME_FEATURES_ERROR_RECOVERY_TLER_MASK = 0xffff,	/* TLER is 16 bits; 0xff truncated it */
+	NVME_FEATURES_ERROR_RECOVERY_DULBE_MASK = 0x1,
+	NVME_FEATURES_VWC_WCE_SHIFT = 0,
+	NVME_FEATURES_VWC_WCE_MASK = 0x1,
+	NVME_FEATURES_IRQC_THR_SHIFT = 0,
+	NVME_FEATURES_IRQC_TIME_SHIFT = 8,
+	NVME_FEATURES_IRQC_THR_MASK = 0xff,
+	NVME_FEATURES_IRQC_TIME_MASK = 0xff,
+	NVME_FEATURES_IVC_IV_SHIFT = 0,
+	NVME_FEATURES_IVC_CD_SHIFT = 16,
+	NVME_FEATURES_IVC_IV_MASK = 0xffff,
+	NVME_FEATURES_IVC_CD_MASK = 0x1,
+	NVME_FEATURES_WAN_DN_SHIFT = 0,
+	NVME_FEATURES_WAN_DN_MASK = 0x1,
+	NVME_FEATURES_APST_APSTE_SHIFT = 0,
+	NVME_FEATURES_APST_APSTE_MASK = 0x1,
+	NVME_FEATURES_HCTM_TMT2_SHIFT = 0,
+	NVME_FEATURES_HCTM_TMT1_SHIFT = 16,
+	NVME_FEATURES_HCTM_TMT2_MASK = 0xffff,
+	NVME_FEATURES_HCTM_TMT1_MASK = 0xffff,
+	NVME_FEATURES_NOPS_NOPPME_SHIFT = 0,
+	NVME_FEATURES_NOPS_NOPPME_MASK = 0x1,
+	NVME_FEATURES_PLM_PLE_SHIFT = 0,
+	NVME_FEATURES_PLM_PLE_MASK = 0x1,
+	NVME_FEATURES_PLM_WINDOW_SELECT_SHIFT = 0,
+	NVME_FEATURES_PLM_WINDOW_SELECT_MASK = 0xf,
+	NVME_FEATURES_LBAS_LSIRI_SHIFT = 0,
+	NVME_FEATURES_LBAS_LSIPI_SHIFT = 16,
+	NVME_FEATURES_LBAS_LSIRI_MASK = 0xffff,
+	NVME_FEATURES_LBAS_LSIPI_MASK = 0xffff,
+};
+
+#define DW(value, prefix) ((value) & (prefix ## _MASK)) << prefix ## _SHIFT
+
+int nvme_identify(int fd, enum nvme_identify_cns cns, __u32 nsid, __u16 cntid,
+	__u16 nvmsetid, __u8 uuidx, void *data)
+{
+	/* Identify: CNS and CNTID share cdw10, the NVM set id lives in
+	 * cdw11 and the UUID index in cdw14; the payload is always 4KiB. */
+	struct nvme_passthru_cmd cmd = {
+		.opcode = nvme_admin_identify,
+		.nsid = nsid,
+		.addr = (__u64)(uintptr_t)data,
+		.data_len = NVME_IDENTIFY_DATA_SIZE,
+		.cdw10 = DW(cns, NVME_IDENTIFY_CDW10_CNS) |
+			 DW(cntid, NVME_IDENTIFY_CDW10_CNTID),
+		.cdw11 = DW(nvmsetid, NVME_IDENTIFY_CDW11_NVMSETID),
+		.cdw14 = DW(uuidx, NVME_IDENTIFY_CDW14_UUID),
+	};
+
+	return nvme_submit_admin_passthru(fd, &cmd, NULL);
+}
+
+/* Identify with only cns/nsid varying; other selectors defaulted to "none". */
+static int __nvme_identify(int fd, __u8 cns, __u32 nsid, void *data)
+{
+	return nvme_identify(fd, cns, nsid, NVME_CNTLID_NONE,
+		NVME_NVMSETID_NONE, NVME_UUID_NONE, data);
+}
+
+/* Identify Controller data structure. */
+int nvme_identify_ctrl(int fd, struct nvme_id_ctrl *id)
+{
+	return __nvme_identify(fd, NVME_IDENTIFY_CNS_CTRL, NVME_NSID_NONE, id);
+}
+
+/* Identify Namespace data structure for an active nsid. */
+int nvme_identify_ns(int fd, __u32 nsid, struct nvme_id_ns *ns)
+{
+	return __nvme_identify(fd, NVME_IDENTIFY_CNS_NS, nsid, ns);
+}
+
+/* Identify Namespace for an allocated (possibly unattached) nsid. */
+int nvme_identify_allocated_ns(int fd, __u32 nsid, struct nvme_id_ns *ns)
+{
+	return __nvme_identify(fd, NVME_IDENTIFY_CNS_ALLOCATED_NS, nsid, ns);
+}
+
+/* Active Namespace ID list starting after nsid. */
+int nvme_identify_active_ns_list(int fd, __u32 nsid, struct nvme_ns_list *list)
+{
+	return __nvme_identify(fd, NVME_IDENTIFY_CNS_NS_ACTIVE_LIST, nsid,
+		list);
+}
+
+/* Allocated Namespace ID list starting after nsid. */
+int nvme_identify_allocated_ns_list(int fd, __u32 nsid,
+	struct nvme_ns_list *list)
+{
+	return __nvme_identify(fd, NVME_IDENTIFY_CNS_ALLOCATED_NS_LIST, nsid,
+		list);
+}
+
+/* Controller list starting at cntid. */
+int nvme_identify_ctrl_list(int fd, __u16 cntid,
+	struct nvme_ctrl_list *ctrlist)
+{
+	return nvme_identify(fd, NVME_IDENTIFY_CNS_CTRL_LIST,
+		NVME_NSID_NONE, cntid, NVME_NVMSETID_NONE,
+		NVME_UUID_NONE, ctrlist);
+}
+
+/* Controllers attached to nsid, starting at cntid. */
+int nvme_identify_nsid_ctrl_list(int fd, __u32 nsid, __u16 cntid,
+	struct nvme_ctrl_list *ctrlist)
+{
+	return nvme_identify(fd, NVME_IDENTIFY_CNS_NS_CTRL_LIST, nsid,
+		cntid, NVME_NVMSETID_NONE, NVME_UUID_NONE, ctrlist);
+}
+
+/* Namespace Identification Descriptor list for nsid. */
+int nvme_identify_ns_descs(int fd, __u32 nsid, struct nvme_ns_id_desc *descs)
+{
+	return __nvme_identify(fd, NVME_IDENTIFY_CNS_NS_DESC_LIST, nsid, descs);
+}
+
+/* NVM Set list starting at nvmsetid. */
+int nvme_identify_nvmset_list(int fd, __u16 nvmsetid,
+	struct nvme_id_nvmset_list *nvmset)
+{
+	return nvme_identify(fd, NVME_IDENTIFY_CNS_NVMSET_LIST,
+		NVME_NSID_NONE, NVME_CNTLID_NONE, nvmsetid, NVME_UUID_NONE,
+		nvmset);
+}
+
+/* Primary Controller Capabilities for cntid. */
+int nvme_identify_primary_ctrl(int fd, __u16 cntid,
+	struct nvme_primary_ctrl_cap *cap)
+{
+	return nvme_identify(fd, NVME_IDENTIFY_CNS_PRIMARY_CTRL_CAP,
+		NVME_NSID_NONE, cntid, NVME_NVMSETID_NONE, NVME_UUID_NONE,
+		cap);
+}
+
+/* Secondary Controller list starting at cntid. */
+int nvme_identify_secondary_ctrl_list(int fd, __u16 cntid,
+	struct nvme_secondary_ctrl_list *list)
+{
+	return nvme_identify(fd, NVME_IDENTIFY_CNS_SECONDARY_CTRL_LIST,
+		NVME_NSID_NONE, cntid, NVME_NVMSETID_NONE, NVME_UUID_NONE,
+		list);
+}
+
+/* Namespace Granularity list. */
+int nvme_identify_ns_granularity(int fd,
+	struct nvme_id_ns_granularity_list *list)
+{
+	return __nvme_identify(fd, NVME_IDENTIFY_CNS_NS_GRANULARITY,
+		NVME_NSID_NONE, list);
+}
+
+/* UUID list (for the UUID index selectors used elsewhere). */
+int nvme_identify_uuid(int fd, struct nvme_id_uuid_list *list)
+{
+	return __nvme_identify(fd, NVME_IDENTIFY_CNS_UUID_LIST, NVME_NSID_NONE,
+		list);
+}
+
+int nvme_get_log(int fd, enum nvme_cmd_get_log_lid lid, __u32 nsid, __u64 lpo,
+	__u8 lsp, __u16 lsi, bool rae, __u8 uuidx, __u32 len, void *log)
+{
+	/* NUMD is the 0-based dword count of the transfer, split across
+	 * cdw10 (low 16 bits) and cdw11 (high 16 bits); the byte offset
+	 * into the log page is split across cdw12/cdw13. */
+	__u32 numd = (len >> 2) - 1;
+	__u16 numdu = numd >> 16;
+	__u16 numdl = numd & 0xffff;
+
+	struct nvme_passthru_cmd cmd = {
+		.opcode = nvme_admin_get_log_page,
+		.nsid = nsid,
+		.addr = (__u64)(uintptr_t)log,
+		.data_len = len,
+		.cdw10 = DW(lid, NVME_LOG_CDW10_LID) |
+			 DW(lsp, NVME_LOG_CDW10_LSP) |
+			 DW(!!rae, NVME_LOG_CDW10_RAE) |
+			 DW(numdl, NVME_LOG_CDW10_NUMDL),
+		.cdw11 = DW(numdu, NVME_LOG_CDW11_NUMDU) |
+			 DW(lsi, NVME_LOG_CDW11_LSI),
+		.cdw12 = lpo & 0xffffffff,
+		.cdw13 = lpo >> 32,
+		.cdw14 = DW(uuidx, NVME_LOG_CDW14_UUID),
+	};
+
+	return nvme_submit_admin_passthru(fd, &cmd, NULL);
+}
+
+static int __nvme_get_log(int fd, enum nvme_cmd_get_log_lid lid, bool rae,
+	__u32 len, void *log)
+{
+	/* Read a whole log page from offset 0. Note nvme_get_log()'s order
+	 * is (..., lsi, rae, uuidx, ...): the previous code swapped rae and
+	 * NVME_UUID_NONE, so RAE was taken from the UUID sentinel and the
+	 * UUID index from the rae flag. */
+	return nvme_get_log(fd, lid, NVME_NSID_ALL, 0, NVME_LOG_LSP_NONE,
+		NVME_LOG_LSI_NONE, rae, NVME_UUID_NONE, len, log);
+}
+
+/* Error Information log; caller sizes the buffer by entry count. */
+int nvme_get_log_error(int fd, unsigned nr_entries, bool rae,
+	struct nvme_error_log_page *log)
+{
+	return __nvme_get_log(fd, NVME_LOG_LID_ERROR, rae,
+		sizeof(*log) * nr_entries, log);
+}
+
+/* SMART / Health Information log for nsid (or NVME_NSID_ALL). */
+int nvme_get_log_smart(int fd, __u32 nsid, bool rae, struct nvme_smart_log *log)
+{
+	return nvme_get_log(fd, NVME_LOG_LID_SMART, nsid, 0,
+		NVME_LOG_LSP_NONE, NVME_LOG_LSI_NONE, rae, NVME_UUID_NONE,
+		sizeof(*log), log);
+}
+
+/* Firmware Slot Information log. */
+int nvme_get_log_fw_slot(int fd, bool rae, struct nvme_firmware_slot *log)
+{
+	return __nvme_get_log(fd, NVME_LOG_LID_FW_SLOT, rae, sizeof(*log),
+		log);
+}
+
+/* Changed Namespace List log. */
+int nvme_get_log_changed_ns_list(int fd, bool rae, struct nvme_ns_list *log)
+{
+	return __nvme_get_log(fd, NVME_LOG_LID_CHANGED_NS, rae,
+		sizeof(*log), log);
+}
+
+/* Commands Supported and Effects log. */
+int nvme_get_log_cmd_effects(int fd, struct nvme_cmd_effects_log *log)
+{
+	return __nvme_get_log(fd, NVME_LOG_LID_CMD_EFFECTS, false,
+		sizeof(*log), log);
+}
+
+/* Device Self-test log. */
+int nvme_get_log_device_self_test(int fd, struct nvme_self_test_log *log)
+{
+	return __nvme_get_log(fd, NVME_LOG_LID_DEVICE_SELF_TEST, false,
+		sizeof(*log), log);
+}
+
+/* LSP values for the Telemetry Host-Initiated log page. */
+enum nvme_cmd_get_log_telemetry_host_lsp {
+	NVME_LOG_TELEM_HOST_LSP_RETAIN = 0,
+	NVME_LOG_TELEM_HOST_LSP_CREATE = 1,
+};
+
+/* Capture a fresh host-initiated telemetry snapshot (reads the header). */
+int nvme_get_log_create_telemetry_host(int fd, struct nvme_telemetry_log *log)
+{
+	return nvme_get_log(fd, NVME_LOG_LID_TELEMETRY_HOST, NVME_NSID_NONE, 0,
+		NVME_LOG_TELEM_HOST_LSP_CREATE, NVME_LOG_LSI_NONE, false,
+		NVME_UUID_NONE, sizeof(*log), log);
+}
+
+/* Read previously captured host-initiated telemetry data at offset. */
+int nvme_get_log_telemetry_host(int fd, __u64 offset, __u32 len, void *log)
+{
+	return nvme_get_log(fd, NVME_LOG_LID_TELEMETRY_HOST, NVME_NSID_NONE,
+		offset, NVME_LOG_TELEM_HOST_LSP_RETAIN, NVME_LOG_LSI_NONE,
+		false, NVME_UUID_NONE, len, log);
+}
+
+/* Read controller-initiated telemetry data at offset. */
+int nvme_get_log_telemetry_ctrl(int fd, bool rae, __u64 offset, __u32 len,
+	void *log)
+{
+	return nvme_get_log(fd, NVME_LOG_LID_TELEMETRY_CTRL, NVME_NSID_NONE,
+		offset, NVME_LOG_LSP_NONE, NVME_LOG_LSI_NONE, rae,
+		NVME_UUID_NONE, len, log);
+}
+
+/* Endurance Group Information log; endgid travels in the LSI field. */
+int nvme_get_log_endurance_group(int fd, __u16 endgid,
+	struct nvme_endurance_group_log *log)
+{
+	return nvme_get_log(fd, NVME_LOG_LID_ENDURANCE_GROUP, NVME_NSID_NONE,
+		0, NVME_LOG_LSP_NONE, endgid, false, NVME_UUID_NONE,
+		sizeof(*log), log);
+}
+
+/* Predictable Latency Per NVM Set log; nvmsetid travels in LSI. */
+int nvme_get_log_predictable_lat_nvmset(int fd, __u16 nvmsetid,
+	struct nvme_nvmset_predictable_lat_log *log)
+{
+	return nvme_get_log(fd, NVME_LOG_LID_PREDICTABLE_LAT_NVMSET,
+		NVME_NSID_NONE, 0, NVME_LOG_LSP_NONE, nvmsetid, false,
+		NVME_UUID_NONE, sizeof(*log), log);
+}
+
+/* Predictable Latency Event Aggregate log at byte offset. */
+int nvme_get_log_predictable_lat_event(int fd, bool rae, __u32 offset,
+	__u32 len, void *log)
+{
+	return nvme_get_log(fd, NVME_LOG_LID_PREDICTABLE_LAT_AGG,
+		NVME_NSID_NONE, offset, NVME_LOG_LSP_NONE, NVME_LOG_LSI_NONE,
+		rae, NVME_UUID_NONE, len, log);
+}
+
+int nvme_get_log_ana(int fd, enum nvme_log_ana_lsp lsp, bool rae, __u64 offset,
+	__u32 len, void *log)
+{
+	/* Asymmetric Namespace Access log. Forward the caller's RAE flag:
+	 * the previous code hard-coded false, clearing Retain Asynchronous
+	 * Event on every read regardless of the argument. */
+	return nvme_get_log(fd, NVME_LOG_LID_ANA, NVME_NSID_NONE, offset, lsp,
+		NVME_LOG_LSI_NONE, rae, NVME_UUID_NONE, len, log);
+}
+
+/* ANA log restricted to group descriptors only (RGO). */
+int nvme_get_log_ana_groups(int fd, bool rae, __u32 len,
+	struct nvme_ana_group_desc *log)
+{
+	return nvme_get_log_ana(fd, NVME_LOG_ANA_LSP_RGO_GROUPS_ONLY, rae, 0,
+		len, log);
+}
+
+/* LBA Status Information log at byte offset. */
+int nvme_get_log_lba_status(int fd, bool rae, __u64 offset, __u32 len,
+	void *log)
+{
+	return nvme_get_log(fd, NVME_LOG_LID_LBA_STATUS, NVME_NSID_NONE,
+		offset, NVME_LOG_LSP_NONE, NVME_LOG_LSI_NONE, rae,
+		NVME_UUID_NONE, len, log);
+}
+
+/* Endurance Group Event Aggregate log at byte offset. */
+int nvme_get_log_endurance_grp_evt(int fd, bool rae, __u32 offset, __u32 len,
+	void *log)
+{
+	return nvme_get_log(fd, NVME_LOG_LID_ENDURANCE_GRP_EVT,
+		NVME_NSID_NONE, offset, NVME_LOG_LSP_NONE, NVME_LOG_LSI_NONE,
+		rae, NVME_UUID_NONE, len, log);
+}
+
+/* Discovery log page (fabrics) at byte offset. */
+int nvme_get_log_discovery(int fd, bool rae, __u32 offset, __u32 len, void *log)
+{
+	return nvme_get_log(fd, NVME_LOG_LID_DISC, NVME_NSID_NONE, offset,
+		NVME_LOG_LSP_NONE, NVME_LOG_LSI_NONE, rae, NVME_UUID_NONE,
+		len, log);
+}
+
+/* Reservation Notification log. */
+int nvme_get_log_reservation(int fd, bool rae,
+	struct nvme_resv_notification_log *log)
+{
+	return __nvme_get_log(fd, NVME_LOG_LID_RESERVATION, rae,
+		sizeof(*log), log);
+}
+
+/* Sanitize Status log. */
+int nvme_get_log_sanitize(int fd, bool rae,
+	struct nvme_sanitize_log_page *log)
+{
+	return __nvme_get_log(fd, NVME_LOG_LID_SANITIZE, rae, sizeof(*log),
+		log);
+}
+
+int nvme_set_features(int fd, __u8 fid, __u32 nsid, __u32 cdw11, __u32 cdw12,
+	bool save, __u8 uuidx, __u32 cdw15, __u32 data_len,
+	void *data, __u32 *result)
+{
+	/* Set Features: the FID and the save bit share cdw10; the optional
+	 * UUID index goes in cdw14. */
+	__u32 cdw10 = DW(fid, NVME_FEATURES_CDW10_FID) |
+		DW(!!save, NVME_SET_FEATURES_CDW10_SAVE);
+	__u32 cdw14 = DW(uuidx, NVME_FEATURES_CDW14_UUID);
+
+	struct nvme_passthru_cmd cmd = {
+		.opcode = nvme_admin_set_features,
+		.nsid = nsid,
+		.addr = (__u64)(uintptr_t)data,
+		.data_len = data_len,
+		.cdw10 = cdw10,
+		.cdw11 = cdw11,
+		.cdw12 = cdw12,
+		.cdw14 = cdw14,
+		.cdw15 = cdw15,	/* was a duplicate .cdw14 designator, which
+				 * clobbered the UUID index and left cdw15
+				 * unset */
+	};
+
+	return nvme_submit_admin_passthru(fd, &cmd, result);
+}
+
+/* Set a feature whose whole argument fits in cdw11 (no data payload). */
+static int __nvme_set_features(int fd, __u8 fid, __u32 cdw11, bool save,
+	__u32 *result)
+{
+	return nvme_set_features(fd, fid, NVME_NSID_NONE, cdw11, 0, save,
+		NVME_UUID_NONE, 0, 0, NULL, result);
+}
+
+/* Arbitration feature: burst plus low/medium/high priority weights. */
+int nvme_set_features_arbitration(int fd, __u8 ab, __u8 lpw, __u8 mpw,
+	__u8 hpw, bool save, __u32 *result)
+{
+	__u32 value = DW(ab, NVME_FEATURES_ARBITRATION_BURST) |
+		DW(lpw, NVME_FEATURES_ARBITRATION_LPW) |
+		DW(mpw, NVME_FEATURES_ARBITRATION_MPW) |
+		DW(hpw, NVME_FEATURES_ARBITRATION_HPW);
+
+	return __nvme_set_features(fd, NVME_FEAT_FID_ARBITRATION, value, save,
+		result);
+}
+
+int nvme_set_features_power_mgmt(int fd, __u8 ps, __u8 wh, bool save,
+	__u32 *result)
+{
+	/* Power State in bits 4:0, Workload Hint in bits 7:5. The previous
+	 * code packed WH with the PS shift/mask, clobbering PS. */
+	__u32 value = DW(ps, NVME_FEATURES_PWRMGMT_PS) |
+		DW(wh, NVME_FEATURES_PWRMGMT_WH);
+
+	return __nvme_set_features(fd, NVME_FEAT_FID_POWER_MGMT, value, save,
+		result);
+}
+
+int nvme_set_features_lba_range(int fd, __u32 nsid, __u32 nr_ranges, bool save,
+	struct nvme_lba_range_type *data, __u32 *result)
+{
+	/* Previously an unimplemented stub that always returned -1 and
+	 * ignored every argument. NUM (cdw11) is the 0-based count of
+	 * ranges (at most 64); the range table is the data payload. */
+	if (!nr_ranges || nr_ranges > 64) {
+		errno = EINVAL;
+		return -1;
+	}
+
+	return nvme_set_features(fd, NVME_FEAT_FID_LBA_RANGE, nsid,
+		nr_ranges - 1, 0, save, NVME_UUID_NONE, 0, sizeof(*data),
+		data, result);
+}
+
+/* Temperature Threshold: threshold value, sensor select, over/under select. */
+int nvme_set_features_temp_thresh(int fd, __u16 tmpth, __u8 tmpsel,
+	enum nvme_feat_tmpthresh_thsel thsel, bool save, __u32 *result)
+{
+	__u32 value = DW(tmpth, NVME_FEATURES_TMPTH) |
+		DW(tmpsel, NVME_FEATURES_TMPSEL) |
+		DW(thsel, NVME_FEATURES_THSEL);
+
+	return __nvme_set_features(fd, NVME_FEAT_FID_TEMP_THRESH, value, save,
+		result);
+}
+
+int nvme_set_features_err_recovery(int fd, __u32 nsid, __u16 tler, bool dulbe,
+	bool save, __u32 *result)
+{
+	__u32 value = DW(tler, NVME_FEATURES_ERROR_RECOVERY_TLER) |
+		DW(!!dulbe, NVME_FEATURES_ERROR_RECOVERY_DULBE);
+
+	/* Error Recovery is a namespace-specific feature: honour the
+	 * caller's nsid instead of dropping it (the __nvme_set_features
+	 * helper forces NVME_NSID_NONE). */
+	return nvme_set_features(fd, NVME_FEAT_FID_ERR_RECOVERY, nsid, value,
+		0, save, NVME_UUID_NONE, 0, 0, NULL, result);
+}
+
+/* Volatile Write Cache enable/disable. */
+int nvme_set_features_volatile_wc(int fd, bool wce, bool save, __u32 *result)
+{
+	__u32 value = DW(!!wce, NVME_FEATURES_VWC_WCE);
+
+	return __nvme_set_features(fd, NVME_FEAT_FID_VOLATILE_WC, value, save,
+		result);
+}
+
+int nvme_set_features_irq_coalesce(int fd, __u8 thr, __u8 time, bool save,
+	__u32 *result)
+{
+	/* Aggregation threshold in bits 7:0, aggregation time in bits 15:8.
+	 * The previous code packed thr with the TIME field and time with
+	 * the THR field, swapping the two. */
+	__u32 value = DW(thr, NVME_FEATURES_IRQC_THR) |
+		DW(time, NVME_FEATURES_IRQC_TIME);
+
+	return __nvme_set_features(fd, NVME_FEAT_FID_IRQ_COALESCE, value, save,
+		result);
+}
+
+/* Interrupt Vector Configuration: vector number plus coalescing disable. */
+int nvme_set_features_irq_config(int fd, __u16 iv, bool cd, bool save,
+	__u32 *result)
+{
+	__u32 value = DW(iv, NVME_FEATURES_IVC_IV) |
+		DW(!!cd, NVME_FEATURES_IVC_CD);
+
+	return __nvme_set_features(fd, NVME_FEAT_FID_IRQ_CONFIG, value, save,
+		result);
+}
+
+/* Write Atomicity Normal: disable-normal flag. */
+int nvme_set_features_write_atomic(int fd, bool dn, bool save, __u32 *result)
+{
+	__u32 value = DW(!!dn, NVME_FEATURES_WAN_DN);
+
+	return __nvme_set_features(fd, NVME_FEAT_FID_WRITE_ATOMIC, value, save,
+		result);
+}
+
+/* Asynchronous Event Configuration: raw enable bitmask in cdw11. */
+int nvme_set_features_async_event(int fd, __u32 events,
+	bool save, __u32 *result)
+{
+	return __nvme_set_features(fd, NVME_FEAT_FID_ASYNC_EVENT, events, save,
+		result);
+}
+
+int nvme_set_features_auto_pst(int fd, bool apste, bool save,
+	struct nvme_feat_auto_pst *apst, __u32 *result)
+{
+	__u32 value = DW(!!apste, NVME_FEATURES_APST_APSTE);
+
+	/* The APST transition table is this feature's data payload; the
+	 * previous code accepted it but never sent it. */
+	return nvme_set_features(fd, NVME_FEAT_FID_AUTO_PST, NVME_NSID_NONE,
+		value, 0, save, NVME_UUID_NONE, 0, sizeof(*apst), apst,
+		result);
+}
+
+int nvme_set_features_timestamp(int fd, bool save, __u64 timestamp)
+{
+	/* The feature payload carries the timestamp little-endian. Copy the
+	 * converted value INTO the payload: the previous memcpy had its
+	 * arguments reversed, so uninitialized stack bytes were sent to the
+	 * device and the caller's timestamp was discarded. */
+	__le64 t = cpu_to_le64(timestamp);
+	struct nvme_timestamp ts = {};
+
+	memcpy(ts.timestamp, &t, sizeof(ts.timestamp));
+	return nvme_set_features(fd, NVME_FEAT_FID_TIMESTAMP,
+		NVME_NSID_NONE, 0, 0, save, NVME_UUID_NONE, 0,
+		sizeof(ts), &ts, NULL);
+}
+
+/* Host Controlled Thermal Management: light (TMT1) / heavy (TMT2) limits. */
+int nvme_set_features_hctm(int fd, __u16 tmt2, __u16 tmt1,
+	bool save, __u32 *result)
+{
+	__u32 value = DW(tmt2, NVME_FEATURES_HCTM_TMT2) |
+		DW(tmt1, NVME_FEATURES_HCTM_TMT1);
+
+	return __nvme_set_features(fd, NVME_FEAT_FID_HCTM, value, save,
+		result);
+}
+
+/* Non-Operational Power State Config: permissive-mode enable. */
+int nvme_set_features_nopsc(int fd, bool noppme, bool save, __u32 *result)
+{
+	__u32 value = DW(noppme, NVME_FEATURES_NOPS_NOPPME);
+
+	return __nvme_set_features(fd, NVME_FEAT_FID_NOPSC, value, save,
+		result);
+}
+
+/* Read Recovery Level: NVM set id in cdw11, level in cdw12. */
+int nvme_set_features_rrl(int fd, __u8 rrl, __u16 nvmsetid,
+	bool save, __u32 *result)
+{
+	return nvme_set_features(fd, NVME_FEAT_FID_RRL, NVME_NSID_NONE,
+		nvmsetid, rrl, save, NVME_UUID_NONE, 0, 0, NULL, result);
+}
+
+int nvme_set_features_plm_config(int fd, bool plm, __u16 nvmsetid, bool save,
+	struct nvme_plm_config *data, __u32 *result)
+{
+	/* NVM set id in cdw11, PLE in cdw12; the PLM configuration
+	 * structure is the data payload and was previously dropped
+	 * (NULL/0 passed instead of the caller's buffer). */
+	return nvme_set_features(fd, NVME_FEAT_FID_PLM_CONFIG,
+		NVME_NSID_NONE, nvmsetid, !!plm, save, NVME_UUID_NONE, 0,
+		sizeof(*data), data, result);
+}
+
+/* Predictable Latency Mode Window: window select in cdw12, set id in cdw11. */
+int nvme_set_features_plm_window(int fd, enum nvme_feat_plm_window_select sel,
+	__u16 nvmsetid, bool save, __u32 *result)
+{
+	__u32 cdw12 = DW(sel, NVME_FEATURES_PLM_WINDOW_SELECT);
+
+	return nvme_set_features(fd, NVME_FEAT_FID_PLM_WINDOW, NVME_NSID_NONE,
+		nvmsetid, cdw12, save, NVME_UUID_NONE, 0, 0, NULL, result);
+}
+
+/* LBA Status Information report/poll intervals. */
+int nvme_set_features_lba_sts_interval(int fd, __u16 lsiri, __u16 lsipi,
+	bool save, __u32 *result)
+{
+	__u32 value = DW(lsiri, NVME_FEATURES_LBAS_LSIRI) |
+		DW(lsipi, NVME_FEATURES_LBAS_LSIPI);
+
+	return __nvme_set_features(fd, NVME_FEAT_FID_LBA_STS_INTERVAL, value,
+		save, result);
+}
+
+int nvme_set_features_host_behavior(int fd, bool save,
+	struct nvme_feat_host_behavior *data)
+{
+	/* cdw11/cdw12 are reserved for this feature; the structure is the
+	 * payload. The previous code passed 'save' in the cdw11 slot, so
+	 * the save bit itself was always clear. */
+	return nvme_set_features(fd, NVME_FEAT_FID_HOST_BEHAVIOR,
+		NVME_NSID_NONE, 0, 0, save, NVME_UUID_NONE, 0, sizeof(*data),
+		data, NULL);
+}
+
+/* Sanitize Config: no-deallocate response mode. */
+int nvme_set_features_sanitize(int fd, bool nodrm, bool save, __u32 *result)
+{
+	return __nvme_set_features(fd, NVME_FEAT_FID_SANITIZE, !!nodrm, save,
+		result);
+}
+
+/* Endurance Group Event Configuration: group id plus warning thresholds. */
+int nvme_set_features_endurance_evt_cfg(int fd, __u16 endgid, __u8 egwarn,
+	bool save, __u32 *result)
+{
+	__u32 value = endgid | egwarn << 16;
+	return __nvme_set_features(fd, NVME_FEAT_FID_ENDURANCE_EVT_CFG, value,
+		save, result);
+}
+
+/* Software Progress Marker: pre-boot software load count. */
+int nvme_set_features_sw_progress(int fd, __u8 pbslc, bool save,
+	__u32 *result)
+{
+	return __nvme_set_features(fd, NVME_FEAT_FID_SW_PROGRESS, pbslc, save,
+		result);
+}
+
+int nvme_set_features_host_id(int fd, bool save, bool exhid, __u8 *hostid)
+{
+	/* EXHID (128-bit host id select) is the cdw11 value; the identifier
+	 * itself is the data payload (16 bytes extended, 8 otherwise). The
+	 * previous call shifted 'save' and 'value' one slot left, putting
+	 * save in cdw11, EXHID in cdw12, and always clearing the save bit. */
+	__u32 len = exhid ? 16 : 8;
+	__u32 value = !!exhid;
+
+	return nvme_set_features(fd, NVME_FEAT_FID_HOST_ID, NVME_NSID_NONE,
+		value, 0, save, NVME_UUID_NONE, 0, len, hostid, NULL);
+}
+
+/* Reservation Notification Mask. */
+int nvme_set_features_resv_mask(int fd, __u32 mask, bool save, __u32 *result)
+{
+	return __nvme_set_features(fd, NVME_FEAT_FID_RESV_MASK, mask, save,
+		result);
+}
+
+/* Reservation Persistence (persist through power loss). NOTE(review): the
+ * identifier lacks the _FID_ infix used by its siblings — presumably it
+ * matches the name declared in the types header; confirm there. */
+int nvme_set_features_resv_persist(int fd, bool ptpl, bool save, __u32 *result)
+{
+	return __nvme_set_features(fd, NVME_FEAT_RESV_PERSIST, !!ptpl, save,
+		result);
+}
+
+/* Namespace Write Protection Config state. */
+int nvme_set_features_write_protect(int fd, enum nvme_feat_nswpcfg_state state,
+	bool save, __u32 *result)
+{
+	return __nvme_set_features(fd, NVME_FEAT_FID_WRITE_PROTECT, state,
+		save, result);
+}
+
+int nvme_get_features(int fd, enum nvme_features_id fid, __u32 nsid,
+	enum nvme_get_features_sel sel, __u32 cdw11, __u8 uuidx,
+	__u32 data_len, void *data, __u32 *result)
+{
+	/* Get Features: FID and SEL share cdw10; an optional UUID index
+	 * goes in cdw14 and any feature data comes back in the payload. */
+	struct nvme_passthru_cmd cmd = {
+		.opcode = nvme_admin_get_features,
+		.nsid = nsid,
+		.addr = (__u64)(uintptr_t)data,
+		.data_len = data_len,
+		.cdw10 = DW(fid, NVME_FEATURES_CDW10_FID) |
+			 DW(sel, NVME_GET_FEATURES_CDW10_SEL),
+		.cdw11 = cdw11,
+		.cdw14 = DW(uuidx, NVME_FEATURES_CDW14_UUID),
+	};
+
+	return nvme_submit_admin_passthru(fd, &cmd, result);
+}
+
+/* Get a feature that returns its value in the completion dword only. */
+static int __nvme_get_features(int fd, enum nvme_features_id fid,
+	enum nvme_get_features_sel sel, __u32 *result)
+{
+	return nvme_get_features(fd, fid, NVME_NSID_NONE, sel, 0,
+		NVME_UUID_NONE, 0, NULL, result);
+}
+
+/* Arbitration feature value. */
+int nvme_get_features_arbitration(int fd, enum nvme_get_features_sel sel,
+	__u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_ARBITRATION, sel, result);
+}
+
+/* Power Management feature value. */
+int nvme_get_features_power_mgmt(int fd, enum nvme_get_features_sel sel,
+	__u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_POWER_MGMT, sel, result);
+}
+
+int nvme_get_features_lba_range(int fd, enum nvme_get_features_sel sel,
+	struct nvme_lba_range_type *data,
+	__u32 *result)
+{
+	/* The LBA range table is returned as the data payload; pass the
+	 * caller's buffer through (it was previously discarded and NULL/0
+	 * sent instead). */
+	return nvme_get_features(fd, NVME_FEAT_FID_LBA_RANGE, NVME_NSID_NONE,
+		sel, 0, NVME_UUID_NONE, sizeof(*data), data, result);
+}
+
+/* Temperature Threshold feature value. */
+int nvme_get_features_temp_thresh(int fd, enum nvme_get_features_sel sel,
+	__u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_TEMP_THRESH, sel, result);
+}
+
+/* Error Recovery feature value. */
+int nvme_get_features_err_recovery(int fd, enum nvme_get_features_sel sel,
+	__u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_ERR_RECOVERY, sel,
+		result);
+}
+
+/* Volatile Write Cache feature value. */
+int nvme_get_features_volatile_wc(int fd, enum nvme_get_features_sel sel,
+	__u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_VOLATILE_WC, sel, result);
+}
+
+/* Number of Queues feature value. */
+int nvme_get_features_num_queues(int fd, enum nvme_get_features_sel sel,
+	__u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_NUM_QUEUES, sel, result);
+}
+
+/* Interrupt Coalescing feature value. */
+int nvme_get_features_irq_coalesce(int fd, enum nvme_get_features_sel sel,
+	__u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_IRQ_COALESCE, sel,
+		result);
+}
+
+/* Interrupt Vector Configuration for vector iv (iv travels in cdw11). */
+int nvme_get_features_irq_config(int fd, enum nvme_get_features_sel sel,
+	__u16 iv, __u32 *result)
+{
+	return nvme_get_features(fd, NVME_FEAT_FID_IRQ_CONFIG, NVME_NSID_NONE, sel, iv,
+		NVME_UUID_NONE, 0, NULL, result);
+}
+
+/* Write Atomicity Normal feature value. */
+int nvme_get_features_write_atomic(int fd, enum nvme_get_features_sel sel,
+	__u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_WRITE_ATOMIC, sel,
+		result);
+}
+
+/* Asynchronous Event Configuration feature value. */
+int nvme_get_features_async_event(int fd, enum nvme_get_features_sel sel,
+	__u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_ASYNC_EVENT, sel, result);
+}
+
+int nvme_get_features_auto_pst(int fd, enum nvme_get_features_sel sel,
+	struct nvme_feat_auto_pst *apst, __u32 *result)
+{
+	/* The APST transition table is returned as the data payload; wire
+	 * up the caller's buffer (it was previously ignored). */
+	return nvme_get_features(fd, NVME_FEAT_FID_AUTO_PST, NVME_NSID_NONE,
+		sel, 0, NVME_UUID_NONE, sizeof(*apst), apst, result);
+}
+
+/* Host Memory Buffer feature value. */
+int nvme_get_features_host_mem_buf(int fd, enum nvme_get_features_sel sel,
+	__u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_HOST_MEM_BUF, sel, result);
+}
+
+int nvme_get_features_timestamp(int fd,
+	enum nvme_get_features_sel sel, struct nvme_timestamp *ts)
+{
+	/* The timestamp is returned in the data payload, not the result
+	 * dword; pass the caller's structure through (previously NULL/0
+	 * were sent and the output parameter was never filled). */
+	return nvme_get_features(fd, NVME_FEAT_FID_TIMESTAMP, NVME_NSID_NONE,
+		sel, 0, NVME_UUID_NONE, sizeof(*ts), ts, NULL);
+}
+
+/* Keep Alive Timer feature value. */
+int nvme_get_features_kato(int fd, enum nvme_get_features_sel sel, __u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_KATO, sel, result);
+}
+
+/* Host Controlled Thermal Management feature value. */
+int nvme_get_features_hctm(int fd, enum nvme_get_features_sel sel, __u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_HCTM, sel, result);
+}
+
+/* Non-Operational Power State Config feature value. */
+int nvme_get_features_nopsc(int fd, enum nvme_get_features_sel sel, __u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_NOPSC, sel, result);
+}
+
+/* Read Recovery Level feature value. */
+int nvme_get_features_rrl(int fd, enum nvme_get_features_sel sel, __u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_RRL, sel, result);
+}
+
+int nvme_get_features_plm_config(int fd, enum nvme_get_features_sel sel,
+	__u16 nvmsetid, struct nvme_plm_config *data, __u32 *result)
+{
+	/* The PLM configuration structure comes back as the data payload;
+	 * pass the caller's buffer through (it was previously dropped and
+	 * NULL/0 sent instead). */
+	return nvme_get_features(fd, NVME_FEAT_FID_PLM_CONFIG, NVME_NSID_NONE,
+		sel, nvmsetid, NVME_UUID_NONE, sizeof(*data), data, result);
+}
+
+/* Predictable Latency Mode Window for an NVM set (nvmsetid in cdw11). */
+int nvme_get_features_plm_window(int fd, enum nvme_get_features_sel sel,
+	__u16 nvmsetid, __u32 *result)
+{
+	return nvme_get_features(fd, NVME_FEAT_FID_PLM_WINDOW, NVME_NSID_NONE,
+		sel, nvmsetid, NVME_UUID_NONE, 0, NULL, result);
+}
+
+/* LBA Status Information interval feature value. */
+int nvme_get_features_lba_sts_interval(int fd, enum nvme_get_features_sel sel, __u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_LBA_STS_INTERVAL, sel, result);
+}
+
+int nvme_get_features_host_behavior(int fd, enum nvme_get_features_sel sel,
+	struct nvme_feat_host_behavior *data, __u32 *result)
+{
+	/* The host behavior structure is returned as the data payload; it
+	 * was previously discarded (NULL/0 passed instead of the buffer). */
+	return nvme_get_features(fd, NVME_FEAT_FID_HOST_BEHAVIOR,
+		NVME_NSID_NONE, sel, 0, NVME_UUID_NONE, sizeof(*data), data,
+		result);
+}
+
+/* Sanitize Config feature value. */
+int nvme_get_features_sanitize(int fd, enum nvme_get_features_sel sel, __u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_SANITIZE, sel, result);
+}
+
+int nvme_get_features_endurance_event_cfg(int fd, enum nvme_get_features_sel sel, __u16 endgid, __u32 *result)
+{
+	/* The endurance group identifier (cdw11) selects which group's
+	 * event configuration to read; it was previously ignored and 0
+	 * always sent. */
+	return nvme_get_features(fd, NVME_FEAT_FID_ENDURANCE_EVT_CFG,
+		NVME_NSID_NONE, sel, endgid, NVME_UUID_NONE, 0, NULL, result);
+}
+
+/* Software Progress Marker feature value. */
+int nvme_get_features_sw_progress(int fd, enum nvme_get_features_sel sel, __u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_SW_PROGRESS, sel, result);
+}
+
+/* Host Identifier: the id itself is returned as the data payload. */
+int nvme_get_features_host_id(int fd, enum nvme_get_features_sel sel,
+	bool exhid, __u32 len, __u8 *hostid)
+{
+	return nvme_get_features(fd, NVME_FEAT_FID_HOST_ID, NVME_NSID_NONE,
+		sel, !!exhid, NVME_UUID_NONE, len, hostid, NULL);
+}
+
+/* Reservation Notification Mask feature value. */
+int nvme_get_features_resv_mask(int fd,
+	enum nvme_get_features_sel sel, __u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_FID_RESV_MASK, sel, result);
+}
+
+/* Reservation Persistence feature value. */
+int nvme_get_features_resv_persist(int fd,
+	enum nvme_get_features_sel sel, __u32 *result)
+{
+	return __nvme_get_features(fd, NVME_FEAT_RESV_PERSIST, sel, result);
+}
+
+/* Namespace Write Protection Config: write protect is per-namespace. */
+int nvme_get_features_write_protect(int fd, __u32 nsid,
+	enum nvme_get_features_sel sel, __u32 *result)
+{
+	return nvme_get_features(fd, NVME_FEAT_FID_WRITE_PROTECT, nsid, sel, 0,
+		NVME_UUID_NONE, 0, NULL, result);
+}
+
+int nvme_format_nvm(int fd, __u32 nsid, __u8 lbaf,
+	enum nvme_cmd_format_mset mset, enum nvme_cmd_format_pi pi,
+	enum nvme_cmd_format_pil pil, enum nvme_cmd_format_ses ses,
+	__u32 timeout)
+{
+	/* Format NVM: all parameters (LBA format, metadata placement,
+	 * protection info + location, secure-erase action) encode into
+	 * cdw10; format can be slow, so the timeout is caller-supplied. */
+	struct nvme_passthru_cmd cmd = {
+		.opcode = nvme_admin_format_nvm,
+		.nsid = nsid,
+		.cdw10 = DW(lbaf, NVME_FORMAT_CDW10_LBAF) |
+			 DW(mset, NVME_FORMAT_CDW10_MSET) |
+			 DW(pi, NVME_FORMAT_CDW10_PI) |
+			 DW(pil, NVME_FORMAT_CDW10_PIL) |
+			 DW(ses, NVME_FORMAT_CDW10_SES),
+		.timeout_ms = timeout,
+	};
+
+	return nvme_submit_admin_passthru(fd, &cmd, NULL);
+}
+
+int nvme_ns_mgmt(int fd, __u32 nsid, enum nvme_ns_mgmt_sel sel,
+	struct nvme_id_ns *ns, __u32 *result, __u32 timeout)
+{
+	/* Namespace Management: SEL in cdw10 picks create/delete; an
+	 * identify-namespace payload is attached only when supplied. */
+	struct nvme_passthru_cmd cmd = {
+		.opcode = nvme_admin_ns_mgmt,
+		.nsid = nsid,
+		.cdw10 = DW(sel, NVME_NAMESPACE_MGMT_CDW10_SEL),
+		.timeout_ms = timeout,
+		.data_len = ns ? sizeof(*ns) : 0,
+		.addr = (__u64)(uintptr_t)ns,
+	};
+
+	return nvme_submit_admin_passthru(fd, &cmd, result);
+}
+
+/* Create a namespace described by ns; the new NSID is returned through
+ * the command result pointer. */
+int nvme_ns_mgmt_create(int fd, struct nvme_id_ns *ns, __u32 *nsid,
+	__u32 timeout)
+{
+	return nvme_ns_mgmt(fd, NVME_NSID_NONE, NVME_NS_MGMT_SEL_CREATE, ns, nsid,
+		timeout);
+}
+
+/* Delete the namespace identified by nsid. */
+int nvme_ns_mgmt_delete(int fd, __u32 nsid)
+{
+	return nvme_ns_mgmt(fd, nsid, NVME_NS_MGMT_SEL_DELETE, NULL, NULL, 0);
+}
+
+int nvme_ns_attach(int fd, __u32 nsid, enum nvme_ns_attach_sel sel,
+	struct nvme_ctrl_list *ctrlist)
+{
+	/* Namespace Attachment: SEL in cdw10 chooses attach vs detach; the
+	 * payload is the controller list the operation applies to. */
+	struct nvme_passthru_cmd cmd = {
+		.opcode = nvme_admin_ns_attach,
+		.nsid = nsid,
+		.cdw10 = DW(sel, NVME_NAMESPACE_ATTACH_CDW10_SEL),
+		.data_len = sizeof(*ctrlist),
+		.addr = (__u64)(uintptr_t)ctrlist,
+	};
+
+	return nvme_submit_admin_passthru(fd, &cmd, NULL);
+}
+
+/* Attach the listed controllers to nsid. */
+int nvme_ns_attach_ctrls(int fd, __u32 nsid, struct nvme_ctrl_list *ctrlist)
+{
+	return nvme_ns_attach(fd, nsid, NVME_NS_ATTACH_SEL_CTRL_ATTACH, ctrlist);
+}
+
+/* Detach the listed controllers from nsid (the "dettach" spelling is kept:
+ * renaming an exported symbol would break callers). */
+int nvme_ns_dettach_ctrls(int fd, __u32 nsid, struct nvme_ctrl_list *ctrlist)
+{
+	return nvme_ns_attach(fd, nsid, NVME_NS_ATTACH_SEL_CTRL_DEATTACH,
+		ctrlist);
+}
+
+int nvme_fw_download(int fd, __u32 offset, __u32 data_len, void *data)
+{
+	/* Firmware Image Download: NUMD (cdw10, 0-based) and OFST (cdw11)
+	 * are both expressed in dwords. */
+	struct nvme_passthru_cmd cmd = {
+		.opcode = nvme_admin_fw_download,
+		.cdw10 = (data_len >> 2) - 1,
+		.cdw11 = offset >> 2,
+		.data_len = data_len,
+		.addr = (__u64)(uintptr_t)data,
+	};
+
+	return nvme_submit_admin_passthru(fd, &cmd, NULL);
+}
+
+int nvme_fw_commit(int fd, __u8 slot, enum nvme_fw_commit_ca action, bool bpid)
+{
+	/* Firmware Commit: slot (FS), commit action (CA) and boot-partition
+	 * id (BPID) all live in cdw10. */
+	struct nvme_passthru_cmd cmd = {
+		.opcode = nvme_admin_fw_commit,
+		.cdw10 = DW(slot, NVME_FW_COMMIT_CDW10_FS) |
+			 DW(action, NVME_FW_COMMIT_CDW10_CA) |
+			 DW(bpid, NVME_FW_COMMIT_CDW10_BPID),
+	};
+
+	return nvme_submit_admin_passthru(fd, &cmd, NULL);
+}
+
+/*
+ * Security Send admin command: transfers @data_len bytes of security
+ * protocol payload.  The protocol selector fields are packed into CDW10
+ * and the transfer length @tl goes into CDW11 unchanged.
+ */
+int nvme_security_send(int fd, __u32 nsid, __u8 nssf, __u8 spsp0, __u8 spsp1,
+ __u8 secp, __u32 tl, __u32 data_len, void *data,
+ __u32 *result)
+{
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_admin_security_send,
+ .nsid = nsid,
+ .cdw10 = DW(secp, NVME_SECURITY_SECP) |
+ DW(spsp0, NVME_SECURITY_SPSP0) |
+ DW(spsp1, NVME_SECURITY_SPSP1) |
+ DW(nssf, NVME_SECURITY_NSSF),
+ .cdw11 = tl,
+ .data_len = data_len,
+ .addr = (__u64)(uintptr_t)data,
+ };
+
+ return nvme_submit_admin_passthru(fd, &cmd, result);
+}
+
+/*
+ * Security Receive admin command: mirror of nvme_security_send() with the
+ * allocation length @al in CDW11 and the same CDW10 protocol packing.
+ */
+int nvme_security_receive(int fd, __u32 nsid, __u8 nssf, __u8 spsp0,
+ __u8 spsp1, __u8 secp, __u32 al, __u32 data_len,
+ void *data, __u32 *result)
+{
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_admin_security_recv,
+ .nsid = nsid,
+ .cdw10 = DW(secp, NVME_SECURITY_SECP) |
+ DW(spsp0, NVME_SECURITY_SPSP0) |
+ DW(spsp1, NVME_SECURITY_SPSP1) |
+ DW(nssf, NVME_SECURITY_NSSF),
+ .cdw11 = al,
+ .data_len = data_len,
+ .addr = (__u64)(uintptr_t)data,
+ };
+
+ return nvme_submit_admin_passthru(fd, &cmd, result);
+}
+
+/*
+ * Get LBA Status admin command.  The 64-bit starting LBA is split across
+ * CDW10 (low) and CDW11 (high); @mndw fills CDW12 and the range length /
+ * action type pack into CDW13.
+ */
+int nvme_get_lba_status(int fd, __u32 nsid, __u64 slba, __u32 mndw, __u16 rl,
+ enum nvme_lba_status_atype atype,
+ struct nvme_lba_status *lbas)
+{
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_admin_get_lba_status,
+ .nsid = nsid,
+ .addr = (__u64)(uintptr_t)lbas,
+ .cdw10 = slba & 0xffffffff,
+ .cdw11 = slba >> 32,
+ .cdw12 = mndw,
+ .cdw13 = DW(rl, NVME_GET_LBA_STATUS_CDW13_RL) |
+ DW(atype, NVME_GET_LBA_STATUS_CDW13_ATYPE),
+ };
+
+ return nvme_submit_admin_passthru(fd, &cmd, NULL);
+}
+
+int nvme_directive_send(int fd, __u32 nsid, __u16 dspec, __u8 doper,
+ enum nvme_directive_dtype dtype, __u32 cdw12,
+ __u32 data_len, void *data, __u32 *result)
+{
+ __u32 cdw10 = data_len ? (data_len >> 2) - 1 : 0;
+ __u32 cdw11 = DW(doper, NVME_DIRECTIVE_CDW11_DOPER) |
+ DW(dtype, NVME_DIRECTIVE_CDW11_DTYPE) |
+ DW(dspec, NVME_DIRECTIVE_CDW11_DPSEC);
+
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_admin_directive_send,
+ .nsid = nsid,
+ .cdw10 = cdw10,
+ .cdw11 = cdw11,
+ .cdw12 = cdw12,
+ .data_len = data_len,
+ .addr = (__u64)(uintptr_t)data,
+ };
+
+ return nvme_submit_admin_passthru(fd, &cmd, result);
+}
+
+/*
+ * Directive Send / Identify / Enable Directive operation: enables or
+ * disables directive type @dtype for namespace @nsid via the CDW12
+ * DTYPE/ENDIR fields.
+ *
+ * Fix: doper and dtype were passed to nvme_directive_send() in reversed
+ * order -- its signature is (..., dspec, doper, dtype, ...), so the DTYPE
+ * value ended up in the CDW11 DOPER field and vice versa.
+ */
+int nvme_directive_send_id_endir(int fd, __u32 nsid, bool endir,
+ enum nvme_directive_dtype dtype,
+ struct nvme_id_directives *id)
+{
+ __u32 cdw12 = DW(dtype, NVME_DIRECTIVE_SEND_IDENTIFY_CDW12_DTYPE) |
+ DW(endir, NVME_DIRECTIVE_SEND_IDENTIFY_CDW12_ENDIR);
+
+ return nvme_directive_send(fd, nsid, 0,
+ NVME_DIRECTIVE_SEND_IDENTIFY_DOPER_ENDIR,
+ NVME_DIRECTIVE_DTYPE_IDENTIFY, cdw12, sizeof(*id),
+ id, NULL);
+}
+
+/*
+ * Directive Send / Streams / Release Identifier for @stream_id (DSPEC).
+ *
+ * Fix: doper and dtype were swapped in the nvme_directive_send() call;
+ * the signature is (..., dspec, doper, dtype, ...).
+ */
+int nvme_directive_send_stream_release_identifier(int fd, __u32 nsid,
+ __u16 stream_id)
+{
+ return nvme_directive_send(fd, nsid, stream_id,
+ NVME_DIRECTIVE_SEND_STREAMS_DOPER_RELEASE_IDENTIFIER,
+ NVME_DIRECTIVE_DTYPE_STREAMS, 0, 0,
+ NULL, NULL);
+}
+
+/*
+ * Directive Send / Streams / Release Resources for namespace @nsid.
+ *
+ * Fix: doper and dtype were swapped in the nvme_directive_send() call;
+ * the signature is (..., dspec, doper, dtype, ...).
+ */
+int nvme_directive_send_stream_release_resource(int fd, __u32 nsid)
+{
+ return nvme_directive_send(fd, nsid, 0,
+ NVME_DIRECTIVE_SEND_STREAMS_DOPER_RELEASE_RESOURCE,
+ NVME_DIRECTIVE_DTYPE_STREAMS, 0, 0, NULL,
+ NULL);
+}
+
+int nvme_directive_recv(int fd, __u32 nsid, __u16 dspec, __u8 doper,
+ enum nvme_directive_dtype dtype, __u32 cdw12,
+ __u32 data_len, void *data, __u32 *result)
+{
+ __u32 cdw10 = data_len ? (data_len >> 2) - 1 : 0;
+ __u32 cdw11 = DW(doper, NVME_DIRECTIVE_CDW11_DOPER) |
+ DW(dtype, NVME_DIRECTIVE_CDW11_DTYPE) |
+ DW(dspec, NVME_DIRECTIVE_CDW11_DPSEC);
+
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_admin_directive_recv,
+ .nsid = nsid,
+ .cdw10 = cdw10,
+ .cdw11 = cdw11,
+ .cdw12 = cdw12,
+ .data_len = data_len,
+ .addr = (__u64)(uintptr_t)data,
+ };
+
+ return nvme_submit_admin_passthru(fd, &cmd, result);
+}
+
+/*
+ * Directive Receive / Identify / Return Parameters into @id.
+ *
+ * Fix: doper and dtype were swapped in the nvme_directive_recv() call;
+ * the signature is (..., dspec, doper, dtype, ...).
+ */
+int nvme_directive_recv_identify_parameters(int fd, __u32 nsid,
+ struct nvme_id_directives *id)
+{
+ return nvme_directive_recv(fd, nsid, 0,
+ NVME_DIRECTIVE_RECEIVE_IDENTIFY_DOPER_PARAM,
+ NVME_DIRECTIVE_DTYPE_IDENTIFY, 0, sizeof(*id),
+ id, NULL);
+}
+
+/*
+ * Directive Receive / Streams / Return Parameters into @parms.
+ *
+ * Fix: doper and dtype were swapped in the nvme_directive_recv() call;
+ * the signature is (..., dspec, doper, dtype, ...).
+ */
+int nvme_directive_recv_stream_parameters(int fd, __u32 nsid,
+ struct nvme_streams_directive_params *parms)
+{
+ return nvme_directive_recv(fd, nsid, 0,
+ NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_PARAM,
+ NVME_DIRECTIVE_DTYPE_STREAMS, 0, sizeof(*parms),
+ parms, NULL);
+}
+
+/*
+ * Directive Receive / Streams / Get Status into @id.
+ *
+ * Fix: doper and dtype were swapped in the nvme_directive_recv() call;
+ * the signature is (..., dspec, doper, dtype, ...).
+ * NOTE(review): @nr_entries is accepted but unused -- the transfer size is
+ * always sizeof(*id); confirm whether it should scale the buffer length.
+ */
+int nvme_directive_recv_stream_status(int fd, __u32 nsid, unsigned nr_entries,
+ struct nvme_streams_directive_status *id)
+{
+ return nvme_directive_recv(fd, nsid, 0,
+ NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_STATUS,
+ NVME_DIRECTIVE_DTYPE_STREAMS, 0, sizeof(*id),
+ id, NULL);
+}
+
+/*
+ * Directive Receive / Streams / Allocate Resources: requests @nsr streams
+ * (CDW12); the number allocated comes back through @result (CQE DW0).
+ *
+ * Fix: doper and dtype were swapped in the nvme_directive_recv() call;
+ * the signature is (..., dspec, doper, dtype, ...).
+ */
+int nvme_directive_recv_stream_allocate(int fd, __u32 nsid, __u16 nsr,
+ __u32 *result)
+{
+ return nvme_directive_recv(fd, nsid, 0,
+ NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_RESOURCE,
+ NVME_DIRECTIVE_DTYPE_STREAMS, nsr, 0, NULL,
+ result);
+}
+
+/*
+ * Fabrics Property Set, tunneled through the admin passthru interface:
+ * the fabrics command type rides in the passthru nsid field.  CDW10 is
+ * the attribute size flag from is_64bit_reg() (presumably non-zero for
+ * 8-byte properties -- confirm against that helper), CDW11 the property
+ * offset, and the 64-bit @value is split across CDW12 (low) / CDW13 (high).
+ */
+int nvme_set_property(int fd, int offset, __u64 value)
+{
+ __u32 cdw10 = is_64bit_reg(offset);
+
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_admin_fabrics,
+ .nsid = nvme_fabrics_type_property_set,
+ .cdw10 = cdw10,
+ .cdw11 = offset,
+ .cdw12 = value & 0xffffffff,
+ .cdw13 = value >> 32,
+ };
+
+ return nvme_submit_admin_passthru(fd, &cmd, NULL);
+}
+
+/*
+ * Fabrics Property Get, tunneled like nvme_set_property().  Uses the
+ * 64-bit passthru variant so the full 8-byte property value is returned
+ * through *value from CQE DW0-1.
+ */
+int nvme_get_property(int fd, int offset, __u64 *value)
+{
+ __u32 cdw10 = is_64bit_reg(offset);
+
+ struct nvme_passthru_cmd64 cmd = {
+ .opcode = nvme_admin_fabrics,
+ .nsid = nvme_fabrics_type_property_get,
+ .cdw10 = cdw10,
+ .cdw11 = offset,
+ };
+
+ return nvme_submit_admin_passthru64(fd, &cmd, value);
+}
+
+int nvme_sanitize_nvm(int fd, enum nvme_sanitize_sanact sanact, bool ause,
+ __u8 owpass, bool oipbp, bool nodas, __u32 ovrpat)
+{
+ __u32 cdw10 = DW(sanact, NVME_SANITIZE_CDW10_SANACT) |
+ DW(!!ause, NVME_SANITIZE_CDW10_AUSE) |
+ DW(owpass, NVME_SANITIZE_CDW10_OWPASS) |
+ DW(!!oipbp, NVME_SANITIZE_CDW10_OIPBP) |
+ DW(!!nodas, NVME_SANITIZE_CDW10_NODAS);
+ __u32 cdw11 = ovrpat;
+
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_admin_sanitize_nvm,
+ .cdw10 = cdw10,
+ .cdw11 = cdw11,
+ };
+
+ return nvme_submit_admin_passthru(fd, &cmd, NULL);
+}
+
+int nvme_dev_self_test(int fd, __u32 nsid, enum nvme_dst_stc stc)
+{
+ __u32 cdw10 = DW(stc, NVME_DEVICE_SELF_TEST_CDW10_STC);
+
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_admin_dev_self_test,
+ .nsid = nsid,
+ .cdw10 = cdw10,
+ };
+
+ return nvme_submit_admin_passthru(fd, &cmd, NULL);
+}
+
+int nvme_virtual_mgmt(int fd, enum nvme_virt_mgmt_act act,
+ enum nvme_virt_mgmt_rt rt, __u16 cntlid, __u16 nr,
+ __u32 *result)
+{
+ __u32 cdw10 = DW(act, NVME_VIRT_MGMT_CDW10_ACT) |
+ DW(rt, NVME_VIRT_MGMT_CDW10_RT) |
+ DW(cntlid, NVME_VIRT_MGMT_CDW10_CNTLID);
+ __u32 cdw11 = DW(nr, NVME_VIRT_MGMT_CDW11_NR);
+
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_admin_virtual_mgmt,
+ .cdw10 = cdw10,
+ .cdw11 = cdw11,
+ };
+
+ return nvme_submit_admin_passthru(fd, &cmd, result);
+}
+
+int nvme_submit_io_passthru64(int fd, struct nvme_passthru_cmd64 *cmd,
+ __u64 *result)
+{
+ return nvme_submit_passthru64(fd, NVME_IOCTL_IO64_CMD, cmd, result);
+}
+
+int nvme_io_passthru64(int fd, __u8 opcode, __u8 flags, __u16 rsvd,
+ __u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, __u32 cdw11,
+ __u32 cdw12, __u32 cdw13, __u32 cdw14, __u32 cdw15,
+ __u32 data_len, void *data, __u32 metadata_len, void *metadata,
+ __u32 timeout_ms, __u64 *result)
+{
+ return nvme_passthru64(fd, NVME_IOCTL_IO64_CMD, opcode, flags, rsvd,
+ nsid, cdw2, cdw3, cdw10, cdw11, cdw12, cdw13, cdw14, cdw15,
+ data_len, data, metadata_len, metadata, timeout_ms, result);
+}
+
+int nvme_submit_io_passthru(int fd, struct nvme_passthru_cmd *cmd, __u32 *result)
+{
+ return nvme_submit_passthru(fd, NVME_IOCTL_IO_CMD, cmd, result);
+}
+
+int nvme_io_passthru(int fd, __u8 opcode, __u8 flags, __u16 rsvd,
+ __u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, __u32 cdw11,
+ __u32 cdw12, __u32 cdw13, __u32 cdw14, __u32 cdw15,
+ __u32 data_len, void *data, __u32 metadata_len, void *metadata,
+ __u32 timeout_ms, __u32 *result)
+{
+ return nvme_passthru(fd, NVME_IOCTL_IO_CMD, opcode, flags, rsvd, nsid,
+ cdw2, cdw3, cdw10, cdw11, cdw12, cdw13, cdw14, cdw15, data_len,
+ data, metadata_len, metadata, timeout_ms, result);
+}
+
+int nvme_flush(int fd, __u32 nsid)
+{
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_cmd_flush,
+ .nsid = nsid,
+ };
+
+ return nvme_submit_io_passthru(fd, &cmd, NULL);
+}
+
+/*
+ * Common submission helper for the basic NVM I/O commands (read, write,
+ * compare, write-zeroes, verify, write-uncorrectable).  Packs the shared
+ * command dwords: SLBA split across CDW10/11, NLB plus control flags in
+ * CDW12, per-opcode flags (e.g. DSM/DSPEC for write) in CDW13, and the
+ * end-to-end protection reftag/apptag/appmask in CDW14/15.
+ */
+static int nvme_io(int fd, __u8 opcode, __u32 nsid, __u64 slba, __u16 nlb,
+ __u16 control, __u32 flags, __u32 reftag, __u16 apptag, __u16 appmask,
+ __u32 data_len, void *data, __u32 metadata_len, void *metadata)
+{
+ __u32 cdw10 = slba & 0xffffffff;
+ __u32 cdw11 = slba >> 32;
+ /* NLB is 0's based in the low 16 bits; control bits occupy the high half. */
+ __u32 cdw12 = nlb | (control << 16);
+ __u32 cdw13 = flags;
+ __u32 cdw14 = reftag;
+ __u32 cdw15 = apptag | (appmask << 16);
+
+ struct nvme_passthru_cmd cmd = {
+ .opcode = opcode,
+ .nsid = nsid,
+ .cdw10 = cdw10,
+ .cdw11 = cdw11,
+ .cdw12 = cdw12,
+ .cdw13 = cdw13,
+ .cdw14 = cdw14,
+ .cdw15 = cdw15,
+ .data_len = data_len,
+ .metadata_len = metadata_len,
+ .addr = (__u64)(uintptr_t)data,
+ .metadata = (__u64)(uintptr_t)metadata,
+ };
+
+ return nvme_submit_io_passthru(fd, &cmd, NULL);
+}
+
+int nvme_read(int fd, __u32 nsid, __u64 slba, __u16 nlb, __u16 control,
+ __u8 dsm, __u32 reftag, __u16 apptag, __u16 appmask,
+ __u32 data_len, void *data, __u32 metadata_len, void *metadata)
+{
+ return nvme_io(fd, nvme_cmd_read, nsid, slba, nlb, control, dsm,
+ reftag, apptag, appmask, data_len, data, metadata_len,
+ metadata);
+}
+
+int nvme_write(int fd, __u32 nsid, __u64 slba, __u16 nlb, __u16 control,
+ __u8 dsm, __u16 dspec, __u32 reftag, __u16 apptag,
+ __u16 appmask, __u32 data_len, void *data, __u32 metadata_len,
+ void *metadata)
+{
+ __u32 flags = dsm | dspec << 16;
+
+ return nvme_io(fd, nvme_cmd_write, nsid, slba, nlb, control, flags,
+ reftag, apptag, appmask, data_len, data, metadata_len,
+ metadata);
+}
+
+int nvme_compare(int fd, __u32 nsid, __u64 slba, __u16 nlb, __u16 control,
+ __u32 reftag, __u16 apptag, __u16 appmask, __u32 data_len,
+ void *data, __u32 metadata_len, void *metadata)
+{
+ return nvme_io(fd, nvme_cmd_compare, nsid, slba, nlb, control, 0,
+ reftag, apptag, appmask, data_len, data, metadata_len,
+ metadata);
+}
+
+int nvme_write_zeros(int fd, __u32 nsid, __u64 slba, __u16 nlb, __u16 control,
+ __u32 reftag, __u16 apptag, __u16 appmask)
+{
+ return nvme_io(fd, nvme_cmd_write_zeroes, nsid, slba, nlb, control, 0,
+ reftag, apptag, appmask, 0, NULL, 0, NULL);
+}
+
+int nvme_verify(int fd, __u32 nsid, __u64 slba, __u16 nlb, __u16 control,
+ __u32 reftag, __u16 apptag, __u16 appmask)
+{
+ return nvme_io(fd, nvme_cmd_verify, nsid, slba, nlb, control, 0,
+ reftag, apptag, appmask, 0, NULL, 0, NULL);
+}
+
+int nvme_write_uncorrectable(int fd, __u32 nsid, __u64 slba, __u16 nlb)
+{
+ return nvme_io(fd, nvme_cmd_write_uncor, nsid, slba, nlb, 0, 0, 0, 0,
+ 0, 0, NULL, 0, NULL);
+}
+
+/*
+ * Dataset Management I/O command over @nr_ranges range descriptors.
+ * CDW10 carries the 0's based number of ranges and CDW11 the attribute
+ * bits (e.g. deallocate).  NOTE(review): nr_ranges - 1 underflows when
+ * nr_ranges is 0 -- callers are assumed to pass at least one range;
+ * confirm.
+ */
+int nvme_dsm(int fd, __u32 nsid, __u32 attrs, __u16 nr_ranges,
+ struct nvme_dsm_range *dsm)
+{
+ __u32 cdw11 = attrs;
+
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_cmd_dsm,
+ .nsid = nsid,
+ .addr = (__u64)(uintptr_t)dsm,
+ .data_len = nr_ranges * sizeof(*dsm),
+ .cdw10 = nr_ranges - 1,
+ .cdw11 = cdw11,
+ };
+
+ return nvme_submit_io_passthru(fd, &cmd, NULL);
+}
+
+int nvme_resv_acquire(int fd, __u32 nsid, enum nvme_reservation_rtype rtype,
+ enum nvme_reservation_racqa racqa, bool iekey,
+ __u64 crkey, __u64 nrkey)
+{
+ __le64 payload[2] = { cpu_to_le64(crkey), cpu_to_le64(nrkey) };
+ __u32 cdw10 = (racqa & 0x7) | (iekey ? 1 << 3 : 0) | rtype << 8;
+
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_cmd_resv_acquire,
+ .nsid = nsid,
+ .cdw10 = cdw10,
+ .data_len = sizeof(payload),
+ .addr = (__u64)(uintptr_t)(payload),
+ };
+
+ return nvme_submit_io_passthru(fd, &cmd, NULL);
+}
+
+int nvme_resv_register(int fd, __u32 nsid, enum nvme_reservation_rrega rrega,
+ enum nvme_reservation_cptpl cptpl, bool iekey,
+ __u64 crkey, __u64 nrkey)
+{
+ __le64 payload[2] = { cpu_to_le64(crkey), cpu_to_le64(nrkey) };
+ __u32 cdw10 = (rrega & 0x7) | (iekey ? 1 << 3 : 0) | cptpl << 30;
+
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_cmd_resv_register,
+ .nsid = nsid,
+ .cdw10 = cdw10,
+ .data_len = sizeof(payload),
+ .addr = (__u64)(uintptr_t)(payload),
+ };
+
+ return nvme_submit_io_passthru(fd, &cmd, NULL);
+}
+
+int nvme_resv_release(int fd, __u32 nsid, enum nvme_reservation_rtype rtype,
+ enum nvme_reservation_rrela rrela, bool iekey,
+ __u64 crkey)
+{
+ __le64 payload[1] = { cpu_to_le64(crkey) };
+ __u32 cdw10 = (rrela & 0x7) | (iekey ? 1 << 3 : 0) | rtype << 8;
+
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_cmd_resv_release,
+ .nsid = nsid,
+ .cdw10 = cdw10,
+ .addr = (__u64)(uintptr_t)(payload),
+ .data_len = sizeof(payload),
+ };
+
+ return nvme_submit_io_passthru(fd, &cmd, NULL);
+}
+
+int nvme_resv_report(int fd, __u32 nsid, bool eds, __u32 len,
+ struct nvme_reservation_status *report)
+{
+ struct nvme_passthru_cmd cmd = {
+ .opcode = nvme_cmd_resv_report,
+ .nsid = nsid,
+ .cdw10 = (len >> 2) - 1,
+ .cdw11 = eds ? 1 : 0,
+ .addr = (__u64)(uintptr_t)report,
+ .data_len = len,
+ };
+
+ return nvme_submit_io_passthru(fd, &cmd, NULL);
+}
--- /dev/null
+#ifndef _LIBNVME_IOCTL_H
+#define _LIBNVME_IOCTL_H
+
+#include <linux/types.h>
+#include <sys/ioctl.h>
+
+/*
+ * We can not always count on the kernel UAPI being installed. Use the same
+ * 'ifdef' guard to avoid double definitions just in case.
+ */
+#ifndef _UAPI_LINUX_NVME_IOCTL_H
+#define _UAPI_LINUX_NVME_IOCTL_H
+
+/**
+ * struct nvme_passthru_cmd -
+ * @opcode: Operation code, see &enum nvme_io_opcodes and &enum nvme_admin_opcodes
+ * @flags: Not supported: intended for command flags (eg: SGL, FUSE)
+ * @rsvd1: Reserved for future use
+ * @nsid: Namespace Identifier, or Fabrics type
+ * @cdw2: Command Dword 2 (no spec defined use)
+ * @cdw3: Command Dword 3 (no spec defined use)
+ * @metadata: User space address to metadata buffer (NULL if not used)
+ * @addr: User space address to data buffer (NULL if not used)
+ * @metadata_len: Metadata buffer transfer length
+ * @data_len: Data buffer transfer length
+ * @cdw10: Command Dword 10 (command specific)
+ * @cdw11: Command Dword 11 (command specific)
+ * @cdw12: Command Dword 12 (command specific)
+ * @cdw13: Command Dword 13 (command specific)
+ * @cdw14: Command Dword 14 (command specific)
+ * @cdw15: Command Dword 15 (command specific)
+ * @timeout_ms: If non-zero, overrides system default timeout in milliseconds
+ * @result: Set on completion to the command's CQE DWORD 0 controller response
+ */
+struct nvme_passthru_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd1;
+ __u32 nsid;
+ __u32 cdw2;
+ __u32 cdw3;
+ __u64 metadata;
+ __u64 addr;
+ __u32 metadata_len;
+ __u32 data_len;
+ __u32 cdw10;
+ __u32 cdw11;
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 cdw15;
+ __u32 timeout_ms;
+ __u32 result;
+};
+
+/**
+ * struct nvme_passthru_cmd64 -
+ * @opcode: Operation code, see &enum nvme_io_opcodes and &enum nvme_admin_opcodes
+ * @flags: Not supported: intended for command flags (eg: SGL, FUSE)
+ * @rsvd1: Reserved for future use
+ * @nsid: Namespace Identifier, or Fabrics type
+ * @cdw2: Command Dword 2 (no spec defined use)
+ * @cdw3: Command Dword 3 (no spec defined use)
+ * @metadata: User space address to metadata buffer (NULL if not used)
+ * @addr: User space address to data buffer (NULL if not used)
+ * @metadata_len: Metadata buffer transfer length
+ * @data_len: Data buffer transfer length
+ * @cdw10: Command Dword 10 (command specific)
+ * @cdw11: Command Dword 11 (command specific)
+ * @cdw12: Command Dword 12 (command specific)
+ * @cdw13: Command Dword 13 (command specific)
+ * @cdw14: Command Dword 14 (command specific)
+ * @cdw15: Command Dword 15 (command specific)
+ * @timeout_ms: If non-zero, overrides system default timeout in milliseconds
+ * @rsvd2: Reserved for future use (also fills an implicit struct padding hole)
+ * @result: Set on completion to the command's CQE DWORD 0-1 controller response
+ */
+struct nvme_passthru_cmd64 {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd1;
+ __u32 nsid;
+ __u32 cdw2;
+ __u32 cdw3;
+ __u64 metadata;
+ __u64 addr;
+ __u32 metadata_len;
+ __u32 data_len;
+ __u32 cdw10;
+ __u32 cdw11;
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 cdw15;
+ __u32 timeout_ms;
+ __u32 rsvd2;
+ __u64 result;
+};
+
+#define NVME_IOCTL_ID _IO('N', 0x40)
+#define NVME_IOCTL_RESET _IO('N', 0x44)
+#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45)
+#define NVME_IOCTL_RESCAN _IO('N', 0x46)
+#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_passthru_cmd)
+#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd)
+#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64)
+#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64)
+
+#endif /* _UAPI_LINUX_NVME_IOCTL_H */
+
+/**
+ * nvme_submit_admin_passthru64() - Submit a 64-bit nvme passthrough admin
+ * command
+ * @fd: File descriptor of nvme device
+ * @cmd: The nvme admin command to send
+ * @result: Optional field to return the result from the CQE DW0-1
+ *
+ * Uses NVME_IOCTL_ADMIN64_CMD for the ioctl request.
+ *
+ * Return: The nvme command status if a response was received or -1
+ * with errno set otherwise.
+ */
+int nvme_submit_admin_passthru64(int fd, struct nvme_passthru_cmd64 *cmd,
+ __u64 *result);
+
+/**
+ * nvme_admin_passthru64() - Submit an nvme passthrough command
+ * @fd: File descriptor of nvme device
+ * @opcode: The nvme io command to send
+ * @flags: NVMe command flags (not used)
+ * @rsvd: Reserved for future use
+ * @nsid: Namespace identifier
+ * @cdw2: Command dword 2
+ * @cdw3: Command dword 3
+ * @cdw10: Command dword 10
+ * @cdw11: Command dword 11
+ * @cdw12: Command dword 12
+ * @cdw13: Command dword 13
+ * @cdw14: Command dword 14
+ * @cdw15: Command dword 15
+ * @data_len: Length of the data transfered in this command in bytes
+ * @data: Pointer to user address of the data buffer
+ * @metadata_len:Length of metadata transfered in this command
+ * @metadata: Pointer to user address of the metadata buffer
+ * @timeout_ms: How long the kernel waits for the command to complete
+ * @result: Optional field to return the result from the CQE dword 0
+ *
+ * Parameterized form of nvme_submit_admin_passthru64(). This sets up and
+ * submits a &struct nvme_passthru_cmd64.
+ *
+ * Known values for @opcode are defined in &enum nvme_admin_opcode.
+ *
+ * Return: The nvme command status if a response was received or -1
+ * with errno set otherwise.
+ */
+int nvme_admin_passthru64(int fd, __u8 opcode, __u8 flags, __u16 rsvd,
+ __u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, __u32 cdw11,
+ __u32 cdw12, __u32 cdw13, __u32 cdw14, __u32 cdw15,
+ __u32 data_len, void *data, __u32 metadata_len, void *metadata,
+ __u32 timeout_ms, __u64 *result);
+
+/**
+ * nvme_submit_admin_passthru() - Submit an nvme passthrough admin command
+ * @fd: File descriptor of nvme device
+ * @cmd: The nvme admin command to send
+ * @result: Optional field to return the result from the CQE DW0
+ *
+ * Uses NVME_IOCTL_ADMIN_CMD for the ioctl request.
+ *
+ * Return: The nvme command status if a response was received or -1
+ * with errno set otherwise.
+ */
+int nvme_submit_admin_passthru(int fd, struct nvme_passthru_cmd *cmd,
+ __u32 *result);
+
+/**
+ * nvme_admin_passthru() - Submit an nvme passthrough command
+ * @fd: File descriptor of nvme device
+ * @opcode: The nvme io command to send
+ * @flags: NVMe command flags (not used)
+ * @rsvd: Reserved for future use
+ * @nsid: Namespace identifier
+ * @cdw2: Command dword 2
+ * @cdw3: Command dword 3
+ * @cdw10: Command dword 10
+ * @cdw11: Command dword 11
+ * @cdw12: Command dword 12
+ * @cdw13: Command dword 13
+ * @cdw14: Command dword 14
+ * @cdw15: Command dword 15
+ * @data_len: Length of the data transfered in this command in bytes
+ * @data: Pointer to user address of the data buffer
+ * @metadata_len:Length of metadata transfered in this command
+ * @metadata: Pointer to user address of the metadata buffer
+ * @timeout_ms: How long the kernel waits for the command to complete
+ * @result: Optional field to return the result from the CQE dword 0
+ *
+ * Parameterized form of nvme_submit_admin_passthru(). This sets up and
+ * submits a &struct nvme_passthru_cmd.
+ *
+ * Known values for @opcode are defined in &enum nvme_admin_opcode.
+ *
+ * Return: The nvme command status if a response was received or -1
+ * with errno set otherwise.
+ */
+int nvme_admin_passthru(int fd, __u8 opcode, __u8 flags, __u16 rsvd,
+ __u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, __u32 cdw11,
+ __u32 cdw12, __u32 cdw13, __u32 cdw14, __u32 cdw15,
+ __u32 data_len, void *data, __u32 metadata_len, void *metadata,
+ __u32 timeout_ms, __u32 *result);
+
+/**
+ * nvme_submit_io_passthru64() - Submit a 64-bit nvme io passthrough command
+ * @fd: File descriptor of nvme device
+ * @cmd: The nvme io command to send
+ * @result: Optional field to return the result from the CQE DW0-1
+ *
+ * Uses NVME_IOCTL_IO64_CMD for the ioctl request.
+ *
+ * Return: The nvme command status if a response was received or -1
+ * with errno set otherwise.
+ */
+int nvme_submit_io_passthru64(int fd, struct nvme_passthru_cmd64 *cmd,
+ __u64 *result);
+
+/**
+ * nvme_io_passthru64() - Submit an nvme io passthrough command
+ * @fd: File descriptor of nvme device
+ * @opcode: The nvme io command to send
+ * @flags: NVMe command flags (not used)
+ * @rsvd: Reserved for future use
+ * @nsid: Namespace identifier
+ * @cdw2: Command dword 2
+ * @cdw3: Command dword 3
+ * @cdw10: Command dword 10
+ * @cdw11: Command dword 11
+ * @cdw12: Command dword 12
+ * @cdw13: Command dword 13
+ * @cdw14: Command dword 14
+ * @cdw15: Command dword 15
+ * @data_len: Length of the data transfered in this command in bytes
+ * @data: Pointer to user address of the data buffer
+ * @metadata_len:Length of metadata transfered in this command
+ * @metadata: Pointer to user address of the metadata buffer
+ * @timeout_ms: How long the kernel waits for the command to complete
+ * @result: Optional field to return the result from the CQE dword 0
+ *
+ * Parameterized form of nvme_submit_io_passthru64(). This sets up and submits
+ * a &struct nvme_passthru_cmd64.
+ *
+ * Known values for @opcode are defined in &enum nvme_io_opcode.
+ *
+ * Return: The nvme command status if a response was received or -1 with errno
+ * set otherwise.
+ */
+int nvme_io_passthru64(int fd, __u8 opcode, __u8 flags, __u16 rsvd,
+ __u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, __u32 cdw11,
+ __u32 cdw12, __u32 cdw13, __u32 cdw14, __u32 cdw15,
+ __u32 data_len, void *data, __u32 metadata_len, void *metadata,
+ __u32 timeout_ms, __u64 *result);
+
+/**
+ * nvme_submit_io_passthru() - Submit an nvme io passthrough command
+ * @fd: File descriptor of nvme device
+ * @cmd: The nvme io command to send
+ * @result: Optional field to return the result from the CQE dword 0
+ *
+ * Uses NVME_IOCTL_IO_CMD for the ioctl request.
+ *
+ * Return: The nvme command status if a response was received or -1
+ * with errno set otherwise.
+ */
+int nvme_submit_io_passthru(int fd, struct nvme_passthru_cmd *cmd,
+ __u32 *result);
+
+/**
+ * nvme_io_passthru() - Submit an nvme io passthrough command
+ * @fd: File descriptor of nvme device
+ * @opcode: The nvme io command to send
+ * @flags: NVMe command flags (not used)
+ * @rsvd: Reserved for future use
+ * @nsid: Namespace identifier
+ * @cdw2: Command dword 2
+ * @cdw3: Command dword 3
+ * @cdw10: Command dword 10
+ * @cdw11: Command dword 11
+ * @cdw12: Command dword 12
+ * @cdw13: Command dword 13
+ * @cdw14: Command dword 14
+ * @cdw15: Command dword 15
+ * @data_len: Length of the data transfered in this command in bytes
+ * @data: Pointer to user address of the data buffer
+ * @metadata_len:Length of metadata transfered in this command
+ * @metadata: Pointer to user address of the metadata buffer
+ * @timeout_ms: How long the kernel waits for the command to complete
+ * @result: Optional field to return the result from the CQE dword 0
+ *
+ * Parameterized form of nvme_submit_io_passthru(). This sets up and submits
+ * a &struct nvme_passthru_cmd.
+ *
+ * Known values for @opcode are defined in &enum nvme_io_opcode.
+ *
+ * Return: The nvme command status if a response was received or -1
+ * with errno set otherwise.
+ */
+int nvme_io_passthru(int fd, __u8 opcode, __u8 flags, __u16 rsvd,
+ __u32 nsid, __u32 cdw2, __u32 cdw3, __u32 cdw10, __u32 cdw11,
+ __u32 cdw12, __u32 cdw13, __u32 cdw14, __u32 cdw15,
+ __u32 data_len, void *data, __u32 metadata_len, void *metadata,
+ __u32 timeout_ms, __u32 *result);
+
+/**
+ * nvme_subsystem_reset() - Initiate a subsystem reset
+ * @fd: File descriptor of nvme device
+ *
+ * This should only be sent to controller handles, not to namespaces.
+ *
+ * Return: Zero if a subsystem reset was initiated or -1 with errno set
+ * otherwise.
+ */
+int nvme_subsystem_reset(int fd);
+
+/**
+ * nvme_reset_ctrl() - Initiate a controller reset
+ * @fd: File descriptor of nvme device
+ *
+ * This should only be sent to controller handles, not to namespaces.
+ *
+ * Return: Zero if a reset was initiated or -1 with errno set otherwise.
+ */
+int nvme_reset_ctrl(int fd);
+
+/**
+ * nvme_ns_rescan() - Initiate a controller rescan
+ * @fd: File descriptor of nvme device
+ *
+ * This should only be sent to controller handles, not to namespaces.
+ *
+ * Return: Zero if a rescan was initiated or -1 with errno set otherwise.
+ */
+int nvme_ns_rescan(int fd);
+
+/**
+ * nvme_get_nsid() - Retrieve the NSID from a namespace file descriptor
+ * @fd: File descriptor of nvme namespace
+ *
+ * This should only be sent to namespace handles, not to controllers.
+ *
+ * Return: The namespace identifier if successful or -1 with errno set
+ * otherwise.
+ */
+int nvme_get_nsid(int fd);
+
+#endif /* _LIBNVME_IOCTL_H */
--- /dev/null
+#ifndef _LIBNVME_PRIVATE_H
+#define _LIBNVME_PRIVATE_H
+
+#include <dirent.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "tree.h"
+
+struct nvme_path {
+ TAILQ_ENTRY(nvme_path) entry;
+ TAILQ_ENTRY(nvme_path) nentry;
+ struct nvme_ctrl *c;
+ struct nvme_ns *n;
+
+ char *name;
+ char *sysfs_dir;
+ char *ana_state;
+ int grpid;
+};
+
+struct nvme_ns {
+ TAILQ_ENTRY(nvme_ns) entry;
+ TAILQ_HEAD(, nvme_path) paths;
+ struct nvme_subsystem *s;
+ struct nvme_ctrl *c;
+
+ int fd;
+ char *name;
+ char *sysfs_dir;
+ int nsid;
+
+ int lba_size;
+ int meta_size;
+ uint64_t lba_count;
+ uint64_t lba_util;
+};
+
+struct nvme_ctrl {
+ TAILQ_ENTRY(nvme_ctrl) entry;
+ TAILQ_HEAD(, nvme_ns) namespaces;
+ TAILQ_HEAD(, nvme_path) paths;
+ struct nvme_subsystem *s;
+
+ int fd;
+ char *name;
+ char *sysfs_dir;
+ char *address;
+ char *firmware;
+ char *model;
+ char *state;
+ char *numa_node;
+ char *queue_count;
+ char *serial;
+ char *sqsize;
+ char *transport;
+ char *subsysnqn;
+};
+
+struct nvme_subsystem {
+ TAILQ_ENTRY(nvme_subsystem) entry;
+ TAILQ_HEAD(, nvme_ctrl) ctrls;
+ TAILQ_HEAD(, nvme_ns) namespaces;
+ struct nvme_root *r;
+
+ char *name;
+ char *sysfs_dir;
+ char *subsysnqn;
+};
+
+struct nvme_root {
+ TAILQ_HEAD(, nvme_subsystem) subsystems;
+};
+
+void nvme_free_ctrl(struct nvme_ctrl *c);
+void nvme_ctrl_free_ns(struct nvme_ns *n);
+void nvme_subsystem_free_ns(struct nvme_ns *n);
+void nvme_free_path(struct nvme_path *p);
+void nvme_free_subsystem(struct nvme_subsystem *s);
+
+int nvme_scan_subsystem(struct nvme_root *t, char *name, nvme_scan_filter_t f);
+int nvme_subsystem_scan_namespace(struct nvme_subsystem *s, char *name);
+int nvme_subsystem_scan_ctrls(struct nvme_subsystem *s);
+int nvme_subsystem_scan_ctrl(struct nvme_subsystem *s, char *name);
+
+int nvme_ctrl_scan_namespace(struct nvme_ctrl *c, char *name);
+int nvme_ctrl_scan_path(struct nvme_ctrl *c, char *name);
+
+/* Free the @i scandir(3)-style entries in @d, then the vector itself. */
+static inline void nvme_free_dirents(struct dirent **d, int i)
+{
+ for (int n = i - 1; n >= 0; n--)
+ free(d[n]);
+ free(d);
+}
+
+#endif /* _LIBNVME_PRIVATE_H */
--- /dev/null
+#define _GNU_SOURCE
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "ioctl.h"
+#include "filters.h"
+#include "tree.h"
+#include "private.h"
+#include "filters.h"
+#include "util.h"
+#include "cmd.h"
+
+static int nvme_scan_topology(struct nvme_root *r, nvme_scan_filter_t f)
+{
+ struct dirent **subsys;
+ int i, ret;
+
+ ret = nvme_scan_subsystems(&subsys);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ret; i++)
+ nvme_scan_subsystem(r, subsys[i]->d_name, f);
+
+ nvme_free_dirents(subsys, i);
+ return 0;
+}
+
+/*
+ * Allocate a topology root and populate it by scanning sysfs, applying
+ * filter @f to each discovered subsystem.  Returns NULL with errno set
+ * on allocation failure.  A scan failure still returns a valid (possibly
+ * empty) tree, preserving the existing best-effort contract.
+ */
+nvme_root_t nvme_scan_filter(nvme_scan_filter_t f)
+{
+ /* calloc() zeroes the root, replacing the malloc+memset pair. */
+ struct nvme_root *r = calloc(1, sizeof(*r));
+
+ if (!r) {
+ errno = ENOMEM;
+ return NULL;
+ }
+
+ TAILQ_INIT(&r->subsystems);
+ nvme_scan_topology(r, f);
+ return r;
+}
+
+/* Scan with no filter: build the full topology tree. */
+nvme_root_t nvme_scan(void)
+{
+ return nvme_scan_filter(NULL);
+}
+
+nvme_subsystem_t nvme_first_subsystem(nvme_root_t r)
+{
+ return TAILQ_FIRST(&r->subsystems);
+}
+
+nvme_subsystem_t nvme_next_subsystem(nvme_root_t r, nvme_subsystem_t s)
+{
+ if (!s)
+ return NULL;
+ return TAILQ_NEXT(s, entry);
+}
+
+void nvme_refresh_topology(nvme_root_t r)
+{
+ struct nvme_subsystem *s, *_s;
+
+ nvme_for_each_subsystem_safe(r, s, _s)
+ nvme_free_subsystem(s);
+ nvme_scan_topology(r, NULL);
+}
+
+/*
+ * NOTE(review): this body is byte-identical to nvme_refresh_topology()
+ * above -- it frees every subsystem and then rescans with no filter.
+ * Presumably "reset" was meant to differ (e.g. free without rescanning);
+ * confirm the intended semantics before callers come to depend on one.
+ */
+void nvme_reset_topology(nvme_root_t r)
+{
+ struct nvme_subsystem *s, *_s;
+
+ nvme_for_each_subsystem_safe(r, s, _s)
+ nvme_free_subsystem(s);
+ nvme_scan_topology(r, NULL);
+}
+
+void nvme_free_tree(nvme_root_t r)
+{
+ struct nvme_subsystem *s, *_s;
+
+ nvme_for_each_subsystem_safe(r, s, _s)
+ nvme_free_subsystem(s);
+ free(r);
+}
+
+const char *nvme_subsystem_get_nqn(nvme_subsystem_t s)
+{
+ return s->subsysnqn;
+}
+
+const char *nvme_subsystem_get_sysfs_dir(nvme_subsystem_t s)
+{
+ return s->sysfs_dir;
+}
+
+const char *nvme_subsystem_get_name(nvme_subsystem_t s)
+{
+ return s->name;
+}
+
+nvme_ctrl_t nvme_subsystem_first_ctrl(nvme_subsystem_t s)
+{
+ return TAILQ_FIRST(&s->ctrls);
+}
+
+nvme_ctrl_t nvme_subsystem_next_ctrl(nvme_subsystem_t s, nvme_ctrl_t c)
+{
+ if (!c)
+ return NULL;
+ return TAILQ_NEXT(c, entry);
+}
+
+nvme_ns_t nvme_subsystem_first_ns(nvme_subsystem_t s)
+{
+ return TAILQ_FIRST(&s->namespaces);
+}
+
+nvme_ns_t nvme_subsystem_next_ns(nvme_subsystem_t s, nvme_ns_t n)
+{
+ if (!n)
+ return NULL;
+ return TAILQ_NEXT(n, entry);
+}
+
+/*
+ * Tear down a subsystem node: unlink it from its root (if attached),
+ * free every controller (which in turn releases controller-owned
+ * namespaces and paths), free the subsystem-level namespaces, then the
+ * owned strings and the node itself.
+ */
+void nvme_free_subsystem(struct nvme_subsystem *s)
+{
+ struct nvme_ctrl *c, *_c;
+ struct nvme_ns *n, *_n;
+
+ if (s->r)
+ TAILQ_REMOVE(&s->r->subsystems, s, entry);
+
+ nvme_subsystem_for_each_ctrl_safe(s, c, _c)
+ nvme_free_ctrl(c);
+
+ nvme_subsystem_for_each_ns_safe(s, n, _n)
+ nvme_subsystem_free_ns(n);
+
+ free(s->name);
+ free(s->sysfs_dir);
+ free(s->subsysnqn);
+ free(s);
+}
+
+static int nvme_subsystem_scan_namespaces(struct nvme_subsystem *s)
+{
+ struct dirent **namespaces;
+ int i, ret;
+
+ ret = nvme_scan_subsystem_namespaces(s, &namespaces);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ret; i++)
+ nvme_subsystem_scan_namespace(s, namespaces[i]->d_name);
+
+ nvme_free_dirents(namespaces, i);
+ return 0;
+}
+
+int nvme_subsystem_scan_ctrls(struct nvme_subsystem *s)
+{
+ struct dirent **ctrls;
+ int i, ret;
+
+ ret = nvme_scan_subsystem_ctrls(s, &ctrls);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ret; i++)
+ nvme_subsystem_scan_ctrl(s, ctrls[i]->d_name);
+
+ nvme_free_dirents(ctrls, i);
+ return 0;
+}
+
+/*
+ * Build one subsystem node from sysfs directory @name, scan its
+ * namespaces and controllers, link it into @r, then apply filter @f
+ * (a rejected subsystem is freed again and -1 is returned).
+ *
+ * Fixes: stray double semicolon after the strdup; use calloc instead of
+ * malloc+memset; a failed strdup no longer leaks the subsystem node.
+ */
+int nvme_scan_subsystem(struct nvme_root *r, char *name, nvme_scan_filter_t f)
+{
+ struct nvme_subsystem *s;
+ char *path;
+ int ret;
+
+ ret = asprintf(&path, "%s/%s", nvme_subsys_sysfs_dir, name);
+ if (ret < 0)
+ return ret;
+
+ s = calloc(1, sizeof(*s));
+ if (!s) {
+ errno = ENOMEM;
+ goto free_path;
+ }
+
+ s->name = strdup(name);
+ if (!s->name) {
+ free(s);
+ errno = ENOMEM;
+ goto free_path;
+ }
+
+ s->r = r;
+ s->sysfs_dir = path;
+ s->subsysnqn = nvme_get_subsys_attr(s, "subsysnqn");
+ TAILQ_INIT(&s->ctrls);
+ TAILQ_INIT(&s->namespaces);
+
+ nvme_subsystem_scan_namespaces(s);
+ nvme_subsystem_scan_ctrls(s);
+ TAILQ_INSERT_TAIL(&r->subsystems, s, entry);
+
+ if (f && !f(s)) {
+ nvme_free_subsystem(s);
+ return -1;
+ }
+
+ return 0;
+
+free_path:
+ free(path);
+ return -1;
+}
+/*
+ * Path accessors.  NOTE(review): despite its name,
+ * nvme_path_get_subsystem() returns the path's controller (p->c); the
+ * header declares the same nvme_ctrl_t return type, so the mismatch is in
+ * the name only -- confirm before renaming.
+ */
+nvme_ctrl_t nvme_path_get_subsystem(nvme_path_t p)
+{
+ return p->c;
+}
+
+/* Return the namespace bound to path @p; may be NULL if none matched at scan. */
+nvme_ns_t nvme_path_get_ns(nvme_path_t p)
+{
+ return p->n;
+}
+
+/* Return the sysfs directory of path @p (owned by @p). */
+const char *nvme_path_get_sysfs_dir(nvme_path_t p)
+{
+ return p->sysfs_dir;
+}
+
+/* Return the kernel name of path @p (owned by @p). */
+const char *nvme_path_get_name(nvme_path_t p)
+{
+ return p->name;
+}
+
+/* Return the cached ANA state string of path @p (owned by @p). */
+const char *nvme_path_get_ana_state(nvme_path_t p)
+{
+ return p->ana_state;
+}
+
+/*
+ * Unlink path @p from its controller's and its namespace's path lists and
+ * release it.  Bug fix: p->n is left NULL by nvme_subsystem_set_path_ns()
+ * when the path name does not parse or no namespace matches, so the
+ * namespace-side removal must be guarded against a NULL dereference.
+ */
+void nvme_free_path(struct nvme_path *p)
+{
+ TAILQ_REMOVE(&p->c->paths, p, entry);
+ if (p->n)
+ TAILQ_REMOVE(&p->n->paths, p, nentry);
+ free(p->name);
+ free(p->sysfs_dir);
+ free(p->ana_state);
+ free(p);
+}
+
+/*
+ * Bind multipath path @p (named "nvmeXcYnZ") to the subsystem namespace
+ * "nvmeXnZ" it serves: link @p into that namespace's path list and set
+ * p->n.  Leaves @p unbound (p->n == NULL) if the name does not parse or
+ * no namespace matches.
+ */
+static void nvme_subsystem_set_path_ns(nvme_subsystem_t s, nvme_path_t p)
+{
+ char n_name[32] = { 0 };
+ int i, c, nsid, ret;
+ nvme_ns_t n;
+
+ ret = sscanf(nvme_path_get_name(p), "nvme%dc%dn%d", &i, &c, &nsid);
+ if (ret != 3)
+ return;
+
+ /* snprintf: never overrun n_name, however large the instance numbers. */
+ snprintf(n_name, sizeof(n_name), "nvme%dn%d", i, nsid);
+ nvme_subsystem_for_each_ns(s, n) {
+ if (!strcmp(n_name, nvme_ns_get_name(n))) {
+ TAILQ_INSERT_TAIL(&n->paths, p, nentry);
+ p->n = n;
+ /* sysfs names are unique; stop at the first match. */
+ break;
+ }
+ }
+}
+
+/*
+ * Scan one path entry @name under controller @c's sysfs dir: allocate a
+ * struct nvme_path, read its ANA attributes, try to bind it to its
+ * subsystem namespace, and append it to @c's path list.  Returns 0 on
+ * success, -1 on allocation failure (errno = ENOMEM).
+ */
+int nvme_ctrl_scan_path(struct nvme_ctrl *c, char *name)
+{
+ struct nvme_path *p;
+ char *path, *grpid;
+ int ret;
+
+ ret = asprintf(&path, "%s/%s", c->sysfs_dir, name);
+ if (ret < 0) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+ p = malloc(sizeof(*p));
+ if (!p) {
+ errno = ENOMEM;
+ goto free_path;
+ }
+ memset(p, 0, sizeof(*p));
+
+ p->c = c;
+ p->name = strdup(name);
+ /* p takes ownership of the asprintf'd path string. */
+ p->sysfs_dir = path;
+ p->ana_state = nvme_get_path_attr(p, "ana_state");
+ nvme_subsystem_set_path_ns(c->s, p);
+
+ /* ana_grpid is optional; parse and discard the attribute string. */
+ grpid = nvme_get_path_attr(p, "ana_grpid");
+ if (grpid) {
+ sscanf(grpid, "%d", &p->grpid);
+ free(grpid);
+ }
+
+ TAILQ_INSERT_TAIL(&c->paths, p, entry);
+ return 0;
+
+free_path:
+ free(path);
+ return -1;
+}
+
+/*
+ * Controller accessors.  All strings below were read from sysfs when the
+ * controller was allocated (see __nvme_ctrl_alloc); they are owned by @c
+ * and remain valid until nvme_free_ctrl().
+ */
+int nvme_ctrl_get_fd(nvme_ctrl_t c)
+{
+ return c->fd;
+}
+
+/* May be NULL for a controller scanned outside a subsystem. */
+nvme_subsystem_t nvme_ctrl_get_subsystem(nvme_ctrl_t c)
+{
+ return c->s;
+}
+
+const char *nvme_ctrl_get_name(nvme_ctrl_t c)
+{
+ return c->name;
+}
+
+const char *nvme_ctrl_get_sysfs_dir(nvme_ctrl_t c)
+{
+ return c->sysfs_dir;
+}
+
+const char *nvme_ctrl_get_subsysnqn(nvme_ctrl_t c)
+{
+ return c->subsysnqn;
+}
+
+const char *nvme_ctrl_get_address(nvme_ctrl_t c)
+{
+ return c->address;
+}
+
+const char *nvme_ctrl_get_firmware(nvme_ctrl_t c)
+{
+ return c->firmware;
+}
+
+const char *nvme_ctrl_get_model(nvme_ctrl_t c)
+{
+ return c->model;
+}
+
+const char *nvme_ctrl_get_state(nvme_ctrl_t c)
+{
+ return c->state;
+}
+
+const char *nvme_ctrl_get_numa_node(nvme_ctrl_t c)
+{
+ return c->numa_node;
+}
+
+const char *nvme_ctrl_get_queue_count(nvme_ctrl_t c)
+{
+ return c->queue_count;
+}
+
+const char *nvme_ctrl_get_serial(nvme_ctrl_t c)
+{
+ return c->serial;
+}
+
+const char *nvme_ctrl_get_sqsize(nvme_ctrl_t c)
+{
+ return c->sqsize;
+}
+
+const char *nvme_ctrl_get_transport(nvme_ctrl_t c)
+{
+ return c->transport;
+}
+
+/* Issue an Identify Controller command on @c's device fd. */
+int nvme_ctrl_identify(nvme_ctrl_t c, struct nvme_id_ctrl *id)
+{
+ return nvme_identify_ctrl(nvme_ctrl_get_fd(c), id);
+}
+
+/* First/next iteration over the controller's namespace list. */
+nvme_ns_t nvme_ctrl_first_ns(nvme_ctrl_t c)
+{
+ return TAILQ_FIRST(&c->namespaces);
+}
+
+nvme_ns_t nvme_ctrl_next_ns(nvme_ctrl_t c, nvme_ns_t n)
+{
+ if (!n)
+ return NULL;
+ return TAILQ_NEXT(n, entry);
+}
+
+/* First/next iteration over the controller's path list. */
+nvme_path_t nvme_ctrl_first_path(nvme_ctrl_t c)
+{
+ return TAILQ_FIRST(&c->paths);
+}
+
+nvme_path_t nvme_ctrl_next_path(nvme_ctrl_t c, nvme_path_t p)
+{
+ if (!p)
+ return NULL;
+ return TAILQ_NEXT(p, entry);
+}
+
+/* Ask the kernel to delete @c by writing "1" to its delete_controller attr. */
+int nvme_ctrl_disconnect(nvme_ctrl_t c)
+{
+ return nvme_set_attr(nvme_ctrl_get_sysfs_dir(c), "delete_controller", "1");
+}
+
+/*
+ * Detach controller @c from its subsystem's controller list, if it is on
+ * one, and clear the back-pointer.  A no-op list removal is skipped, so
+ * calling this on an already-unlinked controller is safe.
+ */
+void nvme_unlink_ctrl(nvme_ctrl_t c)
+{
+ struct nvme_subsystem *subsys = c->s;
+
+ if (subsys)
+ TAILQ_REMOVE(&subsys->ctrls, c, entry);
+ c->s = NULL;
+}
+
+/*
+ * Fully tear down controller @c: unlink it from its subsystem, free all
+ * of its paths and controller-scanned namespaces, close the device fd and
+ * release every cached attribute string.
+ */
+void nvme_free_ctrl(nvme_ctrl_t c)
+{
+ struct nvme_path *p, *_p;
+ struct nvme_ns *n, *_n;
+
+ nvme_unlink_ctrl(c);
+
+ /* _safe iteration: the loop bodies free the current element. */
+ nvme_ctrl_for_each_path_safe(c, p, _p)
+ nvme_free_path(p);
+
+ nvme_ctrl_for_each_ns_safe(c, n, _n)
+ nvme_ctrl_free_ns(n);
+
+ close(c->fd);
+ free(c->name);
+ free(c->sysfs_dir);
+ free(c->subsysnqn);
+ free(c->address);
+ free(c->firmware);
+ free(c->model);
+ free(c->state);
+ free(c->numa_node);
+ free(c->queue_count);
+ free(c->serial);
+ free(c->sqsize);
+ free(c->transport);
+ free(c);
+}
+
+/*
+ * Enumerate the path entries under controller @c and scan each one.
+ * Returns 0 on success or the negative result of the directory scan.
+ */
+static int nvme_ctrl_scan_paths(struct nvme_ctrl *c)
+{
+ struct dirent **paths;
+ int i, ret;
+
+ ret = nvme_scan_ctrl_namespace_paths(c, &paths);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ret; i++)
+ nvme_ctrl_scan_path(c, paths[i]->d_name);
+
+ nvme_free_dirents(paths, i);
+ return 0;
+}
+
+/*
+ * Enumerate the namespace entries under controller @c and scan each one.
+ * Returns 0 on success or the negative result of the directory scan.
+ *
+ * Bug fix: bail out when nvme_scan_ctrl_namespaces() fails.  Previously
+ * the error fell through and nvme_free_dirents() was called on an
+ * uninitialized dirent array (every sibling scan helper already checks).
+ */
+static int nvme_ctrl_scan_namespaces(struct nvme_ctrl *c)
+{
+ struct dirent **namespaces;
+ int i, ret;
+
+ ret = nvme_scan_ctrl_namespaces(c, &namespaces);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ret; i++)
+ nvme_ctrl_scan_namespace(c, namespaces[i]->d_name);
+
+ nvme_free_dirents(namespaces, i);
+ return 0;
+}
+
+/*
+ * Allocate and populate a controller object from sysfs directory @path
+ * (device name @name).  On success the controller stores @path directly
+ * (ownership transfers); on failure the caller still owns and must free
+ * @path.  Returns NULL if @path cannot be opened, on OOM, or when the
+ * character device cannot be opened.
+ */
+static nvme_ctrl_t __nvme_ctrl_alloc(const char *path, const char *name)
+{
+ DIR *d;
+ nvme_ctrl_t c;
+
+ /* Existence check only; the directory handle is not kept. */
+ d = opendir(path);
+ if (!d)
+ return NULL;
+ closedir(d);
+
+ c = malloc(sizeof(*c));
+ if (!c) {
+ errno = ENOMEM;
+ return NULL;
+ }
+ memset(c, 0, sizeof(*c));
+
+ c->fd = nvme_open(name);
+ if (c->fd < 0)
+ goto free_ctrl;
+
+ TAILQ_INIT(&c->namespaces);
+ TAILQ_INIT(&c->paths);
+ c->name = strdup(name);
+ /* Cast drops const: @path's ownership transfers to the controller. */
+ c->sysfs_dir = (char *)path;
+ c->subsysnqn = nvme_get_ctrl_attr(c, "subsysnqn");
+ c->address = nvme_get_ctrl_attr(c, "address");
+ c->firmware = nvme_get_ctrl_attr(c, "firmware_rev");
+ c->model = nvme_get_ctrl_attr(c, "model");
+ c->state = nvme_get_ctrl_attr(c, "state");
+ c->numa_node = nvme_get_ctrl_attr(c, "numa_node");
+ c->queue_count = nvme_get_ctrl_attr(c, "queue_count");
+ c->serial = nvme_get_ctrl_attr(c, "serial");
+ c->sqsize = nvme_get_ctrl_attr(c, "sqsize");
+ c->transport = nvme_get_ctrl_attr(c, "transport");
+
+ return c;
+
+free_ctrl:
+ free(c);
+ return NULL;
+}
+
+/*
+ * Build "<sysfs>/<name>" and allocate a controller from it.  The path
+ * string is freed here when allocation fails; on success the controller
+ * owns it (see __nvme_ctrl_alloc).
+ */
+static nvme_ctrl_t nvme_ctrl_alloc(const char *sysfs, const char *name)
+{
+ nvme_ctrl_t c;
+ char *path;
+ int ret;
+
+ ret = asprintf(&path, "%s/%s", sysfs, name);
+ if (ret < 0) {
+ errno = ENOMEM;
+ return NULL;
+ }
+
+ c = __nvme_ctrl_alloc(path, name);
+ if (!c)
+ free(path);
+ return c;
+}
+
+/* Scan a standalone controller under the global controller sysfs dir. */
+nvme_ctrl_t nvme_scan_ctrl(const char *name)
+{
+ return nvme_ctrl_alloc(nvme_ctrl_sysfs_dir, name);
+}
+
+/*
+ * Scan controller @name belonging to subsystem @s: allocate it, scan its
+ * namespaces and paths, and append it to @s's controller list.  Returns 0
+ * on success, -1 if the controller could not be allocated.
+ */
+int nvme_subsystem_scan_ctrl(struct nvme_subsystem *s, char *name)
+{
+ nvme_ctrl_t c;
+
+ c = nvme_ctrl_alloc(s->sysfs_dir, name);
+ if (!c)
+ return -1;
+
+ /* Set the back-pointer before scanning so children can see c->s. */
+ c->s = s;
+ nvme_ctrl_scan_namespaces(c);
+ nvme_ctrl_scan_paths(c);
+ TAILQ_INSERT_TAIL(&s->ctrls, c, entry);
+
+ return 0;
+}
+
+/*
+ * Convert a byte (offset, count) pair into a starting LBA and a 0's-based
+ * block count for NVMe I/O.  @count must be non-zero and both values must
+ * be multiples of the namespace LBA size (a power of two, see
+ * nvme_ns_init); otherwise errno is set to EINVAL and -1 is returned.
+ *
+ * Bug fix: alignment was tested with "offset & bs", which checks a single
+ * bit -- it rejects offset == bs (which is aligned) and accepts unaligned
+ * offsets smaller than bs.  The correct power-of-two alignment test is
+ * "& (bs - 1)".
+ */
+static int nvme_bytes_to_lba(nvme_ns_t n, off_t offset, size_t count, __u64 *lba,
+ __u16 *nlb)
+{
+ int bs;
+
+ bs = nvme_ns_get_lba_size(n);
+ if (!count || (offset & (bs - 1)) || (count & (bs - 1))) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ *lba = offset / bs;
+ *nlb = (count / bs) - 1;
+ return 0;
+}
+
+/*
+ * Namespace accessors.  Geometry values (lba_size, lba_count, lba_util)
+ * are cached at scan time by nvme_ns_init() and are zero if the identify
+ * command failed.
+ */
+int nvme_ns_get_fd(nvme_ns_t n)
+{
+ return n->fd;
+}
+
+nvme_subsystem_t nvme_ns_get_subsystem(nvme_ns_t n)
+{
+ return n->s;
+}
+
+/* NULL for a namespace scanned at subsystem level. */
+nvme_ctrl_t nvme_ns_get_ctrl(nvme_ns_t n)
+{
+ return n->c;
+}
+
+int nvme_ns_get_nsid(nvme_ns_t n)
+{
+ return n->nsid;
+}
+
+const char *nvme_ns_get_sysfs_dir(nvme_ns_t n)
+{
+ return n->sysfs_dir;
+}
+
+const char *nvme_ns_get_name(nvme_ns_t n)
+{
+ return n->name;
+}
+
+/* Bytes per logical block (1 << lbaf.ds of the current format). */
+int nvme_ns_get_lba_size(nvme_ns_t n)
+{
+ return n->lba_size;
+}
+
+/* Namespace size (nsze) in logical blocks. */
+uint64_t nvme_ns_get_lba_count(nvme_ns_t n)
+{
+ return n->lba_count;
+}
+
+/* Namespace utilization (nuse) in logical blocks. */
+uint64_t nvme_ns_get_lba_util(nvme_ns_t n)
+{
+ return n->lba_util;
+}
+
+/* Issue an Identify Namespace command for @n's NSID on its fd. */
+int nvme_ns_identify(nvme_ns_t n, struct nvme_id_ns *ns)
+{
+ return nvme_identify_ns(nvme_ns_get_fd(n), nvme_ns_get_nsid(n), ns);
+}
+
+/*
+ * Byte-addressed I/O wrappers: each converts (offset, count) in bytes to
+ * (slba, nlb) via nvme_bytes_to_lba() and issues the corresponding NVMe
+ * command on the namespace's fd.  All return -1 with errno = EINVAL for
+ * misaligned or zero-length requests, otherwise the command's result.
+ */
+int nvme_ns_verify(nvme_ns_t n, off_t offset, size_t count)
+{
+ __u64 slba;
+ __u16 nlb;
+
+ if (nvme_bytes_to_lba(n, offset, count, &slba, &nlb))
+ return -1;
+
+ return nvme_verify(nvme_ns_get_fd(n), nvme_ns_get_nsid(n), slba, nlb,
+ 0, 0, 0, 0);
+}
+
+int nvme_ns_write_uncorrectable(nvme_ns_t n, off_t offset, size_t count)
+{
+ __u64 slba;
+ __u16 nlb;
+
+ if (nvme_bytes_to_lba(n, offset, count, &slba, &nlb))
+ return -1;
+
+ return nvme_write_uncorrectable(nvme_ns_get_fd(n), nvme_ns_get_nsid(n),
+ slba, nlb);
+}
+
+int nvme_ns_write_zeros(nvme_ns_t n, off_t offset, size_t count)
+{
+ __u64 slba;
+ __u16 nlb;
+
+ if (nvme_bytes_to_lba(n, offset, count, &slba, &nlb))
+ return -1;
+
+ return nvme_write_zeros(nvme_ns_get_fd(n), nvme_ns_get_nsid(n), slba,
+ nlb, 0, 0, 0, 0);
+}
+
+int nvme_ns_write(nvme_ns_t n, void *buf, off_t offset, size_t count)
+{
+ __u64 slba;
+ __u16 nlb;
+
+ if (nvme_bytes_to_lba(n, offset, count, &slba, &nlb))
+ return -1;
+
+ return nvme_write(nvme_ns_get_fd(n), nvme_ns_get_nsid(n), slba, nlb, 0,
+ 0, 0, 0, 0, 0, count, buf, 0, NULL);
+}
+
+int nvme_ns_read(nvme_ns_t n, void *buf, off_t offset, size_t count)
+{
+ __u64 slba;
+ __u16 nlb;
+
+ if (nvme_bytes_to_lba(n, offset, count, &slba, &nlb))
+ return -1;
+
+ return nvme_read(nvme_ns_get_fd(n), nvme_ns_get_nsid(n), slba, nlb, 0,
+ 0, 0, 0, 0, count, buf, 0, NULL);
+}
+
+int nvme_ns_compare(nvme_ns_t n, void *buf, off_t offset, size_t count)
+{
+ __u64 slba;
+ __u16 nlb;
+
+ if (nvme_bytes_to_lba(n, offset, count, &slba, &nlb))
+ return -1;
+
+ return nvme_compare(nvme_ns_get_fd(n), nvme_ns_get_nsid(n), slba, nlb,
+ 0, 0, 0, 0, count, buf, 0, NULL);
+}
+
+/* Flush has no byte range; it applies to the whole namespace. */
+int nvme_ns_flush(nvme_ns_t n)
+{
+ return nvme_flush(nvme_ns_get_fd(n), nvme_ns_get_nsid(n));
+}
+
+/* Release a namespace object: close its device fd and free its strings. */
+static void nvme_free_ns(struct nvme_ns *n)
+{
+ close(n->fd);
+ free(n->name);
+ free(n->sysfs_dir);
+ free(n);
+}
+
+/*
+ * Remove a controller-scanned namespace from its controller's list and
+ * free it.
+ *
+ * Bug fix: controller-scanned namespaces are inserted into c->namespaces
+ * (see nvme_ctrl_scan_namespace), so removal must use n->c->namespaces.
+ * Removing with the subsystem's list head corrupts that list's tail
+ * pointer whenever @n happens to be its last element.
+ */
+void nvme_ctrl_free_ns(struct nvme_ns *n)
+{
+ TAILQ_REMOVE(&n->c->namespaces, n, entry);
+ nvme_free_ns(n);
+}
+
+/* Remove a subsystem-scanned namespace from s->namespaces and free it. */
+void nvme_subsystem_free_ns(struct nvme_ns *n)
+{
+ TAILQ_REMOVE(&n->s->namespaces, n, entry);
+ nvme_free_ns(n);
+}
+
+/*
+ * Cache namespace geometry from an Identify Namespace command: block size
+ * derived from the currently formatted LBA format (1 << ds), size (nsze)
+ * and utilization (nuse).  Fields stay zeroed if the identify fails.
+ */
+static void nvme_ns_init(struct nvme_ns *n)
+{
+ struct nvme_id_ns ns = { 0 };
+
+ if (nvme_ns_identify(n, &ns) != 0)
+ return;
+
+ n->lba_size = 1 << ns.lbaf[ns.flbas & NVME_NS_FLBAS_LBA_MASK].ds;
+ n->lba_count = le64_to_cpu(ns.nsze);
+ n->lba_util = le64_to_cpu(ns.nuse);
+}
+
+/*
+ * Common namespace scan: build the sysfs path, open the block device,
+ * read the NSID and cache the geometry.  Returns the new namespace, or
+ * NULL on failure with everything allocated so far released.
+ *
+ * Bug fix: the free_ns error path now frees n->name too; the strdup'd
+ * name was previously leaked when nvme_open() failed.
+ */
+static struct nvme_ns *__nvme_scan_namespace(const char *sysfs_dir, char *name)
+{
+ struct nvme_ns *n;
+ char *path;
+ int ret;
+
+ ret = asprintf(&path, "%s/%s", sysfs_dir, name);
+ if (ret < 0) {
+ errno = ENOMEM;
+ return NULL;
+ }
+
+ n = malloc(sizeof(*n));
+ if (!n) {
+ errno = ENOMEM;
+ goto free_path;
+ }
+ memset(n, 0, sizeof(*n));
+
+ n->name = strdup(name);
+ n->sysfs_dir = path;
+ n->fd = nvme_open(name);
+ if (n->fd < 0)
+ goto free_ns;
+
+ n->nsid = nvme_get_nsid(n->fd);
+ if (n->nsid < 0)
+ goto close_fd;
+
+ TAILQ_INIT(&n->paths);
+ nvme_ns_init(n);
+
+ return n;
+
+close_fd:
+ close(n->fd);
+free_ns:
+ /* free(NULL) is safe if the strdup itself failed. */
+ free(n->name);
+ free(n);
+free_path:
+ free(path);
+ return NULL;
+}
+
+/*
+ * Scan namespace @name under controller @c and link it into the
+ * controller's namespace list; the namespace inherits the controller's
+ * subsystem pointer.  Returns 0 on success, -1 on failure.
+ */
+int nvme_ctrl_scan_namespace(struct nvme_ctrl *c, char *name)
+{
+ struct nvme_ns *n;
+
+ n = __nvme_scan_namespace(c->sysfs_dir, name);
+ if (!n)
+ return -1;
+
+ n->s = c->s;
+ n->c = c;
+ TAILQ_INSERT_TAIL(&c->namespaces, n, entry);
+ return 0;
+}
+
+/*
+ * Scan namespace @name under subsystem @s and link it into the
+ * subsystem's namespace list (n->c stays NULL).  Returns 0 on success,
+ * -1 on failure.
+ */
+int nvme_subsystem_scan_namespace(struct nvme_subsystem *s, char *name)
+{
+ struct nvme_ns *n;
+
+ n = __nvme_scan_namespace(s->sysfs_dir, name);
+ if (!n)
+ return -1;
+
+ n->s = s;
+ TAILQ_INSERT_TAIL(&s->namespaces, n, entry);
+ return 0;
+}
--- /dev/null
+#ifndef _LIBNVME_TREE_H
+#define _LIBNVME_TREE_H
+
+#include <stdbool.h>
+#include <stddef.h>
+
+#include <sys/types.h>
+
+#include "ioctl.h"
+#include "util.h"
+
+extern const char *nvme_ctrl_sysfs_dir;
+extern const char *nvme_subsys_sysfs_dir;
+
+typedef struct nvme_ns *nvme_ns_t;
+typedef struct nvme_path *nvme_path_t;
+typedef struct nvme_ctrl *nvme_ctrl_t;
+typedef struct nvme_subsystem *nvme_subsystem_t;
+typedef struct nvme_root *nvme_root_t;
+
+nvme_subsystem_t nvme_first_subsystem(nvme_root_t r);
+nvme_subsystem_t nvme_next_subsystem(nvme_root_t r, nvme_subsystem_t s);
+
+nvme_ns_t nvme_ctrl_first_ns(nvme_ctrl_t c);
+nvme_ns_t nvme_ctrl_next_ns(nvme_ctrl_t c, nvme_ns_t n);
+
+nvme_path_t nvme_ctrl_first_path(nvme_ctrl_t c);
+nvme_path_t nvme_ctrl_next_path(nvme_ctrl_t c, nvme_path_t p);
+
+nvme_ctrl_t nvme_subsystem_first_ctrl(nvme_subsystem_t s);
+nvme_ctrl_t nvme_subsystem_next_ctrl(nvme_subsystem_t s, nvme_ctrl_t c);
+
+nvme_ns_t nvme_subsystem_first_ns(nvme_subsystem_t s);
+nvme_ns_t nvme_subsystem_next_ns(nvme_subsystem_t s, nvme_ns_t n);
+
+/*
+ * List iteration helpers.  The *_safe variants pre-fetch the next element
+ * so the current one may be removed or freed inside the loop body.
+ */
+#define nvme_for_each_subsystem_safe(r, s, _s) \
+ for (s = nvme_first_subsystem(r), \
+ _s = nvme_next_subsystem(r, s); \
+ s != NULL; \
+ s = _s, _s = nvme_next_subsystem(r, s))
+
+#define nvme_for_each_subsystem(r, s) \
+ for (s = nvme_first_subsystem(r); s != NULL; \
+ s = nvme_next_subsystem(r, s))
+
+#define nvme_subsystem_for_each_ctrl_safe(s, c, _c) \
+ for (c = nvme_subsystem_first_ctrl(s), \
+ _c = nvme_subsystem_next_ctrl(s, c); \
+ c != NULL; \
+ c = _c, _c = nvme_subsystem_next_ctrl(s, c))
+
+#define nvme_subsystem_for_each_ctrl(s, c) \
+ for (c = nvme_subsystem_first_ctrl(s); c != NULL; \
+ c = nvme_subsystem_next_ctrl(s, c))
+
+#define nvme_ctrl_for_each_ns_safe(c, n, _n) \
+ for (n = nvme_ctrl_first_ns(c), \
+ _n = nvme_ctrl_next_ns(c, n); \
+ n != NULL; \
+ n = _n, _n = nvme_ctrl_next_ns(c, n))
+
+#define nvme_ctrl_for_each_ns(c, n) \
+ for (n = nvme_ctrl_first_ns(c); n != NULL; \
+ n = nvme_ctrl_next_ns(c, n))
+
+#define nvme_ctrl_for_each_path_safe(c, p, _p) \
+ for (p = nvme_ctrl_first_path(c), \
+ _p = nvme_ctrl_next_path(c, p); \
+ p != NULL; \
+ p = _p, _p = nvme_ctrl_next_path(c, p))
+
+#define nvme_ctrl_for_each_path(c, p) \
+ for (p = nvme_ctrl_first_path(c); p != NULL; \
+ p = nvme_ctrl_next_path(c, p))
+
+#define nvme_subsystem_for_each_ns_safe(s, n, _n) \
+ for (n = nvme_subsystem_first_ns(s), \
+ _n = nvme_subsystem_next_ns(s, n); \
+ n != NULL; \
+ n = _n, _n = nvme_subsystem_next_ns(s, n))
+
+#define nvme_subsystem_for_each_ns(s, n) \
+ for (n = nvme_subsystem_first_ns(s); n != NULL; \
+ n = nvme_subsystem_next_ns(s, n))
+
+int nvme_ns_get_fd(nvme_ns_t n);
+int nvme_ns_get_nsid(nvme_ns_t n);
+int nvme_ns_get_lba_size(nvme_ns_t n);
+uint64_t nvme_ns_get_lba_count(nvme_ns_t n);
+uint64_t nvme_ns_get_lba_util(nvme_ns_t n);
+const char *nvme_ns_get_sysfs_dir(nvme_ns_t n);
+const char *nvme_ns_get_name(nvme_ns_t n);
+nvme_subsystem_t nvme_ns_get_subsystem(nvme_ns_t n);
+nvme_ctrl_t nvme_ns_get_ctrl(nvme_ns_t n);
+
+/**
+ * nvme_ns_read() -
+ */
+int nvme_ns_read(nvme_ns_t n, void *buf, off_t offset, size_t count);
+
+/**
+ * nvme_ns_write() -
+ */
+int nvme_ns_write(nvme_ns_t n, void *buf, off_t offset, size_t count);
+
+/**
+ * nvme_ns_verify() -
+ */
+int nvme_ns_verify(nvme_ns_t n, off_t offset, size_t count);
+
+/**
+ * nvme_ns_compare() -
+ */
+int nvme_ns_compare(nvme_ns_t n, void *buf, off_t offset, size_t count);
+
+/**
+ * nvme_ns_write_zeros() -
+ */
+int nvme_ns_write_zeros(nvme_ns_t n, off_t offset, size_t count);
+
+/**
+ * nvme_ns_write_uncorrectable() -
+ */
+int nvme_ns_write_uncorrectable(nvme_ns_t n, off_t offset, size_t count);
+
+/**
+ * nvme_ns_flush() -
+ */
+int nvme_ns_flush(nvme_ns_t n);
+
+/**
+ * nvme_ns_identify() -
+ */
+int nvme_ns_identify(nvme_ns_t n, struct nvme_id_ns *ns);
+
+const char *nvme_path_get_name(nvme_path_t p);
+const char *nvme_path_get_sysfs_dir(nvme_path_t p);
+const char *nvme_path_get_ana_state(nvme_path_t p);
+nvme_ctrl_t nvme_path_get_subsystem(nvme_path_t p);
+nvme_ns_t nvme_path_get_ns(nvme_path_t p);
+
+int nvme_ctrl_get_fd(nvme_ctrl_t c);
+const char *nvme_ctrl_get_name(nvme_ctrl_t c);
+const char *nvme_ctrl_get_sysfs_dir(nvme_ctrl_t c);
+const char *nvme_ctrl_get_address(nvme_ctrl_t c);
+const char *nvme_ctrl_get_firmware(nvme_ctrl_t c);
+const char *nvme_ctrl_get_model(nvme_ctrl_t c);
+const char *nvme_ctrl_get_state(nvme_ctrl_t c);
+const char *nvme_ctrl_get_numa_node(nvme_ctrl_t c);
+const char *nvme_ctrl_get_queue_count(nvme_ctrl_t c);
+const char *nvme_ctrl_get_serial(nvme_ctrl_t c);
+const char *nvme_ctrl_get_sqsize(nvme_ctrl_t c);
+const char *nvme_ctrl_get_transport(nvme_ctrl_t c);
+const char *nvme_ctrl_get_nqn(nvme_ctrl_t c);
+const char *nvme_ctrl_get_subsysnqn(nvme_ctrl_t c);
+nvme_subsystem_t nvme_ctrl_get_subsystem(nvme_ctrl_t c);
+
+int nvme_ctrl_identify(nvme_ctrl_t c, struct nvme_id_ctrl *id);
+int nvme_ctrl_disconnect(nvme_ctrl_t c);
+nvme_ctrl_t nvme_scan_ctrl(const char *name);
+
+void nvme_free_ctrl(struct nvme_ctrl *c);
+void nvme_unlink_ctrl(struct nvme_ctrl *c);
+
+const char *nvme_subsystem_get_nqn(nvme_subsystem_t s);
+const char *nvme_subsystem_get_sysfs_dir(nvme_subsystem_t s);
+const char *nvme_subsystem_get_name(nvme_subsystem_t s);
+
+/*
+ * Optional filter applied to each subsystem during a scan; return false
+ * to drop that subsystem from the resulting topology.
+ */
+typedef bool (*nvme_scan_filter_t)(nvme_subsystem_t);
+nvme_root_t nvme_scan_filter(nvme_scan_filter_t f);
+
+/*
+ * Fix: declare as (void).  In C an empty parameter list leaves the
+ * arguments unspecified instead of declaring a zero-argument prototype.
+ */
+nvme_root_t nvme_scan(void);
+void nvme_refresh_topology(nvme_root_t r);
+void nvme_reset_topology(nvme_root_t r);
+void nvme_free_tree(nvme_root_t r);
+
+char *nvme_get_subsys_attr(nvme_subsystem_t s, const char *attr);
+char *nvme_get_ctrl_attr(nvme_ctrl_t c, const char *attr);
+char *nvme_get_ns_attr(nvme_ns_t n, const char *attr);
+char *nvme_get_path_attr(nvme_path_t p, const char *attr);
+
+#endif /* _LIBNVME_TREE_H */
--- /dev/null
+#ifndef _LIBNVME_TYPES_H
+#define _LIBNVME_TYPES_H
+
+#include <endian.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <linux/types.h>
+
+#ifdef __CHECKER__
+#define __force __attribute__((force))
+#else
+#define __force
+#endif
+
+/*
+ * Little-endian <-> CPU byte-order converters for the wire-format fields
+ * below.  The __force cast silences sparse when built with __CHECKER__.
+ */
+static inline __le16 cpu_to_le16(uint16_t x) { return (__force __le16)htole16(x); }
+static inline __le32 cpu_to_le32(uint32_t x) { return (__force __le32)htole32(x); }
+static inline __le64 cpu_to_le64(uint64_t x) { return (__force __le64)htole64(x); }
+static inline uint16_t le16_to_cpu(__le16 x) { return le16toh((__force __u16)x); }
+static inline uint32_t le32_to_cpu(__le32 x) { return le32toh((__force __u32)x); }
+static inline uint64_t le64_to_cpu(__le64 x) { return le64toh((__force __u64)x); }
+
+/**
+ * enum nvme_constants -
+ * @NVME_NSID_ALL:
+ * @NVME_NSID_NONE:
+ * @NVME_UUID_NONE:
+ * @NVME_CNTLID_NONE:
+ * @NVME_NVMSETID_NONE:
+ * @NVME_LOG_LSP_NONE:
+ * @NVME_LOG_LSI_NONE:
+ * @NVME_IDENTIFY_DATA_SIZE:
+ * @NVME_ID_NVMSET_LIST_MAX:
+ * @NVME_ID_UUID_LIST_MAX:
+ * @NVME_ID_CTRL_LIST_MAX:
+ * @NVME_ID_NS_LIST_MAX:
+ * @NVME_ID_SECONDARY_CTRL_MAX:
+ * @NVME_FEAT_LBA_RANGE_MAX:
+ * @NVME_LOG_ST_MAX_RESULTS:
+ * @NVME_DSM_MAX_RANGES:
+ */
+enum nvme_constants {
+ NVME_NSID_ALL = 0xffffffff,
+ NVME_NSID_NONE = 0,
+ NVME_UUID_NONE = 0,
+ NVME_CNTLID_NONE = 0,
+ NVME_NVMSETID_NONE = 0,
+ NVME_LOG_LSP_NONE = 0,
+ NVME_LOG_LSI_NONE = 0,
+ NVME_IDENTIFY_DATA_SIZE = 4096,
+ NVME_ID_NVMSET_LIST_MAX = 31,
+ NVME_ID_UUID_LIST_MAX = 127,
+ NVME_ID_CTRL_LIST_MAX = 2047,
+ NVME_ID_NS_LIST_MAX = 1024,
+ NVME_ID_SECONDARY_CTRL_MAX = 127,
+ NVME_FEAT_LBA_RANGE_MAX = 64,
+ NVME_LOG_ST_MAX_RESULTS = 20,
+ NVME_DSM_MAX_RANGES = 256,
+};
+
+/**
+ * DOC: NVMe controller registers/properties
+ */
+
+/**
+ * enum nvme_registers -
+ * @NVME_REG_CAP: Controller Capabilities
+ * @NVME_REG_VS: Version
+ * @NVME_REG_INTMS: Interrupt Mask Set
+ * @NVME_REG_INTMC: Interrupt Mask Clear
+ * @NVME_REG_CC: Controller Configuration
+ * @NVME_REG_CSTS: Controller Status
+ * @NVME_REG_NSSR: NVM Subsystem Reset
+ * @NVME_REG_AQA: Admin Queue Attributes
+ * @NVME_REG_ASQ: Admin SQ Base Address
+ * @NVME_REG_ACQ: Admin CQ Base Address
+ * @NVME_REG_CMBLOC: Controller Memory Buffer Location
+ * @NVME_REG_CMBSZ: Controller Memory Buffer Size
+ * @NVME_REG_BPINFO: Boot Partition Information
+ * @NVME_REG_BPRSEL: Boot Partition Read Select
+ * @NVME_REG_BPMBL: Boot Partition Memory Buffer Location
+ * @NVME_REG_CMBMSC: Controller Memory Buffer Memory Space Control
+ * @NVME_REG_CMBSTS: Controller Memory Buffer Status
+ * @NVME_REG_PMRCAP: Persistent Memory Capabilities
+ * @NVME_REG_PMRCTL: Persistent Memory Region Control
+ * @NVME_REG_PMRSTS: Persistent Memory Region Status
+ * @NVME_REG_PMREBS: Persistent Memory Region Elasticity Buffer Size
+ * @NVME_REG_PMRSWTP: Persistent Memory Region Sustained Write Throughput
+ * @NVME_REG_PMRMSC: Persistent Memory Region Controller Memory Space Control
+ * @NVME_REG_DBS: SQ 0 Tail Doorbell
+ */
+enum nvme_registers {
+ NVME_REG_CAP = 0x0000,
+ NVME_REG_VS = 0x0008,
+ NVME_REG_INTMS = 0x000c,
+ NVME_REG_INTMC = 0x0010,
+ NVME_REG_CC = 0x0014,
+ NVME_REG_CSTS = 0x001c,
+ NVME_REG_NSSR = 0x0020,
+ NVME_REG_AQA = 0x0024,
+ NVME_REG_ASQ = 0x0028,
+ NVME_REG_ACQ = 0x0030,
+ NVME_REG_CMBLOC = 0x0038,
+ NVME_REG_CMBSZ = 0x003c,
+ NVME_REG_BPINFO = 0x0040,
+ NVME_REG_BPRSEL = 0x0044,
+ NVME_REG_BPMBL = 0x0048,
+ NVME_REG_CMBMSC = 0x0050,
+ NVME_REG_CMBSTS = 0x0058,
+ NVME_REG_PMRCAP = 0x0e00,
+ NVME_REG_PMRCTL = 0x0e04,
+ NVME_REG_PMRSTS = 0x0e08,
+ NVME_REG_PMREBS = 0x0e0c,
+ NVME_REG_PMRSWTP= 0x0e10,
+ NVME_REG_PMRMSC = 0x0e14,
+ NVME_REG_DBS = 0x1000,
+};
+
+/* Field extractors for the 64-bit CAP controller register. */
+#define NVME_CAP_MQES(cap) ((cap) & 0xffff)
+#define NVME_CAP_CQR(cap) (((cap) >> 16) & 0x1)
+#define NVME_CAP_AMS(cap) (((cap) >> 17) & 0x3)
+#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
+#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1)
+#define NVME_CAP_CSS(cap) (((cap) >> 37) & 0xff)
+#define NVME_CAP_BPS(cap) (((cap) >> 45) & 0x1)
+#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
+#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define NVME_CAP_CMBS(cap) (((cap) >> 57) & 1)
+#define NVME_CAP_PMRS(cap) (((cap) >> 56) & 1)
+
+/* Field extractors for the CMBLOC/CMBSZ controller registers. */
+#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
+#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
+#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff)
+#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf)
+
+/* CMBSZ capability flag bits. */
+#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10)
+#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8)
+#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4)
+#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2)
+#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CC_ENABLE = 1 << 0,
+ NVME_CC_CSS_NVM = 0 << 4,
+ NVME_CC_EN_SHIFT = 0,
+ NVME_CC_CSS_SHIFT = 4,
+ NVME_CC_MPS_SHIFT = 7,
+ NVME_CC_AMS_SHIFT = 11,
+ NVME_CC_SHN_SHIFT = 14,
+ NVME_CC_IOSQES_SHIFT = 16,
+ NVME_CC_IOCQES_SHIFT = 20,
+ NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT,
+ NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT,
+ NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT,
+ NVME_CC_SHN_NONE = 0 << NVME_CC_SHN_SHIFT,
+ NVME_CC_SHN_NORMAL = 1 << NVME_CC_SHN_SHIFT,
+ NVME_CC_SHN_ABRUPT = 2 << NVME_CC_SHN_SHIFT,
+ NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
+ NVME_CSTS_RDY = 1 << 0,
+ NVME_CSTS_CFS = 1 << 1,
+ NVME_CSTS_NSSRO = 1 << 4,
+ NVME_CSTS_PP = 1 << 5,
+ NVME_CSTS_SHST_NORMAL = 0 << 2,
+ NVME_CSTS_SHST_OCCUR = 1 << 2,
+ NVME_CSTS_SHST_CMPLT = 2 << 2,
+ NVME_CSTS_SHST_MASK = 3 << 2,
+};
+
+/*
+ * is_64bit_reg() - Checks if offset of the controller register is 64bit or not.
+ * @offset: Offset of controller register field in bytes
+ *
+ * This function does not care about transport so that the offset is not going
+ * to be checked inside of this function for the unsupported fields in a
+ * specific transport. For example, BPMBL(Boot Partition Memory Buffer
+ * Location) register is not supported by fabrics, but it can be checked here.
+ *
+ * Returns true if given offset is 64bit register, otherwise it returns false.
+ */
+static inline bool is_64bit_reg(__u32 offset)
+{
+ switch (offset) {
+ case NVME_REG_CAP:
+ case NVME_REG_ASQ:
+ case NVME_REG_ACQ:
+ case NVME_REG_BPMBL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * DOC: NVMe Identify
+ */
+
+/**
+ * struct nvme_id_psd -
+ * @mp:
+ * @flags:
+ * @enlat:
+ * @exlat:
+ * @rrt:
+ * @rrl:
+ * @rwt:
+ * @rwl:
+ * @idlp:
+ * @ips:
+ * @actp:
+ * @apw:
+ * @aps:
+ */
+struct nvme_id_psd {
+ __le16 mp;
+ __u8 rsvd2;
+ __u8 flags;
+ __le32 enlat;
+ __le32 exlat;
+ __u8 rrt;
+ __u8 rrl;
+ __u8 rwt;
+ __u8 rwl;
+ __le16 idlp;
+ __u8 ips;
+ __u8 rsvd19;
+ __le16 actp;
+ __u8 apw;
+ __u8 aps;
+ __u8 rsvd23[8];
+};
+
+/**
+ * nvme_psd_power_scale() - extract the power scale, which occupies
+ * bits 7:6 (the upper two bits) of @ps
+ */
+static inline unsigned nvme_psd_power_scale(__u8 ps)
+{
+ return ps >> 6;
+}
+
+/**
+ * enum -
+ * @NVME_PSD_FLAGS_MAX_POWER_SCALE:
+ * @NVME_PSD_FLAGS_NON_OP_STATE:
+ * @NVME_PSD_RELATIVE_MASK:
+ * @NVME_PSD_APW_MASK:
+ */
+enum {
+ NVME_PSD_FLAGS_MAX_POWER_SCALE = 1 << 0,
+ NVME_PSD_FLAGS_NON_OP_STATE = 1 << 1,
+ NVME_PSD_RELATIVE_MASK = 0x1f,
+ NVME_PSD_APW_MASK = 0x7,
+};
+
+/**
+ * struct nvme_id_ctrl - Identify Controller data structure
+ * @vid: Vendor ID
+ * @ssvid: Subsystem Vendor Id
+ * @sn: Serial Number
+ * @mn: Model Number
+ * @fr: Firmware Revision
+ * @rab: Recommended Arbitration Burst
+ * @ieee: IEEE
+ * @cmic: Controller Multipathing Capabilities
+ * @mdts: Max Data Transfer Size
+ * @cntlid: Controller Identifier
+ * @ver: Version
+ * @rtd3r: Runtime D3 Resume
+ * @rtd3e: Runtime D3 Exit
+ * @oaes: Optional Async Events Supported
+ * @ctratt: Controller Attributes
+ * @rrls: Read Recovery Levels
+ * @cntrltype: Controller Type
+ * @fguid: FRU GUID
+ * @crdt1: Controller Retry Delay 1
+ * @crdt2: Controller Retry Delay 2
+ * @crdt3: Controller Retry Delay 3
+ * @nvmsr:
+ * @vwci:
+ * @mec:
+ * @oacs: Optional Admin Commands Supported
+ * @acl: Abort Command Limit
+ * @aerl: Async Event Request Limit
+ * @frmw:
+ * @lpa: Log Page Attributes
+ * @elpe:
+ * @npss: Number of Power States Supported
+ * @avscc:
+ * @apsta:
+ * @wctemp:
+ * @cctemp:
+ * @mtfa:
+ * @hmpre:
+ * @hmmin:
+ * @tnvmcap:
+ * @unvmcap:
+ * @rpmbs:
+ * @edstt:
+ * @dsto:
+ * @fwug:
+ * @kas:
+ * @hctma:
+ * @mntmt:
+ * @mxtmt:
+ * @sanicap:
+ * @hmminds:
+ * @hmmaxd:
+ * @nsetidmax:
+ * @endgidmax:
+ * @anatt:
+ * @anacap:
+ * @anagrpmax:
+ * @nanagrpid:
+ * @pels:
+ * @sqes:
+ * @cqes:
+ * @maxcmd:
+ * @nn:
+ * @oncs:
+ * @fuses:
+ * @fna:
+ * @vwc:
+ * @awun:
+ * @awupf:
+ * @nvscc:
+ * @nwpc:
+ * @acwu:
+ * @sgls:
+ * @mnan:
+ * @subnqn:
+ * @ioccsz:
+ * @iorcsz:
+ * @icdoff:
+ * @fcatt:
+ * @msdbd:
+ * @ofcs:
+ * @psd:
+ * @vs:
+ */
+struct nvme_id_ctrl {
+ __le16 vid;
+ __le16 ssvid;
+ char sn[20];
+ char mn[40];
+ char fr[8];
+ __u8 rab;
+ __u8 ieee[3];
+ __u8 cmic;
+ __u8 mdts;
+ __le16 cntlid;
+ __le32 ver;
+ __le32 rtd3r;
+ __le32 rtd3e;
+ __le32 oaes;
+ __le32 ctratt;
+ __le16 rrls;
+ __u8 rsvd102[9];
+ __u8 cntrltype;
+ __u8 fguid[16];
+ __le16 crdt1;
+ __le16 crdt2;
+ __le16 crdt3;
+ __u8 rsvd134[119];
+ __u8 nvmsr;
+ __u8 vwci;
+ __u8 mec;
+ __le16 oacs;
+ __u8 acl;
+ __u8 aerl;
+ __u8 frmw;
+ __u8 lpa;
+ __u8 elpe;
+ __u8 npss;
+ __u8 avscc;
+ __u8 apsta;
+ __le16 wctemp;
+ __le16 cctemp;
+ __le16 mtfa;
+ __le32 hmpre;
+ __le32 hmmin;
+ __u8 tnvmcap[16];
+ __u8 unvmcap[16];
+ __le32 rpmbs;
+ __le16 edstt;
+ __u8 dsto;
+ __u8 fwug;
+ __le16 kas;
+ __le16 hctma;
+ __le16 mntmt;
+ __le16 mxtmt;
+ __le32 sanicap;
+ __le32 hmminds;
+ __le16 hmmaxd;
+ __le16 nsetidmax;
+ __le16 endgidmax;
+ __u8 anatt;
+ __u8 anacap;
+ __le32 anagrpmax;
+ __le32 nanagrpid;
+ __le32 pels;
+ __u8 rsvd356[156];
+ __u8 sqes;
+ __u8 cqes;
+ __le16 maxcmd;
+ __le32 nn;
+ __le16 oncs;
+ __le16 fuses;
+ __u8 fna;
+ __u8 vwc;
+ __le16 awun;
+ __le16 awupf;
+ __u8 nvscc;
+ __u8 nwpc;
+ __le16 acwu;
+ __u8 rsvd534[2];
+ __le32 sgls;
+ __le32 mnan;
+ __u8 rsvd544[224];
+ char subnqn[256];
+ __u8 rsvd1024[768];
+
+ /* Fabrics Only */
+ __le32 ioccsz;
+ __le32 iorcsz;
+ __le16 icdoff;
+ __u8 fcatt;
+ __u8 msdbd;
+ __le16 ofcs;
+ __u8 rsvd1806[242];
+
+ struct nvme_id_psd psd[32];
+ __u8 vs[1024];
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_CMIC_MULTI_PORT = 1 << 0,
+ NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1,
+ NVME_CTRL_CMIC_MULTI_SRIOV = 1 << 2,
+ NVME_CTRL_CMIC_MULTI_ANA_REPORTING = 1 << 3,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_OAES_NA = 1 << 8,
+ NVME_CTRL_OAES_FA = 1 << 9,
+ NVME_CTRL_OAES_ANA = 1 << 11,
+ NVME_CTRL_OAES_PLEA = 1 << 12,
+ NVME_CTRL_OAES_LBAS = 1 << 13,
+ NVME_CTRL_OAES_EGE = 1 << 14,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_CTRATT_128_ID = 1 << 0,
+ NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1,
+ NVME_CTRL_CTRATT_NVM_SETS = 1 << 2,
+ NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3,
+ NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4,
+ NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5,
+ NVME_CTRL_CTRATT_TBKAS = 1 << 6,
+ NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7,
+ NVME_CTRL_CTRATT_SQ_ASSOCIATIONS = 1 << 8,
+ NVME_CTRL_CTRATT_UUID_LIST = 1 << 9,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_CNTRLTYPE_RESERVED = 0,
+ NVME_CTRL_CNTRLTYPE_IO = 1,
+ NVME_CTRL_CNTRLTYPE_DISCOVERY = 2,
+ NVME_CTRL_CNTRLTYPE_ADMIN = 3,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_NVMSR_NVMESD = 1 << 0,
+ NVME_CTRL_NVMSR_NVMEE = 1 << 1,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_VWCI_VWCR = 0x7f << 0,
+ NVME_CTRL_VWCI_VWCRV = 1 << 7,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_MEC_SMBUSME = 1 << 0,
+ NVME_CTRL_MEC_PCIEME = 1 << 1,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_OACS_SECURITY = 1 << 0,
+ NVME_CTRL_OACS_FORMAT = 1 << 1,
+ NVME_CTRL_OACS_FW = 1 << 2,
+ NVME_CTRL_OACS_NS_MGMT = 1 << 3,
+ NVME_CTRL_OACS_SELF_TEST = 1 << 4,
+ NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
+ NVME_CTRL_OACS_NVME_MI = 1 << 6,
+ NVME_CTRL_OACS_VIRT_MGMT = 1 << 7,
+ NVME_CTRL_OACS_DBBUF_CFG = 1 << 8,
+ NVME_CTRL_OACS_LBA_STATUS = 1 << 9,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_FRMW_1ST_RO = 1 << 0,
+ NVME_CTRL_FRMW_NR_SLOTS = 3 << 1,
+ NVME_CTRL_FRMW_FW_ACT_NO_RESET = 1 << 4,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_LPA_SMART_PER_NS = 1 << 0,
+ NVME_CTRL_LPA_CMD_EFFECTS = 1 << 1,
+ NVME_CTRL_LPA_EXTENDED = 1 << 2,
+ NVME_CTRL_LPA_TELEMETRY = 1 << 3,
+ NVME_CTRL_LPA_PERSETENT_EVENT = 1 << 4,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_AVSCC_AVS = 1 << 0,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_APSTA_APST = 1 << 0,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_RPMBS_NR_UNITS = 7 << 0,
+ NVME_CTRL_RPMBS_AUTH_METHOD = 7 << 3,
+ NVME_CTRL_RPMBS_TOTAL_SIZE = 255 << 16,
+ NVME_CTRL_RPMBS_ACCESS_SIZE = 255 << 24,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_DSTO_ONE_DST = 1 << 0,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_HCTMA_HCTM = 1 << 0,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_SANICAP_CES = 1 << 0,
+ NVME_CTRL_SANICAP_BES = 1 << 1,
+ NVME_CTRL_SANICAP_OWS = 1 << 2,
+ NVME_CTRL_SANICAP_NDI = 1 << 29,
+ NVME_CTRL_SANICAP_NODMMAS = 3 << 30,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_ANACAP_OPT = 1 << 0,
+ NVME_CTRL_ANACAP_NON_OPT = 1 << 1,
+ NVME_CTRL_ANACAP_INACCESSIBLE = 1 << 2,
+ NVME_CTRL_ANACAP_PERSISTENT_LOSS = 1 << 3,
+ NVME_CTRL_ANACAP_CHANGE = 1 << 4,
+ NVME_CTRL_ANACAP_GRPID_NO_CHG = 1 << 6,
+ NVME_CTRL_ANACAP_GRPID_MGMT = 1 << 7,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_SQES_MIN = 15 << 0,
+ NVME_CTRL_SQES_MAX = 15 << 4,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_CQES_MIN = 15 << 0,
+ NVME_CTRL_CQES_MAX = 15 << 4,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_ONCS_COMPARE = 1 << 0,
+ NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
+ NVME_CTRL_ONCS_DSM = 1 << 2,
+ NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
+ NVME_CTRL_ONCS_SAVE_FEATURES = 1 << 4,
+ NVME_CTRL_ONCS_RESERVATIONS = 1 << 5,
+ NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
+ NVME_CTRL_ONCS_VERIFY = 1 << 7,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_FUSES_COMPARE_AND_WRITE = 1 << 0,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_FNA_FMT_ALL_NAMESPACES = 1 << 0,
+ NVME_CTRL_FNA_SEC_ALL_NAMESPACES = 1 << 1,
+ NVME_CTRL_FNA_CRYPTO_ERASE = 1 << 2,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_VWC_PRESENT = 1 << 0,
+ NVME_CTRL_VWC_FLUSH = 3 << 1,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_NVSCC_FMT = 1 << 0,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_NWPC_WRITE_PROTECT = 1 << 0,
+ NVME_CTRL_NWPC_WRITE_PROTECT_POWER_CYCLE= 1 << 1,
+ NVME_CTRL_NWPC_WRITE_PROTECT_PERMANENT = 1 << 2,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_SGLS_SUPPORTED = 3 << 0,
+ NVME_CTRL_SGLS_KEYED = 1 << 2,
+ NVME_CTRL_SGLS_BIT_BUCKET = 1 << 16,
+ NVME_CTRL_SGLS_MPTR_BYTE_ALIGNED = 1 << 17,
+ NVME_CTRL_SGLS_OVERSIZE = 1 << 18,
+ NVME_CTRL_SGLS_MPTR_SGL = 1 << 19,
+ NVME_CTRL_SGLS_OFFSET = 1 << 20,
+ NVME_CTRL_SGLS_TPORT = 1 << 21,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_FCATT_DYNAMIC = 1 << 0,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CTRL_OFCS_DISCONNECT = 1 << 0,
+};
+
+/**
+ * struct nvme_lbaf - LBA Format Data Structure
+ * @ms: Metadata Size, the number of metadata bytes provided per LBA
+ * @ds: LBA Data Size, the logical block data size reported as a power of two
+ * @rp: Relative Performance of this LBA format relative to the other
+ *      supported formats; see &NVME_LBAF_RP_BEST
+ */
+struct nvme_lbaf {
+ __le16 ms;
+ __u8 ds;
+ __u8 rp;
+};
+
+/**
+ * enum - Relative Performance values for the rp field of &struct nvme_lbaf
+ * @NVME_LBAF_RP_BEST: Best performance
+ * @NVME_LBAF_RP_BETTER: Better performance
+ * @NVME_LBAF_RP_GOOD: Good performance
+ * @NVME_LBAF_RP_DEGRADED: Degraded performance
+ * @NVME_LBAF_RP_MASK: Mask to extract the relative performance value from
+ *      the rp field
+ */
+enum {
+ NVME_LBAF_RP_BEST = 0,
+ NVME_LBAF_RP_BETTER = 1,
+ NVME_LBAF_RP_GOOD = 2,
+ NVME_LBAF_RP_DEGRADED = 3,
+ NVME_LBAF_RP_MASK = 3,
+};
+
+/**
+ * struct nvme_id_ns - Identify Namespace data structure
+ * @nsze: Namespace Size, total size of the namespace in logical blocks
+ * @ncap: Namespace Capacity, max number of logical blocks that may be
+ *      allocated at any time
+ * @nuse: Namespace Utilization, number of logical blocks currently allocated
+ * @nsfeat: Namespace Features; see &NVME_NS_FEAT_THIN
+ * @nlbaf: Number of LBA Formats supported (0's based)
+ * @flbas: Formatted LBA Size currently in use; see &NVME_NS_FLBAS_LBA_MASK
+ * @mc: Metadata Capabilities; see &NVME_NS_MC_EXTENDED
+ * @dpc: End-to-end Data Protection Capabilities; see &NVME_NS_DPC_PI_TYPE1
+ * @dps: End-to-end Data Protection Type Settings; see &NVME_NS_DPS_PI_MASK
+ * @nmic: Namespace Multi-path I/O and Namespace Sharing Capabilities; see
+ *      &NVME_NS_NMIC_SHARED
+ * @rescap: Reservation Capabilities; see &NVME_NS_RESCAP_PTPL
+ * @fpi: Format Progress Indicator; see &NVME_NS_FPI_REMAINING
+ * @dlfeat: Deallocate Logical Block Features; see &NVME_NS_DLFEAT_RB
+ * @nawun: Namespace Atomic Write Unit Normal (0's based, in logical blocks)
+ * @nawupf: Namespace Atomic Write Unit Power Fail (0's based)
+ * @nacwu: Namespace Atomic Compare & Write Unit (0's based)
+ * @nabsn: Namespace Atomic Boundary Size Normal
+ * @nabo: Namespace Atomic Boundary Offset
+ * @nabspf: Namespace Atomic Boundary Size Power Fail
+ * @noiob: Namespace Optimal I/O Boundary, in logical blocks
+ * @nvmcap: NVM Capacity of this namespace in bytes, 128-bit little endian
+ * @npwg: Namespace Preferred Write Granularity (0's based)
+ * @npwa: Namespace Preferred Write Alignment (0's based)
+ * @npdg: Namespace Preferred Deallocate Granularity (0's based)
+ * @npda: Namespace Preferred Deallocate Alignment (0's based)
+ * @nows: Namespace Optimal Write Size (0's based)
+ * @anagrpid: ANA Group Identifier this namespace belongs to
+ * @nsattr: Namespace Attributes; see &NVME_NS_NSATTR_WRITE_PROTECTED
+ * @nvmsetid: NVM Set Identifier this namespace belongs to
+ * @endgid: Endurance Group Identifier this namespace belongs to
+ * @nguid: Namespace Globally Unique Identifier
+ * @eui64: IEEE Extended Unique Identifier for this namespace
+ * @lbaf: supported LBA formats; see &struct nvme_lbaf
+ * @vs: Vendor Specific data
+ */
+struct nvme_id_ns {
+ __le64 nsze;
+ __le64 ncap;
+ __le64 nuse;
+ __u8 nsfeat;
+ __u8 nlbaf;
+ __u8 flbas;
+ __u8 mc;
+ __u8 dpc;
+ __u8 dps;
+ __u8 nmic;
+ __u8 rescap;
+ __u8 fpi;
+ __u8 dlfeat;
+ __le16 nawun;
+ __le16 nawupf;
+ __le16 nacwu;
+ __le16 nabsn;
+ __le16 nabo;
+ __le16 nabspf;
+ __le16 noiob;
+ __u8 nvmcap[16];
+ __le16 npwg;
+ __le16 npwa;
+ __le16 npdg;
+ __le16 npda;
+ __le16 nows;
+ __u8 rsvd74[18];
+ __le32 anagrpid;
+ __u8 rsvd96[3];
+ __u8 nsattr;
+ __le16 nvmsetid;
+ __le16 endgid;
+ __u8 nguid[16];
+ __u8 eui64[8];
+ struct nvme_lbaf lbaf[16];
+ __u8 rsvd192[192];
+ __u8 vs[3712];
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_NS_FEAT_THIN = 1 << 0,
+ NVME_NS_FEAT_NATOMIC = 1 << 1,
+ NVME_NS_FEAT_DULBE = 1 << 2,
+ NVME_NS_FEAT_ID_REUSE = 1 << 3,
+ NVME_NS_FEAT_IO_OPT = 1 << 4,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_NS_FLBAS_LBA_MASK = 15 << 0,
+ NVME_NS_FLBAS_META_EXT = 1 << 4,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_NS_MC_EXTENDED = 1 << 0,
+ NVME_NS_MC_SEPARATE = 1 << 1,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_NS_DPC_PI_TYPE1 = 1 << 0,
+ NVME_NS_DPC_PI_TYPE2 = 1 << 1,
+ NVME_NS_DPC_PI_TYPE3 = 1 << 2,
+ NVME_NS_DPC_PI_FIRST = 1 << 3,
+ NVME_NS_DPC_PI_LAST = 1 << 4,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_NS_DPS_PI_NONE = 0,
+ NVME_NS_DPS_PI_TYPE1 = 1,
+ NVME_NS_DPS_PI_TYPE2 = 2,
+ NVME_NS_DPS_PI_TYPE3 = 3,
+ NVME_NS_DPS_PI_MASK = 7 << 0,
+ NVME_NS_DPS_PI_FIRST = 1 << 3,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_NS_NMIC_SHARED = 1 << 0,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_NS_RESCAP_PTPL = 1 << 0,
+ NVME_NS_RESCAP_WE = 1 << 1,
+ NVME_NS_RESCAP_EA = 1 << 2,
+ NVME_NS_RESCAP_WERO = 1 << 3,
+ NVME_NS_RESCAP_EARO = 1 << 4,
+ NVME_NS_RESCAP_WEAR = 1 << 5,
+ NVME_NS_RESCAP_EAAR = 1 << 6,
+ NVME_NS_RESCAP_IEK_13 = 1 << 7,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_NS_FPI_REMAINING = 127 << 0,
+ NVME_NS_FPI_SUPPORTED = 1 << 7,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_NS_DLFEAT_RB = 7 << 0,
+ NVME_NS_DLFEAT_RB_NR = 0,
+ NVME_NS_DLFEAT_RB_ALL_0S = 1,
+ NVME_NS_DLFEAT_RB_ALL_FS = 2,
+ NVME_NS_DLFEAT_WRITE_ZEROES = 1 << 3,
+ NVME_NS_DLFEAT_CRC_GUARD = 1 << 4,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_NS_NSATTR_WRITE_PROTECTED = 1 << 0
+};
+
+/**
+ * struct nvme_ns_id_desc - Namespace Identification Descriptor
+ * @nidt: Namespace Identifier Type; see &NVME_NIDT_EUI64
+ * @nidl: Namespace Identifier Length, the length in bytes of @nid
+ * @reserved: reserved
+ * @nid: Namespace Identifier value; @nidl bytes of the identifier type
+ *      selected by @nidt
+ */
+struct nvme_ns_id_desc {
+ __u8 nidt;
+ __u8 nidl;
+ __le16 reserved;
+ __u8 nid[];
+};
+
+/**
+ * enum - Namespace Identifier Types for &struct nvme_ns_id_desc nidt
+ * @NVME_NIDT_EUI64: IEEE Extended Unique Identifier
+ * @NVME_NIDT_NGUID: Namespace Globally Unique Identifier
+ * @NVME_NIDT_UUID: Universally Unique Identifier (RFC 4122)
+ */
+enum {
+ NVME_NIDT_EUI64 = 1,
+ NVME_NIDT_NGUID = 2,
+ NVME_NIDT_UUID = 3,
+};
+
+/**
+ * struct nvme_nvmset_attr -
+ */
+struct nvme_nvmset_attr {
+ __le16 id;
+ __le16 endurance_group_id;
+ __u8 rsvd4[4];
+ __le32 random_4k_read_typical;
+ __le32 opt_write_size;
+ __u8 total_nvmset_cap[16];
+ __u8 unalloc_nvmset_cap[16];
+ __u8 rsvd48[80];
+};
+
+/**
+ * struct nvme_id_nvmset_list -
+ */
+struct nvme_id_nvmset_list {
+ __u8 nid;
+ __u8 rsvd1[127];
+ struct nvme_nvmset_attr ent[NVME_ID_NVMSET_LIST_MAX];
+};
+
+/**
+ * struct nvme_id_ns_granularity_list_entry -
+ */
+struct nvme_id_ns_granularity_list_entry {
+ __le64 namespace_size_granularity;
+ __le64 namespace_capacity_granularity;
+};
+
+/**
+ * struct nvme_id_ns_granularity_list -
+ */
+struct nvme_id_ns_granularity_list {
+ __le32 attributes;
+ __u8 num_descriptors;
+ __u8 rsvd[27];
+ struct nvme_id_ns_granularity_list_entry entry[16];
+};
+
+/**
+ * struct nvme_id_uuid_list_entry -
+ */
+struct nvme_id_uuid_list_entry {
+ __u8 header;
+ __u8 rsvd1[15];
+ __u8 uuid[16];
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_ID_UUID_HDR_ASSOCIATION_MASK = 0x3,
+ NVME_ID_UUID_ASSOCIATION_NONE = 0,
+ NVME_ID_UUID_ASSOCIATION_VENDOR = 1,
+ NVME_ID_UUID_ASSOCIATION_SUBSYSTEM_VENDOR = 2,
+};
+
+/**
+ * struct nvme_id_uuid_list -
+ */
+struct nvme_id_uuid_list {
+ __u8 rsvd0[32];
+ struct nvme_id_uuid_list_entry entry[NVME_ID_UUID_LIST_MAX];
+};
+
+/**
+ * struct nvme_ctrl_list -
+ */
+struct nvme_ctrl_list {
+ __le16 num;
+ __le16 identifier[NVME_ID_CTRL_LIST_MAX];
+};
+
+/**
+ * struct nvme_ns_list -
+ */
+struct nvme_ns_list {
+ __le32 ns[NVME_ID_NS_LIST_MAX];
+};
+
+/**
+ * struct nvme_primary_ctrl_cap -
+ */
+struct nvme_primary_ctrl_cap {
+ __le16 cntlid;
+ __le16 portid;
+ __u8 crt;
+ __u8 rsvd5[27];
+ __le32 vqfrt;
+ __le32 vqrfa;
+ __le16 vqrfap;
+ __le16 vqprt;
+ __le16 vqfrsm;
+ __le16 vqgran;
+ __u8 rsvd48[16];
+ __le32 vifrt;
+ __le32 virfa;
+ __le16 virfap;
+ __le16 viprt;
+ __le16 vifrsm;
+ __le16 vigran;
+ __u8 rsvd80[4016];
+};
+
+/**
+ * struct nvme_secondary_ctrl -
+ */
+struct nvme_secondary_ctrl {
+ __le16 scid;
+ __le16 pcid;
+ __u8 scs;
+ __u8 rsvd5[3];
+ __le16 vfn;
+ __le16 nvq;
+ __le16 nvi;
+ __u8 rsvd14[18];
+};
+
+/**
+ * struct nvme_secondary_ctrl_list -
+ */
+struct nvme_secondary_ctrl_list {
+ __u8 num;
+ __u8 rsvd[31];
+ struct nvme_secondary_ctrl sc_entry[NVME_ID_SECONDARY_CTRL_MAX];
+};
+
+/**
+ * DOC: NVMe Logs
+ */
+
+/**
+ * struct nvme_error_log_page - Error Information Log Entry (Log ID 01h)
+ * @error_count: unique 64-bit identifier for this error, incremented for
+ *      each new entry
+ * @sqid: Submission Queue ID of the command the error is associated with
+ * @cmdid: Command ID of the command the error is associated with
+ * @status_field: completion Status Field of the command in error
+ * @parm_error_location: byte and bit of the command parameter the error is
+ *      associated with; see &NVME_ERR_PEL_BYTE_MASK
+ * @lba: first LBA that experienced the error, if the error is LBA related
+ * @nsid: namespace the error is associated with
+ * @vs: Vendor Specific information available indicator
+ * @trtype: Transport Type of the error, if transport related
+ * @resv: reserved
+ * @cs: command specific information associated with the error
+ * @trtype_spec_info: transport type specific error information
+ * @resv2: reserved
+ */
+struct nvme_error_log_page {
+ __le64 error_count;
+ __le16 sqid;
+ __le16 cmdid;
+ __le16 status_field;
+ __le16 parm_error_location;
+ __le64 lba;
+ __le32 nsid;
+ __u8 vs;
+ __u8 trtype;
+ __u8 resv[2];
+ __le64 cs;
+ __le16 trtype_spec_info;
+ __u8 resv2[22];
+};
+
+/**
+ * enum - Parameter Error Location masks for
+ *      &struct nvme_error_log_page parm_error_location
+ * @NVME_ERR_PEL_BYTE_MASK: bits 3:0, byte in the command that contained
+ *      the error
+ * @NVME_ERR_PEL_BIT_MASK: bits 6:4, bit of that byte that contained the
+ *      error
+ */
+enum {
+ NVME_ERR_PEL_BYTE_MASK = 0xf,
+ NVME_ERR_PEL_BIT_MASK = 0x70,
+};
+
+/**
+ * struct nvme_smart_log - SMART / Health Information Log (Log ID 02h)
+ * @critical_warning: critical warnings about the controller state; see
+ *      &NVME_SMART_CRIT_SPARE
+ * @temperature: composite temperature, 2-byte little endian value
+ * @avail_spare: normalized percentage of remaining spare capacity
+ * @spare_thresh: available spare threshold percentage
+ * @percent_used: vendor specific estimate of the percentage of device
+ *      life used
+ * @endu_grp_crit_warn_sumry: summary of critical warnings across all
+ *      endurance groups; see &NVME_SMART_EGCW_SPARE
+ * @rsvd7: reserved
+ * @data_units_read: 128-bit count of data units read by the host
+ * @data_units_written: 128-bit count of data units written by the host
+ * @host_reads: 128-bit count of read commands completed
+ * @host_writes: 128-bit count of write commands completed
+ * @ctrl_busy_time: 128-bit count of minutes the controller was busy with
+ *      I/O commands
+ * @power_cycles: 128-bit count of power cycles
+ * @power_on_hours: 128-bit count of power-on hours
+ * @unsafe_shutdowns: 128-bit count of unsafe shutdowns
+ * @media_errors: 128-bit count of unrecovered data integrity errors
+ * @num_err_log_entries: 128-bit count of Error Information log entries
+ *      over the life of the controller
+ * @warning_temp_time: minutes at or above the warning temperature
+ * @critical_comp_time: minutes at or above the critical temperature
+ * @temp_sensor: current temperature reported by up to 8 sensors
+ * @thm_temp1_trans_count: thermal management temperature 1 transition count
+ * @thm_temp2_trans_count: thermal management temperature 2 transition count
+ * @thm_temp1_total_time: total time in thermal management temperature 1
+ * @thm_temp2_total_time: total time in thermal management temperature 2
+ * @rsvd232: reserved
+ */
+struct nvme_smart_log {
+ __u8 critical_warning;
+ __u8 temperature[2];
+ __u8 avail_spare;
+ __u8 spare_thresh;
+ __u8 percent_used;
+ __u8 endu_grp_crit_warn_sumry;
+ __u8 rsvd7[25];
+ __u8 data_units_read[16];
+ __u8 data_units_written[16];
+ __u8 host_reads[16];
+ __u8 host_writes[16];
+ __u8 ctrl_busy_time[16];
+ __u8 power_cycles[16];
+ __u8 power_on_hours[16];
+ __u8 unsafe_shutdowns[16];
+ __u8 media_errors[16];
+ __u8 num_err_log_entries[16];
+ __le32 warning_temp_time;
+ __le32 critical_comp_time;
+ __le16 temp_sensor[8];
+ __le32 thm_temp1_trans_count;
+ __le32 thm_temp2_trans_count;
+ __le32 thm_temp1_total_time;
+ __le32 thm_temp2_total_time;
+ __u8 rsvd232[280];
+};
+
+/**
+ * enum - Critical Warning bits for &struct nvme_smart_log critical_warning
+ * @NVME_SMART_CRIT_SPARE: available spare capacity fell below the threshold
+ * @NVME_SMART_CRIT_TEMPERATURE: a temperature is outside an operating range
+ * @NVME_SMART_CRIT_DEGRADED: reliability is degraded due to significant
+ *      media or internal errors
+ * @NVME_SMART_CRIT_MEDIA: the media has been placed in read only mode
+ * @NVME_SMART_CRIT_VOLATILE_MEMORY: the volatile memory backup device
+ *      has failed
+ * @NVME_SMART_CRIT_PMR_RO: the Persistent Memory Region has become
+ *      read-only
+ */
+enum {
+ NVME_SMART_CRIT_SPARE = 1 << 0,
+ NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
+ NVME_SMART_CRIT_DEGRADED = 1 << 2,
+ NVME_SMART_CRIT_MEDIA = 1 << 3,
+ NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
+ NVME_SMART_CRIT_PMR_RO = 1 << 5,
+};
+
+/**
+ * enum - Endurance Group Critical Warning bits, as summarized in
+ *      &struct nvme_smart_log endu_grp_crit_warn_sumry
+ * @NVME_SMART_EGCW_SPARE: spare capacity of an endurance group is below
+ *      its threshold
+ * @NVME_SMART_EGCW_DEGRADED: reliability of an endurance group is degraded
+ * @NVME_SMART_EGCW_RO: all namespaces in an endurance group are read-only
+ */
+enum {
+ NVME_SMART_EGCW_SPARE = 1 << 0,
+ NVME_SMART_EGCW_DEGRADED = 1 << 2,
+ NVME_SMART_EGCW_RO = 1 << 3,
+};
+
+/**
+ * struct nvme_frs -
+ */
+struct nvme_frs {
+ char frs[8];
+};
+
+/**
+ * struct nvme_firmware_slot -
+ */
+struct nvme_firmware_slot {
+ __u8 afi;
+ __u8 resv[7];
+ struct nvme_frs frs[7];
+ __u8 resv2[448];
+};
+
+/**
+ * struct nvme_cmd_effects_log -
+ */
+struct nvme_cmd_effects_log {
+ __le32 acs[256];
+ __le32 iocs[256];
+ __u8 resv[2048];
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_CMD_EFFECTS_CSUPP = 1 << 0,
+ NVME_CMD_EFFECTS_LBCC = 1 << 1,
+ NVME_CMD_EFFECTS_NCC = 1 << 2,
+ NVME_CMD_EFFECTS_NIC = 1 << 3,
+ NVME_CMD_EFFECTS_CCC = 1 << 4,
+ NVME_CMD_EFFECTS_CSE_MASK = 3 << 16,
+ NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
+};
+
+/**
+ * struct nvme_st_result -
+ */
+struct nvme_st_result {
+ __u8 dsts;
+ __u8 seg;
+ __u8 vdi;
+ __u8 rsvd;
+ __le64 poh;
+ __le32 nsid;
+ __le64 flba;
+ __u8 sct;
+ __u8 sc;
+ __u8 vs[2];
+} __attribute__((packed));
+
+/**
+ * enum -
+ */
+enum {
+ NVME_ST_RESULT_NO_ERR = 0x0,
+ NVME_ST_RESULT_ABORTED = 0x1,
+ NVME_ST_RESULT_CLR = 0x2,
+ NVME_ST_RESULT_NS_REMOVED = 0x3,
+ NVME_ST_RESULT_ABORTED_FORMAT = 0x4,
+ NVME_ST_RESULT_FATAL_ERR = 0x5,
+ NVME_ST_RESULT_UNKNOWN_SEG_FAIL = 0x6,
+ NVME_ST_RESULT_KNOWN_SEG_FAIL = 0x7,
+ NVME_ST_RESULT_ABORTED_UNKNOWN = 0x8,
+ NVME_ST_RESULT_ABORTED_SANITIZE = 0x9,
+ NVME_ST_RESULT_NOT_USED = 0xf,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_ST_OPERATION_NONE = 0x0,
+ NVME_ST_OPERATION_SHORT = 0x1,
+ NVME_ST_OPERATION_EXTENDED = 0x2,
+ NVME_ST_OPERATION_VS = 0xe,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_ST_VALID_DIAG_INFO_NSID = 1 << 0,
+ NVME_ST_VALID_DIAG_INFO_FLBA = 1 << 1,
+ NVME_ST_VALID_DIAG_INFO_SCT = 1 << 2,
+ NVME_ST_VALID_DIAG_INFO_SC = 1 << 3,
+};
+
+
+/**
+ * struct nvme_self_test_log -
+ */
+struct nvme_self_test_log {
+ __u8 current_operation;
+ __u8 completion;
+ __u8 rsvd[2];
+ struct nvme_st_result result[NVME_LOG_ST_MAX_RESULTS];
+} __attribute__((packed));
+
+/**
+ * struct nvme_telemetry_log -
+ */
+struct nvme_telemetry_log {
+ __u8 lpi;
+ __u8 rsvd[4];
+ __u8 ieee[3];
+ __le16 dalb1;
+ __le16 dalb2;
+ __le16 dalb3;
+ __u8 rsvd1[368];
+ __u8 ctrlavail;
+ __u8 ctrldgn;
+ __u8 rsnident[128];
+ __u8 telemetry_dataarea[];
+};
+
+/**
+ * struct nvme_endurance_group_log -
+ */
+struct nvme_endurance_group_log {
+ __u8 critical_warning;
+ __u8 rsvd1[2];
+ __u8 avl_spare;
+ __u8 avl_spare_threshold;
+ __u8 percent_used;
+ __u8 rsvd6[26];
+ __u8 endurance_estimate[16];
+ __u8 data_units_read[16];
+ __u8 data_units_written[16];
+ __u8 media_units_written[16];
+ __u8 host_read_cmds[16];
+ __u8 host_write_cmds[16];
+ __u8 media_data_integrity_err[16];
+ __u8 num_err_info_log_entries[16];
+ __u8 rsvd160[352];
+};
+
+/**
+ * enum -
+ */
+enum nvme_eg_critical_warning_flags {
+ NVME_EG_CRITICAL_WARNING_SPARE = 1 << 0,
+ NVME_EG_CRITICAL_WARNING_DEGRADED = 1 << 2,
+ NVME_EG_CRITICAL_WARNING_READ_ONLY = 1 << 3,
+};
+
+/**
+ * struct nvme_aggregate_endurance_group_event -
+ */
+struct nvme_aggregate_endurance_group_event {
+ __le64 num_entries;
+ __le16 entries[];
+};
+
+/**
+ * struct nvme_nvmset_predictable_lat_log -
+ */
+struct nvme_nvmset_predictable_lat_log {
+ __u8 status;
+ __u8 rsvd1;
+ __le16 event_type;
+ __u8 rsvd4[28];
+ __le64 dtwin_rt;
+ __le64 dtwin_wt;
+ __le64 dtwin_tmax;
+ __le64 dtwin_tmin_hi;
+ __le64 dtwin_tmin_lo;
+ __u8 rsvd72[56];
+ __le64 dtwin_re;
+ __le64 dtwin_we;
+ __le64 dtwin_te;
+ __u8 rsvd152[360];
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_NVMSET_PL_STATUS_DISABLED = 0,
+ NVME_NVMSET_PL_STATUS_DTWIN = 1,
+ NVME_NVMSET_PL_STATUS_NDWIN = 2,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_NVMSET_PL_EVENT_DTWIN_READ_WARN = 1 << 0,
+ NVME_NVMSET_PL_EVENT_DTWIN_WRITE_WARN = 1 << 1,
+ NVME_NVMSET_PL_EVENT_DTWIN_TIME_WARN = 1 << 2,
+ NVME_NVMSET_PL_EVENT_DTWIN_EXCEEDED = 1 << 14,
+ NVME_NVMSET_PL_EVENT_DTWIN_EXCURSION = 1 << 15,
+};
+
+/**
+ * struct nvme_aggregate_predictable_lat_event -
+ */
+struct nvme_aggregate_predictable_lat_event {
+ __le64 num_entries;
+ __le16 entries[];
+};
+
+/**
+ * struct nvme_ana_group_desc -
+ */
+struct nvme_ana_group_desc {
+ __le32 grpid;
+ __le32 nnsids;
+ __le64 chgcnt;
+ __u8 state;
+ __u8 rsvd17[15];
+ __le32 nsids[];
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_ANA_STATE_OPTIMIZED = 0x1,
+ NVME_ANA_STATE_NONOPTIMIZED = 0x2,
+ NVME_ANA_STATE_INACCESSIBLE = 0x3,
+ NVME_ANA_STATE_PERSISTENT_LOSS = 0x4,
+ NVME_ANA_STATE_CHANGE = 0xf,
+};
+
+/**
+ * struct nvme_ana_log -
+ */
+struct nvme_ana_log {
+ __le64 chgcnt;
+ __le16 ngrps;
+ __u8 rsvd10[6];
+ struct nvme_ana_group_desc descs[];
+};
+
+/**
+ * struct nvme_persistent_event_log -
+ */
+struct nvme_persistent_event_log {
+ __u8 lid;
+ __u8 rsvd1[3];
+ __le32 ttl;
+ __u8 rv;
+ __u8 rsvd17;
+ __le16 lht;
+ __le64 ts;
+ __u8 poh[16];
+ __le64 pcc;
+ __le16 vid;
+ __le16 ssvid;
+ char sn[20];
+ char mn[40];
+ char subnqn[256];
+ __u8 rsvd372;
+ __u8 seb[32];
+};
+
+/**
+ * struct nvme_lba_rd -
+ */
+struct nvme_lba_rd {
+ __le64 rslba;
+ __le32 rnlb;
+ __u8 rsvd12[4];
+};
+
+/**
+ * struct nvme_lbas_ns_element -
+ */
+struct nvme_lbas_ns_element {
+ __le32 neid;
+ __le32 nrld;
+ __u8 ratype;
+ __u8 rsvd8[7];
+ struct nvme_lba_rd lba_rd[];
+};
+
+/**
+ * enum nvme_lba_status_atype -
+ * @NVME_LBA_STATUS_ATYPE_SCAN_UNTRACKED:
+ * @NVME_LBA_STATUS_ATYPE_SCAN_TRACKED:
+ */
+enum nvme_lba_status_atype {
+ NVME_LBA_STATUS_ATYPE_SCAN_UNTRACKED = 0x10,
+ NVME_LBA_STATUS_ATYPE_SCAN_TRACKED = 0x11,
+};
+
+/**
+ * struct nvme_lba_status_log -
+ */
+struct nvme_lba_status_log {
+ __le32 lslplen;
+ __le32 nlslne;
+ __le32 estulb;
+ __u8 rsvd12[2];
+ __le16 lsgc;
+ struct nvme_lbas_ns_element elements[];
+};
+
+/**
+ * struct nvme_eg_event_aggregate_log -
+ */
+struct nvme_eg_event_aggregate_log {
+ __le64 nr_entries;
+ __le16 egids[];
+};
+
+/**
+ * struct nvme_resv_notification_log -
+ */
+struct nvme_resv_notification_log {
+ __le64 lpc;
+ __u8 rnlpt;
+ __u8 nalp;
+ __u8 rsvd9[2];
+ __le32 nsid;
+ __u8 rsvd16[48];
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_RESV_NOTIFY_RNLPT_EMPTY = 0,
+ NVME_RESV_NOTIFY_RNLPT_REGISTRATION_PREEMPTED = 1,
+ NVME_RESV_NOTIFY_RNLPT_RESERVATION_RELEASED = 2,
+ NVME_RESV_NOTIFY_RNLPT_RESERVATION_PREEMPTED = 3,
+};
+
+/**
+ * struct nvme_sanitize_log_page - Sanitize Status Log (Log ID 81h)
+ * @sprog: Sanitize Progress, fraction complete of the current sanitize
+ *      operation as a numerator of 65536
+ * @sstat: Sanitize Status; see &NVME_SANITIZE_SSTAT_NEVER_SANITIZED
+ * @scdw10: Sanitize Command Dword 10 of the most recent sanitize command
+ * @eto: Estimated Time For Overwrite, in seconds
+ * @etbe: Estimated Time For Block Erase, in seconds
+ * @etce: Estimated Time For Crypto Erase, in seconds
+ * @etond: Estimated Time For Overwrite with No-Deallocate media
+ *      modification
+ * @etbend: Estimated Time For Block Erase with No-Deallocate media
+ *      modification
+ * @etcend: Estimated Time For Crypto Erase with No-Deallocate media
+ *      modification
+ * @rsvd32: reserved
+ */
+struct nvme_sanitize_log_page {
+ __le16 sprog;
+ __le16 sstat;
+ __le32 scdw10;
+ __le32 eto;
+ __le32 etbe;
+ __le32 etce;
+ __le32 etond;
+ __le32 etbend;
+ __le32 etcend;
+ __u8 rsvd32[480];
+};
+
+/**
+ * DOC: NVMe Directives
+ */
+
+/**
+ * enum - Sanitize Status values for &struct nvme_sanitize_log_page sstat
+ * @NVME_SANITIZE_SSTAT_NEVER_SANITIZED: the NVM subsystem has never been
+ *      sanitized
+ * @NVME_SANITIZE_SSTAT_COMPLETE_SUCCESS: the most recent sanitize operation
+ *      completed successfully
+ * @NVME_SANITIZE_SSTAT_IN_PROGESS: a sanitize operation is currently in
+ *      progress.  NOTE(review): identifier misspells "PROGRESS"; kept as-is
+ *      because renaming would break source compatibility for users.
+ * @NVME_SANITIZE_SSTAT_COMPLETED_FAILED: the most recent sanitize operation
+ *      failed
+ * @NVME_SANITIZE_SSTAT_ND_COMPLETE_SUCCESS: the most recent sanitize
+ *      operation with No-Deallocate completed successfully
+ */
+enum {
+ NVME_SANITIZE_SSTAT_NEVER_SANITIZED = 0,
+ NVME_SANITIZE_SSTAT_COMPLETE_SUCCESS = 1,
+ NVME_SANITIZE_SSTAT_IN_PROGESS = 2,
+ NVME_SANITIZE_SSTAT_COMPLETED_FAILED = 3,
+ NVME_SANITIZE_SSTAT_ND_COMPLETE_SUCCESS = 4,
+};
+
+/**
+ * struct nvme_lba_status_desc -
+ */
+struct nvme_lba_status_desc {
+ __le64 dslba;
+ __le32 nlb;
+ __u8 rsvd12;
+ __u8 status;
+ __u8 rsvd14[2];
+};
+
+/**
+ * struct nvme_lba_status -
+ */
+struct nvme_lba_status {
+ __le32 nlsd;
+ __u8 cmpc;
+ __u8 rsvd5[3];
+ struct nvme_lba_status_desc descs[];
+};
+
+
+/**
+ * DOC: NVMe Management Interface
+ */
+
+/**
+ * struct nvme_mi_read_nvm_ss_info -
+ */
+struct nvme_mi_read_nvm_ss_info {
+ __u8 nump;
+ __u8 mjr;
+ __u8 mnr;
+ __u8 rsvd3[29];
+};
+
+/**
+ * struct nvme_mi_port_pcie -
+ */
+struct nvme_mi_port_pcie {
+ __u8 mps;
+ __u8 sls;
+ __u8 cls;
+ __u8 mlw;
+ __u8 nlw;
+ __u8 pn;
+ __u8 rsvd14[18];
+};
+
+/**
+ * struct nvme_mi_port_smb -
+ */
+struct nvme_mi_port_smb {
+ __u8 vpd_addr;
+ __u8 mvpd_freq;
+ __u8 mme_addr;
+ __u8 mme_freq;
+ __u8 nvmebm;
+ __u8 rsvd13[19];
+};
+
+/**
+ * struct nvme_mi_read_port_info -
+ */
+struct nvme_mi_read_port_info {
+ __u8 portt;
+ __u8 rsvd1;
+ __le16 mmctptus;
+ __le32 meb;
+ union {
+ struct nvme_mi_port_pcie pcie;
+ struct nvme_mi_port_smb smb;
+ };
+};
+
+/**
+ * struct nvme_mi_read_ctrl_info -
+ */
+struct nvme_mi_read_ctrl_info {
+ __u8 portid;
+ __u8 rsvd1[4];
+ __u8 prii;
+ __le16 pri;
+ __le16 vid;
+ __le16 did;
+ __le16 ssvid;
+ __le16 ssid;
+ __u8 rsvd16[16];
+};
+
+/**
+ * struct nvme_mi_osc -
+ */
+struct nvme_mi_osc {
+ __u8 type;
+ __u8 opc;
+};
+
+/**
+ * struct nvme_mi_read_sc_list -
+ */
+struct nvme_mi_read_sc_list {
+ __le16 numcmd;
+ struct nvme_mi_osc cmds[];
+};
+
+/**
+ * struct nvme_mi_nvm_ss_health_status -
+ */
+struct nvme_mi_nvm_ss_health_status {
+ __u8 nss;
+ __u8 sw;
+ __u8 ctemp;
+ __u8 pdlu;
+ __le16 ccs;
+ __u8 rsvd8[2];
+};
+
+/**
+ * enum - Composite Controller Status (CCS) bits for the ccs field of
+ *      &struct nvme_mi_nvm_ss_health_status
+ * @NVME_MI_CCS_RDY: ready
+ * @NVME_MI_CSS_CFS: controller fatal status
+ * @NVME_MI_CSS_SHST: shutdown status
+ * @NVME_MI_CSS_NSSRO: NVM subsystem reset occurred
+ * @NVME_MI_CSS_CECO: controller enable change occurred
+ * @NVME_MI_CSS_NAC: namespace attribute changed
+ * @NVME_MI_CSS_FA: firmware activated
+ * @NVME_MI_CSS_CSTS: controller status change
+ * @NVME_MI_CSS_CTEMP: composite temperature change
+ * @NVME_MI_CSS_PDLU: percentage used change
+ * @NVME_MI_CSS_SPARE: available spare change
+ * @NVME_MI_CSS_CCWARN: critical warning change
+ *
+ * NOTE(review): only the first constant uses the "CCS" prefix; the rest
+ * use "CSS".  Since the field is named ccs, "CSS" looks like a typo, but
+ * renaming the constants would break source compatibility.
+ */
+enum {
+ NVME_MI_CCS_RDY = 1 << 0,
+ NVME_MI_CSS_CFS = 1 << 1,
+ NVME_MI_CSS_SHST = 1 << 2,
+ NVME_MI_CSS_NSSRO = 1 << 4,
+ NVME_MI_CSS_CECO = 1 << 5,
+ NVME_MI_CSS_NAC = 1 << 6,
+ NVME_MI_CSS_FA = 1 << 7,
+ NVME_MI_CSS_CSTS = 1 << 8,
+ NVME_MI_CSS_CTEMP = 1 << 9,
+ NVME_MI_CSS_PDLU = 1 << 10,
+ NVME_MI_CSS_SPARE = 1 << 11,
+ NVME_MI_CSS_CCWARN = 1 << 12,
+};
+
+/**
+ * struct nvme_mi_ctrl_heal_status - NVMe-MI Controller Health Data Structure
+ * @ctlid: controller identifier
+ * @csts: controller status; see &NVME_MI_CSTS_RDY
+ * @ctemp: composite temperature
+ * @pdlu: percentage used
+ * @spare: available spare
+ * @cwarn: critical warning flags; see &NVME_MI_CWARN_ST
+ * @rsvd9: reserved
+ *
+ * NOTE(review): "heal" appears to be a truncation of "health"; the name is
+ * kept to preserve source compatibility.
+ */
+struct nvme_mi_ctrl_heal_status {
+ __le16 ctlid;
+ __le16 csts;
+ __le16 ctemp;
+ __u8 pdlu;
+ __u8 spare;
+ __u8 cwarn;
+ __u8 rsvd9[7];
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_MI_CSTS_RDY = 1 << 0,
+ NVME_MI_CSTS_CFS = 1 << 1,
+ NVME_MI_CSTS_SHST = 1 << 2,
+ NVME_MI_CSTS_NSSRO = 1 << 4,
+ NVME_MI_CSTS_CECO = 1 << 5,
+ NVME_MI_CSTS_NAC = 1 << 6,
+ NVME_MI_CSTS_FA = 1 << 7,
+ NVME_MI_CWARN_ST = 1 << 0,
+ NVME_MI_CWARN_TAUT = 1 << 1,
+ NVME_MI_CWARN_RD = 1 << 2,
+ NVME_MI_CWARN_RO = 1 << 3,
+ NVME_MI_CWARN_VMBF = 1 << 4,
+};
+
+/**
+ * struct nvme_mi_vpd_mra -
+ */
+struct nvme_mi_vpd_mra {
+ __u8 nmravn;
+ __u8 ff;
+ __u8 rsvd7[6];
+ __u8 i18vpwr;
+ __u8 m18vpwr;
+ __u8 i33vpwr;
+ __u8 m33vpwr;
+ __u8 rsvd17;
+ __u8 m33vapsr;
+ __u8 i5vapsr;
+ __u8 m5vapsr;
+ __u8 i12vapsr;
+ __u8 m12vapsr;
+ __u8 mtl;
+ __u8 tnvmcap[16];
+ __u8 rsvd37[27];
+};
+
+/**
+ * struct nvme_mi_vpd_ppmra -
+ */
+struct nvme_mi_vpd_ppmra {
+ __u8 nppmravn;
+ __u8 pn;
+ __u8 ppi;
+ __u8 ls;
+ __u8 mlw;
+ __u8 mctp;
+ __u8 refccap;
+ __u8 pi;
+ __u8 rsvd13[3];
+};
+
+/**
+ * struct nvme_mi_vpd_telem -
+ */
+struct nvme_mi_vpd_telem {
+ __u8 type;
+ __u8 rev;
+ __u8 len;
+ __u8 data[0];
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_MI_ELEM_EED = 1,
+ NVME_MI_ELEM_USCE = 2,
+ NVME_MI_ELEM_ECED = 3,
+ NVME_MI_ELEM_LED = 4,
+ NVME_MI_ELEM_SMBMED = 5,
+ NVME_MI_ELEM_PCIESED = 6,
+ NVME_MI_ELEM_NVMED = 7,
+};
+
+/**
+ * struct nvme_mi_vpd_tra -
+ */
+struct nvme_mi_vpd_tra {
+ __u8 vn;
+ __u8 rsvd6;
+ __u8 ec;
+ struct nvme_mi_vpd_telem elems[0];
+};
+
+/**
+ * struct nvme_mi_vpd_mr_common -
+ */
+struct nvme_mi_vpd_mr_common {
+ __u8 type;
+ __u8 rf;
+ __u8 rlen;
+ __u8 rchksum;
+ __u8 hchksum;
+
+ union {
+ struct nvme_mi_vpd_mra nmra;
+ struct nvme_mi_vpd_ppmra ppmra;
+ struct nvme_mi_vpd_tra tmra;
+ };
+};
+
+/**
+ * struct nvme_mi_vpd_hdr -
+ */
+struct nvme_mi_vpd_hdr {
+ __u8 ipmiver;
+ __u8 iuaoff;
+ __u8 ciaoff;
+ __u8 biaoff;
+ __u8 piaoff;
+ __u8 mrioff;
+ __u8 rsvd6;
+ __u8 chchk;
+ __u8 vpd[];
+};
+
+/**
+ * DOC: NVMe Features
+ */
+
+/**
+ * struct nvme_feat_auto_pst -
+ */
+struct nvme_feat_auto_pst {
+ __le64 apst_entry[32];
+};
+
+/**
+ * struct nvme_timestamp -
+ */
+struct nvme_timestamp {
+ __u8 timestamp[6];
+ __u8 attr;
+ __u8 rsvd;
+};
+
+/**
+ * struct nvme_lba_range_type_entry -
+ */
+struct nvme_lba_range_type_entry {
+ __u8 type;
+ __u8 attributes;
+ __u8 rsvd2[14];
+ __u64 slba;
+ __u64 nlb;
+ __u8 guid[16];
+ __u8 rsvd48[16];
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_LBART_TYPE_GP = 0,
+ NVME_LBART_TYPE_FS = 1,
+ NVME_LBART_TYPE_RAID = 2,
+ NVME_LBART_TYPE_CACHE = 3,
+ NVME_LBART_TYPE_SWAP = 4,
+ NVME_LBART_ATTRIB_TEMP = 1 << 0,
+ NVME_LBART_ATTRIB_HIDE = 1 << 1,
+};
+
+/**
+ * struct nvme_lba_range_type -
+ */
+struct nvme_lba_range_type {
+ struct nvme_lba_range_type_entry entry[NVME_FEAT_LBA_RANGE_MAX];
+};
+
+/**
+ * struct nvme_plm_config -
+ */
+struct nvme_plm_config {
+ __le16 ee;
+ __u8 rsvd2[30];
+ __le64 dtwinrt;
+ __le64 dtwinwt;
+ __le64 dtwintt;
+ __u8 rsvd56[456];
+};
+
+/**
+ * struct nvme_feat_host_behavior -
+ */
+struct nvme_feat_host_behavior {
+ __u8 acre;
+ __u8 resv1[511];
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_ENABLE_ACRE = 1 << 0,
+};
+
+/**
+ * struct nvme_dsm_range - Dataset Management range definition
+ * @cattr: context attributes for the range
+ * @nlb: length of the range, in logical blocks
+ * @slba: starting LBA of the range
+ */
+struct nvme_dsm_range {
+ __le32 cattr;
+ __le32 nlb;
+ __le64 slba;
+};
+
+/**
+ * struct nvme_registered_ctrl -
+ */
+struct nvme_registered_ctrl {
+ __le16 cntlid;
+ __u8 rcsts;
+ __u8 rsvd3[5];
+ __le64 hostid;
+ __le64 rkey;
+};
+
+/**
+ * struct nvme_registered_ctrl_ext -
+ */
+struct nvme_registered_ctrl_ext {
+ __le16 cntlid;
+ __u8 rcsts;
+ __u8 resv3[5];
+ __le64 rkey;
+ __u8 hostid[16];
+ __u8 resv32[32];
+};
+
+/**
+ * struct nvme_reservation_status -
+ */
+struct nvme_reservation_status {
+ __le32 gen;
+ __u8 rtype;
+ __u8 regctl[2];
+ __u8 rsvd7[2];
+ __u8 ptpls;
+ __u8 rsvd10[14];
+ union {
+ struct {
+ __u8 resv24[40];
+ struct nvme_registered_ctrl_ext regctl_eds[0];
+ };
+ struct nvme_registered_ctrl regctl_ds[0];
+ };
+};
+
+enum {
+ NVME_FEAT_ARB_BURST_MASK = 0x00000007,
+ NVME_FEAT_ARB_LPW_MASK = 0x0000ff00,
+ NVME_FEAT_ARB_MPW_MASK = 0x00ff0000,
+ NVME_FEAT_ARB_HPW_MASK = 0xff000000,
+ NVME_FEAT_PM_PS_MASK = 0x0000001f,
+ NVME_FEAT_PM_WH_MASK = 0x000000e0,
+ NVME_FEAT_LBAR_NR_MASK = 0x0000003f,
+ NVME_FEAT_TT_TMPTH_MASK = 0x0000ffff,
+ NVME_FEAT_TT_TMPSEL_MASK = 0x000f0000,
+ NVME_FEAT_TT_THSEL_MASK = 0x00300000,
+ NVME_FEAT_ER_TLER_MASK = 0x0000ffff,
+ NVME_FEAT_ER_DULBE_MASK = 0x00010000,
+ NVME_FEAT_VWC_WCE_MASK = 0x00000001,
+ NVME_FEAT_NRQS_NSQR_MASK = 0x0000ffff,
+ NVME_FEAT_NRQS_NCQR_MASK = 0xffff0000,
+ NVME_FEAT_ICOAL_THR_MASK = 0x000000ff,
+ NVME_FEAT_ICOAL_TIME_MASK = 0x0000ff00,
+ NVME_FEAT_ICFG_IV_MASK = 0x0000ffff,
+ NVME_FEAT_ICFG_CD_MASK = 0x00010000,
+ NVME_FEAT_WA_DN_MASK = 0x00000001,
+ NVME_FEAT_AE_SMART_MASK = 0x000000ff,
+ NVME_FEAT_AE_NAN_MASK = 0x00000100,
+ NVME_FEAT_AE_FW_MASK = 0x00000200,
+ NVME_FEAT_AE_TELEM_MASK = 0x00000400,
+ NVME_FEAT_AE_ANA_MASK = 0x00000800,
+ NVME_FEAT_AE_PLA_MASK = 0x00001000,
+ NVME_FEAT_AE_LBAS_MASK = 0x00002000,
+ NVME_FEAT_AE_EGA_MASK = 0x00004000,
+ NVME_FEAT_APST_APSTE_MASK = 0x00000001,
+ NVME_FEAT_HMEM_EHM_MASK = 0x00000001,
+ NVME_FEAT_TS_SYNCH_MASK = 0x00000001,
+ NVME_FEAT_TS_ORIGIN_MASK = 0x0000000e,
+ NVME_FEAT_TS_ORIGIN_CLR = 0x00000001,
+ NVME_FEAT_TS_ORIGIN_SF = 0x00000002,
+ NVME_FEAT_HCTM_TMT2_MASK = 0x0000ffff,
+ NVME_FEAT_HCTM_TMT1_MASK = 0xffff0000,
+ NVME_FEAT_NOPS_NOPPME_MASK = 0x00000001,
+ NVME_FEAT_RRL_RRL_MASK = 0x000000ff,
+ NVME_FEAT_PLM_PLME_MASK = 0x00000001,
+ NVME_FEAT_PLMW_WS_MASK = 0x00000007,
+ NVME_FEAT_LBAS_LSIRI_MASK = 0x0000ffff,
+ NVME_FEAT_LBAS_LSIPI_MASK = 0xffff0000,
+ NVME_FEAT_SC_NODRM_MASK = 0x00000001,
+ NVME_FEAT_EG_ENDGID_MASK = 0x0000ffff,
+ NVME_FEAT_EG_EGCW_MASK = 0x00ff0000,
+ NVME_FEAT_SPM_PBSLC_MASK = 0x000000ff,
+ NVME_FEAT_HOSTID_EXHID_MASK = 0x00000001,
+ NVME_FEAT_RM_REGPRE_MASK = 0x00000002,
+ NVME_FEAT_RM_RESREL_MASK = 0x00000004,
+ NVME_FEAT_RM_RESPRE_MASK = 0x00000008,
+ NVME_FEAT_RP_PTPL_MASK = 0x00000001,
+ NVME_FEAT_WP_WPS_MASK = 0x00000007,
+};
+
+/*
+ * Extract the field selected by mask @m from value @v, shifting the result
+ * down by @s bits.  All arguments are fully parenthesized so callers may
+ * safely pass compound expressions, e.g. shift(a | b, 0, mask), which the
+ * unparenthesized form would mis-evaluate due to operator precedence.
+ */
+#define shift(v, s, m) (((v) & (m)) >> (s))
+
+#define NVME_FEAT_ARB_BURST(v) shift(v, 0, NVME_FEAT_ARB_BURST_MASK)
+#define NVME_FEAT_ARB_LPW(v) shift(v, 8, NVME_FEAT_ARB_LPW_MASK)
+#define NVME_FEAT_ARB_MPW(v) shift(v, 16, NVME_FEAT_ARB_MPW_MASK)
+#define NVME_FEAT_ARB_HPW(v) shift(v, 24, NVME_FEAT_ARB_HPW_MASK)
+#define NVME_FEAT_PM_PS(v) shift(v, 0, NVME_FEAT_PM_PS_MASK)
+#define NVME_FEAT_PM_WH(v) shift(v, 5, NVME_FEAT_PM_WH_MASK)
+#define NVME_FEAT_LBAR_NR(v) shift(v, 0, NVME_FEAT_LBAR_NR_MASK)
+#define NVME_FEAT_TT_TMPTH(v) shift(v, 0, NVME_FEAT_TT_TMPTH_MASK)
+#define NVME_FEAT_TT_TMPSEL(v) shift(v, 16, NVME_FEAT_TT_TMPSEL_MASK)
+#define NVME_FEAT_TT_THSEL(v) shift(v, 20, NVME_FEAT_TT_THSEL_MASK)
+#define NVME_FEAT_ER_TLER(v) shift(v, 0, NVME_FEAT_ER_TLER_MASK)
+#define NVME_FEAT_ER_DULBE(v) shift(v, 16, NVME_FEAT_ER_DULBE_MASK)
+#define NVME_FEAT_VWC_WCE(v) shift(v, 0, NVME_FEAT_VWC_WCE_MASK)
+#define NVME_FEAT_NRQS_NSQR(v) shift(v, 0, NVME_FEAT_NRQS_NSQR_MASK)
+#define NVME_FEAT_NRQS_NCQR(v) shift(v, 16, NVME_FEAT_NRQS_NCQR_MASK)
+#define NVME_FEAT_ICOAL_THR(v) shift(v, 0, NVME_FEAT_ICOAL_THR_MASK)
+#define NVME_FEAT_ICOAL_TIME(v) shift(v, 8, NVME_FEAT_ICOAL_TIME_MASK)
+#define NVME_FEAT_ICFG_IV(v) shift(v, 0, NVME_FEAT_ICFG_IV_MASK)
+#define NVME_FEAT_ICFG_CD(v) shift(v, 16, NVME_FEAT_ICFG_CD_MASK)
+#define NVME_FEAT_WA_DN(v) shift(v, 0, NVME_FEAT_WA_DN_MASK)
+#define NVME_FEAT_AE_SMART(v) shift(v, 0, NVME_FEAT_AE_SMART_MASK)
+#define NVME_FEAT_AE_NAN(v) shift(v, 8, NVME_FEAT_AE_NAN_MASK)
+#define NVME_FEAT_AE_FW(v) shift(v, 9, NVME_FEAT_AE_FW_MASK)
+#define NVME_FEAT_AE_TELEM(v) shift(v, 10, NVME_FEAT_AE_TELEM_MASK)
+#define NVME_FEAT_AE_ANA(v) shift(v, 11, NVME_FEAT_AE_ANA_MASK)
+#define NVME_FEAT_AE_PLA(v) shift(v, 12, NVME_FEAT_AE_PLA_MASK)
+#define NVME_FEAT_AE_LBAS(v) shift(v, 13, NVME_FEAT_AE_LBAS_MASK)
+#define NVME_FEAT_AE_EGA(v) shift(v, 14, NVME_FEAT_AE_EGA_MASK)
+#define NVME_FEAT_APST_APSTE(v) shift(v, 0, NVME_FEAT_APST_APSTE_MASK)
+#define NVME_FEAT_HMEM_EHM(v) shift(v, 0, NVME_FEAT_HMEM_EHM_MASK)
+#define NVME_FEAT_TS_SYNC(v) shift(v, 0, NVME_FEAT_TS_SYNCH_MASK)
+#define NVME_FEAT_TS_ORIGIN(v) shift(v, 1, NVME_FEAT_TS_ORIGIN_MASK)
+#define NVME_FEAT_HCTM_TMT2(v) shift(v, 0, NVME_FEAT_HCTM_TMT2_MASK)
+#define NVME_FEAT_HCTM_TMT1(v) shift(v, 16, NVME_FEAT_HCTM_TMT1_MASK)
+#define NVME_FEAT_NOPS_NOPPME(v) shift(v, 0, NVME_FEAT_NOPS_NOPPME_MASK)
+#define NVME_FEAT_RRL_RRL(v) shift(v, 0, NVME_FEAT_RRL_RRL_MASK)
+#define NVME_FEAT_PLM_PLME(v) shift(v, 0, NVME_FEAT_PLM_PLME_MASK)
+#define NVME_FEAT_PLMW_WS(v) shift(v, 0, NVME_FEAT_PLMW_WS_MASK)
+#define NVME_FEAT_LBAS_LSIRI(v) shift(v, 0, NVME_FEAT_LBAS_LSIRI_MASK)
+#define NVME_FEAT_LBAS_LSIPI(v) shift(v, 16, NVME_FEAT_LBAS_LSIPI_MASK)
+#define NVME_FEAT_SC_NODRM(v) shift(v, 0, NVME_FEAT_SC_NODRM_MASK)
+#define NVME_FEAT_EG_ENDGID(v) shift(v, 0, NVME_FEAT_EG_ENDGID_MASK)
+#define NVME_FEAT_EG_EGCW(v) shift(v, 16, NVME_FEAT_EG_EGCW_MASK)
+#define NVME_FEAT_SPM_PBSLC(v) shift(v, 0, NVME_FEAT_SPM_PBSLC_MASK)
+#define NVME_FEAT_HOSTID_EXHID(v) shift(v, 0, NVME_FEAT_HOSTID_EXHID_MASK)
+#define NVME_FEAT_RM_REGPRE(v) shift(v, 1, NVME_FEAT_RM_REGPRE_MASK)
+#define NVME_FEAT_RM_RESREL(v) shift(v, 2, NVME_FEAT_RM_RESREL_MASK)
+#define NVME_FEAT_RM_RESPRE(v) shift(v, 3, NVME_FEAT_RM_RESPRE_MASK)
+#define NVME_FEAT_RP_PTPL(v) shift(v, 0, NVME_FEAT_RP_PTPL_MASK)
+#define NVME_FEAT_WP_WPS(v) shift(v, 0, NVME_FEAT_WP_WPS_MASK)
+
+/**
+ * struct nvme_streams_directive_params -
+ */
+struct nvme_streams_directive_params {
+ __le16 msl;
+ __le16 nssa;
+ __le16 nsso;
+ __u8 nssc;
+ __u8 rsvd[9];
+ __le32 sws;
+ __le16 sgs;
+ __le16 nsa;
+ __le16 nso;
+ __u8 rsvd2[6];
+};
+
+/**
+ * struct nvme_streams_directive_status -
+ */
+struct nvme_streams_directive_status {
+ __le16 osc;
+ __le16 sid[];
+};
+
+/**
+ * struct nvme_id_directives -
+ */
+struct nvme_id_directives {
+ __u8 supported[32];
+ __u8 enabled[32];
+ __u8 rsvd64[4032];
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_ID_DIR_ID_BIT = 0,
+ NVME_ID_DIR_SD_BIT = 1,
+};
+
+/**
+ * struct nvme_host_mem_buf_desc -
+ */
+struct nvme_host_mem_buf_desc {
+ __le64 addr;
+ __le32 size;
+ __u32 rsvd;
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_AER_ERROR = 0,
+ NVME_AER_SMART = 1,
+ NVME_AER_NOTICE = 2,
+ NVME_AER_CSS = 6,
+ NVME_AER_VS = 7,
+};
+
+/**
+ * enum -
+ */
+enum {
+ NVME_AER_NOTICE_NS_CHANGED = 0x00,
+ NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
+ NVME_AER_NOTICE_ANA = 0x03,
+ NVME_AER_NOTICE_DISC_CHANGED = 0xf0,
+};
+
+/**
+ * enum nvme_subsys_type -
+ * @NVME_NQN_DISC: Discovery type target subsystem
+ * @NVME_NQN_NVME: NVME type target subsystem
+ */
+enum nvme_subsys_type {
+ NVME_NQN_DISC = 1,
+ NVME_NQN_NVME = 2,
+};
+
+#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"
+#define NVME_RDMA_IP_PORT 4420
+
+/* NQN names in commands fields specified one size */
+#define NVMF_NQN_FIELD_LEN 256
+
+/* However the max length of a qualified name is another size */
+#define NVMF_NQN_SIZE 223
+#define NVMF_TRSVCID_SIZE 32
+#define NVMF_TRADDR_SIZE 256
+#define NVMF_TSAS_SIZE 256
+
+/**
+ * struct nvmf_disc_log_entry -
+ *
+ * Discovery log page entry
+ */
+struct nvmf_disc_log_entry {
+ __u8 trtype;
+ __u8 adrfam;
+ __u8 subtype;
+ __u8 treq;
+ __le16 portid;
+ __le16 cntlid;
+ __le16 asqsz;
+ __u8 resv10[22];
+ char trsvcid[NVMF_TRSVCID_SIZE];
+ __u8 resv64[192];
+ char subnqn[NVMF_NQN_FIELD_LEN];
+ char traddr[NVMF_TRADDR_SIZE];
+ union tsas {
+ char common[NVMF_TSAS_SIZE];
+ struct rdma {
+ __u8 qptype;
+ __u8 prtype;
+ __u8 cms;
+ __u8 resv3[5];
+ __u16 pkey;
+ __u8 resv10[246];
+ } rdma;
+ struct tcp {
+ __u8 sectype;
+ } tcp;
+ } tsas;
+};
+
+/**
+ * enum -
+ * @NVMF_TRTYPE_UNSPECIFIED: Not indicated
+ * @NVMF_TRTYPE_RDMA: RDMA
+ * @NVMF_TRTYPE_FC: Fibre Channel
+ * @NVMF_TRTYPE_TCP: TCP
+ * @NVMF_TRTYPE_LOOP: Reserved for host usage
+ *
+ * Transport Type codes for Discovery Log Page entry TRTYPE field
+ */
+enum {
+ NVMF_TRTYPE_UNSPECIFIED = 0,
+ NVMF_TRTYPE_RDMA = 1,
+ NVMF_TRTYPE_FC = 2,
+ NVMF_TRTYPE_TCP = 3,
+ NVMF_TRTYPE_LOOP = 254,
+ NVMF_TRTYPE_MAX,
+};
+
+/**
+ * enum -
+ * @NVMF_ADDR_FAMILY_PCI: PCIe
+ * @NVMF_ADDR_FAMILY_IP4: IPv4
+ * @NVMF_ADDR_FAMILY_IP6: IPv6
+ * @NVMF_ADDR_FAMILY_IB: InfiniBand
+ * @NVMF_ADDR_FAMILY_FC: Fibre Channel
+ *
+ * Address Family codes for Discovery Log Page entry ADRFAM field
+ */
+enum {
+ NVMF_ADDR_FAMILY_PCI = 0,
+ NVMF_ADDR_FAMILY_IP4 = 1,
+ NVMF_ADDR_FAMILY_IP6 = 2,
+ NVMF_ADDR_FAMILY_IB = 3,
+ NVMF_ADDR_FAMILY_FC = 4,
+};
+
+/**
+ * enum -
+ * @NVMF_TREQ_NOT_SPECIFIED: Not specified
+ * @NVMF_TREQ_REQUIRED: Required
+ * @NVMF_TREQ_NOT_REQUIRED: Not Required
+ * @NVMF_TREQ_DISABLE_SQFLOW: SQ flow control disable supported
+ *
+ * Transport Requirements codes for Discovery Log Page entry TREQ field
+ */
+enum {
+ NVMF_TREQ_NOT_SPECIFIED = 0,
+ NVMF_TREQ_REQUIRED = 1,
+ NVMF_TREQ_NOT_REQUIRED = 2,
+ NVMF_TREQ_DISABLE_SQFLOW = 4,
+};
+
+/**
+ * enum -
+ * @NVMF_RDMA_QPTYPE_CONNECTED: Reliable Connected
+ * @NVMF_RDMA_QPTYPE_DATAGRAM: Reliable Datagram
+ *
+ * RDMA QP Service Type codes for Discovery Log Page entry TSAS
+ * RDMA_QPTYPE field
+ */
+enum {
+ NVMF_RDMA_QPTYPE_CONNECTED = 1,
+ NVMF_RDMA_QPTYPE_DATAGRAM = 2,
+};
+
+/**
+ * enum -
+ * @NVMF_RDMA_PRTYPE_NOT_SPECIFIED: No Provider Specified
+ * @NVMF_RDMA_PRTYPE_IB: InfiniBand
+ * @NVMF_RDMA_PRTYPE_ROCE: InfiniBand RoCE
+ * @NVMF_RDMA_PRTYPE_ROCEV2: InfiniBand RoCEV2
+ * @NVMF_RDMA_PRTYPE_IWARP: iWARP
+ *
+ * RDMA Provider Type codes for Discovery Log Page entry TSAS
+ * RDMA_PRTYPE field
+ */
+enum {
+ NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1,
+ NVMF_RDMA_PRTYPE_IB = 2,
+ NVMF_RDMA_PRTYPE_ROCE = 3,
+ NVMF_RDMA_PRTYPE_ROCEV2 = 4,
+ NVMF_RDMA_PRTYPE_IWARP = 5,
+};
+
+/**
+ * enum -
+ * @NVMF_RDMA_CMS_RDMA_CM: Sockets based endpoint addressing
+ *
+ * RDMA Connection Management Service Type codes for Discovery Log Page
+ * entry TSAS RDMA_CMS field
+ */
+enum {
+ NVMF_RDMA_CMS_RDMA_CM = 1,
+};
+
+/**
+ * enum -
+ * @NVMF_TCP_SECTYPE_NONE: No Security
+ * @NVMF_TCP_SECTYPE_TLS: Transport Layer Security
+ */
+enum {
+ NVMF_TCP_SECTYPE_NONE = 0,
+ NVMF_TCP_SECTYPE_TLS = 1,
+};
+
+/**
+ * struct nvmf_discovery_log - Discovery Log Page header
+ * @genctr: Generation Counter, changes whenever the log contents change
+ * @numrec: number of records contained in @entries
+ * @recfmt: Record Format of the log page
+ * @resv14: reserved
+ * @entries: the discovery log entries; @numrec of them follow the header.
+ *      Declared as a C99 flexible array member rather than the GCC
+ *      zero-length array extension, matching the other variable-length
+ *      structures in this file (e.g. &struct nvme_ana_log).
+ */
+struct nvmf_discovery_log {
+ __le64 genctr;
+ __le64 numrec;
+ __le16 recfmt;
+ __u8 resv14[1006];
+ struct nvmf_disc_log_entry entries[];
+};
+
+/**
+ * struct nvmf_connect_data -
+ */
+struct nvmf_connect_data {
+ __u8 hostid[16];
+ __le16 cntlid;
+ char resv4[238];
+ char subsysnqn[NVMF_NQN_FIELD_LEN];
+ char hostnqn[NVMF_NQN_FIELD_LEN];
+ char resv5[256];
+};
+
+/**
+ * enum -
+ */
+enum {
+ /*
+ * Status code type
+ */
+ NVME_SCT_GENERIC = 0x000,
+ NVME_SCT_CMD_SPECIFIC = 0x100,
+ NVME_SCT_MEDIA = 0x200,
+ NVME_SCT_PATH = 0x300,
+ NVME_SCT_VS = 0x700,
+ NVME_SCT_MASK = 0x700,
+
+ /*
+ * Generic Command Status:
+ */
+ NVME_SC_SUCCESS = 0x0,
+ NVME_SC_INVALID_OPCODE = 0x1,
+ NVME_SC_INVALID_FIELD = 0x2,
+ NVME_SC_CMDID_CONFLICT = 0x3,
+ NVME_SC_DATA_XFER_ERROR = 0x4,
+ NVME_SC_POWER_LOSS = 0x5,
+ NVME_SC_INTERNAL = 0x6,
+ NVME_SC_ABORT_REQ = 0x7,
+ NVME_SC_ABORT_QUEUE = 0x8,
+ NVME_SC_FUSED_FAIL = 0x9,
+ NVME_SC_FUSED_MISSING = 0xa,
+ NVME_SC_INVALID_NS = 0xb,
+ NVME_SC_CMD_SEQ_ERROR = 0xc,
+ NVME_SC_SGL_INVALID_LAST = 0xd,
+ NVME_SC_SGL_INVALID_COUNT = 0xe,
+ NVME_SC_SGL_INVALID_DATA = 0xf,
+ NVME_SC_SGL_INVALID_METADATA = 0x10,
+ NVME_SC_SGL_INVALID_TYPE = 0x11,
+ NVME_SC_CMB_INVALID_USE = 0x12,
+ NVME_SC_PRP_INVALID_OFFSET = 0x13,
+ NVME_SC_AWU_EXCEEDED = 0x14,
+ NVME_SC_OP_DENIED = 0x15,
+ NVME_SC_SGL_INVALID_OFFSET = 0x16,
+
+ NVME_SC_HOSTID_FORMAT = 0x18,
+ NVME_SC_KAT_EXPIRED = 0x19,
+ NVME_SC_KAT_INVALID = 0x1a,
+ NVME_SC_CMD_ABORTED_PREMEPT = 0x1b,
+ NVME_SC_SANITIZE_FAILED = 0x1c,
+ NVME_SC_SANITIZE_IN_PROGRESS = 0x1d,
+ NVME_SC_SGL_INVALID_GRANULARITY = 0x1e,
+ NVME_SC_CMD_IN_CMBQ_NOT_SUPP = 0x1f,
+ NVME_SC_NS_WRITE_PROTECTED = 0x20,
+ NVME_SC_CMD_INTERRUPTED = 0x21,
+ NVME_SC_TRAN_TPORT_ERROR = 0x22,
+
+ NVME_SC_LBA_RANGE = 0x80,
+ NVME_SC_CAP_EXCEEDED = 0x81,
+ NVME_SC_NS_NOT_READY = 0x82,
+ NVME_SC_RESERVATION_CONFLICT = 0x83,
+ NVME_SC_FORMAT_IN_PROGRESS = 0x84,
+
+ /*
+ * Command Specific Status:
+ */
+ NVME_SC_CQ_INVALID = 0x00,
+ NVME_SC_QID_INVALID = 0x01,
+ NVME_SC_QUEUE_SIZE = 0x02,
+ NVME_SC_ABORT_LIMIT = 0x03,
+ NVME_SC_ABORT_MISSING = 0x04,
+ NVME_SC_ASYNC_LIMIT = 0x05,
+ NVME_SC_FIRMWARE_SLOT = 0x06,
+ NVME_SC_FIRMWARE_IMAGE = 0x07,
+ NVME_SC_INVALID_VECTOR = 0x08,
+ NVME_SC_INVALID_LOG_PAGE = 0x09,
+ NVME_SC_INVALID_FORMAT = 0x0a,
+ NVME_SC_FW_NEEDS_CONV_RESET = 0x0b,
+ NVME_SC_INVALID_QUEUE = 0x0c,
+ NVME_SC_FEATURE_NOT_SAVEABLE = 0x0d,
+ NVME_SC_FEATURE_NOT_CHANGEABLE = 0x0e,
+ NVME_SC_FEATURE_NOT_PER_NS = 0x0f,
+ NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x10,
+ NVME_SC_FW_NEEDS_RESET = 0x11,
+ NVME_SC_FW_NEEDS_MAX_TIME = 0x12,
+ NVME_SC_FW_ACTIVATE_PROHIBITED = 0x13,
+ NVME_SC_OVERLAPPING_RANGE = 0x14,
+ NVME_SC_NS_INSUFFICIENT_CAP = 0x15,
+ NVME_SC_NS_ID_UNAVAILABLE = 0x16,
+ NVME_SC_NS_ALREADY_ATTACHED = 0x18,
+ NVME_SC_NS_IS_PRIVATE = 0x19,
+ NVME_SC_NS_NOT_ATTACHED = 0x1a,
+ NVME_SC_THIN_PROV_NOT_SUPP = 0x1b,
+ NVME_SC_CTRL_LIST_INVALID = 0x1c,
+ NVME_SC_SELF_TEST_IN_PROGRESS = 0x1d,
+ NVME_SC_BP_WRITE_PROHIBITED = 0x1e,
+ NVME_SC_INVALID_CTRL_ID = 0x1f,
+ NVME_SC_INVALID_SEC_CTRL_STATE = 0x20,
+ NVME_SC_INVALID_CTRL_RESOURCES = 0x21,
+ NVME_SC_INVALID_RESOURCE_ID = 0x22,
+ NVME_SC_PMR_SAN_PROHIBITED = 0x23,
+ NVME_SC_ANA_GROUP_ID_INVALID = 0x24,
+ NVME_SC_ANA_ATTACH_FAILED = 0x25,
+
+ /*
+ * I/O Command Set Specific - NVM commands:
+ */
+ NVME_SC_BAD_ATTRIBUTES = 0x80,
+ NVME_SC_INVALID_PI = 0x81,
+ NVME_SC_READ_ONLY = 0x82,
+
+ /*
+ * I/O Command Set Specific - Fabrics commands:
+ */
+ NVME_SC_CONNECT_FORMAT = 0x80,
+ NVME_SC_CONNECT_CTRL_BUSY = 0x81,
+ NVME_SC_CONNECT_INVALID_PARAM = 0x82,
+ NVME_SC_CONNECT_RESTART_DISC = 0x83,
+ NVME_SC_CONNECT_INVALID_HOST = 0x84,
+ NVME_SC_DISCONNECT_INVALID_QTYPE= 0x85,
+
+ NVME_SC_DISCOVERY_RESTART = 0x90,
+ NVME_SC_AUTH_REQUIRED = 0x91,
+
+ /*
+ * Media and Data Integrity Errors:
+ */
+ NVME_SC_WRITE_FAULT = 0x80,
+ NVME_SC_READ_ERROR = 0x81,
+ NVME_SC_GUARD_CHECK = 0x82,
+ NVME_SC_APPTAG_CHECK = 0x83,
+ NVME_SC_REFTAG_CHECK = 0x84,
+ NVME_SC_COMPARE_FAILED = 0x85,
+ NVME_SC_ACCESS_DENIED = 0x86,
+ NVME_SC_UNWRITTEN_BLOCK = 0x87,
+
+ /*
+ * Path-related Errors:
+ */
+ NVME_SC_ANA_INTERNAL_PATH_ERROR = 0x00,
+ NVME_SC_ANA_PERSISTENT_LOSS = 0x01,
+ NVME_SC_ANA_INACCESSIBLE = 0x02,
+ NVME_SC_ANA_TRANSITION = 0x03,
+
+ NVME_SC_CTRL_PATH_ERROR = 0x60,
+
+ NVME_SC_HOST_PATH_ERROR = 0x70,
+ NVME_SC_CMD_ABORTED_BY_HOST = 0x71,
+
+ /*
+ * Status code mask
+ */
+ NVME_SC_MASK = 0xff,
+
+ /*
+ * Additional status info
+ */
+ NVME_SC_CRD = 0x1800,
+ NVME_SC_MORE = 0x2000,
+ NVME_SC_DNR = 0x4000,
+};
+
/* Split a 32-bit NVMe version value (e.g. the VS register) into its
 * major (bits 31:16), minor (bits 15:8) and tertiary (bits 7:0) parts. */
#define NVME_MAJOR(ver) ((ver) >> 16)
#define NVME_MINOR(ver) (((ver) >> 8) & 0xff)
#define NVME_TERTIARY(ver) ((ver) & 0xff)
+
+#endif /* _LIBNVME_TYPES_H */
--- /dev/null
+#define _GNU_SOURCE
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <dirent.h>
+#include <libgen.h>
+
+#include <linux/types.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "filters.h"
+#include "types.h"
+#include "cmd.h"
+#include "ioctl.h"
+#include "util.h"
+#include "tree.h"
+
+static inline __u8 nvme_generic_status_to_errno(__u16 status)
+{
+ switch (status) {
+ case NVME_SC_INVALID_OPCODE:
+ case NVME_SC_INVALID_FIELD:
+ case NVME_SC_INVALID_NS:
+ case NVME_SC_SGL_INVALID_LAST:
+ case NVME_SC_SGL_INVALID_COUNT:
+ case NVME_SC_SGL_INVALID_DATA:
+ case NVME_SC_SGL_INVALID_METADATA:
+ case NVME_SC_SGL_INVALID_TYPE:
+ case NVME_SC_SGL_INVALID_OFFSET:
+ return EINVAL;
+ case NVME_SC_CMDID_CONFLICT:
+ return EADDRINUSE;
+ case NVME_SC_DATA_XFER_ERROR:
+ case NVME_SC_INTERNAL:
+ case NVME_SC_SANITIZE_FAILED:
+ return EIO;
+ case NVME_SC_POWER_LOSS:
+ case NVME_SC_ABORT_REQ:
+ case NVME_SC_ABORT_QUEUE:
+ case NVME_SC_FUSED_FAIL:
+ case NVME_SC_FUSED_MISSING:
+ return EWOULDBLOCK;
+ case NVME_SC_CMD_SEQ_ERROR:
+ return EILSEQ;
+ case NVME_SC_SANITIZE_IN_PROGRESS:
+ return EINPROGRESS;
+ case NVME_SC_NS_WRITE_PROTECTED:
+ case NVME_SC_NS_NOT_READY:
+ case NVME_SC_RESERVATION_CONFLICT:
+ return EACCES;
+ case NVME_SC_LBA_RANGE:
+ return EREMOTEIO;
+ case NVME_SC_CAP_EXCEEDED:
+ return ENOSPC;
+ }
+ return EIO;
+}
+
+static inline __u8 nvme_cmd_specific_status_to_errno(__u16 status)
+{
+ switch (status) {
+ case NVME_SC_CQ_INVALID:
+ case NVME_SC_QID_INVALID:
+ case NVME_SC_QUEUE_SIZE:
+ case NVME_SC_FIRMWARE_SLOT:
+ case NVME_SC_FIRMWARE_IMAGE:
+ case NVME_SC_INVALID_VECTOR:
+ case NVME_SC_INVALID_LOG_PAGE:
+ case NVME_SC_INVALID_FORMAT:
+ case NVME_SC_INVALID_QUEUE:
+ case NVME_SC_NS_INSUFFICIENT_CAP:
+ case NVME_SC_NS_ID_UNAVAILABLE:
+ case NVME_SC_CTRL_LIST_INVALID:
+ case NVME_SC_BAD_ATTRIBUTES:
+ case NVME_SC_INVALID_PI:
+ return EINVAL;
+ case NVME_SC_ABORT_LIMIT:
+ case NVME_SC_ASYNC_LIMIT:
+ return EDQUOT;
+ case NVME_SC_FW_NEEDS_CONV_RESET:
+ case NVME_SC_FW_NEEDS_SUBSYS_RESET:
+ case NVME_SC_FW_NEEDS_MAX_TIME:
+ return ERESTART;
+ case NVME_SC_FEATURE_NOT_SAVEABLE:
+ case NVME_SC_FEATURE_NOT_CHANGEABLE:
+ case NVME_SC_FEATURE_NOT_PER_NS:
+ case NVME_SC_FW_ACTIVATE_PROHIBITED:
+ case NVME_SC_NS_IS_PRIVATE:
+ case NVME_SC_BP_WRITE_PROHIBITED:
+ case NVME_SC_READ_ONLY:
+ case NVME_SC_PMR_SAN_PROHIBITED:
+ return EPERM;
+ case NVME_SC_OVERLAPPING_RANGE:
+ case NVME_SC_NS_NOT_ATTACHED:
+ return ENOSPC;
+ case NVME_SC_NS_ALREADY_ATTACHED:
+ return EALREADY;
+ case NVME_SC_THIN_PROV_NOT_SUPP:
+ return EOPNOTSUPP;
+ }
+
+ return EIO;
+}
+
+static inline __u8 nvme_fabrics_status_to_errno(__u16 status)
+{
+ switch (status) {
+ case NVME_SC_CONNECT_FORMAT:
+ case NVME_SC_CONNECT_INVALID_PARAM:
+ return EINVAL;
+ case NVME_SC_CONNECT_CTRL_BUSY:
+ return EBUSY;
+ case NVME_SC_CONNECT_RESTART_DISC:
+ return ERESTART;
+ case NVME_SC_CONNECT_INVALID_HOST:
+ return ECONNREFUSED;
+ case NVME_SC_DISCOVERY_RESTART:
+ return EAGAIN;
+ case NVME_SC_AUTH_REQUIRED:
+ return EPERM;
+ }
+
+ return EIO;
+}
+
+__u8 nvme_status_to_errno(int status, bool fabrics)
+{
+ __u16 sc, sct;
+
+ if (!status)
+ return 0;
+ if (status < 0)
+ return errno;
+
+ sc = status & NVME_SC_MASK;
+ sct = status & NVME_SCT_MASK;
+
+ switch (sct) {
+ case NVME_SCT_GENERIC:
+ return nvme_generic_status_to_errno(sc);
+ case NVME_SCT_CMD_SPECIFIC:
+ if (fabrics)
+ return nvme_fabrics_status_to_errno(sc);
+ return nvme_cmd_specific_status_to_errno(sc);
+ default:
+ /*
+ * Media, integrity related status, and the others will be
+ * mapped to EIO.
+ */
+ return EIO;
+ }
+}
+
/*
 * Open the device node /dev/<name> read-only.
 *
 * Return: an open fd on success; -1 with errno set on failure (ENOMEM if
 * building the path failed, otherwise whatever open(2) set).
 */
static int __nvme_open(const char *name)
{
	char *path = NULL;
	int fd;

	if (asprintf(&path, "/dev/%s", name) < 0) {
		errno = ENOMEM;
		return -1;
	}

	fd = open(path, O_RDONLY);
	free(path);
	return fd;
}
+
/*
 * nvme_open() - Open an nvme controller or namespace device by basename.
 * @name: e.g. "nvme0" (controller, char device) or "nvme0n1" (namespace,
 *        block device)
 *
 * Validates that the name matches the linux naming convention and that
 * the opened node has the matching file type.
 *
 * Return: open fd on success, -1 with errno set otherwise.
 */
int nvme_open(const char *name)
{
	int ret, fd, saved, id, ns;
	struct stat stat;
	bool c;

	ret = sscanf(name, "nvme%dn%d", &id, &ns);
	if (ret != 1 && ret != 2) {
		errno = EINVAL;
		return -1;
	}
	/* only the controller number matched: char device expected */
	c = ret == 1;

	fd = __nvme_open(name);
	if (fd < 0)
		return fd;

	ret = fstat(fd, &stat);
	if (ret < 0)
		goto close_fd;

	if (c) {
		if (!S_ISCHR(stat.st_mode)) {
			errno = EINVAL;
			goto close_fd;
		}
	} else if (!S_ISBLK(stat.st_mode)) {
		errno = EINVAL;
		goto close_fd;
	}

	return fd;

close_fd:
	/* close() may overwrite errno; preserve the real cause for the caller */
	saved = errno;
	close(fd);
	errno = saved;
	return -1;
}
+
/*
 * nvme_fw_download_split() - Transfer a firmware image to the controller
 * in chunks of at most @xfer bytes, starting at @offset.
 *
 * Return: 0 on success, or the first non-zero nvme_fw_download() result.
 */
int nvme_fw_download_split(int fd, __u32 size, __u32 xfer, __u32 offset,
			   void *buf)
{
	int err;

	for (; size > 0; size -= xfer, offset += xfer, buf += xfer) {
		if (xfer > size)
			xfer = size;

		err = nvme_fw_download(fd, offset, xfer, buf);
		if (err)
			return err;
	}

	return 0;
}
+
+int nvme_get_log_page(int fd, __u32 nsid, __u8 log_id, bool rae,
+ __u32 data_len, void *data)
+{
+ __u64 offset = 0, xfer_len = data_len;
+ void *ptr = data;
+ int ret;
+
+ /*
+ * 4k is the smallest possible transfer unit, so restricting to 4k
+ * avoids having to check the MDTS value of the controller.
+ */
+ do {
+ xfer_len = data_len - offset;
+ if (xfer_len > 4096)
+ xfer_len = 4096;
+
+ ret = nvme_get_log(fd, log_id, nsid, offset, NVME_LOG_LSP_NONE,
+ NVME_LOG_LSI_NONE, rae, NVME_UUID_NONE,
+ xfer_len, ptr);
+ if (ret)
+ return ret;
+
+ offset += xfer_len;
+ ptr += xfer_len;
+ } while (offset < data_len);
+
+ return 0;
+}
+
+int nvme_get_telemetry_log(int fd, bool create, bool ctrl, int data_area,
+ void **buf, __u32 *log_size)
+{
+ struct nvme_telemetry_log *telem;
+ static const __u32 xfer = 512;
+ __u32 size, offset = xfer;
+ void *log;
+ int err;
+
+ log = malloc(xfer);
+ if (!log) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+ if (ctrl)
+ err = nvme_get_log_telemetry_ctrl(fd, true, 0, xfer, log);
+ else if (create)
+ err = nvme_get_log_create_telemetry_host(fd, log);
+ else
+ err = nvme_get_log_telemetry_host(fd, 0, xfer, log);
+
+ if (err)
+ goto free;
+
+ telem = log;
+ if (!telem->ctrlavail) {
+ size = xfer;
+ goto done;
+ }
+
+ switch (data_area) {
+ case 1:
+ size = (le16_to_cpu(telem->dalb1) * xfer) + xfer;
+ break;
+ case 2:
+ size = (le16_to_cpu(telem->dalb2) * xfer) + xfer;
+ break;
+ case 3:
+ size = (le16_to_cpu(telem->dalb3) * xfer) + xfer;
+ break;
+ default:
+ errno = EINVAL;
+ err = -1;
+ goto free;
+ }
+
+ log = realloc(log, size);
+ if (!log) {
+ errno = ENOMEM;
+ err = -1;
+ goto free;
+ }
+
+ while (offset != size) {
+ if (ctrl)
+ err = nvme_get_log_telemetry_ctrl(fd, true, offset,
+ xfer, log + offset);
+ else
+ err = nvme_get_log_telemetry_host(fd, offset, xfer,
+ log + offset);
+ if (err)
+ goto free;
+ offset += xfer;
+ }
+done:
+ *log_size = size;
+ *buf = log;
+ return 0;
+free:
+ free(log);
+ return err;
+}
+
+void nvme_setup_dsm_range(struct nvme_dsm_range *dsm, __u32 *ctx_attrs,
+ __u32 *llbas, __u64 *slbas, __u16 nr_ranges)
+{
+ int i;
+
+ for (i = 0; i < nr_ranges; i++) {
+ dsm[i].cattr = cpu_to_le32(ctx_attrs[i]);
+ dsm[i].nlb = cpu_to_le32(llbas[i]);
+ dsm[i].slba = cpu_to_le64(slbas[i]);
+ }
+}
+
+void nvme_setup_id_ns(struct nvme_id_ns *ns, __u64 nsze, __u64 ncap, __u8 flbas,
+ __u8 dps, __u8 nmic, __u32 anagrpid, __u16 nvmsetid)
+{
+ memset(ns, 0, sizeof(*ns));
+ ns->nsze = cpu_to_le64(nsze);
+ ns->ncap = cpu_to_le64(ncap);
+ ns->flbas = flbas;
+ ns->dps = dps;
+ ns->nmic = nmic;
+ ns->anagrpid = cpu_to_le32(anagrpid);
+ ns->nvmsetid = cpu_to_le16(nvmsetid);
+}
+
+void nvme_setup_ctrl_list(struct nvme_ctrl_list *cntlist, __u16 num_ctrls,
+ __u16 *ctrlist)
+{
+ int i;
+
+ cntlist->num = cpu_to_le16(num_ctrls);
+ for (i = 0; i < num_ctrls; i++)
+ cntlist->identifier[i] = cpu_to_le16(ctrlist[i]);
+}
+
+static int nvme_ns_attachment(int fd, __u32 nsid, __u16 num_ctrls, __u16 *ctrlist, bool attach)
+{
+ struct nvme_ctrl_list cntlist = { 0 };
+ enum nvme_ns_attach_sel sel;
+
+ if (attach)
+ sel = NVME_NS_ATTACH_SEL_CTRL_ATTACH;
+ else
+ sel = NVME_NS_ATTACH_SEL_CTRL_DEATTACH;
+
+ nvme_setup_ctrl_list(&cntlist, num_ctrls, ctrlist);
+ return nvme_ns_attach(fd, nsid, sel, &cntlist);
+}
+
/* Attach namespace @nsid to the @num_ctrls controllers listed in @ctrlist. */
int nvme_namespace_attach_ctrls(int fd, __u32 nsid, __u16 num_ctrls, __u16 *ctrlist)
{
	return nvme_ns_attachment(fd, nsid, num_ctrls, ctrlist, true);
}
+
/* Detach namespace @nsid from the @num_ctrls controllers listed in @ctrlist. */
int nvme_namespace_detach_ctrls(int fd, __u32 nsid, __u16 num_ctrls, __u16 *ctrlist)
{
	return nvme_ns_attachment(fd, nsid, num_ctrls, ctrlist, false);
}
+
+int nvme_get_ana_log_len(int fd, size_t *analen)
+{
+ struct nvme_id_ctrl ctrl;
+ int ret;
+
+ ret = nvme_identify_ctrl(fd, &ctrl);
+ if (ret)
+ return ret;
+
+ *analen = sizeof(struct nvme_ana_log) +
+ le32_to_cpu(ctrl.nanagrpid) * sizeof(struct nvme_ana_group_desc) +
+ le32_to_cpu(ctrl.mnan) * sizeof(__le32);
+ return 0;
+}
+
/**
 * nvme_get_feature_length() - Report the data buffer length used by a
 * Get/Set Features command for feature @fid
 * @fid: Feature identifier
 * @cdw11: Feature-specific command dword 11; only consulted for the host
 * identifier feature, where bit 0 selects the 128-bit (16 byte) format
 * @len: Output: transfer length in bytes, 0 for features whose value is
 * carried entirely in the command dwords
 *
 * Return: 0 on success, or EINVAL for an unrecognized @fid.
 * NOTE(review): this returns a positive EINVAL while the sibling
 * nvme_get_directive_receive_length() returns -EINVAL; confirm callers
 * expect this asymmetry before unifying.
 */
int nvme_get_feature_length(int fid, __u32 cdw11, __u32 *len)
{
	switch (fid) {
	case NVME_FEAT_FID_LBA_RANGE:
		*len = sizeof(struct nvme_lba_range_type);
		break;
	case NVME_FEAT_FID_AUTO_PST:
		*len = sizeof(struct nvme_feat_auto_pst);
		break;
	case NVME_FEAT_FID_PLM_CONFIG:
		*len = sizeof(struct nvme_plm_config);
		break;
	case NVME_FEAT_FID_TIMESTAMP:
		*len = sizeof(struct nvme_timestamp);
		break;
	case NVME_FEAT_FID_HOST_BEHAVIOR:
		*len = sizeof(struct nvme_feat_host_behavior);
		break;
	case NVME_FEAT_FID_HOST_ID:
		/* bit 0 of cdw11 selects the extended 128-bit host id */
		*len = (cdw11 & 0x1) ? 16 : 8;
		break;
	case NVME_FEAT_FID_ARBITRATION:
	case NVME_FEAT_FID_POWER_MGMT:
	case NVME_FEAT_FID_TEMP_THRESH:
	case NVME_FEAT_FID_ERR_RECOVERY:
	case NVME_FEAT_FID_VOLATILE_WC:
	case NVME_FEAT_FID_NUM_QUEUES:
	case NVME_FEAT_FID_IRQ_COALESCE:
	case NVME_FEAT_FID_IRQ_CONFIG:
	case NVME_FEAT_FID_WRITE_ATOMIC:
	case NVME_FEAT_FID_ASYNC_EVENT:
	case NVME_FEAT_FID_HOST_MEM_BUF:
	case NVME_FEAT_FID_KATO:
	case NVME_FEAT_FID_HCTM:
	case NVME_FEAT_FID_NOPSC:
	case NVME_FEAT_FID_RRL:
	case NVME_FEAT_FID_PLM_WINDOW:
	case NVME_FEAT_FID_LBA_STS_INTERVAL:
	case NVME_FEAT_FID_SANITIZE:
	case NVME_FEAT_FID_ENDURANCE_EVT_CFG:
	case NVME_FEAT_FID_SW_PROGRESS:
	case NVME_FEAT_FID_RESV_MASK:
	/* NOTE(review): missing the FID_ prefix used by every other
	 * feature constant here — confirm this name is intentional. */
	case NVME_FEAT_RESV_PERSIST:
	case NVME_FEAT_FID_WRITE_PROTECT:
		*len = 0;
		break;
	default:
		return EINVAL;
	}
	return 0;
}
+
+int nvme_get_directive_receive_length(__u8 dtype, __u8 doper, __u32 *len)
+{
+ switch (dtype) {
+ case NVME_DIRECTIVE_DTYPE_IDENTIFY:
+ switch (doper) {
+ case NVME_DIRECTIVE_RECEIVE_IDENTIFY_DOPER_PARAM:
+ *len = sizeof(struct nvme_id_directives);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case NVME_DIRECTIVE_DTYPE_STREAMS:
+ switch (doper) {
+ case NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_PARAM:
+ *len = sizeof(struct nvme_streams_directive_params);
+ break;
+ case NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_STATUS:
+ *len = (128 * 1024) * sizeof(__le16);
+ break;
+ case NVME_DIRECTIVE_RECEIVE_STREAMS_DOPER_RESOURCE:
+ *len = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/*
 * Write @value to the file at @path (a sysfs attribute).
 *
 * Return: the number of bytes written, or -1 on open/write failure with
 * errno set.
 */
static int __nvme_set_attr(const char *path, const char *value)
{
	int ret = -1, fd;

	fd = open(path, O_WRONLY);
	if (fd >= 0) {
		ret = write(fd, value, strlen(value));
		close(fd);
	}
	return ret;
}
+
/*
 * nvme_set_attr() - Write @value to the sysfs attribute @dir/@attr.
 *
 * Return: bytes written on success, -1 on failure.
 */
int nvme_set_attr(const char *dir, const char *attr, const char *value)
{
	char *path;
	int ret;

	if (asprintf(&path, "%s/%s", dir, attr) < 0)
		return -1;

	ret = __nvme_set_attr(path, value);
	free(path);
	return ret;
}
+
+static char *__nvme_get_attr(const char *path)
+{
+ char value[4096] = { 0 };
+ int ret, fd;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return NULL;
+
+ memset(value, 0, sizeof(value));
+ ret = read(fd, value, sizeof(value) - 1);
+ if (ret < 0) {
+ close(fd);
+ return NULL;
+ }
+
+ if (value[strlen(value) - 1] == '\n')
+ value[strlen(value) - 1] = '\0';
+ while (strlen(value) > 0 && value[strlen(value) - 1] == ' ')
+ value[strlen(value) - 1] = '\0';
+
+ close(fd);
+ return strdup(value);
+}
+
/*
 * Read the sysfs attribute @dir/@attr.
 *
 * Return: malloc'd string (caller frees) or NULL on failure.
 */
static char *nvme_get_attr(const char *dir, const char *attr)
{
	char *path, *value;

	if (asprintf(&path, "%s/%s", dir, attr) < 0)
		return NULL;

	value = __nvme_get_attr(path);
	free(path);
	return value;
}
+
/* Read sysfs attribute @attr of subsystem @s; caller frees the result. */
char *nvme_get_subsys_attr(nvme_subsystem_t s, const char *attr)
{
	return nvme_get_attr(nvme_subsystem_get_sysfs_dir(s), attr);
}
+
/* Read sysfs attribute @attr of controller @c; caller frees the result. */
char *nvme_get_ctrl_attr(nvme_ctrl_t c, const char *attr)
{
	return nvme_get_attr(nvme_ctrl_get_sysfs_dir(c), attr);
}
+
/* Read sysfs attribute @attr of namespace @n; caller frees the result. */
char *nvme_get_ns_attr(nvme_ns_t n, const char *attr)
{
	return nvme_get_attr(nvme_ns_get_sysfs_dir(n), attr);
}
+
/* Read sysfs attribute @attr of path @p; caller frees the result. */
char *nvme_get_path_attr(nvme_path_t p, const char *attr)
{
	return nvme_get_attr(nvme_path_get_sysfs_dir(p), attr);
}
--- /dev/null
+#ifndef _LIBNVME_UTIL_H
+#define _LIBNVME_UTIL_H
+
+#include <stdbool.h>
+#include <linux/types.h>
+
+#include "types.h"
+
+/**
+ * nvme_status_type() - Returns SCT(Status Code Type) in status field of
+ * the completion queue entry.
+ * @status: return value from nvme passthrough commands, which is the nvme
+ * status field, located at DW3 in completion queue entry
+ */
+static inline __u8 nvme_status_type(__u16 status)
+{
+ return (status & NVME_SCT_MASK) >> 8;
+}
+
+/**
+ * nvme_status_to_string() -
+ */
+const char *nvme_status_to_string(int status, bool fabrics);
+
+/*
+ * nvme_status_to_errno() - Converts nvme return status to errno
+ * @status: >= 0 for nvme status field in completion queue entry,
+ * < 0 for linux internal errors
+ * @fabrics: true if given status is for fabrics
+ *
+ * Notes: This function will convert a given status to an errno
+ */
+__u8 nvme_status_to_errno(int status, bool fabrics);
+
+/**
+ * nvme_fw_download_split() -
+ */
+int nvme_fw_download_split(int fd, __u32 size, __u32 xfer, __u32 offset,
+ void *buf);
+
+/**
+ * nvme_get_telemetry_log() -
+ */
+int nvme_get_telemetry_log(int fd, bool create, bool ctrl, int data_area,
+ void **buf, __u32 *log_size);
+
+/**
+ * nvme_setup_id_ns() -
+ */
+void nvme_setup_id_ns(struct nvme_id_ns *ns, __u64 nsze, __u64 ncap, __u8 flbas,
+ __u8 dps, __u8 nmic, __u32 anagrpid, __u16 nvmsetid);
+
+/**
+ * nvme_setup_ctrl_list() -
+ */
+void nvme_setup_ctrl_list(struct nvme_ctrl_list *cntlist, __u16 num_ctrls,
+ __u16 *ctrlist);
+
+/**
+ * nvme_dsm_range() - Constructs a data set range structure
+ * @dsm: DSM range array
+ * @ctx_attrs: Array of context attributes
+ * @llbas: Array of length in logical blocks
+ * @slbas: Array of starting logical blocks
+ * @nr_ranges: The size of the dsm arrays
+ *
+ * Each array must be the same size of size 'nr_ranges'.
+ *
+ * Return: The nvme command status if a response was received or -errno
+ * otherwise.
+ */
+void nvme_setup_dsm_range(struct nvme_dsm_range *dsm, __u32 *ctx_attrs,
+ __u32 *llbas, __u64 *slbas, __u16 nr_ranges);
+
+/**
+ * nvme_get_log_page() -
+ */
+int nvme_get_log_page(int fd, __u32 nsid, __u8 log_id, bool rae,
+ __u32 data_len, void *data);
+
+/**
+ * nvme_get_ana_log_len() -
+ */
+int nvme_get_ana_log_len(int fd, size_t *analen);
+
+/**
+ * nvme_namespace_attach_ctrls() - Attach namespace to controller(s)
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID to attach
+ * @num_ctrls: Number of controllers in ctrlist
+ * @ctrlist: List of controller IDs to perform the attach action
+ *
+ * Return: The nvme command status if a response was received or -errno
+ * otherwise.
+ */
+int nvme_namespace_attach_ctrls(int fd, __u32 nsid, __u16 num_ctrls, __u16 *ctrlist);
+
+/**
+ * nvme_namespace_detach_ctrls() - Detach namespace from controller(s)
+ * @fd: File descriptor of nvme device
+ * @nsid: Namespace ID to detach
+ * @num_ctrls: Number of controllers in ctrlist
+ * @ctrlist: List of controller IDs to perform the detach action
+ *
+ * Return: The nvme command status if a response was received or -errno
+ * otherwise.
+ */
+int nvme_namespace_detach_ctrls(int fd, __u32 nsid, __u16 num_ctrls, __u16 *ctrlist);
+
+/**
+ * nvme_get_feature_length() -
+ */
+int nvme_get_feature_length(int fid, __u32 cdw11, __u32 *len);
+
+/**
+ * nvme_get_directive_receive_length() -
+ */
+int nvme_get_directive_receive_length(__u8 dtype, __u8 doper, __u32 *len);
+
+/**
+ * nvme_open() - Open an nvme controller or namespace device
+ * @name: The basename of the device to open
+ *
+ * This will look for the handle in /dev/ and validate the name and filetype
+ * match linux conventions.
+ *
+ * Return: A file descriptor for the device on a successful open, or -1 with
+ * errno set otherwise.
+ */
+int nvme_open(const char *name);
+
+int nvme_set_attr(const char *dir, const char *attr, const char *value);
+#endif /* _LIBNVME_UTIL_H */
--- /dev/null
+#include <stdio.h>
+#include <string.h>
+#include <stdbool.h>
+#include <libnvme.h>
+
+static char *nqn_match;
+
+static bool nvme_match_subsysnqn_filter(nvme_subsystem_t s)
+{
+ return strcmp(nvme_subsystem_get_nqn(s), nqn_match) == 0;
+}
+
+static int test_ctrl(nvme_ctrl_t c)
+{
+ static __u8 buf[0x1000];
+
+ enum nvme_get_features_sel sel = NVME_GET_FEATURES_SEL_CURRENT;
+ int ret, temp, fd = nvme_ctrl_get_fd(c);
+ struct nvme_error_log_page error[64];
+ struct nvme_smart_log smart = { 0 };
+ struct nvme_firmware_slot fw = { 0 };
+ struct nvme_ns_list ns_list = { 0 };
+ struct nvme_cmd_effects_log cfx = { 0 };
+ struct nvme_self_test_log st = { 0 };
+ struct nvme_telemetry_log *telem = (void *)buf;
+ struct nvme_endurance_group_log eglog = { 0 };
+ struct nvme_ana_group_desc *analog = (void *)buf;
+ struct nvme_resv_notification_log resvnotify = { 0 };
+ struct nvme_sanitize_log_page sanlog = { 0 };
+
+ struct nvme_id_uuid_list uuid = { 0 };
+ struct nvme_id_ns_granularity_list gran = { 0 };
+ struct nvme_secondary_ctrl_list sec = { 0 };
+ struct nvme_primary_ctrl_cap prim = { 0 };
+ struct nvme_ctrl_list ctrlist = { 0 };
+ struct nvme_id_ctrl id = { 0 };
+
+ __u32 result;
+
+ ret = nvme_ctrl_identify(c, &id);
+ if (ret) {
+ printf("ERROR: no identify for:%s\n", nvme_ctrl_get_name(c));
+ return ret;
+ }
+
+ ret = nvme_get_log_smart(fd, NVME_NSID_ALL, true, &smart);
+ if (ret) {
+ printf("ERROR: no smart log for:%s %x\n", nvme_ctrl_get_name(c), ret);
+ return ret;
+ }
+
+ temp = ((smart.temperature[1] << 8) | smart.temperature[0]) - 273;
+ printf("Controller:%s\n", nvme_ctrl_get_name(c));
+ printf("\nIdentify:\n");
+ printf(" vid:%04x ssvid:%04x oacs:%x lpa%x\n",
+ le16_to_cpu(id.vid), le16_to_cpu(id.ssvid),
+ id.oacs, id.lpa);
+ printf(" sn:%-.20s model:%-.40s\n", id.sn, id.mn);
+
+ ret = nvme_identify_allocated_ns_list(fd, 0, &ns_list);
+ if (!ret)
+ printf(" Allocated NS List:\n");
+ else
+ printf(" ERROR: Allocated NS List:%x\n", ret);
+ ret = nvme_identify_active_ns_list(fd, 0, &ns_list);
+ if (!ret)
+ printf(" Active NS List:\n");
+ else
+ printf(" ERROR: Active NS List:%x\n", ret);
+ ret = nvme_identify_ctrl_list(fd, 0, &ctrlist);
+ if (!ret)
+ printf(" Ctrl List:\n");
+ else
+ printf(" ERROR: CtrlList:%x\n", ret);
+ ret = nvme_identify_nsid_ctrl_list(fd, 1, 0, &ctrlist);
+ if (!ret)
+ printf(" NSID Ctrl List:\n");
+ else
+ printf(" ERROR: NSID CtrlList:%x\n", ret);
+ ret = nvme_identify_primary_ctrl(fd, 0, &prim);
+ if (!ret)
+ printf(" Identify Primary:\n");
+ else
+ printf(" ERROR: Identify Primary:%x\n", ret);
+ ret = nvme_identify_secondary_ctrl_list(fd, 0, &sec);
+ if (!ret)
+ printf(" Identify Secondary:\n");
+ else
+ printf(" ERROR: Identify Secondary:%x\n", ret);
+ ret = nvme_identify_ns_granularity(fd, &gran);
+ if (!ret)
+ printf(" Identify NS granularity:\n");
+ else
+ printf(" ERROR: Identify NS granularity:%x\n", ret);
+ ret = nvme_identify_uuid(fd, &uuid);
+ if (!ret)
+ printf(" Identify UUID List:\n");
+ else
+ printf(" ERROR: Identify UUID List:%x\n", ret);
+
+ printf("\nLogs\n");
+ printf(" SMART: Current temperature:%d percent used:%d%%\n", temp,
+ smart.percent_used);
+ ret = nvme_get_log_sanitize(fd, true, &sanlog);
+ if (!ret)
+ printf(" Sanitize Log:\n");
+ else
+ printf(" ERROR: Sanitize Log:%x\n", ret);
+ ret = nvme_get_log_reservation(fd, true, &resvnotify);
+ if (!ret)
+ printf(" Reservation Log\n");
+ else
+ printf(" ERROR: Reservation Log :%x\n", ret);
+ ret = nvme_get_log_ana_groups(fd, true, sizeof(buf), analog);
+ if (!ret)
+ printf(" ANA Groups\n");
+ else
+ printf(" ERROR: ANA Groups :%x\n", ret);
+ ret = nvme_get_log_endurance_group(fd, 0, &eglog);
+ if (!ret)
+ printf(" Endurance Group\n");
+ else
+ printf(" ERROR: Endurance Group :%x\n", ret);
+ ret = nvme_get_log_telemetry_ctrl(fd, true, 0, sizeof(buf), telem);
+ if (!ret)
+ printf(" Telemetry Controller\n");
+ else
+ printf(" ERROR: Telemetry Controller :%x\n", ret);
+ ret = nvme_get_log_device_self_test(fd, &st);
+ if (!ret)
+ printf(" Device Self Test\n");
+ else
+ printf(" ERROR: Device Self Test :%x\n", ret);
+ ret = nvme_get_log_cmd_effects(fd, &cfx);
+ if (!ret)
+ printf(" Command Effects\n");
+ else
+ printf(" ERROR: Command Effects :%x\n", ret);
+ ret = nvme_get_log_changed_ns_list(fd, true, &ns_list);
+ if (!ret)
+ printf(" Change NS List\n");
+ else
+ printf(" ERROR: Change NS List :%x\n", ret);
+ ret = nvme_get_log_fw_slot(fd, true, &fw);
+ if (ret)
+ printf(" FW Slot\n");
+ else
+ printf(" ERROR: FW Slot :%x\n", ret);
+ ret = nvme_get_log_error(fd, 64, true, error);
+ if (!ret)
+ printf(" Error Log\n");
+ else
+ printf(" ERROR: Error Log :%x\n", ret);
+ printf("\nFeatures\n");
+ ret = nvme_get_features_arbitration(fd, sel, &result);
+ if (!ret)
+ printf(" Arbitration:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: Arbitration:%x\n", ret);
+ ret = nvme_get_features_power_mgmt(fd, sel, &result);
+ if (!ret)
+ printf(" Power Management:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: Power Management:%x\n", ret);
+ ret = nvme_get_features_temp_thresh(fd, sel, &result);
+ if (!ret)
+ printf(" Temperature Threshold:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: Temperature Threshold:%x\n", ret);
+ ret = nvme_get_features_err_recovery(fd, sel, &result);
+ if (!ret)
+ printf(" Error Recovery:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: Error Recovery:%x\n", ret);
+ ret = nvme_get_features_volatile_wc(fd, sel, &result);
+ if (!ret)
+ printf(" Volatile Write Cache:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: Volatile Write Cache:%x\n", ret);
+ ret = nvme_get_features_num_queues(fd, sel, &result);
+ if (!ret)
+ printf(" Number of Queues:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: Number of Queues:%x\n", ret);
+ ret = nvme_get_features_irq_coalesce(fd, sel, &result);
+ if (!ret)
+ printf(" IRQ Coalescing:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: IRQ Coalescing:%x\n", ret);
+ ret = nvme_get_features_write_atomic(fd, sel, &result);
+ if (!ret)
+ printf(" Write Atomic:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: Write Atomic:%x\n", ret);
+ ret = nvme_get_features_async_event(fd, sel, &result);
+ if (!ret)
+ printf(" Asycn Event Config:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: Asycn Event Config:%x\n", ret);
+ ret = nvme_get_features_hctm(fd, sel, &result);
+ if (!ret)
+ printf(" HCTM:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: HCTM:%x\n", ret);
+ ret = nvme_get_features_nopsc(fd, sel, &result);
+ if (!ret)
+ printf(" NOP Power State Config:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: NOP Power State Configrbitration:%x\n", ret);
+ ret = nvme_get_features_rrl(fd, sel, &result);
+ if (!ret)
+ printf(" Read Recover Levels:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: Read Recover Levels:%x\n", ret);
+ ret = nvme_get_features_lba_sts_interval(fd, sel, &result);
+ if (!ret)
+ printf(" LBA Status Interval:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: LBA Status Interval:%x\n", ret);
+ ret = nvme_get_features_sanitize(fd, sel, &result);
+ if (!ret)
+ printf(" Sanitize:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: SW Progress Marker:%x\n", ret);
+ ret = nvme_get_features_sw_progress(fd, sel, &result);
+ if (!ret)
+ printf(" SW Progress Marker:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: Sanitize:%x\n", ret);
+ ret = nvme_get_features_resv_mask(fd, sel, &result);
+ if (!ret)
+ printf(" Reservation Mask:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: Reservation Mask:%x\n", ret);
+ ret = nvme_get_features_resv_persist(fd, sel, &result);
+ if (!ret)
+ printf(" Reservation Persistence:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: Reservation Persistence:%x\n", ret);
+ ret = nvme_get_features_write_protect(fd, 1, sel, &result);
+ if (!ret)
+ printf(" Write Protect:%x\n", result);
+ else if (ret > 0)
+ printf(" ERROR: Write Protect:%x\n", ret);
+ return 0;
+}
+
+static int test_namespace(nvme_ns_t n)
+{
+ struct nvme_id_ns ns = { 0 }, allocated = { 0 };
+ int nsid = nvme_ns_get_nsid(n);
+ int ret, fd = nvme_ns_get_fd(n);
+ struct nvme_ns_id_desc descs = { 0 };
+
+ ret = nvme_ns_identify(n, &ns);
+ if (ret)
+ return ret;
+
+ printf("%s: nsze:%lx lba size:%d\n", nvme_ns_get_name(n), le64_to_cpu(ns.nsze),
+ 1 << ns.lbaf[ns.flbas & NVME_NS_FLBAS_LBA_MASK].ds);
+
+ ret = nvme_identify_allocated_ns(fd, nsid, &allocated);
+ if (!ret)
+ printf(" Identify allocated ns\n");
+ else
+ printf(" ERROR: Identify allocated ns:%x\n", ret);
+ ret = nvme_identify_ns_descs(fd, nsid, &descs);
+ if (!ret)
+ printf(" Identify NS Descriptorss\n");
+ else
+ printf(" ERROR: Identify NS Descriptors:%x\n", ret);
+ return 0;
+}
+
+int main()
+{
+ nvme_root_t r;
+ nvme_subsystem_t s;
+ nvme_ctrl_t c;
+ nvme_path_t p;
+ nvme_ns_t n;
+
+ r = nvme_scan();
+ if (!r)
+ return -1;
+
+ printf("Test walking the topology\n");
+ nvme_for_each_subsystem(r, s) {
+ printf("%s - NQN=%s\n", nvme_subsystem_get_name(s),
+ nvme_subsystem_get_nqn(s));
+ nvme_subsystem_for_each_ctrl(s, c) {
+ printf(" +- %s %s %s %s\n", nvme_ctrl_get_name(c),
+ nvme_ctrl_get_transport(c),
+ nvme_ctrl_get_address(c),
+ nvme_ctrl_get_state(c));
+
+ nvme_ctrl_for_each_ns(c, n)
+ printf(" +- %s lba size:%d lba max:%lu\n",
+ nvme_ns_get_name(n), nvme_ns_get_lba_size(n),
+ nvme_ns_get_lba_count(n));
+
+ nvme_ctrl_for_each_path(c, p)
+ printf(" +- %s %s\n", nvme_path_get_name(p),
+ nvme_path_get_ana_state(p));
+ }
+
+ nvme_subsystem_for_each_ns(s, n) {
+ printf(" +- %s lba size:%d lba max:%lu\n",
+ nvme_ns_get_name(n), nvme_ns_get_lba_size(n),
+ nvme_ns_get_lba_count(n));
+ }
+ }
+ printf("\n");
+
+ nvme_for_each_subsystem(r, s) {
+ bool first = true;
+ printf("%s %s ", nvme_subsystem_get_name(s),
+ nvme_subsystem_get_nqn(s));
+
+ nvme_subsystem_for_each_ctrl(s, c) {
+ printf("%s%s", first ? "": ", ", nvme_ctrl_get_name(c));
+ first = false;
+ }
+ printf("\n");
+ }
+ printf("\n");
+
+ nvme_for_each_subsystem(r, s) {
+ nvme_subsystem_for_each_ctrl(s, c) {
+ bool first = true;
+
+ printf("%s %s %s %s %s %s %s ", nvme_ctrl_get_name(c),
+ nvme_ctrl_get_serial(c), nvme_ctrl_get_model(c),
+ nvme_ctrl_get_firmware(c),
+ nvme_ctrl_get_transport(c),
+ nvme_ctrl_get_address(c),
+ nvme_subsystem_get_name(s));
+
+ nvme_ctrl_for_each_ns(c, n) {
+ printf("%s%s", first ? "": ", ",
+ nvme_ns_get_name(n));
+ first = false;
+ }
+
+ nvme_ctrl_for_each_path(c, p) {
+ printf("%s%s", first ? "": ", ",
+ nvme_ns_get_name(nvme_path_get_ns(p)));
+ first = false;
+ }
+ printf("\n");
+ }
+ }
+ printf("\n");
+
+ nvme_for_each_subsystem(r, s) {
+ nvme_subsystem_for_each_ctrl(s, c)
+ nvme_ctrl_for_each_ns(c, n)
+ printf("%s %d %lu/%lu %d %s\n",
+ nvme_ns_get_name(n),
+ nvme_ns_get_nsid(n),
+ nvme_ns_get_lba_count(n),
+ nvme_ns_get_lba_util(n),
+ nvme_ns_get_lba_size(n),
+ nvme_ctrl_get_name(c));
+
+ nvme_subsystem_for_each_ns(s, n) {
+ bool first = true;
+
+ printf("%s %d %lu/%lu %d ", nvme_ns_get_name(n),
+ nvme_ns_get_nsid(n),
+ nvme_ns_get_lba_count(n),
+ nvme_ns_get_lba_util(n),
+ nvme_ns_get_lba_size(n));
+ nvme_subsystem_for_each_ctrl(s, c) {
+ printf("%s%s", first ? "" : ", ",
+ nvme_ctrl_get_name(c));
+ first = false;
+ }
+ printf("\n");
+ }
+ }
+ printf("\n");
+
+ printf("Test identification, logs, and features\n");
+ nvme_for_each_subsystem(r, s) {
+ nvme_subsystem_for_each_ctrl(s, c) {
+ test_ctrl(c);
+ nvme_ctrl_for_each_ns(c, n)
+ test_namespace(n);
+ printf("\n");
+ }
+ nvme_subsystem_for_each_ns(s, n)
+ test_namespace(n);
+ }
+ printf("\n");
+
+ nvme_free_tree(r);
+
+ printf("Test filter for common loop back target\n");
+ nqn_match = "testnqn";
+ r = nvme_scan_filter(nvme_match_subsysnqn_filter);
+ nvme_for_each_subsystem(r, s) {
+ printf("%s - NQN=%s\n", nvme_subsystem_get_name(s),
+ nvme_subsystem_get_nqn(s));
+ nvme_subsystem_for_each_ctrl(s, c) {
+ printf(" `- %s %s %s %s\n", nvme_ctrl_get_name(c),
+ nvme_ctrl_get_transport(c),
+ nvme_ctrl_get_address(c),
+ nvme_ctrl_get_state(c));
+ }
+ }
+ printf("\n");
+ nvme_free_tree(r);
+
+ printf("Test scan specific controller\n");
+ c = nvme_scan_ctrl("nvme4");
+ if (c) {
+ printf("%s %s %s %s\n", nvme_ctrl_get_name(c),
+ nvme_ctrl_get_transport(c),
+ nvme_ctrl_get_address(c),
+ nvme_ctrl_get_state(c));
+ nvme_free_ctrl(c);
+ }
+
+ return 0;
+}