From: Keith Busch Date: Tue, 9 Jun 2020 16:49:58 +0000 (-0700) Subject: common code for all the complex value accessors X-Git-Tag: v1.0-rc0~145 X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=5f857ba56c78f5826f961d19c653e0d3ed77c46b;p=users%2Fsagi%2Flibnvme.git common code for all the complex value accessors Signed-off-by: Keith Busch --- diff --git a/src/nvme/ioctl.c b/src/nvme/ioctl.c index d02df36f..0730f866 100644 --- a/src/nvme/ioctl.c +++ b/src/nvme/ioctl.c @@ -20,6 +20,7 @@ #include #include "ioctl.h" +#include "util.h" static int nvme_verify_chr(int fd) { @@ -355,16 +356,14 @@ enum features { NVME_FEATURES_IOCSP_IOCSCI_MASK = 0xff, }; -#define DW(value, prefix) ((value) & (prefix ## _MASK)) << prefix ## _SHIFT - int nvme_identify(int fd, enum nvme_identify_cns cns, __u32 nsid, __u16 cntid, __u16 nvmsetid, __u8 uuidx, __u8 csi, void *data) { - __u32 cdw10 = DW(cntid, NVME_IDENTIFY_CDW10_CNTID) | - DW(cns, NVME_IDENTIFY_CDW10_CNS); - __u32 cdw11 = DW(nvmsetid, NVME_IDENTIFY_CDW11_NVMSETID) | - DW(csi, NVME_IDENTIFY_CDW11_CSI); - __u32 cdw14 = DW(uuidx, NVME_IDENTIFY_CDW14_UUID); + __u32 cdw10 = NVME_SET(cntid, IDENTIFY_CDW10_CNTID) | + NVME_SET(cns, IDENTIFY_CDW10_CNS); + __u32 cdw11 = NVME_SET(nvmsetid, IDENTIFY_CDW11_NVMSETID) | + NVME_SET(csi, IDENTIFY_CDW11_CSI); + __u32 cdw14 = NVME_SET(uuidx, IDENTIFY_CDW14_UUID); struct nvme_passthru_cmd cmd = { .opcode = nvme_admin_identify, @@ -526,16 +525,16 @@ int nvme_get_log(int fd, enum nvme_cmd_get_log_lid lid, __u32 nsid, __u64 lpo, __u32 numd = (len >> 2) - 1; __u16 numdu = numd >> 16, numdl = numd & 0xffff; - __u32 cdw10 = DW(lid, NVME_LOG_CDW10_LID) | - DW(lsp, NVME_LOG_CDW10_LSP) | - DW(!!rae, NVME_LOG_CDW10_RAE) | - DW(numdl, NVME_LOG_CDW10_NUMDL); - __u32 cdw11 = DW(numdu, NVME_LOG_CDW11_NUMDU) | - DW(lsi, NVME_LOG_CDW11_LSI); + __u32 cdw10 = NVME_SET(lid, LOG_CDW10_LID) | + NVME_SET(lsp, LOG_CDW10_LSP) | + NVME_SET(!!rae, LOG_CDW10_RAE) | + NVME_SET(numdl, LOG_CDW10_NUMDL); + __u32 cdw11 = NVME_SET(numdu, LOG_CDW11_NUMDU) | + NVME_SET(lsi, LOG_CDW11_LSI); __u32 cdw12 = lpo & 0xffffffff; __u32 cdw13 = lpo >> 32; - __u32 cdw14 = DW(uuidx, NVME_LOG_CDW14_UUID) | - DW(csi, NVME_LOG_CDW14_CSI); + __u32 cdw14 = NVME_SET(uuidx, LOG_CDW14_UUID) | + NVME_SET(csi, LOG_CDW14_CSI); struct nvme_passthru_cmd cmd = { .opcode = nvme_admin_get_log_page, @@ -731,9 +730,9 @@ int nvme_set_features(int fd, __u8 fid, __u32 nsid, __u32 cdw11, __u32 cdw12, bool save, __u8 uuidx, __u32 cdw15, __u32 data_len, void *data, __u32 *result) { - __u32 cdw10 = DW(fid, NVME_FEATURES_CDW10_FID) | - DW(!!save, NVME_SET_FEATURES_CDW10_SAVE); - __u32 cdw14 = DW(uuidx, NVME_FEATURES_CDW14_UUID); + __u32 cdw10 = NVME_SET(fid, FEATURES_CDW10_FID) | + NVME_SET(!!save, SET_FEATURES_CDW10_SAVE); + __u32 cdw14 = NVME_SET(uuidx, FEATURES_CDW14_UUID); struct nvme_passthru_cmd cmd = { .opcode = nvme_admin_set_features, @@ -760,10 +759,10 @@ static int __nvme_set_features(int fd, __u8 fid, __u32 cdw11, bool save, int nvme_set_features_arbitration(int fd, __u8 ab, __u8 lpw, __u8 mpw, __u8 hpw, bool save, __u32 *result) { - __u32 value = DW(ab, NVME_FEATURES_ARBITRATION_BURST) | - DW(lpw, NVME_FEATURES_ARBITRATION_LPW) | - DW(mpw, NVME_FEATURES_ARBITRATION_MPW) | - DW(hpw, NVME_FEATURES_ARBITRATION_HPW); + __u32 value = NVME_SET(ab, FEAT_ARBITRATION_BURST) | + NVME_SET(lpw, FEAT_ARBITRATION_LPW) | + NVME_SET(mpw, FEAT_ARBITRATION_MPW) | + NVME_SET(hpw, FEAT_ARBITRATION_HPW); return __nvme_set_features(fd, NVME_FEAT_FID_ARBITRATION, value, save, result); @@ -772,8 +771,8
@@ int nvme_set_features_arbitration(int fd, __u8 ab, __u8 lpw, __u8 mpw, int nvme_set_features_power_mgmt(int fd, __u8 ps, __u8 wh, bool save, __u32 *result) { - __u32 value = DW(ps, NVME_FEATURES_PWRMGMT_PS) | - DW(wh, NVME_FEATURES_PWRMGMT_PS); + __u32 value = NVME_SET(ps, FEAT_PWRMGMT_PS) | + NVME_SET(wh, FEAT_PWRMGMT_WH); return __nvme_set_features(fd, NVME_FEAT_FID_POWER_MGMT, value, save, result); @@ -788,9 +787,9 @@ int nvme_set_features_lba_range(int fd, __u32 nsid, __u32 nr_ranges, bool save, int nvme_set_features_temp_thresh(int fd, __u16 tmpth, __u8 tmpsel, enum nvme_feat_tmpthresh_thsel thsel, bool save, __u32 *result) { - __u32 value = DW(tmpth, NVME_FEATURES_TMPTH) | - DW(tmpsel, NVME_FEATURES_TMPSEL) | - DW(thsel, NVME_FEATURES_THSEL); + __u32 value = NVME_SET(tmpth, FEAT_TT_TMPTH) | + NVME_SET(tmpsel, FEAT_TT_TMPSEL) | + NVME_SET(thsel, FEAT_TT_THSEL); return __nvme_set_features(fd, NVME_FEAT_FID_TEMP_THRESH, value, save, result); @@ -799,8 +798,8 @@ int nvme_set_features_temp_thresh(int fd, __u16 tmpth, __u8 tmpsel, int nvme_set_features_err_recovery(int fd, __u32 nsid, __u16 tler, bool dulbe, bool save, __u32 *result) { - __u32 value = DW(tler, NVME_FEATURES_ERROR_RECOVERY_TLER) | - DW(!!dulbe, NVME_FEATURES_ERROR_RECOVERY_DULBE); + __u32 value = NVME_SET(tler, FEAT_ERROR_RECOVERY_TLER) | + NVME_SET(!!dulbe, FEAT_ERROR_RECOVERY_DULBE); return __nvme_set_features(fd, NVME_FEAT_FID_ERR_RECOVERY, value, save, result); @@ -808,7 +807,7 @@ int nvme_set_features_err_recovery(int fd, __u32 nsid, __u16 tler, bool dulbe, int nvme_set_features_volatile_wc(int fd, bool wce, bool save, __u32 *result) { - __u32 value = DW(!!wce, NVME_FEATURES_VWC_WCE); + __u32 value = NVME_SET(!!wce, FEAT_VWC_WCE); return __nvme_set_features(fd, NVME_FEAT_FID_VOLATILE_WC, value, save, result); @@ -817,8 +816,8 @@ int nvme_set_features_volatile_wc(int fd, bool wce, bool save, __u32 *result) int nvme_set_features_irq_coalesce(int fd, __u8 thr, __u8 time, bool save, __u32 *result) { - __u32 value = DW(thr, NVME_FEATURES_IRQC_TIME) | - DW(time, NVME_FEATURES_IRQC_THR); + __u32 value = NVME_SET(thr, FEAT_IRQC_THR) | + NVME_SET(time, FEAT_IRQC_TIME); return __nvme_set_features(fd, NVME_FEAT_FID_IRQ_COALESCE, value, save, result); @@ -827,8 +826,8 @@ int nvme_set_features_irq_coalesce(int fd, __u8 thr, __u8 time, bool save, int nvme_set_features_irq_config(int fd, __u16 iv, bool cd, bool save, __u32 *result) { - __u32 value = DW(iv, NVME_FEATURES_IVC_IV) | - DW(!!cd, NVME_FEATURES_IVC_CD); + __u32 value = NVME_SET(iv, FEAT_ICFG_IV) | + NVME_SET(!!cd, FEAT_ICFG_CD); return __nvme_set_features(fd, NVME_FEAT_FID_IRQ_CONFIG, value, save, result); @@ -836,7 +835,7 @@ int nvme_set_features_irq_config(int fd, __u16 iv, bool cd, bool save, int nvme_set_features_write_atomic(int fd, bool dn, bool save, __u32 *result) { - __u32 value = DW(!!dn, NVME_FEATURES_WAN_DN); + __u32 value = NVME_SET(!!dn, FEAT_WA_DN); return __nvme_set_features(fd, NVME_FEAT_FID_WRITE_ATOMIC, value, save, result); @@ -852,7 +851,7 @@ int nvme_set_features_async_event(int fd, __u32 events, int nvme_set_features_auto_pst(int fd, bool apste, bool save, struct nvme_feat_auto_pst *apst, __u32 *result) { - __u32 value = DW(!!apste, NVME_FEATURES_APST_APSTE); + __u32 value = NVME_SET(!!apste, FEAT_APST_APSTE); return __nvme_set_features(fd, NVME_FEAT_FID_AUTO_PST, value, save, result); @@ -872,8 +871,8 @@ int nvme_set_features_timestamp(int fd, bool save, __u64 timestamp) int nvme_set_features_hctm(int fd, __u16 tmt2, __u16 tmt1, bool save, __u32
*result) { - __u32 value = DW(tmt2, NVME_FEATURES_HCTM_TMT2) | - DW(tmt1, NVME_FEATURES_HCTM_TMT1); + __u32 value = NVME_SET(tmt2, FEAT_HCTM_TMT2) | + NVME_SET(tmt1, FEAT_HCTM_TMT1); return __nvme_set_features(fd, NVME_FEAT_FID_HCTM, value, save, result); @@ -881,7 +880,7 @@ int nvme_set_features_hctm(int fd, __u16 tmt2, __u16 tmt1, int nvme_set_features_nopsc(int fd, bool noppme, bool save, __u32 *result) { - __u32 value = DW(noppme, NVME_FEATURES_NOPS_NOPPME); + __u32 value = NVME_SET(noppme, FEAT_NOPS_NOPPME); return __nvme_set_features(fd, NVME_FEAT_FID_NOPSC, value, save, result); @@ -906,7 +905,7 @@ int nvme_set_features_plm_config(int fd, bool plm, __u16 nvmsetid, bool save, int nvme_set_features_plm_window(int fd, enum nvme_feat_plm_window_select sel, __u16 nvmsetid, bool save, __u32 *result) { - __u32 cdw12 = DW(sel, NVME_FEATURES_PLM_WINDOW_SELECT); + __u32 cdw12 = NVME_SET(sel, FEAT_PLMW_WS); return nvme_set_features(fd, NVME_FEAT_FID_PLM_WINDOW, NVME_NSID_NONE, nvmsetid, cdw12, save, NVME_UUID_NONE, 0, 0, @@ -916,8 +915,8 @@ int nvme_set_features_plm_window(int fd, enum nvme_feat_plm_window_select sel, int nvme_set_features_lba_sts_interval(int fd, __u16 lsiri, __u16 lsipi, bool save, __u32 *result) { - __u32 value = DW(lsiri, NVME_FEATURES_LBAS_LSIRI) | - DW(lsipi, NVME_FEATURES_LBAS_LSIPI); + __u32 value = NVME_SET(lsiri, FEAT_LBAS_LSIRI) | + NVME_SET(lsipi, FEAT_LBAS_LSIPI); return __nvme_set_features(fd, NVME_FEAT_FID_LBA_STS_INTERVAL, value, save, result); @@ -983,7 +982,7 @@ int nvme_set_features_write_protect(int fd, enum nvme_feat_nswpcfg_state state, int nvme_set_features_iocs_profile(int fd, __u8 iocsi, bool save) { - __u32 value = DW(iocsi, NVME_FEATURES_IOCSP_IOCSCI); + __u32 value = NVME_SET(iocsi, FEAT_IOCSP_IOCSCI); return __nvme_set_features(fd, NVME_FEAT_FID_IOCS_PROFILE, value, save, NULL); @@ -993,9 +992,9 @@ int nvme_get_features(int fd, enum nvme_features_id fid, __u32 nsid, enum nvme_get_features_sel sel, __u32 cdw11, __u8 uuidx, __u32 data_len, void *data, __u32 *result) { - __u32 cdw10 = DW(fid, NVME_FEATURES_CDW10_FID) | - DW(sel, NVME_GET_FEATURES_CDW10_SEL); - __u32 cdw14 = DW(uuidx, NVME_FEATURES_CDW14_UUID); + __u32 cdw10 = NVME_SET(fid, FEATURES_CDW10_FID) | + NVME_SET(sel, GET_FEATURES_CDW10_SEL); + __u32 cdw14 = NVME_SET(uuidx, FEATURES_CDW14_UUID); struct nvme_passthru_cmd cmd = { .opcode = nvme_admin_get_features, @@ -1218,11 +1217,11 @@ int nvme_format_nvm(int fd, __u32 nsid, __u8 lbaf, enum nvme_cmd_format_pil pil, enum nvme_cmd_format_ses ses, __u32 timeout) { - __u32 cdw10 = DW(lbaf, NVME_FORMAT_CDW10_LBAF) | - DW(mset, NVME_FORMAT_CDW10_MSET) | - DW(pi, NVME_FORMAT_CDW10_PI) | - DW(pil, NVME_FORMAT_CDW10_PIL) | - DW(ses, NVME_FORMAT_CDW10_SES); + __u32 cdw10 = NVME_SET(lbaf, FORMAT_CDW10_LBAF) | + NVME_SET(mset, FORMAT_CDW10_MSET) | + NVME_SET(pi, FORMAT_CDW10_PI) | + NVME_SET(pil, FORMAT_CDW10_PIL) | + NVME_SET(ses, FORMAT_CDW10_SES); struct nvme_passthru_cmd cmd = { .opcode = nvme_admin_format_nvm, @@ -1237,7 +1236,7 @@ int nvme_format_nvm(int fd, __u32 nsid, __u8 lbaf, int nvme_ns_mgmt(int fd, __u32 nsid, enum nvme_ns_mgmt_sel sel, struct nvme_id_ns *ns, __u32 *result, __u32 timeout) { - __u32 cdw10 = DW(sel, NVME_NAMESPACE_MGMT_CDW10_SEL); + __u32 cdw10 = NVME_SET(sel, NAMESPACE_MGMT_CDW10_SEL); __u32 data_len = ns ? 
sizeof(*ns) : 0; struct nvme_passthru_cmd cmd = { @@ -1267,7 +1266,7 @@ int nvme_ns_mgmt_delete(int fd, __u32 nsid) int nvme_ns_attach(int fd, __u32 nsid, enum nvme_ns_attach_sel sel, struct nvme_ctrl_list *ctrlist) { - __u32 cdw10 = DW(sel, NVME_NAMESPACE_ATTACH_CDW10_SEL); + __u32 cdw10 = NVME_SET(sel, NAMESPACE_ATTACH_CDW10_SEL); struct nvme_passthru_cmd cmd = { .opcode = nvme_admin_ns_attach, @@ -1309,9 +1308,9 @@ int nvme_fw_download(int fd, __u32 offset, __u32 data_len, void *data) int nvme_fw_commit(int fd, __u8 slot, enum nvme_fw_commit_ca action, bool bpid) { - __u32 cdw10 = DW(slot, NVME_FW_COMMIT_CDW10_FS) | - DW(action, NVME_FW_COMMIT_CDW10_CA) | - DW(bpid, NVME_FW_COMMIT_CDW10_BPID); + __u32 cdw10 = NVME_SET(slot, FW_COMMIT_CDW10_FS) | + NVME_SET(action, FW_COMMIT_CDW10_CA) | + NVME_SET(bpid, FW_COMMIT_CDW10_BPID); struct nvme_passthru_cmd cmd = { .opcode = nvme_admin_fw_commit, @@ -1325,10 +1324,10 @@ int nvme_security_send(int fd, __u32 nsid, __u8 nssf, __u8 spsp0, __u8 spsp1, __u8 secp, __u32 tl, __u32 data_len, void *data, __u32 *result) { - __u32 cdw10 = DW(secp, NVME_SECURITY_SECP) | - DW(spsp0, NVME_SECURITY_SPSP0) | - DW(spsp1, NVME_SECURITY_SPSP1) | - DW(nssf, NVME_SECURITY_NSSF); + __u32 cdw10 = NVME_SET(secp, SECURITY_SECP) | + NVME_SET(spsp0, SECURITY_SPSP0) | + NVME_SET(spsp1, SECURITY_SPSP1) | + NVME_SET(nssf, SECURITY_NSSF); __u32 cdw11 = tl; struct nvme_passthru_cmd cmd = { @@ -1347,10 +1346,10 @@ int nvme_security_receive(int fd, __u32 nsid, __u8 nssf, __u8 spsp0, __u8 spsp1, __u8 secp, __u32 al, __u32 data_len, void *data, __u32 *result) { - __u32 cdw10 = DW(secp, NVME_SECURITY_SECP) | - DW(spsp0, NVME_SECURITY_SPSP0) | - DW(spsp1, NVME_SECURITY_SPSP1) | - DW(nssf, NVME_SECURITY_NSSF); + __u32 cdw10 = NVME_SET(secp, SECURITY_SECP) | + NVME_SET(spsp0, SECURITY_SPSP0) | + NVME_SET(spsp1, SECURITY_SPSP1) | + NVME_SET(nssf, SECURITY_NSSF); __u32 cdw11 = al; struct nvme_passthru_cmd cmd = { @@ -1372,8 +1371,8 @@ int nvme_get_lba_status(int fd, __u32 nsid, __u64 slba, __u32 mndw, __u16 rl, __u32 cdw10 = slba & 0xffffffff; __u32 cdw11 = slba >> 32; __u32 cdw12 = mndw; - __u32 cdw13 = DW(rl, NVME_GET_LBA_STATUS_CDW13_RL) | - DW(atype, NVME_GET_LBA_STATUS_CDW13_ATYPE); + __u32 cdw13 = NVME_SET(rl, GET_LBA_STATUS_CDW13_RL) | + NVME_SET(atype, GET_LBA_STATUS_CDW13_ATYPE); struct nvme_passthru_cmd cmd = { .opcode = nvme_admin_get_lba_status, @@ -1394,9 +1393,9 @@ int nvme_directive_send(int fd, __u32 nsid, __u16 dspec, __u32 data_len, void *data, __u32 *result) { __u32 cdw10 = data_len ? 
(data_len >> 2) - 1 : 0; - __u32 cdw11 = DW(doper, NVME_DIRECTIVE_CDW11_DOPER) | - DW(dtype, NVME_DIRECTIVE_CDW11_DTYPE) | - DW(dspec, NVME_DIRECTIVE_CDW11_DPSEC); + __u32 cdw11 = NVME_SET(doper, DIRECTIVE_CDW11_DOPER) | + NVME_SET(dtype, DIRECTIVE_CDW11_DTYPE) | + NVME_SET(dspec, DIRECTIVE_CDW11_DPSEC); struct nvme_passthru_cmd cmd = { .opcode = nvme_admin_directive_send, @@ -1415,8 +1414,8 @@ int nvme_directive_send_id_endir(int fd, __u32 nsid, bool endir, enum nvme_directive_dtype dtype, struct nvme_id_directives *id) { - __u32 cdw12 = DW(dtype, NVME_DIRECTIVE_SEND_IDENTIFY_CDW12_DTYPE) | - DW(endir, NVME_DIRECTIVE_SEND_IDENTIFY_CDW12_ENDIR); + __u32 cdw12 = NVME_SET(dtype, DIRECTIVE_SEND_IDENTIFY_CDW12_DTYPE) | + NVME_SET(endir, DIRECTIVE_SEND_IDENTIFY_CDW12_ENDIR); return nvme_directive_send(fd, nsid, 0, NVME_DIRECTIVE_DTYPE_IDENTIFY, NVME_DIRECTIVE_SEND_IDENTIFY_DOPER_ENDIR, @@ -1449,9 +1448,9 @@ int nvme_directive_recv(int fd, __u32 nsid, __u16 dspec, __u32 data_len, void *data, __u32 *result) { __u32 cdw10 = data_len ? (data_len >> 2) - 1 : 0; - __u32 cdw11 = DW(doper, NVME_DIRECTIVE_CDW11_DOPER) | - DW(dtype, NVME_DIRECTIVE_CDW11_DTYPE) | - DW(dspec, NVME_DIRECTIVE_CDW11_DPSEC); + __u32 cdw11 = NVME_SET(doper, DIRECTIVE_CDW11_DOPER) | + NVME_SET(dtype, DIRECTIVE_CDW11_DTYPE) | + NVME_SET(dspec, DIRECTIVE_CDW11_DPSEC); struct nvme_passthru_cmd cmd = { .opcode = nvme_admin_directive_recv, @@ -1539,11 +1538,11 @@ int nvme_get_property(int fd, int offset, __u64 *value) int nvme_sanitize_nvm(int fd, enum nvme_sanitize_sanact sanact, bool ause, __u8 owpass, bool oipbp, bool nodas, __u32 ovrpat) { - __u32 cdw10 = DW(sanact, NVME_SANITIZE_CDW10_SANACT) | - DW(!!ause, NVME_SANITIZE_CDW10_AUSE) | - DW(owpass, NVME_SANITIZE_CDW10_OWPASS) | - DW(!!oipbp, NVME_SANITIZE_CDW10_OIPBP) | - DW(!!nodas, NVME_SANITIZE_CDW10_NODAS); + __u32 cdw10 = NVME_SET(sanact, SANITIZE_CDW10_SANACT) | + NVME_SET(!!ause, SANITIZE_CDW10_AUSE) | + NVME_SET(owpass, SANITIZE_CDW10_OWPASS) | + NVME_SET(!!oipbp, SANITIZE_CDW10_OIPBP) | + NVME_SET(!!nodas, SANITIZE_CDW10_NODAS); __u32 cdw11 = ovrpat; struct nvme_passthru_cmd cmd = { @@ -1557,7 +1556,7 @@ int nvme_sanitize_nvm(int fd, enum nvme_sanitize_sanact sanact, bool ause, int nvme_dev_self_test(int fd, __u32 nsid, enum nvme_dst_stc stc) { - __u32 cdw10 = DW(stc, NVME_DEVICE_SELF_TEST_CDW10_STC); + __u32 cdw10 = NVME_SET(stc, DEVICE_SELF_TEST_CDW10_STC); struct nvme_passthru_cmd cmd = { .opcode = nvme_admin_dev_self_test, @@ -1572,10 +1571,10 @@ int nvme_virtual_mgmt(int fd, enum nvme_virt_mgmt_act act, enum nvme_virt_mgmt_rt rt, __u16 cntlid, __u16 nr, __u32 *result) { - __u32 cdw10 = DW(act, NVME_VIRT_MGMT_CDW10_ACT) | - DW(rt, NVME_VIRT_MGMT_CDW10_RT) | - DW(cntlid, NVME_VIRT_MGMT_CDW10_CNTLID); - __u32 cdw11 = DW(nr, NVME_VIRT_MGMT_CDW11_NR); + __u32 cdw10 = NVME_SET(act, VIRT_MGMT_CDW10_ACT) | + NVME_SET(rt, VIRT_MGMT_CDW10_RT) | + NVME_SET(cntlid, VIRT_MGMT_CDW10_CNTLID); + __u32 cdw11 = NVME_SET(nr, VIRT_MGMT_CDW11_NR); struct nvme_passthru_cmd cmd = { .opcode = nvme_admin_virtual_mgmt, @@ -1800,8 +1799,8 @@ int nvme_zns_mgmt_send(int fd, __u32 nsid, __u64 slba, bool select_all, { __u32 cdw10 = slba & 0xffffffff; __u32 cdw11 = slba >> 32; - __u32 cdw13 = DW(!!select_all, NVME_ZNS_MGMT_SEND_SEL) | - DW(zsa, NVME_ZNS_MGMT_SEND_ZSA); + __u32 cdw13 = NVME_SET(!!select_all, ZNS_MGMT_SEND_SEL) | + NVME_SET(zsa, ZNS_MGMT_SEND_ZSA); struct nvme_passthru_cmd cmd = { .opcode = nvme_zns_cmd_mgmt_send, @@ -1823,9 +1822,9 @@ int nvme_zns_mgmt_recv(int fd, __u32 nsid, 
__u64 slba, __u32 cdw10 = slba & 0xffffffff; __u32 cdw11 = slba >> 32; __u32 cdw12 = (data_len >> 2) - 1; - __u32 cdw13 = DW(zra , NVME_ZNS_MGMT_RECV_ZRA) | - DW(zrasf, NVME_ZNS_MGMT_RECV_ZRASF) | - DW(zras_feat, NVME_ZNS_MGMT_RECV_ZRAS_FEAT); + __u32 cdw13 = NVME_SET(zra, ZNS_MGMT_RECV_ZRA) | + NVME_SET(zrasf, ZNS_MGMT_RECV_ZRASF) | + NVME_SET(zras_feat, ZNS_MGMT_RECV_ZRAS_FEAT); struct nvme_passthru_cmd cmd = { .opcode = nvme_zns_cmd_mgmt_recv, diff --git a/src/nvme/types.h b/src/nvme/types.h index f5fb46ad..e1333b3c 100644 --- a/src/nvme/types.h +++ b/src/nvme/types.h @@ -24,8 +24,8 @@ /** * NVME_GET() - extract field from complex value - * @name: The name of the sub-field within an nvme value * @value: The original value of a complex field + * @name: The name of the sub-field within an nvme value * * By convention, this library defines _SHIFT and _MASK such that mask can be * applied after the shift to isolate a specific set of bits that decode to a @@ -33,17 +33,17 @@ * * Returns: The 'name' field from 'value' */ -#define NVME_GET(name, value) \ +#define NVME_GET(value, name) \ (((value) >> NVME_##name##_SHIFT) & NVME_##name##_MASK) /** * NVME_SET() - set field into complex value + * @value: The value to be set in its completed position * @name: The name of the sub-field within an nvme value - * @value: The value to be set * - * Returns: The 'name' field from 'value' + * Returns: The 'value' masked and shifted into position for the 'name' field */ -#define NVME_SET(name, value) \ +#define NVME_SET(value, name) \ (((value) & NVME_##name##_MASK) << NVME_##name##_SHIFT) /** @@ -260,7 +260,8 @@ static inline __u64 nvme_mmio_read64(void *addr) __le32 *p = (__le32 *)addr; /* - * Some devices fail 64-bit MMIO. Access 64-bit registers as 2 32-bit. + * Some devices fail 64-bit MMIO, and at least one 64-bit register is + * not aligned to 64-bit. Access 64-bit registers as two 32-bit.
*/ return le32_to_cpu(*p) | ((uint64_t)le32_to_cpu(*(p + 1)) << 32); } @@ -296,18 +297,18 @@ enum nvme_cap { NVME_CAP_CSS_ADMIN = 1 << 7, }; -#define NVME_CAP_MQES(cap) NVME_GET(CAP_MQES, cap) -#define NVME_CAP_CQR(cap) NVME_GET(CAP_CQR, cap) -#define NVME_CAP_AMS(cap) NVME_GET(CAP_AMS, cap) -#define NVME_CAP_TO(cap) NVME_GET(CAP_TO, cap) -#define NVME_CAP_DSTRD(cap) NVME_GET(CAP_DSTRD, cap) -#define NVME_CAP_NSSRC(cap) NVME_GET(CAP_NSSRC, cap) -#define NVME_CAP_CSS(cap) NVME_GET(CAP_CSS, cap) -#define NVME_CAP_BPS(cap) NVME_GET(CAP_BPS, cap) -#define NVME_CAP_MPSMIN(cap) NVME_GET(CAP_MPSMIN, cap) -#define NVME_CAP_MPSMAX(cap) NVME_GET(CAP_MPSMAX, cap) -#define NVME_CAP_CMBS(cap) NVME_GET(CAP_CMBS, cap) -#define NVME_CAP_PMRS(cap) NVME_GET(CAP_PMRS, cap) +#define NVME_CAP_MQES(cap) NVME_GET(cap, CAP_MQES) +#define NVME_CAP_CQR(cap) NVME_GET(cap, CAP_CQR) +#define NVME_CAP_AMS(cap) NVME_GET(cap, CAP_AMS) +#define NVME_CAP_TO(cap) NVME_GET(cap, CAP_TO) +#define NVME_CAP_DSTRD(cap) NVME_GET(cap, CAP_DSTRD) +#define NVME_CAP_NSSRC(cap) NVME_GET(cap, CAP_NSSRC) +#define NVME_CAP_CSS(cap) NVME_GET(cap, CAP_CSS) +#define NVME_CAP_BPS(cap) NVME_GET(cap, CAP_BPS) +#define NVME_CAP_MPSMIN(cap) NVME_GET(cap, CAP_MPSMIN) +#define NVME_CAP_MPSMAX(cap) NVME_GET(cap, CAP_MPSMAX) +#define NVME_CAP_CMBS(cap) NVME_GET(cap, CAP_CMBS) +#define NVME_CAP_PMRS(cap) NVME_GET(cap, CAP_PMRS) enum nvme_vs { NVME_VS_TER_SHIFT = 0, @@ -318,9 +319,9 @@ enum nvme_vs { NVME_VS_MJR_MASK = 0xffff, }; -#define NVME_VS_TER(vs) NVME_GET(VS_TER, vs) -#define NVME_VS_MNR(vs) NVME_GET(VS_MNR, vs) -#define NVME_VS_MJR(vs) NVME_GET(VS_MJR, vs) +#define NVME_VS_TER(vs) NVME_GET(vs, VS_TER) +#define NVME_VS_MNR(vs) NVME_GET(vs, VS_MNR) +#define NVME_VS_MJR(vs) NVME_GET(vs, VS_MJR) #define NVME_MAJOR(ver) NVME_VS_MJR(ver) #define NVME_MINOR(ver) NVME_VS_MNR(ver) @@ -352,13 +353,13 @@ enum nvme_cc { NVME_CC_SHN_ABRUPT = 2, }; -#define NVME_CC_EN(cc) NVME_GET(CC_EN, cc) -#define NVME_CC_CSS(cc) NVME_GET(CC_CSS, cc) -#define NVME_CC_MPS(cc) NVME_GET(CC_MPS, cc) -#define NVME_CC_AMS(cc) NVME_GET(CC_AMS, cc) -#define NVME_CC_SHN(cc) NVME_GET(CC_SHN, cc) -#define NVME_CC_IOSQES(cc) NVME_GET(CC_IOSQES, cc) -#define NVME_CC_IOCQES(cc) NVME_GET(CC_IOCQES, cc) +#define NVME_CC_EN(cc) NVME_GET(cc, CC_EN) +#define NVME_CC_CSS(cc) NVME_GET(cc, CC_CSS) +#define NVME_CC_MPS(cc) NVME_GET(cc, CC_MPS) +#define NVME_CC_AMS(cc) NVME_GET(cc, CC_AMS) +#define NVME_CC_SHN(cc) NVME_GET(cc, CC_SHN) +#define NVME_CC_IOSQES(cc) NVME_GET(cc, CC_IOSQES) +#define NVME_CC_IOCQES(cc) NVME_GET(cc, CC_IOCQES) enum nvme_csts { NVME_CSTS_RDY_SHIFT = 0, @@ -377,11 +378,11 @@ enum nvme_csts { NVME_CSTS_SHST_MASK = 3, }; -#define NVME_CSTS_RDY(csts) NVME_GET(CSTS_RDY, csts) -#define NVME_CSTS_CFS(csts) NVME_GET(CSTS_CFS, csts) -#define NVME_CSTS_SHST(csts) NVME_GET(CSTS_SHST, csts) -#define NVME_CSTS_NSSRO(csts) NVME_GET(CSTS_NSSRO, csts) -#define NVME_CSTS_PP(csts) NVME_GET(CSTS_PP, csts) +#define NVME_CSTS_RDY(csts) NVME_GET(csts, CSTS_RDY) +#define NVME_CSTS_CFS(csts) NVME_GET(csts, CSTS_CFS) +#define NVME_CSTS_SHST(csts) NVME_GET(csts, CSTS_SHST) +#define NVME_CSTS_NSSRO(csts) NVME_GET(csts, CSTS_NSSRO) +#define NVME_CSTS_PP(csts) NVME_GET(csts, CSTS_PP) enum nvme_aqa { NVME_AQA_ASQS_SHIFT = 0, @@ -390,8 +391,8 @@ enum nvme_aqa { NVME_AQA_ACQS_MASK = 0xfff, }; -#define NVME_AQA_ASQS(aqa) NVME_GET(AQA_ASQS, aqa) -#define NVME_AQA_ACQS(aqa) NVME_GET(AQA_ACQS, aqa) +#define NVME_AQA_ASQS(aqa) NVME_GET(aqa, AQA_ASQS) +#define NVME_AQA_ACQS(aqa) NVME_GET(aqa, AQA_ACQS) 
enum nvme_cmbloc { NVME_CMBLOC_BIR_SHIFT = 0, @@ -412,14 +413,14 @@ enum nvme_cmbloc { NVME_CMBLOC_OFST_MASK = 0xfffff, }; -#define NVME_CMBLOC_BIR(cmbloc) NVME_GET(CMBLOC_BIR, cmbloc) -#define NVME_CMBLOC_CQMMS(cmbloc) NVME_GET(CMBLOC_CQMMS, cmbloc) -#define NVME_CMBLOC_CQPDS(cmbloc) NVME_GET(CMBLOC_CQPDS, cmbloc) -#define NVME_CMBLOC_CDPLMS(cmbloc) NVME_GET(CMBLOC_CDPLMS, cmbloc) -#define NVME_CMBLOC_CDPCILS(cmbloc) NVME_GET(CMBLOC_CDPCILS, cmbloc) -#define NVME_CMBLOC_CDMMMS(cmbloc) NVME_GET(CMBLOC_CDMMMS, cmbloc) -#define NVME_CMBLOC_CQDA(cmbloc) NVME_GET(CMBLOC_CQDA, cmbloc) -#define NVME_CMBLOC_OFST(cmbloc) NVME_GET(CMBLOC_OFST, cmbloc) +#define NVME_CMBLOC_BIR(cmbloc) NVME_GET(cmbloc, CMBLOC_BIR) +#define NVME_CMBLOC_CQMMS(cmbloc) NVME_GET(cmbloc, CMBLOC_CQMMS) +#define NVME_CMBLOC_CQPDS(cmbloc) NVME_GET(cmbloc, CMBLOC_CQPDS) +#define NVME_CMBLOC_CDPLMS(cmbloc) NVME_GET(cmbloc, CMBLOC_CDPLMS) +#define NVME_CMBLOC_CDPCILS(cmbloc) NVME_GET(cmbloc, CMBLOC_CDPCILS) +#define NVME_CMBLOC_CDMMMS(cmbloc) NVME_GET(cmbloc, CMBLOC_CDMMMS) +#define NVME_CMBLOC_CQDA(cmbloc) NVME_GET(cmbloc, CMBLOC_CQDA) +#define NVME_CMBLOC_OFST(cmbloc) NVME_GET(cmbloc, CMBLOC_OFST) enum nvme_cmbsz { NVME_CMBSZ_SQS_SHIFT = 0, @@ -445,13 +446,13 @@ enum nvme_cmbsz { NVME_CMBSZ_SZU_64G = 6, }; -#define NVME_CMBSZ_SQS(cmbsz) NVME_GET(CMBSZ_SQS, cmbsz) -#define NVME_CMBSZ_CQS(cmbsz) NVME_GET(CMBSZ_CQS, cmbsz) -#define NVME_CMBSZ_LISTS(cmbsz) NVME_GET(CMBSZ_LISTS, cmbsz) -#define NVME_CMBSZ_RDS(cmbsz) NVME_GET(CMBSZ_RDS, cmbsz) -#define NVME_CMBSZ_WDS(cmbsz) NVME_GET(CMBSZ_WDS, cmbsz) -#define NVME_CMBSZ_SZU(cmbsz) NVME_GET(CMBSZ_SZU, cmbsz) -#define NVME_CMBSZ_SZ(cmbsz) NVME_GET(CMBSZ_SZ, cmbsz) +#define NVME_CMBSZ_SQS(cmbsz) NVME_GET(cmbsz, CMBSZ_SQS) +#define NVME_CMBSZ_CQS(cmbsz) NVME_GET(cmbsz, CMBSZ_CQS) +#define NVME_CMBSZ_LISTS(cmbsz) NVME_GET(cmbsz, CMBSZ_LISTS) +#define NVME_CMBSZ_RDS(cmbsz) NVME_GET(cmbsz, CMBSZ_RDS) +#define NVME_CMBSZ_WDS(cmbsz) NVME_GET(cmbsz, CMBSZ_WDS) +#define NVME_CMBSZ_SZU(cmbsz) NVME_GET(cmbsz, CMBSZ_SZU) +#define NVME_CMBSZ_SZ(cmbsz) NVME_GET(cmbsz, CMBSZ_SZ) /** * nvme_cmb_size() - Calculate size of the controller memory buffer @@ -478,9 +479,9 @@ enum nvme_bpinfo { NVME_BPINFO_BRS_READ_ERROR = 3, }; -#define NVME_BPINFO_BPSZ(bpinfo) NVME_GET(BPINFO_BPSZ, bpinfo) -#define NVME_BPINFO_BRS(bpinfo) NVME_GET(BPINFO_BRS, bpinfo) -#define NVME_BPINFO_ABPID(bpinfo) NVME_GET(BPINFO_ABPID, bpinfo) +#define NVME_BPINFO_BPSZ(bpinfo) NVME_GET(bpinfo, BPINFO_BPSZ) +#define NVME_BPINFO_BRS(bpinfo) NVME_GET(bpinfo, BPINFO_BRS) +#define NVME_BPINFO_ABPID(bpinfo) NVME_GET(bpinfo, BPINFO_ABPID) enum nvme_bprsel { NVME_BPRSEL_BPRSZ_SHIFT = 0, @@ -491,9 +492,9 @@ enum nvme_bprsel { NVME_BPRSEL_BPID_MASK = 0x1, }; -#define NVME_BPRSEL_BPRSZ(bprsel) NVME_GET(BPRSEL_BPRSZ, bprsel) -#define NVME_BPRSEL_BPROF(bprsel) NVME_GET(BPRSEL_BPROF, bprsel) -#define NVME_BPRSEL_BPID(bprsel) NVME_GET(BPRSEL_BPID, bprsel) +#define NVME_BPRSEL_BPRSZ(bprsel) NVME_GET(bprsel, BPRSEL_BPRSZ) +#define NVME_BPRSEL_BPROF(bprsel) NVME_GET(bprsel, BPRSEL_BPROF) +#define NVME_BPRSEL_BPID(bprsel) NVME_GET(bprsel, BPRSEL_BPID) enum nvme_cmbmsc { NVME_CMBMSC_CRE_SHIFT = 0, @@ -504,16 +505,16 @@ enum nvme_cmbmsc { }; static const __u64 NVME_CMBMSC_CBA_MASK = 0xfffffffffffffull; -#define NVME_CMBMSC_CRE(cmbmsc) NVME_GET(CMBMSC_CRE, cmbmsc) -#define NVME_CMBMSC_CMSE(cmbmsc) NVME_GET(CMBMSC_CMSE, cmbmsc) -#define NVME_CMBMSC_CBA(cmbmsc) NVME_GET(CMBMSC_CBA, cmbmsc) +#define NVME_CMBMSC_CRE(cmbmsc) NVME_GET(cmbmsc, CMBMSC_CRE) 
+#define NVME_CMBMSC_CMSE(cmbmsc) NVME_GET(cmbmsc, CMBMSC_CMSE) +#define NVME_CMBMSC_CBA(cmbmsc) NVME_GET(cmbmsc, CMBMSC_CBA) enum nvme_cmbsts { NVME_CMBSTS_CBAI_SHIFT = 0, NVME_CMBSTS_CBAI_MASK = 0x1, }; -#define NVME_CMBSTS_CBAI(cmbsts) NVME_GET(CMBSTS_CBAI, cmbsts) +#define NVME_CMBSTS_CBAI(cmbsts) NVME_GET(cmbsts, CMBSTS_CBAI) enum nvme_pmrcap { NVME_PMRCAP_RDS_SHIFT = 3, @@ -534,20 +535,20 @@ enum nvme_pmrcap { NVME_PMRCAP_PMRTU_60S = 1, }; -#define NVME_PMRCAP_RDS(pmrcap) NVME_GET(PMRCAP_RDS, pmrcap) -#define NVME_PMRCAP_WDS(pmrcap) NVME_GET(PMRCAP_WDS, pmrcap) -#define NVME_PMRCAP_BIR(pmrcap) NVME_GET(PMRCAP_BIR, pmrcap) -#define NVME_PMRCAP_PMRTU(pmrcap) NVME_GET(PMRCAP_PMRTU, pmrcap) -#define NVME_PMRCAP_PMRWMB(pmrcap) NVME_GET(PMRCAP_PMRWMB, pmrcap) -#define NVME_PMRCAP_PMRTO(pmrcap) NVME_GET(PMRCAP_PMRTO, pmrcap) -#define NVME_PMRCAP_CMSS(pmrcap) NVME_GET(PMRCAP_CMSS, pmrcap) +#define NVME_PMRCAP_RDS(pmrcap) NVME_GET(pmrcap, PMRCAP_RDS) +#define NVME_PMRCAP_WDS(pmrcap) NVME_GET(pmrcap, PMRCAP_WDS) +#define NVME_PMRCAP_BIR(pmrcap) NVME_GET(pmrcap, PMRCAP_BIR) +#define NVME_PMRCAP_PMRTU(pmrcap) NVME_GET(pmrcap, PMRCAP_PMRTU) +#define NVME_PMRCAP_PMRWMB(pmrcap) NVME_GET(pmrcap, PMRCAP_PMRWMB) +#define NVME_PMRCAP_PMRTO(pmrcap) NVME_GET(pmrcap, PMRCAP_PMRTO) +#define NVME_PMRCAP_CMSS(pmrcap) NVME_GET(pmrcap, PMRCAP_CMSS) enum nvme_pmrctl { NVME_PMRCTL_EN_SHIFT = 0, NVME_PMRCTL_EN_MASK = 0x1, }; -#define NVME_PMRCTL_EN(pmrctl) NVME_GET(PMRCTL_EN, pmrctl) +#define NVME_PMRCTL_EN(pmrctl) NVME_GET(pmrctl, PMRCTL_EN) enum nvme_pmrsts { NVME_PMRSTS_ERR_SHIFT = 0, @@ -560,10 +561,10 @@ enum nvme_pmrsts { NVME_PMRSTS_CBAI_MASK = 0x1, }; -#define NVME_PMRSTS_ERR(pmrsts) NVME_GET(PMRSTS_ERR, pmrsts) -#define NVME_PMRSTS_NRDY(pmrsts) NVME_GET(PMRSTS_NRDY, pmrsts) -#define NVME_PMRSTS_HSTS(pmrsts) NVME_GET(PMRSTS_HSTS, pmrsts) -#define NVME_PMRSTS_CBAI(pmrsts) NVME_GET(PMRSTS_CBAI, pmrsts) +#define NVME_PMRSTS_ERR(pmrsts) NVME_GET(pmrsts, PMRSTS_ERR) +#define NVME_PMRSTS_NRDY(pmrsts) NVME_GET(pmrsts, PMRSTS_NRDY) +#define NVME_PMRSTS_HSTS(pmrsts) NVME_GET(pmrsts, PMRSTS_HSTS) +#define NVME_PMRSTS_CBAI(pmrsts) NVME_GET(pmrsts, PMRSTS_CBAI) enum nvme_pmrebs { NVME_PMREBS_PMRSZU_SHIFT = 0, @@ -578,9 +579,9 @@ enum nvme_pmrebs { NVME_PMREBS_PMRSZU_1G = 3, }; -#define NVME_PMREBS_PMRSZU(pmrebs) NVME_GET(PMREBS_PMRSZU, pmrebs) -#define NVME_PMREBS_RBB(pmrebs) NVME_GET(PMREBS_RBB, pmrebs) -#define NVME_PMREBS_PMRWBZ(pmrebs) NVME_GET(PMREBS_PMRWBZ, pmrebs) +#define NVME_PMREBS_PMRSZU(pmrebs) NVME_GET(pmrebs, PMREBS_PMRSZU) +#define NVME_PMREBS_RBB(pmrebs) NVME_GET(pmrebs, PMREBS_RBB) +#define NVME_PMREBS_PMRWBZ(pmrebs) NVME_GET(pmrebs, PMREBS_PMRWBZ) /** * nvme_pmr_size() - Calculate size of persistent memory region elasticity @@ -606,8 +607,8 @@ enum nvme_pmrswtp { NVME_PMRSWTP_PMRSWTU_GBPS = 3, }; -#define NVME_PMRSWTP_PMRSWTU(pmrswtp) NVME_GET(PMRSWTP_PMRSWTU, pmrswtp) -#define NVME_PMRSWTP_PMRSWTV(pmrswtp) NVME_GET(PMRSWTP_PMRSWTU, pmrswtp) +#define NVME_PMRSWTP_PMRSWTU(pmrswtp) NVME_GET(pmrswtp, PMRSWTP_PMRSWTU) +#define NVME_PMRSWTP_PMRSWTV(pmrswtp) NVME_GET(pmrswtp, PMRSWTP_PMRSWTU) /** * nvme_pmr_throughput() - Calculate throughput of persistent memory buffer @@ -628,8 +629,8 @@ enum nvme_pmrmsc { }; static const __u64 NVME_PMRMSC_CBA_MASK = 0xfffffffffffffull; -#define NVME_PMRMSC_CMSE(pmrmsc) NVME_GET(PMRMSC_CMSE, pmrmsc) -#define NVME_PMRMSC_CBA(pmrmsc) NVME_GET(PMRMSC_CBA, pmrmsc) +#define NVME_PMRMSC_CMSE(pmrmsc) NVME_GET(pmrmsc, PMRMSC_CMSE) +#define NVME_PMRMSC_CBA(pmrmsc) 
NVME_GET(pmrmsc, PMRMSC_CBA) /** * enum nvme_psd_flags - Possible flag values in nvme power state descriptor diff --git a/src/nvme/util.h b/src/nvme/util.h index 6cf607a2..6534f631 100644 --- a/src/nvme/util.h +++ b/src/nvme/util.h @@ -238,40 +238,40 @@ static inline void nvme_chomp(char *s, int l) } enum { - NVME_FEAT_ARB_BURST_SHIFT = 0, - NVME_FEAT_ARB_BURST_MASK = 0x7, - NVME_FEAT_ARB_LPW_SHIFT = 8, - NVME_FEAT_ARB_LPW_MASK = 0xff, - NVME_FEAT_ARB_MPW_SHIFT = 16, - NVME_FEAT_ARB_MPW_MASK = 0xff, - NVME_FEAT_ARB_HPW_SHIFT = 24, - NVME_FEAT_ARB_HPW_MASK = 0xff, - NVME_FEAT_PM_PS_SHIFT = 0, - NVME_FEAT_PM_PS_MASK = 0x1f, - NVME_FEAT_PM_WH_SHIFT = 5, - NVME_FEAT_PM_WH_MASK = 0x7, - NVME_FEAT_LBAR_NR_SHIFT = 0, - NVME_FEAT_LBAR_NR_MASK = 0x3f, - NVME_FEAT_TT_TMPTH_SHIFT = 0, - NVME_FEAT_TT_TMPTH_MASK = 0xffff, - NVME_FEAT_TT_TMPSEL_SHIFT = 16, - NVME_FEAT_TT_TMPSEL_MASK = 0xf, - NVME_FEAT_TT_THSEL_SHIFT = 20, - NVME_FEAT_TT_THSEL_MASK = 0x3, - NVME_FEAT_ER_TLER_SHIFT = 0, - NVME_FEAT_ER_TLER_MASK = 0xffff, - NVME_FEAT_ER_DULBE_SHIFT = 16, - NVME_FEAT_ER_DULBE_MASK = 0x1, + NVME_FEAT_ARBITRATION_BURST_SHIFT = 0, + NVME_FEAT_ARBITRATION_BURST_MASK = 0x7, + NVME_FEAT_ARBITRATION_LPW_SHIFT = 8, + NVME_FEAT_ARBITRATION_LPW_MASK = 0xff, + NVME_FEAT_ARBITRATION_MPW_SHIFT = 16, + NVME_FEAT_ARBITRATION_MPW_MASK = 0xff, + NVME_FEAT_ARBITRATION_HPW_SHIFT = 24, + NVME_FEAT_ARBITRATION_HPW_MASK = 0xff, + NVME_FEAT_PWRMGMT_PS_SHIFT = 0, + NVME_FEAT_PWRMGMT_PS_MASK = 0x1f, + NVME_FEAT_PWRMGMT_WH_SHIFT = 5, + NVME_FEAT_PWRMGMT_WH_MASK = 0x7, + NVME_FEAT_LBAR_NR_SHIFT = 0, + NVME_FEAT_LBAR_NR_MASK = 0x3f, + NVME_FEAT_TT_TMPTH_SHIFT = 0, + NVME_FEAT_TT_TMPTH_MASK = 0xffff, + NVME_FEAT_TT_TMPSEL_SHIFT = 16, + NVME_FEAT_TT_TMPSEL_MASK = 0xf, + NVME_FEAT_TT_THSEL_SHIFT = 20, + NVME_FEAT_TT_THSEL_MASK = 0x3, + NVME_FEAT_ERROR_RECOVERY_TLER_SHIFT = 0, + NVME_FEAT_ERROR_RECOVERY_TLER_MASK = 0xffff, + NVME_FEAT_ERROR_RECOVERY_DULBE_SHIFT = 16, + NVME_FEAT_ERROR_RECOVERY_DULBE_MASK = 0x1, NVME_FEAT_VWC_WCE_SHIFT = 0, NVME_FEAT_VWC_WCE_MASK = 0x1, NVME_FEAT_NRQS_NSQR_SHIFT = 0, NVME_FEAT_NRQS_NSQR_MASK = 0xffff, NVME_FEAT_NRQS_NCQR_SHIFT = 16, NVME_FEAT_NRQS_NCQR_MASK = 0xffff, - NVME_FEAT_ICOAL_THR_SHIFT = 0, - NVME_FEAT_ICOAL_THR_MASK = 0xff, - NVME_FEAT_ICOAL_TIME_SHIFT = 8, - NVME_FEAT_ICOAL_TIME_MASK = 0xff, + NVME_FEAT_IRQC_THR_SHIFT = 0, + NVME_FEAT_IRQC_THR_MASK = 0xff, + NVME_FEAT_IRQC_TIME_SHIFT = 8, + NVME_FEAT_IRQC_TIME_MASK = 0xff, NVME_FEAT_ICFG_IV_SHIFT = 0, NVME_FEAT_ICFG_IV_MASK = 0xffff, NVME_FEAT_ICFG_CD_SHIFT = 16, @@ -338,10 +338,10 @@ enum { NVME_FEAT_IOCSP_IOCSCI_MASK = 0xff, }; -#define NVME_FEAT_ARB_BURST(v) NVME_GET(FEAT_ARB_BURST, v) -#define NVME_FEAT_ARB_LPW(v) NVME_GET(FEAT_ARB_LPW, v) -#define NVME_FEAT_ARB_MPW(v) NVME_GET(FEAT_ARB_MPW, v) -#define NVME_FEAT_ARB_HPW(v) NVME_GET(FEAT_ARB_HPW, v) +#define NVME_FEAT_ARB_BURST(v) NVME_GET(v, FEAT_ARBITRATION_BURST) +#define NVME_FEAT_ARB_LPW(v) NVME_GET(v, FEAT_ARBITRATION_LPW) +#define NVME_FEAT_ARB_MPW(v) NVME_GET(v, FEAT_ARBITRATION_MPW) +#define NVME_FEAT_ARB_HPW(v) NVME_GET(v, FEAT_ARBITRATION_HPW) inline void nvme_feature_decode_arbitration(__u32 value, __u8 *ab, __u8 *lpw, __u8 *mpw, __u8 *hpw) @@ -352,8 +352,8 @@ inline void nvme_feature_decode_arbitration(__u32 value, __u8 *ab, __u8 *lpw, *hpw = NVME_FEAT_ARB_HPW(value); }; -#define NVME_FEAT_PM_PS(v) NVME_GET(FEAT_PM_PS, v) -#define NVME_FEAT_PM_WH(v) NVME_GET(FEAT_PM_WH, v) +#define NVME_FEAT_PM_PS(v) NVME_GET(v, FEAT_PWRMGMT_PS) +#define NVME_FEAT_PM_WH(v) NVME_GET(v, 
FEAT_PWRMGMT_WH) inline void nvme_feature_decode_power_mgmt(__u32 value, __u8 *ps, __u8 *wh) { @@ -361,16 +361,16 @@ inline void nvme_feature_decode_power_mgmt(__u32 value, __u8 *ps, __u8 *wh) *wh = NVME_FEAT_PM_WH(value); } -#define NVME_FEAT_LBAR_NR(v) NVME_GET(FEAT_LBAR_NR, v) +#define NVME_FEAT_LBAR_NR(v) NVME_GET(v, FEAT_LBAR_NR) inline void nvme_feature_decode_lba_range(__u32 value, __u8 *num) { *num = NVME_FEAT_LBAR_NR(value); } -#define NVME_FEAT_TT_TMPTH(v) NVME_GET(FEAT_TT_TMPTH, v) -#define NVME_FEAT_TT_TMPSEL(v) NVME_GET(FEAT_TT_TMPSEL, v) -#define NVME_FEAT_TT_THSEL(v) NVME_GET(FEAT_TT_THSEL, v) +#define NVME_FEAT_TT_TMPTH(v) NVME_GET(v, FEAT_TT_TMPTH) +#define NVME_FEAT_TT_TMPSEL(v) NVME_GET(v, FEAT_TT_TMPSEL) +#define NVME_FEAT_TT_THSEL(v) NVME_GET(v, FEAT_TT_THSEL) inline void nvme_feature_decode_temp_threshold(__u32 value, __u16 *tmpth, __u8 *tmpsel, __u8 *thsel) @@ -380,8 +380,8 @@ inline void nvme_feature_decode_temp_threshold(__u32 value, __u16 *tmpth, *thsel = NVME_FEAT_TT_THSEL(value); } -#define NVME_FEAT_ER_TLER(v) NVME_GET(FEAT_ER_TLER, v) -#define NVME_FEAT_ER_DULBE(v) NVME_GET(FEAT_ER_DULBE, v) +#define NVME_FEAT_ER_TLER(v) NVME_GET(v, FEAT_ERROR_RECOVERY_TLER) +#define NVME_FEAT_ER_DULBE(v) NVME_GET(v, FEAT_ERROR_RECOVERY_DULBE) inline void nvme_feature_decode_error_recovery(__u32 value, __u16 *tler, bool *dulbe) { @@ -389,15 +389,15 @@ inline void nvme_feature_decode_error_recovery(__u32 value, __u16 *tler, bool *d *dulbe = NVME_FEAT_ER_DULBE(value); } -#define NVME_FEAT_VWC_WCE(v) NVME_GET(FEAT_VWC_WCE, v) +#define NVME_FEAT_VWC_WCE(v) NVME_GET(v, FEAT_VWC_WCE) inline void nvme_feature_decode_volatile_write_cache(__u32 value, bool *wce) { *wce = NVME_FEAT_VWC_WCE(value); } -#define NVME_FEAT_NRQS_NSQR(v) NVME_GET(FEAT_NRQS_NSQR, v) -#define NVME_FEAT_NRQS_NCQR(v) NVME_GET(FEAT_NRQS_NCQR, v) +#define NVME_FEAT_NRQS_NSQR(v) NVME_GET(v, FEAT_NRQS_NSQR) +#define NVME_FEAT_NRQS_NCQR(v) NVME_GET(v, FEAT_NRQS_NCQR) inline void nvme_feature_decode_number_of_queues(__u32 value, __u16 *nsqr, __u16 *ncqr) { @@ -405,17 +405,17 @@ inline void nvme_feature_decode_number_of_queues(__u32 value, __u16 *nsqr, __u16 *ncqr = NVME_FEAT_NRQS_NCQR(value); } -#define NVME_FEAT_ICOAL_THR(v) NVME_GET(FEAT_ICOAL_THR, v) -#define NVME_FEAT_ICOAL_TIME(v) NVME_GET(FEAT_ICOAL_TIME, v) +#define NVME_FEAT_IRQC_THR(v) NVME_GET(v, FEAT_IRQC_THR) +#define NVME_FEAT_IRQC_TIME(v) NVME_GET(v, FEAT_IRQC_TIME) inline void nvme_feature_decode_interrupt_coalescing(__u32 value, __u8 *thr, __u8 *time) { - *thr = NVME_FEAT_ICOAL_THR(value); - *time = NVME_FEAT_ICOAL_TIME(value); + *thr = NVME_FEAT_IRQC_THR(value); + *time = NVME_FEAT_IRQC_TIME(value); } -#define NVME_FEAT_ICFG_IV(v) NVME_GET(FEAT_ICFG_IV, v) -#define NVME_FEAT_ICFG_CD(v) NVME_GET(FEAT_ICFG_CD, v) +#define NVME_FEAT_ICFG_IV(v) NVME_GET(v, FEAT_ICFG_IV) +#define NVME_FEAT_ICFG_CD(v) NVME_GET(v, FEAT_ICFG_CD) inline void nvme_feature_decode_interrupt_config(__u32 value, __u16 *iv, bool *cd) { @@ -423,21 +423,21 @@ inline void nvme_feature_decode_interrupt_config(__u32 value, __u16 *iv, bool *c *cd = NVME_FEAT_ICFG_CD(value); } -#define NVME_FEAT_WA_DN(v) NVME_GET(FEAT_WA_DN, v) +#define NVME_FEAT_WA_DN(v) NVME_GET(v, FEAT_WA_DN) inline void nvme_feature_decode_write_atomicity(__u32 value, bool *dn) { *dn = NVME_FEAT_WA_DN(value); } -#define NVME_FEAT_AE_SMART(v) NVME_GET(FEAT_AE_SMART, v) -#define NVME_FEAT_AE_NAN(v) NVME_GET(FEAT_AE_NAN, v) -#define NVME_FEAT_AE_FW(v) NVME_GET(FEAT_AE_FW, v) -#define NVME_FEAT_AE_TELEM(v) 
NVME_GET(FEAT_AE_TELEM, v) -#define NVME_FEAT_AE_ANA(v) NVME_GET(FEAT_AE_ANA, v) -#define NVME_FEAT_AE_PLA(v) NVME_GET(FEAT_AE_PLA, v) -#define NVME_FEAT_AE_LBAS(v) NVME_GET(FEAT_AE_LBAS, v) -#define NVME_FEAT_AE_EGA(v) NVME_GET(FEAT_AE_EGA, v) +#define NVME_FEAT_AE_SMART(v) NVME_GET(v, FEAT_AE_SMART) +#define NVME_FEAT_AE_NAN(v) NVME_GET(v, FEAT_AE_NAN) +#define NVME_FEAT_AE_FW(v) NVME_GET(v, FEAT_AE_FW) +#define NVME_FEAT_AE_TELEM(v) NVME_GET(v, FEAT_AE_TELEM) +#define NVME_FEAT_AE_ANA(v) NVME_GET(v, FEAT_AE_ANA) +#define NVME_FEAT_AE_PLA(v) NVME_GET(v, FEAT_AE_PLA) +#define NVME_FEAT_AE_LBAS(v) NVME_GET(v, FEAT_AE_LBAS) +#define NVME_FEAT_AE_EGA(v) NVME_GET(v, FEAT_AE_EGA) inline void nvme_feature_decode_async_event_config(__u32 value, __u8 *smart, bool *nan, bool *fw, bool *telem, bool *ana, bool *pla, bool *lbas, @@ -453,22 +453,22 @@ inline void nvme_feature_decode_async_event_config(__u32 value, __u8 *smart, *ega = NVME_FEAT_AE_EGA(value); } -#define NVME_FEAT_APST_APSTE(v) NVME_GET(FEAT_APST_APSTE, v) +#define NVME_FEAT_APST_APSTE(v) NVME_GET(v, FEAT_APST_APSTE) inline void nvme_feature_decode_auto_power_state(__u32 value, bool *apste) { *apste = NVME_FEAT_APST_APSTE(value); } -#define NVME_FEAT_HMEM_EHM(v) NVME_GET(FEAT_HMEM_EHM, v) +#define NVME_FEAT_HMEM_EHM(v) NVME_GET(v, FEAT_HMEM_EHM) inline void nvme_feature_decode_host_memory_buffer(__u32 value, bool *ehm) { *ehm = NVME_FEAT_HMEM_EHM(value); } -#define NVME_FEAT_HCTM_TMT2(v) NVME_GET(FEAT_HCTM_TMT2, v) -#define NVME_FEAT_HCTM_TMT1(v) NVME_GET(FEAT_HCTM_TMT1, v) +#define NVME_FEAT_HCTM_TMT2(v) NVME_GET(v, FEAT_HCTM_TMT2) +#define NVME_FEAT_HCTM_TMT1(v) NVME_GET(v, FEAT_HCTM_TMT1) inline void nvme_feature_decode_host_thermal_mgmt(__u32 value, __u16 *tmt2, __u16 *tmt1) { @@ -476,36 +476,36 @@ inline void nvme_feature_decode_host_thermal_mgmt(__u32 value, __u16 *tmt2, __u1 *tmt1 = NVME_FEAT_HCTM_TMT1(value); } -#define NVME_FEAT_NOPS_NOPPME(v) NVME_GET(FEAT_NOPS_NOPPME, v) +#define NVME_FEAT_NOPS_NOPPME(v) NVME_GET(v, FEAT_NOPS_NOPPME) inline void nvme_feature_decode_non_op_power_config(__u32 value, bool *noppme) { *noppme = NVME_FEAT_NOPS_NOPPME(value); } -#define NVME_FEAT_RRL_RRL(v) NVME_GET(FEAT_RRL_RRL, v) +#define NVME_FEAT_RRL_RRL(v) NVME_GET(v, FEAT_RRL_RRL) inline void nvme_feature_decode_read_recovery_level_config(__u32 value, __u8 *rrl) { *rrl = NVME_FEAT_RRL_RRL(value); } -#define NVME_FEAT_PLM_PLME(v) NVME_GET(FEAT_PLM_PLME, v) +#define NVME_FEAT_PLM_PLME(v) NVME_GET(v, FEAT_PLM_PLME) inline void nvme_feature_decode_predictable_latency_mode_config(__u32 value, bool *plme) { *plme = NVME_FEAT_PLM_PLME(value); } -#define NVME_FEAT_PLMW_WS(v) NVME_GET(FEAT_PLMW_WS, v) +#define NVME_FEAT_PLMW_WS(v) NVME_GET(v, FEAT_PLMW_WS) inline void nvme_feature_decode_predictable_latency_mode_window(__u32 value, __u8 *ws) { *ws = NVME_FEAT_PLMW_WS(value); } -#define NVME_FEAT_LBAS_LSIRI(v) NVME_GET(FEAT_LBAS_LSIRI, v) -#define NVME_FEAT_LBAS_LSIPI(v) NVME_GET(FEAT_LBAS_LSIPI, v) +#define NVME_FEAT_LBAS_LSIRI(v) NVME_GET(v, FEAT_LBAS_LSIRI) +#define NVME_FEAT_LBAS_LSIPI(v) NVME_GET(v, FEAT_LBAS_LSIPI) inline void nvme_feature_decode_lba_status_attributes(__u32 value, __u16 *lsiri, __u16 *lsipi) { @@ -513,15 +513,15 @@ inline void nvme_feature_decode_lba_status_attributes(__u32 value, __u16 *lsiri, *lsipi = NVME_FEAT_LBAS_LSIPI(value); } -#define NVME_FEAT_SC_NODRM(v) NVME_GET(FEAT_SC_NODRM, v) +#define NVME_FEAT_SC_NODRM(v) NVME_GET(v, FEAT_SC_NODRM) inline void nvme_feature_decode_sanitize_config(__u32 value, bool *nodrm) { *nodrm 
= NVME_FEAT_SC_NODRM(value); } -#define NVME_FEAT_EG_ENDGID(v) NVME_GET(FEAT_EG_ENDGID, v) -#define NVME_FEAT_EG_EGCW(v) NVME_GET(FEAT_EG_EGCW, v) +#define NVME_FEAT_EG_ENDGID(v) NVME_GET(v, FEAT_EG_ENDGID) +#define NVME_FEAT_EG_EGCW(v) NVME_GET(v, FEAT_EG_EGCW) inline void nvme_feature_decode_endurance_group_event_config(__u32 value, __u16 *endgid, __u8 *endgcw) @@ -530,23 +530,23 @@ inline void nvme_feature_decode_endurance_group_event_config(__u32 value, *endgcw = NVME_FEAT_EG_EGCW(value); } -#define NVME_FEAT_SPM_PBSLC(v) NVME_GET(FEAT_SPM_PBSLC, v) +#define NVME_FEAT_SPM_PBSLC(v) NVME_GET(v, FEAT_SPM_PBSLC) inline void nvme_feature_decode_software_progress_marker(__u32 value, __u8 *pbslc) { *pbslc = NVME_FEAT_SPM_PBSLC(value); } -#define NVME_FEAT_HOSTID_EXHID(v) NVME_GET(FEAT_HOSTID_EXHID, v) +#define NVME_FEAT_HOSTID_EXHID(v) NVME_GET(v, FEAT_HOSTID_EXHID) inline void nvme_feature_decode_host_identifier(__u32 value, bool *exhid) { *exhid = NVME_FEAT_HOSTID_EXHID(value); } -#define NVME_FEAT_RM_REGPRE(v) NVME_GET(FEAT_RM_REGPRE, v) -#define NVME_FEAT_RM_RESREL(v) NVME_GET(FEAT_RM_RESREL, v) -#define NVME_FEAT_RM_RESPRE(v) NVME_GET(FEAT_RM_RESPRE, v) +#define NVME_FEAT_RM_REGPRE(v) NVME_GET(v, FEAT_RM_REGPRE) +#define NVME_FEAT_RM_RESREL(v) NVME_GET(v, FEAT_RM_RESREL) +#define NVME_FEAT_RM_RESPRE(v) NVME_GET(v, FEAT_RM_RESPRE) inline void nvme_feature_decode_reservation_notification(__u32 value, bool *regpre, bool *resrel, bool *respre) @@ -556,14 +556,14 @@ inline void nvme_feature_decode_reservation_notification(__u32 value, bool *regp *respre = NVME_FEAT_RM_RESPRE(value); } -#define NVME_FEAT_RP_PTPL(v) NVME_GET(FEAT_RP_PTPL, v) +#define NVME_FEAT_RP_PTPL(v) NVME_GET(v, FEAT_RP_PTPL) inline void nvme_feature_decode_reservation_persistance(__u32 value, bool *ptpl) { *ptpl = NVME_FEAT_RP_PTPL(value); } -#define NVME_FEAT_WP_WPS(v) NVME_GET(FEAT_WP_WPS, v) +#define NVME_FEAT_WP_WPS(v) NVME_GET(v, FEAT_WP_WPS) inline void nvme_feature_decode_namespace_write_protect(__u32 value, __u8 *wps) {
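
[Not part of the patch] A minimal usage sketch of the reordered accessors, assuming only the NVME_GET()/NVME_SET() definitions from types.h and the NVME_FEAT_TT_* shift/mask pairs from util.h shown above; the wrapper name example_temp_thresh and the direct header includes are illustrative, not library API.

#include <linux/types.h>	/* __u8, __u16, __u32 */
#include "types.h"		/* NVME_GET(), NVME_SET() */
#include "util.h"		/* NVME_FEAT_TT_* shift/mask pairs */

/* Build the Temperature Threshold feature value the same way
 * nvme_set_features_temp_thresh() does, then decode one field back. */
static __u32 example_temp_thresh(__u16 tmpth, __u8 tmpsel, __u8 thsel)
{
	/* NVME_SET(value, name): mask the value, then shift it into place */
	__u32 value = NVME_SET(tmpth, FEAT_TT_TMPTH) |
		      NVME_SET(tmpsel, FEAT_TT_TMPSEL) |
		      NVME_SET(thsel, FEAT_TT_THSEL);

	/* NVME_GET(value, name): shift down, then mask off a single field */
	__u16 decoded = NVME_GET(value, FEAT_TT_TMPTH);

	(void)decoded;	/* round-trips to tmpth & NVME_FEAT_TT_TMPTH_MASK */
	return value;
}

With the value first, these calls read the same way as the converted call sites in ioctl.c above.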