nvmet-tcp: switch to using the crc32c library
author    Eric Biggers <ebiggers@google.com>
          Wed, 26 Feb 2025 06:28:40 +0000 (22:28 -0800)
committer Christoph Hellwig <hch@lst.de>
          Tue, 22 Apr 2025 08:08:32 +0000 (10:08 +0200)
Now that the crc32c() library function directly takes advantage of
architecture-specific optimizations, it is unnecessary to go through the
crypto API.  Just use crc32c().  This is much simpler, and it improves
performance due to eliminating the crypto API overhead.
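For reference, a minimal standalone sketch of the digest convention the
patch adopts: the crc32c() library call is seeded with ~0 and the result
is inverted before being stored little-endian after the PDU header, as in
nvmet_tcp_hdgst() below. This sketch is illustrative, not driver code:
crc32c_sw() is a plain bitwise CRC-32C standing in for the kernel's
optimized crc32c(), and the mock PDU buffer is made up for the example.

    /* Illustrative stand-in for the kernel's crc32c() library call. */
    #include <stdint.h>
    #include <stdio.h>

    /* Reflected CRC-32C (Castagnoli), reversed polynomial 0x82F63B78. */
    static uint32_t crc32c_sw(uint32_t crc, const void *buf, size_t len)
    {
    	const uint8_t *p = buf;

    	while (len--) {
    		crc ^= *p++;
    		for (int i = 0; i < 8; i++)
    			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
    	}
    	return crc;
    }

    int main(void)
    {
    	/* Mock 24-byte PDU header with room for the 4-byte digest. */
    	uint8_t pdu[24 + 4] = { 0x04, 0x03 /* ...header bytes... */ };
    	uint32_t digest = ~crc32c_sw(~0u, pdu, 24);

    	/* Equivalent of put_unaligned_le32(digest, pdu + 24). */
    	pdu[24] = digest & 0xff;
    	pdu[25] = (digest >> 8) & 0xff;
    	pdu[26] = (digest >> 16) & 0xff;
    	pdu[27] = (digest >> 24) & 0xff;

    	/* Sanity check: standard CRC-32C check value for "123456789". */
    	printf("check = %#010x (expect 0xe3069283)\n",
    	       ~crc32c_sw(~0u, "123456789", 9));
    	printf("hdgst = %#010x\n", digest);
    	return 0;
    }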

Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/target/tcp.c

index f2d0c920269b949cd7d75172afe6d8bfd073732d..1d34e839e1ec7e4e755fd314a299e335a41cdfdf 100644
@@ -7,6 +7,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/crc32c.h>
 #include <linux/err.h>
 #include <linux/nvme-tcp.h>
 #include <linux/nvme-keyring.h>
@@ -17,7 +18,6 @@
 #include <net/handshake.h>
 #include <linux/inet.h>
 #include <linux/llist.h>
-#include <crypto/hash.h>
 #include <trace/events/sock.h>
 
 #include "nvmet.h"
@@ -172,8 +172,6 @@ struct nvmet_tcp_queue {
        /* digest state */
        bool                    hdr_digest;
        bool                    data_digest;
-       struct ahash_request    *snd_hash;
-       struct ahash_request    *rcv_hash;
 
        /* TLS state */
        key_serial_t            tls_pskid;
@@ -294,14 +292,9 @@ static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
        return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
 }
 
-static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
-               void *pdu, size_t len)
+static inline void nvmet_tcp_hdgst(void *pdu, size_t len)
 {
-       struct scatterlist sg;
-
-       sg_init_one(&sg, pdu, len);
-       ahash_request_set_crypt(hash, &sg, pdu + len, len);
-       crypto_ahash_digest(hash);
+       put_unaligned_le32(~crc32c(~0, pdu, len), pdu + len);
 }
 
 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
@@ -318,7 +311,7 @@ static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
        }
 
        recv_digest = *(__le32 *)(pdu + hdr->hlen);
-       nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
+       nvmet_tcp_hdgst(pdu, len);
        exp_digest = *(__le32 *)(pdu + hdr->hlen);
        if (recv_digest != exp_digest) {
                pr_err("queue %d: header digest error: recv %#x expected %#x\n",
@@ -441,12 +434,24 @@ err:
        return NVME_SC_INTERNAL;
 }
 
-static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
-               struct nvmet_tcp_cmd *cmd)
+static void nvmet_tcp_calc_ddgst(struct nvmet_tcp_cmd *cmd)
 {
-       ahash_request_set_crypt(hash, cmd->req.sg,
-               (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
-       crypto_ahash_digest(hash);
+       size_t total_len = cmd->req.transfer_len;
+       struct scatterlist *sg = cmd->req.sg;
+       u32 crc = ~0;
+
+       while (total_len) {
+               size_t len = min_t(size_t, total_len, sg->length);
+
+               /*
+                * Note that the scatterlist does not contain any highmem pages,
+                * as it was allocated by sgl_alloc() with GFP_KERNEL.
+                */
+               crc = crc32c(crc, sg_virt(sg), len);
+               total_len -= len;
+               sg = sg_next(sg);
+       }
+       cmd->exp_ddgst = cpu_to_le32(~crc);
 }
 
 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
@@ -473,19 +478,18 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
 
        if (queue->data_digest) {
                pdu->hdr.flags |= NVME_TCP_F_DDGST;
-               nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
+               nvmet_tcp_calc_ddgst(cmd);
        }
 
        if (cmd->queue->hdr_digest) {
                pdu->hdr.flags |= NVME_TCP_F_HDGST;
-               nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+               nvmet_tcp_hdgst(pdu, sizeof(*pdu));
        }
 }
 
 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
 {
        struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
-       struct nvmet_tcp_queue *queue = cmd->queue;
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
 
        cmd->offset = 0;
@@ -503,14 +507,13 @@ static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
        pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
        if (cmd->queue->hdr_digest) {
                pdu->hdr.flags |= NVME_TCP_F_HDGST;
-               nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+               nvmet_tcp_hdgst(pdu, sizeof(*pdu));
        }
 }
 
 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
 {
        struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
-       struct nvmet_tcp_queue *queue = cmd->queue;
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
 
        cmd->offset = 0;
@@ -523,7 +526,7 @@ static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
        pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
        if (cmd->queue->hdr_digest) {
                pdu->hdr.flags |= NVME_TCP_F_HDGST;
-               nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+               nvmet_tcp_hdgst(pdu, sizeof(*pdu));
        }
 }
 
@@ -857,42 +860,6 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
        smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU);
 }
 
-static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
-{
-       struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
-
-       ahash_request_free(queue->rcv_hash);
-       ahash_request_free(queue->snd_hash);
-       crypto_free_ahash(tfm);
-}
-
-static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
-{
-       struct crypto_ahash *tfm;
-
-       tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(tfm))
-               return PTR_ERR(tfm);
-
-       queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
-       if (!queue->snd_hash)
-               goto free_tfm;
-       ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
-
-       queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
-       if (!queue->rcv_hash)
-               goto free_snd_hash;
-       ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
-
-       return 0;
-free_snd_hash:
-       ahash_request_free(queue->snd_hash);
-free_tfm:
-       crypto_free_ahash(tfm);
-       return -ENOMEM;
-}
-
-
 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
 {
        struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
@@ -921,11 +888,6 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
 
        queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
        queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
-       if (queue->hdr_digest || queue->data_digest) {
-               ret = nvmet_tcp_alloc_crypto(queue);
-               if (ret)
-                       return ret;
-       }
 
        memset(icresp, 0, sizeof(*icresp));
        icresp->hdr.type = nvme_tcp_icresp;
@@ -1247,7 +1209,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
 {
        struct nvmet_tcp_queue *queue = cmd->queue;
 
-       nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
+       nvmet_tcp_calc_ddgst(cmd);
        queue->offset = 0;
        queue->left = NVME_TCP_DIGEST_LENGTH;
        queue->rcv_state = NVMET_TCP_RECV_DDGST;
@@ -1617,8 +1579,6 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
        /* ->sock will be released by fput() */
        fput(queue->sock->file);
        nvmet_tcp_free_cmds(queue);
-       if (queue->hdr_digest || queue->data_digest)
-               nvmet_tcp_free_crypto(queue);
        ida_free(&nvmet_tcp_queue_ida, queue->idx);
        page_frag_cache_drain(&queue->pf_cache);
        kfree(queue);