www.infradead.org Git - users/hch/misc.git/commitdiff
sunrpc: simplify xdr_partial_copy_from_skb
author: Christoph Hellwig <hch@lst.de>
Mon, 17 Feb 2025 07:28:07 +0000 (08:28 +0100)
committer: Christoph Hellwig <hch@lst.de>
Tue, 13 May 2025 08:23:19 +0000 (10:23 +0200)
csum_partial_copy_to_xdr handles both a checksumming and a non-checksumming
case and implements this using a callback, which leads to a lot of
boilerplate code and indirect calls in the fast path.

Switch to storing a no_checksum flag in struct xdr_skb_reader instead
to remove the indirect call and simplify the code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
net/sunrpc/socklib.c

index 1b2b84feeec69fed27ce02756728004837d379d6..7196e7042e0fafabdf708099cfdaae6282746228 100644 (file)
 struct xdr_skb_reader {
        struct sk_buff  *skb;
        unsigned int    offset;
+       bool            no_checksum;
        size_t          count;
        __wsum          csum;
 };
 
-typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to,
-                                    size_t len);
-
 /**
  * xdr_skb_read_bits - copy some data bits from skb to internal buffer
  * @desc: sk_buff copy helper
  * @to: copy destination
  * @len: number of bytes to copy
  *
- * Possibly called several times to iterate over an sk_buff and copy
- * data out of it.
+ * Possibly called several times to iterate over an sk_buff and copy data out of
+ * it.
  */
 static size_t
 xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
 {
-       if (len > desc->count)
-               len = desc->count;
-       if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
-               return 0;
-       desc->count -= len;
-       desc->offset += len;
-       return len;
-}
+       len = min(len, desc->count);
+
+       if (desc->no_checksum) {
+               if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
+                       return 0;
+       } else {
+               __wsum csum;
+
+               csum = skb_copy_and_csum_bits(desc->skb, desc->offset, to, len);
+               desc->csum = csum_block_add(desc->csum, csum, desc->offset);
+       }
 
-/**
- * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
- * @desc: sk_buff copy helper
- * @to: copy destination
- * @len: number of bytes to copy
- *
- * Same as skb_read_bits, but calculate a checksum at the same time.
- */
-static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
-{
-       unsigned int pos;
-       __wsum csum2;
-
-       if (len > desc->count)
-               len = desc->count;
-       pos = desc->offset;
-       csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len);
-       desc->csum = csum_block_add(desc->csum, csum2, pos);
        desc->count -= len;
        desc->offset += len;
        return len;
 }
 
-/**
- * xdr_partial_copy_from_skb - copy data out of an skb
- * @xdr: target XDR buffer
- * @base: starting offset
- * @desc: sk_buff copy helper
- * @copy_actor: virtual method for copying data
- *
- */
 static ssize_t
-xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
+xdr_partial_copy_from_skb(struct xdr_buf *xdr, struct xdr_skb_reader *desc)
 {
-       struct page     **ppage = xdr->pages;
-       unsigned int    len, pglen = xdr->page_len;
-       ssize_t         copied = 0;
-       size_t          ret;
-
-       len = xdr->head[0].iov_len;
-       if (base < len) {
-               len -= base;
-               ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
-               copied += ret;
-               if (ret != len || !desc->count)
-                       goto out;
-               base = 0;
-       } else
-               base -= len;
-
-       if (unlikely(pglen == 0))
-               goto copy_tail;
-       if (unlikely(base >= pglen)) {
-               base -= pglen;
-               goto copy_tail;
-       }
-       if (base || xdr->page_base) {
-               pglen -= base;
-               base += xdr->page_base;
-               ppage += base >> PAGE_SHIFT;
-               base &= ~PAGE_MASK;
-       }
-       do {
+       struct page **ppage = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
+       unsigned int poff = xdr->page_base & ~PAGE_MASK;
+       unsigned int pglen = xdr->page_len;
+       ssize_t copied = 0;
+       size_t ret;
+
+       if (xdr->head[0].iov_len == 0)
+               return 0;
+
+       ret = xdr_skb_read_bits(desc, xdr->head[0].iov_base,
+                       xdr->head[0].iov_len);
+       if (ret != xdr->head[0].iov_len || !desc->count)
+               return ret;
+       copied += ret;
+
+       while (pglen) {
+               unsigned int len = min(PAGE_SIZE - poff, pglen);
                char *kaddr;
 
                /* ACL likes to be lazy in allocating pages - ACLs
@@ -126,36 +89,29 @@ xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb
                        *ppage = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
                        if (unlikely(*ppage == NULL)) {
                                if (copied == 0)
-                                       copied = -ENOMEM;
-                               goto out;
+                                       return -ENOMEM;
+                               return copied;
                        }
                }
 
-               len = PAGE_SIZE;
                kaddr = kmap_atomic(*ppage);
-               if (base) {
-                       len -= base;
-                       if (pglen < len)
-                               len = pglen;
-                       ret = copy_actor(desc, kaddr + base, len);
-                       base = 0;
-               } else {
-                       if (pglen < len)
-                               len = pglen;
-                       ret = copy_actor(desc, kaddr, len);
-               }
+               ret = xdr_skb_read_bits(desc, kaddr + poff, len);
                flush_dcache_page(*ppage);
                kunmap_atomic(kaddr);
+
                copied += ret;
                if (ret != len || !desc->count)
-                       goto out;
+                       return copied;
                ppage++;
-       } while ((pglen -= len) != 0);
-copy_tail:
-       len = xdr->tail[0].iov_len;
-       if (base < len)
-               copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
-out:
+               pglen -= len;
+               poff = 0;
+       }
+
+       if (xdr->tail[0].iov_len) {
+               copied += xdr_skb_read_bits(desc, xdr->tail[0].iov_base,
+                                       xdr->tail[0].iov_len);
+       }
+
        return copied;
 }
 
@@ -174,12 +130,13 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
        desc.skb = skb;
        desc.offset = 0;
        desc.count = skb->len - desc.offset;
+       desc.no_checksum = skb_csum_unnecessary(skb);
 
-       if (skb_csum_unnecessary(skb))
+       if (desc.no_checksum)
                goto no_checksum;
 
        desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
-       if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
+       if (xdr_partial_copy_from_skb(xdr, &desc) < 0)
                return -1;
        if (desc.offset != skb->len) {
                __wsum csum2;
@@ -195,7 +152,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
                netdev_rx_csum_fault(skb->dev, skb);
        return 0;
 no_checksum:
-       if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
+       if (xdr_partial_copy_from_skb(xdr, &desc) < 0)
                return -1;
        if (desc.count)
                return -1;