net: devmem: pre-read requested rx queues during bind
author Dragos Tatulea <dtatulea@nvidia.com>
Wed, 27 Aug 2025 14:40:00 +0000 (17:40 +0300)
committer Jakub Kicinski <kuba@kernel.org>
Thu, 28 Aug 2025 23:05:32 +0000 (16:05 -0700)
Instead of reading the requested rx queues after binding the buffer,
read the requested rx queues into a bitmap up front and iterate over
the set bits when needed.
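
In essence, the new flow is (a minimal sketch of the pattern distilled
from this patch, with netlink parsing and error handling elided):

	unsigned long *rxq_bitmap;
	u32 rxq_idx;

	/* one bit per real rx queue on the device */
	rxq_bitmap = bitmap_zalloc(netdev->real_num_rx_queues, GFP_KERNEL);
	if (!rxq_bitmap)
		return -ENOMEM;

	/* while parsing each NETDEV_A_DMABUF_QUEUES attribute: */
	bitmap_set(rxq_bitmap, rxq_idx, 1);

	/* later, after the dmabuf has been bound: */
	for_each_set_bit(rxq_idx, rxq_bitmap, netdev->real_num_rx_queues)
		err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx,
						      binding, info->extack);

	bitmap_free(rxq_bitmap);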

This is a preparation for fetching the DMA device for each queue.

This patch has no functional changes besides adding an extra
rx queue index bounds check.
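
Concretely, the added check in the new netdev_nl_read_rxq_bitmap()
helper (quoted from the diff below) rejects queue ids at or beyond
real_num_rx_queues already at parse time:

	rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
	if (rxq_idx >= rxq_bitmap_len) {
		NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_ID]);
		return -EINVAL;
	}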

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Mina Almasry <almasrymina@google.com>
Link: https://patch.msgid.link/20250827144017.1529208-8-dtatulea@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/core/netdev-genl.c

index 3e2d6aa6e0606fe14da394500d2c5e5bbf6ff206..739598d3465724b3fe8b57e8e0ea43818df2b5f4 100644
@@ -869,17 +869,55 @@ int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
        return err;
 }
 
-int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
+static int netdev_nl_read_rxq_bitmap(struct genl_info *info,
+                                    u32 rxq_bitmap_len,
+                                    unsigned long *rxq_bitmap)
 {
+       const int maxtype = ARRAY_SIZE(netdev_queue_id_nl_policy) - 1;
        struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
+       struct nlattr *attr;
+       int rem, err = 0;
+       u32 rxq_idx;
+
+       nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
+                              genlmsg_data(info->genlhdr),
+                              genlmsg_len(info->genlhdr), rem) {
+               err = nla_parse_nested(tb, maxtype, attr,
+                                      netdev_queue_id_nl_policy, info->extack);
+               if (err < 0)
+                       return err;
+
+               if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
+                   NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE))
+                       return -EINVAL;
+
+               if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
+                       NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
+                       return -EINVAL;
+               }
+
+               rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
+               if (rxq_idx >= rxq_bitmap_len) {
+                       NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_ID]);
+                       return -EINVAL;
+               }
+
+               bitmap_set(rxq_bitmap, rxq_idx, 1);
+       }
+
+       return 0;
+}
+
+int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
+{
        struct net_devmem_dmabuf_binding *binding;
        u32 ifindex, dmabuf_fd, rxq_idx;
        struct netdev_nl_sock *priv;
        struct net_device *netdev;
+       unsigned long *rxq_bitmap;
        struct device *dma_dev;
        struct sk_buff *rsp;
-       struct nlattr *attr;
-       int rem, err = 0;
+       int err = 0;
        void *hdr;
 
        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
@@ -922,37 +960,26 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
                goto err_unlock;
        }
 
+       rxq_bitmap = bitmap_zalloc(netdev->real_num_rx_queues, GFP_KERNEL);
+       if (!rxq_bitmap) {
+               err = -ENOMEM;
+               goto err_unlock;
+       }
+
+       err = netdev_nl_read_rxq_bitmap(info, netdev->real_num_rx_queues,
+                                       rxq_bitmap);
+       if (err)
+               goto err_rxq_bitmap;
+
        dma_dev = netdev_queue_get_dma_dev(netdev, 0);
        binding = net_devmem_bind_dmabuf(netdev, dma_dev, DMA_FROM_DEVICE,
                                         dmabuf_fd, priv, info->extack);
        if (IS_ERR(binding)) {
                err = PTR_ERR(binding);
-               goto err_unlock;
+               goto err_rxq_bitmap;
        }
 
-       nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
-                              genlmsg_data(info->genlhdr),
-                              genlmsg_len(info->genlhdr), rem) {
-               err = nla_parse_nested(
-                       tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
-                       netdev_queue_id_nl_policy, info->extack);
-               if (err < 0)
-                       goto err_unbind;
-
-               if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
-                   NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
-                       err = -EINVAL;
-                       goto err_unbind;
-               }
-
-               if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
-                       NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
-                       err = -EINVAL;
-                       goto err_unbind;
-               }
-
-               rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
-
+       for_each_set_bit(rxq_idx, rxq_bitmap, netdev->real_num_rx_queues) {
                err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
                                                      info->extack);
                if (err)
@@ -966,6 +993,8 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
        if (err)
                goto err_unbind;
 
+       bitmap_free(rxq_bitmap);
+
        netdev_unlock(netdev);
 
        mutex_unlock(&priv->lock);
@@ -974,6 +1003,8 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
 
 err_unbind:
        net_devmem_unbind_dmabuf(binding);
+err_rxq_bitmap:
+       bitmap_free(rxq_bitmap);
 err_unlock:
        netdev_unlock(netdev);
 err_unlock_sock: