gve: merge packet buffer size fields
Author:     Joshua Washington <joshwash@google.com>
AuthorDate: Fri, 21 Mar 2025 00:29:08 +0000 (00:29 +0000)
Commit:     Jakub Kicinski <kuba@kernel.org>
CommitDate: Tue, 25 Mar 2025 20:51:15 +0000 (13:51 -0700)
The data_buffer_size_dqo field in gve_priv and the packet_buffer_size
field in gve_rx_ring theoretically have the same meaning, but they are
defined in two different places and used in two separate contexts. There
is no good reason for this, so this change merges those fields into the
packet_buffer_size field in the RX ring.
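For orientation, a minimal sketch of the resulting layout (only the
members visible in the diff below are real; everything else in the
struct is elided here):

	/* Sketch, not the full driver definition: the buffer size now
	 * lives in the queue-format-independent part of the RX ring,
	 * instead of in the GQI-only section of the union.
	 */
	struct gve_rx_ring {
		struct gve_priv *gve;
		u16 packet_buffer_size;	/* shared by GQI and DQO */
		union {
			/* GQI fields */
			struct {
				u32 db_threshold;
				/* packet_buffer_size no longer here */
			};
			/* DQO fields elided */
		};
	};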

This change also introduces a packet_buffer_size field to struct
gve_rx_queue_config to account for cases where queues are not allocated,
such as when the interface is down.
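A sketch of the new config struct and the hand-off at ring allocation
time, taken directly from the hunks below:

	struct gve_rx_queue_config {
		u16 max_queues;
		u16 num_queues;
		u16 packet_buffer_size;	/* persists while rings are freed */
	};

	/* Each ring inherits the configured size when allocated: */
	rx->packet_buffer_size = cfg->packet_buffer_size;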

Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
Link: https://patch.msgid.link/20250321002910.1343422-5-hramamurthy@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/google/gve/gve.h
drivers/net/ethernet/google/gve/gve_adminq.c
drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
drivers/net/ethernet/google/gve/gve_ethtool.c
drivers/net/ethernet/google/gve/gve_main.c
drivers/net/ethernet/google/gve/gve_rx.c
drivers/net/ethernet/google/gve/gve_rx_dqo.c

index e5cc3fada9c92f66d9077ddfbff4af9cacc3d7e3..9895541eddaeca8c6139b77c8188f04150bc4eb7 100644 (file)
@@ -227,6 +227,7 @@ struct gve_rx_cnts {
 /* Contains datapath state used to represent an RX queue. */
 struct gve_rx_ring {
        struct gve_priv *gve;
+       u16 packet_buffer_size;
        union {
                /* GQI fields */
                struct {
@@ -235,7 +236,6 @@ struct gve_rx_ring {
 
                        /* threshold for posting new buffs and descs */
                        u32 db_threshold;
-                       u16 packet_buffer_size;
 
                        u32 qpl_copy_pool_mask;
                        u32 qpl_copy_pool_head;
@@ -635,6 +635,7 @@ struct gve_notify_block {
 struct gve_rx_queue_config {
        u16 max_queues;
        u16 num_queues;
+       u16 packet_buffer_size;
 };
 
 /* Tracks allowed and current tx queue settings */
@@ -842,7 +843,6 @@ struct gve_priv {
        struct gve_ptype_lut *ptype_lut_dqo;
 
        /* Must be a power of two. */
-       u16 data_buffer_size_dqo;
        u16 max_rx_buffer_size; /* device limit */
 
        enum gve_queue_format queue_format;
index be7a423e5ab970511aa0a3384842ec0148d91c16..3e8fc33cc11fdb4da63baa11fefa7b119cdd740e 100644 (file)
@@ -731,6 +731,7 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
                .ntfy_id = cpu_to_be32(rx->ntfy_id),
                .queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
                .rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
+               .packet_buffer_size = cpu_to_be16(rx->packet_buffer_size),
        };
 
        if (gve_is_gqi(priv)) {
@@ -743,7 +744,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
                        cpu_to_be64(rx->data.data_bus);
                cmd->create_rx_queue.index = cpu_to_be32(queue_index);
                cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
-               cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
        } else {
                u32 qpl_id = 0;
 
@@ -756,8 +756,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
                        cpu_to_be64(rx->dqo.complq.bus);
                cmd->create_rx_queue.rx_data_ring_addr =
                        cpu_to_be64(rx->dqo.bufq.bus);
-               cmd->create_rx_queue.packet_buffer_size =
-                       cpu_to_be16(priv->data_buffer_size_dqo);
                cmd->create_rx_queue.rx_buff_ring_size =
                        cpu_to_be16(priv->rx_desc_cnt);
                cmd->create_rx_queue.enable_rsc =
index af84cb88f828c1dde46e8e77baf23e94307e27ec..f9824664d04c53f045cf06e5c383bd644583a2f3 100644 (file)
@@ -139,7 +139,7 @@ int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
        buf_state->page_info.page_offset = 0;
        buf_state->page_info.page_address =
                page_address(buf_state->page_info.page);
-       buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+       buf_state->page_info.buf_size = rx->packet_buffer_size;
        buf_state->last_single_ref_offset = 0;
 
        /* The page already has 1 ref. */
@@ -162,7 +162,7 @@ void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
                         struct gve_rx_buf_state_dqo *buf_state)
 {
-       const u16 data_buffer_size = priv->data_buffer_size_dqo;
+       const u16 data_buffer_size = rx->packet_buffer_size;
        int pagecount;
 
        /* Can't reuse if we only fit one buffer per page */
@@ -217,10 +217,9 @@ void gve_free_to_page_pool(struct gve_rx_ring *rx,
 static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
                                    struct gve_rx_buf_state_dqo *buf_state)
 {
-       struct gve_priv *priv = rx->gve;
        netmem_ref netmem;
 
-       buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+       buf_state->page_info.buf_size = rx->packet_buffer_size;
        netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
                                        &buf_state->page_info.page_offset,
                                        &buf_state->page_info.buf_size,
index a862031ba5d160ed6d4dd435f061639f384042ee..31a21ccf486348c4d1ba72e1b822993031220b13 100644 (file)
@@ -647,8 +647,7 @@ static int gve_set_tunable(struct net_device *netdev,
        switch (etuna->id) {
        case ETHTOOL_RX_COPYBREAK:
        {
-               u32 max_copybreak = gve_is_gqi(priv) ?
-                       GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
+               u32 max_copybreak = priv->rx_cfg.packet_buffer_size;
 
                len = *(u32 *)value;
                if (len > max_copybreak)
index 354f526a923851a5e8f4618a40ca8ee8416084b0..20aabbe0e5180f459eb5e4668a928457b9fa37ae 100644 (file)
@@ -1224,9 +1224,7 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
        cfg->raw_addressing = !gve_is_qpl(priv);
        cfg->enable_header_split = priv->header_split_enabled;
        cfg->ring_size = priv->rx_desc_cnt;
-       cfg->packet_buffer_size = gve_is_gqi(priv) ?
-                                 GVE_DEFAULT_RX_BUFFER_SIZE :
-                                 priv->data_buffer_size_dqo;
+       cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
        cfg->rx = priv->rx;
 }
 
@@ -1331,7 +1329,7 @@ static int gve_queues_start(struct gve_priv *priv,
                goto reset;
 
        priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
-       priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
+       priv->rx_cfg.packet_buffer_size = rx_alloc_cfg->packet_buffer_size;
 
        err = gve_create_rings(priv);
        if (err)
@@ -2627,7 +2625,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        priv->service_task_flags = 0x0;
        priv->state_flags = 0x0;
        priv->ethtool_flags = 0x0;
-       priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
+       priv->rx_cfg.packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
        priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 
        gve_set_probe_in_progress(priv);
index 9d444e723fcd4faa50451f15d116f85a70c5896d..90e875c1832f8bf893ebe630a2c413e1ee0233b6 100644 (file)
@@ -288,7 +288,7 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
 
        rx->gve = priv;
        rx->q_num = idx;
-       rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
+       rx->packet_buffer_size = cfg->packet_buffer_size;
 
        rx->mask = slots - 1;
        rx->data.raw_addressing = cfg->raw_addressing;
index dcdad6d09bf39ccb84374f52609aec49cb271823..5fbcf93a54e0780b3bafa51f70e707b1dfc12aaa 100644 (file)
@@ -223,6 +223,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
        memset(rx, 0, sizeof(*rx));
        rx->gve = priv;
        rx->q_num = idx;
+       rx->packet_buffer_size = cfg->packet_buffer_size;
 
        rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
                gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);