#include "ena_xdp.h"
 
-static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
+static int validate_xdp_req_id(struct ena_ring *tx_ring, u16 req_id)
 {
        struct ena_tx_buffer *tx_info;
 
-       tx_info = &xdp_ring->tx_buffer_info[req_id];
+       tx_info = &tx_ring->tx_buffer_info[req_id];
        if (likely(tx_info->xdpf))
                return 0;
 
-       return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
+       return handle_invalid_req_id(tx_ring, req_id, tx_info, true);
 }
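
 /* Note: a NULL xdpf above means the device completed a req_id whose
  * slot has no in-flight frame (a duplicate or corrupted completion).
  * handle_invalid_req_id() then bumps the bad_req_id stat and requests
  * a device reset; the 'true' argument marks this as an XDP queue.
  */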
 
-static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
+static int ena_xdp_tx_map_frame(struct ena_ring *tx_ring,
                                struct ena_tx_buffer *tx_info,
                                struct xdp_frame *xdpf,
                                struct ena_com_tx_ctx *ena_tx_ctx)
 {
-       struct ena_adapter *adapter = xdp_ring->adapter;
+       struct ena_adapter *adapter = tx_ring->adapter;
        struct ena_com_buf *ena_buf;
        int push_len = 0;
        dma_addr_t dma;
        void *data;
        u32 size;

        tx_info->xdpf = xdpf;
        data = tx_info->xdpf->data;
        size = tx_info->xdpf->len;
 
-       if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+       if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
                /* Designate part of the packet for LLQ */
-               push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+               push_len = min_t(u32, size, tx_ring->tx_max_header_size);
 
                ena_tx_ctx->push_header = data;

                size -= push_len;
                data += push_len;
        }

        ena_tx_ctx->header_len = push_len;
 
        if (size > 0) {
-               dma = dma_map_single(xdp_ring->dev,
+               dma = dma_map_single(tx_ring->dev,
                                     data,
                                     size,
                                     DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
+               if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
                        goto error_report_dma_error;
 
                tx_info->map_linear_data = 0;

                ena_buf = tx_info->bufs;
                ena_buf->paddr = dma;
                ena_buf->len = size;

                ena_tx_ctx->ena_bufs = ena_buf;
                ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
        }

        return 0;
 
 error_report_dma_error:
-       ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
-                         &xdp_ring->syncp);
+       ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
+                         &tx_ring->syncp);
        netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
 
        return -EINVAL;
 }
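
 /* Worked example for the LLQ split above (numbers are illustrative):
  * with a 300-byte frame and a tx_max_header_size of 96, the first 96
  * bytes are pushed inline to device memory and the remaining 204 bytes
  * are DMA-mapped as one buffer. A frame that fits entirely within the
  * push header leaves size at 0, so the DMA mapping is skipped.
  */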
 
-int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
+int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
                       struct ena_adapter *adapter,
                       struct xdp_frame *xdpf,
                       int flags)
 {
        struct ena_com_tx_ctx ena_tx_ctx = {};
        struct ena_tx_buffer *tx_info;
        u16 next_to_use, req_id;
        int rc;
 
-       next_to_use = xdp_ring->next_to_use;
-       req_id = xdp_ring->free_ids[next_to_use];
-       tx_info = &xdp_ring->tx_buffer_info[req_id];
+       next_to_use = tx_ring->next_to_use;
+       req_id = tx_ring->free_ids[next_to_use];
+       tx_info = &tx_ring->tx_buffer_info[req_id];
        tx_info->num_of_bufs = 0;
 
-       rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
+       rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
        if (unlikely(rc))
                return rc;
 
        ena_tx_ctx.req_id = req_id;
 
        rc = ena_xmit_common(adapter,
-                            xdp_ring,
+                            tx_ring,
                             tx_info,
                             &ena_tx_ctx,
                             next_to_use,
                             xdpf->len);
        if (rc)
                goto error_unmap_dma;

        /* trigger the dma engine. ena_ring_tx_doorbell()
         * calls a memory barrier inside it.
         */
        if (flags & XDP_XMIT_FLUSH)
-               ena_ring_tx_doorbell(xdp_ring);
+               ena_ring_tx_doorbell(tx_ring);
 
        return rc;
 
 error_unmap_dma:
-       ena_unmap_tx_buff(xdp_ring, tx_info);
+       ena_unmap_tx_buff(tx_ring, tx_info);
        tx_info->xdpf = NULL;
        return rc;
 }
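
 /* Callers are expected to hold tx_ring->xdp_tx_lock. Passing
  * XDP_XMIT_FLUSH rings the doorbell immediately; a batching caller
  * such as ena_xdp_xmit() below may omit it per frame and ring the
  * doorbell once for the whole batch instead.
  */
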
 int ena_xdp_xmit(struct net_device *dev, int n,
                  struct xdp_frame **frames, u32 flags)
 {
        struct ena_adapter *adapter = netdev_priv(dev);
-       struct ena_ring *xdp_ring;
+       struct ena_ring *tx_ring;
        int qid, i, nxmit = 0;
 
        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                return -EINVAL;

        qid = smp_processor_id() % adapter->xdp_num_queues;
        qid += adapter->xdp_first_ring;
-       xdp_ring = &adapter->tx_ring[qid];
+       tx_ring = &adapter->tx_ring[qid];
 
        /* Other CPU ids might try to send through this queue */
-       spin_lock(&xdp_ring->xdp_tx_lock);
+       spin_lock(&tx_ring->xdp_tx_lock);
 
        for (i = 0; i < n; i++) {
-               if (ena_xdp_xmit_frame(xdp_ring, adapter, frames[i], 0))
+               if (ena_xdp_xmit_frame(tx_ring, adapter, frames[i], 0))
                        break;
                nxmit++;
        }
 
        /* Ring doorbell to make device aware of the packets */
        if (flags & XDP_XMIT_FLUSH)
-               ena_ring_tx_doorbell(xdp_ring);
+               ena_ring_tx_doorbell(tx_ring);
 
-       spin_unlock(&xdp_ring->xdp_tx_lock);
+       spin_unlock(&tx_ring->xdp_tx_lock);
 
        /* Return number of packets sent */
        return nxmit;
 }
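
 /* The qid computation above pins each CPU to one XDP TX queue; with
  * 8 XDP queues, for example, CPU 0 and CPU 8 alias to the same ring,
  * which is why xdp_tx_lock is needed even though queue selection is
  * deterministic per CPU.
  */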
 
-static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
+static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
 {
        u32 total_done = 0;
        u16 next_to_clean;
        int tx_pkts = 0;
        u16 req_id;
        int rc;
 
-       if (unlikely(!xdp_ring))
+       if (unlikely(!tx_ring))
                return 0;
-       next_to_clean = xdp_ring->next_to_clean;
+       next_to_clean = tx_ring->next_to_clean;
 
        while (tx_pkts < budget) {
                struct ena_tx_buffer *tx_info;
                struct xdp_frame *xdpf;
 
-               rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
+               rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
                                                &req_id);
                if (rc) {
                        if (unlikely(rc == -EINVAL))
-                               handle_invalid_req_id(xdp_ring, req_id, NULL,
-                                                     true);
+                               handle_invalid_req_id(tx_ring, req_id, NULL, true);
                        break;
                }
 
                /* validate that the request id points to a valid xdp_frame */
-               rc = validate_xdp_req_id(xdp_ring, req_id);
+               rc = validate_xdp_req_id(tx_ring, req_id);
                if (rc)
                        break;
 
-               tx_info = &xdp_ring->tx_buffer_info[req_id];
+               tx_info = &tx_ring->tx_buffer_info[req_id];
                xdpf = tx_info->xdpf;
 
                tx_info->xdpf = NULL;
                tx_info->last_jiffies = 0;
-               ena_unmap_tx_buff(xdp_ring, tx_info);
+               ena_unmap_tx_buff(tx_ring, tx_info);
 
-               netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
-                         "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
+               netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
+                         "tx_poll: q %d skb %p completed\n", tx_ring->qid,
                          xdpf);
 
                tx_pkts++;
                total_done += tx_info->tx_descs;
 
                xdp_return_frame(xdpf);
-               xdp_ring->free_ids[next_to_clean] = req_id;
+               tx_ring->free_ids[next_to_clean] = req_id;
                next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
-                                                    xdp_ring->ring_size);
+                                                    tx_ring->ring_size);
        }
 
-       xdp_ring->next_to_clean = next_to_clean;
-       ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
-       ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
+       tx_ring->next_to_clean = next_to_clean;
+       ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
+       ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
 
-       netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
+       netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
                  "tx_poll: q %d done. total pkts: %d\n",
-                 xdp_ring->qid, tx_pkts);
+                 tx_ring->qid, tx_pkts);
 
        return tx_pkts;
 }
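
 /* Each reaped completion returns its req_id to free_ids[] at
  * next_to_clean so ena_xdp_xmit_frame() can hand it out again, and
  * the total_done descriptors are acknowledged to the device in a
  * single ena_com_comp_ack() call rather than once per packet.
  */
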
 int ena_xdp_io_poll(struct napi_struct *napi, int budget)
 {
        struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
        u32 xdp_work_done, xdp_budget;
-       struct ena_ring *xdp_ring;
+       struct ena_ring *tx_ring;
        int napi_comp_call = 0;
        int ret;
 
-       xdp_ring = ena_napi->xdp_ring;
+       tx_ring = ena_napi->tx_ring;
 
        xdp_budget = budget;
 
-       if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
-           test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
+       if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
+           test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
                napi_complete_done(napi, 0);
                return 0;
        }
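
        /* Reap TX completions up to the NAPI budget: finishing under
         * budget lets napi_complete_done() succeed so the interrupt
         * can be unmasked, while exhausting it keeps the poller
         * scheduled.
         */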
 
-       xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);
+       xdp_work_done = ena_clean_xdp_irq(tx_ring, xdp_budget);
 
        /* If the device is about to reset or go down, avoid unmasking
         * the interrupt and return 0 so NAPI won't reschedule
         */
-       if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
+       if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags))) {
                napi_complete_done(napi, 0);
                ret = 0;
        } else if (xdp_budget > xdp_work_done) {
                napi_comp_call = 1;
                if (napi_complete_done(napi, xdp_work_done))
-                       ena_unmask_interrupt(xdp_ring, NULL);
-               ena_update_ring_numa_node(xdp_ring, NULL);
+                       ena_unmask_interrupt(tx_ring, NULL);
+               ena_update_ring_numa_node(tx_ring, NULL);
                ret = xdp_work_done;
        } else {
                ret = xdp_budget;
        }
 
-       u64_stats_update_begin(&xdp_ring->syncp);
-       xdp_ring->tx_stats.napi_comp += napi_comp_call;
-       xdp_ring->tx_stats.tx_poll++;
-       u64_stats_update_end(&xdp_ring->syncp);
-       xdp_ring->tx_stats.last_napi_jiffies = jiffies;
+       u64_stats_update_begin(&tx_ring->syncp);
+       tx_ring->tx_stats.napi_comp += napi_comp_call;
+       tx_ring->tx_stats.tx_poll++;
+       u64_stats_update_end(&tx_ring->syncp);
+       tx_ring->tx_stats.last_napi_jiffies = jiffies;
 
        return ret;
 }