        rc->total_packets = 0;
 }
 
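+/**
+ * i40e_rx_bi - return the rx buffer at a given ring index
+ * @rx_ring: rx descriptor ring
+ * @idx: index into the ring's rx_bi software buffer array
+ **/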
+static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
+{
+       return &rx_ring->rx_bi[idx];
+}
+
 /**
  * i40e_reuse_rx_page - page flip buffer and store it back on the ring
  * @rx_ring: rx descriptor ring to store buffers on
        struct i40e_rx_buffer *new_buff;
        u16 nta = rx_ring->next_to_alloc;
 
-       new_buff = &rx_ring->rx_bi[nta];
+       new_buff = i40e_rx_bi(rx_ring, nta);
 
        /* update, and store next to alloc */
        nta++;
        ntc = rx_ring->next_to_clean;
 
        /* fetch, update, and store next to clean */
-       rx_buffer = &rx_ring->rx_bi[ntc++];
+       rx_buffer = i40e_rx_bi(rx_ring, ntc++);
        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;
 
 
        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
-               struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+               struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
 
                if (!rx_bi->page)
                        continue;
                return false;
 
        rx_desc = I40E_RX_DESC(rx_ring, ntu);
-       bi = &rx_ring->rx_bi[ntu];
+       bi = i40e_rx_bi(rx_ring, ntu);
 
        do {
                if (!i40e_alloc_mapped_page(rx_ring, bi))
                ntu++;
                if (unlikely(ntu == rx_ring->count)) {
                        rx_desc = I40E_RX_DESC(rx_ring, 0);
-                       bi = rx_ring->rx_bi;
+                       bi = i40e_rx_bi(rx_ring, 0);
                        ntu = 0;
                }
 
 {
        struct i40e_rx_buffer *rx_buffer;
 
-       rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+       rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
        prefetchw(rx_buffer->page);
 
        /* we are reusing so sync this buffer for CPU use */
 
 #include "i40e_txrx_common.h"
 #include "i40e_xsk.h"
 
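+/**
+ * i40e_rx_bi - return the rx buffer at a given ring index (zero-copy path)
+ * @rx_ring: rx descriptor ring
+ * @idx: index into the ring's rx_bi software buffer array
+ **/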
+static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
+{
+       return &rx_ring->rx_bi[idx];
+}
+
 /**
  * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
  * @vsi: Current VSI
        bool ok = true;
 
        rx_desc = I40E_RX_DESC(rx_ring, ntu);
-       bi = &rx_ring->rx_bi[ntu];
+       bi = i40e_rx_bi(rx_ring, ntu);
        do {
                if (!alloc(rx_ring, bi)) {
                        ok = false;
 
                if (unlikely(ntu == rx_ring->count)) {
                        rx_desc = I40E_RX_DESC(rx_ring, 0);
-                       bi = rx_ring->rx_bi;
+                       bi = i40e_rx_bi(rx_ring, 0);
                        ntu = 0;
                }
 
 {
        struct i40e_rx_buffer *bi;
 
-       bi = &rx_ring->rx_bi[rx_ring->next_to_clean];
+       bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
 
        /* we are reusing so sync this buffer for CPU use */
        dma_sync_single_range_for_cpu(rx_ring->dev,
 static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
                                    struct i40e_rx_buffer *old_bi)
 {
-       struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
+       struct i40e_rx_buffer *new_bi = i40e_rx_bi(rx_ring,
+                                                  rx_ring->next_to_alloc);
        u16 nta = rx_ring->next_to_alloc;
 
        /* update, and store next to alloc */
        mask = rx_ring->xsk_umem->chunk_mask;
 
        nta = rx_ring->next_to_alloc;
-       bi = &rx_ring->rx_bi[nta];
+       bi = i40e_rx_bi(rx_ring, nta);
 
        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
        u16 i;
 
        for (i = 0; i < rx_ring->count; i++) {
-               struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+               struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
 
                if (!rx_bi->addr)
                        continue;