static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
 
-static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
-                                          struct ixgbevf_ring *rx_ring,
+static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
                                           u32 val)
 {
+       rx_ring->next_to_use = val;
+
        /*
         * Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();
-       IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+       writel(val, rx_ring->tail);
 }
 
 /**
        }
 
 no_buffers:
-       if (rx_ring->next_to_use != i) {
-               rx_ring->next_to_use = i;
-               ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
-       }
+       if (rx_ring->next_to_use != i)
+               ixgbevf_release_rx_desc(rx_ring, i);
 }
 
 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
                IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
                IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
-               adapter->tx_ring[i].head = IXGBE_VFTDH(j);
-               adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
+               ring->tail = hw->hw_addr + IXGBE_VFTDT(j);
+               ring->next_to_clean = 0;
+               ring->next_to_use = 0;
                /* Disable Tx Head Writeback RO bit, since this hoses
                 * bookkeeping if things aren't delivered in order.
                 */
        /* set_rx_buffer_len must be called before ring initialization */
        ixgbevf_set_rx_buffer_len(adapter);
 
-       rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               rdba = adapter->rx_ring[i].dma;
-               j = adapter->rx_ring[i].reg_idx;
+               struct ixgbevf_ring *ring = &adapter->rx_ring[i];
+               rdba = ring->dma;
+               j = ring->reg_idx;
+               rdlen = ring->count * sizeof(union ixgbe_adv_rx_desc);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
                                (rdba & DMA_BIT_MASK(32)));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
-               adapter->rx_ring[i].head = IXGBE_VFRDH(j);
-               adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
+               ring->tail = hw->hw_addr + IXGBE_VFRDT(j);
+               ring->next_to_clean = 0;
+               ring->next_to_use = 0;
 
                ixgbevf_configure_srrctl(adapter, j);
        }
                hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
                       rxr);
 
-       ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+       ixgbevf_release_rx_desc(&adapter->rx_ring[rxr],
                                (adapter->rx_ring[rxr].count - 1));
 }
 
 
        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);
-
-       rx_ring->next_to_clean = 0;
-       rx_ring->next_to_use = 0;
-
-       if (rx_ring->head)
-               writel(0, adapter->hw.hw_addr + rx_ring->head);
-       if (rx_ring->tail)
-               writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
 /**
        memset(tx_ring->tx_buffer_info, 0, size);
 
        memset(tx_ring->desc, 0, tx_ring->size);
-
-       tx_ring->next_to_use = 0;
-       tx_ring->next_to_clean = 0;
-
-       if (tx_ring->head)
-               writel(0, adapter->hw.hw_addr + tx_ring->head);
-       if (tx_ring->tail)
-               writel(0, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 /**
        if (!tx_ring->desc)
                goto err;
 
-       tx_ring->next_to_use = 0;
-       tx_ring->next_to_clean = 0;
        return 0;
 
 err:
                goto alloc_failed;
        }
 
-       rx_ring->next_to_clean = 0;
-       rx_ring->next_to_use = 0;
-
        return 0;
 alloc_failed:
        return -ENOMEM;
                         ixgbevf_tx_map(tx_ring, skb, tx_flags),
                         first, skb->len, hdr_len);
 
-       writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
+       writel(tx_ring->next_to_use, tx_ring->tail);
 
        ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);