struct ethtool_ringparam *ring)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_ring *tx_ring = adapter->tx_ring;
-       struct ixgbe_ring *rx_ring = adapter->rx_ring;
+       struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+       struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
 
        ring->rx_max_pending = IXGBE_MAX_RXD;
        ring->tx_max_pending = IXGBE_MAX_TXD;
        new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
        new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
 
-       if ((new_tx_count == adapter->tx_ring->count) &&
-           (new_rx_count == adapter->rx_ring->count)) {
+       if ((new_tx_count == adapter->tx_ring[0]->count) &&
+           (new_rx_count == adapter->rx_ring[0]->count)) {
                /* nothing to do */
                return 0;
        }
 
        if (!netif_running(adapter->netdev)) {
                for (i = 0; i < adapter->num_tx_queues; i++)
-                       adapter->tx_ring[i].count = new_tx_count;
+                       adapter->tx_ring[i]->count = new_tx_count;
                for (i = 0; i < adapter->num_rx_queues; i++)
-                       adapter->rx_ring[i].count = new_rx_count;
+                       adapter->rx_ring[i]->count = new_rx_count;
                adapter->tx_ring_count = new_tx_count;
                adapter->rx_ring_count = new_rx_count;
-               goto err_setup;
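+               /* the interface is down: ring counts were updated in
+                * place above, so there is nothing to free on this path
+                */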
+               goto clear_reset;
        }
 
-       temp_tx_ring = kcalloc(adapter->num_tx_queues,
-                              sizeof(struct ixgbe_ring), GFP_KERNEL);
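+       /* the temporary ring array can be large and only needs to be
+        * virtually contiguous, so vmalloc is preferred over kcalloc
+        */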
+       temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
        if (!temp_tx_ring) {
                err = -ENOMEM;
-               goto err_setup;
+               goto clear_reset;
        }
 
        if (new_tx_count != adapter->tx_ring_count) {
-               memcpy(temp_tx_ring, adapter->tx_ring,
-                      adapter->num_tx_queues * sizeof(struct ixgbe_ring));
                for (i = 0; i < adapter->num_tx_queues; i++) {
+                       memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+                              sizeof(struct ixgbe_ring));
                        temp_tx_ring[i].count = new_tx_count;
                        err = ixgbe_setup_tx_resources(adapter,
                                                       &temp_tx_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
                                        ixgbe_free_tx_resources(adapter,
-                                                               &temp_tx_ring[i]);
+                                                             &temp_tx_ring[i]);
                                }
-                               goto err_setup;
+                               goto clear_reset;
                        }
                }
                need_update = true;
        }
 
-       temp_rx_ring = kcalloc(adapter->num_rx_queues,
-                              sizeof(struct ixgbe_ring), GFP_KERNEL);
-       if ((!temp_rx_ring) && (need_update)) {
-               for (i = 0; i < adapter->num_tx_queues; i++)
-                       ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
-               kfree(temp_tx_ring);
+       temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+       if (!temp_rx_ring) {
                err = -ENOMEM;
                goto err_setup;
        }
 
        if (new_rx_count != adapter->rx_ring_count) {
-               memcpy(temp_rx_ring, adapter->rx_ring,
-                      adapter->num_rx_queues * sizeof(struct ixgbe_ring));
                for (i = 0; i < adapter->num_rx_queues; i++) {
+                       memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+                              sizeof(struct ixgbe_ring));
                        temp_rx_ring[i].count = new_rx_count;
                        err = ixgbe_setup_rx_resources(adapter,
                                                       &temp_rx_ring[i]);
 
                /* tx */
                if (new_tx_count != adapter->tx_ring_count) {
-                       kfree(adapter->tx_ring);
-                       adapter->tx_ring = temp_tx_ring;
-                       temp_tx_ring = NULL;
+                       for (i = 0; i < adapter->num_tx_queues; i++) {
+                               ixgbe_free_tx_resources(adapter,
+                                                       adapter->tx_ring[i]);
+                               memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
+                                      sizeof(struct ixgbe_ring));
+                       }
                        adapter->tx_ring_count = new_tx_count;
                }
 
                /* rx */
                if (new_rx_count != adapter->rx_ring_count) {
-                       kfree(adapter->rx_ring);
-                       adapter->rx_ring = temp_rx_ring;
-                       temp_rx_ring = NULL;
+                       for (i = 0; i < adapter->num_rx_queues; i++) {
+                               ixgbe_free_rx_resources(adapter,
+                                                       adapter->rx_ring[i]);
+                               memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
+                                      sizeof(struct ixgbe_ring));
+                       }
                        adapter->rx_ring_count = new_rx_count;
                }
                ixgbe_up(adapter);
        }
+
+       vfree(temp_rx_ring);
 err_setup:
+       vfree(temp_tx_ring);
+clear_reset:
        clear_bit(__IXGBE_RESETTING, &adapter->state);
        return err;
 }
                           sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
        for (j = 0; j < adapter->num_tx_queues; j++) {
-               queue_stat = (u64 *)&adapter->tx_ring[j].stats;
+               queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
                for (k = 0; k < stat_count; k++)
                        data[i + k] = queue_stat[k];
                i += k;
        }
        for (j = 0; j < adapter->num_rx_queues; j++) {
-               queue_stat = (u64 *)&adapter->rx_ring[j].stats;
+               queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
                for (k = 0; k < stat_count; k++)
                        data[i + k] = queue_stat[k];
                i += k;
        reg_data |= IXGBE_RXDCTL_ENABLE;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               int j = adapter->rx_ring[0].reg_idx;
+               int j = adapter->rx_ring[0]->reg_idx;
                u32 k;
                for (k = 0; k < 10; k++) {
                        if (IXGBE_READ_REG(&adapter->hw,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-       ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
+       ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
 
        /* only valid if in constant ITR mode */
        switch (adapter->rx_itr_setting) {
                return -EINVAL;
 
        if (ec->tx_max_coalesced_frames_irq)
-               adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
+               adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
 
        if (ec->rx_coalesce_usecs > 1) {
                /* check the limits */
 
 {
        u32 rxctrl;
        int cpu = get_cpu();
-       int q = rx_ring - adapter->rx_ring;
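+       /* rings are now allocated individually, so the queue index can
+        * no longer be derived via pointer arithmetic on a ring array
+        */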
+       int q = rx_ring->reg_idx;
 
        if (rx_ring->cpu != cpu) {
                rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
 {
        u32 txctrl;
        int cpu = get_cpu();
-       int q = tx_ring - adapter->tx_ring;
+       int q = tx_ring->reg_idx;
        struct ixgbe_hw *hw = &adapter->hw;
 
        if (tx_ring->cpu != cpu) {
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               adapter->tx_ring[i].cpu = -1;
-               ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
+               adapter->tx_ring[i]->cpu = -1;
+               ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               adapter->rx_ring[i].cpu = -1;
-               ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
+               adapter->rx_ring[i]->cpu = -1;
+               ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
        }
 }
 
                                       adapter->num_rx_queues);
 
                for (i = 0; i < q_vector->rxr_count; i++) {
-                       j = adapter->rx_ring[r_idx].reg_idx;
+                       j = adapter->rx_ring[r_idx]->reg_idx;
                        ixgbe_set_ivar(adapter, 0, j, v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                       adapter->num_tx_queues);
 
                for (i = 0; i < q_vector->txr_count; i++) {
-                       j = adapter->tx_ring[r_idx].reg_idx;
+                       j = adapter->tx_ring[r_idx]->reg_idx;
                        ixgbe_set_ivar(adapter, 1, j, v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
 
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
-               tx_ring = &(adapter->tx_ring[r_idx]);
+               tx_ring = adapter->tx_ring[r_idx];
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->tx_itr,
                                           tx_ring->total_packets,
 
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
-               rx_ring = &(adapter->rx_ring[r_idx]);
+               rx_ring = adapter->rx_ring[r_idx];
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->rx_itr,
                                           rx_ring->total_packets,
                        netif_tx_stop_all_queues(netdev);
                        for (i = 0; i < adapter->num_tx_queues; i++) {
                                struct ixgbe_ring *tx_ring =
-                                                          &adapter->tx_ring[i];
+                                                           adapter->tx_ring[i];
                                if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
                                                       &tx_ring->reinit_state))
                                        schedule_work(&adapter->fdir_reinit_task);
 
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
-               tx_ring = &(adapter->tx_ring[r_idx]);
+               tx_ring = adapter->tx_ring[r_idx];
                tx_ring->total_bytes = 0;
                tx_ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
 
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0;  i < q_vector->rxr_count; i++) {
-               rx_ring = &(adapter->rx_ring[r_idx]);
+               rx_ring = adapter->rx_ring[r_idx];
                rx_ring->total_bytes = 0;
                rx_ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
 
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
-               ring = &(adapter->tx_ring[r_idx]);
+               ring = adapter->tx_ring[r_idx];
                ring->total_bytes = 0;
                ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
 
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
-               ring = &(adapter->rx_ring[r_idx]);
+               ring = adapter->rx_ring[r_idx];
                ring->total_bytes = 0;
                ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
        long r_idx;
 
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       rx_ring = &(adapter->rx_ring[r_idx]);
+       rx_ring = adapter->rx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                ixgbe_update_rx_dca(adapter, rx_ring);
 
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
-               ring = &(adapter->tx_ring[r_idx]);
+               ring = adapter->tx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        ixgbe_update_tx_dca(adapter, ring);
        budget = max(budget, 1);
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
-               ring = &(adapter->rx_ring[r_idx]);
+               ring = adapter->rx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        ixgbe_update_rx_dca(adapter, ring);
        }
 
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       ring = &(adapter->rx_ring[r_idx]);
+       ring = adapter->rx_ring[r_idx];
        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
                napi_complete(napi);
        long r_idx;
 
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-       tx_ring = &(adapter->tx_ring[r_idx]);
+       tx_ring = adapter->tx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                ixgbe_update_tx_dca(adapter, tx_ring);
        struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
        u8 current_itr;
        u32 new_itr = q_vector->eitr;
-       struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
-       struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
+       struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
+       struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
 
        q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
                                            q_vector->tx_itr,
        ixgbe_check_fan_failure(adapter, eicr);
 
        if (napi_schedule_prep(&(q_vector->napi))) {
-               adapter->tx_ring[0].total_packets = 0;
-               adapter->tx_ring[0].total_bytes = 0;
-               adapter->rx_ring[0].total_packets = 0;
-               adapter->rx_ring[0].total_bytes = 0;
+               adapter->tx_ring[0]->total_packets = 0;
+               adapter->tx_ring[0]->total_bytes = 0;
+               adapter->rx_ring[0]->total_packets = 0;
+               adapter->rx_ring[0]->total_bytes = 0;
                /* would disable interrupts here but EIAM disabled it */
                __napi_schedule(&(q_vector->napi));
        }
 
        /* Setup the HW Tx Head and Tail descriptor pointers */
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct ixgbe_ring *ring = &adapter->tx_ring[i];
+               struct ixgbe_ring *ring = adapter->tx_ring[i];
                j = ring->reg_idx;
                tdba = ring->dma;
                tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
                IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
                IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
-               adapter->tx_ring[i].head = IXGBE_TDH(j);
-               adapter->tx_ring[i].tail = IXGBE_TDT(j);
+               adapter->tx_ring[i]->head = IXGBE_TDH(j);
+               adapter->tx_ring[i]->tail = IXGBE_TDT(j);
                /*
                 * Disable Tx Head Writeback RO bit, since this hoses
                 * bookkeeping if things aren't delivered in order.
        u32 rscctrl;
        int rx_buf_len;
 
-       rx_ring = &adapter->rx_ring[index];
+       rx_ring = adapter->rx_ring[index];
        j = rx_ring->reg_idx;
        rx_buf_len = rx_ring->rx_buf_len;
        rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
 #endif
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
-       rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+       rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
        /* disable receives while setting up the descriptors */
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
         * the Base and Length of the Rx Descriptor Ring
         */
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               rx_ring = &adapter->rx_ring[i];
+               rx_ring = adapter->rx_ring[i];
                rdba = rx_ring->dma;
                j = rx_ring->reg_idx;
                IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
        } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        u32 ctrl;
-                       j = adapter->rx_ring[i].reg_idx;
+                       j = adapter->rx_ring[i]->reg_idx;
                        ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
                        ctrl |= IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
        ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               j = adapter->tx_ring[i].reg_idx;
+               j = adapter->tx_ring[i]->reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                /* PThresh workaround for Tx hang with DFP enabled. */
                txdctl |= 32;
                vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
                for (i = 0; i < adapter->num_rx_queues; i++) {
-                       j = adapter->rx_ring[i].reg_idx;
+                       j = adapter->rx_ring[i]->reg_idx;
                        vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
                        vlnctrl |= IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
 #endif /* IXGBE_FCOE */
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                for (i = 0; i < adapter->num_tx_queues; i++)
-                       adapter->tx_ring[i].atr_sample_rate =
+                       adapter->tx_ring[i]->atr_sample_rate =
                                                       adapter->atr_sample_rate;
                ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
        } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
        ixgbe_configure_tx(adapter);
        ixgbe_configure_rx(adapter);
        for (i = 0; i < adapter->num_rx_queues; i++)
-               ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
-                                      (adapter->rx_ring[i].count - 1));
+               ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
+                                      (adapter->rx_ring[i]->count - 1));
 }
 
 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
 static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
                                              int rxr)
 {
-       int j = adapter->rx_ring[rxr].reg_idx;
+       int j = adapter->rx_ring[rxr]->reg_idx;
        int k;
 
        for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
                DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
                        "not set within the polling period\n", rxr);
        }
-       ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
-                             (adapter->rx_ring[rxr].count - 1));
+       ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
+                             (adapter->rx_ring[rxr]->count - 1));
 }
 
 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        }
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               j = adapter->tx_ring[i].reg_idx;
+               j = adapter->tx_ring[i]->reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                /* enable WTHRESH=8 descriptors, to encourage burst writeback */
                txdctl |= (8 << 16);
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
        }
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               j = adapter->tx_ring[i].reg_idx;
+               j = adapter->tx_ring[i]->reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
        }
 
        for (i = 0; i < num_rx_rings; i++) {
-               j = adapter->rx_ring[i].reg_idx;
+               j = adapter->rx_ring[i]->reg_idx;
                rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
                /* enable PTHRESH=32 descriptors (half the internal cache)
                 * and HTHRESH=0 descriptors (to minimize latency on fetch),
 
        for (i = 0; i < adapter->num_tx_queues; i++)
                set_bit(__IXGBE_FDIR_INIT_DONE,
-                       &(adapter->tx_ring[i].reinit_state));
+                       &(adapter->tx_ring[i]->reinit_state));
 
        /* enable transmits */
        netif_tx_start_all_queues(netdev);
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+               ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+               ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
 void ixgbe_down(struct ixgbe_adapter *adapter)
 
        /* disable transmits in the hardware now that interrupts are off */
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               j = adapter->tx_ring[i].reg_idx;
+               j = adapter->tx_ring[i]->reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
                                (txdctl & ~IXGBE_TXDCTL_ENABLE));
 
 #ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
-               ixgbe_update_tx_dca(adapter, adapter->tx_ring);
-               ixgbe_update_rx_dca(adapter, adapter->rx_ring);
+               ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
+               ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
        }
 #endif
 
-       tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
-       ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
+       tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
+       ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
 
        if (!tx_clean_complete)
                work_done = budget;
 
        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
                for (i = 0; i < adapter->num_rx_queues; i++)
-                       adapter->rx_ring[i].reg_idx = i;
+                       adapter->rx_ring[i]->reg_idx = i;
                for (i = 0; i < adapter->num_tx_queues; i++)
-                       adapter->tx_ring[i].reg_idx = i;
+                       adapter->tx_ring[i]->reg_idx = i;
                ret = true;
        } else {
                ret = false;
                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                        /* the number of queues is assumed to be symmetric */
                        for (i = 0; i < dcb_i; i++) {
-                               adapter->rx_ring[i].reg_idx = i << 3;
-                               adapter->tx_ring[i].reg_idx = i << 2;
+                               adapter->rx_ring[i]->reg_idx = i << 3;
+                               adapter->tx_ring[i]->reg_idx = i << 2;
                        }
                        ret = true;
                } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                                 * Rx TC0-TC7 are offset by 16 queues each
                                 */
                                for (i = 0; i < 3; i++) {
-                                       adapter->tx_ring[i].reg_idx = i << 5;
-                                       adapter->rx_ring[i].reg_idx = i << 4;
+                                       adapter->tx_ring[i]->reg_idx = i << 5;
+                                       adapter->rx_ring[i]->reg_idx = i << 4;
                                }
                                for ( ; i < 5; i++) {
-                                       adapter->tx_ring[i].reg_idx =
+                                       adapter->tx_ring[i]->reg_idx =
                                                                 ((i + 2) << 4);
-                                       adapter->rx_ring[i].reg_idx = i << 4;
+                                       adapter->rx_ring[i]->reg_idx = i << 4;
                                }
                                for ( ; i < dcb_i; i++) {
-                                       adapter->tx_ring[i].reg_idx =
+                                       adapter->tx_ring[i]->reg_idx =
                                                                 ((i + 8) << 3);
-                                       adapter->rx_ring[i].reg_idx = i << 4;
+                                       adapter->rx_ring[i]->reg_idx = i << 4;
                                }
 
                                ret = true;
                                 *
                                 * Rx TC0-TC3 are offset by 32 queues each
                                 */
-                               adapter->tx_ring[0].reg_idx = 0;
-                               adapter->tx_ring[1].reg_idx = 64;
-                               adapter->tx_ring[2].reg_idx = 96;
-                               adapter->tx_ring[3].reg_idx = 112;
+                               adapter->tx_ring[0]->reg_idx = 0;
+                               adapter->tx_ring[1]->reg_idx = 64;
+                               adapter->tx_ring[2]->reg_idx = 96;
+                               adapter->tx_ring[3]->reg_idx = 112;
                                for (i = 0 ; i < dcb_i; i++)
-                                       adapter->rx_ring[i].reg_idx = i << 5;
+                                       adapter->rx_ring[i]->reg_idx = i << 5;
 
                                ret = true;
                        } else {
            ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
             (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
                for (i = 0; i < adapter->num_rx_queues; i++)
-                       adapter->rx_ring[i].reg_idx = i;
+                       adapter->rx_ring[i]->reg_idx = i;
                for (i = 0; i < adapter->num_tx_queues; i++)
-                       adapter->tx_ring[i].reg_idx = i;
+                       adapter->tx_ring[i]->reg_idx = i;
                ret = true;
        }
 
 
                        ixgbe_cache_ring_dcb(adapter);
                        /* find out queues in TC for FCoE */
-                       fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
-                       fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
+                       fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+                       fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
                        /*
                         * In 82599, the number of Tx queues for each traffic
                         * class for both 8-TC and 4-TC modes are:
                        fcoe_tx_i = f->mask;
                }
                for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
-                       adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
-                       adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
+                       adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+                       adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
                }
                ret = true;
        }
  */
 static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
 {
-       adapter->rx_ring[0].reg_idx = adapter->num_vfs * 2;
-       adapter->tx_ring[0].reg_idx = adapter->num_vfs * 2;
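+       /* the PF's ring 0 sits just past the VF queues (two per VF) */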
+       adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
+       adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
        if (adapter->num_vfs)
                return true;
        else
 static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 {
        /* start with default case */
-       adapter->rx_ring[0].reg_idx = 0;
-       adapter->tx_ring[0].reg_idx = 0;
+       adapter->rx_ring[0]->reg_idx = 0;
+       adapter->tx_ring[0]->reg_idx = 0;
 
        if (ixgbe_cache_ring_sriov(adapter))
                return;
 static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 {
        int i;
-
-       adapter->tx_ring = kcalloc(adapter->num_tx_queues,
-                                  sizeof(struct ixgbe_ring), GFP_KERNEL);
-       if (!adapter->tx_ring)
-               goto err_tx_ring_allocation;
-
-       adapter->rx_ring = kcalloc(adapter->num_rx_queues,
-                                  sizeof(struct ixgbe_ring), GFP_KERNEL);
-       if (!adapter->rx_ring)
-               goto err_rx_ring_allocation;
+       int orig_node = adapter->node;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               adapter->tx_ring[i].count = adapter->tx_ring_count;
-               adapter->tx_ring[i].queue_index = i;
+               struct ixgbe_ring *ring = adapter->tx_ring[i];
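+               /* no node preference: walk the online NUMA nodes
+                * round-robin so rings are spread across all of them
+                */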
+               if (orig_node == -1) {
+                       int cur_node = next_online_node(adapter->node);
+                       if (cur_node == MAX_NUMNODES)
+                               cur_node = first_online_node;
+                       adapter->node = cur_node;
+               }
+               ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+                                   adapter->node);
+               if (!ring)
+                       ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+               if (!ring)
+                       goto err_tx_ring_allocation;
+               ring->count = adapter->tx_ring_count;
+               ring->queue_index = i;
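+               /* record the node so per-ring buffers can be allocated
+                * node-local later (e.g. in ixgbe_setup_tx_resources)
+                */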
+               ring->numa_node = adapter->node;
+
+               adapter->tx_ring[i] = ring;
        }
 
+       /* Restore the adapter's original node */
+       adapter->node = orig_node;
+
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               adapter->rx_ring[i].count = adapter->rx_ring_count;
-               adapter->rx_ring[i].queue_index = i;
+               struct ixgbe_ring *ring = adapter->rx_ring[i];
+               if (orig_node == -1) {
+                       int cur_node = next_online_node(adapter->node);
+                       if (cur_node == MAX_NUMNODES)
+                               cur_node = first_online_node;
+                       adapter->node = cur_node;
+               }
+               ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+                                   adapter->node);
+               if (!ring)
+                       ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+               if (!ring)
+                       goto err_rx_ring_allocation;
+               ring->count = adapter->rx_ring_count;
+               ring->queue_index = i;
+               ring->numa_node = adapter->node;
+
+               adapter->rx_ring[i] = ring;
        }
 
+       /* Restore the adapter's original node */
+       adapter->node = orig_node;
+
        ixgbe_cache_ring_register(adapter);
 
        return 0;
 
 err_rx_ring_allocation:
-       kfree(adapter->tx_ring);
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               kfree(adapter->tx_ring[i]);
 err_tx_ring_allocation:
        return -ENOMEM;
 }
  **/
 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
 {
-       kfree(adapter->tx_ring);
-       kfree(adapter->rx_ring);
-       adapter->tx_ring = NULL;
-       adapter->rx_ring = NULL;
+       int i;
+
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               kfree(adapter->tx_ring[i]);
+               adapter->tx_ring[i] = NULL;
+       }
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               kfree(adapter->rx_ring[i]);
+               adapter->rx_ring[i] = NULL;
+       }
 
        ixgbe_free_q_vectors(adapter);
        ixgbe_reset_interrupt_capability(adapter);
        int size;
 
        size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
-       tx_ring->tx_buffer_info = vmalloc_node(size, adapter->node);
+       tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
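+       /* fall back to an allocation on any node if node-local fails */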
        if (!tx_ring->tx_buffer_info)
                tx_ring->tx_buffer_info = vmalloc(size);
        if (!tx_ring->tx_buffer_info)
 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
 {
        int i, err = 0;
-       int orig_node = adapter->node;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               if (orig_node == -1) {
-                       int cur_node = next_online_node(adapter->node);
-                       if (cur_node == MAX_NUMNODES)
-                               cur_node = first_online_node;
-                       adapter->node = cur_node;
-               }
-               err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+               err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
                if (!err)
                        continue;
                DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
                break;
        }
 
-       /* reset the node back to its starting value */
-       adapter->node = orig_node;
-
        return err;
 }
 
 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 {
        int i, err = 0;
-       int orig_node = adapter->node;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               if (orig_node == -1) {
-                       int cur_node = next_online_node(adapter->node);
-                       if (cur_node == MAX_NUMNODES)
-                               cur_node = first_online_node;
-                       adapter->node = cur_node;
-               }
-               err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+               err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
                if (!err)
                        continue;
                DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
                break;
        }
 
-       /* reset the node back to its starting value */
-       adapter->node = orig_node;
-
        return err;
 }
 
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               if (adapter->tx_ring[i].desc)
-                       ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
+               if (adapter->tx_ring[i]->desc)
+                       ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
 }
 
 /**
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               if (adapter->rx_ring[i].desc)
-                       ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
+               if (adapter->rx_ring[i]->desc)
+                       ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
 }
 
 /**
                        adapter->hw_rx_no_dma_resources +=
                                             IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
                for (i = 0; i < adapter->num_rx_queues; i++) {
-                       rsc_count += adapter->rx_ring[i].rsc_count;
-                       rsc_flush += adapter->rx_ring[i].rsc_flush;
+                       rsc_count += adapter->rx_ring[i]->rsc_count;
+                       rsc_flush += adapter->rx_ring[i]->rsc_flush;
                }
                adapter->rsc_total_count = rsc_count;
                adapter->rsc_total_flush = rsc_flush;
 
        /* gather some stats to the adapter struct that are per queue */
        for (i = 0; i < adapter->num_tx_queues; i++)
-               restart_queue += adapter->tx_ring[i].restart_queue;
+               restart_queue += adapter->tx_ring[i]->restart_queue;
        adapter->restart_queue = restart_queue;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+               non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
        adapter->non_eop_descs = non_eop_descs;
 
        adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
        if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
                for (i = 0; i < adapter->num_tx_queues; i++)
                        set_bit(__IXGBE_FDIR_INIT_DONE,
-                               &(adapter->tx_ring[i].reinit_state));
+                               &(adapter->tx_ring[i]->reinit_state));
        } else {
                DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
                        "ignored adding FDIR ATR filters \n");
 
        if (!netif_carrier_ok(netdev)) {
                for (i = 0; i < adapter->num_tx_queues; i++) {
-                       tx_ring = &adapter->tx_ring[i];
+                       tx_ring = adapter->tx_ring[i];
                        if (tx_ring->next_to_use != tx_ring->next_to_clean) {
                                some_tx_pending = 1;
                                break;
                }
        }
 
-       tx_ring = &adapter->tx_ring[skb->queue_mapping];
+       tx_ring = adapter->tx_ring[skb->queue_mapping];
 
        if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
            (skb->protocol == htons(ETH_P_FCOE))) {