        tx_ring->count = IXGBE_DEFAULT_TXD;
        tx_ring->queue_index = 0;
        tx_ring->dev = &adapter->pdev->dev;
+       tx_ring->netdev = adapter->netdev;
        tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
        tx_ring->numa_node = adapter->node;
 
        rx_ring->count = IXGBE_DEFAULT_RXD;
        rx_ring->queue_index = 0;
        rx_ring->dev = &adapter->pdev->dev;
+       rx_ring->netdev = adapter->netdev;
        rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
        rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
        rx_ring->numa_node = adapter->node;
        return 0;
 }
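
The hunks above and below assume that struct ixgbe_ring itself carries a netdev back-pointer; that member is added in ixgbe.h, which is not part of this excerpt. A minimal sketch of the assumed struct change, placed next to the existing dev pointer (member order and comments are illustrative only):

	struct ixgbe_ring {
		void *desc;			/* descriptor ring memory */
		struct device *dev;		/* device used for DMA mapping */
		struct net_device *netdev;	/* netdev the ring belongs to */
		/* ... remaining members unchanged ... */
	};

With the back-pointer in place, code that only has a ring can reach the net_device directly, which is what allows the adapter and netdev parameters to be dropped from the functions below.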
 
-static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
-                                  struct ixgbe_ring *rx_ring,
+static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
                                   struct ixgbe_ring *tx_ring,
                                   unsigned int size)
 {
        }
 
        /* re-map buffers to ring, store next to clean values */
-       ixgbe_alloc_rx_buffers(adapter, rx_ring, count);
+       ixgbe_alloc_rx_buffers(rx_ring, count);
        rx_ring->next_to_clean = rx_ntc;
        tx_ring->next_to_clean = tx_ntc;
 
                for (i = 0; i < 64; i++) {
                        skb_get(skb);
                        tx_ret_val = ixgbe_xmit_frame_ring(skb,
-                                                          adapter->netdev,
                                                           adapter,
                                                           tx_ring);
                        if (tx_ret_val == NETDEV_TX_OK)
                /* allow 200 milliseconds for packets to go from Tx to Rx */
                msleep(200);
 
-               good_cnt = ixgbe_clean_test_rings(adapter, rx_ring,
-                                                 tx_ring, size);
+               good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
                if (good_cnt != 64) {
                        ret_val = 13;
                        break;
 
                               struct ixgbe_ring *tx_ring)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct net_device *netdev = adapter->netdev;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int i, eop, count = 0;
        tx_ring->next_to_clean = i;
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
-       if (unlikely(count && netif_carrier_ok(netdev) &&
+       if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
-               if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+               if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
-                       netif_wake_subqueue(netdev, tx_ring->queue_index);
+                       netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
                        ++tx_ring->tx_stats.restart_queue;
                }
        }
 
 /**
  * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
  **/
-void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
-                           struct ixgbe_ring *rx_ring,
-                           u16 cleaned_count)
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 {
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
        struct sk_buff *skb;
        u16 i = rx_ring->next_to_use;
 
+       /* do nothing if no valid netdev defined */
+       if (!rx_ring->netdev)
+               return;
+
        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
                bi = &rx_ring->rx_buffer_info[i];
                skb = bi->skb;
 
                if (!skb) {
-                       skb = netdev_alloc_skb_ip_align(adapter->netdev,
+                       skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                        rx_ring->rx_buf_len);
                        if (!skb) {
                                rx_ring->rx_stats.alloc_rx_buff_failed++;
 
                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                        if (!bi->page) {
-                               bi->page = netdev_alloc_page(adapter->netdev);
+                               bi->page = netdev_alloc_page(rx_ring->netdev);
                                if (!bi->page) {
                                        rx_ring->rx_stats.alloc_rx_page_failed++;
                                        goto no_buffers;
                total_rx_bytes += skb->len;
                total_rx_packets++;
 
-               skb->protocol = eth_type_trans(skb, adapter->netdev);
+               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 #ifdef IXGBE_FCOE
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
                if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
-                       ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+                       ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }
 
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
 
        if (cleaned_count)
-               ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+               ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
 
 #ifdef IXGBE_FCOE
        /* include DDPed FCoE data */
        if (ddp_bytes > 0) {
                unsigned int mss;
 
-               mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
+               mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
                        sizeof(struct fc_frame_header) -
                        sizeof(struct fcoe_crc_eof);
                if (mss > 512)
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 
        ixgbe_rx_desc_queue_enable(adapter, ring);
-       ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
+       ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
 }
 
 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
                ring->dev = &adapter->pdev->dev;
+               ring->netdev = adapter->netdev;
                ring->numa_node = adapter->node;
 
                adapter->tx_ring[i] = ring;
                ring->count = rx_count;
                ring->queue_index = i;
                ring->dev = &adapter->pdev->dev;
+               ring->netdev = adapter->netdev;
                ring->numa_node = adapter->node;
 
                adapter->rx_ring[i] = ring;
        ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
 }
 
-static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
-                                struct ixgbe_ring *tx_ring, int size)
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 {
-       netif_stop_subqueue(netdev, tx_ring->queue_index);
+       netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
                return -EBUSY;
 
        /* A reprieve! - use start_queue because it doesn't call schedule */
-       netif_start_subqueue(netdev, tx_ring->queue_index);
+       netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
        ++tx_ring->tx_stats.restart_queue;
        return 0;
 }
 
-static int ixgbe_maybe_stop_tx(struct net_device *netdev,
-                             struct ixgbe_ring *tx_ring, int size)
+static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 {
        if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
                return 0;
-       return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+       return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
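
ixgbe_alloc_rx_buffers() and ixgbe_xmit_frame_ring() are called from outside ixgbe_main.c (the ethtool loopback test above, for example), so their declarations in ixgbe.h must change in step. The header hunk is not shown here; a sketch of the implied prototypes, assuming they keep their current placement in ixgbe.h:

	/* implied ixgbe.h declarations after this change (exact placement may differ) */
	void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count);
	netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
					  struct ixgbe_adapter *adapter,
					  struct ixgbe_ring *tx_ring);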
 
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
        return skb_tx_hash(dev, skb);
 }
 
-netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                          struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring)
 {
+       struct net_device *netdev = tx_ring->netdev;
        struct netdev_queue *txq;
        unsigned int first;
        unsigned int tx_flags = 0;
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
-       if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
+       if (ixgbe_maybe_stop_tx(tx_ring, count)) {
                tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
        }
                txq->tx_bytes += skb->len;
                txq->tx_packets++;
                ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
-               ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+               ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
        } else {
                dev_kfree_skb_any(skb);
        struct ixgbe_ring *tx_ring;
 
        tx_ring = adapter->tx_ring[skb->queue_mapping];
-       return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
+       return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
 }
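
Only the tail of the ndo_start_xmit wrapper is visible in the hunk above. For context, a sketch of the whole function once the netdev argument is gone from ixgbe_xmit_frame_ring(); the lines outside the changed call are assumed to keep their existing form:

	static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
					    struct net_device *netdev)
	{
		struct ixgbe_adapter *adapter = netdev_priv(netdev);
		struct ixgbe_ring *tx_ring;

		/* use the ring chosen by ixgbe_select_queue()/skb_tx_hash() */
		tx_ring = adapter->tx_ring[skb->queue_mapping];
		return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
	}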
 
 /**