u64 alloc_rx_buff_failed;
 };
 
+enum ixgbe_ring_state_t {
+       __IXGBE_TX_FDIR_INIT_DONE,
+       __IXGBE_TX_DETECT_HANG,
+       __IXGBE_RX_PS_ENABLED,
+       __IXGBE_RX_RSC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+       test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+       set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+       clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+       test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+       set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+       clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_rsc_enabled(ring) \
+       test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define set_ring_rsc_enabled(ring) \
+       set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define clear_ring_rsc_enabled(ring) \
+       clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 struct ixgbe_ring {
        void *desc;                     /* descriptor ring memory */
        struct device *dev;             /* device for DMA mapping */
        union {
                struct ixgbe_tx_buffer *tx_buffer_info;
                struct ixgbe_rx_buffer *rx_buffer_info;
        };
+       unsigned long state;
        u8 atr_sample_rate;
        u8 atr_count;
        u16 count;                      /* amount of descriptors */
        u16 next_to_clean;
 
        u8 queue_index; /* needed for multiqueue queue management */
+       u8 reg_idx;                     /* holds the special value that gets
+                                        * the hardware register offset
+                                        * associated with this ring, which is
+                                        * different for DCB and RSS modes
+                                        */
+
+       u16 work_limit;                 /* max work per interrupt */
 
-#define IXGBE_RING_RX_PS_ENABLED                (u8)(1)
-       u8 flags;                       /* per ring feature flags */
        u8 __iomem *tail;
 
        unsigned int total_bytes;
        unsigned int total_packets;
 
-       u16 work_limit;                 /* max work per interrupt */
-       u16 reg_idx;                    /* holds the special value that gets
-                                        * the hardware register offset
-                                        * associated with this ring, which is
-                                        * different for DCB and RSS modes
-                                        */
-
        struct ixgbe_queue_stats stats;
        struct u64_stats_sync syncp;
        union {
                struct ixgbe_tx_queue_stats tx_stats;
                struct ixgbe_rx_queue_stats rx_stats;
        };
-       unsigned long reinit_state;
        int numa_node;
        unsigned int size;              /* length in bytes */
        dma_addr_t dma;                 /* phys. address of descriptor ring */
        __IXGBE_TESTING,
        __IXGBE_RESETTING,
        __IXGBE_DOWN,
-       __IXGBE_FDIR_INIT_DONE,
        __IXGBE_SFP_MODULE_NOT_FOUND
 };
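
For context, the new helpers are thin wrappers around the kernel's atomic bitops, so one ring's flags can be flipped without locking shared adapter state. A minimal userspace model of the same pattern, using C11 atomics in place of set_bit()/test_bit() (all names here are illustrative, not part of the driver):

/*
 * Userspace model of the ring->state pattern above: one unsigned-long
 * bitmap per ring, mutated with atomic bitops so flags on different
 * rings never need a shared lock. Illustrative names throughout.
 */
#include <stdatomic.h>
#include <stdio.h>

enum demo_ring_state_t {
	DEMO_TX_FDIR_INIT_DONE,
	DEMO_TX_DETECT_HANG,
	DEMO_RX_PS_ENABLED,
	DEMO_RX_RSC_ENABLED,
};

struct demo_ring {
	atomic_ulong state;
};

static void demo_set_bit(int nr, atomic_ulong *addr)
{
	atomic_fetch_or(addr, 1UL << nr);
}

static void demo_clear_bit(int nr, atomic_ulong *addr)
{
	atomic_fetch_and(addr, ~(1UL << nr));
}

static int demo_test_bit(int nr, const atomic_ulong *addr)
{
	return (atomic_load(addr) >> nr) & 1UL;
}

int main(void)
{
	struct demo_ring ring = { .state = 0 };

	demo_set_bit(DEMO_RX_PS_ENABLED, &ring.state);
	printf("ps=%d\n", demo_test_bit(DEMO_RX_PS_ENABLED, &ring.state));
	demo_clear_bit(DEMO_RX_PS_ENABLED, &ring.state);
	printf("ps=%d\n", demo_test_bit(DEMO_RX_PS_ENABLED, &ring.state));
	return 0;
}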
 
 
 
        /* Detect a transmit hang in hardware, this serializes the
         * check with the clearing of time_stamp and movement of eop */
-       adapter->detect_tx_hung = false;
+       clear_check_for_tx_hang(tx_ring);
        if (tx_ring->tx_buffer_info[eop].time_stamp &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
            ixgbe_tx_xon_state(adapter, tx_ring)) {
                }
        }
 
-       if (adapter->detect_tx_hung) {
-               if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
-                       /* schedule immediate reset if we believe we hung */
-                       e_info(probe, "tx hang %d detected, resetting "
-                              "adapter\n", adapter->tx_timeout_count + 1);
-                       ixgbe_tx_timeout(adapter->netdev);
-               }
+       if (check_for_tx_hang(tx_ring) &&
+           ixgbe_check_tx_hang(adapter, tx_ring, i)) {
+               /* schedule immediate reset if we believe we hung */
+               e_info(probe, "tx hang %d detected, resetting "
+                      "adapter\n", adapter->tx_timeout_count + 1);
+               ixgbe_tx_timeout(adapter->netdev);
        }
 
        /* re-arm the interrupt */
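
The hang check is now armed per ring instead of via the adapter-wide detect_tx_hung flag: the watchdog sets __IXGBE_TX_DETECT_HANG, ixgbe_check_tx_hang() clears it as it runs, and a reset is scheduled only when an armed ring still looks stalled. A rough sketch of that arm/clear handshake (hypothetical names, single-threaded model):

#include <stdbool.h>

/*
 * Arm/clear handshake behind check_for_tx_hang(), modeled with plain
 * bools and hypothetical names. The watchdog arms 'check_hang'; the
 * hang test clears it as it runs, so a reset is requested only when a
 * watchdog-armed ring still looks stalled.
 */
struct ring_model {
	bool check_hang;	/* stands in for __IXGBE_TX_DETECT_HANG */
	bool stalled;		/* the real driver derives this from
				 * time_stamp, jiffies and XON state */
};

static bool hang_test(struct ring_model *r)
{
	r->check_hang = false;	/* clear_check_for_tx_hang() */
	return r->stalled;
}

static bool clean_should_reset(struct ring_model *r)
{
	/* mirrors: check_for_tx_hang(ring) && ixgbe_check_tx_hang(...) */
	return r->check_hang && hang_test(r);
}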
                        }
                }
 
-               if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+               if (ring_is_ps_enabled(rx_ring)) {
                        if (!bi->page) {
                                bi->page = netdev_alloc_page(rx_ring->netdev);
                                if (!bi->page) {
                (*work_done)++;
 
                rmb(); /* read descriptor and rx_buffer_info after status DD */
-               if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+               if (ring_is_ps_enabled(rx_ring)) {
                        hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
                        len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                               IXGBE_RXDADV_HDRBUFLEN_SHIFT;
                prefetch(next_rxd);
                cleaned_count++;
 
-               if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
+               if (ring_is_rsc_enabled(rx_ring))
                        rsc_count = ixgbe_get_rsc_count(rx_desc);
 
                if (rsc_count) {
                        if (skb->prev)
                                skb = ixgbe_transform_rsc_queue(skb,
                                                &(rx_ring->rx_stats.rsc_count));
-                       if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+                       if (ring_is_rsc_enabled(rx_ring)) {
                                if (IXGBE_RSC_CB(skb)->delay_unmap) {
                                        dma_unmap_single(rx_ring->dev,
                                                         IXGBE_RSC_CB(skb)->dma,
                                        IXGBE_RSC_CB(skb)->dma = 0;
                                        IXGBE_RSC_CB(skb)->delay_unmap = false;
                                }
-                               if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
+                               if (ring_is_ps_enabled(rx_ring))
                                        rx_ring->rx_stats.rsc_count +=
                                                 skb_shinfo(skb)->nr_frags;
                                else
                        rx_ring->stats.bytes += skb->len;
                        u64_stats_update_end(&rx_ring->syncp);
                } else {
-                       if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+                       if (ring_is_ps_enabled(rx_ring)) {
                                rx_buffer_info->skb = next_buffer->skb;
                                rx_buffer_info->dma = next_buffer->dma;
                                next_buffer->skb = skb;
                        for (i = 0; i < adapter->num_tx_queues; i++) {
                                struct ixgbe_ring *tx_ring =
                                                            adapter->tx_ring[i];
-                               if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
-                                                      &tx_ring->reinit_state))
+                               if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+                                                      &tx_ring->state))
                                        schedule_work(&adapter->fdir_reinit_task);
                        }
                }
        }
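
Since test_and_clear_bit() is atomic, only one pass can claim a ring's __IXGBE_TX_FDIR_INIT_DONE bit; ATR sampling on that ring (gated on the same bit in the transmit path below) then pauses until ixgbe_fdir_reinit_task() sets the bit again. The claim operation in isolation, sketched with C11 atomics (illustrative name):

#include <stdatomic.h>
#include <stdbool.h>

/*
 * One-shot claim, as test_and_clear_bit() is used above: the first
 * caller sees 'true' and schedules the rearm work; later callers see
 * 'false' until the flag is set again. Illustrative name only.
 */
static bool claim_once(atomic_bool *flag)
{
	return atomic_exchange(flag, false);	/* returns prior value */
}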
 
        /* reinitialize flowdirector state */
-       set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
+       set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
 
        /* enable queue */
        txdctl |= IXGBE_TXDCTL_ENABLE;
        srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                  IXGBE_SRRCTL_BSIZEHDR_MASK;
 
-       if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+       if (ring_is_ps_enabled(rx_ring)) {
 #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
                srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #else
        int rx_buf_len;
        u16 reg_idx = ring->reg_idx;
 
-       if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+       if (!ring_is_rsc_enabled(ring))
                return;
 
        rx_buf_len = ring->rx_buf_len;
         * total size of max desc * buf_len is not greater
         * than 65535
         */
-       if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+       if (ring_is_ps_enabled(ring)) {
 #if (MAX_SKB_FRAGS > 16)
                rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
 #elif (MAX_SKB_FRAGS > 8)
                rx_ring->rx_buf_len = rx_buf_len;
 
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
-                       rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
+                       set_ring_ps_enabled(rx_ring);
+               else
+                       clear_ring_ps_enabled(rx_ring);
+
+               if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+                       set_ring_rsc_enabled(rx_ring);
                else
-                       rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+                       clear_ring_rsc_enabled(rx_ring);
 
 #ifdef IXGBE_FCOE
                if (netdev->features & NETIF_F_FCOE_MTU) {
                        struct ixgbe_ring_feature *f;
                        f = &adapter->ring_feature[RING_F_FCOE];
                        if ((i >= f->mask) && (i < f->mask + f->indices)) {
-                               rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+                               clear_ring_ps_enabled(rx_ring);
                                if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
                                        rx_ring->rx_buf_len =
                                                IXGBE_FCOE_JUMBO_FRAME_SIZE;
+                       } else if (!ring_is_rsc_enabled(rx_ring) &&
+                                  !ring_is_ps_enabled(rx_ring)) {
+                               rx_ring->rx_buf_len =
+                                               IXGBE_FCOE_JUMBO_FRAME_SIZE;
                        }
                }
 #endif /* IXGBE_FCOE */
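
Each ring's PS and RSC bits are recomputed here from the adapter-wide feature flags, with FCoE rings overriding packet split afterwards, so the per-ring state always mirrors the current configuration. A condensed model of that derive-then-override shape (hypothetical names, not driver code):

#include <stdbool.h>

/*
 * Derive-then-override shape of the ring setup above: per-ring bits
 * start from the adapter-wide flags, then special-case rings (FCoE)
 * override them. Hypothetical userspace model, not driver code.
 */
struct ring_cfg {
	bool ps;	/* packet split */
	bool rsc;	/* receive-side coalescing */
};

static struct ring_cfg derive_ring_cfg(bool adapter_ps, bool adapter_rsc,
				       bool is_fcoe_ring)
{
	struct ring_cfg cfg = { .ps = adapter_ps, .rsc = adapter_rsc };

	if (is_fcoe_ring)
		cfg.ps = false;	/* FCoE forces packet split off */
	return cfg;
}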
 
        if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
                for (i = 0; i < adapter->num_tx_queues; i++)
-                       set_bit(__IXGBE_FDIR_INIT_DONE,
-                               &(adapter->tx_ring[i]->reinit_state));
+                       set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+                               &(adapter->tx_ring[i]->state));
        } else {
                e_err(probe, "failed to finish FDIR re-initialization, "
                      "ignored adding FDIR ATR filters\n");
                        netif_carrier_on(netdev);
                } else {
                        /* Force detection of hung controller */
-                       adapter->detect_tx_hung = true;
+                       for (i = 0; i < adapter->num_tx_queues; i++) {
+                               tx_ring = adapter->tx_ring[i];
+                               set_check_for_tx_hang(tx_ring);
+                       }
                }
        } else {
                adapter->link_up = false;
                if (tx_ring->atr_sample_rate) {
                        ++tx_ring->atr_count;
                        if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
-                            test_bit(__IXGBE_FDIR_INIT_DONE,
-                                     &tx_ring->reinit_state)) {
+                            test_bit(__IXGBE_TX_FDIR_INIT_DONE,
+                                     &tx_ring->state)) {
                                ixgbe_atr(adapter, skb, tx_ring->queue_index,
                                          tx_flags, protocol);
                                tx_ring->atr_count = 0;
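
The ATR gate samples at most one packet per atr_sample_rate transmits, and only while the flow-director tables are initialized, which keeps samples from racing a table rebuild. The counter logic in isolation (a sketch with illustrative names):

#include <stdbool.h>

/*
 * The ATR gate in isolation: sample at most one packet per 'rate'
 * transmits, and only while the flow-director tables are initialized.
 * Illustrative sketch with hypothetical names, not the driver's code.
 */
static bool should_sample_atr(unsigned int *count, unsigned int rate,
			      bool fdir_init_done)
{
	if (!rate)
		return false;
	if (++(*count) >= rate && fdir_init_done) {
		*count = 0;
		return true;
	}
	return false;
}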