        if (adapter->ring_feature[RING_F_FCOE].indices) {
                /* Use multiple rx queues for FCoE by redirection table */
                for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
-                       fcoe_i = f->mask + i % f->indices;
+                       fcoe_i = f->offset + i % f->indices;
                        fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
                        fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
                }
                IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
        } else  {
                /* Use single rx queue for FCoE */
-               fcoe_i = f->mask;
+               fcoe_i = f->offset;
                fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
                IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
                IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
                                (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
        }
        /* send FIP frames to the first FCoE queue */
-       fcoe_i = f->mask;
+       fcoe_i = f->offset;
        fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
        IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
                        IXGBE_ETQS_QUEUE_EN |
                        (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 
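For reference, a minimal standalone sketch of the redirection-table indexing used in the multi-queue branch above. The table has 8 entries (matching the "up to 8 exchanges" noted in the kernel-doc below); the offset and queue count passed in main() are illustrative, not driver state, and the mask/reg_idx lookup steps are omitted.

#include <stdio.h>

#define FCRETA_SIZE 8   /* mirrors IXGBE_FCRETA_SIZE: 8 redirection entries */

/* Spread the redirection entries over the FCoE queue range that starts
 * at 'offset' and spans 'indices' queues, as the loop above does. */
static void fill_fcreta(unsigned int offset, unsigned int indices)
{
        unsigned int i;

        for (i = 0; i < FCRETA_SIZE; i++)
                printf("FCRETA[%u] -> rx queue %u\n", i, offset + i % indices);
}

int main(void)
{
        fill_fcreta(4, 3);      /* e.g. 3 FCoE queues starting at rx queue 4 */
        return 0;
}

With those numbers the eight entries map to queues 4, 5, 6, 4, 5, 6, 4, 5, so every possible exchange lands on one of the three FCoE queues.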
                else
                        ixgbe_cache_ring_rss(adapter);
 
-               fcoe_rx_i = f->mask;
-               fcoe_tx_i = f->mask;
+               fcoe_rx_i = f->offset;
+               fcoe_tx_i = f->offset;
        }
        for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
-               adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
-               adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+               adapter->rx_ring[f->offset + i]->reg_idx = fcoe_rx_i;
+               adapter->tx_ring[f->offset + i]->reg_idx = fcoe_tx_i;
        }
        return true;
 }
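The common thread in these hunks is that the RING_F_FCOE feature entry now carries the first FCoE queue in an explicit offset field instead of overloading mask. A minimal sketch of the bookkeeping the hunks assume, reduced to the two members they touch (the driver's struct ixgbe_ring_feature has more fields than this):

#include <stdint.h>

/* Sketch only, not the driver's definition. */
struct ring_feature_sketch {
        uint16_t indices;       /* number of queues assigned to the feature */
        uint16_t offset;        /* index of the first queue used by the feature */
};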
  * @adapter: board private structure to initialize
  *
  * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
- * The ring feature mask is not used as a mask for FCoE, as it can take any 8
- * rx queues out of the max number of rx queues, instead, it is used as the
- * index of the first rx queue used by FCoE.
- *
+ * Offset is used as the index of the first rx queue used by FCoE.
  **/
 static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 {
        }
 
        /* adding FCoE rx rings to the end */
-       f->mask = adapter->num_rx_queues;
+       f->offset = adapter->num_rx_queues;
        adapter->num_rx_queues += f->indices;
        adapter->num_tx_queues += f->indices;
 
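A worked illustration of the "adding FCoE rx rings to the end" step above, with made-up queue counts: the FCoE block starts right after the existing rx queues and offset records its first index.

#include <stdio.h>

int main(void)
{
        /* Illustrative numbers only, not driver state. */
        unsigned int num_rx_queues = 4;         /* existing rx queues: 0..3 */
        unsigned int fcoe_indices  = 2;         /* FCoE wants two queues    */
        unsigned int fcoe_offset;

        fcoe_offset    = num_rx_queues;         /* first FCoE queue: 4      */
        num_rx_queues += fcoe_indices;          /* rx queues now 0..5       */

        printf("FCoE owns rx queues %u..%u\n",
               fcoe_offset, fcoe_offset + fcoe_indices - 1);
        return 0;
}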
 
 #ifdef IXGBE_FCOE
        /* FCoE enabled queues require special configuration indexed
-        * by feature specific indices and mask. Here we map FCoE
+        * by feature specific indices and offset. Here we map FCoE
         * indices onto the DCB queue pairs allowing FCoE to own
         * configuration later.
         */
                ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
                tc = prio_tc[adapter->fcoe.up];
                f->indices = dev->tc_to_txq[tc].count;
-               f->mask = dev->tc_to_txq[tc].offset;
+               f->offset = dev->tc_to_txq[tc].offset;
        }
 #endif
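For the DCB case above, the FCoE queue count and offset come from the traffic class that the FCoE user priority maps to. A hedged sketch of that lookup using the generic net_device TC mapping; netdev_get_prio_tc_map() stands in here for the driver's ixgbe_dcb_unpack_map() walk over its own DCB config, and the helper name is illustrative.

#include <linux/netdevice.h>

/* Resolve the FCoE user priority to a traffic class and take that
 * class's transmit queue range (count and offset). */
static void fcoe_queue_range(struct net_device *dev, u8 fcoe_up,
                             u16 *count, u16 *offset)
{
        u8 tc = netdev_get_prio_tc_map(dev, fcoe_up);

        *count  = dev->tc_to_txq[tc].count;
        *offset = dev->tc_to_txq[tc].offset;
}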
 
                if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
                        struct ixgbe_ring_feature *f;
                        f = &adapter->ring_feature[RING_F_FCOE];
-                       if ((rxr_idx >= f->mask) &&
-                           (rxr_idx < f->mask + f->indices))
+                       if ((rxr_idx >= f->offset) &&
+                           (rxr_idx < f->offset + f->indices))
                                set_bit(__IXGBE_RX_FCOE, &ring->state);
                }
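The range test in this last hunk is the consumer of the new offset field: a ring is an FCoE ring exactly when its index falls in [offset, offset + indices). A minimal sketch (the helper name is illustrative, not a driver function):

#include <stdbool.h>

/* True if the rx ring at rxr_idx belongs to the FCoE block that starts
 * at 'offset' and spans 'indices' rings, as checked above. */
static bool ring_is_fcoe(unsigned int rxr_idx,
                         unsigned int offset, unsigned int indices)
{
        return rxr_idx >= offset && rxr_idx < offset + indices;
}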