if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
                return false;
 
+       /* limit VMDq instances on the PF by number of Tx queues */
+       vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);
+
        /* Add starting offset to total pool count */
        vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
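
The new cap keeps pools times traffic classes within the device's Tx queue budget. A minimal userspace sketch of the same arithmetic, assuming MAX_TX_QUEUES is 128 as on 82599/X540-class parts (the value here is an assumption, not taken from this patch):

	#include <stdio.h>

	#define MAX_TX_QUEUES 128	/* assumed 82599/X540 Tx queue count */

	/* mirrors: vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs); */
	static unsigned int cap_pools(unsigned int vmdq_i, unsigned int tcs)
	{
		unsigned int max_pools = MAX_TX_QUEUES / tcs;

		return vmdq_i < max_pools ? vmdq_i : max_pools;
	}

	int main(void)
	{
		/* 8 TCs leave room for 16 pools, 4 TCs for 32 */
		printf("%u %u\n", cap_pools(64, 8), cap_pools(64, 4));
		return 0;
	}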
 
 #ifdef IXGBE_FCOE
        u16 fcoe_i = 0;
 #endif
-       bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
 
        /* only proceed if SR-IOV is enabled */
        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
                return false;
 
+       /* limit l2fwd RSS based on total Tx queue limit */
+       rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);
+
        /* Add starting offset to total pool count */
        vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
 
        vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
 
        /* 64 pool mode with 2 queues per pool */
-       if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
+       if (vmdq_i > 32) {
                vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
                rss_m = IXGBE_RSS_2Q_MASK;
                rss_i = min_t(u16, rss_i, 2);
 
        unsigned int rxbase, txbase, queues;
        int i, baseq, err = 0;
 
-       if (!test_bit(accel->pool, &adapter->fwd_bitmask))
+       if (!test_bit(accel->pool, adapter->fwd_bitmask))
                return 0;
 
        baseq = accel->pool * adapter->num_rx_queues_per_pool;
-       netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+       netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
                   accel->pool, adapter->num_rx_pools,
-                  baseq, baseq + adapter->num_rx_queues_per_pool,
-                  adapter->fwd_bitmask);
+                  baseq, baseq + adapter->num_rx_queues_per_pool);
 
        accel->netdev = vdev;
        accel->rx_base_queue = rxbase = baseq;
        }
 
        /* PF holds first pool slot */
-       set_bit(0, &adapter->fwd_bitmask);
+       set_bit(0, adapter->fwd_bitmask);
        set_bit(__IXGBE_DOWN, &adapter->state);
 
        return 0;
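
Every call site in this patch loses the `&` in front of fwd_bitmask because the field is no longer a single unsigned long but a bitmap array, which decays to the unsigned long * that set_bit(), clear_bit(), test_bit() and the find_*_bit() helpers expect. A sketch of the before and after declarations, assuming the bitmap is sized at 64 bits to cover 63 offloaded macvlans plus the PF's pool 0 (the real field lives in struct ixgbe_adapter in ixgbe.h):

	#include <linux/types.h>	/* DECLARE_BITMAP() */

	struct adapter_old {
		unsigned long fwd_bitmask;		/* callers pass &fwd_bitmask */
	};

	struct adapter_new {
		DECLARE_BITMAP(fwd_bitmask, 64);	/* callers pass fwd_bitmask */
	};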
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_hw *hw = &adapter->hw;
-       bool pools;
 
        /* Hardware supports up to 8 traffic classes */
        if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
                return -EINVAL;
 
        if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
                return -EINVAL;
 
-       pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
-       if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
-               return -EBUSY;
-
        /* Hardware has to reinitialize queues and interrupts to
         * match packet buffer alignment. Unfortunately, the
         * hardware is not flexible enough to do this dynamically.
        struct ixgbe_fwd_adapter *fwd_adapter = NULL;
        struct ixgbe_adapter *adapter = netdev_priv(pdev);
        int used_pools = adapter->num_vfs + adapter->num_rx_pools;
+       int tcs = netdev_get_num_tc(pdev) ? : 1;
        unsigned int limit;
        int pool, err;
 
        }
 
        if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
-             adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
+             adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
            (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
                return ERR_PTR(-EBUSY);
 
        if (!fwd_adapter)
                return ERR_PTR(-ENOMEM);
 
-       pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
-       set_bit(pool, &adapter->fwd_bitmask);
-       limit = find_last_bit(&adapter->fwd_bitmask, 32);
+       pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+       set_bit(pool, adapter->fwd_bitmask);
+       limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
 
        /* Enable VMDq flag so device will be set in VM mode */
        adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
        /* unwind counter and free adapter struct */
        netdev_info(pdev,
                    "%s: dfwd hardware acceleration failed\n", vdev->name);
-       clear_bit(pool, &adapter->fwd_bitmask);
+       clear_bit(pool, adapter->fwd_bitmask);
        kfree(fwd_adapter);
        return ERR_PTR(err);
 }
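
The allocation pattern in ixgbe_fwd_add() is a first-fit bitmap allocator: find the lowest clear bit, claim it, then recompute the highest pool in use to size the VMDq limit. A self-contained userspace approximation of those semantics (single-word map and illustrative names, not the driver's code):

	#include <stdio.h>

	#define POOLS 64

	static unsigned long long bitmask;	/* stands in for adapter->fwd_bitmask */

	/* first-fit: lowest clear bit below 'size', or 'size' if none */
	static int first_zero_bit(unsigned long long map, int size)
	{
		int i;

		for (i = 0; i < size; i++)
			if (!(map & (1ULL << i)))
				return i;
		return size;
	}

	/* highest set bit below 'size'; the kernel's find_last_bit()
	 * likewise returns 'size' when the map is empty
	 */
	static int last_bit(unsigned long long map, int size)
	{
		int i;

		for (i = size - 1; i >= 0; i--)
			if (map & (1ULL << i))
				return i;
		return size;
	}

	int main(void)
	{
		int pool, limit;

		bitmask |= 1ULL;			/* PF owns pool 0, as in ixgbe_sw_init() */
		pool = first_zero_bit(bitmask, POOLS);	/* -> 1 */
		bitmask |= 1ULL << pool;		/* set_bit(pool, ...) */
		limit = last_bit(bitmask, POOLS);	/* -> 1; VMDq limit becomes limit + 1 */
		printf("pool %d limit %d\n", pool, limit);
		return 0;
	}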
        struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
        unsigned int limit;
 
-       clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
+       clear_bit(fwd_adapter->pool, adapter->fwd_bitmask);
 
-       limit = find_last_bit(&adapter->fwd_bitmask, 32);
+       limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
        adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
        ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
 
        }
 
        ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
-       netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+       netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
                   fwd_adapter->pool, adapter->num_rx_pools,
                   fwd_adapter->rx_base_queue,
-                  fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
-                  adapter->fwd_bitmask);
+                  fwd_adapter->rx_base_queue +
+                  adapter->num_rx_queues_per_pool);
        kfree(fwd_adapter);
 }
 
 
 {
 #ifdef CONFIG_PCI_IOV
        struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
-       int err = 0;
-       u8 num_tc;
-       int i;
        int pre_existing_vfs = pci_num_vf(dev);
+       int err = 0, num_rx_pools, i, limit;
+       u8 num_tc;
 
        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
                err = ixgbe_disable_sriov(adapter);
         * other values out of range.
         */
        num_tc = netdev_get_num_tc(adapter->netdev);
+       num_rx_pools = adapter->num_rx_pools;
+       limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
+               (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
 
-       if (num_tc > 4) {
-               if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
-                       e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
-                       return -EPERM;
-               }
-       } else if ((num_tc > 1) && (num_tc <= 4)) {
-               if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
-                       e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
-                       return -EPERM;
-               }
-       } else {
-               if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
-                       e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
-                       return -EPERM;
-               }
+       if (num_vfs > (limit - num_rx_pools)) {
+               e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
+                         num_tc, num_rx_pools - 1, limit - num_rx_pools);
+               return -EPERM;
        }
 
        err = __ixgbe_enable_sriov(adapter, num_vfs);
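
The ternary ladder folds the three removed branches into a single bound check: the VF budget shrinks by one slot per pool already in use, including the PF's. A quick sketch of the resulting budget, assuming the ixgbe_sriov.h values of 16, 32 and 64 for the three limits (assumed here, worth verifying against the header):

	#include <stdio.h>

	/* assumed values from ixgbe_sriov.h */
	#define IXGBE_MAX_VFS_1TC 64
	#define IXGBE_MAX_VFS_4TC 32
	#define IXGBE_MAX_VFS_8TC 16

	static int vf_limit(int num_tc)
	{
		return (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
		       (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
	}

	int main(void)
	{
		/* e.g. 8 TCs with the PF pool plus 3 macvlans leaves 16 - 4 = 12 VFs */
		printf("%d\n", vf_limit(8) - 4);
		return 0;
	}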