edev->ndev->features = args->u.features;
 }
 
+/* ndo_fix_features hook: mask out NETIF_F_GRO_HW whenever hardware GRO
+ * cannot be used — an XDP program is attached (the old code set
+ * gro_disable for XDP with "Don't perform FW aggregations"), the MTU
+ * exceeds PAGE_SIZE (previously also forced gro_disable), or software
+ * GRO is being turned off (GRO_HW is advertised as dependent on GRO).
+ */
+netdev_features_t qede_fix_features(struct net_device *dev,
+                                   netdev_features_t features)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
+           !(features & NETIF_F_GRO))
+               features &= ~NETIF_F_GRO_HW;
+
+       return features;
+}
+
 int qede_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct qede_dev *edev = netdev_priv(dev);
        netdev_features_t changes = features ^ dev->features;
        bool need_reload = false;
 
-       /* No action needed if hardware GRO is disabled during driver load */
-       if (changes & NETIF_F_GRO) {
-               if (dev->features & NETIF_F_GRO)
-                       need_reload = !edev->gro_disable;
-               else
-                       need_reload = edev->gro_disable;
-       }
+       if (changes & NETIF_F_GRO_HW)
+               need_reload = true;
 
        if (need_reload) {
                struct qede_reload_args args;
 
 #endif
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+       .ndo_fix_features = qede_fix_features,
        .ndo_set_features = qede_set_features,
        .ndo_get_stats64 = qede_get_stats64,
 #ifdef CONFIG_QED_SRIOV
        .ndo_change_mtu = qede_change_mtu,
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+       .ndo_fix_features = qede_fix_features,
        .ndo_set_features = qede_set_features,
        .ndo_get_stats64 = qede_get_stats64,
        .ndo_udp_tunnel_add = qede_udp_tunnel_add,
        .ndo_change_mtu = qede_change_mtu,
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+       .ndo_fix_features = qede_fix_features,
        .ndo_set_features = qede_set_features,
        .ndo_get_stats64 = qede_get_stats64,
        .ndo_udp_tunnel_add = qede_udp_tunnel_add,
        ndev->priv_flags |= IFF_UNICAST_FLT;
 
        /* user-changeble features */
-       hw_features = NETIF_F_GRO | NETIF_F_SG |
+       hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
                      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                      NETIF_F_TSO | NETIF_F_TSO6;
 
        dma_addr_t mapping;
        int i;
 
-       /* Don't perform FW aggregations in case of XDP */
-       if (edev->xdp_prog)
-               edev->gro_disable = 1;
-
        if (edev->gro_disable)
                return 0;
 
-       if (edev->ndev->mtu > PAGE_SIZE) {
-               edev->gro_disable = 1;
-               return 0;
-       }
-
        for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
                struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
                struct sw_rx_data *replace_buf = &tpa_info->buffer;
 err:
        qede_free_sge_mem(edev, rxq);
        edev->gro_disable = 1;
+       edev->ndev->features &= ~NETIF_F_GRO_HW;
        return -ENOMEM;
 }
 
                         edev->ndev->name, queue_id);
        }
 
-       edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
+       edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
 }
 
 static int qede_set_real_num_queues(struct qede_dev *edev)