ICE_F_PTP_EXTTS,
        ICE_F_SMA_CTRL,
        ICE_F_GNSS,
+       ICE_F_ROCE_LAG,
+       ICE_F_SRIOV_LAG,
        ICE_F_MAX
 };
 
        struct mutex sw_mutex;          /* lock for protecting VSI alloc flow */
        struct mutex tc_mutex;          /* lock to protect TC changes */
        struct mutex adev_mutex;        /* lock to protect aux device access */
+       struct mutex lag_mutex;         /* protect ice_lag struct in PF */
        u32 msg_enable;
        struct ice_ptp ptp;
        struct gnss_serial *gnss_serial;
        struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
 };
 
+extern struct workqueue_struct *ice_lag_wq;
+
 struct ice_netdev_priv {
        struct ice_vsi *vsi;
        struct ice_repr *repr;
 
 #define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE              0x0076
 #define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT                0x0077
 #define ICE_AQC_CAPS_NVM_MGMT                          0x0080
+#define ICE_AQC_CAPS_FW_LAG_SUPPORT                    0x0092
+#define ICE_AQC_BIT_ROCEV2_LAG                         0x01
+#define ICE_AQC_BIT_SRIOV_LAG                          0x02
 
        u8 major_ver;
        u8 minor_ver;
 
                          "%s: reset_restrict_support = %d\n", prefix,
                          caps->reset_restrict_support);
                break;
+       case ICE_AQC_CAPS_FW_LAG_SUPPORT:
+               caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
+               ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
+                         prefix, caps->roce_lag);
+               caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG);
+               ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
+                         prefix, caps->sriov_lag);
+               break;
        default:
                /* Not one of the recognized common capabilities */
                found = false;
 
 /* Link Aggregation code */
 
 #include "ice.h"
+#include "ice_lib.h"
 #include "ice_lag.h"
 
+#define ICE_LAG_RES_SHARED     BIT(14)
+#define ICE_LAG_RES_VALID      BIT(15)
+
 /**
  * ice_lag_set_primary - set PF LAG state as Primary
  * @lag: LAG info struct
        lag->role = ICE_LAG_NONE;
 }
 
+/**
+ * ice_lag_init_feature_support_flag - Check for NVM support for LAG
+ * @pf: PF struct
+ *
+ * Mirror the FW-reported LAG capability bits into the PF feature bitmap.
+ * roce_lag/sriov_lag are parsed from the ICE_AQC_CAPS_FW_LAG_SUPPORT
+ * device capability during capability discovery.  Both the set and the
+ * clear paths are taken so a previously-set feature flag is dropped when
+ * the capability is no longer reported (NOTE(review): presumably relevant
+ * after an NVM/firmware update or reset — confirm against callers).
+ */
+static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
+{
+       struct ice_hw_common_caps *caps;
+
+       /* common_cap holds the device capabilities read from firmware */
+       caps = &pf->hw.dev_caps.common_cap;
+       if (caps->roce_lag)
+               ice_set_feature_support(pf, ICE_F_ROCE_LAG);
+       else
+               ice_clear_feature_support(pf, ICE_F_ROCE_LAG);
+
+       if (caps->sriov_lag)
+               ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
+       else
+               ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+}
+
 /**
  * ice_lag_changeupper_event - handle LAG changeupper event
  * @lag: LAG info struct
        ice_display_lag_info(lag);
 }
 
-/**
- * ice_lag_changelower_event - handle LAG changelower event
- * @lag: LAG info struct
- * @ptr: opaque data pointer
- *
- * ptr to be cast to netdev_notifier_changelowerstate_info
- */
-static void ice_lag_changelower_event(struct ice_lag *lag, void *ptr)
-{
-       struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
-
-       if (netdev != lag->netdev)
-               return;
-
-       netdev_dbg(netdev, "bonding info\n");
-
-       if (!netif_is_lag_port(netdev))
-               netdev_dbg(netdev, "CHANGELOWER rcvd, but netdev not in LAG. Bail\n");
-}
-
 /**
  * ice_lag_event_handler - handle LAG events from netdev
  * @notif_blk: notifier block registered by this netdev
        case NETDEV_CHANGEUPPER:
                ice_lag_changeupper_event(lag, ptr);
                break;
-       case NETDEV_CHANGELOWERSTATE:
-               ice_lag_changelower_event(lag, ptr);
-               break;
        case NETDEV_BONDING_INFO:
                ice_lag_info_event(lag, ptr);
                break;
        struct ice_vsi *vsi;
        int err;
 
+       ice_lag_init_feature_support_flag(pf);
+
        pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
        if (!pf->lag)
                return -ENOMEM;
        if (lag->pf)
                ice_unregister_lag_handler(lag);
 
-       dev_put(lag->upper_netdev);
-
-       dev_put(lag->peer_netdev);
+       flush_workqueue(ice_lag_wq);
 
        kfree(lag);
 
 
  * @pf: pointer to the struct ice_pf instance
  * @f: feature enum to set
  */
-static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
+void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
 {
        if (f < 0 || f >= ICE_F_MAX)
                return;
 
 bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi);
 u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi);
 bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
+void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f);
 void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
 void ice_init_feature_support(struct ice_pf *pf);
 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
 
 }
 
 static struct workqueue_struct *ice_wq;
+struct workqueue_struct *ice_lag_wq;
 static const struct net_device_ops ice_netdev_safe_mode_ops;
 static const struct net_device_ops ice_netdev_ops;
 
 static void ice_deinit_pf(struct ice_pf *pf)
 {
        ice_service_task_stop(pf);
+       mutex_destroy(&pf->lag_mutex);
        mutex_destroy(&pf->adev_mutex);
        mutex_destroy(&pf->sw_mutex);
        mutex_destroy(&pf->tc_mutex);
        mutex_init(&pf->sw_mutex);
        mutex_init(&pf->tc_mutex);
        mutex_init(&pf->adev_mutex);
+       mutex_init(&pf->lag_mutex);
 
        INIT_HLIST_HEAD(&pf->aq_wait_list);
        spin_lock_init(&pf->aq_wait_lock);
  */
 static int __init ice_module_init(void)
 {
-       int status;
+       /* start at -ENOMEM so both workqueue-allocation failure paths can
+        * share the common return below
+        */
+       int status = -ENOMEM;
 
        pr_info("%s\n", ice_driver_string);
        pr_info("%s\n", ice_copyright);
        ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
        if (!ice_wq) {
                pr_err("Failed to create workqueue\n");
-               return -ENOMEM;
+               return status;
+       }
+
+       /* ordered workqueue: LAG work items execute one at a time
+        * (NOTE(review): presumably to serialize bonding events — confirm)
+        */
+       ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
+       if (!ice_lag_wq) {
+               pr_err("Failed to create LAG workqueue\n");
+               goto err_dest_wq;
        }
 
        status = pci_register_driver(&ice_driver);
        if (status) {
                pr_err("failed to register PCI driver, err %d\n", status);
-               destroy_workqueue(ice_wq);
+               goto err_dest_lag_wq;
        }
 
+       return 0;
+
+       /* error unwind: destroy workqueues in reverse order of creation */
+err_dest_lag_wq:
+       destroy_workqueue(ice_lag_wq);
+err_dest_wq:
+       destroy_workqueue(ice_wq);
        return status;
 }
 module_init(ice_module_init);
 {
        pci_unregister_driver(&ice_driver);
        destroy_workqueue(ice_wq);
+       destroy_workqueue(ice_lag_wq);
        pr_info("module unloaded\n");
 }
 module_exit(ice_module_exit);
 
        u8 dcb;
        u8 ieee_1588;
        u8 rdma;
+       u8 roce_lag;
+       u8 sriov_lag;
 
        bool nvm_update_pending_nvm;
        bool nvm_update_pending_orom;