of the devices that use this firmware is available here:
          https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
 
+config IWLMLD
+       tristate "Intel Wireless WiFi MLD Firmware support"
+       select WANT_DEV_COREDUMP
+       depends on MAC80211
+       depends on PTP_1588_CLOCK_OPTIONAL
+       help
+         This is the driver that supports firmwares of MLD capable devices.
+         The list of the devices that use this firmware is available here:
+         https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
+
 # don't call it _MODULE -- will confuse Kconfig/fixdep/...
 config IWLWIFI_OPMODE_MODULAR
        bool
        default y if IWLDVM=m
        default y if IWLMVM=m
+       default y if IWLMLD=m
 
-comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
-       depends on IWLDVM=n && IWLMVM=n
+comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM or IWLMLD"
+       depends on IWLDVM=n && IWLMVM=n && IWLMLD=n
 
 menu "Debugging Options"
 
 
 iwlwifi-objs           += pcie/trans-gen2.o pcie/tx-gen2.o
 iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
 iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
-iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o cfg/dr.o
+iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o
+iwlwifi-$(CONFIG_IWLMLD) += cfg/bz.o cfg/sc.o cfg/dr.o
 iwlwifi-objs           += iwl-dbg-tlv.o
 iwlwifi-objs           += iwl-trans.o
 
 iwlwifi-objs           += fw/dbg.o fw/pnvm.o fw/dump.o
 iwlwifi-objs           += fw/regulatory.o
 iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
+iwlwifi-$(CONFIG_IWLMLD) += fw/smem.o fw/init.o
 iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
 iwlwifi-$(CONFIG_EFI)  += fw/uefi.o
 iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
 obj-$(CONFIG_IWLDVM)   += dvm/
 obj-$(CONFIG_IWLMVM)   += mvm/
 obj-$(CONFIG_IWLMEI)   += mei/
+obj-$(CONFIG_IWLMLD)   += mld/
 
 obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/
 
 
 #define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
        IWL_BZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
 
+#if !IS_ENABLED(CONFIG_IWLMVM)
+const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
+const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
+#endif
+
 static const struct iwl_base_params iwl_bz_base_params = {
        .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
        .num_of_queues = 512,
 
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2022, 2024-2025 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
 #ifndef __iwl_fw_api_phy_h__
 #define __iwl_fw_api_phy_h__
+#include <linux/types.h>
+#include <linux/bits.h>
 
 /**
  * enum iwl_phy_ops_subcmd_ids - PHY group commands
 
 
 extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
 extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
+#endif /* CONFIG_IWLMVM */
+
+#if IS_ENABLED(CONFIG_IWLMLD)
 
 extern const struct iwl_ht_params iwl_bz_ht_params;
 
 extern const struct iwl_cfg iwl_cfg_sc2f;
 extern const struct iwl_cfg iwl_cfg_dr;
 extern const struct iwl_cfg iwl_cfg_br;
-#endif /* CONFIG_IWLMVM */
+#endif /* CONFIG_IWLMLD */
 
 #endif /* __IWL_CONFIG_H__ */
 
 enum {
        DVM_OP_MODE,
        MVM_OP_MODE,
+#if IS_ENABLED(CONFIG_IWLMLD)
+       MLD_OP_MODE,
+#endif
 };
 
 /* Protects the table contents, i.e. the ops pointer & drv list */
 } iwlwifi_opmode_table[] = {           /* ops set when driver is initialized */
        [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL },
        [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL },
+#if IS_ENABLED(CONFIG_IWLMLD)
+       [MLD_OP_MODE] = { .name = "iwlmld", .ops = NULL },
+#endif
 };
 
 #define IWL_DEFAULT_SCAN_CHANNELS 40
        size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
        struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
        size_t n_mem_tlv;
+       u32 major;
 };
 
 static void alloc_sec_data(struct iwl_firmware_pieces *pieces,
                        break;
                case IWL_UCODE_TLV_FW_VERSION: {
                        const __le32 *ptr = (const void *)tlv_data;
-                       u32 major, minor;
+                       u32 minor;
                        u8 local_comp;
 
                        if (tlv_len != sizeof(u32) * 3)
                                goto invalid_tlv_len;
 
-                       major = le32_to_cpup(ptr++);
+                       pieces->major = le32_to_cpup(ptr++);
                        minor = le32_to_cpup(ptr++);
                        local_comp = le32_to_cpup(ptr);
 
                        snprintf(drv->fw.fw_version,
                                 sizeof(drv->fw.fw_version),
-                                "%u.%08x.%u %s", major, minor,
+                                "%u.%08x.%u %s", pieces->major, minor,
                                 local_comp, iwl_reduced_fw_name(drv));
                        break;
                        }
        }
 }
 
+#define IWL_MLD_SUPPORTED_FW_VERSION 97
+
 /*
  * iwl_req_fw_callback - callback when firmware was loaded
  *
                break;
        }
 
+#if IS_ENABLED(CONFIG_IWLMLD)
+	/* MLD-capable firmware (major >= 97) is driven by the iwlmld op_mode.
+	 * No 'else' here: the "loaded firmware version" print below must run
+	 * in both cases.
+	 */
+	if (pieces->major >= IWL_MLD_SUPPORTED_FW_VERSION)
+		op = &iwlwifi_opmode_table[MLD_OP_MODE];
+#else
+	if (pieces->major >= IWL_MLD_SUPPORTED_FW_VERSION) {
+		IWL_ERR(drv,
+			"IWLMLD needs to be compiled to support this firmware\n");
+		mutex_unlock(&iwlwifi_opmode_table_mtx);
+		goto out_unbind;
+	}
+#endif
+
        IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
                 drv->fw.fw_version, op->name);
 
 
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+obj-$(CONFIG_IWLMLD)   += iwlmld.o
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/
+
+iwlmld-y += mld.o notif.o mac80211.o fw.o power.o iface.o link.o rx.o mcc.o session-protect.o phy.o
+iwlmld-y += scan.o sta.o tx.o coex.o tlc.o agg.o key.o regulatory.o ap.o thermal.o roc.o stats.o
+iwlmld-y += low_latency.o mlo.o ptp.o time_sync.o ftm-initiator.o
+iwlmld-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
+iwlmld-$(CONFIG_IWLWIFI_LEDS) += led.o
+iwlmld-$(CONFIG_PM_SLEEP) += d3.o
+
+# non-upstream things
+iwlmld-$(CONFIG_IWL_VENDOR_CMDS) += vendor-cmd.o
+iwlmld-$(CONFIG_IWLMVM_AX_SOFTAP_TESTMODE) += ax-softap-testmode.o
+
+subdir-ccflags-y += -I$(src)/../
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include "agg.h"
+#include "sta.h"
+#include "hcmd.h"
+
+/* Release frames from the reorder buffer up to, but not including, @nssn:
+ * pass them to mac80211 in order and advance the buffer head to @nssn.
+ */
+static void
+iwl_mld_reorder_release_frames(struct iwl_mld *mld, struct ieee80211_sta *sta,
+			       struct napi_struct *napi,
+			       struct iwl_mld_baid_data *baid_data,
+			       struct iwl_mld_reorder_buffer *reorder_buf,
+			       u16 nssn)
+{
+	struct iwl_mld_reorder_buf_entry *entries =
+		&baid_data->entries[reorder_buf->queue *
+				    baid_data->entries_per_queue];
+	u16 ssn = reorder_buf->head_sn;
+
+	while (ieee80211_sn_less(ssn, nssn)) {
+		int index = ssn % baid_data->buf_size;
+		struct sk_buff_head *skb_list = &entries[index].frames;
+		struct sk_buff *skb;
+
+		ssn = ieee80211_sn_inc(ssn);
+
+		/* Empty the list. Will have more than one frame for A-MSDU.
+		 * Empty list is valid as well since nssn indicates frames were
+		 * received.
+		 */
+		while ((skb = __skb_dequeue(skb_list))) {
+			iwl_mld_pass_packet_to_mac80211(mld, napi, skb,
+							reorder_buf->queue,
+							sta);
+			reorder_buf->num_stored--;
+		}
+	}
+	reorder_buf->head_sn = nssn;
+}
+
+/* Look up the BA session data for @baid and release buffered frames up to
+ * @nssn from the reorder buffer of @queue.
+ */
+static void iwl_mld_release_frames_from_notif(struct iwl_mld *mld,
+					      struct napi_struct *napi,
+					      u8 baid, u16 nssn, int queue)
+{
+	struct iwl_mld_reorder_buffer *reorder_buf;
+	struct iwl_mld_baid_data *ba_data;
+	struct ieee80211_link_sta *link_sta;
+	u32 sta_id;
+
+	IWL_DEBUG_HT(mld, "Frame release notification for BAID %u, NSSN %d\n",
+		     baid, nssn);
+
+	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
+			 baid >= ARRAY_SIZE(mld->fw_id_to_ba)))
+		return;
+
+	rcu_read_lock();
+
+	ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
+	if (!ba_data) {
+		IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid);
+		goto out_unlock;
+	}
+
+	/* pick any STA ID to find the pointer */
+	sta_id = ffs(ba_data->sta_mask) - 1;
+	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
+		goto out_unlock;
+
+	reorder_buf = &ba_data->reorder_buf[queue];
+
+	iwl_mld_reorder_release_frames(mld, link_sta->sta, napi, ba_data,
+				       reorder_buf, nssn);
+out_unlock:
+	rcu_read_unlock();
+}
+
+/* Handle a frame release notification from the firmware: release buffered
+ * frames up to the reported NSSN for the given BAID on this RX queue.
+ */
+void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld,
+					struct napi_struct *napi,
+					struct iwl_rx_packet *pkt, int queue)
+{
+	struct iwl_frame_release *release = (void *)pkt->data;
+	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+
+	/* reject truncated notifications */
+	if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
+			 "Unexpected frame release notif size %u (expected %zu)\n",
+			 pkt_len, sizeof(*release)))
+		return;
+
+	iwl_mld_release_frames_from_notif(mld, napi, release->baid,
+					  le16_to_cpu(release->nssn),
+					  queue);
+}
+
+/* Handle a BAR frame release notification: validate the BAID/STA/TID the
+ * firmware reported and release frames up to the NSSN from the BAR.
+ */
+void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
+					    struct napi_struct *napi,
+					    struct iwl_rx_packet *pkt,
+					    int queue)
+{
+	struct iwl_bar_frame_release *release = (void *)pkt->data;
+	struct iwl_mld_baid_data *baid_data;
+	unsigned int baid, nssn, sta_id, tid;
+	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+
+	/* reject truncated notifications */
+	if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
+			 "Unexpected frame release notif size %u (expected %zu)\n",
+			 pkt_len, sizeof(*release)))
+		return;
+
+	baid = le32_get_bits(release->ba_info,
+			     IWL_BAR_FRAME_RELEASE_BAID_MASK);
+	nssn = le32_get_bits(release->ba_info,
+			     IWL_BAR_FRAME_RELEASE_NSSN_MASK);
+	sta_id = le32_get_bits(release->sta_tid,
+			       IWL_BAR_FRAME_RELEASE_STA_MASK);
+	tid = le32_get_bits(release->sta_tid,
+			    IWL_BAR_FRAME_RELEASE_TID_MASK);
+
+	if (IWL_FW_CHECK(mld, baid >= ARRAY_SIZE(mld->fw_id_to_ba),
+			 "BAR release: invalid BAID (%x)\n", baid))
+		return;
+
+	rcu_read_lock();
+	baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
+	/* Bail out if no session data is allocated for this BAID. Note that
+	 * IWL_FW_CHECK() returns its condition, so the check must NOT be
+	 * negated here - doing so skipped valid releases and dereferenced
+	 * a NULL baid_data below.
+	 */
+	if (IWL_FW_CHECK(mld, !baid_data,
+			 "Got valid BAID %d but not allocated, invalid BAR release!\n",
+			 baid))
+		goto out_unlock;
+
+	if (IWL_FW_CHECK(mld, tid != baid_data->tid ||
+			 sta_id > mld->fw->ucode_capa.num_stations ||
+			 !(baid_data->sta_mask & BIT(sta_id)),
+			 "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n",
+			 baid, baid_data->sta_mask, baid_data->tid, sta_id,
+			 tid))
+		goto out_unlock;
+
+	IWL_DEBUG_DROP(mld, "Received a BAR, expect packet loss: nssn %d\n",
+		       nssn);
+
+	iwl_mld_release_frames_from_notif(mld, napi, baid, nssn, queue);
+out_unlock:
+	rcu_read_unlock();
+}
+
+/* RX-queues sync handler for the internal DELBA notification: flush every
+ * frame still held in this queue's reorder buffer to mac80211 before the
+ * BA session data is freed.
+ */
+void iwl_mld_del_ba(struct iwl_mld *mld, int queue,
+		    struct iwl_mld_delba_data *data)
+{
+	struct iwl_mld_baid_data *ba_data;
+	struct iwl_mld_reorder_buffer *reorder_buf;
+	struct ieee80211_link_sta *link_sta;
+	u8 baid = data->baid;
+	u32 sta_id;
+
+	if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
+		return;
+
+	rcu_read_lock();
+
+	ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
+	if (WARN_ON_ONCE(!ba_data))
+		goto out_unlock;
+
+	/* pick any STA ID to find the pointer */
+	sta_id = ffs(ba_data->sta_mask) - 1;
+	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
+		goto out_unlock;
+
+	reorder_buf = &ba_data->reorder_buf[queue];
+
+	/* release all frames that are in the reorder buffer to the stack */
+	iwl_mld_reorder_release_frames(mld, link_sta->sta, NULL,
+				       ba_data, reorder_buf,
+				       ieee80211_sn_add(reorder_buf->head_sn,
+							ba_data->buf_size));
+out_unlock:
+	rcu_read_unlock();
+}
+
+/* Reorder an RX MPDU within its BA session's reorder buffer.
+ *
+ * Returns:
+ *  IWL_MLD_PASS_SKB     - the frame should be passed to mac80211 now,
+ *  IWL_MLD_BUFFERED_SKB - the frame was stored for in-order release later,
+ *  IWL_MLD_DROP_SKB     - the frame is a duplicate or outdated and must
+ *                         be dropped.
+ */
+enum iwl_mld_reorder_result
+iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
+		int queue, struct ieee80211_sta *sta,
+		struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc)
+{
+	struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
+	struct iwl_mld_baid_data *baid_data;
+	struct iwl_mld_reorder_buffer *buffer;
+	struct iwl_mld_reorder_buf_entry *entries;
+	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+	struct iwl_mld_link_sta *mld_link_sta;
+	u32 reorder = le32_to_cpu(desc->reorder_data);
+	bool amsdu, last_subframe, is_old_sn, is_dup;
+	u8 tid = ieee80211_get_tid(hdr);
+	u8 baid;
+	u16 nssn, sn;
+	u32 sta_mask = 0;
+	int index;
+	u8 link_id;
+
+	baid = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_BAID_MASK);
+
+	/* This also covers the case of receiving a Block Ack Request
+	 * outside a BA session; we'll pass it to mac80211 and that
+	 * then sends a delBA action frame.
+	 * This also covers pure monitor mode, in which case we won't
+	 * have any BA sessions.
+	 */
+	if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
+		return IWL_MLD_PASS_SKB;
+
+	/* no sta yet */
+	if (WARN_ONCE(!sta,
+		      "Got valid BAID without a valid station assigned\n"))
+		return IWL_MLD_PASS_SKB;
+
+	/* not a data packet */
+	if (!ieee80211_is_data_qos(hdr->frame_control) ||
+	    is_multicast_ether_addr(hdr->addr1))
+		return IWL_MLD_PASS_SKB;
+
+	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
+		return IWL_MLD_PASS_SKB;
+
+	baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
+	if (!baid_data) {
+		IWL_DEBUG_HT(mld,
+			     "Got valid BAID but no baid allocated, bypass re-ordering (BAID=%d reorder=0x%x)\n",
+			     baid, reorder);
+		return IWL_MLD_PASS_SKB;
+	}
+
+	/* collect the fw IDs of all link stations this frame may belong to */
+	for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
+		sta_mask |= BIT(mld_link_sta->fw_id);
+
+	/* verify the BAID is correctly mapped to the sta and tid */
+	if (IWL_FW_CHECK(mld,
+			 tid != baid_data->tid ||
+			 !(sta_mask & baid_data->sta_mask),
+			 "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n",
+			 baid, baid_data->sta_mask, baid_data->tid,
+			 sta_mask, tid))
+		return IWL_MLD_PASS_SKB;
+
+	buffer = &baid_data->reorder_buf[queue];
+	entries = &baid_data->entries[queue * baid_data->entries_per_queue];
+
+	is_old_sn = !!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN);
+
+	/* until reordering is valid on this queue, pass old-SN frames up */
+	if (!buffer->valid && is_old_sn)
+		return IWL_MLD_PASS_SKB;
+
+	buffer->valid = true;
+
+	is_dup = !!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE));
+
+	/* drop any duplicated or outdated packets */
+	if (is_dup || is_old_sn)
+		return IWL_MLD_DROP_SKB;
+
+	sn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_SN_MASK);
+	nssn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_NSSN_MASK);
+	amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
+	last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
+
+	/* release immediately if allowed by nssn and no stored frames */
+	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
+		if (!amsdu || last_subframe)
+			buffer->head_sn = nssn;
+		return IWL_MLD_PASS_SKB;
+	}
+
+	/* release immediately if there are no stored frames, and the sn is
+	 * equal to the head.
+	 * This can happen due to reorder timer, where NSSN is behind head_sn.
+	 * When we released everything, and we got the next frame in the
+	 * sequence, according to the NSSN we can't release immediately,
+	 * while technically there is no hole and we can move forward.
+	 */
+	if (!buffer->num_stored && sn == buffer->head_sn) {
+		if (!amsdu || last_subframe)
+			buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
+		return IWL_MLD_PASS_SKB;
+	}
+
+	/* put in reorder buffer */
+	index = sn % baid_data->buf_size;
+	__skb_queue_tail(&entries[index].frames, skb);
+	buffer->num_stored++;
+
+	/* We cannot trust NSSN for AMSDU sub-frames that are not the last. The
+	 * reason is that NSSN advances on the first sub-frame, and may cause
+	 * the reorder buffer to advance before all the sub-frames arrive.
+	 *
+	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
+	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
+	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
+	 * already ahead and it will be dropped.
+	 * If the last sub-frame is not on this queue - we will get frame
+	 * release notification with up to date NSSN.
+	 */
+	if (!amsdu || last_subframe)
+		iwl_mld_reorder_release_frames(mld, sta, napi, baid_data,
+					       buffer, nssn);
+
+	return IWL_MLD_BUFFERED_SKB;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_reorder);
+
+/* Timer callback for an RX BA session inactivity timeout.
+ *
+ * If a frame was received within the last 2 * @timeout TUs, just re-arm
+ * the timer; otherwise notify mac80211 so it tears the session down.
+ */
+static void iwl_mld_rx_agg_session_expired(struct timer_list *t)
+{
+	struct iwl_mld_baid_data *data =
+		from_timer(data, t, session_timer);
+	struct iwl_mld_baid_data __rcu **rcu_ptr = data->rcu_ptr;
+	struct iwl_mld_baid_data *ba_data;
+	struct ieee80211_link_sta *link_sta;
+	struct iwl_mld_sta *mld_sta;
+	unsigned long timeout;
+	unsigned int sta_id;
+
+	rcu_read_lock();
+
+	ba_data = rcu_dereference(*rcu_ptr);
+	if (WARN_ON(!ba_data))
+		goto unlock;
+
+	if (WARN_ON(!ba_data->timeout))
+		goto unlock;
+
+	timeout = ba_data->last_rx_timestamp +
+		  TU_TO_JIFFIES(ba_data->timeout * 2);
+	if (time_is_after_jiffies(timeout)) {
+		/* frames were seen recently - re-arm instead of expiring */
+		mod_timer(&ba_data->session_timer, timeout);
+		goto unlock;
+	}
+
+	/* timer expired, pick any STA ID to find the pointer */
+	sta_id = ffs(ba_data->sta_mask) - 1;
+	link_sta = rcu_dereference(ba_data->mld->fw_id_to_link_sta[sta_id]);
+
+	/* sta should be valid unless the following happens:
+	 * The firmware asserts which triggers a reconfig flow, but
+	 * the reconfig fails before we set the pointer to sta into
+	 * the fw_id_to_link_sta pointer table. mac80211 can't stop
+	 * A-MPDU and hence the timer continues to run. Then, the
+	 * timer expires and sta is NULL.
+	 */
+	if (IS_ERR_OR_NULL(link_sta) || WARN_ON(!link_sta->sta))
+		goto unlock;
+
+	mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+	ieee80211_rx_ba_timer_expired(mld_sta->vif, link_sta->sta->addr,
+				      ba_data->tid);
+unlock:
+	rcu_read_unlock();
+}
+
+/* Remove the RX BA session for @sta/@tid in the firmware.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int
+iwl_mld_stop_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta, int tid)
+{
+	struct iwl_rx_baid_cfg_cmd cmd = {
+		.action = cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
+		.remove.sta_id_mask =
+			cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
+		.remove.tid = cpu_to_le32(tid),
+	};
+	int ret;
+
+	ret = iwl_mld_send_cmd_pdu(mld,
+				   WIDE_ID(DATA_PATH_GROUP,
+					   RX_BAID_ALLOCATION_CONFIG_CMD),
+				   &cmd);
+	if (ret)
+		return ret;
+
+	IWL_DEBUG_HT(mld, "RX BA Session stopped in fw\n");
+
+	return ret;
+}
+
+/* Start an RX BA session in the firmware for @sta/@tid.
+ *
+ * Returns the BAID the firmware allocated for the session (>= 0), or a
+ * negative error code.
+ */
+static int
+iwl_mld_start_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta,
+		       int tid, u16 ssn, u16 buf_size)
+{
+	struct iwl_rx_baid_cfg_cmd cmd = {
+		.action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD),
+		.alloc.sta_id_mask =
+			cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
+		.alloc.tid = tid,
+		.alloc.ssn = cpu_to_le16(ssn),
+		.alloc.win_size = cpu_to_le16(buf_size),
+	};
+	struct iwl_host_cmd hcmd = {
+		.id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
+		.flags = CMD_WANT_SKB,
+		.len[0] = sizeof(cmd),
+		.data[0] = &cmd,
+	};
+	struct iwl_rx_baid_cfg_resp *resp;
+	struct iwl_rx_packet *pkt;
+	u32 resp_len;
+	int ret, baid;
+
+	/* the response payload is just the allocated BAID */
+	BUILD_BUG_ON(sizeof(*resp) != sizeof(baid));
+
+	ret = iwl_mld_send_cmd(mld, &hcmd);
+	if (ret)
+		return ret;
+
+	pkt = hcmd.resp_pkt;
+
+	resp_len = iwl_rx_packet_payload_len(pkt);
+	if (IWL_FW_CHECK(mld, resp_len != sizeof(*resp),
+			 "BAID_ALLOC_CMD: unexpected response length %d\n",
+			 resp_len)) {
+		ret = -EIO;
+		goto out;
+	}
+
+	IWL_DEBUG_HT(mld, "RX BA Session started in fw\n");
+
+	resp = (void *)pkt->data;
+	baid = le32_to_cpu(resp->baid);
+
+	if (IWL_FW_CHECK(mld, baid < 0 || baid >= ARRAY_SIZE(mld->fw_id_to_ba),
+			 "BAID_ALLOC_CMD: invalid BAID response %d\n", baid)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = baid;
+out:
+	iwl_free_resp(&hcmd);
+	return ret;
+}
+
+/* Initialize the per-queue reorder buffers and per-SN frame lists of a
+ * new BA session, with each buffer head set to @ssn.
+ */
+static void iwl_mld_init_reorder_buffer(struct iwl_mld *mld,
+					struct iwl_mld_baid_data *data,
+					u16 ssn)
+{
+	for (int i = 0; i < mld->trans->num_rx_queues; i++) {
+		struct iwl_mld_reorder_buffer *reorder_buf =
+			&data->reorder_buf[i];
+		struct iwl_mld_reorder_buf_entry *entries =
+			&data->entries[i * data->entries_per_queue];
+
+		reorder_buf->head_sn = ssn;
+		reorder_buf->queue = i;
+
+		for (int j = 0; j < data->buf_size; j++)
+			__skb_queue_head_init(&entries[j].frames);
+	}
+}
+
+/* Tear down the reorder buffers of a BA session: sync all RX queues so
+ * they flush any frames they still hold, then purge anything unexpectedly
+ * left over.
+ */
+static void iwl_mld_free_reorder_buffer(struct iwl_mld *mld,
+					struct iwl_mld_baid_data *data)
+{
+	struct iwl_mld_delba_data delba_data = {
+		.baid = data->baid,
+	};
+
+	iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_NOTIF_DEL_BA,
+			       &delba_data, sizeof(delba_data));
+
+	for (int i = 0; i < mld->trans->num_rx_queues; i++) {
+		struct iwl_mld_reorder_buffer *reorder_buf =
+			&data->reorder_buf[i];
+		struct iwl_mld_reorder_buf_entry *entries =
+			&data->entries[i * data->entries_per_queue];
+
+		if (likely(!reorder_buf->num_stored))
+			continue;
+
+		/* This shouldn't happen in regular DELBA since the RX queues
+		 * sync internal DELBA notification should trigger a release
+		 * of all frames in the reorder buffer.
+		 */
+		WARN_ON(1);
+
+		for (int j = 0; j < data->buf_size; j++)
+			__skb_queue_purge(&entries[j].frames);
+	}
+}
+
+/* Start an RX BA (aggregation) session for @sta/@tid.
+ *
+ * Allocates the per-queue reorder buffers, starts the session in the
+ * firmware and publishes the session data under RCU.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
+			   int tid, u16 ssn, u16 buf_size, u16 timeout)
+{
+	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+	struct iwl_mld_baid_data *baid_data = NULL;
+	u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
+	int ret, baid;
+	u32 sta_mask;
+
+	lockdep_assert_wiphy(mld->wiphy);
+
+	if (mld->num_rx_ba_sessions >= IWL_MAX_BAID) {
+		IWL_DEBUG_HT(mld,
+			     "Max num of RX BA sessions reached; blocking new session\n");
+		return -ENOSPC;
+	}
+
+	sta_mask = iwl_mld_fw_sta_id_mask(mld, sta);
+	if (WARN_ON(!sta_mask))
+		return -EINVAL;
+
+	/* sparse doesn't like the __align() so don't check */
+#ifndef __CHECKER__
+	/* The division below will be OK if either the cache line size
+	 * can be divided by the entry size (ALIGN will round up) or if
+	 * the entry size can be divided by the cache line size, in which
+	 * case the ALIGN() will do nothing.
+	 */
+	BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
+		     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
+#endif
+
+	/* Upward align the reorder buffer size to fill an entire cache
+	 * line for each queue, to avoid sharing cache lines between
+	 * different queues.
+	 */
+	reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
+
+	/* Allocate here so if allocation fails we can bail out early
+	 * before starting the BA session in the firmware
+	 */
+	baid_data = kzalloc(sizeof(*baid_data) +
+			    mld->trans->num_rx_queues * reorder_buf_size,
+			    GFP_KERNEL);
+	if (!baid_data)
+		return -ENOMEM;
+
+	/* This division is why we need the above BUILD_BUG_ON(),
+	 * if that doesn't hold then this will not be right.
+	 */
+	baid_data->entries_per_queue =
+		reorder_buf_size / sizeof(baid_data->entries[0]);
+
+	baid = iwl_mld_start_ba_in_fw(mld, sta, tid, ssn, buf_size);
+	if (baid < 0) {
+		ret = baid;
+		goto out_free;
+	}
+
+	mld->num_rx_ba_sessions++;
+	mld_sta->tid_to_baid[tid] = baid;
+
+	baid_data->baid = baid;
+	baid_data->mld = mld;
+	baid_data->tid = tid;
+	baid_data->buf_size = buf_size;
+	baid_data->sta_mask = sta_mask;
+	baid_data->timeout = timeout;
+	baid_data->last_rx_timestamp = jiffies;
+	baid_data->rcu_ptr = &mld->fw_id_to_ba[baid];
+
+	iwl_mld_init_reorder_buffer(mld, baid_data, ssn);
+
+	/* inactivity timer armed for twice the configured session timeout */
+	timer_setup(&baid_data->session_timer, iwl_mld_rx_agg_session_expired,
+		    0);
+	if (timeout)
+		mod_timer(&baid_data->session_timer,
+			  TU_TO_EXP_TIME(timeout * 2));
+
+	IWL_DEBUG_HT(mld, "STA mask=0x%x (tid=%d) is assigned to BAID %d\n",
+		     baid_data->sta_mask, tid, baid);
+
+	/* protect the BA data with RCU to cover a case where our
+	 * internal RX sync mechanism will timeout (not that it's
+	 * supposed to happen) and we will free the session data while
+	 * RX is being processed in parallel
+	 */
+	WARN_ON(rcu_access_pointer(mld->fw_id_to_ba[baid]));
+	rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data);
+
+	return 0;
+
+out_free:
+	kfree(baid_data);
+	return ret;
+}
+
+/* Stop the RX BA session for @sta/@tid and free its driver state.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta,
+			  int tid)
+{
+	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+	int baid = mld_sta->tid_to_baid[tid];
+	struct iwl_mld_baid_data *baid_data;
+	int ret;
+
+	lockdep_assert_wiphy(mld->wiphy);
+
+	/* during firmware restart, do not send the command as the firmware no
+	 * longer recognizes the session. instead, only clear the driver BA
+	 * session data.
+	 */
+	if (!mld->fw_status.in_hw_restart) {
+		ret = iwl_mld_stop_ba_in_fw(mld, sta, tid);
+		if (ret)
+			return ret;
+	}
+
+	if (!WARN_ON(mld->num_rx_ba_sessions == 0))
+		mld->num_rx_ba_sessions--;
+
+	baid_data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
+	if (WARN_ON(!baid_data))
+		return -EINVAL;
+
+	if (timer_pending(&baid_data->session_timer))
+		timer_shutdown_sync(&baid_data->session_timer);
+
+	iwl_mld_free_reorder_buffer(mld, baid_data);
+
+	/* free after a grace period - RX paths may still hold a reference */
+	RCU_INIT_POINTER(mld->fw_id_to_ba[baid], NULL);
+	kfree_rcu(baid_data, rcu_head);
+
+	IWL_DEBUG_HT(mld, "BAID %d is free\n", baid);
+
+	return 0;
+}
+
+/* Re-point all BA sessions currently owned by @old_sta_mask to
+ * @new_sta_mask in the firmware (used when a station's set of link
+ * stations changes).
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int iwl_mld_update_sta_baids(struct iwl_mld *mld,
+			     u32 old_sta_mask,
+			     u32 new_sta_mask)
+{
+	struct iwl_rx_baid_cfg_cmd cmd = {
+		.action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY),
+		.modify.old_sta_id_mask = cpu_to_le32(old_sta_mask),
+		.modify.new_sta_id_mask = cpu_to_le32(new_sta_mask),
+	};
+	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+	int baid;
+
+	/* mac80211 will remove sessions later, but we ignore all that */
+	if (mld->fw_status.in_hw_restart)
+		return 0;
+
+	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
+
+	for (baid = 0; baid < ARRAY_SIZE(mld->fw_id_to_ba); baid++) {
+		struct iwl_mld_baid_data *data;
+		int ret;
+
+		data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
+		if (!data)
+			continue;
+
+		if (!(data->sta_mask & old_sta_mask))
+			continue;
+
+		WARN_ONCE(data->sta_mask != old_sta_mask,
+			  "BAID data for %d corrupted - expected 0x%x found 0x%x\n",
+			  baid, old_sta_mask, data->sta_mask);
+
+		cmd.modify.tid = cpu_to_le32(data->tid);
+
+		ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
+		if (ret)
+			return ret;
+		data->sta_mask = new_sta_mask;
+	}
+
+	return 0;
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_agg_h__
+#define __iwl_agg_h__
+
+#include "mld.h"
+#include "fw/api/rx.h"
+
+/* RX BA (block-ack) session and reorder-buffer definitions for MLD */
+
+/**
+ * struct iwl_mld_reorder_buffer - per ra/tid/queue reorder buffer
+ * @head_sn: reorder window head sequence number
+ * @num_stored: number of MPDUs stored in the buffer
+ * @queue: queue of this reorder buffer
+ * @valid: true if reordering is valid for this queue
+ */
+struct iwl_mld_reorder_buffer {
+       u16 head_sn;
+       u16 num_stored;
+       int queue;
+       bool valid;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mld_reorder_buf_entry - reorder buffer entry per-queue/per-seqno
+ * @frames: list of skbs stored. a list is necessary because in an A-MSDU,
+ *     all sub-frames share the same sequence number, so they are stored
+ *     together in the same list.
+ */
+struct iwl_mld_reorder_buf_entry {
+       struct sk_buff_head frames;
+}
+#ifndef __CHECKER__
+/* sparse doesn't like this construct: "bad integer constant expression" */
+__aligned(roundup_pow_of_two(sizeof(struct sk_buff_head)))
+#endif
+;
+
+/**
+ * struct iwl_mld_baid_data - Block Ack session data
+ * @rcu_head: RCU head for freeing this data
+ * @sta_mask: station mask for the BAID
+ * @tid: tid of the session
+ * @baid: baid of the session
+ * @buf_size: the reorder buffer size as set by the last ADDBA request
+ * @entries_per_queue: number of buffers per queue, this actually gets
+ *     aligned up to avoid cache line sharing between queues
+ * @timeout: the timeout value specified in the ADDBA request.
+ * @last_rx_timestamp: timestamp of the last received packet (in jiffies). This
+ *     value is updated only when the configured @timeout has passed since
+ *     the last update to minimize cache bouncing between RX queues.
+ * @session_timer: timer is set to expire after 2 * @timeout (since we want
+ *     to minimize the cache bouncing by updating @last_rx_timestamp only once
+ *     after @timeout has passed). If no packets are received within this
+ *     period, it informs mac80211 to initiate delBA flow, terminating the
+ *     BA session.
+ * @rcu_ptr: BA data RCU protected access
+ * @mld: mld pointer, needed for timer context
+ * @reorder_buf: reorder buffer, allocated per queue
+ * @entries: data
+ */
+struct iwl_mld_baid_data {
+       struct rcu_head rcu_head;
+       u32 sta_mask;
+       u8 tid;
+       u8 baid;
+       u16 buf_size;
+       u16 entries_per_queue;
+       u16 timeout;
+       struct timer_list session_timer;
+       unsigned long last_rx_timestamp;
+       struct iwl_mld_baid_data __rcu **rcu_ptr;
+       struct iwl_mld *mld;
+       struct iwl_mld_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
+       /* must stay last - flexible array, sized by @entries_per_queue */
+       struct iwl_mld_reorder_buf_entry entries[] ____cacheline_aligned_in_smp;
+};
+
+/**
+ * struct iwl_mld_delba_data - RX queue sync data for %IWL_MLD_RXQ_NOTIF_DEL_BA
+ *
+ * @baid: Block Ack id, used to identify the BA session to be removed
+ */
+struct iwl_mld_delba_data {
+       u32 baid;
+} __packed;
+
+/**
+ * enum iwl_mld_reorder_result - Possible return values for iwl_mld_reorder()
+ * indicating how the caller should handle the skb based on the result.
+ *
+ * @IWL_MLD_PASS_SKB: skb should be passed to upper layer.
+ * @IWL_MLD_BUFFERED_SKB: skb has been buffered, don't pass it to upper layer.
+ * @IWL_MLD_DROP_SKB: skb should be dropped and freed by the caller.
+ */
+enum iwl_mld_reorder_result {
+       IWL_MLD_PASS_SKB,
+       IWL_MLD_BUFFERED_SKB,
+       IWL_MLD_DROP_SKB
+};
+
+/* mac80211 ampdu_action handlers (RX side) */
+int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
+                          int tid, u16 ssn, u16 buf_size, u16 timeout);
+int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta,
+                         int tid);
+
+/* Reorder an incoming MPDU; see &enum iwl_mld_reorder_result */
+enum iwl_mld_reorder_result
+iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
+               int queue, struct ieee80211_sta *sta,
+               struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc);
+
+/* FW notification handlers releasing frames from the reorder buffer */
+void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld,
+                                       struct napi_struct *napi,
+                                       struct iwl_rx_packet *pkt, int queue);
+void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
+                                           struct napi_struct *napi,
+                                           struct iwl_rx_packet *pkt,
+                                           int queue);
+
+void iwl_mld_del_ba(struct iwl_mld *mld, int queue,
+                   struct iwl_mld_delba_data *data);
+
+int iwl_mld_update_sta_baids(struct iwl_mld *mld,
+                            u32 old_sta_mask,
+                            u32 new_sta_mask);
+
+#endif /* __iwl_agg_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <linux/crc32.h>
+
+#include <net/mac80211.h>
+
+#include "ap.h"
+#include "hcmd.h"
+#include "tx.h"
+#include "power.h"
+#include "key.h"
+#include "iwl-utils.h"
+
+#include "fw/api/sta.h"
+
+/* Locate the TIM element in @beacon and store its offset in @tim_index
+ * (left untouched if no TIM element is found).
+ */
+void iwl_mld_set_tim_idx(struct iwl_mld *mld, __le32 *tim_index,
+                        u8 *beacon, u32 frame_size)
+{
+       const struct ieee80211_mgmt *mgmt = (const void *)beacon;
+       u32 off;
+
+       /* Skip the fixed beacon fields; IEs start at the variable part. */
+       off = mgmt->u.beacon.variable - beacon;
+
+       /* Walk the IE chain (id, len, data...) looking for the TIM */
+       while (off < frame_size - 2 && beacon[off] != WLAN_EID_TIM)
+               off += beacon[off + 1] + 2;
+
+       if (off < frame_size - 1 && beacon[off] == WLAN_EID_TIM)
+               *tim_index = cpu_to_le32(off);
+       else
+               IWL_WARN(mld, "Unable to find TIM Element in beacon\n");
+}
+
+/*
+ * iwl_mld_get_rate_flags - compute the beacon rate byte for the FW
+ *
+ * If exactly one legacy beacon rate was configured for @band, use it;
+ * otherwise fall back to the lowest usable rate. The returned value is
+ * the FW rate index, OR'd with IWL_MAC_BEACON_CCK for CCK rates.
+ */
+u8 iwl_mld_get_rate_flags(struct iwl_mld *mld,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_vif *vif,
+                         struct ieee80211_bss_conf *link,
+                         enum nl80211_band band)
+{
+       u32 legacy = link->beacon_tx_rate.control[band].legacy;
+       u32 rate_idx, rate_flags = 0, fw_rate;
+
+       /* if beacon rate was configured try using it */
+       if (hweight32(legacy) == 1) {
+               u32 rate = ffs(legacy) - 1;
+               struct ieee80211_supported_band *sband =
+                       mld->hw->wiphy->bands[band];
+
+               rate_idx = sband->bitrates[rate].hw_value;
+       } else {
+               rate_idx = iwl_mld_get_lowest_rate(mld, info, vif);
+       }
+
+       if (rate_idx <= IWL_LAST_CCK_RATE)
+               rate_flags = IWL_MAC_BEACON_CCK;
+
+       /* Legacy rates are indexed as follows:
+        * 0 - 3 for CCK and 0 - 7 for OFDM.
+        */
+       fw_rate = (rate_idx >= IWL_FIRST_OFDM_RATE ?
+                    rate_idx - IWL_FIRST_OFDM_RATE : rate_idx);
+
+       return fw_rate | rate_flags;
+}
+
+/* Send the beacon template to the FW: fragment 0 is the command
+ * header, fragment 1 is the frame itself. IWL_HCMD_DFL_DUP makes the
+ * command layer copy the beacon data (the caller frees the skb).
+ */
+int iwl_mld_send_beacon_template_cmd(struct iwl_mld *mld,
+                                    struct sk_buff *beacon,
+                                    struct iwl_mac_beacon_cmd *cmd)
+{
+       struct iwl_host_cmd hcmd = {
+               .id = BEACON_TEMPLATE_CMD,
+               .data = { cmd, beacon->data },
+               .len = { sizeof(*cmd), beacon->len },
+               .dataflags = { [1] = IWL_HCMD_DFL_DUP },
+       };
+
+       return iwl_mld_send_cmd(mld, &hcmd);
+}
+
+/* Fill the beacon template command from the beacon skb.
+ * Returns 0 on success, -EINVAL if the link or its channel context
+ * cannot be resolved.
+ */
+static int iwl_mld_fill_beacon_template_cmd(struct iwl_mld *mld,
+                                           struct ieee80211_vif *vif,
+                                           struct sk_buff *beacon,
+                                           struct iwl_mac_beacon_cmd *cmd,
+                                           struct ieee80211_bss_conf *link)
+{
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon);
+       struct ieee80211_chanctx_conf *ctx;
+       bool enable_fils;
+       u16 flags = 0;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (WARN_ON(!mld_link))
+               return -EINVAL;
+
+       cmd->link_id = cpu_to_le32(mld_link->fw_id);
+
+       ctx = wiphy_dereference(mld->wiphy, link->chanctx_conf);
+       if (WARN_ON(!ctx || !ctx->def.chan))
+               return -EINVAL;
+
+       /* FILS is enabled on 6 GHz PSC channels and on wide (>= 80 MHz)
+        * 6 GHz channels
+        */
+       enable_fils = cfg80211_channel_is_psc(ctx->def.chan) ||
+               (ctx->def.chan->band == NL80211_BAND_6GHZ &&
+                ctx->def.width >= NL80211_CHAN_WIDTH_80);
+
+       if (enable_fils) {
+               flags |= IWL_MAC_BEACON_FILS;
+               /* short SSID: bit-inverted CRC32 of the SSID */
+               cmd->short_ssid = cpu_to_le32(~crc32_le(~0, vif->cfg.ssid,
+                                                       vif->cfg.ssid_len));
+       }
+
+       cmd->byte_cnt = cpu_to_le16((u16)beacon->len);
+
+       flags |= iwl_mld_get_rate_flags(mld, info, vif, link,
+                                       ctx->def.chan->band);
+
+       cmd->flags = cpu_to_le16(flags);
+
+       /* TIM and broadcast-TWT element offsets are AP-mode only */
+       if (vif->type == NL80211_IFTYPE_AP) {
+               iwl_mld_set_tim_idx(mld, &cmd->tim_idx,
+                                   beacon->data, beacon->len);
+
+               cmd->btwt_offset =
+                       cpu_to_le32(iwl_find_ie_offset(beacon->data,
+                                                      WLAN_EID_S1G_TWT,
+                                                      beacon->len));
+       }
+
+       /* (e)CSA element offsets - presumably used by the FW for the
+        * channel-switch countdown; confirm against FW API docs
+        */
+       cmd->csa_offset =
+               cpu_to_le32(iwl_find_ie_offset(beacon->data,
+                                              WLAN_EID_CHANNEL_SWITCH,
+                                              beacon->len));
+       cmd->ecsa_offset =
+               cpu_to_le32(iwl_find_ie_offset(beacon->data,
+                                              WLAN_EID_EXT_CHANSWITCH_ANN,
+                                              beacon->len));
+
+       return 0;
+}
+
+/* The beacon template for the AP/GO/IBSS has changed and needs update.
+ * Returns 0 on success, -EBUSY while debugfs beacon injection is active,
+ * -ENOMEM if mac80211 could not provide a beacon template, or the error
+ * from filling/sending the template command.
+ */
+int iwl_mld_update_beacon_template(struct iwl_mld *mld,
+                                  struct ieee80211_vif *vif,
+                                  struct ieee80211_bss_conf *link_conf)
+{
+       struct iwl_mac_beacon_cmd cmd = {};
+       struct sk_buff *beacon;
+       int ret;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+#endif
+
+       WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+               vif->type != NL80211_IFTYPE_ADHOC);
+
+       /* compile-time knob: act as a non-transmitting AP */
+       if (IWL_MLD_NON_TRANSMITTING_AP)
+               return 0;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       /* don't overwrite a beacon that was injected via debugfs */
+       if (mld_vif->beacon_inject_active) {
+               IWL_DEBUG_INFO(mld,
+                              "Can't update template, beacon injection's active\n");
+               return -EBUSY;
+       }
+
+#endif
+       beacon = ieee80211_beacon_get_template(mld->hw, vif, NULL,
+                                              link_conf->link_id);
+       if (!beacon)
+               return -ENOMEM;
+
+       ret = iwl_mld_fill_beacon_template_cmd(mld, vif, beacon, &cmd,
+                                              link_conf);
+
+       if (!ret)
+               ret = iwl_mld_send_beacon_template_cmd(mld, beacon, &cmd);
+
+       dev_kfree_skb(beacon);
+
+       return ret;
+}
+
+/* Remove @key from the link's list of "early" keys (keys configured
+ * before the AP was started, and thus not yet sent to the FW).
+ */
+void iwl_mld_free_ap_early_key(struct iwl_mld *mld,
+                              struct ieee80211_key_conf *key,
+                              struct iwl_mld_vif *mld_vif)
+{
+       struct iwl_mld_link *link;
+
+       /* early keys are stored per-link, so a link_id must be set */
+       if (WARN_ON(key->link_id < 0))
+               return;
+
+       link = iwl_mld_link_dereference_check(mld_vif, key->link_id);
+       if (WARN_ON(!link))
+               return;
+
+       for (int i = 0; i < ARRAY_SIZE(link->ap_early_keys); i++) {
+               if (link->ap_early_keys[i] != key)
+                       continue;
+               /* Those weren't sent to FW, so should be marked as INVALID */
+               if (WARN_ON(key->hw_key_idx != STA_KEY_IDX_INVALID))
+                       key->hw_key_idx = STA_KEY_IDX_INVALID;
+               link->ap_early_keys[i] = NULL;
+       }
+}
+
+/* Park @key in the link's "early" key list until the AP is started.
+ * Returns 0 on success, -EINVAL on a bad link, -ENOSPC if no slot is
+ * free.
+ */
+int iwl_mld_store_ap_early_key(struct iwl_mld *mld,
+                              struct ieee80211_key_conf *key,
+                              struct iwl_mld_vif *mld_vif)
+{
+       struct iwl_mld_link *link;
+       int slot;
+
+       if (WARN_ON(key->link_id < 0))
+               return -EINVAL;
+
+       link = iwl_mld_link_dereference_check(mld_vif, key->link_id);
+       if (WARN_ON(!link))
+               return -EINVAL;
+
+       /* use the first free slot */
+       for (slot = 0; slot < ARRAY_SIZE(link->ap_early_keys); slot++) {
+               if (link->ap_early_keys[slot])
+                       continue;
+               link->ap_early_keys[slot] = key;
+               return 0;
+       }
+
+       return -ENOSPC;
+}
+
+/* Add to the FW the keys that were configured by the upper layers
+ * before the AP was started (and thus could not be installed then).
+ * Returns 0 on success or the first failing iwl_mld_add_key() error.
+ */
+static int iwl_mld_send_ap_early_keys(struct iwl_mld *mld,
+                                     struct ieee80211_vif *vif,
+                                     struct ieee80211_bss_conf *link)
+{
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+       int ret = 0;
+
+       /* Check the derived pointer - it is the one dereferenced below
+        * and may be NULL even for a non-NULL @link.
+        */
+       if (WARN_ON(!mld_link))
+               return -EINVAL;
+
+       for (int i = 0; i < ARRAY_SIZE(mld_link->ap_early_keys); i++) {
+               struct ieee80211_key_conf *key = mld_link->ap_early_keys[i];
+
+               if (!key)
+                       continue;
+
+               /* remove from the pending list before installing */
+               mld_link->ap_early_keys[i] = NULL;
+
+               ret = iwl_mld_add_key(mld, vif, NULL, key);
+               if (ret)
+                       break;
+       }
+       return ret;
+}
+
+/* Start an AP/GO/IBSS: update the beacon template, configure the link
+ * in the FW, add the multicast and broadcast stations, and install any
+ * keys stored before start. On failure, already-added stations are
+ * removed again in reverse order.
+ */
+int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw,
+                         struct ieee80211_vif *vif,
+                         struct ieee80211_bss_conf *link)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       int ret;
+
+       if (vif->type == NL80211_IFTYPE_AP)
+               iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link);
+
+       ret = iwl_mld_update_beacon_template(mld, vif, link);
+       if (ret)
+               return ret;
+
+       /* the link should be already activated when assigning chan context,
+        * and LINK_CONTEXT_MODIFY_EHT_PARAMS is deprecated
+        */
+       ret = iwl_mld_change_link_in_fw(mld, link,
+                                       LINK_CONTEXT_MODIFY_ALL &
+                                       ~(LINK_CONTEXT_MODIFY_ACTIVE |
+                                         LINK_CONTEXT_MODIFY_EHT_PARAMS));
+       if (ret)
+               return ret;
+
+       ret = iwl_mld_add_mcast_sta(mld, vif, link);
+       if (ret)
+               return ret;
+
+       ret = iwl_mld_add_bcast_sta(mld, vif, link);
+       if (ret)
+               goto rm_mcast;
+
+       /* Those keys were configured by the upper layers before starting the
+        * AP. Now that it is started and the bcast and mcast sta were added to
+        * the FW, we can add the keys too.
+        */
+       ret = iwl_mld_send_ap_early_keys(mld, vif, link);
+       if (ret)
+               goto rm_bcast;
+
+       if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
+               iwl_mld_vif_update_low_latency(mld, vif, true,
+                                              LOW_LATENCY_VIF_TYPE);
+
+       mld_vif->ap_ibss_active = true;
+
+       /* NOTE(review): refresh the P2P Device MAC when a P2P GO starts -
+        * confirm the FW requirement behind this modify
+        */
+       if (vif->p2p && mld->p2p_device_vif)
+               return iwl_mld_mac_fw_action(mld, mld->p2p_device_vif,
+                                            FW_CTXT_ACTION_MODIFY);
+
+       return 0;
+rm_bcast:
+       iwl_mld_remove_bcast_sta(mld, vif, link);
+rm_mcast:
+       iwl_mld_remove_mcast_sta(mld, vif, link);
+       return ret;
+}
+
+/* Stop an AP/GO/IBSS: tear down in (roughly) the reverse order of
+ * iwl_mld_start_ap_ibss().
+ */
+void iwl_mld_stop_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                         struct ieee80211_bss_conf *link)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+       mld_vif->ap_ibss_active = false;
+
+       /* mirror of the P2P Device MAC update done on start */
+       if (vif->p2p && mld->p2p_device_vif)
+               iwl_mld_mac_fw_action(mld, mld->p2p_device_vif,
+                                     FW_CTXT_ACTION_MODIFY);
+
+       if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
+               iwl_mld_vif_update_low_latency(mld, vif, false,
+                                              LOW_LATENCY_VIF_TYPE);
+
+       iwl_mld_remove_bcast_sta(mld, vif, link);
+
+       iwl_mld_remove_mcast_sta(mld, vif, link);
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_ap_h__
+#define __iwl_ap_h__
+
+#include "mld.h"
+#include "iface.h"
+
+#include "fw/api/tx.h"
+
+/* AP/IBSS (beaconing interface) helpers for the iwlwifi MLD op-mode */
+
+int iwl_mld_update_beacon_template(struct iwl_mld *mld,
+                                  struct ieee80211_vif *vif,
+                                  struct ieee80211_bss_conf *link_conf);
+
+int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw,
+                         struct ieee80211_vif *vif,
+                         struct ieee80211_bss_conf *link);
+
+void iwl_mld_stop_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                         struct ieee80211_bss_conf *link);
+
+/* Store/drop keys configured before the AP was started */
+int iwl_mld_store_ap_early_key(struct iwl_mld *mld,
+                              struct ieee80211_key_conf *key,
+                              struct iwl_mld_vif *mld_vif);
+
+void iwl_mld_free_ap_early_key(struct iwl_mld *mld,
+                              struct ieee80211_key_conf *key,
+                              struct iwl_mld_vif *mld_vif);
+
+u8 iwl_mld_get_rate_flags(struct iwl_mld *mld,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_vif *vif,
+                         struct ieee80211_bss_conf *link,
+                         enum nl80211_band band);
+
+void iwl_mld_set_tim_idx(struct iwl_mld *mld, __le32 *tim_index,
+                        u8 *beacon, u32 frame_size);
+
+int iwl_mld_send_beacon_template_cmd(struct iwl_mld *mld,
+                                    struct sk_buff *beacon,
+                                    struct iwl_mac_beacon_cmd *cmd);
+
+#endif /* __iwl_ap_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "fw/api/coex.h"
+
+#include "coex.h"
+#include "mld.h"
+#include "hcmd.h"
+#include "mlo.h"
+
+/* Send the initial BT-coex configuration: NW mode with the MPLUT and
+ * high-band retention modules enabled.
+ */
+int iwl_mld_send_bt_init_conf(struct iwl_mld *mld)
+{
+       struct iwl_bt_coex_cmd cmd = {};
+
+       cmd.mode = cpu_to_le32(BT_COEX_NW);
+       cmd.enabled_modules = cpu_to_le32(BT_COEX_MPLUT_ENABLED |
+                                         BT_COEX_HIGH_BAND_RET);
+
+       return iwl_mld_send_cmd_pdu(mld, BT_CONFIG, &cmd);
+}
+
+/* Handle the BT coex profile notification: derive the BT on/off state
+ * and, on a change, re-evaluate EMLSR.
+ */
+void iwl_mld_handle_bt_coex_notif(struct iwl_mld *mld,
+                                 struct iwl_rx_packet *pkt)
+{
+       const struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
+       const struct iwl_bt_coex_profile_notif zero_notif = {};
+       /* zeroed structure means that BT is OFF */
+       bool bt_is_active = memcmp(notif, &zero_notif, sizeof(*notif));
+
+       /* nothing to do if the state didn't change */
+       if (bt_is_active == mld->bt_is_active)
+               return;
+
+       IWL_DEBUG_INFO(mld, "BT was turned %s\n", bt_is_active ? "ON" : "OFF");
+
+       mld->bt_is_active = bt_is_active;
+
+       iwl_mld_emlsr_check_bt(mld);
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_coex_h__
+#define __iwl_mld_coex_h__
+
+#include "mld.h"
+
+/* BT-coexistence: initial configuration and FW notification handling */
+
+int iwl_mld_send_bt_init_conf(struct iwl_mld *mld);
+
+void iwl_mld_handle_bt_coex_notif(struct iwl_mld *mld,
+                                 struct iwl_rx_packet *pkt);
+
+#endif /* __iwl_mld_coex_h__ */
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_constants_h__
+#define __iwl_mld_constants_h__
+
+#define IWL_MLD_MISSED_BEACONS_SINCE_RX_THOLD                  4
+#define IWL_MLD_MISSED_BEACONS_THRESHOLD                       8
+#define IWL_MLD_MISSED_BEACONS_THRESHOLD_LONG                  19
+#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS               5
+#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH                       15
+#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED     11
+#define IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH                       -72
+
+#define IWL_MLD_DEFAULT_PS_TX_DATA_TIMEOUT     (100 * USEC_PER_MSEC)
+#define IWL_MLD_DEFAULT_PS_RX_DATA_TIMEOUT     (100 * USEC_PER_MSEC)
+#define IWL_MLD_WOWLAN_PS_TX_DATA_TIMEOUT      (10 * USEC_PER_MSEC)
+#define IWL_MLD_WOWLAN_PS_RX_DATA_TIMEOUT      (10 * USEC_PER_MSEC)
+#define IWL_MLD_SHORT_PS_TX_DATA_TIMEOUT       (2 * 1024) /* defined in TU */
+#define IWL_MLD_SHORT_PS_RX_DATA_TIMEOUT       (40 * 1024) /* defined in TU */
+
+#define IWL_MLD_UAPSD_RX_DATA_TIMEOUT          (50 * USEC_PER_MSEC)
+#define IWL_MLD_UAPSD_TX_DATA_TIMEOUT          (50 * USEC_PER_MSEC)
+
+#define IWL_MLD_PS_SNOOZE_INTERVAL             25
+#define IWL_MLD_PS_SNOOZE_WINDOW               50
+
+#define IWL_MLD_PS_SNOOZE_HEAVY_TX_THLD_PACKETS        30
+#define IWL_MLD_PS_SNOOZE_HEAVY_RX_THLD_PACKETS        20
+
+#define IWL_MLD_PS_HEAVY_TX_THLD_PERCENT       50
+#define IWL_MLD_PS_HEAVY_RX_THLD_PERCENT       50
+#define IWL_MLD_PS_HEAVY_TX_THLD_PACKETS       20
+#define IWL_MLD_PS_HEAVY_RX_THLD_PACKETS       8
+
+#define IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC      30
+#define IWL_MLD_SCAN_EXPIRE_TIME_SEC           20
+
+#define IWL_MLD_TPT_COUNT_WINDOW (5 * HZ)
+
+/* OMI reduced BW thresholds (channel load percentage) */
+#define IWL_MLD_OMI_ENTER_CHAN_LOAD            10
+#define IWL_MLD_OMI_EXIT_CHAN_LOAD_160         20
+#define IWL_MLD_OMI_EXIT_CHAN_LOAD_320         30
+/* time (in milliseconds) to let AP "settle" the OMI */
+#define IWL_MLD_OMI_AP_SETTLE_DELAY            27
+/* time (in milliseconds) to not enter OMI reduced BW after leaving */
+#define IWL_MLD_OMI_EXIT_PROTECTION            5000
+
+#define IWL_MLD_DIS_RANDOM_FW_ID                false
+#define IWL_MLD_D3_DEBUG                        false
+#define IWL_MLD_NON_TRANSMITTING_AP            false
+#define IWL_MLD_6GHZ_PASSIVE_SCAN_TIMEOUT       3000 /* in seconds */
+#define IWL_MLD_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60   /* in seconds */
+#define IWL_MLD_CONN_LISTEN_INTERVAL           10
+#define IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE 0
+#define IWL_MLD_AUTO_EML_ENABLE                        true
+
+/* RSSI thresholds (dBm) per channel width, for EMLSR link selection */
+#define IWL_MLD_HIGH_RSSI_THRESH_20MHZ         -67
+#define IWL_MLD_LOW_RSSI_THRESH_20MHZ          -72
+#define IWL_MLD_HIGH_RSSI_THRESH_40MHZ         -64
+#define IWL_MLD_LOW_RSSI_THRESH_40MHZ          -72
+#define IWL_MLD_HIGH_RSSI_THRESH_80MHZ         -61
+#define IWL_MLD_LOW_RSSI_THRESH_80MHZ          -72
+#define IWL_MLD_HIGH_RSSI_THRESH_160MHZ                -58
+#define IWL_MLD_LOW_RSSI_THRESH_160MHZ         -72
+
+#define IWL_MLD_ENTER_EMLSR_TPT_THRESH         400
+#define IWL_MLD_CHAN_LOAD_THRESH               2 /* in percentage */
+
+/* FTM (fine timing measurement) initiator/responder defaults */
+#define IWL_MLD_FTM_INITIATOR_ALGO             IWL_TOF_ALGO_TYPE_MAX_LIKE
+#define IWL_MLD_FTM_INITIATOR_DYNACK           true
+#define IWL_MLD_FTM_LMR_FEEDBACK_TERMINATE     false
+#define IWL_MLD_FTM_TEST_INCORRECT_SAC         false
+#define IWL_MLD_FTM_R2I_MAX_REP                        7
+#define IWL_MLD_FTM_I2R_MAX_REP                        7
+#define IWL_MLD_FTM_R2I_MAX_STS                        1
+#define IWL_MLD_FTM_I2R_MAX_STS                        1
+#define IWL_MLD_FTM_R2I_MAX_TOTAL_LTF          3
+#define IWL_MLD_FTM_I2R_MAX_TOTAL_LTF          3
+#define IWL_MLD_FTM_RESP_NDP_SUPPORT           true
+#define IWL_MLD_FTM_RESP_LMR_FEEDBACK_SUPPORT  true
+#define IWL_MLD_FTM_NON_TB_MIN_TIME_BETWEEN_MSR        7
+#define IWL_MLD_FTM_NON_TB_MAX_TIME_BETWEEN_MSR        1000
+
+#endif /* __iwl_mld_constants_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include "mld.h"
+
+#include "d3.h"
+#include "power.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "mcc.h"
+#include "sta.h"
+#include "mlo.h"
+
+#include "fw/api/d3.h"
+#include "fw/api/offload.h"
+#include "fw/api/sta.h"
+#include "fw/dbg.h"
+
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include <linux/bitops.h>
+
+/**
+ * enum iwl_mld_d3_notif - d3 notifications
+ * @IWL_D3_NOTIF_WOWLAN_INFO: WOWLAN_INFO_NOTIF is expected/was received
+ * @IWL_D3_NOTIF_WOWLAN_WAKE_PKT: WOWLAN_WAKE_PKT_NOTIF is expected/was received
+ * @IWL_D3_NOTIF_PROT_OFFLOAD: PROT_OFFLOAD_NOTIF is expected/was received
+ * @IWL_D3_ND_MATCH_INFO: OFFLOAD_MATCH_INFO_NOTIF is expected/was received
+ * @IWL_D3_NOTIF_D3_END_NOTIF: D3_END_NOTIF is expected/was received
+ */
+enum iwl_mld_d3_notif {
+       IWL_D3_NOTIF_WOWLAN_INFO =      BIT(0),
+       IWL_D3_NOTIF_WOWLAN_WAKE_PKT =  BIT(1),
+       IWL_D3_NOTIF_PROT_OFFLOAD =     BIT(2),
+       IWL_D3_ND_MATCH_INFO      =     BIT(3),
+       IWL_D3_NOTIF_D3_END_NOTIF =     BIT(4)
+};
+
+/* Iterator state for walking the keys on resume: collects per-type
+ * ciphers and a key count; @unhandled_cipher presumably marks keys
+ * whose cipher the resume flow does not process - confirm in iterator.
+ */
+struct iwl_mld_resume_key_iter_data {
+       struct iwl_mld *mld;
+       struct iwl_mld_wowlan_status *wowlan_status;
+       u32 num_keys, gtk_cipher, igtk_cipher, bigtk_cipher;
+       bool unhandled_cipher;
+};
+
+/* Iterator state for collecting key material before suspend: RSC/TSC
+ * parameters, GTK indices, and the ciphers to program into the FW.
+ */
+struct iwl_mld_suspend_key_iter_data {
+       struct iwl_wowlan_rsc_tsc_params_cmd *rsc;
+       bool have_rsc;
+       int gtks;
+       int found_gtk_idx[4];
+       __le32 gtk_cipher;
+       __le32 igtk_cipher;
+       __le32 bigtk_cipher;
+};
+
+/* Multicast (GTK/IGTK/BIGTK) key material and receive sequence
+ * counters reported by the FW on resume. The union holds per-TID
+ * AES/TKIP counters for a GTK, or a single CMAC/GMAC counter for an
+ * IGTK/BIGTK.
+ */
+struct iwl_mld_mcast_key_data {
+       u8 key[WOWLAN_KEY_MAX_SIZE];
+       u8 len;
+       u8 flags;
+       u8 id;
+       union {
+               struct {
+                       struct ieee80211_key_seq aes_seq[IWL_MAX_TID_COUNT];
+                       struct ieee80211_key_seq tkip_seq[IWL_MAX_TID_COUNT];
+               } gtk;
+               struct {
+                       struct ieee80211_key_seq cmac_gmac_seq;
+               } igtk_bigtk;
+       };
+
+};
+
+/**
+ * struct iwl_mld_wowlan_status - contains wowlan status data from
+ * all wowlan notifications
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched patterns on packets
+ * @last_qos_seq: QoS sequence counter of offloaded tid
+ * @num_of_gtk_rekeys: number of GTK rekeys during D3
+ * @tid_offloaded_tx: tid used by the firmware to transmit data packets
+ *     while in wowlan
+ * @wake_packet: wakeup packet received
+ * @wake_packet_length: wake packet length
+ * @wake_packet_bufsize: wake packet bufsize
+ * @gtk: data of the last two used gtk's by the FW upon resume
+ * @igtk: data of the last used igtk by the FW upon resume
+ * @bigtk: data of the last two used gtk's by the FW upon resume
+ * @ptk: last seq numbers per tid passed by the FW,
+ *     holds both in tkip and aes formats
+ */
+struct iwl_mld_wowlan_status {
+       u32 wakeup_reasons;
+       u64 replay_ctr;
+       u16 pattern_number;
+       u16 last_qos_seq;
+       u32 num_of_gtk_rekeys;
+       u8 tid_offloaded_tx;
+       u8 *wake_packet;
+       u32 wake_packet_length;
+       u32 wake_packet_bufsize;
+       struct iwl_mld_mcast_key_data gtk[WOWLAN_GTK_KEYS_NUM];
+       struct iwl_mld_mcast_key_data igtk;
+       struct iwl_mld_mcast_key_data bigtk[WOWLAN_BIGTK_KEYS_NUM];
+       struct {
+               struct ieee80211_key_seq aes_seq[IWL_MAX_TID_COUNT];
+               struct ieee80211_key_seq tkip_seq[IWL_MAX_TID_COUNT];
+
+       } ptk;
+};
+
+/* Buffer big enough for match info of all configurable scan profiles */
+#define NETDETECT_QUERY_BUF_LEN \
+       (sizeof(struct iwl_scan_offload_profile_match) * \
+        IWL_SCAN_MAX_PROFILES_V2)
+
+/**
+ * struct iwl_mld_netdetect_res - contains netdetect results from
+ * match_info_notif
+ * @matched_profiles: bitmap of matched profiles, referencing the
+ *     matches passed in the scan offload request
+ * @matches: array of match information, one for each match
+ */
+struct iwl_mld_netdetect_res {
+       u32 matched_profiles;
+       u8 matches[NETDETECT_QUERY_BUF_LEN];
+};
+
+/**
+ * struct iwl_mld_resume_data - d3 resume flow data
+ * @notifs_expected: bitmap of expected notifications from fw,
+ *     see &enum iwl_mld_d3_notif
+ * @notifs_received: bitmap of received notifications from fw,
+ *     see &enum iwl_mld_d3_notif
+ * @d3_end_flags: bitmap of flags from d3_end_notif
+ * @notif_handling_err: error handling one of the resume notifications
+ * @wowlan_status: wowlan status data from all wowlan notifications
+ * @netdetect_res: contains netdetect results from match_info_notif
+ */
+struct iwl_mld_resume_data {
+       u32 notifs_expected;
+       u32 notifs_received;
+       u32 d3_end_flags;
+       bool notif_handling_err;
+       struct iwl_mld_wowlan_status *wowlan_status;
+       struct iwl_mld_netdetect_res *netdetect_res;
+};
+
+/* Wakeup reasons that carry the triggering packet along */
+#define IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT \
+       (IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET | \
+       IWL_WOWLAN_WAKEUP_BY_PATTERN | \
+       IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN |\
+       IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD |\
+       IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN |\
+       IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD)
+
+/* TID the FW uses for offloaded TX while the host is suspended */
+#define IWL_WOWLAN_OFFLOAD_TID 0
+
+/* mac80211 set_rekey_data callback: cache the GTK rekey material
+ * (KEK/KCK/replay counter) for use in the WoWLAN suspend flow.
+ */
+void iwl_mld_set_rekey_data(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           struct cfg80211_gtk_rekey_data *data)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_wowlan_data *wowlan_data = &mld_vif->wowlan_data;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       wowlan_data->rekey_data.kek_len = data->kek_len;
+       wowlan_data->rekey_data.kck_len = data->kck_len;
+       memcpy(wowlan_data->rekey_data.kek, data->kek, data->kek_len);
+       memcpy(wowlan_data->rekey_data.kck, data->kck, data->kck_len);
+       /* only the low byte is kept - presumably the FW field is 8 bits
+        * wide; TODO confirm against the FW API
+        */
+       wowlan_data->rekey_data.akm = data->akm & 0xFF;
+       /* cfg80211 passes the counter big-endian; store it little-endian */
+       wowlan_data->rekey_data.replay_ctr =
+               cpu_to_le64(be64_to_cpup((const __be64 *)data->replay_ctr));
+       wowlan_data->rekey_data.valid = true;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/* mac80211 ipv6_addr_change callback: snapshot the interface's IPv6
+ * addresses (capped at IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX) for NS
+ * offload, marking tentative ones in a bitmap.
+ */
+void iwl_mld_ipv6_addr_change(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             struct inet6_dev *idev)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_wowlan_data *wowlan_data = &mld_vif->wowlan_data;
+       struct inet6_ifaddr *ifa;
+       int idx = 0;
+
+       memset(wowlan_data->tentative_addrs, 0,
+              sizeof(wowlan_data->tentative_addrs));
+
+       /* the address list is protected by the idev lock (bh context) */
+       read_lock_bh(&idev->lock);
+       list_for_each_entry(ifa, &idev->addr_list, if_list) {
+               wowlan_data->target_ipv6_addrs[idx] = ifa->addr;
+               if (ifa->flags & IFA_F_TENTATIVE)
+                       __set_bit(idx, wowlan_data->tentative_addrs);
+               idx++;
+               if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
+                       break;
+       }
+       read_unlock_bh(&idev->lock);
+
+       wowlan_data->num_target_ipv6_addrs = idx;
+}
+#endif
+
+/* Health of the FW after suspend, derived from the error tables */
+enum rt_status {
+       FW_ALIVE,
+       FW_NEEDS_RESET,
+       FW_ERROR,
+};
+
+/* Scan the LMAC/UMAC error tables. An RF-kill indication is reported
+ * to userspace as a wakeup and mapped to FW_NEEDS_RESET; any other
+ * logged error yields FW_ERROR.
+ */
+static enum rt_status iwl_mld_check_err_tables(struct iwl_mld *mld,
+                                              struct ieee80211_vif *vif)
+{
+       u32 err_id;
+
+       /* check for lmac1 error */
+       if (iwl_fwrt_read_err_table(mld->trans,
+                                   mld->trans->dbg.lmac_error_event_table[0],
+                                   &err_id)) {
+               if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) {
+                       struct cfg80211_wowlan_wakeup wakeup = {
+                               .rfkill_release = true,
+                       };
+                       ieee80211_report_wowlan_wakeup(vif, &wakeup,
+                                                      GFP_KERNEL);
+
+                       return FW_NEEDS_RESET;
+               }
+               return FW_ERROR;
+       }
+
+       /* check if we have lmac2 set and check for error */
+       if (iwl_fwrt_read_err_table(mld->trans,
+                                   mld->trans->dbg.lmac_error_event_table[1],
+                                   NULL))
+               return FW_ERROR;
+
+       /* check for umac error */
+       if (iwl_fwrt_read_err_table(mld->trans,
+                                   mld->trans->dbg.umac_error_event_table,
+                                   NULL))
+               return FW_ERROR;
+
+       return FW_ALIVE;
+}
+
+/* Decide whether the FW must be restarted after resume; on a genuine FW
+ * error, also collect the error logs and trigger the debug time point.
+ */
+static bool iwl_mld_fw_needs_restart(struct iwl_mld *mld,
+                                    struct ieee80211_vif *vif)
+{
+       switch (iwl_mld_check_err_tables(mld, vif)) {
+       case FW_ALIVE:
+               return false;
+       case FW_ERROR:
+               IWL_ERR(mld, "FW Error occurred during suspend\n");
+               iwl_fwrt_dump_error_logs(&mld->fwrt);
+               iwl_dbg_tlv_time_point(&mld->fwrt,
+                                      IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
+               break;
+       case FW_NEEDS_RESET:
+       default:
+               /* rfkill wakeup: restart without dumping */
+               break;
+       }
+
+       return true;
+}
+
+/* Replace any running scheduled scan with a net-detect scan that will
+ * keep matching while the system is suspended. No IEs are attached.
+ */
+static int
+iwl_mld_netdetect_config(struct iwl_mld *mld,
+                        struct ieee80211_vif *vif,
+                        const struct cfg80211_wowlan *wowlan)
+{
+       struct ieee80211_scan_ies no_ies = {};
+       int ret;
+
+       /* an ongoing scheduled scan must be torn down first */
+       ret = iwl_mld_scan_stop(mld, IWL_MLD_SCAN_SCHED, true);
+       if (ret)
+               return ret;
+
+       return iwl_mld_sched_scan_start(mld, vif, wowlan->nd_config, &no_ies,
+                                       IWL_MLD_SCAN_NETDETECT);
+}
+
+/* Unpack a FW little-endian RSC into mac80211's TKIP IV representation */
+static void
+iwl_mld_le64_to_tkip_seq(__le64 le_pn, struct ieee80211_key_seq *seq)
+{
+       u64 rsc = le64_to_cpu(le_pn);
+
+       /* TKIP splits the 48-bit counter into 16-bit iv16 + 32-bit iv32 */
+       seq->tkip.iv16 = (u16)(rsc & 0xffff);
+       seq->tkip.iv32 = (u32)(rsc >> 16);
+}
+
+/* Unpack a FW little-endian RSC into mac80211's CCMP PN representation */
+static void
+iwl_mld_le64_to_aes_seq(__le64 le_pn, struct ieee80211_key_seq *seq)
+{
+       u64 rsc = le64_to_cpu(le_pn);
+       int i;
+
+       /* mac80211 stores the 48-bit PN big-endian across pn[0..5] */
+       for (i = 0; i < 6; i++)
+               seq->ccmp.pn[i] = (u8)(rsc >> (40 - 8 * i));
+}
+
+/* Fill a GTK's per-TID RX sequence counters from the FW's multicast RSC
+ * slot @rsc_idx. Out-of-range indices (bad FW id map) are silently
+ * ignored, leaving the seq arrays zeroed.
+ */
+static void
+iwl_mld_convert_gtk_resume_seq(struct iwl_mld_mcast_key_data *gtk_data,
+                              const struct iwl_wowlan_all_rsc_tsc_v5 *sc,
+                              int rsc_idx)
+{
+       struct ieee80211_key_seq *aes_seq = gtk_data->gtk.aes_seq;
+       struct ieee80211_key_seq *tkip_seq = gtk_data->gtk.tkip_seq;
+
+       if (rsc_idx >= ARRAY_SIZE(sc->mcast_rsc))
+               return;
+
+       /* We store both the TKIP and AES representations coming from the
+        * FW because we decode the data from there before we iterate
+        * the keys and know which type is used.
+        */
+       for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+               iwl_mld_le64_to_tkip_seq(sc->mcast_rsc[rsc_idx][tid],
+                                        &tkip_seq[tid]);
+               iwl_mld_le64_to_aes_seq(sc->mcast_rsc[rsc_idx][tid],
+                                       &aes_seq[tid]);
+       }
+}
+
+/* Convert the FW's GTK notification entries into wowlan_status->gtk[],
+ * compacting: empty FW slots are skipped so valid keys occupy
+ * consecutive status indices. Also decodes each key's RX sequence
+ * counters and, for TKIP-sized keys, appends the RX MIC key.
+ */
+static void
+iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
+                               struct iwl_mld_wowlan_status *wowlan_status,
+                               const struct iwl_wowlan_gtk_status_v3 *gtk_data,
+                               const struct iwl_wowlan_all_rsc_tsc_v5 *sc)
+{
+       int status_idx = 0;
+
+       BUILD_BUG_ON(sizeof(wowlan_status->gtk[0].key) <
+                    sizeof(gtk_data[0].key));
+       BUILD_BUG_ON(ARRAY_SIZE(wowlan_status->gtk) < WOWLAN_GTK_KEYS_NUM);
+
+       for (int notif_idx = 0; notif_idx < ARRAY_SIZE(wowlan_status->gtk);
+            notif_idx++) {
+               int rsc_idx;
+
+               /* a zero key_len marks an unused FW slot */
+               if (!(gtk_data[notif_idx].key_len))
+                       continue;
+
+               wowlan_status->gtk[status_idx].len =
+                       gtk_data[notif_idx].key_len;
+               wowlan_status->gtk[status_idx].flags =
+                       gtk_data[notif_idx].key_flags;
+               wowlan_status->gtk[status_idx].id =
+                       wowlan_status->gtk[status_idx].flags &
+                       IWL_WOWLAN_GTK_IDX_MASK;
+               memcpy(wowlan_status->gtk[status_idx].key,
+                      gtk_data[notif_idx].key,
+                      sizeof(gtk_data[notif_idx].key));
+
+               /* The rsc for both gtk keys are stored in gtk[0]->sc->mcast_rsc
+                * The gtk ids can be any two numbers between 0 and 3,
+                * the id_map maps between the key id and the index in sc->mcast
+                */
+               rsc_idx =
+                       sc->mcast_key_id_map[wowlan_status->gtk[status_idx].id];
+               iwl_mld_convert_gtk_resume_seq(&wowlan_status->gtk[status_idx],
+                                              sc, rsc_idx);
+
+               /* if it's as long as the TKIP encryption key, copy MIC key */
+               if (wowlan_status->gtk[status_idx].len ==
+                   NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
+                       memcpy(wowlan_status->gtk[status_idx].key +
+                              NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+                              gtk_data[notif_idx].tkip_mic_key,
+                              sizeof(gtk_data[notif_idx].tkip_mic_key));
+               status_idx++;
+       }
+}
+
+/* Decode the unicast (PTK) RSCs from the FW into both TKIP and AES
+ * representations; the later key iteration picks the one matching the
+ * installed PTK cipher.
+ */
+static void
+iwl_mld_convert_ptk_resume_seq(struct iwl_mld *mld,
+                              struct iwl_mld_wowlan_status *wowlan_status,
+                              const struct iwl_wowlan_all_rsc_tsc_v5 *sc)
+{
+       BUILD_BUG_ON(ARRAY_SIZE(sc->ucast_rsc) != IWL_MAX_TID_COUNT);
+
+       for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+               __le64 rsc = sc->ucast_rsc[tid];
+
+               iwl_mld_le64_to_tkip_seq(rsc,
+                                        &wowlan_status->ptk.tkip_seq[tid]);
+               iwl_mld_le64_to_aes_seq(rsc,
+                                       &wowlan_status->ptk.aes_seq[tid]);
+       }
+}
+
+/* Convert the FW's IGTK/BIGTK IPN (little-endian byte order) into the
+ * big-endian PN mac80211 expects, writing it to both the CMAC and GMAC
+ * views of the union since the cipher is not known at this point.
+ */
+static void
+iwl_mld_convert_mcast_ipn(struct iwl_mld_mcast_key_data *key_status,
+                         const struct iwl_wowlan_igtk_status *key)
+{
+       struct ieee80211_key_seq *seq =
+               &key_status->igtk_bigtk.cmac_gmac_seq;
+       u8 ipn_len = ARRAY_SIZE(key->ipn);
+
+       BUILD_BUG_ON(ipn_len != ARRAY_SIZE(seq->aes_gmac.pn));
+       BUILD_BUG_ON(ipn_len != ARRAY_SIZE(seq->aes_cmac.pn));
+       BUILD_BUG_ON(offsetof(struct ieee80211_key_seq, aes_gmac) !=
+                    offsetof(struct ieee80211_key_seq, aes_cmac));
+
+       /* mac80211 expects big endian for memcmp() to work, convert.
+        * We don't have the key cipher yet so copy to both to cmac and gmac
+        */
+       for (int i = 0; i < ipn_len; i++) {
+               seq->aes_gmac.pn[i] = key->ipn[ipn_len - i - 1];
+               seq->aes_cmac.pn[i] = key->ipn[ipn_len - i - 1];
+       }
+}
+
+/* Copy the single IGTK reported by FW into wowlan_status. The key index
+ * is rebased onto mac80211's IGTK range (4..5) by adding
+ * WOWLAN_IGTK_MIN_INDEX. A zero key_len means no IGTK was reported.
+ */
+static void
+iwl_mld_convert_igtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
+                                const struct iwl_wowlan_igtk_status *igtk)
+{
+       BUILD_BUG_ON(sizeof(wowlan_status->igtk.key) < sizeof(igtk->key));
+
+       if (!igtk->key_len)
+               return;
+
+       wowlan_status->igtk.len = igtk->key_len;
+       wowlan_status->igtk.flags = igtk->key_flags;
+       wowlan_status->igtk.id =
+               u32_get_bits(igtk->key_flags,
+                            IWL_WOWLAN_IGTK_BIGTK_IDX_MASK) +
+               WOWLAN_IGTK_MIN_INDEX;
+
+       memcpy(wowlan_status->igtk.key, igtk->key, sizeof(igtk->key));
+       iwl_mld_convert_mcast_ipn(&wowlan_status->igtk, igtk);
+}
+
+/* Copy the BIGTKs reported by FW into wowlan_status->bigtk[], skipping
+ * empty slots so valid keys are compacted to the front. Key indices are
+ * rebased onto mac80211's BIGTK range (6..7) via WOWLAN_BIGTK_MIN_INDEX.
+ */
+static void
+iwl_mld_convert_bigtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
+                                 const struct iwl_wowlan_igtk_status *bigtk)
+{
+       int status_idx = 0;
+
+       BUILD_BUG_ON(ARRAY_SIZE(wowlan_status->bigtk) < WOWLAN_BIGTK_KEYS_NUM);
+
+       for (int notif_idx = 0; notif_idx < WOWLAN_BIGTK_KEYS_NUM;
+            notif_idx++) {
+               /* zero key_len marks an unused slot */
+               if (!bigtk[notif_idx].key_len)
+                       continue;
+
+               wowlan_status->bigtk[status_idx].len = bigtk[notif_idx].key_len;
+               wowlan_status->bigtk[status_idx].flags =
+                       bigtk[notif_idx].key_flags;
+               wowlan_status->bigtk[status_idx].id =
+                       u32_get_bits(bigtk[notif_idx].key_flags,
+                                    IWL_WOWLAN_IGTK_BIGTK_IDX_MASK)
+                       + WOWLAN_BIGTK_MIN_INDEX;
+
+               BUILD_BUG_ON(sizeof(wowlan_status->bigtk[status_idx].key) <
+                            sizeof(bigtk[notif_idx].key));
+               memcpy(wowlan_status->bigtk[status_idx].key,
+                      bigtk[notif_idx].key, sizeof(bigtk[notif_idx].key));
+               iwl_mld_convert_mcast_ipn(&wowlan_status->bigtk[status_idx],
+                                         &bigtk[notif_idx]);
+               status_idx++;
+       }
+}
+
+/* Parse the WoWLAN info notification into wowlan_status.
+ * Returns true on a malformed notification (caller should treat the
+ * resume flow as failed), false on success.
+ */
+static bool
+iwl_mld_handle_wowlan_info_notif(struct iwl_mld *mld,
+                                struct iwl_mld_wowlan_status *wowlan_status,
+                                struct iwl_rx_packet *pkt)
+{
+       const struct iwl_wowlan_info_notif *notif = (void *)pkt->data;
+       u32 expected_len, len = iwl_rx_packet_payload_len(pkt);
+
+       expected_len = sizeof(*notif);
+
+       /* was "%ud": printed a stray 'd' after each unsigned value */
+       if (IWL_FW_CHECK(mld, len < expected_len,
+                        "Invalid wowlan_info_notif (expected=%u got=%u)\n",
+                        expected_len, len))
+               return true;
+
+       /* print the FW-provided value; wowlan_status->tid_offloaded_tx is
+        * not populated until later in this function
+        */
+       if (IWL_FW_CHECK(mld, notif->tid_offloaded_tx != IWL_WOWLAN_OFFLOAD_TID,
+                        "Invalid tid_offloaded_tx %d\n",
+                        notif->tid_offloaded_tx))
+               return true;
+
+       iwl_mld_convert_gtk_resume_data(mld, wowlan_status, notif->gtk,
+                                       &notif->gtk[0].sc);
+       iwl_mld_convert_ptk_resume_seq(mld, wowlan_status, &notif->gtk[0].sc);
+       /* only one igtk is passed by FW */
+       iwl_mld_convert_igtk_resume_data(wowlan_status, &notif->igtk[0]);
+       iwl_mld_convert_bigtk_resume_data(wowlan_status, notif->bigtk);
+
+       wowlan_status->replay_ctr = le64_to_cpu(notif->replay_ctr);
+       wowlan_status->pattern_number = le16_to_cpu(notif->pattern_number);
+
+       wowlan_status->tid_offloaded_tx = notif->tid_offloaded_tx;
+       wowlan_status->last_qos_seq = le16_to_cpu(notif->qos_seq_ctr);
+       wowlan_status->num_of_gtk_rekeys =
+               le32_to_cpu(notif->num_of_gtk_rekeys);
+       wowlan_status->wakeup_reasons = le32_to_cpu(notif->wakeup_reasons);
+
+       /* TODO: mlo_links (task=MLO) */
+       return false;
+}
+
+/* Parse the wake-packet notification and duplicate the packet data into
+ * wowlan_status (GFP_ATOMIC: runs from the RX notification path).
+ * wake_packet_length is the length the FW saw on air; wake_packet_bufsize
+ * is how much of it was actually captured in the notification.
+ * Returns true on error, false on success.
+ */
+static bool
+iwl_mld_handle_wake_pkt_notif(struct iwl_mld *mld,
+                             struct iwl_mld_wowlan_status *wowlan_status,
+                             struct iwl_rx_packet *pkt)
+{
+       const struct iwl_wowlan_wake_pkt_notif *notif = (void *)pkt->data;
+       u32 actual_size, len = iwl_rx_packet_payload_len(pkt);
+       u32 expected_size = le32_to_cpu(notif->wake_packet_length);
+
+       if (IWL_FW_CHECK(mld, len < sizeof(*notif),
+                        "Invalid WoWLAN wake packet notification (expected size=%zu got=%u)\n",
+                        sizeof(*notif), len))
+               return true;
+
+       /* a wake packet only makes sense with the matching wakeup reason */
+       if (IWL_FW_CHECK(mld, !(wowlan_status->wakeup_reasons &
+                               IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT),
+                        "Got wake packet but wakeup reason is %x\n",
+                        wowlan_status->wakeup_reasons))
+               return true;
+
+       actual_size = len - offsetof(struct iwl_wowlan_wake_pkt_notif,
+                                    wake_packet);
+
+       /* actual_size got the padding from the notification, remove it. */
+       if (expected_size < actual_size)
+               actual_size = expected_size;
+       wowlan_status->wake_packet = kmemdup(notif->wake_packet, actual_size,
+                                            GFP_ATOMIC);
+       if (!wowlan_status->wake_packet)
+               return true;
+
+       wowlan_status->wake_packet_length = expected_size;
+       wowlan_status->wake_packet_bufsize = actual_size;
+
+       return false;
+}
+
+/* Build the cfg80211 wakeup packet report from the captured wake packet.
+ * Data frames are converted to 802.3 (skb allocated into *_pkt; freed by
+ * the caller); other frames are reported raw as 802.11. "truncated" is
+ * how many on-air bytes the FW capture is missing; the trailing FCS/ICV
+ * bytes are accounted against it since they are (partially) absent.
+ */
+static void
+iwl_mld_set_wake_packet(struct iwl_mld *mld,
+                       struct ieee80211_vif *vif,
+                       const struct iwl_mld_wowlan_status *wowlan_status,
+                       struct cfg80211_wowlan_wakeup *wakeup,
+                       struct sk_buff **_pkt)
+{
+       int pkt_bufsize = wowlan_status->wake_packet_bufsize;
+       int expected_pktlen = wowlan_status->wake_packet_length;
+       const u8 *pktdata = wowlan_status->wake_packet;
+       const struct ieee80211_hdr *hdr = (const void *)pktdata;
+       int truncated = expected_pktlen - pkt_bufsize;
+
+       if (ieee80211_is_data(hdr->frame_control)) {
+               int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+               int ivlen = 0, icvlen = 4; /* also FCS */
+
+               struct sk_buff *pkt = alloc_skb(pkt_bufsize, GFP_KERNEL);
+               *_pkt = pkt;
+               if (!pkt)
+                       return;
+
+               skb_put_data(pkt, pktdata, hdrlen);
+               pktdata += hdrlen;
+               pkt_bufsize -= hdrlen;
+
+               /* if truncated, FCS/ICV is (partially) gone */
+               if (truncated >= icvlen) {
+                       truncated -= icvlen;
+                       icvlen = 0;
+               } else {
+                       icvlen -= truncated;
+                       truncated = 0;
+               }
+
+               pkt_bufsize -= ivlen + icvlen;
+               pktdata += ivlen;
+
+               skb_put_data(pkt, pktdata, pkt_bufsize);
+
+               /* on conversion failure report no packet; caller frees pkt */
+               if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
+                       return;
+               wakeup->packet = pkt->data;
+               wakeup->packet_present_len = pkt->len;
+               wakeup->packet_len = pkt->len - truncated;
+               wakeup->packet_80211 = false;
+       } else {
+               int fcslen = 4;
+
+               if (truncated >= 4) {
+                       truncated -= 4;
+                       fcslen = 0;
+               } else {
+                       fcslen -= truncated;
+                       truncated = 0;
+               }
+               pkt_bufsize -= fcslen;
+               wakeup->packet = wowlan_status->wake_packet;
+               wakeup->packet_present_len = pkt_bufsize;
+               wakeup->packet_len = expected_pktlen - truncated;
+               wakeup->packet_80211 = true;
+       }
+}
+
+/* Translate the FW wakeup-reason bitmap into a cfg80211_wowlan_wakeup
+ * report and hand it to mac80211. A pure non-wireless wakeup is reported
+ * with a NULL wakeup struct and no pm_wakeup_event.
+ */
+static void
+iwl_mld_report_wowlan_wakeup(struct iwl_mld *mld,
+                            struct ieee80211_vif *vif,
+                            struct iwl_mld_wowlan_status *wowlan_status)
+{
+       struct sk_buff *pkt = NULL;
+       struct cfg80211_wowlan_wakeup wakeup = {
+               .pattern_idx = -1,      /* -1 = no pattern matched */
+       };
+       u32 reasons = wowlan_status->wakeup_reasons;
+
+       if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+               ieee80211_report_wowlan_wakeup(vif, NULL, GFP_KERNEL);
+               return;
+       }
+
+       pm_wakeup_event(mld->dev, 0);
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
+               wakeup.magic_pkt = true;
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
+               wakeup.pattern_idx =
+                       wowlan_status->pattern_number;
+
+       if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+                      IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH |
+                      IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE))
+               wakeup.disconnect = true;
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
+               wakeup.gtk_rekey_failure = true;
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+               wakeup.rfkill_release = true;
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
+               wakeup.eap_identity_req = true;
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
+               wakeup.four_way_handshake = true;
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
+               wakeup.tcp_connlost = true;
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
+               wakeup.tcp_nomoretokens = true;
+
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
+               wakeup.tcp_match = true;
+
+       if (reasons & IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC)
+               wakeup.unprot_deauth_disassoc = true;
+
+       if (wowlan_status->wake_packet)
+               iwl_mld_set_wake_packet(mld, vif, wowlan_status, &wakeup, &pkt);
+
+       ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
+       /* pkt was only a staging buffer for the 802.3 conversion */
+       kfree_skb(pkt);
+}
+
+/* Push a per-TID array of RX sequence counters into mac80211 */
+static void
+iwl_mld_set_key_rx_seq_tids(struct ieee80211_key_conf *key,
+                           struct ieee80211_key_seq *seq)
+{
+       for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+               ieee80211_set_key_rx_seq(key, tid, &seq[tid]);
+}
+
+/* Install the decoded RX sequence counters for @key, picking the
+ * representation (TKIP, AES or CMAC/GMAC) that matches its cipher.
+ */
+static void
+iwl_mld_set_key_rx_seq(struct ieee80211_key_conf *key,
+                      struct iwl_mld_mcast_key_data *key_data)
+{
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_TKIP:
+               iwl_mld_set_key_rx_seq_tids(key, key_data->gtk.tkip_seq);
+               break;
+       case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               iwl_mld_set_key_rx_seq_tids(key, key_data->gtk.aes_seq);
+               break;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               /* IGTK/BIGTK ciphers carry a single counter, not per-TID */
+               ieee80211_set_key_rx_seq(key, 0,
+                                        &key_data->igtk_bigtk.cmac_gmac_seq);
+               break;
+       default:
+               WARN_ON(1);
+       }
+}
+
+/* Update one installed multicast key (GTK/IGTK/BIGTK) after resume.
+ * If the FW rekeyed during D3, the pre-suspend key is removed (the new
+ * key is re-added later in iwl_mld_add_all_rekeys); otherwise only its
+ * RX sequence counters are refreshed.
+ */
+static void
+iwl_mld_d3_update_mcast_key(struct iwl_mld *mld,
+                           struct ieee80211_vif *vif,
+                           struct iwl_mld_wowlan_status *wowlan_status,
+                           struct ieee80211_key_conf *key,
+                           struct iwl_mld_mcast_key_data *key_data)
+{
+       /* keyidx 4/5 (IGTK) tolerated even on id mismatch; see below */
+       if (key->keyidx != key_data->id &&
+           (key->keyidx < 4 || key->keyidx > 5)) {
+               IWL_ERR(mld,
+                       "Unexpected keyId mismatch. Old keyId:%d, New keyId:%d\n",
+                       key->keyidx, key_data->id);
+               return;
+       }
+
+       /* All installed keys are sent by the FW, even weren't
+        * rekeyed during D3.
+        * We remove an existing key if it has the same index as
+        * a new key and a rekey has occurred during d3
+        */
+       if (wowlan_status->num_of_gtk_rekeys && key_data->len) {
+               if (key->keyidx == 4 || key->keyidx == 5) {
+                       struct iwl_mld_vif *mld_vif =
+                               iwl_mld_vif_from_mac80211(vif);
+                       struct iwl_mld_link *mld_link;
+                       int link_id = vif->active_links ?
+                               __ffs(vif->active_links) : 0;
+
+                       mld_link = iwl_mld_link_dereference_check(mld_vif,
+                                                                 link_id);
+                       if (WARN_ON(!mld_link))
+                               return;
+
+                       /* drop the stale IGTK bookkeeping before removal */
+                       if (mld_link->igtk == key)
+                               mld_link->igtk = NULL;
+                       mld->num_igtks--;
+               }
+
+               ieee80211_remove_key(key);
+               return;
+       }
+
+       iwl_mld_set_key_rx_seq(key, key_data);
+}
+
+/* Refresh the PTK RX sequence counters in mac80211 and, for AES ciphers,
+ * mirror the PNs into the driver's per-RX-queue PN tracking (queue 0 is
+ * covered by mac80211 itself, hence the loop from 1).
+ */
+static void
+iwl_mld_update_ptk_rx_seq(struct iwl_mld *mld,
+                         struct iwl_mld_wowlan_status *wowlan_status,
+                         struct ieee80211_sta *sta,
+                         struct ieee80211_key_conf *key,
+                         bool is_tkip)
+{
+       struct iwl_mld_sta *mld_sta =
+               iwl_mld_sta_from_mac80211(sta);
+       struct iwl_mld_ptk_pn *mld_ptk_pn =
+               wiphy_dereference(mld->wiphy,
+                                 mld_sta->ptk_pn[key->keyidx]);
+
+       iwl_mld_set_key_rx_seq_tids(key, is_tkip ?
+                                   wowlan_status->ptk.tkip_seq :
+                                   wowlan_status->ptk.aes_seq);
+       /* TKIP has no per-queue PN tracking to update */
+       if (is_tkip)
+               return;
+
+       if (WARN_ON(!mld_ptk_pn))
+               return;
+
+       for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+               for (int i = 1; i < mld->trans->num_rx_queues; i++)
+                       memcpy(mld_ptk_pn->q[i].pn[tid],
+                              wowlan_status->ptk.aes_seq[tid].ccmp.pn,
+                              IEEE80211_CCMP_PN_LEN);
+       }
+}
+
+/* ieee80211_iter_keys() callback: update every installed key from the
+ * WoWLAN status. Records the GTK/IGTK/BIGTK ciphers for the rekey pass
+ * and counts handled keys; any cipher we cannot handle aborts the whole
+ * iteration via data->unhandled_cipher.
+ */
+static void
+iwl_mld_resume_keys_iter(struct ieee80211_hw *hw,
+                        struct ieee80211_vif *vif,
+                        struct ieee80211_sta *sta,
+                        struct ieee80211_key_conf *key,
+                        void *_data)
+{
+       struct iwl_mld_resume_key_iter_data *data = _data;
+       struct iwl_mld_wowlan_status *wowlan_status = data->wowlan_status;
+       u8 status_idx;
+
+       /* TODO: check key link id (task=MLO) */
+       if (data->unhandled_cipher)
+               return;
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               /* ignore WEP completely, nothing to do */
+               return;
+       case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+       case WLAN_CIPHER_SUITE_TKIP:
+               /* a key with a station is the PTK; without one, a GTK */
+               if (sta) {
+                       iwl_mld_update_ptk_rx_seq(data->mld, wowlan_status,
+                                                 sta, key,
+                                                 key->cipher ==
+                                                 WLAN_CIPHER_SUITE_TKIP);
+                       return;
+               }
+
+               if (WARN_ON(data->gtk_cipher &&
+                           data->gtk_cipher != key->cipher))
+                       return;
+
+               data->gtk_cipher = key->cipher;
+               /* bool-to-int: 1 if this keyidx matches gtk[1], else 0 */
+               status_idx = key->keyidx == wowlan_status->gtk[1].id;
+               iwl_mld_d3_update_mcast_key(data->mld, vif, wowlan_status, key,
+                                           &wowlan_status->gtk[status_idx]);
+               break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+               /* keyidx 4-5 are IGTKs, 6-7 are BIGTKs */
+               if (key->keyidx == 4 || key->keyidx == 5) {
+                       if (WARN_ON(data->igtk_cipher &&
+                                   data->igtk_cipher != key->cipher))
+                               return;
+
+                       data->igtk_cipher = key->cipher;
+                       iwl_mld_d3_update_mcast_key(data->mld, vif,
+                                                   wowlan_status,
+                                                   key, &wowlan_status->igtk);
+               }
+               if (key->keyidx == 6 || key->keyidx == 7) {
+                       if (WARN_ON(data->bigtk_cipher &&
+                                   data->bigtk_cipher != key->cipher))
+                               return;
+
+                       data->bigtk_cipher = key->cipher;
+                       status_idx = key->keyidx == wowlan_status->bigtk[1].id;
+                       iwl_mld_d3_update_mcast_key(data->mld, vif,
+                                                   wowlan_status, key,
+                                                   &wowlan_status->bigtk[status_idx]);
+               }
+               break;
+       default:
+               data->unhandled_cipher = true;
+               return;
+       }
+       data->num_keys++;
+}
+
+/* Add one rekeyed multicast key (GTK/IGTK/BIGTK) to mac80211 via
+ * ieee80211_gtk_rekey_add() and install its RX sequence counters.
+ * An empty key_data (len == 0) is a no-op success.
+ * Returns false only if mac80211 rejected the key.
+ */
+static bool
+iwl_mld_add_mcast_rekey(struct ieee80211_vif *vif,
+                       struct iwl_mld *mld,
+                       struct iwl_mld_mcast_key_data *key_data,
+                       struct ieee80211_bss_conf *link_conf,
+                       u32 cipher)
+{
+       struct ieee80211_key_conf *key_config;
+       /* key_conf with trailing storage for the key material */
+       struct {
+               struct ieee80211_key_conf conf;
+               u8 key[WOWLAN_KEY_MAX_SIZE];
+       } conf = {
+               .conf.cipher = cipher,
+               .conf.keyidx = key_data->id,
+       };
+       int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+
+       BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
+       BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
+       BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256);
+       BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP);
+       BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_BIP_GMAC_128);
+       BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_BIP_GMAC_256);
+       BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_AES_CMAC);
+       BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key));
+
+       if (!key_data->len)
+               return true;
+
+       switch (cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_GCMP:
+               conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+               break;
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               conf.conf.keylen = WLAN_KEY_LEN_TKIP;
+               break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+               conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128;
+               break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256;
+               break;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+               conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC;
+               break;
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+               conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256;
+               break;
+       default:
+               /* keylen stays 0 from the initializer; memcpy is a no-op */
+               WARN_ON(1);
+       }
+
+       memcpy(conf.conf.key, key_data->key, conf.conf.keylen);
+       key_config = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
+       if (IS_ERR(key_config))
+               return false;
+
+       iwl_mld_set_key_rx_seq(key_config, key_data);
+
+       /* The FW holds only one igtk so we keep track of the valid one */
+       if (key_config->keyidx == 4 || key_config->keyidx == 5) {
+               struct iwl_mld_link *mld_link =
+                       iwl_mld_link_from_mac80211(link_conf);
+               mld_link->igtk = key_config;
+               mld->num_igtks++;
+       }
+       return true;
+}
+
+/* Re-add every multicast key the FW rekeyed during D3: the GTKs, the
+ * single IGTK, then the BIGTKs. Stops at the first failure.
+ */
+static bool
+iwl_mld_add_all_rekeys(struct ieee80211_vif *vif,
+                      struct iwl_mld_wowlan_status *wowlan_status,
+                      struct iwl_mld_resume_key_iter_data *key_iter_data,
+                      struct ieee80211_bss_conf *link_conf)
+{
+       struct iwl_mld *mld = key_iter_data->mld;
+
+       for (int i = 0; i < ARRAY_SIZE(wowlan_status->gtk); i++)
+               if (!iwl_mld_add_mcast_rekey(vif, mld,
+                                            &wowlan_status->gtk[i],
+                                            link_conf,
+                                            key_iter_data->gtk_cipher))
+                       return false;
+
+       if (!iwl_mld_add_mcast_rekey(vif, mld, &wowlan_status->igtk,
+                                    link_conf, key_iter_data->igtk_cipher))
+               return false;
+
+       for (int i = 0; i < ARRAY_SIZE(wowlan_status->bigtk); i++)
+               if (!iwl_mld_add_mcast_rekey(vif, mld,
+                                            &wowlan_status->bigtk[i],
+                                            link_conf,
+                                            key_iter_data->bigtk_cipher))
+                       return false;
+
+       return true;
+}
+
+/* Post-resume key update: walk all installed keys, refresh their RX
+ * counters (or remove rekeyed ones), re-add the new multicast keys and
+ * notify mac80211 of the GTK rekey with the new replay counter.
+ * Returns false on any unhandled cipher or missing link_conf.
+ */
+static bool
+iwl_mld_update_sec_keys(struct iwl_mld *mld,
+                       struct ieee80211_vif *vif,
+                       struct iwl_mld_wowlan_status *wowlan_status)
+{
+       int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+       struct ieee80211_bss_conf *link_conf =
+               link_conf_dereference_protected(vif, link_id);
+       /* mac80211 expects the replay counter big-endian */
+       __be64 replay_ctr = cpu_to_be64(wowlan_status->replay_ctr);
+       struct iwl_mld_resume_key_iter_data key_iter_data = {
+               .mld = mld,
+               .wowlan_status = wowlan_status,
+       };
+
+       if (WARN_ON(!link_conf))
+               return false;
+
+       ieee80211_iter_keys(mld->hw, vif, iwl_mld_resume_keys_iter,
+                           &key_iter_data);
+
+       if (key_iter_data.unhandled_cipher)
+               return false;
+
+       IWL_DEBUG_WOWLAN(mld,
+                        "Number of installed keys: %d, Number of rekeys: %d\n",
+                        key_iter_data.num_keys,
+                        wowlan_status->num_of_gtk_rekeys);
+
+       /* nothing to re-add if no keys were installed or no rekey happened */
+       if (!key_iter_data.num_keys || !wowlan_status->num_of_gtk_rekeys)
+               return true;
+
+       iwl_mld_add_all_rekeys(vif, wowlan_status, &key_iter_data,
+                              link_conf);
+
+       ieee80211_gtk_rekey_notify(vif, link_conf->bssid,
+                                  (void *)&replay_ctr, GFP_KERNEL);
+       /* TODO: MLO rekey (task=MLO) */
+       return true;
+}
+
+/* Consume the parsed WoWLAN status: report the wakeup to userspace,
+ * resync the offloaded TX queue's sequence pointers with what the FW
+ * used while suspended, and update all security keys.
+ * Returns false if the connection cannot be kept (disconnect-type
+ * wakeup, missing AP station, or key update failure).
+ */
+static bool
+iwl_mld_process_wowlan_status(struct iwl_mld *mld,
+                             struct ieee80211_vif *vif,
+                             struct iwl_mld_wowlan_status *wowlan_status)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct ieee80211_sta *ap_sta = mld_vif->ap_sta;
+       struct iwl_mld_txq *mld_txq;
+
+       iwl_mld_report_wowlan_wakeup(mld, vif, wowlan_status);
+
+       if (WARN_ON(!ap_sta))
+               return false;
+
+       mld_txq =
+               iwl_mld_txq_from_mac80211(ap_sta->txq[wowlan_status->tid_offloaded_tx]);
+
+       /* Update the pointers of the Tx queue that may have moved during
+        * suspend if the firmware sent frames.
+        * The firmware stores last-used value, we store next value.
+        */
+       WARN_ON(!mld_txq->status.allocated);
+       iwl_trans_set_q_ptrs(mld->trans, mld_txq->fw_id,
+                            (wowlan_status->last_qos_seq +
+                            0x10) >> 4);
+
+       if (!iwl_mld_update_sec_keys(mld, vif, wowlan_status))
+               return false;
+
+       /* a disconnect-type wakeup means the association is gone */
+       if (wowlan_status->wakeup_reasons &
+           (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+            IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH |
+            IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE))
+               return false;
+
+       return true;
+}
+
+/* Handle the scan-offload match-info notification during a net-detect
+ * resume: validate it, copy the matched-profiles bitmap and raw match
+ * data into resume_data, and clear the driver's scan state.
+ * Returns true on error, false on success.
+ */
+static bool
+iwl_mld_netdetect_match_info_handler(struct iwl_mld *mld,
+                                    struct iwl_mld_resume_data *resume_data,
+                                    struct iwl_rx_packet *pkt)
+{
+       struct iwl_mld_netdetect_res *results = resume_data->netdetect_res;
+       const struct iwl_scan_offload_match_info *notif = (void *)pkt->data;
+       u32 len = iwl_rx_packet_payload_len(pkt);
+
+       if (IWL_FW_CHECK(mld, !mld->netdetect,
+                        "Got scan match info notif when mld->netdetect==%d\n",
+                        mld->netdetect))
+               return true;
+
+       if (IWL_FW_CHECK(mld, len < sizeof(*notif),
+                        "Invalid scan offload match notif of length: %d\n",
+                        len))
+               return true;
+
+       /* a net-detect match is only expected on a non-wireless wakeup */
+       if (IWL_FW_CHECK(mld, resume_data->wowlan_status->wakeup_reasons !=
+                        IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS,
+                        "Ignore scan match info: unexpected wakeup reason (expected=0x%x got=0x%x)\n",
+                        IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS,
+                        resume_data->wowlan_status->wakeup_reasons))
+               return true;
+
+       results->matched_profiles = le32_to_cpu(notif->matched_profiles);
+       IWL_DEBUG_WOWLAN(mld, "number of matched profiles=%u\n",
+                        results->matched_profiles);
+
+       if (results->matched_profiles)
+               memcpy(results->matches, notif->matches,
+                      NETDETECT_QUERY_BUF_LEN);
+
+       /* No scan should be active at this point */
+       mld->scan.status = 0;
+       memset(mld->scan.uid_status, 0, sizeof(mld->scan.uid_status));
+       return false;
+}
+
+/* Translate the firmware netdetect match results into the
+ * cfg80211_wowlan_nd_info structure reported to userspace: for each
+ * matched profile, fill in the matching SSID and the channels it was
+ * detected on.
+ */
+static void
+iwl_mld_set_netdetect_info(struct iwl_mld *mld,
+                          const struct cfg80211_sched_scan_request *netdetect_cfg,
+                          struct cfg80211_wowlan_nd_info *netdetect_info,
+                          struct iwl_mld_netdetect_res *netdetect_res,
+                          unsigned long matched_profiles)
+{
+       int i;
+
+       for_each_set_bit(i, &matched_profiles, netdetect_cfg->n_match_sets) {
+               struct cfg80211_wowlan_nd_match *match;
+               int idx, j, n_channels = 0;
+               struct iwl_scan_offload_profile_match *matches =
+                       (void *)netdetect_res->matches;
+
+               /* Count the matching channels to size the allocation */
+               for (int k = 0; k < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; k++)
+                       n_channels +=
+                               hweight8(matches[i].matching_channels[k]);
+               match = kzalloc(struct_size(match, channels, n_channels),
+                               GFP_KERNEL);
+               if (!match)
+                       return;
+
+               netdetect_info->matches[netdetect_info->n_matches++] = match;
+
+               /* We inverted the order of the SSIDs in the scan
+                * request, so invert the index here.
+                */
+               idx = netdetect_cfg->n_match_sets - i - 1;
+               match->ssid.ssid_len =
+                       netdetect_cfg->match_sets[idx].ssid.ssid_len;
+               memcpy(match->ssid.ssid,
+                      netdetect_cfg->match_sets[idx].ssid.ssid,
+                      match->ssid.ssid_len);
+
+               /* Don't report channels if the bitmap refers to more
+                * channels than the scan request configured.
+                */
+               if (netdetect_cfg->n_channels < n_channels)
+                       continue;
+
+               /* NOTE(review): for_each_set_bit() expects a size in
+                * bits, but sizeof() yields bytes here - confirm whether
+                * bits beyond the first sizeof() positions can ever be
+                * set in matching_channels.
+                */
+               for_each_set_bit(j,
+                                (unsigned long *)&matches[i].matching_channels[0],
+                                sizeof(matches[i].matching_channels))
+                       match->channels[match->n_channels++] =
+                               netdetect_cfg->channels[j]->center_freq;
+       }
+}
+
+/* Process the netdetect results on resume and report the wakeup to
+ * mac80211.
+ *
+ * Builds a cfg80211_wowlan_wakeup with the netdetect match information
+ * (if the wakeup was indeed caused by netdetect) and hands it to
+ * ieee80211_report_wowlan_wakeup(); a NULL report indicates that no
+ * valid wakeup data is available.
+ */
+static void
+iwl_mld_process_netdetect_res(struct iwl_mld *mld,
+                             struct ieee80211_vif *vif,
+                             struct iwl_mld_resume_data *resume_data)
+{
+       struct cfg80211_wowlan_nd_info *netdetect_info = NULL;
+       const struct cfg80211_sched_scan_request *netdetect_cfg;
+       struct cfg80211_wowlan_wakeup wakeup = {
+               .pattern_idx = -1,
+       };
+       struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+       unsigned long matched_profiles;
+       u32 wakeup_reasons;
+       int n_matches;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (WARN_ON(!mld->wiphy->wowlan_config ||
+                   !mld->wiphy->wowlan_config->nd_config)) {
+               IWL_DEBUG_WOWLAN(mld,
+                                "Netdetect isn't configured on resume flow\n");
+               goto out;
+       }
+
+       netdetect_cfg = mld->wiphy->wowlan_config->nd_config;
+       wakeup_reasons = resume_data->wowlan_status->wakeup_reasons;
+
+       if (wakeup_reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+               wakeup.rfkill_release = true;
+
+       /* Only a pure non-wireless wakeup can carry netdetect results */
+       if (wakeup_reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
+               goto out;
+
+       if (!resume_data->netdetect_res->matched_profiles) {
+               IWL_DEBUG_WOWLAN(mld,
+                                "Netdetect results aren't valid\n");
+               wakeup_report = NULL;
+               goto out;
+       }
+
+       matched_profiles = resume_data->netdetect_res->matched_profiles;
+       if (!netdetect_cfg->n_match_sets) {
+               IWL_DEBUG_WOWLAN(mld,
+                                "No netdetect match sets are configured\n");
+               goto out;
+       }
+       n_matches = hweight_long(matched_profiles);
+       netdetect_info = kzalloc(struct_size(netdetect_info, matches,
+                                            n_matches), GFP_KERNEL);
+       /* On allocation failure, still report the wakeup - just without
+        * the per-match details.
+        */
+       if (netdetect_info)
+               iwl_mld_set_netdetect_info(mld, netdetect_cfg, netdetect_info,
+                                          resume_data->netdetect_res,
+                                          matched_profiles);
+
+       wakeup.net_detect = netdetect_info;
+ out:
+       ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+       if (netdetect_info) {
+               for (int i = 0; i < netdetect_info->n_matches; i++)
+                       kfree(netdetect_info->matches[i]);
+               kfree(netdetect_info);
+       }
+}
+
+/* Notification-wait callback for the D3 resume notifications.
+ *
+ * Dispatches each notification to its handler, records handler errors
+ * in resume_data->notif_handling_err and tracks which notifications
+ * arrived.
+ *
+ * Returns true (ending the wait) once all expected notifications have
+ * been received.
+ */
+static bool iwl_mld_handle_d3_notif(struct iwl_notif_wait_data *notif_wait,
+                                   struct iwl_rx_packet *pkt, void *data)
+{
+       struct iwl_mld_resume_data *resume_data = data;
+       struct iwl_mld *mld =
+               container_of(notif_wait, struct iwl_mld, notif_wait);
+
+       switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
+       case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION): {
+               if (resume_data->notifs_received & IWL_D3_NOTIF_WOWLAN_INFO) {
+                       IWL_DEBUG_WOWLAN(mld,
+                                        "got additional wowlan_info notif\n");
+                       break;
+               }
+               resume_data->notif_handling_err =
+                       iwl_mld_handle_wowlan_info_notif(mld,
+                                                        resume_data->wowlan_status,
+                                                        pkt);
+               resume_data->notifs_received |= IWL_D3_NOTIF_WOWLAN_INFO;
+
+               /* The info notification tells us whether a wake packet
+                * notification will follow; extend the expected set.
+                */
+               if (resume_data->wowlan_status->wakeup_reasons &
+                   IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT)
+                       resume_data->notifs_expected |=
+                               IWL_D3_NOTIF_WOWLAN_WAKE_PKT;
+               break;
+       }
+       case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION): {
+               if (resume_data->notifs_received &
+                   IWL_D3_NOTIF_WOWLAN_WAKE_PKT) {
+                       /* We shouldn't get two wake packet notifications */
+                       IWL_DEBUG_WOWLAN(mld,
+                                        "Got additional wowlan wake packet notification\n");
+                       break;
+               }
+               resume_data->notif_handling_err =
+                       iwl_mld_handle_wake_pkt_notif(mld,
+                                                     resume_data->wowlan_status,
+                                                     pkt);
+               resume_data->notifs_received |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT;
+               break;
+       }
+       case WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF): {
+               if (resume_data->notifs_received & IWL_D3_ND_MATCH_INFO) {
+                       IWL_ERR(mld,
+                               "Got additional netdetect match info\n");
+                       break;
+               }
+
+               resume_data->notif_handling_err =
+                       iwl_mld_netdetect_match_info_handler(mld, resume_data,
+                                                            pkt);
+               resume_data->notifs_received |= IWL_D3_ND_MATCH_INFO;
+               break;
+       }
+       case WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION): {
+               struct iwl_d3_end_notif *notif = (void *)pkt->data;
+
+               resume_data->d3_end_flags = le32_to_cpu(notif->flags);
+               resume_data->notifs_received |= IWL_D3_NOTIF_D3_END_NOTIF;
+               break;
+       }
+       default:
+               /* We only registered for the notifications above */
+               WARN_ON(1);
+       }
+
+       return resume_data->notifs_received == resume_data->notifs_expected;
+}
+
+#define IWL_MLD_D3_NOTIF_TIMEOUT (HZ / 3)
+
+/* Resume the transport from D3 and wait for the resume notifications.
+ *
+ * @with_wowlan: if true, also wait for the WoWLAN info / wake packet /
+ *     netdetect notifications; otherwise only for D3_END_NOTIFICATION.
+ *
+ * Returns 0 on success, -ENOENT if the device was reset during
+ * suspend, -EIO if a notification handler failed, or another negative
+ * error from the transport / wait.
+ */
+static int iwl_mld_wait_d3_notif(struct iwl_mld *mld,
+                                struct iwl_mld_resume_data *resume_data,
+                                bool with_wowlan)
+{
+       static const u16 wowlan_resume_notif[] = {
+               WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION),
+               WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION),
+               WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF),
+               WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
+       };
+       static const u16 d3_resume_notif[] = {
+               WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
+       };
+       struct iwl_notification_wait wait_d3_notif;
+       enum iwl_d3_status d3_status;
+       int ret;
+
+       /* Register the wait before waking the firmware so no
+        * notification can be missed.
+        */
+       if (with_wowlan)
+               iwl_init_notification_wait(&mld->notif_wait, &wait_d3_notif,
+                                          wowlan_resume_notif,
+                                          ARRAY_SIZE(wowlan_resume_notif),
+                                          iwl_mld_handle_d3_notif,
+                                          resume_data);
+       else
+               iwl_init_notification_wait(&mld->notif_wait, &wait_d3_notif,
+                                          d3_resume_notif,
+                                          ARRAY_SIZE(d3_resume_notif),
+                                          iwl_mld_handle_d3_notif,
+                                          resume_data);
+
+       ret = iwl_trans_d3_resume(mld->trans, &d3_status, false, false);
+       if (ret || d3_status != IWL_D3_STATUS_ALIVE) {
+               if (d3_status != IWL_D3_STATUS_ALIVE) {
+                       IWL_INFO(mld, "Device was reset during suspend\n");
+                       ret = -ENOENT;
+               } else {
+                       IWL_ERR(mld, "Transport resume failed\n");
+               }
+               iwl_remove_notification(&mld->notif_wait, &wait_d3_notif);
+               return ret;
+       }
+
+       ret = iwl_wait_notification(&mld->notif_wait, &wait_d3_notif,
+                                   IWL_MLD_D3_NOTIF_TIMEOUT);
+       if (ret)
+               IWL_ERR(mld, "Couldn't get the d3 notif %d\n", ret);
+
+       /* A handler error takes precedence over a successful wait */
+       if (resume_data->notif_handling_err)
+               ret = -EIO;
+
+       return ret;
+}
+
+/* Suspend the device without WoWLAN (plain D3 flow).
+ *
+ * Sends the device power and D3 configuration commands, then puts the
+ * transport into D3. On any failure the firmware is marked as dead so
+ * the next resume triggers a full restart.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld)
+{
+       struct iwl_d3_manager_config d3_cfg_cmd_data = {};
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       IWL_DEBUG_WOWLAN(mld, "Starting the no wowlan suspend flow\n");
+
+       iwl_mld_low_latency_stop(mld);
+
+       /* This will happen if iwl_mld_suspend failed with FW error */
+       if (mld->trans->state == IWL_TRANS_NO_FW &&
+           test_bit(STATUS_FW_ERROR, &mld->trans->status))
+               return -ENODEV;
+
+       ret = iwl_mld_update_device_power(mld, true);
+       if (ret) {
+               IWL_ERR(mld,
+                       "d3 suspend: couldn't send power_device %d\n", ret);
+               goto out;
+       }
+
+       ret = iwl_mld_send_cmd_pdu(mld, D3_CONFIG_CMD,
+                                  &d3_cfg_cmd_data);
+       if (ret) {
+               IWL_ERR(mld,
+                       "d3 suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
+               goto out;
+       }
+
+       ret = iwl_trans_d3_suspend(mld->trans, false, false);
+       if (ret) {
+               IWL_ERR(mld, "d3 suspend: trans_d3_suspend failed %d\n", ret);
+       } else {
+               /* Record that the firmware keeps running while in D3 */
+               mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+               mld->fw_status.in_d3 = true;
+       }
+
+ out:
+       if (ret) {
+               /* Mark the firmware dead - resume will restart it */
+               mld->trans->state = IWL_TRANS_NO_FW;
+               set_bit(STATUS_FW_ERROR, &mld->trans->status);
+       }
+
+       return ret;
+}
+
+/* Resume the device from a non-WoWLAN (plain D3) suspend.
+ *
+ * Waits only for D3_END_NOTIFICATION. On failure - or if the firmware
+ * requests a reset via IWL_D0I3_RESET_REQUIRE - the firmware is marked
+ * dead / -ENODEV is returned so the caller restarts it.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int iwl_mld_no_wowlan_resume(struct iwl_mld *mld)
+{
+       struct iwl_mld_resume_data resume_data = {
+               .notifs_expected =
+                       IWL_D3_NOTIF_D3_END_NOTIF,
+       };
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       IWL_DEBUG_WOWLAN(mld, "Starting the no wowlan resume flow\n");
+
+       mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+       mld->fw_status.in_d3 = false;
+       /* Retrieve any debug data collected by the firmware during D3 */
+       iwl_fw_dbg_read_d3_debug_data(&mld->fwrt);
+
+       if (iwl_mld_fw_needs_restart(mld, NULL))
+               ret = -ENODEV;
+       else
+               ret = iwl_mld_wait_d3_notif(mld, &resume_data, false);
+
+       if (!ret && (resume_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE))
+               return -ENODEV;
+
+       if (ret) {
+               mld->trans->state = IWL_TRANS_NO_FW;
+               set_bit(STATUS_FW_ERROR, &mld->trans->status);
+               return ret;
+       }
+       iwl_mld_low_latency_restart(mld);
+
+       return iwl_mld_update_device_power(mld, false);
+}
+
+/* Convert the per-TID mac80211 RX PNs of a CCMP/GCMP key into the
+ * firmware's little-endian 64-bit RSC format.
+ */
+static void
+iwl_mld_aes_seq_to_le64_pn(struct ieee80211_key_conf *key,
+                          __le64 *key_rsc)
+{
+       for (int i = 0; i < IWL_MAX_TID_COUNT; i++) {
+               struct ieee80211_key_seq seq;
+               /* pn points into seq; the data is filled in just below */
+               u8 *pn = key->cipher == WLAN_CIPHER_SUITE_CCMP ? seq.ccmp.pn :
+                       seq.gcmp.pn;
+
+               ieee80211_get_key_rx_seq(key, i, &seq);
+               /* pn[0] is the most significant byte of the 48-bit PN */
+               key_rsc[i] = cpu_to_le64((u64)pn[5] |
+                                        ((u64)pn[4] << 8) |
+                                        ((u64)pn[3] << 16) |
+                                        ((u64)pn[2] << 24) |
+                                        ((u64)pn[1] << 32) |
+                                        ((u64)pn[0] << 40));
+       }
+}
+
+/* Fill the per-TID RSCs for a pairwise (unicast) key.
+ *
+ * With multiple RX queues the driver tracks a PN per queue; for each
+ * TID take the maximum of the mac80211 PN (default queue) and the
+ * internally tracked PNs of all other queues, and pack it into the
+ * firmware's little-endian 64-bit format.
+ */
+static void
+iwl_mld_suspend_set_ucast_pn(struct iwl_mld *mld, struct ieee80211_sta *sta,
+                            struct ieee80211_key_conf *key, __le64 *key_rsc)
+{
+       struct iwl_mld_sta *mld_sta =
+               iwl_mld_sta_from_mac80211(sta);
+       struct iwl_mld_ptk_pn *mld_ptk_pn;
+
+       if (WARN_ON(key->keyidx >= ARRAY_SIZE(mld_sta->ptk_pn)))
+               return;
+
+       mld_ptk_pn = wiphy_dereference(mld->wiphy,
+                                      mld_sta->ptk_pn[key->keyidx]);
+       if (WARN_ON(!mld_ptk_pn))
+               return;
+
+       for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+               struct ieee80211_key_seq seq;
+               u8 *max_pn = seq.ccmp.pn;
+
+               /* get the PN from mac80211, used on the default queue */
+               ieee80211_get_key_rx_seq(key, tid, &seq);
+
+               /* and use the internal data for all queues */
+               for (int que = 1; que < mld->trans->num_rx_queues; que++) {
+                       u8 *cur_pn = mld_ptk_pn->q[que].pn[tid];
+
+                       /* memcmp works here: the PN is stored MSB first */
+                       if (memcmp(max_pn, cur_pn, IEEE80211_CCMP_PN_LEN) < 0)
+                               max_pn = cur_pn;
+               }
+               key_rsc[tid] = cpu_to_le64((u64)max_pn[5] |
+                                          ((u64)max_pn[4] << 8) |
+                                          ((u64)max_pn[3] << 16) |
+                                          ((u64)max_pn[2] << 24) |
+                                          ((u64)max_pn[1] << 32) |
+                                          ((u64)max_pn[0] << 40));
+       }
+}
+
+/* Pack the per-TID TKIP RX IVs (iv32 + iv16) into the firmware's
+ * little-endian 64-bit RSC format.
+ */
+static void
+iwl_mld_suspend_convert_tkip_ipn(struct ieee80211_key_conf *key,
+                                __le64 *rsc)
+{
+       struct ieee80211_key_seq seq;
+
+       for (int i = 0; i < IWL_MAX_TID_COUNT; i++) {
+               ieee80211_get_key_rx_seq(key, i, &seq);
+               rsc[i] =
+                       cpu_to_le64(((u64)seq.tkip.iv32 << 16) |
+                                   seq.tkip.iv16);
+       }
+}
+
+/* Key iterator used on suspend to collect the RSC/cipher data that is
+ * sent to the firmware (WOWLAN_TSC_RSC_PARAM / KEK-KCK material).
+ *
+ * For pairwise keys it fills the unicast RSCs; for GTKs it records the
+ * cipher, the key indices and the per-key RSCs (only the last two GTKs
+ * are kept); for IGTK/BIGTK only the cipher is recorded.
+ */
+static void
+iwl_mld_suspend_key_data_iter(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_sta *sta,
+                             struct ieee80211_key_conf *key,
+                             void *_data)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_suspend_key_iter_data *data = _data;
+       __le64 *key_rsc;
+       __le32 cipher = 0;
+
+       switch (key->cipher) {
+       /* The fallthrough chain sets the fw cipher flag once: each case
+        * only assigns it if an earlier case didn't already.
+        */
+       case WLAN_CIPHER_SUITE_CCMP:
+               cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+               fallthrough;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               if (!cipher)
+                       cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+               fallthrough;
+       case WLAN_CIPHER_SUITE_TKIP:
+               if (!cipher)
+                       cipher = cpu_to_le32(STA_KEY_FLG_TKIP);
+               /* A sta means this is a pairwise key */
+               if (sta) {
+                       key_rsc = data->rsc->ucast_rsc;
+                       if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+                               iwl_mld_suspend_convert_tkip_ipn(key, key_rsc);
+                       else
+                               iwl_mld_suspend_set_ucast_pn(mld, sta, key,
+                                                            key_rsc);
+
+                       data->have_rsc = true;
+                       return;
+               }
+               /* We're iterating from old to new, there're 4 possible
+                * gtk ids, and only the last two keys matter
+                */
+               if (WARN_ON(data->gtks >=
+                               ARRAY_SIZE(data->found_gtk_idx)))
+                       return;
+
+               if (WARN_ON(key->keyidx >=
+                               ARRAY_SIZE(data->rsc->mcast_key_id_map)))
+                       return;
+               data->gtk_cipher = cipher;
+               data->found_gtk_idx[data->gtks] = key->keyidx;
+               /* Only two RSC slots exist; newer keys reuse them */
+               key_rsc = data->rsc->mcast_rsc[data->gtks % 2];
+               data->rsc->mcast_key_id_map[key->keyidx] =
+                       data->gtks % 2;
+
+               /* The key seen two iterations ago shared this slot -
+                * invalidate its map entry now that it's overwritten.
+                */
+               if (data->gtks >= 2) {
+                       int prev = data->gtks % 2;
+                       int prev_idx = data->found_gtk_idx[prev];
+
+                       data->rsc->mcast_key_id_map[prev_idx] =
+                               IWL_MCAST_KEY_MAP_INVALID;
+               }
+
+               if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+                       iwl_mld_suspend_convert_tkip_ipn(key, key_rsc);
+               else
+                       iwl_mld_aes_seq_to_le64_pn(key, key_rsc);
+
+               data->gtks++;
+               data->have_rsc = true;
+               break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+               fallthrough;
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+               if (!cipher)
+                       cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+               /* keyidx 4/5 are IGTKs, 6/7 are BIGTKs */
+               if (key->keyidx == 4 || key->keyidx == 5)
+                       data->igtk_cipher = cipher;
+
+               if (key->keyidx == 6 || key->keyidx == 7)
+                       data->bigtk_cipher = cipher;
+
+               break;
+       }
+}
+
+/* Send the KEK/KCK rekey material to the firmware so it can perform
+ * GTK rekeying on its own while the host is asleep.
+ *
+ * Uses the rekey data cached from mac80211's set_rekey_data() and the
+ * ciphers collected by the suspend key iterator.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int
+iwl_mld_send_kek_kck_cmd(struct iwl_mld *mld,
+                        struct iwl_mld_vif *mld_vif,
+                        struct iwl_mld_suspend_key_iter_data data,
+                        int ap_sta_id)
+{
+       struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {};
+       struct iwl_mld_rekey_data *rekey_data =
+               &mld_vif->wowlan_data.rekey_data;
+
+       memcpy(kek_kck_cmd.kck, rekey_data->kck,
+              rekey_data->kck_len);
+       kek_kck_cmd.kck_len = cpu_to_le16(rekey_data->kck_len);
+       memcpy(kek_kck_cmd.kek, rekey_data->kek,
+              rekey_data->kek_len);
+       kek_kck_cmd.kek_len = cpu_to_le16(rekey_data->kek_len);
+       kek_kck_cmd.replay_ctr = rekey_data->replay_ctr;
+       kek_kck_cmd.akm = cpu_to_le32(rekey_data->akm);
+       kek_kck_cmd.sta_id = cpu_to_le32(ap_sta_id);
+       kek_kck_cmd.gtk_cipher = data.gtk_cipher;
+       kek_kck_cmd.igtk_cipher = data.igtk_cipher;
+       kek_kck_cmd.bigtk_cipher = data.bigtk_cipher;
+
+       IWL_DEBUG_WOWLAN(mld, "setting akm %d\n",
+                        rekey_data->akm);
+
+       return iwl_mld_send_cmd_pdu(mld, WOWLAN_KEK_KCK_MATERIAL,
+                                   &kek_kck_cmd);
+}
+
+/* Collect the key/RSC data for suspend and send the security commands
+ * (WOWLAN_TSC_RSC_PARAM and, if rekey data is valid, the KEK/KCK
+ * material).
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int
+iwl_mld_suspend_send_security_cmds(struct iwl_mld *mld,
+                                  struct ieee80211_vif *vif,
+                                  struct iwl_mld_vif *mld_vif,
+                                  int ap_sta_id)
+{
+       struct iwl_mld_suspend_key_iter_data data = {};
+       int ret;
+
+       data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL);
+       if (!data.rsc)
+               return -ENOMEM;
+
+       /* Start with all GTK id map entries invalid; the key iterator
+        * fills in the valid ones. (assumes one-byte map entries, so
+        * ARRAY_SIZE() equals the byte count memset() needs)
+        */
+       memset(data.rsc->mcast_key_id_map, IWL_MCAST_KEY_MAP_INVALID,
+              ARRAY_SIZE(data.rsc->mcast_key_id_map));
+
+       data.rsc->sta_id = cpu_to_le32(ap_sta_id);
+       ieee80211_iter_keys(mld->hw, vif,
+                           iwl_mld_suspend_key_data_iter,
+                           &data);
+
+       if (data.have_rsc)
+               ret = iwl_mld_send_cmd_pdu(mld, WOWLAN_TSC_RSC_PARAM,
+                                          data.rsc);
+       else
+               ret = 0;
+
+       if (!ret && mld_vif->wowlan_data.rekey_data.valid)
+               ret = iwl_mld_send_kek_kck_cmd(mld, mld_vif, data, ap_sta_id);
+
+       kfree(data.rsc);
+
+       return ret;
+}
+
+/* Translate the cfg80211 WoWLAN triggers into the firmware's
+ * WOWLAN_CONFIGURATION command: connection properties plus a wakeup
+ * filter bitmap built from the user-requested triggers.
+ */
+static void
+iwl_mld_set_wowlan_config_cmd(struct iwl_mld *mld,
+                             struct cfg80211_wowlan *wowlan,
+                             struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+                             struct ieee80211_sta *ap_sta)
+{
+       wowlan_config_cmd->is_11n_connection =
+                                       ap_sta->deflink.ht_cap.ht_supported;
+       wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
+               ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+
+       /* MFP in use - the firmware must handle protected mgmt frames */
+       if (ap_sta->mfp)
+               wowlan_config_cmd->flags |= IS_11W_ASSOC;
+
+       if (wowlan->disconnect)
+               wowlan_config_cmd->wakeup_filter |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+                                   IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+       if (wowlan->magic_pkt)
+               wowlan_config_cmd->wakeup_filter |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
+       if (wowlan->gtk_rekey_failure)
+               wowlan_config_cmd->wakeup_filter |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+       if (wowlan->eap_identity_req)
+               wowlan_config_cmd->wakeup_filter |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+       if (wowlan->four_way_handshake)
+               wowlan_config_cmd->wakeup_filter |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+       if (wowlan->n_patterns)
+               wowlan_config_cmd->wakeup_filter |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+       if (wowlan->rfkill_release)
+               wowlan_config_cmd->wakeup_filter |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+
+       /* "any" means wake up on any activity on the connection */
+       if (wowlan->any) {
+               wowlan_config_cmd->wakeup_filter |=
+                       cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+                                   IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+                                   IWL_WOWLAN_WAKEUP_RX_FRAME |
+                                   IWL_WOWLAN_WAKEUP_BCN_FILTERING);
+       }
+}
+
+/* Send the user-configured WoWLAN wakeup packet patterns to the
+ * firmware as a bitmask-type WOWLAN_PATTERNS command.
+ *
+ * Returns 0 on success (including when there are no patterns) or a
+ * negative error code.
+ */
+static int iwl_mld_send_patterns(struct iwl_mld *mld,
+                                struct cfg80211_wowlan *wowlan,
+                                int ap_sta_id)
+{
+       struct iwl_wowlan_patterns_cmd *pattern_cmd;
+       struct iwl_host_cmd cmd = {
+               .id = WOWLAN_PATTERNS,
+               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+       };
+       int ret;
+
+       if (!wowlan->n_patterns)
+               return 0;
+
+       cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns);
+
+       pattern_cmd = kzalloc(cmd.len[0], GFP_KERNEL);
+       if (!pattern_cmd)
+               return -ENOMEM;
+
+       pattern_cmd->n_patterns = wowlan->n_patterns;
+       pattern_cmd->sta_id = ap_sta_id;
+
+       for (int i = 0; i < wowlan->n_patterns; i++) {
+               /* The mask has one bit per pattern byte */
+               int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+               pattern_cmd->patterns[i].pattern_type =
+                       WOWLAN_PATTERN_TYPE_BITMASK;
+
+               memcpy(&pattern_cmd->patterns[i].u.bitmask.mask,
+                      wowlan->patterns[i].mask, mask_len);
+               memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern,
+                      wowlan->patterns[i].pattern,
+                      wowlan->patterns[i].pattern_len);
+               pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len;
+               pattern_cmd->patterns[i].u.bitmask.pattern_size =
+                       wowlan->patterns[i].pattern_len;
+       }
+
+       cmd.data[0] = pattern_cmd;
+       ret = iwl_mld_send_cmd(mld, &cmd);
+       kfree(pattern_cmd);
+       return ret;
+}
+
+static int
+iwl_mld_send_proto_offload(struct iwl_mld *mld,
+                          struct ieee80211_vif *vif,
+                          u8 ap_sta_id)
+{
+       struct iwl_proto_offload_cmd_v4 *cmd __free(kfree);
+       struct iwl_host_cmd hcmd = {
+               .id = PROT_OFFLOAD_CONFIG_CMD,
+               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+               .len[0] = sizeof(*cmd),
+       };
+       u32 enabled = 0;
+
+       cmd = kzalloc(hcmd.len[0], GFP_KERNEL);
+
+#if IS_ENABLED(CONFIG_IPV6)
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_wowlan_data *wowlan_data = &mld_vif->wowlan_data;
+       struct iwl_ns_config *nsc;
+       struct iwl_targ_addr *addrs;
+       int n_nsc, n_addrs;
+       int i, c;
+       int num_skipped = 0;
+
+       nsc = cmd->ns_config;
+       n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+       addrs = cmd->targ_addrs;
+       n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
+
+       /* For each address we have (and that will fit) fill a target
+        * address struct and combine for NS offload structs with the
+        * solicited node addresses.
+        */
+       for (i = 0, c = 0;
+               i < wowlan_data->num_target_ipv6_addrs &&
+               i < n_addrs && c < n_nsc; i++) {
+               int j;
+               struct in6_addr solicited_addr;
+
+               /* Because ns is offloaded skip tentative address to avoid
+                * violating RFC4862.
+                */
+               if (test_bit(i, wowlan_data->tentative_addrs)) {
+                       num_skipped++;
+                       continue;
+               }
+
+               addrconf_addr_solict_mult(&wowlan_data->target_ipv6_addrs[i],
+                                         &solicited_addr);
+               for (j = 0; j < c; j++)
+                       if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
+                                         &solicited_addr) == 0)
+                               break;
+               if (j == c)
+                       c++;
+               addrs[i].addr = wowlan_data->target_ipv6_addrs[i];
+               addrs[i].config_num = cpu_to_le32(j);
+               nsc[j].dest_ipv6_addr = solicited_addr;
+               memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
+       }
+
+       if (wowlan_data->num_target_ipv6_addrs - num_skipped)
+               enabled |= IWL_D3_PROTO_IPV6_VALID;
+
+       cmd->num_valid_ipv6_addrs = cpu_to_le32(i - num_skipped);
+       if (enabled & IWL_D3_PROTO_IPV6_VALID)
+               enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+#endif
+
+       if (vif->cfg.arp_addr_cnt) {
+               enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID;
+               cmd->common.host_ipv4_addr = vif->cfg.arp_addr_list[0];
+               ether_addr_copy(cmd->common.arp_mac_addr, vif->addr);
+       }
+
+       enabled |= IWL_D3_PROTO_OFFLOAD_BTM;
+       cmd->common.enabled = cpu_to_le32(enabled);
+       cmd->sta_id = cpu_to_le32(ap_sta_id);
+       hcmd.data[0] = cmd;
+       return iwl_mld_send_cmd(mld, &hcmd);
+}
+
+/* Configure the firmware for a WoWLAN (connected) suspend.
+ *
+ * Blocks EMLSR down to a single link, then sends the WoWLAN
+ * configuration, security, pattern and protocol offload commands, and
+ * finally enables beacon filtering and MAC power saving.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int
+iwl_mld_wowlan_config(struct iwl_mld *mld, struct ieee80211_vif *bss_vif,
+                     struct cfg80211_wowlan *wowlan)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_vif);
+       struct ieee80211_sta *ap_sta = mld_vif->ap_sta;
+       struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+                       .offloading_tid = IWL_WOWLAN_OFFLOAD_TID,
+       };
+       u32 sta_id_mask;
+       int ap_sta_id, ret;
+       int link_id = iwl_mld_get_primary_link(bss_vif);
+       struct ieee80211_bss_conf *link_conf;
+
+       /* WoWLAN operates on a single link - force out of EMLSR */
+       ret = iwl_mld_block_emlsr_sync(mld, bss_vif,
+                                      IWL_MLD_EMLSR_BLOCKED_WOWLAN, link_id);
+       if (ret)
+               return ret;
+
+       link_conf = link_conf_dereference_protected(bss_vif, link_id);
+
+       /* NOTE(review): error returns below do not undo the EMLSR block
+        * taken above - confirm it is released elsewhere when suspend
+        * fails.
+        */
+       if (WARN_ON(!ap_sta || !link_conf))
+               return -EINVAL;
+
+       /* After the EMLSR block, the AP station must map to one fw id */
+       sta_id_mask = iwl_mld_fw_sta_id_mask(mld, ap_sta);
+       if (WARN_ON(hweight32(sta_id_mask) != 1))
+               return -EINVAL;
+
+       ap_sta_id = __ffs(sta_id_mask);
+       wowlan_config_cmd.sta_id = ap_sta_id;
+
+       /* The offloading TID's queue must exist before entering D3 */
+       ret = iwl_mld_ensure_queue(mld,
+                                  ap_sta->txq[wowlan_config_cmd.offloading_tid]);
+       if (ret)
+               return ret;
+
+       iwl_mld_set_wowlan_config_cmd(mld, wowlan,
+                                     &wowlan_config_cmd, ap_sta);
+       ret = iwl_mld_send_cmd_pdu(mld, WOWLAN_CONFIGURATION,
+                                  &wowlan_config_cmd);
+       if (ret)
+               return ret;
+
+       ret = iwl_mld_suspend_send_security_cmds(mld, bss_vif, mld_vif,
+                                                ap_sta_id);
+       if (ret)
+               return ret;
+
+       ret = iwl_mld_send_patterns(mld, wowlan, ap_sta_id);
+       if (ret)
+               return ret;
+
+       ret = iwl_mld_send_proto_offload(mld, bss_vif, ap_sta_id);
+       if (ret)
+               return ret;
+
+       iwl_mld_enable_beacon_filter(mld, link_conf, true);
+       return iwl_mld_update_mac_power(mld, bss_vif, true);
+}
+
+/* Entry point for the WoWLAN suspend flow.
+ *
+ * If the BSS vif is associated, performs the full WoWLAN
+ * configuration; otherwise this must be a netdetect suspend and the
+ * netdetect config is sent instead.
+ *
+ * Returns 0 on success; a positive value (1) on invalid-state WARNs,
+ * or a negative error code from the configuration.
+ */
+int iwl_mld_wowlan_suspend(struct iwl_mld *mld, struct cfg80211_wowlan *wowlan)
+{
+       struct ieee80211_vif *bss_vif;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (WARN_ON(!wowlan))
+               return 1;
+
+       IWL_DEBUG_WOWLAN(mld, "Starting the wowlan suspend flow\n");
+
+       bss_vif = iwl_mld_get_bss_vif(mld);
+       if (WARN_ON(!bss_vif))
+               return 1;
+
+       if (!bss_vif->cfg.assoc) {
+               int ret;
+               /* If we're not associated, this must be netdetect */
+               if (WARN_ON(!wowlan->nd_config))
+                       return 1;
+
+               ret = iwl_mld_netdetect_config(mld, bss_vif, wowlan);
+               if (!ret)
+                       mld->netdetect = true;
+
+               return ret;
+       }
+
+       return iwl_mld_wowlan_config(mld, bss_vif, wowlan);
+}
+
+/* Returns 0 on success, 1 if an error occurred in firmware during d3.
+ * A negative value is expected only in unrecoverable cases.
+ */
+int iwl_mld_wowlan_resume(struct iwl_mld *mld)
+{
+       struct ieee80211_vif *bss_vif;
+       struct ieee80211_bss_conf *link_conf;
+       struct iwl_mld_netdetect_res netdetect_res;
+       struct iwl_mld_resume_data resume_data = {
+               .notifs_expected =
+                       IWL_D3_NOTIF_WOWLAN_INFO |
+                       IWL_D3_NOTIF_D3_END_NOTIF,
+               .netdetect_res = &netdetect_res,
+               /* .wowlan_status is zero-initialized so the cleanup at
+                * "out" is safe even on the early error paths.
+                */
+       };
+       int link_id;
+       int ret;
+       bool fw_err = false;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       IWL_DEBUG_WOWLAN(mld, "Starting the wowlan resume flow\n");
+
+       mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+       if (!mld->fw_status.in_d3) {
+               IWL_DEBUG_WOWLAN(mld,
+                                "Device_powered_off() was called during wowlan\n");
+               goto err;
+       }
+
+       mld->fw_status.in_d3 = false;
+       mld->scan.last_start_time_jiffies = jiffies;
+
+       bss_vif = iwl_mld_get_bss_vif(mld);
+       if (WARN_ON(!bss_vif))
+               goto err;
+
+       /* We can't have several links upon wowlan entry,
+        * this is enforced in the suspend flow.
+        */
+       WARN_ON(hweight16(bss_vif->active_links) > 1);
+       link_id = bss_vif->active_links ? __ffs(bss_vif->active_links) : 0;
+       link_conf = link_conf_dereference_protected(bss_vif, link_id);
+
+       if (WARN_ON(!link_conf))
+               goto err;
+
+       iwl_fw_dbg_read_d3_debug_data(&mld->fwrt);
+
+       if (iwl_mld_fw_needs_restart(mld, bss_vif)) {
+               fw_err = true;
+               goto err;
+       }
+
+       resume_data.wowlan_status = kzalloc(sizeof(*resume_data.wowlan_status),
+                                           GFP_KERNEL);
+       if (!resume_data.wowlan_status)
+               return -1;
+
+       if (mld->netdetect)
+               resume_data.notifs_expected |= IWL_D3_ND_MATCH_INFO;
+
+       ret = iwl_mld_wait_d3_notif(mld, &resume_data, true);
+       if (ret) {
+               IWL_ERR(mld, "Couldn't get the d3 notifs %d\n", ret);
+               fw_err = true;
+               goto err;
+       }
+
+       if (resume_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE) {
+               mld->fw_status.in_hw_restart = true;
+               goto process_wakeup_results;
+       }
+
+       iwl_mld_update_changed_regdomain(mld);
+       iwl_mld_update_mac_power(mld, bss_vif, false);
+       iwl_mld_enable_beacon_filter(mld, link_conf, false);
+       iwl_mld_update_device_power(mld, false);
+
+       if (mld->netdetect)
+               ret = iwl_mld_scan_stop(mld, IWL_MLD_SCAN_NETDETECT, false);
+
+ process_wakeup_results:
+       if (mld->netdetect) {
+               /* Net-detect: no connection existed, so there is never
+                * anything to disconnect here.
+                */
+               iwl_mld_process_netdetect_res(mld, bss_vif, &resume_data);
+               mld->netdetect = false;
+       } else {
+               /* Only read keep_connection on the non-netdetect path;
+                * previously it was evaluated after this branch while
+                * still uninitialized in the netdetect case.
+                */
+               bool keep_connection =
+                       iwl_mld_process_wowlan_status(mld, bss_vif,
+                                                     resume_data.wowlan_status);
+
+               /* EMLSR state will be cleared if the connection is not kept */
+               if (keep_connection)
+                       iwl_mld_unblock_emlsr(mld, bss_vif,
+                                             IWL_MLD_EMLSR_BLOCKED_WOWLAN);
+               else
+                       ieee80211_resume_disconnect(bss_vif);
+       }
+
+       goto out;
+
+ err:
+       if (fw_err) {
+               mld->trans->state = IWL_TRANS_NO_FW;
+               set_bit(STATUS_FW_ERROR, &mld->trans->status);
+       }
+
+       mld->fw_status.in_hw_restart = true;
+       ret = 1;
+ out:
+       if (resume_data.wowlan_status) {
+               kfree(resume_data.wowlan_status->wake_packet);
+               kfree(resume_data.wowlan_status);
+       }
+
+       return ret;
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_d3_h__
+#define __iwl_mld_d3_h__
+
+#include "fw/api/d3.h"
+
+/**
+ * struct iwl_mld_rekey_data - GTK rekey data kept for the D3 firmware
+ *
+ * @valid: %true once rekey data has been provided (via set_rekey_data)
+ * @kck: key confirmation key (KCK)
+ * @kek: key encryption key (KEK)
+ * @kck_len: length of @kck in bytes
+ * @kek_len: length of @kek in bytes
+ * @replay_ctr: current EAPOL replay counter (little endian)
+ * @akm: AKM suite of the connection, as passed by mac80211
+ */
+struct iwl_mld_rekey_data {
+       bool valid;
+       u8 kck[NL80211_KCK_EXT_LEN];
+       u8 kek[NL80211_KEK_EXT_LEN];
+       size_t kck_len;
+       size_t kek_len;
+       __le64 replay_ctr;
+       u32 akm;
+};
+
+/**
+ * struct iwl_mld_wowlan_data - data used by the wowlan suspend flow
+ *
+ * @target_ipv6_addrs: IPv6 addresses on this interface for offload
+ * @tentative_addrs: bitmap of tentative IPv6 addresses in @target_ipv6_addrs
+ * @num_target_ipv6_addrs: number of @target_ipv6_addrs
+ * @rekey_data: security key data used for rekeying during D3
+ */
+struct iwl_mld_wowlan_data {
+#if IS_ENABLED(CONFIG_IPV6)
+       struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
+       unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)];
+       int num_target_ipv6_addrs;
+#endif
+       struct iwl_mld_rekey_data rekey_data;
+};
+
+int iwl_mld_no_wowlan_resume(struct iwl_mld *mld);
+int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld);
+int iwl_mld_wowlan_suspend(struct iwl_mld *mld,
+                          struct cfg80211_wowlan *wowlan);
+int iwl_mld_wowlan_resume(struct iwl_mld *mld);
+void iwl_mld_set_rekey_data(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           struct cfg80211_gtk_rekey_data *data);
+#if IS_ENABLED(CONFIG_IPV6)
+void iwl_mld_ipv6_addr_change(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             struct inet6_dev *idev);
+#endif
+
+#endif /* __iwl_mld_d3_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "debugfs.h"
+#include "iwl-io.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "sta.h"
+#include "tlc.h"
+#include "power.h"
+#include "notif.h"
+#include "ap.h"
+#include "iwl-utils.h"
+#ifdef CONFIG_THERMAL
+#include "thermal.h"
+#endif
+
+#include "fw/api/rs.h"
+#include "fw/api/dhc.h"
+#include "fw/api/rfi.h"
+
+#define MLD_DEBUGFS_READ_FILE_OPS(name, bufsz)                         \
+       _MLD_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_mld)
+
+#define MLD_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode)          \
+       debugfs_create_file(alias, mode, parent, mld,                   \
+                           &iwl_dbgfs_##name##_ops)
+#define MLD_DEBUGFS_ADD_FILE(name, parent, mode)                       \
+       MLD_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+/* True when no firmware command may be sent from debugfs: the fw is not
+ * running, or (with CONFIG_PM_SLEEP) the device is suspended in D3.
+ */
+static bool iwl_mld_dbgfs_fw_cmd_disabled(struct iwl_mld *mld)
+{
+#ifdef CONFIG_PM_SLEEP
+       return !mld->fw_status.running || mld->fw_status.in_d3;
+#else
+       return !mld->fw_status.running;
+#endif /* CONFIG_PM_SLEEP */
+}
+
+/* debugfs write handler: clear the firmware debug monitor buffer */
+static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mld *mld,
+                                           char *buf, size_t count)
+{
+       /* If the firmware is not running, silently succeed since there is
+        * no data to clear.
+        */
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return 0;
+
+       iwl_fw_dbg_clear_monitor_buf(&mld->fwrt);
+
+       return count;
+}
+
+/* debugfs write handler: force a firmware NMI. Writing "nolog\n" sets
+ * do_not_dump_once first (presumably suppresses the resulting dump once
+ * - confirm against the error-handling path).
+ */
+static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mld *mld, char *buf,
+                                     size_t count)
+{
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return -EIO;
+
+       IWL_ERR(mld, "Triggering an NMI from debugfs\n");
+
+       /* count == 6 matches exactly "nolog\n" */
+       if (count == 6 && !strcmp(buf, "nolog\n"))
+               mld->fw_status.do_not_dump_once = true;
+
+       iwl_force_nmi(mld->trans);
+
+       return count;
+}
+
+/* debugfs write handler: trigger a firmware restart by sending the
+ * (intentionally failing) REPLY_ERROR command. Allowed only when the
+ * fw_restart module parameter is set.
+ */
+static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mld *mld, char *buf,
+                                         size_t count)
+{
+       int __maybe_unused ret;
+
+       if (!iwlwifi_mod_params.fw_restart)
+               return -EPERM;
+
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return -EIO;
+
+       /* count == 6 matches exactly "nolog\n" */
+       if (count == 6 && !strcmp(buf, "nolog\n")) {
+               mld->fw_status.do_not_dump_once = true;
+               set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &mld->trans->status);
+       }
+
+       /* take the return value to make compiler happy - it will
+        * fail anyway
+        */
+       ret = iwl_mld_send_cmd_empty(mld, WIDE_ID(LONG_GROUP, REPLY_ERROR));
+
+       return count;
+}
+
+/* debugfs write handler: send an ECHO command to check fw responsiveness */
+static ssize_t iwl_dbgfs_send_echo_cmd_write(struct iwl_mld *mld, char *buf,
+                                            size_t count)
+{
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return -EIO;
+
+       return iwl_mld_send_cmd_empty(mld, ECHO_CMD) ?: count;
+}
+
+/* Context passed to the notification-wait callback below */
+struct iwl_mld_sniffer_apply {
+       struct iwl_mld *mld;
+       const u8 *bssid;
+       u16 aid;
+};
+
+/* Notification-wait callback: applies the new sniffer AID/BSSID in-order
+ * with RX processing (see the comment in the write handler below).
+ */
+static bool iwl_mld_sniffer_apply(struct iwl_notif_wait_data *notif_data,
+                                 struct iwl_rx_packet *pkt, void *data)
+{
+       struct iwl_mld_sniffer_apply *apply = data;
+
+       apply->mld->monitor.cur_aid = cpu_to_le16(apply->aid);
+       memcpy(apply->mld->monitor.cur_bssid, apply->bssid,
+              sizeof(apply->mld->monitor.cur_bssid));
+
+       return true;
+}
+
+/* debugfs write handler: "<aid-hex> <bssid>" - configure the HE sniffer
+ * to follow the given AP. Requires the monitor interface to be active.
+ */
+static ssize_t
+iwl_dbgfs_he_sniffer_params_write(struct iwl_mld *mld, char *buf,
+                                 size_t count)
+{
+       struct iwl_notification_wait wait;
+       struct iwl_he_monitor_cmd he_mon_cmd = {};
+       struct iwl_mld_sniffer_apply apply = {
+               .mld = mld,
+       };
+       u16 wait_cmds[] = {
+               WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
+       };
+       u32 aid;
+       int ret;
+
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return -EIO;
+
+       if (!mld->monitor.on)
+               return -ENODEV;
+
+       /* expects 7 fields: AID plus the six BSSID octets */
+       ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid,
+                    &he_mon_cmd.bssid[0], &he_mon_cmd.bssid[1],
+                    &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3],
+                    &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]);
+       if (ret != 7)
+               return -EINVAL;
+
+       he_mon_cmd.aid = cpu_to_le16(aid);
+
+       apply.aid = aid;
+       apply.bssid = (void *)he_mon_cmd.bssid;
+
+       /* Use the notification waiter to get our function triggered
+        * in sequence with other RX. This ensures that frames we get
+        * on the RX queue _before_ the new configuration is applied
+        * still have mld->cur_aid pointing to the old AID, and that
+        * frames on the RX queue _after_ the firmware processed the
+        * new configuration (and sent the response, synchronously)
+        * get mld->cur_aid correctly set to the new AID.
+        */
+       iwl_init_notification_wait(&mld->notif_wait, &wait,
+                                  wait_cmds, ARRAY_SIZE(wait_cmds),
+                                  iwl_mld_sniffer_apply, &apply);
+
+       ret = iwl_mld_send_cmd_pdu(mld,
+                                  WIDE_ID(DATA_PATH_GROUP,
+                                          HE_AIR_SNIFFER_CONFIG_CMD),
+                                  &he_mon_cmd);
+
+       /* no need to really wait, we already did anyway */
+       iwl_remove_notification(&mld->notif_wait, &wait);
+
+       return ret ?: count;
+}
+
+/* debugfs read handler: print the currently configured sniffer AID/BSSID */
+static ssize_t
+iwl_dbgfs_he_sniffer_params_read(struct iwl_mld *mld, size_t count,
+                                char *buf)
+{
+       return scnprintf(buf, count,
+                        "%d %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
+                        le16_to_cpu(mld->monitor.cur_aid),
+                        mld->monitor.cur_bssid[0], mld->monitor.cur_bssid[1],
+                        mld->monitor.cur_bssid[2], mld->monitor.cur_bssid[3],
+                        mld->monitor.cur_bssid[4], mld->monitor.cur_bssid[5]);
+}
+
+/* The size computation is as follows:
+ * each number needs at most 3 characters, number of rows is the size of
+ * the table; So, need 5 chars for the "freq: " part and each tuple afterwards
+ * needs 6 characters for numbers and 5 for the punctuation around. 32 bytes
+ * for feature support message.
+ */
+#define IWL_RFI_DDR_BUF_SIZE (IWL_RFI_DDR_LUT_INSTALLED_SIZE *\
+                               (5 + IWL_RFI_DDR_LUT_ENTRY_CHANNELS_NUM *\
+                                       (6 + 5)) + 32)
+#define IWL_RFI_DLVR_BUF_SIZE (IWL_RFI_DLVR_LUT_INSTALLED_SIZE *\
+                               (5 + IWL_RFI_DLVR_LUT_ENTRY_CHANNELS_NUM *\
+                                       (6 + 5)) + 32)
+#define IWL_RFI_DESENSE_BUF_SIZE IWL_RFI_DDR_BUF_SIZE
+
+/* Extra 32 for "DDR and DLVR table" message */
+#define IWL_RFI_BUF_SIZE (IWL_RFI_DDR_BUF_SIZE + IWL_RFI_DLVR_BUF_SIZE +\
+                               IWL_RFI_DESENSE_BUF_SIZE + 32)
+
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_nmi, 10);
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_restart, 10);
+WIPHY_DEBUGFS_READ_WRITE_FILE_OPS_MLD(he_sniffer_params, 32);
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_dbg_clear, 10);
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(send_echo_cmd, 8);
+
+/* debugfs read handler: report the BIOS DSM "enable 6E" value */
+static ssize_t iwl_dbgfs_wifi_6e_enable_read(struct iwl_mld *mld,
+                                            size_t count, u8 *buf)
+{
+       int err;
+       u32 value;
+
+       err = iwl_bios_get_dsm(&mld->fwrt, DSM_FUNC_ENABLE_6E, &value);
+       if (err)
+               return err;
+
+       return scnprintf(buf, count, "0x%08x\n", value);
+}
+
+MLD_DEBUGFS_READ_FILE_OPS(wifi_6e_enable, 64);
+
+/* debugfs write handler: inject a hex-encoded iwl_rx_packet into the RX
+ * path as if it had been received from the device.
+ */
+static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mld *mld,
+                                            char *buf, size_t count)
+{
+       struct iwl_op_mode *opmode = container_of((void *)mld,
+                                                 struct iwl_op_mode,
+                                                 op_mode_specific);
+       struct iwl_rx_cmd_buffer rxb = {};
+       struct iwl_rx_packet *pkt;
+       /* input is hex text: two characters per binary byte */
+       int n_bytes = count / 2;
+       int ret = -EINVAL;
+
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return -EIO;
+
+       rxb._page = alloc_pages(GFP_KERNEL, 0);
+       if (!rxb._page)
+               return -ENOMEM;
+       pkt = rxb_addr(&rxb);
+
+       ret = hex2bin(page_address(rxb._page), buf, n_bytes);
+       if (ret)
+               goto out;
+
+       /* avoid invalid memory access and malformed packet */
+       if (n_bytes < sizeof(*pkt) ||
+           n_bytes != sizeof(*pkt) + iwl_rx_packet_payload_len(pkt))
+               goto out;
+
+       local_bh_disable();
+       iwl_mld_rx(opmode, NULL, &rxb);
+       local_bh_enable();
+       ret = 0;
+
+out:
+       iwl_free_rxb(&rxb);
+
+       return ret ?: count;
+}
+
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(inject_packet, 512);
+
+#ifdef CONFIG_THERMAL
+
+/* debugfs write handler: stop cTDP at the current cooling state */
+static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mld *mld,
+                                        char *buf, size_t count)
+{
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return -EIO;
+
+       return iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
+                                  CTDP_CMD_OPERATION_STOP) ? : count;
+}
+
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(stop_ctdp, 8);
+
+/* debugfs write handler: start cTDP at the current cooling state */
+static ssize_t iwl_dbgfs_start_ctdp_write(struct iwl_mld *mld,
+                                         char *buf, size_t count)
+{
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return -EIO;
+
+       return iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
+                                  CTDP_CMD_OPERATION_START) ? : count;
+}
+
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(start_ctdp, 8);
+
+#endif /* CONFIG_THERMAL */
+
+void
+iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir)
+{
+       /* Add debugfs files here */
+
+       MLD_DEBUGFS_ADD_FILE(fw_nmi, debugfs_dir, 0200);
+       MLD_DEBUGFS_ADD_FILE(fw_restart, debugfs_dir, 0200);
+       MLD_DEBUGFS_ADD_FILE(wifi_6e_enable, debugfs_dir, 0400);
+       MLD_DEBUGFS_ADD_FILE(he_sniffer_params, debugfs_dir, 0600);
+       MLD_DEBUGFS_ADD_FILE(fw_dbg_clear, debugfs_dir, 0200);
+       MLD_DEBUGFS_ADD_FILE(send_echo_cmd, debugfs_dir, 0200);
+#ifdef CONFIG_THERMAL
+       MLD_DEBUGFS_ADD_FILE(start_ctdp, debugfs_dir, 0200);
+       MLD_DEBUGFS_ADD_FILE(stop_ctdp, debugfs_dir, 0200);
+#endif
+       MLD_DEBUGFS_ADD_FILE(inject_packet, debugfs_dir, 0200);
+
+       /* Create a symlink with mac80211. It will be removed when mac80211
+        * exits (before the opmode exits which removes the target.)
+        */
+       if (!IS_ERR(debugfs_dir)) {
+               char buf[100];
+
+               snprintf(buf, 100, "../../%pd2", debugfs_dir->d_parent);
+               debugfs_create_symlink("iwlwifi", mld->wiphy->debugfsdir,
+                                      buf);
+       }
+}
+
+#define VIF_DEBUGFS_WRITE_FILE_OPS(name, bufsz)                        \
+       WIPHY_DEBUGFS_WRITE_FILE_OPS(vif_##name, bufsz, vif)
+
+#define VIF_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz)                       \
+       IEEE80211_WIPHY_DEBUGFS_READ_WRITE_FILE_OPS(vif_##name, bufsz, vif) \
+
+#define VIF_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode)  \
+       debugfs_create_file(alias, mode, parent, vif,           \
+                           &iwl_dbgfs_vif_##name##_ops)
+#define VIF_DEBUGFS_ADD_FILE(name, parent, mode)               \
+       VIF_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+/* debugfs write handler: "bf_enable_beacon_filter=<0|1>" - enable or
+ * disable beacon filtering for this vif's first active link.
+ */
+static ssize_t iwl_dbgfs_vif_bf_params_write(struct iwl_mld *mld, char *buf,
+                                            size_t count, void *data)
+{
+       struct ieee80211_vif *vif = data;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+       struct ieee80211_bss_conf *link_conf;
+       int val;
+
+       /* 24 == strlen("bf_enable_beacon_filter=") */
+       if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
+               if (sscanf(buf + 24, "%d", &val) != 1)
+                       return -EINVAL;
+       } else {
+               return -EINVAL;
+       }
+
+       if (val != 0 && val != 1)
+               return -EINVAL;
+
+       link_conf = link_conf_dereference_protected(vif, link_id);
+       if (WARN_ON(!link_conf))
+               return -ENODEV;
+
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return -EIO;
+
+       mld_vif->disable_bf = !val;
+
+       if (val)
+               return iwl_mld_enable_beacon_filter(mld, link_conf,
+                                                   false) ?: count;
+       else
+               return iwl_mld_disable_beacon_filter(mld, vif) ?: count;
+}
+
+/* debugfs write handler: "use_ps_poll=<val>" - toggle PS-poll usage and
+ * re-send the MAC power command for this vif.
+ */
+static ssize_t iwl_dbgfs_vif_pm_params_write(struct iwl_mld *mld,
+                                            char *buf,
+                                            size_t count, void *data)
+{
+       struct ieee80211_vif *vif = data;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       int val;
+
+       /* 12 == strlen("use_ps_poll=") */
+       if (!strncmp("use_ps_poll=", buf, 12)) {
+               if (sscanf(buf + 12, "%d", &val) != 1)
+                       return -EINVAL;
+       } else {
+               return -EINVAL;
+       }
+
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return -EIO;
+
+       mld_vif->use_ps_poll = val;
+
+       return iwl_mld_update_mac_power(mld, vif, false) ?: count;
+}
+
+/* debugfs write handler: set/clear the debugfs low-latency cause (0 or 1) */
+static ssize_t iwl_dbgfs_vif_low_latency_write(struct iwl_mld *mld,
+                                              char *buf, size_t count,
+                                              void *data)
+{
+       struct ieee80211_vif *vif = data;
+       u8 value;
+       int ret;
+
+       ret = kstrtou8(buf, 0, &value);
+       if (ret)
+               return ret;
+
+       if (value > 1)
+               return -EINVAL;
+
+       iwl_mld_vif_update_low_latency(mld, vif, value, LOW_LATENCY_DEBUGFS);
+
+       return count;
+}
+
+/* debugfs read handler: report each low-latency cause bit and the
+ * resulting effective state for this vif.
+ */
+static ssize_t iwl_dbgfs_vif_low_latency_read(struct ieee80211_vif *vif,
+                                             size_t count, char *buf)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       char format[] = "traffic=%d\ndbgfs=%d\nvif_type=%d\nactual=%d\n";
+       u8 ll_causes;
+
+       if (WARN_ON(count < sizeof(format)))
+               return -EINVAL;
+
+       ll_causes = READ_ONCE(mld_vif->low_latency_causes);
+
+       /* all values in format are boolean so the size of format is enough
+        * for holding the result string
+        */
+       return scnprintf(buf, count, format,
+                        !!(ll_causes & LOW_LATENCY_TRAFFIC),
+                        !!(ll_causes & LOW_LATENCY_DEBUGFS),
+                        !!(ll_causes & LOW_LATENCY_VIF_TYPE),
+                        !!(ll_causes));
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(pm_params, 32);
+VIF_DEBUGFS_WRITE_FILE_OPS(bf_params, 32);
+VIF_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 45);
+
+/* Append a hex-encoded IE blob (@bin, @len hex chars) to the beacon
+ * template of every valid link and send the updated template to the fw.
+ * With @restore, send the original template instead and clear the
+ * injection state. Returns 0 on success or a negative errno.
+ */
+static int
+_iwl_dbgfs_inject_beacon_ie(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                           char *bin, ssize_t len,
+                           bool restore)
+{
+       struct iwl_mld_vif *mld_vif;
+       struct iwl_mld_link *mld_link;
+       struct iwl_mac_beacon_cmd beacon_cmd = {};
+       /* input is hex text: two characters per binary byte */
+       int n_bytes = len / 2;
+
+       /* Element len should be represented by u8 */
+       if (n_bytes >= U8_MAX)
+               return -EINVAL;
+
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return -EIO;
+
+       if (!vif)
+               return -EINVAL;
+
+       mld_vif = iwl_mld_vif_from_mac80211(vif);
+       mld_vif->beacon_inject_active = true;
+       mld->hw->extra_beacon_tailroom = n_bytes;
+
+       /* NOTE(review): link_id below is presumably provided by the
+        * for_each_mld_vif_valid_link() iterator macro - confirm.
+        */
+       for_each_mld_vif_valid_link(mld_vif, mld_link) {
+               u32 offset;
+               struct ieee80211_tx_info *info;
+               struct ieee80211_bss_conf *link_conf =
+                       link_conf_dereference_protected(vif, link_id);
+               struct ieee80211_chanctx_conf *ctx =
+                       wiphy_dereference(mld->wiphy, link_conf->chanctx_conf);
+               struct sk_buff *beacon =
+                       ieee80211_beacon_get_template(mld->hw, vif,
+                                                     NULL, link_id);
+
+               if (!beacon)
+                       return -EINVAL;
+
+               if (!restore && (WARN_ON(!n_bytes || !bin) ||
+                                hex2bin(skb_put_zero(beacon, n_bytes),
+                                        bin, n_bytes))) {
+                       dev_kfree_skb(beacon);
+                       return -EINVAL;
+               }
+
+               info = IEEE80211_SKB_CB(beacon);
+
+               beacon_cmd.flags =
+                       cpu_to_le16(iwl_mld_get_rate_flags(mld, info, vif,
+                                                          link_conf,
+                                                          ctx->def.chan->band));
+               beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len);
+               beacon_cmd.link_id =
+                       cpu_to_le32(mld_link->fw_id);
+
+               iwl_mld_set_tim_idx(mld, &beacon_cmd.tim_idx,
+                                   beacon->data, beacon->len);
+
+               offset = iwl_find_ie_offset(beacon->data,
+                                           WLAN_EID_S1G_TWT,
+                                           beacon->len);
+
+               beacon_cmd.btwt_offset = cpu_to_le32(offset);
+
+               iwl_mld_send_beacon_template_cmd(mld, beacon, &beacon_cmd);
+               dev_kfree_skb(beacon);
+       }
+
+       /* on error paths beacon_inject_active stays set; the debugfs
+        * callers reset extra_beacon_tailroom themselves
+        */
+       if (restore)
+               mld_vif->beacon_inject_active = false;
+
+       return 0;
+}
+
+/* debugfs write handler: inject hex-encoded IEs into the beacon */
+static ssize_t
+iwl_dbgfs_vif_inject_beacon_ie_write(struct iwl_mld *mld,
+                                    char *buf, size_t count,
+                                    void *data)
+{
+       struct ieee80211_vif *vif = data;
+       int ret = _iwl_dbgfs_inject_beacon_ie(mld, vif, buf,
+                                             count, false);
+
+       /* tailroom was only needed while building the template */
+       mld->hw->extra_beacon_tailroom = 0;
+       return ret ?: count;
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie, 512);
+
+/* debugfs write handler: restore the original (uninjected) beacon */
+static ssize_t
+iwl_dbgfs_vif_inject_beacon_ie_restore_write(struct iwl_mld *mld,
+                                            char *buf,
+                                            size_t count,
+                                            void *data)
+{
+       struct ieee80211_vif *vif = data;
+       int ret = _iwl_dbgfs_inject_beacon_ie(mld, vif, NULL,
+                                             0, true);
+
+       /* tailroom was only needed while building the template */
+       mld->hw->extra_beacon_tailroom = 0;
+       return ret ?: count;
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512);
+
+/* debugfs write handler: parse TWT session parameters and send a
+ * DEBUG_HOST_COMMAND (DHC) TWT-operation to the firmware. The 10th
+ * parameter is optional; its meaning depends on the interface type
+ * (see the comment below).
+ */
+static ssize_t
+iwl_dbgfs_vif_twt_setup_write(struct iwl_mld *mld, char *buf, size_t count,
+                             void *data)
+{
+       struct iwl_host_cmd hcmd = {
+               .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, DEBUG_HOST_COMMAND),
+       };
+       struct ieee80211_vif *vif = data;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_dhc_twt_operation *dhc_twt_cmd;
+       /* must be NULL-initialized: __free(kfree) runs on every scope
+        * exit, including the early returns before the allocation below
+        */
+       struct iwl_dhc_cmd *cmd __free(kfree) = NULL;
+       u64 target_wake_time;
+       u32 twt_operation, interval_exp, interval_mantissa, min_wake_duration;
+       u8 trigger, flow_type, flow_id, protection, tenth_param;
+       u8 twt_request = 1, broadcast = 0;
+       int ret;
+
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return -EIO;
+
+       ret = sscanf(buf, "%u %llu %u %u %u %hhu %hhu %hhu %hhu %hhu",
+                    &twt_operation, &target_wake_time, &interval_exp,
+                    &interval_mantissa, &min_wake_duration, &trigger,
+                    &flow_type, &flow_id, &protection, &tenth_param);
+
+       /* the new twt_request parameter is optional for station */
+       if ((ret != 9 && ret != 10) ||
+           (ret == 10 && vif->type != NL80211_IFTYPE_STATION &&
+            tenth_param == 1))
+               return -EINVAL;
+
+       /* The 10th parameter:
+        * In STA mode - the TWT type (broadcast or individual)
+        * In AP mode - the role (0 responder, 2 unsolicited)
+        */
+       if (ret == 10) {
+               if (vif->type == NL80211_IFTYPE_STATION)
+                       broadcast = tenth_param;
+               else
+                       twt_request = tenth_param;
+       }
+
+       cmd = kzalloc(sizeof(*cmd) + sizeof(*dhc_twt_cmd), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       dhc_twt_cmd = (void *)cmd->data;
+       dhc_twt_cmd->mac_id = cpu_to_le32(mld_vif->fw_id);
+       dhc_twt_cmd->twt_operation = cpu_to_le32(twt_operation);
+       dhc_twt_cmd->target_wake_time = cpu_to_le64(target_wake_time);
+       dhc_twt_cmd->interval_exp = cpu_to_le32(interval_exp);
+       dhc_twt_cmd->interval_mantissa = cpu_to_le32(interval_mantissa);
+       dhc_twt_cmd->min_wake_duration = cpu_to_le32(min_wake_duration);
+       dhc_twt_cmd->trigger = trigger;
+       dhc_twt_cmd->flow_type = flow_type;
+       dhc_twt_cmd->flow_id = flow_id;
+       dhc_twt_cmd->protection = protection;
+       dhc_twt_cmd->twt_request = twt_request;
+       dhc_twt_cmd->negotiation_type = broadcast ? 3 : 0;
+
+       cmd->length = cpu_to_le32(sizeof(*dhc_twt_cmd) >> 2);
+       cmd->index_and_mask =
+               cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC |
+                           DHC_INT_UMAC_TWT_OPERATION);
+
+       hcmd.len[0] = sizeof(*cmd) + sizeof(*dhc_twt_cmd);
+       hcmd.data[0] = cmd;
+
+       ret = iwl_mld_send_cmd(mld, &hcmd);
+
+       return ret ?: count;
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(twt_setup, 256);
+
+/* debugfs write handler: parse 21 TWT parameters and send a
+ * TWT_OPERATION_CMD for the vif's first active link. Only supported
+ * when at most one link is active.
+ */
+static ssize_t
+iwl_dbgfs_vif_twt_operation_write(struct iwl_mld *mld, char *buf, size_t count,
+                                 void *data)
+{
+       struct ieee80211_vif *vif = data;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_twt_operation_cmd twt_cmd = {};
+       int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+       struct iwl_mld_link *mld_link = iwl_mld_link_dereference_check(mld_vif,
+                                                                      link_id);
+       int ret;
+
+       if (WARN_ON(!mld_link))
+               return -ENODEV;
+
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return -EIO;
+
+       if (hweight16(vif->active_links) > 1)
+               return -EOPNOTSUPP;
+
+       ret = sscanf(buf,
+                    "%u %llu %u %u %u %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu",
+                    &twt_cmd.twt_operation, &twt_cmd.target_wake_time,
+                    &twt_cmd.interval_exponent, &twt_cmd.interval_mantissa,
+                    &twt_cmd.minimum_wake_duration, &twt_cmd.trigger,
+                    &twt_cmd.flow_type, &twt_cmd.flow_id,
+                    &twt_cmd.twt_protection, &twt_cmd.ndp_paging_indicator,
+                    &twt_cmd.responder_pm_mode, &twt_cmd.negotiation_type,
+                    &twt_cmd.twt_request, &twt_cmd.implicit,
+                    &twt_cmd.twt_group_assignment, &twt_cmd.twt_channel,
+                    &twt_cmd.restricted_info_present, &twt_cmd.dl_bitmap_valid,
+                    &twt_cmd.ul_bitmap_valid, &twt_cmd.dl_tid_bitmap,
+                    &twt_cmd.ul_tid_bitmap);
+
+       if (ret != 21)
+               return -EINVAL;
+
+       twt_cmd.link_id = cpu_to_le32(mld_link->fw_id);
+
+       ret = iwl_mld_send_cmd_pdu(mld,
+                                  WIDE_ID(MAC_CONF_GROUP, TWT_OPERATION_CMD),
+                                  &twt_cmd);
+       return ret ?: count;
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(twt_operation, 256);
+
+/* mac80211 callback: create the per-vif iwlmld debugfs directory, a
+ * convenience symlink from the device debugfs dir, and the per-vif files.
+ */
+void iwl_mld_add_vif_debugfs(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif)
+{
+       struct dentry *mld_vif_dbgfs =
+               debugfs_create_dir("iwlmld", vif->debugfs_dir);
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       /* sized for "../../../" + phy name + netdev dir + "/iwlmld" */
+       char target[3 * 3 + 11 + (NL80211_WIPHY_NAME_MAXLEN + 1) +
+                   (7 + IFNAMSIZ + 1) + 6 + 1];
+       char name[7 + IFNAMSIZ + 1];
+
+       /* Create symlink for convenience pointing to interface specific
+        * debugfs entries for the driver. For example, under
+        * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmld/
+        * find
+        * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmld/
+        */
+       snprintf(name, sizeof(name), "%pd", vif->debugfs_dir);
+       snprintf(target, sizeof(target), "../../../%pd3/iwlmld",
+                vif->debugfs_dir);
+       mld_vif->dbgfs_slink =
+               debugfs_create_symlink(name, mld->debugfs_dir, target);
+
+       /* power files only make sense when power save can be active */
+       if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+           vif->type == NL80211_IFTYPE_STATION) {
+               VIF_DEBUGFS_ADD_FILE(pm_params, mld_vif_dbgfs, 0200);
+               VIF_DEBUGFS_ADD_FILE(bf_params, mld_vif_dbgfs, 0200);
+       }
+
+       if (vif->type == NL80211_IFTYPE_AP) {
+               VIF_DEBUGFS_ADD_FILE(inject_beacon_ie, mld_vif_dbgfs, 0200);
+               VIF_DEBUGFS_ADD_FILE(inject_beacon_ie_restore,
+                                    mld_vif_dbgfs, 0200);
+       }
+
+       VIF_DEBUGFS_ADD_FILE(low_latency, mld_vif_dbgfs, 0600);
+       VIF_DEBUGFS_ADD_FILE(twt_setup, mld_vif_dbgfs, 0200);
+       VIF_DEBUGFS_ADD_FILE(twt_operation, mld_vif_dbgfs, 0200);
+}
+
+/* per-link (bss_conf) write-only file ops; prefixes the name with "link_" */
+#define LINK_DEBUGFS_WRITE_FILE_OPS(name, bufsz)                       \
+       WIPHY_DEBUGFS_WRITE_FILE_OPS(link_##name, bufsz, bss_conf)
+
+/* add a per-link debugfs file; expects a local "link_conf" in scope,
+ * which is stored as the file's private data
+ */
+#define LINK_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode)         \
+       debugfs_create_file(alias, mode, parent, link_conf,             \
+                           &iwl_dbgfs_link_##name##_ops)
+#define LINK_DEBUGFS_ADD_FILE(name, parent, mode)                      \
+       LINK_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+/* Ensure an "iwlmld" directory exists under the given per-link debugfs dir.
+ * NOTE(review): debugfs_lookup() takes a reference on the returned dentry;
+ * presumably acceptable for debugfs lifetime here — confirm no dput() needed.
+ */
+void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_bss_conf *link_conf,
+                             struct dentry *dir)
+{
+       struct dentry *mld_link_dir;
+
+       mld_link_dir = debugfs_lookup("iwlmld", dir);
+
+       /* For non-MLO vifs, the dir of deflink is the same as the vif's one.
+        * so if iwlmld dir already exists, this means that this is deflink.
+        * If not, this is a per-link dir of a MLO vif, add in it the iwlmld
+        * dir.
+        */
+       if (!mld_link_dir)
+               mld_link_dir = debugfs_create_dir("iwlmld", dir);
+}
+
+/* Parse "<rate> [partial]" from user input and send a TLC debug host command
+ * fixing the TX rate for the station behind this link_sta debugfs entry.
+ * Returns @count on success or a negative errno.
+ */
+static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
+                                         size_t count, void *data)
+{
+       struct ieee80211_link_sta *link_sta = data;
+       struct iwl_mld_link_sta *mld_link_sta;
+       u32 rate;
+       u32 partial = false;
+       char pretty_rate[100];
+       int ret;
+       u8 fw_sta_id;
+
+       mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+       if (WARN_ON(!mld_link_sta))
+               return -EINVAL;
+
+       fw_sta_id = mld_link_sta->fw_id;
+
+       /* at least the rate must parse; "partial" is optional */
+       if (sscanf(buf, "%i %i", &rate, &partial) == 0)
+               return -EINVAL;
+
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return -EIO;
+
+       ret = iwl_mld_send_tlc_dhc(mld, fw_sta_id,
+                                  partial ? IWL_TLC_DEBUG_PARTIAL_FIXED_RATE :
+                                            IWL_TLC_DEBUG_FIXED_RATE,
+                                  rate);
+
+       rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate), rate);
+
+       IWL_DEBUG_RATE(mld, "sta_id %d rate %s partial: %d, ret:%d\n",
+                      fw_sta_id, pretty_rate, partial, ret);
+
+       return ret ? : count;
+}
+
+/* Parse "<type> <value>" (both mandatory) and forward it as a raw TLC debug
+ * host command for the station behind this link_sta debugfs entry.
+ */
+static ssize_t iwl_dbgfs_tlc_dhc_write(struct iwl_mld *mld, char *buf,
+                                      size_t count, void *data)
+{
+       struct ieee80211_link_sta *link_sta = data;
+       struct iwl_mld_link_sta *mld_link_sta;
+       u32 type, value;
+       int ret;
+       u8 fw_sta_id;
+
+       mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+       if (WARN_ON(!mld_link_sta))
+               return -EINVAL;
+
+       fw_sta_id = mld_link_sta->fw_id;
+
+       if (sscanf(buf, "%i %i", &type, &value) != 2) {
+               IWL_DEBUG_RATE(mld, "usage <type> <value>\n");
+               return -EINVAL;
+       }
+
+       if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+               return -EIO;
+
+       ret = iwl_mld_send_tlc_dhc(mld, fw_sta_id, type, value);
+
+       return ret ? : count;
+}
+
+/* add a per-link-sta debugfs file; expects a local "link_sta" in scope,
+ * which is stored as the file's private data
+ */
+#define LINK_STA_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode)     \
+       debugfs_create_file(alias, mode, parent, link_sta,              \
+                           &iwl_dbgfs_##name##_ops)
+#define LINK_STA_DEBUGFS_ADD_FILE(name, parent, mode)                  \
+       LINK_STA_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+/* write-only per-link-sta file ops, serialized under the wiphy mutex */
+#define LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(name, bufsz)                  \
+       WIPHY_DEBUGFS_WRITE_FILE_OPS(name, bufsz, link_sta)
+
+LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(tlc_dhc, 64);
+LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(fixed_rate, 64);
+
+/* Populate the per-link-sta debugfs dir with the rate-scaling debug files */
+void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw,
+                                 struct ieee80211_vif *vif,
+                                 struct ieee80211_link_sta *link_sta,
+                                 struct dentry *dir)
+{
+       LINK_STA_DEBUGFS_ADD_FILE(fixed_rate, dir, 0200);
+       LINK_STA_DEBUGFS_ADD_FILE(tlc_dhc, dir, 0200);
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include "iface.h"
+#include "sta.h"
+
+/* Generate a per-open state struct (argument pointer, read bookkeeping and an
+ * embedded buffer of @buflen bytes) plus an open() that allocates it, stashes
+ * inode->i_private as the argument and rejects O_RDWR opens. Freed by
+ * _iwl_dbgfs_release().
+ */
+#define MLD_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)                        \
+struct dbgfs_##name##_data {                                           \
+       argtype *arg;                                                   \
+       bool read_done;                                                 \
+       ssize_t rlen;                                                   \
+       char buf[buflen];                                               \
+};                                                                     \
+static int _iwl_dbgfs_##name##_open(struct inode *inode,               \
+                                   struct file *file)                  \
+{                                                                      \
+       struct dbgfs_##name##_data *data;                               \
+                                                                       \
+       if ((file->f_flags & O_ACCMODE) == O_RDWR)                      \
+               return -EOPNOTSUPP;                                     \
+                                                                       \
+       data = kzalloc(sizeof(*data), GFP_KERNEL);                      \
+       if (!data)                                                      \
+               return -ENOMEM;                                         \
+                                                                       \
+       data->read_done = false;                                        \
+       data->arg = inode->i_private;                                   \
+       file->private_data = data;                                      \
+                                                                       \
+       return 0;                                                       \
+}
+
+/* Generate a read() that fills the per-open buffer lazily on first read via
+ * iwl_dbgfs_<name>_read() and then serves subsequent reads from that cached
+ * buffer (so the producer runs exactly once per open).
+ */
+#define MLD_DEBUGFS_READ_WRAPPER(name)                                 \
+static ssize_t _iwl_dbgfs_##name##_read(struct file *file,             \
+                                       char __user *user_buf,          \
+                                       size_t count, loff_t *ppos)     \
+{                                                                      \
+       struct dbgfs_##name##_data *data = file->private_data;          \
+                                                                       \
+       if (!data->read_done) {                                         \
+               data->read_done = true;                                 \
+               data->rlen = iwl_dbgfs_##name##_read(data->arg,         \
+                                                    sizeof(data->buf),\
+                                                    data->buf);        \
+       }                                                               \
+                                                                       \
+       if (data->rlen < 0)                                             \
+               return data->rlen;                                      \
+       return simple_read_from_buffer(user_buf, count, ppos,           \
+                                      data->buf, data->rlen);          \
+}
+
+/* Free the per-open state allocated by the MLD_DEBUGFS_OPEN_WRAPPER open() */
+static int _iwl_dbgfs_release(struct inode *inode, struct file *file)
+{
+       kfree(file->private_data);
+       return 0;
+}
+
+/* Read-only debugfs file ops built from the open/read wrappers above */
+#define _MLD_DEBUGFS_READ_FILE_OPS(name, buflen, argtype)              \
+MLD_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)                                \
+MLD_DEBUGFS_READ_WRAPPER(name)                                         \
+static const struct file_operations iwl_dbgfs_##name##_ops = {         \
+       .read = _iwl_dbgfs_##name##_read,                               \
+       .open = _iwl_dbgfs_##name##_open,                               \
+       .llseek = generic_file_llseek,                                  \
+       .release = _iwl_dbgfs_release,                                  \
+}
+
+/* Adapter from the wiphy_locked_debugfs_write() callback signature to the
+ * driver's iwl_dbgfs_<name>_write(mld, ...) handlers (object passed via @data)
+ */
+#define WIPHY_DEBUGFS_HANDLER_WRAPPER(name)                            \
+static ssize_t iwl_dbgfs_##name##_write_handler(struct wiphy *wiphy,   \
+                                      struct file *file, char *buf,    \
+                                      size_t count, void *data)        \
+{                                                                      \
+       struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);         \
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);                 \
+       return iwl_dbgfs_##name##_write(mld, buf, count, data);         \
+}
+
+/* Resolve the iwl_mld instance from a link_sta (via its sta's vif) */
+static inline struct iwl_mld *
+iwl_mld_from_link_sta(struct ieee80211_link_sta *link_sta)
+{
+       struct ieee80211_vif *vif =
+               iwl_mld_sta_from_mac80211(link_sta->sta)->vif;
+       return iwl_mld_vif_from_mac80211(vif)->mld;
+}
+
+/* Resolve the iwl_mld instance from a bss_conf (per-link data) */
+static inline struct iwl_mld *
+iwl_mld_from_bss_conf(struct ieee80211_bss_conf *link)
+{
+       return iwl_mld_vif_from_mac80211(link->vif)->mld;
+}
+
+/* Resolve the iwl_mld instance from a vif */
+static inline struct iwl_mld *iwl_mld_from_vif(struct ieee80211_vif *vif)
+{
+       return iwl_mld_vif_from_mac80211(vif)->mld;
+}
+
+/* Generate a write() that copies user data into a stack buffer and runs the
+ * handler under the wiphy mutex; file->private_data is the ieee80211 object.
+ */
+#define WIPHY_DEBUGFS_WRITE_WRAPPER(name, bufsz, objtype)              \
+WIPHY_DEBUGFS_HANDLER_WRAPPER(name)                                    \
+static ssize_t __iwl_dbgfs_##name##_write(struct file *file,           \
+                                         const char __user *user_buf,  \
+                                         size_t count, loff_t *ppos)   \
+{                                                                      \
+       struct ieee80211_##objtype *arg = file->private_data;           \
+       struct iwl_mld *mld = iwl_mld_from_##objtype(arg);              \
+       char buf[bufsz] = {};                                           \
+                                                                       \
+       return wiphy_locked_debugfs_write(mld->wiphy, file,             \
+                               buf, sizeof(buf),                       \
+                               user_buf, count,                        \
+                               iwl_dbgfs_##name##_write_handler,       \
+                               arg);                                   \
+}
+
+/* Write-only file ops using simple_open (no per-open allocation needed) */
+#define WIPHY_DEBUGFS_WRITE_FILE_OPS(name, bufsz, objtype)             \
+       WIPHY_DEBUGFS_WRITE_WRAPPER(name, bufsz, objtype)               \
+       static const struct file_operations iwl_dbgfs_##name##_ops = {  \
+               .write = __iwl_dbgfs_##name##_write,                    \
+               .open = simple_open,                                    \
+               .llseek = generic_file_llseek,                          \
+       }
+
+/* Like WIPHY_DEBUGFS_HANDLER_WRAPPER, but for handlers that take only the
+ * mld (no extra object argument)
+ */
+#define WIPHY_DEBUGFS_HANDLER_WRAPPER_MLD(name)                                \
+static ssize_t iwl_dbgfs_##name##_write_handler(struct wiphy *wiphy,   \
+                                      struct file *file, char *buf,    \
+                                      size_t count, void *data)        \
+{                                                                      \
+       struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);         \
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);                 \
+       return iwl_dbgfs_##name##_write(mld, buf, count);               \
+}
+
+/* write() variant that uses the per-open buffer from the open wrapper
+ * (data->arg is the iwl_mld stored in inode->i_private)
+ */
+#define WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name)                          \
+WIPHY_DEBUGFS_HANDLER_WRAPPER_MLD(name)                                        \
+static ssize_t __iwl_dbgfs_##name##_write(struct file *file,           \
+                                         const char __user *user_buf,  \
+                                         size_t count, loff_t *ppos)   \
+{                                                                      \
+       struct dbgfs_##name##_data *data = file->private_data;          \
+       struct iwl_mld *mld = data->arg;                                \
+                                                                       \
+       return wiphy_locked_debugfs_write(mld->wiphy, file,             \
+                               data->buf, sizeof(data->buf),           \
+                               user_buf, count,                        \
+                               iwl_dbgfs_##name##_write_handler,       \
+                               NULL);                                  \
+}
+
+/* write-only mld-level file ops with per-open state */
+#define WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(name, bufsz)                  \
+       MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct iwl_mld)           \
+       WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name)                           \
+       static const struct file_operations iwl_dbgfs_##name##_ops = {  \
+               .write = __iwl_dbgfs_##name##_write,                    \
+               .open = _iwl_dbgfs_##name##_open,                       \
+               .llseek = generic_file_llseek,                          \
+               .release = _iwl_dbgfs_release,                          \
+       }
+
+/* read/write mld-level file ops with per-open state */
+#define WIPHY_DEBUGFS_READ_WRITE_FILE_OPS_MLD(name, bufsz)             \
+       MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct iwl_mld)           \
+       WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name)                           \
+       MLD_DEBUGFS_READ_WRAPPER(name)                                  \
+       static const struct file_operations iwl_dbgfs_##name##_ops = {  \
+               .write = __iwl_dbgfs_##name##_write,                    \
+               .read = _iwl_dbgfs_##name##_read,                       \
+               .open = _iwl_dbgfs_##name##_open,                       \
+               .llseek = generic_file_llseek,                          \
+               .release = _iwl_dbgfs_release,                          \
+       }
+
+/* write() variant for files on ieee80211 objects that also have read support
+ * (per-open state holds the object; a separate stack buffer is used for the
+ * write so the cached read buffer is not clobbered)
+ */
+#define WIPHY_DEBUGFS_WRITE_WRAPPER_IEEE80211(name, bufsz, objtype)    \
+WIPHY_DEBUGFS_HANDLER_WRAPPER(name)                                    \
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file,            \
+                                         const char __user *user_buf,  \
+                                         size_t count, loff_t *ppos)   \
+{                                                                      \
+       struct dbgfs_##name##_data *data = file->private_data;          \
+       struct ieee80211_##objtype *arg = data->arg;                    \
+       struct iwl_mld *mld = iwl_mld_from_##objtype(arg);              \
+       char buf[bufsz] = {};                                           \
+                                                                       \
+       return wiphy_locked_debugfs_write(mld->wiphy, file,             \
+                               buf, sizeof(buf),                       \
+                               user_buf, count,                        \
+                               iwl_dbgfs_##name##_write_handler,       \
+                               arg);                                   \
+}
+
+/* read/write file ops for an ieee80211 object (vif/bss_conf/link_sta) */
+#define IEEE80211_WIPHY_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, objtype) \
+       MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct ieee80211_##objtype) \
+       WIPHY_DEBUGFS_WRITE_WRAPPER_IEEE80211(name, bufsz, objtype)       \
+       MLD_DEBUGFS_READ_WRAPPER(name)                                    \
+       static const struct file_operations iwl_dbgfs_##name##_ops = {    \
+               .write = _iwl_dbgfs_##name##_write,                       \
+               .read = _iwl_dbgfs_##name##_read,                         \
+               .open = _iwl_dbgfs_##name##_open,                         \
+               .llseek = generic_file_llseek,                            \
+               .release = _iwl_dbgfs_release,                            \
+       }
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include <linux/etherdevice.h>
+#include <linux/math64.h>
+#include <net/cfg80211.h>
+#include "mld.h"
+#include "iface.h"
+#include "phy.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "constants.h"
+#include "fw/api/location.h"
+#include "ftm-initiator.h"
+
+/* Fill the request-wide fields of a TOF range request command (flags, cookie,
+ * peer count, timeout, randomized MAC template/mask, BSSID and TSF MAC id).
+ */
+static void iwl_mld_ftm_cmd_common(struct iwl_mld *mld,
+                                  struct ieee80211_vif *vif,
+                                  struct iwl_tof_range_req_cmd *cmd,
+                                  struct cfg80211_pmsr_request *req)
+{
+       int i;
+
+       cmd->initiator_flags =
+               cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
+                           IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
+       cmd->request_id = req->cookie;
+       cmd->num_of_ap = req->n_peers;
+
+       /* Use a large value for "no timeout". Don't use the maximum value
+        * because of fw limitations.
+        */
+       if (req->timeout)
+               cmd->req_timeout_ms = cpu_to_le32(min(req->timeout, 0xfffff));
+       else
+               cmd->req_timeout_ms = cpu_to_le32(0xfffff);
+
+       /* fw mask semantics are inverted relative to cfg80211's, hence ~ */
+       memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
+       for (i = 0; i < ETH_ALEN; i++)
+               cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
+
+       if (vif->cfg.assoc) {
+               memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
+
+               /* AP's TSF is only relevant if associated */
+               for (i = 0; i < req->n_peers; i++) {
+                       if (req->peers[i].report_ap_tsf) {
+                               struct iwl_mld_vif *mld_vif =
+                                       iwl_mld_vif_from_mac80211(vif);
+
+                               cmd->tsf_mac_id = cpu_to_le32(mld_vif->fw_id);
+                               return;
+                       }
+               }
+       } else {
+               eth_broadcast_addr(cmd->range_req_bssid);
+       }
+
+       /* Don't report AP's TSF */
+       cmd->tsf_mac_id = cpu_to_le32(0xff);
+}
+
+/* Fill the channel/bandwidth-related fields of an FTM target entry from the
+ * peer's chandef: channel number, frame format + bandwidth, control channel
+ * position and band. Returns 0 on success or -EINVAL for an unsupported
+ * channel width.
+ */
+static int
+iwl_mld_ftm_set_target_chandef(struct iwl_mld *mld,
+                              struct cfg80211_pmsr_request_peer *peer,
+                              struct iwl_tof_range_req_ap_entry *target)
+{
+       u32 freq = peer->chandef.chan->center_freq;
+
+       target->channel_num = ieee80211_frequency_to_channel(freq);
+
+       /* format_bw packs the frame format in the low bits and the
+        * bandwidth at LOCATION_BW_POS
+        */
+       switch (peer->chandef.width) {
+       case NL80211_CHAN_WIDTH_20_NOHT:
+               target->format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
+               target->format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+               break;
+       case NL80211_CHAN_WIDTH_20:
+               target->format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+               target->format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+               break;
+       case NL80211_CHAN_WIDTH_40:
+               target->format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+               target->format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
+               break;
+       case NL80211_CHAN_WIDTH_80:
+               target->format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
+               target->format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
+               break;
+       case NL80211_CHAN_WIDTH_160:
+               target->format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
+               target->format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
+               break;
+       default:
+               IWL_ERR(mld, "Unsupported BW in FTM request (%d)\n",
+                       peer->chandef.width);
+               return -EINVAL;
+       }
+
+       /* non EDCA based measurement must use HE preamble */
+       if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
+               target->format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;
+
+       target->ctrl_ch_position =
+               (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
+               iwl_mld_get_fw_ctrl_pos(&peer->chandef) : 0;
+
+       target->band = iwl_mld_nl80211_band_to_fw(peer->chandef.chan->band);
+       return 0;
+}
+
+/* OR an IWL_INITIATOR_AP_FLAGS_* bit into the local "target" entry; relies on
+ * a variable named target being in scope at the expansion site
+ */
+#define FTM_SET_FLAG(flag) (target->initiator_ap_flags |= \
+                           cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
+
+/* Translate the peer's FTM request parameters and the driver's compile-time
+ * defaults into the target entry's initiator_ap_flags bitmap.
+ */
+static void
+iwl_mld_ftm_set_target_flags(struct iwl_mld *mld,
+                            struct cfg80211_pmsr_request_peer *peer,
+                            struct iwl_tof_range_req_ap_entry *target)
+{
+       target->initiator_ap_flags = cpu_to_le32(0);
+
+       if (peer->ftm.asap)
+               FTM_SET_FLAG(ASAP);
+
+       if (peer->ftm.request_lci)
+               FTM_SET_FLAG(LCI_REQUEST);
+
+       if (peer->ftm.request_civicloc)
+               FTM_SET_FLAG(CIVIC_REQUEST);
+
+       if (IWL_MLD_FTM_INITIATOR_DYNACK)
+               FTM_SET_FLAG(DYN_ACK);
+
+       /* algorithm selection is a build-time driver constant */
+       if (IWL_MLD_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
+               FTM_SET_FLAG(ALGO_LR);
+       else if (IWL_MLD_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
+               FTM_SET_FLAG(ALGO_FFT);
+
+       /* trigger-based and non-trigger-based are mutually exclusive */
+       if (peer->ftm.trigger_based)
+               FTM_SET_FLAG(TB);
+       else if (peer->ftm.non_trigger_based)
+               FTM_SET_FLAG(NON_TB);
+
+       /* LMR feedback is only valid for TB/NTB measurements */
+       if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
+           peer->ftm.lmr_feedback)
+               FTM_SET_FLAG(LMR_FEEDBACK);
+}
+
+/* Set the target's fw station id when measuring against our own AP while
+ * associated (IWL_INVALID_STA otherwise), and request PMF protection for
+ * TB/NTB measurements when the AP station uses management frame protection.
+ */
+static void iwl_mld_ftm_set_sta(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                               struct cfg80211_pmsr_request_peer *peer,
+                               struct iwl_tof_range_req_ap_entry *target)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       u32 sta_id_mask;
+
+       target->sta_id = IWL_INVALID_STA;
+
+       /* TODO: add ftm_unprotected debugfs support */
+
+       if (!vif->cfg.assoc || !mld_vif->ap_sta)
+               return;
+
+       /* exactly one fw station is expected for the AP here */
+       sta_id_mask = iwl_mld_fw_sta_id_mask(mld, mld_vif->ap_sta);
+       if (WARN_ON(hweight32(sta_id_mask) != 1))
+               return;
+
+       target->sta_id = __ffs(sta_id_mask);
+
+       if (mld_vif->ap_sta->mfp &&
+           (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
+               FTM_SET_FLAG(PMF);
+}
+
+/* Fill one TOF range request AP entry for @peer: channel, addressing, burst
+ * parameters, flags, station id and NDP (LTF/STS) parameters. Returns 0 or a
+ * negative errno from the chandef translation.
+ */
+static int
+iwl_mld_ftm_set_target(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                      struct cfg80211_pmsr_request_peer *peer,
+                      struct iwl_tof_range_req_ap_entry *target)
+{
+       u32 i2r_max_sts;
+       int ret;
+
+       ret = iwl_mld_ftm_set_target_chandef(mld, peer, target);
+       if (ret)
+               return ret;
+
+       memcpy(target->bssid, peer->addr, ETH_ALEN);
+       target->burst_period = cpu_to_le16(peer->ftm.burst_period);
+       target->samples_per_burst = peer->ftm.ftms_per_burst;
+       target->num_of_bursts = peer->ftm.num_bursts_exp;
+       iwl_mld_ftm_set_target_flags(mld, peer, target);
+       iwl_mld_ftm_set_sta(mld, vif, peer, target);
+
+       /* TODO: add secured ranging support */
+
+       /* cap I2R STS to 1 (driver default may be larger) */
+       i2r_max_sts = IWL_MLD_FTM_I2R_MAX_STS > 1 ? 1 :
+               IWL_MLD_FTM_I2R_MAX_STS;
+
+       target->r2i_ndp_params = IWL_MLD_FTM_R2I_MAX_REP |
+               (IWL_MLD_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) |
+               (IWL_MLD_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
+       target->i2r_ndp_params = IWL_MLD_FTM_I2R_MAX_REP |
+               (i2r_max_sts << IWL_LOCATION_MAX_STS_POS) |
+               (IWL_MLD_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
+
+       if (peer->ftm.non_trigger_based) {
+               target->min_time_between_msr =
+                       cpu_to_le16(IWL_MLD_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
+               /* for non-TB, burst_period carries the max time between
+                * measurements instead — this overrides the value set above
+                */
+               target->burst_period =
+                       cpu_to_le16(IWL_MLD_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
+       } else {
+               target->min_time_between_msr = cpu_to_le16(0);
+       }
+
+       /* TODO: Beacon interval is currently unknown, so use the common value
+        * of 100 TUs.
+        */
+       target->beacon_interval = cpu_to_le16(100);
+
+       return 0;
+}
+
+/* Build and send a TOF range request for @req. Only one request may be in
+ * flight (-EBUSY otherwise); on success the request and its wdev are recorded
+ * so the response notification can be matched and reported.
+ */
+int iwl_mld_ftm_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                     struct cfg80211_pmsr_request *req)
+{
+       struct iwl_tof_range_req_cmd cmd;
+       struct iwl_host_cmd hcmd = {
+               .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+               /* cmd lives on the stack - the transport must copy it */
+               .dataflags[0] = IWL_HCMD_DFL_DUP,
+               .data[0] = &cmd,
+               .len[0] = sizeof(cmd),
+       };
+       u8 i;
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (mld->ftm_initiator.req)
+               return -EBUSY;
+
+       if (req->n_peers > ARRAY_SIZE(cmd.ap))
+               return -EINVAL;
+
+       memset(&cmd, 0, sizeof(cmd));
+
+       iwl_mld_ftm_cmd_common(mld, vif, (void *)&cmd, req);
+
+       for (i = 0; i < cmd.num_of_ap; i++) {
+               struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+               struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];
+
+               ret = iwl_mld_ftm_set_target(mld, vif, peer, target);
+               if (ret)
+                       return ret;
+       }
+
+       /* TODO: get the status from the response*/
+       ret = iwl_mld_send_cmd(mld, &hcmd);
+       if (!ret) {
+               mld->ftm_initiator.req = req;
+               mld->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
+       }
+
+       return ret;
+}
+
+/* Forget the in-flight FTM request and clear all per-peer response counters */
+static void iwl_mld_ftm_reset(struct iwl_mld *mld)
+{
+       lockdep_assert_wiphy(mld->wiphy);
+
+       mld->ftm_initiator.req = NULL;
+       mld->ftm_initiator.req_wdev = NULL;
+       memset(mld->ftm_initiator.responses, 0,
+              sizeof(mld->ftm_initiator.responses));
+}
+
+/* Sanity-check a firmware range response against the pending request:
+ * the request id must match the cookie (truncated to u8) and the AP count
+ * must not exceed the request's peers or the fw maximum. Returns 0 if valid.
+ */
+static int iwl_mld_ftm_range_resp_valid(struct iwl_mld *mld, u8 request_id,
+                                       u8 num_of_aps)
+{
+       if (IWL_FW_CHECK(mld, request_id != (u8)mld->ftm_initiator.req->cookie,
+                        "Request ID mismatch, got %u, active %u\n",
+                        request_id, (u8)mld->ftm_initiator.req->cookie))
+               return -EINVAL;
+
+       if (IWL_FW_CHECK(mld, num_of_aps > mld->ftm_initiator.req->n_peers ||
+                        num_of_aps > IWL_TOF_MAX_APS,
+                        "FTM range response: invalid num of APs (%u)\n",
+                        num_of_aps))
+               return -EINVAL;
+
+       return 0;
+}
+
+/* Return the index of the peer in @req whose MAC address equals @addr,
+ * or -ENOENT if no peer matches.
+ */
+static int iwl_mld_ftm_find_peer(struct cfg80211_pmsr_request *req,
+                                const u8 *addr)
+{
+       int idx;
+
+       for (idx = 0; idx < req->n_peers; idx++) {
+               if (ether_addr_equal_unaligned(req->peers[idx].addr, addr))
+                       return idx;
+       }
+
+       return -ENOENT;
+}
+
+/* Dump one measurement result to the debug log.
+ * rtt_avg * 100 / 6666 converts the RTT to a distance — presumably RTT is in
+ * picoseconds and the result is one-way distance in cm (d = rtt * c / 2);
+ * TODO confirm units against the fw API definition.
+ */
+static void iwl_mld_debug_range_resp(struct iwl_mld *mld, u8 index,
+                                    struct cfg80211_pmsr_result *res)
+{
+       s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
+
+       IWL_DEBUG_INFO(mld, "entry %d\n", index);
+       IWL_DEBUG_INFO(mld, "\tstatus: %d\n", res->status);
+       IWL_DEBUG_INFO(mld, "\tBSSID: %pM\n", res->addr);
+       IWL_DEBUG_INFO(mld, "\thost time: %llu\n", res->host_time);
+       IWL_DEBUG_INFO(mld, "\tburst index: %d\n", res->ftm.burst_index);
+       IWL_DEBUG_INFO(mld, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
+       IWL_DEBUG_INFO(mld, "\trssi: %d\n", res->ftm.rssi_avg);
+       IWL_DEBUG_INFO(mld, "\trssi spread: %d\n", res->ftm.rssi_spread);
+       IWL_DEBUG_INFO(mld, "\trtt: %lld\n", res->ftm.rtt_avg);
+       IWL_DEBUG_INFO(mld, "\trtt var: %llu\n", res->ftm.rtt_variance);
+       IWL_DEBUG_INFO(mld, "\trtt spread: %llu\n", res->ftm.rtt_spread);
+       IWL_DEBUG_INFO(mld, "\tdistance: %lld\n", rtt_avg);
+}
+
+/* Handle a firmware range response notification: validate it against the
+ * pending request, translate every AP entry into a cfg80211_pmsr_result and
+ * report it, then complete the request if this was the last batch.
+ */
+void iwl_mld_handle_ftm_resp_notif(struct iwl_mld *mld,
+                                  struct iwl_rx_packet *pkt)
+{
+       struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data;
+       u8 num_of_aps, last_in_batch;
+
+       if (IWL_FW_CHECK(mld, !mld->ftm_initiator.req,
+                        "FTM response without a pending request\n"))
+               return;
+
+       if (iwl_mld_ftm_range_resp_valid(mld, fw_resp->request_id,
+                                        fw_resp->num_of_aps))
+               return;
+
+       num_of_aps = fw_resp->num_of_aps;
+       last_in_batch = fw_resp->last_report;
+
+       IWL_DEBUG_INFO(mld, "Range response received\n");
+       IWL_DEBUG_INFO(mld, "request id: %llu, num of entries: %u\n",
+                      mld->ftm_initiator.req->cookie, num_of_aps);
+
+       for (int i = 0; i < num_of_aps; i++) {
+               struct cfg80211_pmsr_result result = {};
+               struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap;
+               int peer_idx;
+
+               fw_ap = &fw_resp->ap[i];
+               result.final = fw_ap->last_burst;
+               result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
+               result.ap_tsf_valid = 1;
+
+               /* match the entry back to the requested peer by BSSID */
+               peer_idx = iwl_mld_ftm_find_peer(mld->ftm_initiator.req,
+                                                fw_ap->bssid);
+               if (peer_idx < 0) {
+                       IWL_WARN(mld,
+                                "Unknown address (%pM, target #%d) in FTM response\n",
+                                fw_ap->bssid, i);
+                       continue;
+               }
+
+               /* map the fw per-entry status onto nl80211 pmsr status */
+               switch (fw_ap->measure_status) {
+               case IWL_TOF_ENTRY_SUCCESS:
+                       result.status = NL80211_PMSR_STATUS_SUCCESS;
+                       break;
+               case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
+                       result.status = NL80211_PMSR_STATUS_TIMEOUT;
+                       break;
+               case IWL_TOF_ENTRY_NO_RESPONSE:
+                       result.status = NL80211_PMSR_STATUS_FAILURE;
+                       result.ftm.failure_reason =
+                               NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
+                       break;
+               case IWL_TOF_ENTRY_REQUEST_REJECTED:
+                       result.status = NL80211_PMSR_STATUS_FAILURE;
+                       result.ftm.failure_reason =
+                               NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
+                       result.ftm.busy_retry_time = fw_ap->refusal_period;
+                       break;
+               default:
+                       result.status = NL80211_PMSR_STATUS_FAILURE;
+                       result.ftm.failure_reason =
+                               NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
+                       break;
+               }
+               memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
+
+               /* TODO: convert the timestamp from the result to systime */
+               result.host_time = ktime_get_boottime_ns();
+
+               result.type = NL80211_PMSR_TYPE_FTM;
+               /* burst index counts responses seen so far for this peer */
+               result.ftm.burst_index = mld->ftm_initiator.responses[peer_idx];
+               mld->ftm_initiator.responses[peer_idx]++;
+               result.ftm.rssi_avg = fw_ap->rssi;
+               result.ftm.rssi_avg_valid = 1;
+               result.ftm.rssi_spread = fw_ap->rssi_spread;
+               result.ftm.rssi_spread_valid = 1;
+               result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
+               result.ftm.rtt_avg_valid = 1;
+               result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
+               result.ftm.rtt_variance_valid = 1;
+               result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
+               result.ftm.rtt_spread_valid = 1;
+
+               cfg80211_pmsr_report(mld->ftm_initiator.req_wdev,
+                                    mld->ftm_initiator.req,
+                                    &result, GFP_KERNEL);
+
+               if (fw_has_api(&mld->fw->ucode_capa,
+                              IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
+                       IWL_DEBUG_INFO(mld, "RTT confidence: %u\n",
+                                      fw_ap->rttConfidence);
+
+               iwl_mld_debug_range_resp(mld, i, &result);
+       }
+
+       if (last_in_batch) {
+               cfg80211_pmsr_complete(mld->ftm_initiator.req_wdev,
+                                      mld->ftm_initiator.req,
+                                      GFP_KERNEL);
+               iwl_mld_ftm_reset(mld);
+       }
+}
+
+/* Abort a pending FTM request after a firmware restart: report a
+ * failure result for every peer in the request, complete the pmsr
+ * request towards cfg80211 and reset the initiator state, so
+ * userspace is not left waiting forever.
+ */
+void iwl_mld_ftm_restart_cleanup(struct iwl_mld *mld)
+{
+       struct cfg80211_pmsr_result result = {
+               .status = NL80211_PMSR_STATUS_FAILURE,
+               .final = 1,
+               .host_time = ktime_get_boottime_ns(),
+               .type = NL80211_PMSR_TYPE_FTM,
+       };
+
+       /* Nothing to do if no FTM request is in flight */
+       if (!mld->ftm_initiator.req)
+               return;
+
+       for (int i = 0; i < mld->ftm_initiator.req->n_peers; i++) {
+               memcpy(result.addr, mld->ftm_initiator.req->peers[i].addr,
+                      ETH_ALEN);
+
+               cfg80211_pmsr_report(mld->ftm_initiator.req_wdev,
+                                    mld->ftm_initiator.req,
+                                    &result, GFP_KERNEL);
+       }
+
+       cfg80211_pmsr_complete(mld->ftm_initiator.req_wdev,
+                              mld->ftm_initiator.req, GFP_KERNEL);
+       iwl_mld_ftm_reset(mld);
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#ifndef __iwl_mld_ftm_initiator_h__
+#define __iwl_mld_ftm_initiator_h__
+
+/* Start an FTM (fine timing measurement) request for the peers in @req */
+int iwl_mld_ftm_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                     struct cfg80211_pmsr_request *req);
+
+/* Handle a range-response notification from the firmware */
+void iwl_mld_handle_ftm_resp_notif(struct iwl_mld *mld,
+                                  struct iwl_rx_packet *pkt);
+/* Fail and complete any pending FTM request (called on fw restart) */
+void iwl_mld_ftm_restart_cleanup(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_ftm_initiator_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "mld.h"
+
+#include "fw/api/alive.h"
+#include "fw/api/scan.h"
+#include "fw/api/rx.h"
+#include "fw/dbg.h"
+#include "fw/pnvm.h"
+#include "hcmd.h"
+#include "iwl-nvm-parse.h"
+#include "power.h"
+#include "mcc.h"
+#include "led.h"
+#include "coex.h"
+#include "regulatory.h"
+#include "thermal.h"
+
+/* Tell the firmware which TX antennas are valid to use. */
+static int iwl_mld_send_tx_ant_cfg(struct iwl_mld *mld)
+{
+       struct iwl_tx_ant_cfg_cmd cmd;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       cmd.valid = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld));
+
+       /* NOTE(review): cmd.valid is __le32; printing it with %u only
+        * matches host byte order on little-endian hosts.
+        */
+       IWL_DEBUG_FW(mld, "select valid tx ant: %u\n", cmd.valid);
+
+       return iwl_mld_send_cmd_pdu(mld, TX_ANT_CONFIGURATION_CMD, &cmd);
+}
+
+/* Enable RSS in the firmware: hash TCP/UDP/payload for v4 and v6 and
+ * spread the resulting flows over all RX queues except queue 0.
+ */
+static int iwl_mld_send_rss_cfg_cmd(struct iwl_mld *mld)
+{
+       struct iwl_rss_config_cmd cmd = {
+               .flags = cpu_to_le32(IWL_RSS_ENABLE),
+               .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
+                            BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
+                            BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
+                            BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
+                            BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
+                            BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
+       };
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* Do not direct RSS traffic to Q 0 which is our fallback queue */
+       for (int i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
+               cmd.indirection_table[i] =
+                       1 + (i % (mld->trans->num_rx_queues - 1));
+       netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
+
+       return iwl_mld_send_cmd_pdu(mld, RSS_CONFIG_CMD, &cmd);
+}
+
+/* Send the static scan configuration (valid TX/RX chains) to the fw. */
+static int iwl_mld_config_scan(struct iwl_mld *mld)
+{
+       struct iwl_scan_config cmd = {
+               .tx_chains = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld)),
+               .rx_chains = cpu_to_le32(iwl_mld_get_valid_rx_ant(mld))
+       };
+
+       return iwl_mld_send_cmd_pdu(mld, WIDE_ID(LONG_GROUP, SCAN_CFG_CMD),
+                                   &cmd);
+}
+
+/* Record the IMR (isolated memory region) debug info advertised in the
+ * alive notification. If the firmware reports IMR as disabled, mark the
+ * DRAM IMR dump region as unsupported so it is skipped when collecting
+ * debug data.
+ */
+static void iwl_mld_alive_imr_data(struct iwl_trans *trans,
+                                  const struct iwl_imr_alive_info *imr_info)
+{
+       struct iwl_imr_data *imr_data = &trans->dbg.imr_data;
+
+       imr_data->imr_enable = le32_to_cpu(imr_info->enabled);
+       imr_data->imr_size = le32_to_cpu(imr_info->size);
+       imr_data->imr2sram_remainbyte = imr_data->imr_size;
+       imr_data->imr_base_addr = imr_info->base_addr;
+       imr_data->imr_curr_addr = le64_to_cpu(imr_data->imr_base_addr);
+
+       /* IMR enabled - nothing further to do */
+       if (imr_data->imr_enable)
+               return;
+
+       for (int i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
+               struct iwl_fw_ini_region_tlv *reg;
+
+               if (!trans->dbg.active_regions[i])
+                       continue;
+
+               reg = (void *)trans->dbg.active_regions[i]->data;
+
+               /* We have only one DRAM IMR region, so we
+                * can break as soon as we find the first
+                * one.
+                */
+               if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
+                       trans->dbg.unsupported_region_msk |= BIT(i);
+                       break;
+               }
+       }
+}
+
+/* Notification-wait handler for UCODE_ALIVE_NTFY.
+ *
+ * Parses the v6/v7 alive notification: records sku_id, IMR info and
+ * LMAC/UMAC error-table pointers on the transport, and stores in *data
+ * (bool) whether the firmware reported IWL_ALIVE_STATUS_OK.
+ *
+ * Returns true when the packet was a recognized alive notification
+ * (which stops the wait), false to keep waiting.
+ */
+static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+                        struct iwl_rx_packet *pkt, void *data)
+{
+       unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+       struct iwl_mld *mld =
+               container_of(notif_wait, struct iwl_mld, notif_wait);
+       struct iwl_trans *trans = mld->trans;
+       u32 version = iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP,
+                                             UCODE_ALIVE_NTFY, 0);
+       struct iwl_alive_ntf_v6 *palive;
+       bool *alive_valid = data;
+       struct iwl_umac_alive *umac;
+       struct iwl_lmac_alive *lmac1;
+       struct iwl_lmac_alive *lmac2 = NULL;
+       u32 lmac_error_event_table;
+       u32 umac_error_table;
+       u16 status;
+
+       /* Only alive notification versions 6/7 are supported here */
+       if (version < 6 || version > 7 || pkt_len != sizeof(*palive))
+               return false;
+
+       palive = (void *)pkt->data;
+
+       iwl_mld_alive_imr_data(trans, &palive->imr);
+
+       umac = &palive->umac_data;
+       lmac1 = &palive->lmac_data[0];
+       lmac2 = &palive->lmac_data[1];
+       status = le16_to_cpu(palive->status);
+
+       trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
+       trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
+       trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);
+
+       IWL_DEBUG_FW(mld, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
+                    trans->sku_id[0], trans->sku_id[1], trans->sku_id[2]);
+
+       lmac_error_event_table =
+               le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
+       iwl_fw_lmac1_set_alive_err_table(trans, lmac_error_event_table);
+
+       /* NOTE(review): lmac2 is assigned unconditionally above, so the
+        * "if (lmac2)" checks here and below are always true.
+        */
+       if (lmac2)
+               trans->dbg.lmac_error_event_table[1] =
+                       le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);
+
+       /* Strip the cache-control bit to get the real address */
+       umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
+               ~FW_ADDR_CACHE_CONTROL;
+
+       if (umac_error_table >= trans->cfg->min_umac_error_event_table)
+               iwl_fw_umac_set_alive_err_table(trans, umac_error_table);
+       else
+               IWL_ERR(mld, "Not valid error log pointer 0x%08X\n",
+                       umac_error_table);
+
+       *alive_valid = status == IWL_ALIVE_STATUS_OK;
+
+       IWL_DEBUG_FW(mld,
+                    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
+                    status, lmac1->ver_type, lmac1->ver_subtype);
+
+       if (lmac2)
+               IWL_DEBUG_FW(mld, "Alive ucode CDB\n");
+
+       IWL_DEBUG_FW(mld,
+                    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+                    le32_to_cpu(umac->umac_major),
+                    le32_to_cpu(umac->umac_minor));
+
+       /* The flags field only exists from notification version 7 */
+       if (version >= 7)
+               IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n",
+                            le16_to_cpu(palive->flags));
+
+       iwl_fwrt_update_fw_versions(&mld->fwrt, lmac1, umac);
+
+       return true;
+}
+
+#define MLD_ALIVE_TIMEOUT              (2 * HZ)
+#define MLD_INIT_COMPLETE_TIMEOUT      (2 * HZ)
+
+/* Dump secure-boot status, a few WFPM/HPM/OTP registers and the
+ * firmware program counters, to help debug a missing alive
+ * notification.
+ */
+static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld)
+{
+       struct iwl_trans *trans = mld->trans;
+       struct iwl_pc_data *pc_data;
+       u8 count;
+
+       IWL_ERR(mld,
+               "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+               iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
+               iwl_read_umac_prph(trans,
+                                  UMAG_SB_CPU_2_STATUS));
+#define IWL_FW_PRINT_REG_INFO(reg_name) \
+       IWL_ERR(mld, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))
+
+       IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);
+
+       IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);
+
+       /* print OTP info */
+       IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
+       IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
+#undef IWL_FW_PRINT_REG_INFO
+
+       pc_data = trans->dbg.pc_data;
+       for (count = 0; count < trans->dbg.num_pc; count++, pc_data++)
+               IWL_ERR(mld, "%s: 0x%x\n", pc_data->pc_name,
+                       pc_data->pc_address);
+}
+
+/* Load the regular ucode image and wait up to MLD_ALIVE_TIMEOUT for
+ * the alive notification (parsed by iwl_alive_fn). On any failure the
+ * device is stopped and a negative errno is returned; on timeout,
+ * debug data is collected first.
+ */
+static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld)
+{
+       const struct fw_img *fw =
+               iwl_get_ucode_image(mld->fw, IWL_UCODE_REGULAR);
+       static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
+       struct iwl_notification_wait alive_wait;
+       bool alive_valid = false;
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* Register the wait before starting the fw so the notification
+        * cannot be missed.
+        */
+       iwl_init_notification_wait(&mld->notif_wait, &alive_wait,
+                                  alive_cmd, ARRAY_SIZE(alive_cmd),
+                                  iwl_alive_fn, &alive_valid);
+
+       iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
+
+       ret = iwl_trans_start_fw(mld->trans, fw, true);
+       if (ret) {
+               iwl_remove_notification(&mld->notif_wait, &alive_wait);
+               return ret;
+       }
+
+       ret = iwl_wait_notification(&mld->notif_wait, &alive_wait,
+                                   MLD_ALIVE_TIMEOUT);
+
+       if (ret) {
+               if (ret == -ETIMEDOUT)
+                       iwl_fw_dbg_error_collect(&mld->fwrt,
+                                                FW_DBG_TRIGGER_ALIVE_TIMEOUT);
+               iwl_mld_print_alive_notif_timeout(mld);
+               goto alive_failure;
+       }
+
+       /* Alive arrived but firmware did not report IWL_ALIVE_STATUS_OK */
+       if (!alive_valid) {
+               IWL_ERR(mld, "Loaded firmware is not valid!\n");
+               ret = -EIO;
+               goto alive_failure;
+       }
+
+       iwl_trans_fw_alive(mld->trans, 0);
+
+       return 0;
+
+alive_failure:
+       iwl_trans_stop_device(mld->trans);
+       return ret;
+}
+
+/* Run the full firmware init sequence: load fw and wait for alive,
+ * load PNVM, send INIT_EXTENDED_CFG_CMD and wait for INIT_COMPLETE,
+ * then read the NVM (once, kept across restarts). Stops the device on
+ * any failure.
+ */
+int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
+{
+       struct iwl_notification_wait init_wait;
+       struct iwl_init_extended_cfg_cmd init_cfg = {};
+       static const u16 init_complete[] = {
+               INIT_COMPLETE_NOTIF,
+       };
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       ret = iwl_mld_load_fw_wait_alive(mld);
+       if (ret)
+               return ret;
+
+       /* Latch whether the PMU forces URM (ultra-refined mode) */
+       mld->trans->step_urm =
+               !!(iwl_read_umac_prph(mld->trans, CNVI_PMU_STEP_FLOW) &
+                  CNVI_PMU_STEP_FLOW_FORCE_URM);
+
+       ret = iwl_pnvm_load(mld->trans, &mld->notif_wait,
+                           &mld->fw->ucode_capa);
+       if (ret) {
+               IWL_ERR(mld, "Timeout waiting for PNVM load %d\n", ret);
+               goto init_failure;
+       }
+
+       iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
+                              NULL);
+
+       iwl_init_notification_wait(&mld->notif_wait,
+                                  &init_wait,
+                                  init_complete,
+                                  ARRAY_SIZE(init_complete),
+                                  NULL, NULL);
+
+       ret = iwl_mld_send_cmd_pdu(mld,
+                                  WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD),
+                                  &init_cfg);
+       if (ret) {
+               IWL_ERR(mld, "Failed to send init config command: %d\n", ret);
+               iwl_remove_notification(&mld->notif_wait, &init_wait);
+               goto init_failure;
+       }
+
+       ret = iwl_wait_notification(&mld->notif_wait, &init_wait,
+                                   MLD_INIT_COMPLETE_TIMEOUT);
+       if (ret) {
+               IWL_ERR(mld, "Failed to get INIT_COMPLETE %d\n", ret);
+               goto init_failure;
+       }
+
+       /* NVM data is read only once and reused on later (re)starts */
+       if (!mld->nvm_data) {
+               mld->nvm_data = iwl_get_nvm(mld->trans, mld->fw, 0, 0);
+               if (IS_ERR(mld->nvm_data)) {
+                       ret = PTR_ERR(mld->nvm_data);
+                       mld->nvm_data = NULL;
+                       IWL_ERR(mld, "Failed to read NVM: %d\n", ret);
+                       goto init_failure;
+               }
+       }
+
+       return 0;
+
+init_failure:
+       iwl_trans_stop_device(mld->trans);
+       return ret;
+}
+
+/* Bring up the hardware and run the fw init sequence; on success mark
+ * the firmware as running. Returns 0 or a negative errno.
+ */
+int iwl_mld_load_fw(struct iwl_mld *mld)
+{
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       ret = iwl_trans_start_hw(mld->trans);
+       if (ret)
+               return ret;
+
+       ret = iwl_mld_run_fw_init_sequence(mld);
+       if (ret)
+               return ret;
+
+       mld->fw_status.running = true;
+
+       return 0;
+}
+
+/* Stop the firmware: abort any pending notification waits, stop debug
+ * recording synchronously, stop the device and clear the running flag.
+ */
+void iwl_mld_stop_fw(struct iwl_mld *mld)
+{
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* Unblock anyone waiting for a notification that will never come */
+       iwl_abort_notification_waits(&mld->notif_wait);
+
+       iwl_fw_dbg_stop_sync(&mld->fwrt);
+
+       iwl_trans_stop_device(mld->trans);
+
+       mld->fw_status.running = false;
+}
+
+/* Interface iterator: force station interfaces to disconnect after a
+ * hw restart (used when the recovery blob was rejected by fw).
+ */
+static void iwl_mld_restart_disconnect_iter(void *data, u8 *mac,
+                                           struct ieee80211_vif *vif)
+{
+       if (vif->type == NL80211_IFTYPE_STATION)
+               ieee80211_hw_restart_disconnect(vif);
+}
+
+/* Send FW_ERROR_RECOVERY_CMD with the given @flags.
+ *
+ * With ERROR_RECOVERY_UPDATE_DB, the error-recovery blob saved at NIC
+ * error time is attached (NOCOPY) and the firmware response is checked:
+ * a non-zero response means the blob was rejected, in which case all
+ * station interfaces are forced to disconnect. The recovery buffer is
+ * freed in all cases once the command has been sent.
+ */
+void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags)
+{
+       u32 error_log_size = mld->fw->ucode_capa.error_log_size;
+       struct iwl_fw_error_recovery_cmd recovery_cmd = {
+               .flags = cpu_to_le32(flags),
+       };
+       struct iwl_host_cmd cmd = {
+               .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
+               .flags = CMD_WANT_SKB,
+               .data = {&recovery_cmd, },
+               .len = {sizeof(recovery_cmd), },
+       };
+       int ret;
+
+       /* no error log was defined in TLV */
+       if (!error_log_size)
+               return;
+
+       if (flags & ERROR_RECOVERY_UPDATE_DB) {
+               /* no buf was allocated upon NIC error */
+               if (!mld->error_recovery_buf)
+                       return;
+
+               cmd.data[1] = mld->error_recovery_buf;
+               cmd.len[1] =  error_log_size;
+               cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
+               recovery_cmd.buf_size = cpu_to_le32(error_log_size);
+       }
+
+       ret = iwl_mld_send_cmd(mld, &cmd);
+
+       /* we no longer need the recovery buffer */
+       kfree(mld->error_recovery_buf);
+       mld->error_recovery_buf = NULL;
+
+       if (ret) {
+               IWL_ERR(mld, "Failed to send recovery cmd %d\n", ret);
+               return;
+       }
+
+       if (flags & ERROR_RECOVERY_UPDATE_DB) {
+               struct iwl_rx_packet *pkt = cmd.resp_pkt;
+               u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+               u32 resp;
+
+               if (IWL_FW_CHECK(mld, pkt_len != sizeof(resp),
+                                "Unexpected recovery cmd response size %u (expected %zu)\n",
+                                pkt_len, sizeof(resp)))
+                       goto out;
+
+               /* Non-zero response means fw rejected the recovery blob */
+               resp = le32_to_cpup((__le32 *)cmd.resp_pkt->data);
+               if (!resp)
+                       goto out;
+
+               IWL_ERR(mld,
+                       "Failed to send recovery cmd blob was invalid %d\n",
+                       resp);
+
+               ieee80211_iterate_interfaces(mld->hw, 0,
+                                            iwl_mld_restart_disconnect_iter,
+                                            NULL);
+       }
+
+out:
+       iwl_free_resp(&cmd);
+}
+
+/* Send the post-alive configuration commands: antennas, BT coex, SoC
+ * latency, LARI, thermal thresholds (and cTDP when CONFIG_THERMAL),
+ * RX queues, RSS, scan config, device power, MCC, LED and the various
+ * regulatory tables (PPAG/SAR/SGOM/TAS/UATS). In a hw restart, also
+ * replays the recovery blob and time-sync config.
+ * Returns 0 or the first failing command's errno.
+ */
+static int iwl_mld_config_fw(struct iwl_mld *mld)
+{
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       iwl_fw_disable_dbg_asserts(&mld->fwrt);
+       iwl_get_shared_mem_conf(&mld->fwrt);
+
+       ret = iwl_mld_send_tx_ant_cfg(mld);
+       if (ret)
+               return ret;
+
+       ret = iwl_mld_send_bt_init_conf(mld);
+       if (ret)
+               return ret;
+
+       ret = iwl_set_soc_latency(&mld->fwrt);
+       if (ret)
+               return ret;
+
+       iwl_mld_configure_lari(mld);
+
+       ret = iwl_mld_config_temp_report_ths(mld);
+       if (ret)
+               return ret;
+
+#ifdef CONFIG_THERMAL
+       ret = iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
+                                 CTDP_CMD_OPERATION_START);
+       if (ret)
+               return ret;
+#endif
+
+       ret = iwl_configure_rxq(&mld->fwrt);
+       if (ret)
+               return ret;
+
+       ret = iwl_mld_send_rss_cfg_cmd(mld);
+       if (ret)
+               return ret;
+
+       ret = iwl_mld_config_scan(mld);
+       if (ret)
+               return ret;
+
+       ret = iwl_mld_update_device_power(mld, false);
+       if (ret)
+               return ret;
+
+       /* Only needed when recovering from a firmware error */
+       if (mld->fw_status.in_hw_restart) {
+               iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_UPDATE_DB);
+               iwl_mld_time_sync_fw_config(mld);
+       }
+
+       iwl_mld_led_config_fw(mld);
+
+       ret = iwl_mld_init_mcc(mld);
+       if (ret)
+               return ret;
+
+       ret = iwl_mld_init_ppag(mld);
+       if (ret)
+               return ret;
+
+       ret = iwl_mld_init_sar(mld);
+       if (ret)
+               return ret;
+
+       ret = iwl_mld_init_sgom(mld);
+       if (ret)
+               return ret;
+
+       iwl_mld_init_tas(mld);
+       iwl_mld_init_uats(mld);
+
+       return 0;
+}
+
+/* Load and fully configure the firmware. On any failure debug data may
+ * be collected and the firmware is stopped again; returns 0 or errno.
+ */
+int iwl_mld_start_fw(struct iwl_mld *mld)
+{
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       ret = iwl_mld_load_fw(mld);
+       if (IWL_FW_CHECK(mld, ret, "Failed to start firmware %d\n", ret)) {
+               iwl_fw_dbg_error_collect(&mld->fwrt, FW_DBG_TRIGGER_DRIVER);
+               goto error;
+       }
+
+       IWL_DEBUG_INFO(mld, "uCode started.\n");
+
+       ret = iwl_mld_config_fw(mld);
+       if (ret)
+               goto error;
+
+       return 0;
+
+error:
+       iwl_mld_stop_fw(mld);
+       return ret;
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_hcmd_h__
+#define __iwl_mld_hcmd_h__
+
+/* Send a host command to the firmware. Refuses to send while in d3;
+ * synchronous (non-CMD_ASYNC) commands must hold the wiphy lock.
+ */
+static inline int iwl_mld_send_cmd(struct iwl_mld *mld, struct iwl_host_cmd *cmd)
+{
+       /* No commands, including the d3 related commands, should be sent
+        * after entering d3
+        */
+       if (WARN_ON(mld->fw_status.in_d3))
+               return -EIO;
+
+       if (!(cmd->flags & CMD_ASYNC))
+               lockdep_assert_wiphy(mld->wiphy);
+
+       /* Devices that need to shutdown immediately on rfkill are not
+        * supported, so we can send all the cmds in rfkill
+        */
+       cmd->flags |= CMD_SEND_IN_RFKILL;
+
+       return iwl_mld_send_cmd(mld->trans, cmd);
+}
+
+/* Build and send a simple PDU command with the given flags/payload */
+static inline int
+__iwl_mld_send_cmd_with_flags_pdu(struct iwl_mld *mld, u32 id,
+                                 u32 flags, const void *data, u16 len)
+{
+       struct iwl_host_cmd cmd = {
+               .id = id,
+               .len = { data ? len : 0, },
+               .data = { data, },
+               .flags = flags,
+       };
+
+       return iwl_mld_send_cmd(mld, &cmd);
+}
+
+/* The macro pair below makes the length argument optional: when it is
+ * omitted, sizeof(*(data)) is used (the extra sizeof lands in
+ * "ignored" when the caller does pass an explicit length).
+ */
+#define _iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len,    \
+                                        ignored...)                    \
+       __iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len)
+#define iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len...)  \
+       _iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, ##len,   \
+                                        sizeof(*(data)))
+
+/* Send a PDU command with no special flags */
+#define iwl_mld_send_cmd_pdu(mld, id, ...)                             \
+       iwl_mld_send_cmd_with_flags_pdu(mld, id, 0, __VA_ARGS__)
+
+/* Send a command that carries no payload */
+#define iwl_mld_send_cmd_empty(mld, id)                                        \
+       iwl_mld_send_cmd_with_flags_pdu(mld, id, 0, NULL, 0)
+
+#endif /* __iwl_mld_hcmd_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <net/cfg80211.h>
+
+#include "iface.h"
+#include "hcmd.h"
+#include "key.h"
+#include "mlo.h"
+#include "mac80211.h"
+
+#include "fw/api/context.h"
+#include "fw/api/mac.h"
+#include "fw/api/time-event.h"
+#include "fw/api/datapath.h"
+
+/* Cleanup function for struct iwl_mld_vif, will be called in restart.
+ * Clears per-vif driver state (EMLSR flag, ROC activity), cleans up
+ * every allocated link (freeing non-default links that mac80211 no
+ * longer considers active), drops all keys and finally zeroes the
+ * vif's cleanup-able state.
+ */
+void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld *mld = mld_vif->mld;
+       struct iwl_mld_link *link;
+
+       /* EMLSR is turned back on during recovery */
+       vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
+
+       mld_vif->roc_activity = ROC_NUM_ACTIVITIES;
+
+       for_each_mld_vif_valid_link(mld_vif, link) {
+               iwl_mld_cleanup_link(mld_vif->mld, link);
+
+               /* Correctly allocated primary link in non-MLO mode */
+               if (!ieee80211_vif_is_mld(vif) &&
+                   link_id == 0 && link == &mld_vif->deflink)
+                       continue;
+
+               /* Links that mac80211 still considers active stay allocated */
+               if (vif->active_links & BIT(link_id))
+                       continue;
+
+               /* Should not happen as link removal should always succeed */
+               WARN_ON(1);
+               if (link != &mld_vif->deflink)
+                       kfree_rcu(link, rcu_head);
+               RCU_INIT_POINTER(mld_vif->link[link_id], NULL);
+       }
+
+       ieee80211_iter_keys(mld->hw, vif, iwl_mld_cleanup_keys_iter, NULL);
+
+       CLEANUP_STRUCT(mld_vif);
+}
+
+/* Send a MAC_CONFIG_CMD to the firmware, logging on failure. */
+static int iwl_mld_send_mac_cmd(struct iwl_mld *mld,
+                               struct iwl_mac_config_cmd *cmd)
+{
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       ret = iwl_mld_send_cmd_pdu(mld,
+                                  WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD),
+                                  cmd);
+       if (ret)
+               IWL_ERR(mld, "Failed to send MAC_CONFIG_CMD ret = %d\n", ret);
+
+       return ret;
+}
+
+/* Map a mac80211 interface type to the firmware MAC type.
+ * Note that AP interfaces map to FW_MAC_TYPE_GO regardless of p2p;
+ * unsupported types warn once and fall back to FW_MAC_TYPE_BSS_STA.
+ */
+int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif)
+{
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               return vif->p2p ? FW_MAC_TYPE_P2P_STA : FW_MAC_TYPE_BSS_STA;
+       case NL80211_IFTYPE_AP:
+               return FW_MAC_TYPE_GO;
+       case NL80211_IFTYPE_MONITOR:
+               return FW_MAC_TYPE_LISTENER;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               return FW_MAC_TYPE_P2P_DEVICE;
+       case NL80211_IFTYPE_ADHOC:
+               return FW_MAC_TYPE_IBSS;
+       default:
+               WARN_ON_ONCE(1);
+       }
+       return FW_MAC_TYPE_BSS_STA;
+}
+
+/* Return whether our own HE capabilities advertise ACK-enabled
+ * aggregation support (HE MAC cap ACK_EN bit).
+ */
+static bool iwl_mld_is_nic_ack_enabled(struct iwl_mld *mld,
+                                      struct ieee80211_vif *vif)
+{
+       const struct ieee80211_supported_band *sband;
+       const struct ieee80211_sta_he_cap *own_he_cap;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* This capability is the same for all bands,
+        * so take it from one of them.
+        */
+       sband = mld->hw->wiphy->bands[NL80211_BAND_2GHZ];
+       own_he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
+
+       return own_he_cap && (own_he_cap->he_cap_elem.mac_cap_info[2] &
+                              IEEE80211_HE_MAC_CAP2_ACK_EN);
+}
+
+/* fill the common part for all interface types */
+/* fill the common part for all interface types: id/color, action, fw
+ * MAC type, MLD address and the HE/EHT support flags (unless 11ax is
+ * disabled by module parameter).
+ */
+static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
+                                       struct ieee80211_vif *vif,
+                                       struct iwl_mac_config_cmd *cmd,
+                                       u32 action)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct ieee80211_bss_conf *link_conf;
+       unsigned int link_id;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       cmd->id_and_color = cpu_to_le32(mld_vif->fw_id);
+       cmd->action = cpu_to_le32(action);
+
+       cmd->mac_type =
+               cpu_to_le32(iwl_mld_mac80211_iftype_to_fw(vif));
+
+       memcpy(cmd->local_mld_addr, vif->addr, ETH_ALEN);
+
+       /* With 11ax disabled, none of the HE/EHT fields below apply */
+       if (iwlwifi_mod_params.disable_11ax)
+               return;
+
+       cmd->nic_not_ack_enabled =
+               cpu_to_le32(!iwl_mld_is_nic_ack_enabled(mld, vif));
+
+       /* If we have MLO enabled, then the firmware needs to enable
+        * address translation for the station(s) we add. That depends
+        * on having EHT enabled in firmware, which in turn depends on
+        * mac80211 in the code below.
+        * However, mac80211 doesn't enable HE/EHT until it has parsed
+        * the association response successfully, so just skip all that
+        * and enable both when we have MLO.
+        */
+       if (ieee80211_vif_is_mld(vif)) {
+               if (vif->type == NL80211_IFTYPE_AP)
+                       cmd->he_ap_support = cpu_to_le16(1);
+               else
+                       cmd->he_support = cpu_to_le16(1);
+
+               cmd->eht_support = cpu_to_le32(1);
+               return;
+       }
+
+       /* Non-MLO: enable HE if any active link supports it */
+       for_each_vif_active_link(vif, link_conf, link_id) {
+               if (!link_conf->he_support)
+                       continue;
+
+               if (vif->type == NL80211_IFTYPE_AP)
+                       cmd->he_ap_support = cpu_to_le16(1);
+               else
+                       cmd->he_support = cpu_to_le16(1);
+
+               /* EHT, if supported, was already set above */
+               break;
+       }
+}
+
+/* Fill the station-specific part of MAC_CONFIG_CMD: filter flags,
+ * association state, EML/eSR parameters for MLD connections, TWT
+ * policy and P2P client CTWindow.
+ */
+static void iwl_mld_fill_mac_cmd_sta(struct iwl_mld *mld,
+                                    struct ieee80211_vif *vif, u32 action,
+                                    struct iwl_mac_config_cmd *cmd)
+{
+       struct ieee80211_bss_conf *link;
+       u32 twt_policy = 0;
+       int link_id;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       WARN_ON(vif->type != NL80211_IFTYPE_STATION);
+
+       /* We always want to hear MCAST frames, if we're not authorized yet,
+        * we'll drop them.
+        */
+       cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_GRP);
+
+       /* Adding a MAC ctxt with is_assoc set is not allowed in fw
+        * (and shouldn't happen)
+        */
+       if (vif->cfg.assoc && action != FW_CTXT_ACTION_ADD) {
+               cmd->client.is_assoc = 1;
+
+               /* Raise coex priority until the connection is authorized */
+               if (!iwl_mld_vif_from_mac80211(vif)->authorized)
+                       cmd->client.data_policy |=
+                               cpu_to_le16(COEX_HIGH_PRIORITY_ENABLE);
+       } else {
+               /* Allow beacons to pass through as long as we are not
+                * associated
+                */
+               cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON);
+       }
+
+       cmd->client.assoc_id = cpu_to_le16(vif->cfg.aid);
+
+       /* EML/eSR parameters only exist for MLD connections */
+       if (ieee80211_vif_is_mld(vif)) {
+               u16 esr_transition_timeout =
+                       u16_get_bits(vif->cfg.eml_cap,
+                                    IEEE80211_EML_CAP_TRANSITION_TIMEOUT);
+
+               cmd->client.esr_transition_timeout =
+                       min_t(u16, IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU,
+                             esr_transition_timeout);
+               cmd->client.medium_sync_delay =
+                       cpu_to_le16(vif->cfg.eml_med_sync_delay);
+       }
+
+       /* Accumulate TWT capabilities over all HE-capable active links */
+       for_each_vif_active_link(vif, link, link_id) {
+               if (!link->he_support)
+                       continue;
+
+               if (link->twt_requester)
+                       twt_policy |= TWT_SUPPORTED;
+               if (link->twt_protected)
+                       twt_policy |= PROTECTED_TWT_SUPPORTED;
+               if (link->twt_broadcast)
+                       twt_policy |= BROADCAST_TWT_SUPPORTED;
+       }
+
+       if (!iwlwifi_mod_params.disable_11ax)
+               cmd->client.data_policy |= cpu_to_le16(twt_policy);
+
+       if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p)
+               cmd->filter_flags |=
+                       cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
+
+       if (vif->p2p)
+               cmd->client.ctwin =
+                       cpu_to_le32(vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+                                   IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
+}
+
+/* Fill the AP-specific part of MAC_CONFIG_CMD: always accept probe
+ * requests; accept beacons only while stations are associated.
+ */
+static void iwl_mld_fill_mac_cmd_ap(struct iwl_mld *mld,
+                                   struct ieee80211_vif *vif,
+                                   struct iwl_mac_config_cmd *cmd)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       WARN_ON(vif->type != NL80211_IFTYPE_AP);
+
+       cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
+
+       /* in AP mode, pass beacons from other APs (needed for ht protection).
+        * When there're no any associated station, which means that we are not
+        * TXing anyway, don't ask FW to pass beacons to prevent unnecessary
+        * wake-ups.
+        */
+       if (mld_vif->num_associated_stas)
+               cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON);
+}
+
+/* Interface iterator: set *go_active when an active P2P GO exists. */
+static void iwl_mld_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+       bool *go_active = _data;
+
+       if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO &&
+           iwl_mld_vif_from_mac80211(vif)->ap_ibss_active)
+               *go_active = true;
+}
+
+/* Return true when extended discoverability should be enabled for the
+ * P2P Device, i.e. when at least one P2P GO is currently active.
+ */
+static bool iwl_mld_p2p_dev_has_extended_disc(struct iwl_mld *mld)
+{
+       bool go_active = false;
+
+       /* This flag should be set to true when the P2P Device is
+        * discoverable and there is at least a P2P GO. Setting
+        * this flag will allow the P2P Device to be discoverable on other
+        * channels in addition to its listen channel.
+        * Note that this flag should not be set in other cases as it opens the
+        * Rx filters on all MAC and increases the number of interrupts.
+        */
+       ieee80211_iterate_active_interfaces(mld->hw,
+                                           IEEE80211_IFACE_ITER_RESUME_ALL,
+                                           iwl_mld_go_iterator, &go_active);
+
+       return go_active;
+}
+
+/* Fill the P2P-Device-specific part of MAC_CONFIG_CMD. */
+static void iwl_mld_fill_mac_cmd_p2p_dev(struct iwl_mld *mld,
+                                        struct ieee80211_vif *vif,
+                                        struct iwl_mac_config_cmd *cmd)
+{
+       bool ext_disc = iwl_mld_p2p_dev_has_extended_disc(mld);
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* Override the filter flags to accept all management frames. This is
+        * needed to support both P2P device discovery using probe requests and
+        * P2P service discovery using action frames
+        */
+       cmd->filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT);
+
+       if (ext_disc)
+               cmd->p2p_dev.is_disc_extended = cpu_to_le32(1);
+}
+
+/* Fill the IBSS (ad-hoc) part of MAC_CONFIG_CMD: accept beacons,
+ * probe requests and group-addressed frames.
+ */
+static void iwl_mld_fill_mac_cmd_ibss(struct iwl_mld *mld,
+                                     struct ieee80211_vif *vif,
+                                     struct iwl_mac_config_cmd *cmd)
+{
+       lockdep_assert_wiphy(mld->wiphy);
+
+       WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
+
+       cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON |
+                                        MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
+                                        MAC_CFG_FILTER_ACCEPT_GRP);
+}
+
+/* Remove the MAC context from the firmware; only the action and
+ * id_and_color fields are needed for removal.
+ */
+static int
+iwl_mld_rm_mac_from_fw(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mac_config_cmd cmd = {
+               .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+               .id_and_color = cpu_to_le32(mld_vif->fw_id),
+       };
+
+       return iwl_mld_send_mac_cmd(mld, &cmd);
+}
+
+/* Send a MAC config command for @vif. @action is one of
+ * FW_CTXT_ACTION_{ADD,MODIFY,REMOVE}; removal short-circuits since it
+ * doesn't need the per-type configuration.
+ */
+int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                         u32 action)
+{
+       struct iwl_mac_config_cmd cmd = {};
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (action == FW_CTXT_ACTION_REMOVE)
+               return iwl_mld_rm_mac_from_fw(mld, vif);
+
+       iwl_mld_mac_cmd_fill_common(mld, vif, &cmd, action);
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               iwl_mld_fill_mac_cmd_sta(mld, vif, action, &cmd);
+               break;
+       case NL80211_IFTYPE_AP:
+               iwl_mld_fill_mac_cmd_ap(mld, vif, &cmd);
+               break;
+       case NL80211_IFTYPE_MONITOR:
+               /* sniffer mode: accept everything */
+               cmd.filter_flags =
+                       cpu_to_le32(MAC_CFG_FILTER_PROMISC |
+                                   MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT |
+                                   MAC_CFG_FILTER_ACCEPT_BEACON |
+                                   MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
+                                   MAC_CFG_FILTER_ACCEPT_GRP);
+               break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               iwl_mld_fill_mac_cmd_p2p_dev(mld, vif, &cmd);
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               iwl_mld_fill_mac_cmd_ibss(mld, vif, &cmd);
+               break;
+       default:
+               WARN(1, "not supported yet\n");
+               return -EOPNOTSUPP;
+       }
+
+       return iwl_mld_send_mac_cmd(mld, &cmd);
+}
+
+IWL_MLD_ALLOC_FN(vif, vif)
+
+/* Constructor function for struct iwl_mld_vif */
+static int
+iwl_mld_init_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       mld_vif->mld = mld;
+       /* no remain-on-channel activity is running yet */
+       mld_vif->roc_activity = ROC_NUM_ACTIVITIES;
+
+       ret = iwl_mld_allocate_vif_fw_id(mld, &mld_vif->fw_id, vif);
+       if (ret)
+               return ret;
+
+       /* The EMLSR workers live in the part of iwl_mld_vif that survives a
+        * fw restart, so they must only be initialized on first addition.
+        */
+       if (!mld->fw_status.in_hw_restart) {
+               wiphy_work_init(&mld_vif->emlsr.unblock_tpt_wk,
+                               iwl_mld_emlsr_unblock_tpt_wk);
+               wiphy_delayed_work_init(&mld_vif->emlsr.check_tpt_wk,
+                                       iwl_mld_emlsr_check_tpt);
+               wiphy_delayed_work_init(&mld_vif->emlsr.prevent_done_wk,
+                                       iwl_mld_emlsr_prevent_done_wk);
+               wiphy_delayed_work_init(&mld_vif->emlsr.tmp_non_bss_done_wk,
+                                       iwl_mld_emlsr_tmp_non_bss_done_wk);
+       }
+
+       return 0;
+}
+
+/* Initialize the driver data for @vif and add its MAC context to the fw.
+ * On fw failure, roll back the fw_id -> vif mapping that
+ * iwl_mld_allocate_vif_fw_id() installed.
+ */
+int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       ret = iwl_mld_init_vif(mld, vif);
+       if (ret)
+               return ret;
+
+       ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_ADD);
+       if (ret)
+               RCU_INIT_POINTER(mld->fw_id_to_vif[mld_vif->fw_id], NULL);
+
+       return ret;
+}
+
+/* Remove the MAC context of @vif from the fw and drop the local mapping.
+ * The mapping is cleared and pending notifications cancelled even when
+ * the fw removal command fails.
+ */
+int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_REMOVE);
+
+       if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->fw_id_to_vif)))
+               return -EINVAL;
+
+       RCU_INIT_POINTER(mld->fw_id_to_vif[mld_vif->fw_id], NULL);
+
+       iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_VIF,
+                                              mld_vif->fw_id);
+
+       return ret;
+}
+
+/* Notify the fw that all active links of @vif became associated, and
+ * recalculate the multicast filter accordingly. Per-link failures are
+ * logged but do not abort the remaining links.
+ */
+void iwl_mld_set_vif_associated(struct iwl_mld *mld,
+                               struct ieee80211_vif *vif)
+{
+       struct ieee80211_bss_conf *link;
+       unsigned int link_id;
+
+       for_each_vif_active_link(vif, link, link_id) {
+               if (iwl_mld_link_set_associated(mld, vif, link))
+                       IWL_ERR(mld, "failed to update link %d\n", link_id);
+       }
+
+       iwl_mld_recalc_multicast_filter(mld);
+}
+
+/* Interface iterator: collect the fw MAC ids of all (non-P2P) station
+ * interfaces into the bitmap passed as @_data.
+ */
+static void iwl_mld_get_fw_id_bss_bitmap_iter(void *_data, u8 *mac,
+                                             struct ieee80211_vif *vif)
+{
+       u8 *fw_id_bitmap = _data;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+       if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+               return;
+
+       *fw_id_bitmap |= BIT(mld_vif->fw_id);
+}
+
+/* Returns a bitmap of the fw MAC ids of all BSS (managed, non-P2P)
+ * interfaces known to the driver.
+ */
+u8 iwl_mld_get_fw_bss_vifs_ids(struct iwl_mld *mld)
+{
+       u8 fw_id_bitmap = 0;
+
+       ieee80211_iterate_interfaces(mld->hw,
+                                    IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
+                                    iwl_mld_get_fw_id_bss_bitmap_iter,
+                                    &fw_id_bitmap);
+
+       return fw_id_bitmap;
+}
+
+/* Handle the probe response data notification: forward the CSA countdown
+ * to mac80211 and, for a P2P interface, cache the NoA attribute so it can
+ * be added to probe responses.
+ */
+void iwl_mld_handle_probe_resp_data_notif(struct iwl_mld *mld,
+                                         struct iwl_rx_packet *pkt)
+{
+       const struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
+       struct iwl_probe_resp_data *old_data, *new_data;
+       struct ieee80211_vif *vif;
+       struct iwl_mld_link *mld_link;
+
+       IWL_DEBUG_INFO(mld, "Probe response data notif: noa %d, csa %d\n",
+                      notif->noa_active, notif->csa_counter);
+
+       if (IWL_FW_CHECK(mld, le32_to_cpu(notif->mac_id) >=
+                        ARRAY_SIZE(mld->fw_id_to_vif),
+                        "mac id is invalid: %d\n",
+                        le32_to_cpu(notif->mac_id)))
+               return;
+
+       vif = wiphy_dereference(mld->wiphy,
+                               mld->fw_id_to_vif[le32_to_cpu(notif->mac_id)]);
+
+       /* the firmware gives us the mac_id (and not the link_id), mac80211
+        * gets a vif and not a link, bottom line, this flow is not MLD ready
+        * yet.
+        */
+       if (WARN_ON(!vif) || ieee80211_vif_is_mld(vif))
+               return;
+
+       if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA &&
+           notif->csa_counter >= 1)
+               ieee80211_beacon_set_cntdwn(vif, notif->csa_counter);
+
+       /* the NoA caching below is only relevant for P2P */
+       if (!vif->p2p)
+               return;
+
+       mld_link = &iwl_mld_vif_from_mac80211(vif)->deflink;
+
+       new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
+       if (!new_data)
+               return;
+
+       memcpy(&new_data->notif, notif, sizeof(new_data->notif));
+
+       /* noa_attr contains 1 reserved byte, need to subtract it */
+       new_data->noa_len = sizeof(struct ieee80211_vendor_ie) +
+                           sizeof(new_data->notif.noa_attr) - 1;
+
+       /*
+        * If it's a one time NoA, only one descriptor is needed,
+        * adjust the length according to len_low.
+        */
+       if (new_data->notif.noa_attr.len_low ==
+           sizeof(struct ieee80211_p2p_noa_desc) + 2)
+               new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc);
+
+       /* publish the new data; free the old one after an RCU grace period */
+       old_data = wiphy_dereference(mld->wiphy, mld_link->probe_resp_data);
+       rcu_assign_pointer(mld_link->probe_resp_data, new_data);
+
+       if (old_data)
+               kfree_rcu(old_data, rcu_head);
+}
+
+/* Handle the U-APSD misbehaving AP notification: currently only logs a
+ * warning identifying the AP.
+ */
+void iwl_mld_handle_uapsd_misbehaving_ap_notif(struct iwl_mld *mld,
+                                              struct iwl_rx_packet *pkt)
+{
+       struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
+       struct ieee80211_vif *vif;
+
+       if (IWL_FW_CHECK(mld, notif->mac_id >= ARRAY_SIZE(mld->fw_id_to_vif),
+                        "mac id is invalid: %d\n", notif->mac_id))
+               return;
+
+       vif = wiphy_dereference(mld->wiphy, mld->fw_id_to_vif[notif->mac_id]);
+
+       /* deflink bssid is only valid for a non-MLD connection */
+       if (WARN_ON(!vif) || ieee80211_vif_is_mld(vif))
+               return;
+
+       IWL_WARN(mld, "uapsd misbehaving AP: %pM\n", vif->bss_conf.bssid);
+}
+
+/* Handle the datapath monitor notification. On an EXT_CCA event for a
+ * 40 MHz (or wider) 2.4 GHz association, work around a loaded environment
+ * by removing the 40 MHz HT/HE capabilities and forcing a reconnect, so
+ * that the new association is 20 MHz only.
+ */
+void iwl_mld_handle_datapath_monitor_notif(struct iwl_mld *mld,
+                                          struct iwl_rx_packet *pkt)
+{
+       struct iwl_datapath_monitor_notif *notif = (void *)pkt->data;
+       struct ieee80211_bss_conf *link;
+       struct ieee80211_supported_band *sband;
+       const struct ieee80211_sta_he_cap *he_cap;
+       struct ieee80211_vif *vif;
+       struct iwl_mld_vif *mld_vif;
+
+       if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
+               return;
+
+       link = iwl_mld_fw_id_to_link_conf(mld, notif->link_id);
+       if (WARN_ON(!link))
+               return;
+
+       vif = link->vif;
+       if (WARN_ON(!vif) || vif->type != NL80211_IFTYPE_STATION ||
+           !vif->cfg.assoc)
+               return;
+
+       /* the workaround only applies to 2.4 GHz at 40 MHz or more */
+       if (!link->chanreq.oper.chan ||
+           link->chanreq.oper.chan->band != NL80211_BAND_2GHZ ||
+           link->chanreq.oper.width < NL80211_CHAN_WIDTH_40)
+               return;
+
+       mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+       /* this shouldn't happen *again*, ignore it */
+       if (mld_vif->cca_40mhz_workaround != CCA_40_MHZ_WA_NONE)
+               return;
+
+       mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_RECONNECT;
+
+       /*
+        * This capability manipulation isn't really ideal, but it's the
+        * easiest choice - otherwise we'd have to do some major changes
+        * in mac80211 to support this, which isn't worth it. This does
+        * mean that userspace may have outdated information, but that's
+        * actually not an issue at all.
+        */
+       sband = mld->wiphy->bands[NL80211_BAND_2GHZ];
+
+       WARN_ON(!sband->ht_cap.ht_supported);
+       WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40));
+       sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+       he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
+
+       if (he_cap) {
+               /* we know that ours is writable */
+               struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
+
+               WARN_ON(!he->has_he);
+               WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
+                         IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G));
+               he->he_cap_elem.phy_cap_info[0] &=
+                       ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+       }
+
+       /* reconnect with the reduced (20 MHz only) capabilities */
+       ieee80211_disconnect(vif, true);
+}
+
+/* Undo the capability changes made by the CCA-40MHz workaround once the
+ * interface really disconnects (the first disconnect after the workaround
+ * triggered is the forced reconnection and only advances the state).
+ */
+void iwl_mld_reset_cca_40mhz_workaround(struct iwl_mld *mld,
+                                       struct ieee80211_vif *vif)
+{
+       struct ieee80211_supported_band *sband;
+       const struct ieee80211_sta_he_cap *he_cap;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
+
+       if (mld_vif->cca_40mhz_workaround == CCA_40_MHZ_WA_NONE)
+               return;
+
+       /* Now we are just reconnecting with the new capabilities,
+        * but remember to reset the capabilities when we disconnect for real
+        */
+       if (mld_vif->cca_40mhz_workaround == CCA_40_MHZ_WA_RECONNECT) {
+               mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_RESET;
+               return;
+       }
+
+       /* Now cca_40mhz_workaround == CCA_40_MHZ_WA_RESET */
+
+       sband = mld->wiphy->bands[NL80211_BAND_2GHZ];
+
+       /* restore the HT 40 MHz capability removed by the workaround */
+       sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+       he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
+
+       if (he_cap) {
+               /* we know that ours is writable */
+               struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
+
+               /* restore the HE 40 MHz-in-2.4GHz capability as well */
+               he->he_cap_elem.phy_cap_info[0] |=
+                       IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+       }
+
+       mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_NONE;
+}
+
+/* Returns the vif of the one and only BSS (managed) interface, or NULL
+ * if there is not exactly one such interface.
+ */
+struct ieee80211_vif *iwl_mld_get_bss_vif(struct iwl_mld *mld)
+{
+       unsigned long fw_id_bitmap = iwl_mld_get_fw_bss_vifs_ids(mld);
+       int fw_id;
+
+       if (hweight8(fw_id_bitmap) != 1)
+               return NULL;
+
+       fw_id = __ffs(fw_id_bitmap);
+
+       return wiphy_dereference(mld->wiphy,
+                                mld->fw_id_to_vif[fw_id]);
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_iface_h__
+#define __iwl_mld_iface_h__
+
+#include <net/mac80211.h>
+
+#include "link.h"
+#include "session-protect.h"
+#include "d3.h"
+
+/**
+ * enum iwl_mld_cca_40mhz_wa_status - status of the CCA 40 MHz workaround
+ *
+ * @CCA_40_MHZ_WA_NONE: workaround is not active
+ * @CCA_40_MHZ_WA_RESET: reconnected with reduced capabilities; restore
+ *     them when disconnecting for real
+ * @CCA_40_MHZ_WA_RECONNECT: capabilities were reduced, reconnect pending
+ */
+enum iwl_mld_cca_40mhz_wa_status {
+       CCA_40_MHZ_WA_NONE,
+       CCA_40_MHZ_WA_RESET,
+       CCA_40_MHZ_WA_RECONNECT,
+};
+
+/**
+ * enum iwl_mld_emlsr_blocked - defines reasons for which EMLSR is blocked
+ *
+ * These blocks are applied/stored per-VIF. The values are bit flags, so
+ * multiple blocking reasons may be active simultaneously.
+ *
+ * @IWL_MLD_EMLSR_BLOCKED_PREVENTION: Prevent repeated EMLSR enter/exit
+ * @IWL_MLD_EMLSR_BLOCKED_WOWLAN: WOWLAN is preventing EMLSR
+ * @IWL_MLD_EMLSR_BLOCKED_FW: FW did not recommend MLO
+ * @IWL_MLD_EMLSR_BLOCKED_ROC: remain-on-channel is preventing EMLSR
+ * @IWL_MLD_EMLSR_BLOCKED_NON_BSS: An active non-BSS interface's link is
+ *      preventing EMLSR
+ * @IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS: An expected active non-BSS interface's
+ *      link is preventing EMLSR. This is a temporary blocking that is set when
+ *      there is an indication that a non-BSS interface is to be added.
+ * @IWL_MLD_EMLSR_BLOCKED_TPT: throughput is too low to make EMLSR worthwhile
+ */
+enum iwl_mld_emlsr_blocked {
+       IWL_MLD_EMLSR_BLOCKED_PREVENTION        = 0x1,
+       IWL_MLD_EMLSR_BLOCKED_WOWLAN            = 0x2,
+       IWL_MLD_EMLSR_BLOCKED_FW                = 0x4,
+       IWL_MLD_EMLSR_BLOCKED_ROC               = 0x8,
+       IWL_MLD_EMLSR_BLOCKED_NON_BSS           = 0x10,
+       IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS       = 0x20,
+       IWL_MLD_EMLSR_BLOCKED_TPT               = 0x40,
+};
+
+/**
+ * enum iwl_mld_emlsr_exit - defines reasons for exiting EMLSR
+ *
+ * Reasons to exit EMLSR may be either link specific or even specific to a
+ * combination of links. The values are bit flags so several reasons can
+ * be recorded at once.
+ *
+ * @IWL_MLD_EMLSR_EXIT_BLOCK: Exit due to a block reason being set
+ * @IWL_MLD_EMLSR_EXIT_MISSED_BEACON: Exit due to missed beacons
+ * @IWL_MLD_EMLSR_EXIT_FAIL_ENTRY: FW failed to enter EMLSR
+ * @IWL_MLD_EMLSR_EXIT_CSA: EMLSR prevented due to channel switch on link
+ * @IWL_MLD_EMLSR_EXIT_EQUAL_BAND: EMLSR prevented as both links share the band
+ * @IWL_MLD_EMLSR_EXIT_BANDWIDTH: Bandwidths of primary and secondary links are
+ *      not equal
+ * @IWL_MLD_EMLSR_EXIT_LOW_RSSI: Link RSSI is unsuitable for EMLSR
+ * @IWL_MLD_EMLSR_EXIT_LINK_USAGE: Exit EMLSR due to low TPT on secondary link
+ * @IWL_MLD_EMLSR_EXIT_BT_COEX: Exit EMLSR due to BT coexistence
+ * @IWL_MLD_EMLSR_EXIT_CHAN_LOAD: Exit EMLSR because the primary channel is not
+ *     loaded enough to justify EMLSR.
+ * @IWL_MLD_EMLSR_EXIT_RFI: Exit EMLSR due to RFI
+ */
+enum iwl_mld_emlsr_exit {
+       IWL_MLD_EMLSR_EXIT_BLOCK                = 0x1,
+       IWL_MLD_EMLSR_EXIT_MISSED_BEACON        = 0x2,
+       IWL_MLD_EMLSR_EXIT_FAIL_ENTRY           = 0x4,
+       IWL_MLD_EMLSR_EXIT_CSA                  = 0x8,
+       IWL_MLD_EMLSR_EXIT_EQUAL_BAND           = 0x10,
+       IWL_MLD_EMLSR_EXIT_BANDWIDTH            = 0x20,
+       IWL_MLD_EMLSR_EXIT_LOW_RSSI             = 0x40,
+       IWL_MLD_EMLSR_EXIT_LINK_USAGE           = 0x80,
+       IWL_MLD_EMLSR_EXIT_BT_COEX              = 0x100,
+       IWL_MLD_EMLSR_EXIT_CHAN_LOAD            = 0x200,
+       IWL_MLD_EMLSR_EXIT_RFI                  = 0x400,
+};
+
+/**
+ * struct iwl_mld_emlsr - per-VIF data about EMLSR operation
+ *
+ * @primary: The current primary link
+ * @selected_primary: Primary link as selected during the last link selection
+ * @selected_links: Links as selected during the last link selection
+ * @blocked_reasons: Reasons preventing EMLSR from being enabled
+ * @last_exit_reason: Reason for the last EMLSR exit
+ * @last_exit_ts: Time of the last EMLSR exit (if @last_exit_reason is non-zero)
+ * @exit_repeat_count: Number of times EMLSR was exited for the same reason
+ * @unblock_tpt_wk: Unblock EMLSR because the throughput limit was reached
+ * @check_tpt_wk: a worker to check if IWL_MLD_EMLSR_BLOCKED_TPT should be
+ *     added, for example if there is no longer enough traffic.
+ * @prevent_done_wk: Worker to remove %IWL_MLD_EMLSR_BLOCKED_PREVENTION
+ * @tmp_non_bss_done_wk: Worker to remove %IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS
+ */
+struct iwl_mld_emlsr {
+       /* NOTE(review): presumably memset to zero when the AP STA is no
+        * longer authorized - confirm against the STA state flow.
+        */
+       struct_group(zeroed_on_not_authorized,
+               u8 primary;
+
+               u8 selected_primary;
+               u16 selected_links;
+
+               enum iwl_mld_emlsr_blocked blocked_reasons;
+
+               enum iwl_mld_emlsr_exit last_exit_reason;
+               unsigned long last_exit_ts;
+               u8 exit_repeat_count;
+       );
+
+       /* the workers survive both fw restart and de-authorization */
+       struct wiphy_work unblock_tpt_wk;
+       struct wiphy_delayed_work check_tpt_wk;
+
+       struct wiphy_delayed_work prevent_done_wk;
+       struct wiphy_delayed_work tmp_non_bss_done_wk;
+};
+
+/**
+ * struct iwl_mld_vif - virtual interface (MAC context) configuration parameters
+ *
+ * @fw_id: fw id of the mac context.
+ * @session_protect: session protection parameters
+ * @ap_sta: pointer to AP sta, for easier access to it.
+ *     Relevant only for STA vifs.
+ * @authorized: indicates the AP station was set to authorized
+ * @bigtks: BIGTKs of the AP, for beacon protection.
+ *     Only valid for STA. (FIXME: needs to be per link)
+ * @num_associated_stas: number of associated STAs. Relevant only for AP mode.
+ * @ap_ibss_active: whether the AP/IBSS was started
+ * @roc_activity: the id of the roc_activity running. Relevant for p2p device
+ *     only. Set to %ROC_NUM_ACTIVITIES when not in use.
+ * @cca_40mhz_workaround: When we are connected in 2.4 GHz and 40 MHz, and the
+ *     environment is too loaded, we work around this by reconnecting to the
+ *     same AP with 20 MHz. This manages the status of the workaround.
+ * @beacon_inject_active: indicates an active debugfs beacon ie injection
+ * @low_latency_causes: bit flags, indicating the causes for low-latency,
+ *     see @iwl_mld_low_latency_cause.
+ * @mld: pointer to the mld structure.
+ * @deflink: default link data, for use in non-MLO,
+ * @link: reference to link data for each valid link, for use in MLO.
+ * @emlsr: information related to EMLSR
+ * @wowlan_data: data used by the wowlan suspend flow
+ * @use_ps_poll: use ps_poll frames
+ * @disable_bf: disable beacon filter
+ * @dbgfs_slink: debugfs symlink for this interface
+ */
+struct iwl_mld_vif {
+       /* Add here fields that need clean up on restart */
+       struct_group(zeroed_on_hw_restart,
+               u8 fw_id;
+               struct iwl_mld_session_protect session_protect;
+               struct ieee80211_sta *ap_sta;
+               bool authorized;
+               struct ieee80211_key_conf __rcu *bigtks[2];
+               u8 num_associated_stas;
+               bool ap_ibss_active;
+               u32 roc_activity;
+               enum iwl_mld_cca_40mhz_wa_status cca_40mhz_workaround;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               bool beacon_inject_active;
+#endif
+               u8 low_latency_causes;
+       );
+       /* And here fields that survive a fw restart */
+       struct iwl_mld *mld;
+       struct iwl_mld_link deflink;
+       struct iwl_mld_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
+       struct iwl_mld_emlsr emlsr;
+
+#if CONFIG_PM_SLEEP
+       struct iwl_mld_wowlan_data wowlan_data;
+#endif
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       bool use_ps_poll;
+       bool disable_bf;
+       struct dentry *dbgfs_slink;
+#endif
+};
+
+/* Retrieve the driver-private data stored in a mac80211 vif */
+static inline struct iwl_mld_vif *
+iwl_mld_vif_from_mac80211(struct ieee80211_vif *vif)
+{
+       return (void *)vif->drv_priv;
+}
+
+/* Dereference a link pointer; legal while holding the wiphy mutex */
+#define iwl_mld_link_dereference_check(mld_vif, link_id)               \
+       rcu_dereference_check((mld_vif)->link[link_id],                 \
+                             lockdep_is_held(&mld_vif->mld->wiphy->mtx))
+
+/* Iterate over all allocated links of the vif; declares link_id itself */
+#define for_each_mld_vif_valid_link(mld_vif, mld_link)                 \
+       for (int link_id = 0; link_id < ARRAY_SIZE((mld_vif)->link);    \
+            link_id++)                                                 \
+               if ((mld_link = iwl_mld_link_dereference_check(mld_vif, link_id)))
+
+/* Retrieve pointer to mld link from mac80211 structures.
+ * May return NULL when the link is not allocated.
+ */
+static inline struct iwl_mld_link *
+iwl_mld_link_from_mac80211(struct ieee80211_bss_conf *bss_conf)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);
+
+       return iwl_mld_link_dereference_check(mld_vif, bss_conf->link_id);
+}
+
+int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif);
+
+/* Cleanup function for struct iwl_mld_vif, will be called in restart */
+void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif);
+int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                         u32 action);
+int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
+int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
+void iwl_mld_set_vif_associated(struct iwl_mld *mld,
+                               struct ieee80211_vif *vif);
+u8 iwl_mld_get_fw_bss_vifs_ids(struct iwl_mld *mld);
+void iwl_mld_handle_probe_resp_data_notif(struct iwl_mld *mld,
+                                         struct iwl_rx_packet *pkt);
+
+void iwl_mld_handle_datapath_monitor_notif(struct iwl_mld *mld,
+                                          struct iwl_rx_packet *pkt);
+
+void iwl_mld_handle_uapsd_misbehaving_ap_notif(struct iwl_mld *mld,
+                                              struct iwl_rx_packet *pkt);
+
+void iwl_mld_reset_cca_40mhz_workaround(struct iwl_mld *mld,
+                                       struct ieee80211_vif *vif);
+
+/* Whether any low-latency cause is currently active on the vif */
+static inline bool iwl_mld_vif_low_latency(const struct iwl_mld_vif *mld_vif)
+{
+       return !!mld_vif->low_latency_causes;
+}
+
+struct ieee80211_vif *iwl_mld_get_bss_vif(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_iface_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include "key.h"
+#include "iface.h"
+#include "sta.h"
+#include "fw/api/datapath.h"
+
+/* Build the IWL_SEC_KEY_FLAG_* bitmask the fw needs for this key */
+static u32 iwl_mld_get_key_flags(struct iwl_mld *mld,
+                                struct ieee80211_vif *vif,
+                                struct ieee80211_sta *sta,
+                                struct ieee80211_key_conf *key)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+       /* key indices 4/5 are reserved for the IGTK */
+       bool igtk = key->keyidx == 4 || key->keyidx == 5;
+       u32 flags = 0;
+
+       if (!pairwise)
+               flags |= IWL_SEC_KEY_FLAG_MCAST_KEY;
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_TKIP:
+               flags |= IWL_SEC_KEY_FLAG_CIPHER_TKIP;
+               break;
+       /* AES-CMAC is handled by the same fw cipher as CCMP here -
+        * NOTE(review): presumably the fw uses the CCMP engine for CMAC;
+        * confirm against the fw API docs.
+        */
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_CCMP:
+               flags |= IWL_SEC_KEY_FLAG_CIPHER_CCMP;
+               break;
+       case WLAN_CIPHER_SUITE_GCMP_256:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               flags |= IWL_SEC_KEY_FLAG_KEY_SIZE;
+               fallthrough;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+               flags |= IWL_SEC_KEY_FLAG_CIPHER_GCMP;
+               break;
+       }
+
+       /* for client mode, group keys are checked against the AP STA */
+       if (!sta && vif->type == NL80211_IFTYPE_STATION)
+               sta = mld_vif->ap_sta;
+
+       /* If we are installing an iGTK (in AP or STA mode), we need to tell
+        * the firmware this key will en/decrypt MGMT frames.
+        * Same goes if we are installing a pairwise key for an MFP station.
+        * In case we're installing a groupwise key (which is not an iGTK),
+        * then, we will not use this key for MGMT frames.
+        */
+       if ((sta && sta->mfp && pairwise) || igtk)
+               flags |= IWL_SEC_KEY_FLAG_MFP;
+
+       if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+               flags |= IWL_SEC_KEY_FLAG_SPP_AMSDU;
+
+       return flags;
+}
+
+/* Compute the mask of fw station ids this key applies to.
+ * Returns 0 (with a WARN) when the mask cannot be determined.
+ */
+static u32 iwl_mld_get_key_sta_mask(struct iwl_mld *mld,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_sta *sta,
+                                   struct ieee80211_key_conf *key)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct ieee80211_link_sta *link_sta;
+       int sta_id;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* AP group keys are per link and should be on the mcast/bcast STA */
+       if (vif->type == NL80211_IFTYPE_AP &&
+           !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+               struct iwl_mld_link *link = NULL;
+
+               if (key->link_id >= 0)
+                       link = iwl_mld_link_dereference_check(mld_vif,
+                                                             key->link_id);
+
+               if (WARN_ON(!link))
+                       return 0;
+
+               /* In this stage we should have both the bcast and mcast STAs */
+               if (WARN_ON(link->bcast_sta.sta_id == IWL_INVALID_STA ||
+                           link->mcast_sta.sta_id == IWL_INVALID_STA))
+                       return 0;
+
+               /* IGTK/BIGTK to bcast STA */
+               if (key->keyidx >= 4)
+                       return BIT(link->bcast_sta.sta_id);
+
+               /* GTK for data to mcast STA */
+               return BIT(link->mcast_sta.sta_id);
+       }
+
+       /* for client mode use the AP STA also for group keys */
+       if (!sta && vif->type == NL80211_IFTYPE_STATION)
+               sta = mld_vif->ap_sta;
+
+       /* STA should be non-NULL now */
+       if (WARN_ON(!sta))
+               return 0;
+
+       /* Key is not per-link, get the full sta mask */
+       if (key->link_id < 0)
+               return iwl_mld_fw_sta_id_mask(mld, sta);
+
+       /* The link_sta shouldn't be NULL now, but this is checked in
+        * iwl_mld_fw_sta_id_mask
+        */
+       link_sta = link_sta_dereference_check(sta, key->link_id);
+
+       sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+       if (sta_id < 0)
+               return 0;
+
+       return BIT(sta_id);
+}
+
+/* Send a SEC_KEY_CMD ADD for the key, including key material and,
+ * for TKIP, the separate Rx/Tx MIC keys.
+ */
+static int iwl_mld_add_key_to_fw(struct iwl_mld *mld, u32 sta_mask,
+                                u32 key_flags, struct ieee80211_key_conf *key)
+{
+       struct iwl_sec_key_cmd cmd = {
+               .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+               .u.add.sta_mask = cpu_to_le32(sta_mask),
+               .u.add.key_id = cpu_to_le32(key->keyidx),
+               .u.add.key_flags = cpu_to_le32(key_flags),
+               /* start Tx from the current packet number */
+               .u.add.tx_seq = cpu_to_le64(atomic64_read(&key->tx_pn)),
+       };
+       bool tkip = key->cipher == WLAN_CIPHER_SUITE_TKIP;
+       int max_key_len = sizeof(cmd.u.add.key);
+
+       if (WARN_ON(!sta_mask))
+               return -EINVAL;
+
+       if (WARN_ON(key->keylen > max_key_len))
+               return -EINVAL;
+
+       memcpy(cmd.u.add.key, key->key, key->keylen);
+
+       if (tkip) {
+               memcpy(cmd.u.add.tkip_mic_rx_key,
+                      key->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+                      8);
+               memcpy(cmd.u.add.tkip_mic_tx_key,
+                      key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY,
+                      8);
+       }
+
+       return iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD),
+                                   &cmd);
+}
+
+/* Send a SEC_KEY_CMD REMOVE for the key; the command status is
+ * intentionally ignored, there is no recovery for a failed removal.
+ */
+static void iwl_mld_remove_key_from_fw(struct iwl_mld *mld, u32 sta_mask,
+                                      u32 key_flags, u32 keyidx)
+{
+       struct iwl_sec_key_cmd cmd = {
+               .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+               .u.remove.sta_mask = cpu_to_le32(sta_mask),
+               .u.remove.key_id = cpu_to_le32(keyidx),
+               .u.remove.key_flags = cpu_to_le32(key_flags),
+       };
+
+       if (WARN_ON(!sta_mask))
+               return;
+
+       iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD), &cmd);
+}
+
+/* Remove a key from the fw and update the IGTK bookkeeping when the key
+ * is an IGTK (key index 4/5).
+ */
+void iwl_mld_remove_key(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta,
+                       struct ieee80211_key_conf *key)
+{
+       u32 sta_mask = iwl_mld_get_key_sta_mask(mld, vif, sta, key);
+       u32 key_flags = iwl_mld_get_key_flags(mld, vif, sta, key);
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (!sta_mask)
+               return;
+
+       if (key->keyidx == 4 || key->keyidx == 5) {
+               struct iwl_mld_link *mld_link;
+               unsigned int link_id = 0;
+
+               /* set to -1 for non-MLO right now */
+               if (key->link_id >= 0)
+                       link_id = key->link_id;
+
+               mld_link = iwl_mld_link_dereference_check(mld_vif, link_id);
+               if (WARN_ON(!mld_link))
+                       return;
+
+               if (mld_link->igtk == key)
+                       mld_link->igtk = NULL;
+
+               /* every added IGTK was counted in iwl_mld_add_key() */
+               mld->num_igtks--;
+       }
+
+       iwl_mld_remove_key_from_fw(mld, sta_mask, key_flags, key->keyidx);
+
+       /* no longer in HW */
+       key->hw_key_idx = STA_KEY_IDX_INVALID;
+}
+
+/* Install a key in the fw. An IGTK (key index 4/5) replaces any
+ * previously installed IGTK on the same link; the driver supports at
+ * most IWL_MAX_NUM_IGTKS of them in total.
+ */
+int iwl_mld_add_key(struct iwl_mld *mld,
+                   struct ieee80211_vif *vif,
+                   struct ieee80211_sta *sta,
+                   struct ieee80211_key_conf *key)
+{
+       u32 sta_mask = iwl_mld_get_key_sta_mask(mld, vif, sta, key);
+       u32 key_flags = iwl_mld_get_key_flags(mld, vif, sta, key);
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_link *mld_link = NULL;
+       bool igtk = key->keyidx == 4 || key->keyidx == 5;
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (!sta_mask)
+               return -EINVAL;
+
+       if (igtk) {
+               if (mld->num_igtks == IWL_MAX_NUM_IGTKS)
+                       return -EOPNOTSUPP;
+
+               u8 link_id = 0;
+
+               /* set to -1 for non-MLO right now */
+               if (key->link_id >= 0)
+                       link_id = key->link_id;
+
+               mld_link = iwl_mld_link_dereference_check(mld_vif, link_id);
+
+               if (WARN_ON(!mld_link))
+                       return -EINVAL;
+
+               /* a new IGTK replaces the old one on this link */
+               if (mld_link->igtk) {
+                       IWL_DEBUG_MAC80211(mld, "remove old IGTK %d\n",
+                                          mld_link->igtk->keyidx);
+                       iwl_mld_remove_key(mld, vif, sta, mld_link->igtk);
+               }
+
+               WARN_ON(mld_link->igtk);
+       }
+
+       ret = iwl_mld_add_key_to_fw(mld, sta_mask, key_flags, key);
+       if (ret)
+               return ret;
+
+       /* mld_link is only set for an IGTK, see above */
+       if (mld_link) {
+               mld_link->igtk = key;
+               mld->num_igtks++;
+       }
+
+       /* We don't really need this, but need it to be not invalid,
+        * so we will know if the key is in fw.
+        */
+       key->hw_key_idx = 0;
+
+       return 0;
+}
+
+/* Iterator data for iwl_mld_remove_ap_keys_iter() */
+struct remove_ap_keys_iter_data {
+       u8 link_id;
+       struct ieee80211_sta *sta;
+};
+
+/* Key iterator: remove group keys installed in fw for the given link */
+static void iwl_mld_remove_ap_keys_iter(struct ieee80211_hw *hw,
+                                       struct ieee80211_vif *vif,
+                                       struct ieee80211_sta *sta,
+                                       struct ieee80211_key_conf *key,
+                                       void *_data)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct remove_ap_keys_iter_data *data = _data;
+
+       /* skip keys that were never installed in fw */
+       if (key->hw_key_idx == STA_KEY_IDX_INVALID)
+               return;
+
+       /* All the pairwise keys should have been removed by now */
+       if (WARN_ON(sta))
+               return;
+
+       /* per-link keys: only remove the ones of the requested link */
+       if (key->link_id >= 0 && key->link_id != data->link_id)
+               return;
+
+       iwl_mld_remove_key(mld, vif, data->sta, key);
+}
+
+/* Remove from the firmware all the group keys of the given link that are
+ * still installed for the AP STA. Only valid on a station interface.
+ */
+void iwl_mld_remove_ap_keys(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                           struct ieee80211_sta *sta, unsigned int link_id)
+{
+       struct remove_ap_keys_iter_data iter_data = {
+               .link_id = link_id,
+               .sta = sta,
+       };
+
+       if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION))
+               return;
+
+       ieee80211_iter_keys(mld->hw, vif,
+                           iwl_mld_remove_ap_keys_iter,
+                           &iter_data);
+}
+
+/* Context for iwl_mld_update_sta_key_iter(): maps the pairwise keys of
+ * @sta from @old_sta_mask to @new_sta_mask in the firmware.
+ * NOTE(review): the "iwl_mvm_" prefix looks like a leftover from the MVM
+ * driver this code was derived from; consider renaming to "iwl_mld_".
+ */
+struct iwl_mvm_sta_key_update_data {
+       struct ieee80211_sta *sta;
+       u32 old_sta_mask;
+       u32 new_sta_mask;
+       int err;        /* last error returned by SEC_KEY_CMD, 0 if none */
+};
+
+/* Key iterator callback: send a SEC_KEY_CMD MODIFY for each pairwise key
+ * of the STA in _data, moving it from old_sta_mask to new_sta_mask.
+ * Errors are accumulated in data->err (last error wins).
+ */
+static void iwl_mld_update_sta_key_iter(struct ieee80211_hw *hw,
+                                       struct ieee80211_vif *vif,
+                                       struct ieee80211_sta *sta,
+                                       struct ieee80211_key_conf *key,
+                                       void *_data)
+{
+       struct iwl_mvm_sta_key_update_data *data = _data;
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_sec_key_cmd cmd = {
+               .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY),
+               .u.modify.old_sta_mask = cpu_to_le32(data->old_sta_mask),
+               .u.modify.new_sta_mask = cpu_to_le32(data->new_sta_mask),
+               .u.modify.key_id = cpu_to_le32(key->keyidx),
+               .u.modify.key_flags =
+                       cpu_to_le32(iwl_mld_get_key_flags(mld, vif, sta, key)),
+       };
+       int err;
+
+       /* only need to do this for pairwise keys (link_id == -1) */
+       if (sta != data->sta || key->link_id >= 0)
+               return;
+
+       err = iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD),
+                                  &cmd);
+
+       if (err)
+               data->err = err;
+}
+
+/* Update the STA mask of all the pairwise keys of @sta in the firmware,
+ * e.g. when the set of active links changes. Returns 0 on success or the
+ * last error reported by the firmware.
+ */
+int iwl_mld_update_sta_keys(struct iwl_mld *mld,
+                           struct ieee80211_vif *vif,
+                           struct ieee80211_sta *sta,
+                           u32 old_sta_mask,
+                           u32 new_sta_mask)
+{
+       struct iwl_mvm_sta_key_update_data data = {
+               .sta = sta,
+               .old_sta_mask = old_sta_mask,
+               .new_sta_mask = new_sta_mask,
+       };
+
+       ieee80211_iter_keys(mld->hw, vif, iwl_mld_update_sta_key_iter,
+                           &data);
+       return data.err;
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_key_h__
+#define __iwl_mld_key_h__
+
+#include "mld.h"
+#include <net/mac80211.h>
+#include "fw/api/sta.h"
+
+/* Remove a key from the firmware */
+void iwl_mld_remove_key(struct iwl_mld *mld,
+                       struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta,
+                       struct ieee80211_key_conf *key);
+/* Add a key to the firmware. Returns 0 on success or a negative errno. */
+int iwl_mld_add_key(struct iwl_mld *mld,
+                   struct ieee80211_vif *vif,
+                   struct ieee80211_sta *sta,
+                   struct ieee80211_key_conf *key);
+/* Remove from the firmware all the group keys of @link_id installed for
+ * the AP STA of a station interface.
+ */
+void iwl_mld_remove_ap_keys(struct iwl_mld *mld,
+                           struct ieee80211_vif *vif,
+                           struct ieee80211_sta *sta,
+                           unsigned int link_id);
+
+/* Move the pairwise keys of @sta from @old_sta_mask to @new_sta_mask */
+int iwl_mld_update_sta_keys(struct iwl_mld *mld,
+                           struct ieee80211_vif *vif,
+                           struct ieee80211_sta *sta,
+                           u32 old_sta_mask,
+                           u32 new_sta_mask);
+
+/* Key iterator callback: mark a key as not present in the firmware.
+ * Used when cleaning up after a firmware restart/teardown.
+ */
+static inline void
+iwl_mld_cleanup_keys_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                         struct ieee80211_sta *sta,
+                         struct ieee80211_key_conf *key, void *data)
+{
+       key->hw_key_idx = STA_KEY_IDX_INVALID;
+}
+
+#endif /* __iwl_mld_key_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <linux/leds.h>
+#include <net/mac80211.h>
+
+#include "fw/api/led.h"
+#include "mld.h"
+#include "led.h"
+#include "hcmd.h"
+
+/* Send the LEDS_CMD to the firmware to turn the LED on or off.
+ * Sent asynchronously; a failure is only logged.
+ */
+static void iwl_mld_send_led_fw_cmd(struct iwl_mld *mld, bool on)
+{
+       struct iwl_led_cmd led_cmd = {
+               .status = cpu_to_le32(on),
+       };
+       int err;
+
+       /* the command can only be handled by a running firmware */
+       if (WARN_ON(!mld->fw_status.running))
+               return;
+
+       err = iwl_mld_send_cmd_with_flags_pdu(mld, WIDE_ID(LONG_GROUP,
+                                                          LEDS_CMD),
+                                             CMD_ASYNC, &led_cmd);
+
+       if (err)
+               IWL_WARN(mld, "LED command failed: %d\n", err);
+}
+
+/* LED class device brightness_set callback: any non-zero brightness turns
+ * the LED on. Silently ignored while the firmware is not running (avoids
+ * the WARN in iwl_mld_send_led_fw_cmd()).
+ */
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+                                  enum led_brightness brightness)
+{
+       struct iwl_mld *mld = container_of(led_cdev, struct iwl_mld, led);
+
+       if (!mld->fw_status.running)
+               return;
+
+       iwl_mld_send_led_fw_cmd(mld, brightness > 0);
+}
+
+/* Register the wifi LED class device according to the led_mode module
+ * parameter. Returns 0 on success (including when LEDs are disabled),
+ * -EINVAL for an unknown mode, -ENOMEM or the registration error otherwise.
+ */
+int iwl_mld_leds_init(struct iwl_mld *mld)
+{
+       int mode = iwlwifi_mod_params.led_mode;
+       int ret;
+
+       switch (mode) {
+       case IWL_LED_BLINK:
+               /* blink mode isn't implemented here - downgrade to RF state */
+               IWL_ERR(mld, "Blink led mode not supported, used default\n");
+               fallthrough;
+       case IWL_LED_DEFAULT:
+       case IWL_LED_RF_STATE:
+               mode = IWL_LED_RF_STATE;
+               break;
+       case IWL_LED_DISABLE:
+               IWL_INFO(mld, "Led disabled\n");
+               return 0;
+       default:
+               return -EINVAL;
+       }
+
+       /* name is freed in iwl_mld_leds_exit() (or below on failure) */
+       mld->led.name = kasprintf(GFP_KERNEL, "%s-led",
+                                 wiphy_name(mld->hw->wiphy));
+       if (!mld->led.name)
+               return -ENOMEM;
+
+       mld->led.brightness_set = iwl_led_brightness_set;
+       mld->led.max_brightness = 1;
+
+       if (mode == IWL_LED_RF_STATE)
+               mld->led.default_trigger =
+                       ieee80211_get_radio_led_name(mld->hw);
+
+       ret = led_classdev_register(mld->trans->dev, &mld->led);
+       if (ret) {
+               kfree(mld->led.name);
+               /* NULL name marks the LED as unregistered for the other
+                * functions in this file
+                */
+               mld->led.name = NULL;
+               IWL_INFO(mld, "Failed to enable led\n");
+       }
+
+       return ret;
+}
+
+/* Re-apply the current LED state to the firmware, e.g. after the firmware
+ * (re)starts. No-op if the LED was never registered.
+ */
+void iwl_mld_led_config_fw(struct iwl_mld *mld)
+{
+       if (!mld->led.name)
+               return;
+
+       iwl_mld_send_led_fw_cmd(mld, mld->led.brightness > 0);
+}
+
+/* Unregister the LED class device and free its name.
+ * Safe to call even if iwl_mld_leds_init() failed or never ran.
+ */
+void iwl_mld_leds_exit(struct iwl_mld *mld)
+{
+       if (!mld->led.name)
+               return;
+
+       led_classdev_unregister(&mld->led);
+       kfree(mld->led.name);
+       mld->led.name = NULL;
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_led_h__
+#define __iwl_mld_led_h__
+
+#include "mld.h"
+
+#ifdef CONFIG_IWLWIFI_LEDS
+/* Register the wifi LED class device (per the led_mode module param) */
+int iwl_mld_leds_init(struct iwl_mld *mld);
+/* Unregister the LED class device registered by iwl_mld_leds_init() */
+void iwl_mld_leds_exit(struct iwl_mld *mld);
+/* Re-apply the current LED state to the firmware (e.g. after FW restart) */
+void iwl_mld_led_config_fw(struct iwl_mld *mld);
+#else
+/* Stubs for builds without LED support */
+static inline int iwl_mld_leds_init(struct iwl_mld *mld)
+{
+       return 0;
+}
+
+static inline void iwl_mld_leds_exit(struct iwl_mld *mld)
+{
+}
+
+static inline void iwl_mld_led_config_fw(struct iwl_mld *mld)
+{
+}
+#endif
+
+#endif /* __iwl_mld_led_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "constants.h"
+#include "link.h"
+#include "iface.h"
+#include "mlo.h"
+#include "hcmd.h"
+#include "phy.h"
+#include "fw/api/rs.h"
+#include "fw/api/txq.h"
+#include "fw/api/mac.h"
+
+#include "fw/api/context.h"
+#include "fw/dbg.h"
+
+/* Send a LINK_CONFIG_CMD to the firmware with the given action
+ * (ADD/MODIFY/REMOVE). Returns 0 on success or a negative errno.
+ */
+static int iwl_mld_send_link_cmd(struct iwl_mld *mld,
+                                struct iwl_link_config_cmd *cmd,
+                                enum iwl_ctxt_action action)
+{
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       cmd->action = cpu_to_le32(action);
+       ret = iwl_mld_send_cmd_pdu(mld,
+                                  WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD),
+                                  cmd);
+       if (ret)
+               IWL_ERR(mld, "Failed to send LINK_CONFIG_CMD (action:%d): %d\n",
+                       action, ret);
+       return ret;
+}
+
+/* Add a new (inactive) link to the firmware. The link is created without
+ * a PHY context (phy_id = invalid); the PHY is assigned later when the
+ * link is bound to a channel context.
+ */
+static int iwl_mld_add_link_to_fw(struct iwl_mld *mld,
+                                 struct ieee80211_bss_conf *link_conf)
+{
+       struct ieee80211_vif *vif = link_conf->vif;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_link *link = iwl_mld_link_from_mac80211(link_conf);
+       struct iwl_link_config_cmd cmd = {};
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (WARN_ON(!link))
+               return -EINVAL;
+
+       cmd.link_id = cpu_to_le32(link->fw_id);
+       cmd.mac_id = cpu_to_le32(mld_vif->fw_id);
+       cmd.spec_link_id = link_conf->link_id;
+       cmd.phy_id = cpu_to_le32(FW_CTXT_ID_INVALID);
+
+       ether_addr_copy(cmd.local_link_addr, link_conf->addr);
+
+       if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid)
+               ether_addr_copy(cmd.ibss_bssid_addr, link_conf->bssid);
+
+       return iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_ADD);
+}
+
+/* Get the basic rates of the used band and add the mandatory ones.
+ * The result is written as two firmware rate bitmaps: @cck_rates and
+ * @ofdm_rates (both little-endian, bit 0 = lowest rate of that class).
+ */
+static void iwl_mld_fill_rates(struct iwl_mld *mld,
+                              struct ieee80211_bss_conf *link,
+                              struct ieee80211_chanctx_conf *chan_ctx,
+                              __le32 *cck_rates, __le32 *ofdm_rates)
+{
+       struct cfg80211_chan_def *chandef =
+               iwl_mld_get_chandef_from_chanctx(chan_ctx);
+       struct ieee80211_supported_band *sband =
+               mld->hw->wiphy->bands[chandef->chan->band];
+       unsigned long basic = link->basic_rates;
+       /* 100 is above any valid rate index, i.e. "none present" */
+       int lowest_present_ofdm = 100;
+       int lowest_present_cck = 100;
+       u32 cck = 0;
+       u32 ofdm = 0;
+       int i;
+
+       /* split the basic rates into the CCK and OFDM bitmaps and track
+        * the lowest rate of each class
+        */
+       for_each_set_bit(i, &basic, BITS_PER_LONG) {
+               int hw = sband->bitrates[i].hw_value;
+
+               if (hw >= IWL_FIRST_OFDM_RATE) {
+                       ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
+                       if (lowest_present_ofdm > hw)
+                               lowest_present_ofdm = hw;
+               } else {
+                       BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+                       cck |= BIT(hw);
+                       if (lowest_present_cck > hw)
+                               lowest_present_cck = hw;
+               }
+       }
+
+       /* Now we've got the basic rates as bitmaps in the ofdm and cck
+        * variables. This isn't sufficient though, as there might not
+        * be all the right rates in the bitmap. E.g. if the only basic
+        * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+        * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+        *
+        *    [...] a STA responding to a received frame shall transmit
+        *    its Control Response frame [...] at the highest rate in the
+        *    BSSBasicRateSet parameter that is less than or equal to the
+        *    rate of the immediately previous frame in the frame exchange
+        *    sequence ([...]) and that is of the same modulation class
+        *    ([...]) as the received frame. If no rate contained in the
+        *    BSSBasicRateSet parameter meets these conditions, then the
+        *    control frame sent in response to a received frame shall be
+        *    transmitted at the highest mandatory rate of the PHY that is
+        *    less than or equal to the rate of the received frame, and
+        *    that is of the same modulation class as the received frame.
+        *
+        * As a consequence, we need to add all mandatory rates that are
+        * lower than all of the basic rates to these bitmaps.
+        */
+
+       if (lowest_present_ofdm > IWL_RATE_24M_INDEX)
+               ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE;
+       if (lowest_present_ofdm > IWL_RATE_12M_INDEX)
+               ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE;
+       /* 6M already there or needed so always add */
+       ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE;
+
+       /* CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+        * Note, however:
+        *  - if no CCK rates are basic, it must be ERP since there must
+        *    be some basic rates at all, so they're OFDM => ERP PHY
+        *    (or we're in 5 GHz, and the cck bitmap will never be used)
+        *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+        *  - if 5.5M is basic, 1M and 2M are mandatory
+        *  - if 2M is basic, 1M is mandatory
+        *  - if 1M is basic, that's the only valid ACK rate.
+        * As a consequence, it's not as complicated as it sounds, just add
+        * any lower rates to the ACK rate bitmap.
+        */
+       if (lowest_present_cck > IWL_RATE_11M_INDEX)
+               cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE;
+       if (lowest_present_cck > IWL_RATE_5M_INDEX)
+               cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE;
+       if (lowest_present_cck > IWL_RATE_2M_INDEX)
+               cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE;
+       /* 1M already there or needed so always add */
+       cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE;
+
+       *cck_rates = cpu_to_le32((u32)cck);
+       *ofdm_rates = cpu_to_le32((u32)ofdm);
+}
+
+/* Translate the link's CTS-to-self and HT protection settings into the
+ * firmware's LINK_PROT_FLG_* bits, OR-ed into *protection_flags.
+ */
+static void iwl_mld_fill_protection_flags(struct iwl_mld *mld,
+                                         struct ieee80211_bss_conf *link,
+                                         __le32 *protection_flags)
+{
+       u8 protection_mode = link->ht_operation_mode &
+                               IEEE80211_HT_OP_MODE_PROTECTION;
+       u8 ht_flag = LINK_PROT_FLG_HT_PROT | LINK_PROT_FLG_FAT_PROT;
+
+       IWL_DEBUG_RATE(mld, "HT protection mode: %d\n", protection_mode);
+
+       if (link->use_cts_prot)
+               *protection_flags |= cpu_to_le32(LINK_PROT_FLG_TGG_PROTECT);
+
+       /* See section 9.23.3.1 of IEEE 80211-2012.
+        * Nongreenfield HT STAs Present is not supported.
+        */
+       switch (protection_mode) {
+       case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+               break;
+       case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+       case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+               *protection_flags |= cpu_to_le32(ht_flag);
+               break;
+       case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+               /* Protect when channel wider than 20MHz */
+               if (link->chanreq.oper.width > NL80211_CHAN_WIDTH_20)
+                       *protection_flags |= cpu_to_le32(ht_flag);
+               break;
+       }
+}
+
+/* Map a mac80211 AC index (VO, VI, BE, BK) to the firmware AC value */
+static u8 iwl_mld_mac80211_ac_to_fw_ac(enum ieee80211_ac_numbers ac)
+{
+       switch (ac) {
+       case IEEE80211_AC_VO:
+               return AC_VO;
+       case IEEE80211_AC_VI:
+               return AC_VI;
+       case IEEE80211_AC_BE:
+               return AC_BE;
+       case IEEE80211_AC_BK:
+       default:
+               return AC_BK;
+       }
+}
+
+/* Fill the per-AC EDCA parameters (cw_min/max, TXOP, AIFSN, TX fifo mask)
+ * into the firmware's @ac array, and set the matching bits in *qos_flags.
+ */
+static void iwl_mld_fill_qos_params(struct ieee80211_bss_conf *link,
+                                   struct iwl_ac_qos *ac, __le32 *qos_flags)
+{
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+       /* no need to check mld_link since it is done in the caller */
+
+       for (int mac_ac = 0; mac_ac < IEEE80211_NUM_ACS; mac_ac++) {
+               u8 txf = iwl_mld_mac80211_ac_to_fw_tx_fifo(mac_ac);
+               u8 fw_ac = iwl_mld_mac80211_ac_to_fw_ac(mac_ac);
+
+               ac[fw_ac].cw_min =
+                       cpu_to_le16(mld_link->queue_params[mac_ac].cw_min);
+               ac[fw_ac].cw_max =
+                       cpu_to_le16(mld_link->queue_params[mac_ac].cw_max);
+               /* mac80211 TXOP is in units of 32 usec */
+               ac[fw_ac].edca_txop =
+                       cpu_to_le16(mld_link->queue_params[mac_ac].txop * 32);
+               ac[fw_ac].aifsn = mld_link->queue_params[mac_ac].aifs;
+               ac[fw_ac].fifos_mask = BIT(txf);
+       }
+
+       if (link->qos)
+               *qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
+
+       if (link->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)
+               *qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
+}
+
+/* Fill the HE MU-EDCA parameters per AC into @trig_based_txf.
+ * Returns true only if MU-EDCA is enabled on ALL ACs; otherwise returns
+ * false (possibly leaving some entries partially filled - the caller then
+ * doesn't set LINK_FLG_MU_EDCA_CW, so the firmware ignores them).
+ */
+static bool iwl_mld_fill_mu_edca(struct iwl_mld *mld,
+                                const struct iwl_mld_link *mld_link,
+                                struct iwl_he_backoff_conf *trig_based_txf)
+{
+       for (int mac_ac = 0; mac_ac < IEEE80211_NUM_ACS; mac_ac++) {
+               const struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
+                       &mld_link->queue_params[mac_ac].mu_edca_param_rec;
+               u8 fw_ac = iwl_mld_mac80211_ac_to_fw_ac(mac_ac);
+
+               if (!mld_link->queue_params[mac_ac].mu_edca)
+                       return false;
+
+               /* ECWmin/ECWmax are packed in one byte: low/high nibble */
+               trig_based_txf[fw_ac].cwmin =
+                       cpu_to_le16(mu_edca->ecw_min_max & 0xf);
+               trig_based_txf[fw_ac].cwmax =
+                       cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
+               trig_based_txf[fw_ac].aifsn =
+                       cpu_to_le16(mu_edca->aifsn & 0xf);
+               trig_based_txf[fw_ac].mu_time =
+                       cpu_to_le16(mu_edca->mu_edca_timer);
+       }
+       return true;
+}
+
+/* Translate a mac80211 RX bandwidth to the firmware's link-modify BW value */
+static u8 iwl_mld_sta_rx_bw_to_fw(enum ieee80211_sta_rx_bandwidth bw)
+{
+       switch (bw) {
+       case IEEE80211_STA_RX_BW_320:
+               return IWL_LINK_MODIFY_BW_320;
+       case IEEE80211_STA_RX_BW_160:
+               return IWL_LINK_MODIFY_BW_160;
+       case IEEE80211_STA_RX_BW_80:
+               return IWL_LINK_MODIFY_BW_80;
+       case IEEE80211_STA_RX_BW_40:
+               return IWL_LINK_MODIFY_BW_40;
+       case IEEE80211_STA_RX_BW_20:
+       default: /* potential future values not supported by this hw/driver */
+               return IWL_LINK_MODIFY_BW_20;
+       }
+}
+
+/* Build and send a LINK_CONFIG_CMD MODIFY for the link.
+ * @bw is only meaningful when @changes contains
+ * LINK_CONTEXT_MODIFY_BANDWIDTH (OMI bandwidth update path).
+ * HE parameters are filled only when HE is usable (post-assoc for a STA);
+ * otherwise LINK_CONTEXT_MODIFY_HE_PARAMS is stripped from @changes.
+ */
+static int _iwl_mld_change_link_in_fw(struct iwl_mld *mld,
+                                     struct ieee80211_bss_conf *link,
+                                     enum ieee80211_sta_rx_bandwidth bw,
+                                     u32 changes)
+{
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+       struct ieee80211_vif *vif = link->vif;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct ieee80211_chanctx_conf *chan_ctx;
+       struct iwl_link_config_cmd cmd = {};
+       u32 flags = 0;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (WARN_ON(!mld_link))
+               return -EINVAL;
+
+       cmd.link_id = cpu_to_le32(mld_link->fw_id);
+       cmd.spec_link_id = link->link_id;
+       cmd.mac_id = cpu_to_le32(mld_vif->fw_id);
+
+       chan_ctx = wiphy_dereference(mld->wiphy, mld_link->chan_ctx);
+
+       /* no channel context means the link has no PHY yet */
+       cmd.phy_id = cpu_to_le32(chan_ctx ?
+               iwl_mld_phy_from_mac80211(chan_ctx)->fw_id :
+               FW_CTXT_ID_INVALID);
+
+       ether_addr_copy(cmd.local_link_addr, link->addr);
+
+       cmd.active = cpu_to_le32(mld_link->active);
+
+       if ((changes & LINK_CONTEXT_MODIFY_ACTIVE) && !mld_link->active &&
+           mld_link->silent_deactivation) {
+               /* We are de-activating a link that is having CSA with
+                * immediate quiet in EMLSR. Tell the firmware not to send any
+                * frame.
+                */
+               cmd.block_tx = 1;
+               /* one-shot: consume the flag */
+               mld_link->silent_deactivation = false;
+       }
+
+       if (vif->type == NL80211_IFTYPE_ADHOC && link->bssid)
+               ether_addr_copy(cmd.ibss_bssid_addr, link->bssid);
+
+       /* Channel context is needed to get the rates */
+       if (chan_ctx)
+               iwl_mld_fill_rates(mld, link, chan_ctx, &cmd.cck_rates,
+                                  &cmd.ofdm_rates);
+
+       cmd.cck_short_preamble = cpu_to_le32(link->use_short_preamble);
+       cmd.short_slot = cpu_to_le32(link->use_short_slot);
+
+       iwl_mld_fill_protection_flags(mld, link, &cmd.protection_flags);
+
+       iwl_mld_fill_qos_params(link, cmd.ac, &cmd.qos_flags);
+
+       cmd.bi = cpu_to_le32(link->beacon_int);
+       cmd.dtim_interval = cpu_to_le32(link->beacon_int * link->dtim_period);
+
+       if (changes & LINK_CONTEXT_MODIFY_BANDWIDTH)
+               cmd.modify_bandwidth = iwl_mld_sta_rx_bw_to_fw(bw);
+
+       /* Configure HE parameters only if HE is supported, and only after
+        * the parameters are set in mac80211 (meaning after assoc)
+        */
+       if (!link->he_support || iwlwifi_mod_params.disable_11ax ||
+           (vif->type == NL80211_IFTYPE_STATION && !vif->cfg.assoc)) {
+               changes &= ~LINK_CONTEXT_MODIFY_HE_PARAMS;
+               goto send_cmd;
+       }
+
+       /* ap_sta may be NULL if we're disconnecting */
+       if (mld_vif->ap_sta) {
+               struct ieee80211_link_sta *link_sta =
+                       link_sta_dereference_check(mld_vif->ap_sta,
+                                                  link->link_id);
+
+               /* disable UL MU data if the AP advertised that capability */
+               if (!WARN_ON(!link_sta) && link_sta->he_cap.has_he &&
+                   link_sta->he_cap.he_cap_elem.mac_cap_info[5] &
+                   IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX)
+                       cmd.ul_mu_data_disable = 1;
+       }
+
+       cmd.htc_trig_based_pkt_ext = link->htc_trig_based_pkt_ext;
+
+       if (link->uora_exists) {
+               /* UORA OCW range packs ECWmin in bits 0-2, ECWmax in 3-5 */
+               cmd.rand_alloc_ecwmin = link->uora_ocw_range & 0x7;
+               cmd.rand_alloc_ecwmax = (link->uora_ocw_range >> 3) & 0x7;
+       }
+
+       if (iwl_mld_fill_mu_edca(mld, mld_link, cmd.trig_based_txf))
+               flags |= LINK_FLG_MU_EDCA_CW;
+
+       cmd.bss_color = link->he_bss_color.color;
+
+       if (!link->he_bss_color.enabled)
+               flags |= LINK_FLG_BSS_COLOR_DIS;
+
+       cmd.frame_time_rts_th = cpu_to_le16(link->frame_time_rts_th);
+
+       /* Block 26-tone RU OFDMA transmissions */
+       if (mld_link->he_ru_2mhz_block)
+               flags |= LINK_FLG_RU_2MHZ_BLOCK;
+
+       if (link->nontransmitted) {
+               ether_addr_copy(cmd.ref_bssid_addr, link->transmitter_bssid);
+               cmd.bssid_index = link->bssid_index;
+       }
+
+       /* The only EHT parameter is puncturing, and starting from PHY cmd
+        * version 6 - it is sent there. For older versions of the PHY cmd,
+        * puncturing is not needed at all.
+        */
+       if (WARN_ON(changes & LINK_CONTEXT_MODIFY_EHT_PARAMS))
+               changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
+
+send_cmd:
+       cmd.modify_mask = cpu_to_le32(changes);
+       cmd.flags = cpu_to_le32(flags);
+
+       return iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_MODIFY);
+}
+
+/* Modify a link in the firmware. Bandwidth changes must go through
+ * iwl_mld_change_link_omi_bw() instead, hence the WARN + mask-out here.
+ */
+int iwl_mld_change_link_in_fw(struct iwl_mld *mld,
+                             struct ieee80211_bss_conf *link,
+                             u32 changes)
+{
+       if (WARN_ON(changes & LINK_CONTEXT_MODIFY_BANDWIDTH))
+               changes &= ~LINK_CONTEXT_MODIFY_BANDWIDTH;
+
+       return _iwl_mld_change_link_in_fw(mld, link, 0, changes);
+}
+
+/* Modify only the link's bandwidth in the firmware (OMI BW update path) */
+int iwl_mld_change_link_omi_bw(struct iwl_mld *mld,
+                              struct ieee80211_bss_conf *link,
+                              enum ieee80211_sta_rx_bandwidth bw)
+{
+       return _iwl_mld_change_link_in_fw(mld, link, bw,
+                                         LINK_CONTEXT_MODIFY_BANDWIDTH);
+}
+
+/* Mark the link active and tell the firmware. On failure the local
+ * active state is rolled back. Returns 0 or a negative errno.
+ */
+int iwl_mld_activate_link(struct iwl_mld *mld,
+                         struct ieee80211_bss_conf *link)
+{
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (WARN_ON(!mld_link || mld_link->active))
+               return -EINVAL;
+
+       /* restart the OMI settle timestamp on (re)activation */
+       mld_link->rx_omi.exit_ts = jiffies;
+       mld_link->active = true;
+
+       ret = iwl_mld_change_link_in_fw(mld, link,
+                                       LINK_CONTEXT_MODIFY_ACTIVE);
+       if (ret)
+               mld_link->active = false;
+
+       return ret;
+}
+
+/* Deactivate the link in the firmware and drop any state that only makes
+ * sense while the link is active (session protection, probe response
+ * data, pending notifications).
+ */
+void iwl_mld_deactivate_link(struct iwl_mld *mld,
+                            struct ieee80211_bss_conf *link)
+{
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+       struct iwl_probe_resp_data *probe_data;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (WARN_ON(!mld_link || !mld_link->active))
+               return;
+
+       iwl_mld_cancel_session_protection(mld, link->vif, link->link_id);
+
+       /* If we deactivate the link, we will probably remove it, or switch
+        * channel. In both cases, the CSA or Notice of Absence information is
+        * now irrelevant. Remove the data here.
+        */
+       probe_data = wiphy_dereference(mld->wiphy, mld_link->probe_resp_data);
+       RCU_INIT_POINTER(mld_link->probe_resp_data, NULL);
+       if (probe_data)
+               kfree_rcu(probe_data, rcu_head);
+
+       mld_link->active = false;
+
+       iwl_mld_change_link_in_fw(mld, link, LINK_CONTEXT_MODIFY_ACTIVE);
+
+       /* Now that the link is not active in FW, we don't expect any new
+        * notifications for it. Cancel the ones that are already pending
+        */
+       iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_LINK,
+                                              mld_link->fw_id);
+}
+
+/* Remove the link from the firmware with a LINK_CONFIG_CMD REMOVE */
+static int
+iwl_mld_rm_link_from_fw(struct iwl_mld *mld, struct ieee80211_bss_conf *link)
+{
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+       struct iwl_link_config_cmd cmd = {};
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (WARN_ON(!mld_link))
+               return -EINVAL;
+
+       cmd.link_id = cpu_to_le32(mld_link->fw_id);
+       cmd.spec_link_id = link->link_id;
+       cmd.phy_id = cpu_to_le32(FW_CTXT_ID_INVALID);
+
+       return iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_REMOVE);
+}
+
+/* Start (or queue) an OMI RX bandwidth change to @bw.
+ * @bw == IEEE80211_STA_RX_BW_MAX means "track the AP's full bandwidth".
+ * @ap_update is true when this is triggered by the AP changing its own
+ * bandwidth (rather than by us requesting a reduction).
+ */
+static void iwl_mld_omi_bw_update(struct iwl_mld *mld,
+                                 struct ieee80211_bss_conf *link_conf,
+                                 struct iwl_mld_link *mld_link,
+                                 struct ieee80211_link_sta *link_sta,
+                                 enum ieee80211_sta_rx_bandwidth bw,
+                                 bool ap_update)
+{
+       enum ieee80211_sta_rx_bandwidth apply_bw;
+
+       mld_link->rx_omi.desired_bw = bw;
+
+       /* Can't update OMI while already in progress, desired_bw was
+        * set so on FW notification the worker will see the change
+        * and apply the new desired bw.
+        */
+       if (mld_link->rx_omi.bw_in_progress)
+               return;
+
+       /* BW_MAX is resolved to the current operating channel width */
+       if (bw == IEEE80211_STA_RX_BW_MAX)
+               apply_bw = ieee80211_chan_width_to_rx_bw(link_conf->chanreq.oper.width);
+       else
+               apply_bw = bw;
+
+       if (!ap_update) {
+               /* The update isn't due to AP tracking after leaving OMI,
+                * where the AP could increase BW and then we must tell
+                * it that we can do the increased BW as well, if we did
+                * update the chandef.
+                * In this case, if we want MAX, then we will need to send
+                * a new OMI to the AP if it increases its own bandwidth as
+                * we can (due to internal and FW limitations, and being
+                * worried the AP might break) only send to what we're doing
+                * at the moment. In this case, set last_max_bw; otherwise
+                * if we really want to decrease our bandwidth set it to 0
+                * to indicate no updates are needed if the AP changes.
+                */
+               if (bw != IEEE80211_STA_RX_BW_MAX)
+                       mld_link->rx_omi.last_max_bw = apply_bw;
+               else
+                       mld_link->rx_omi.last_max_bw = 0;
+       } else {
+               /* Otherwise, if we're already trying to do maximum and
+                * the AP is changing, set last_max_bw to the new max the
+                * AP is using, we'll only get to this code path if the
+                * new bandwidth of the AP is bigger than what we sent it
+                * previously. This avoids repeatedly sending updates if
+                * it changes bandwidth, only doing it once on an increase.
+                */
+               mld_link->rx_omi.last_max_bw = apply_bw;
+       }
+
+       /* only kick the FW change once mac80211 accepted the OMI request */
+       if (ieee80211_prepare_rx_omi_bw(link_sta, bw)) {
+               mld_link->rx_omi.bw_in_progress = apply_bw;
+               iwl_mld_change_link_omi_bw(mld, link_conf, apply_bw);
+       }
+}
+
+/* Delayed worker that runs after the firmware confirmed the OMI bandwidth
+ * change (plus a settle delay): finalize the change in mac80211, and if
+ * the desired bandwidth changed meanwhile, start a new update.
+ */
+static void iwl_mld_omi_bw_finished_work(struct wiphy *wiphy,
+                                        struct wiphy_work *work)
+{
+       struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_link *mld_link =
+               container_of(work, typeof(*mld_link), rx_omi.finished_work.work);
+       enum ieee80211_sta_rx_bandwidth desired_bw, switched_to_bw;
+       struct ieee80211_vif *vif = mld_link->vif;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct ieee80211_bss_conf *link_conf;
+       struct ieee80211_link_sta *link_sta;
+
+       /* may have disconnected while the work was pending */
+       if (!mld_vif->ap_sta)
+               return;
+
+       link_sta = wiphy_dereference(mld->wiphy,
+                                    mld_vif->ap_sta->link[mld_link->link_id]);
+       if (WARN_ON_ONCE(!link_sta))
+               return;
+
+       link_conf = link_conf_dereference_protected(vif, link_sta->link_id);
+       if (WARN_ON_ONCE(!link_conf))
+               return;
+
+       if (WARN_ON(!mld_link->rx_omi.bw_in_progress))
+               return;
+
+       desired_bw = mld_link->rx_omi.desired_bw;
+       switched_to_bw = mld_link->rx_omi.bw_in_progress;
+
+       ieee80211_finalize_rx_omi_bw(link_sta);
+       mld_link->rx_omi.bw_in_progress = 0;
+
+       /* apply a bandwidth change that was requested while in progress */
+       if (desired_bw != switched_to_bw)
+               iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta,
+                                     desired_bw, false);
+}
+
+/* Check whether OMI bandwidth reduction is applicable and, if so, return
+ * the BSS vif together with its single link STA and mld link (via the out
+ * parameters). Returns NULL when the HW is too old, there is no BSS vif,
+ * or there is more than one link STA (TDLS peers / EMLSR).
+ */
+static struct ieee80211_vif *
+iwl_mld_get_omi_bw_reduction_pointers(struct iwl_mld *mld,
+                                     struct ieee80211_link_sta **link_sta,
+                                     struct iwl_mld_link **mld_link)
+{
+       struct iwl_mld_vif *mld_vif;
+       struct ieee80211_vif *vif;
+       int n_link_stas = 0;
+
+       *link_sta = NULL;
+
+       /* OMI BW reduction is only used on SC devices and later */
+       if (mld->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_SC)
+               return NULL;
+
+       vif = iwl_mld_get_bss_vif(mld);
+       if (!vif)
+               return NULL;
+
+       /* count all link STAs; keep the last one seen */
+       for (int i = 0; i < ARRAY_SIZE(mld->fw_id_to_link_sta); i++) {
+               struct ieee80211_link_sta *tmp;
+
+               tmp = wiphy_dereference(mld->wiphy, mld->fw_id_to_link_sta[i]);
+               if (IS_ERR_OR_NULL(tmp))
+                       continue;
+
+               n_link_stas++;
+               *link_sta = tmp;
+       }
+
+       /* can't do anything if we have TDLS peers or EMLSR */
+       if (n_link_stas != 1)
+               return NULL;
+
+       mld_vif = iwl_mld_vif_from_mac80211(vif);
+       *mld_link = iwl_mld_link_dereference_check(mld_vif,
+                                                  (*link_sta)->link_id);
+       if (WARN_ON(!*mld_link))
+               return NULL;
+
+       return vif;
+}
+
+/* React to the AP changing its operating bandwidth.
+ *
+ * If we previously limited our RX bandwidth via OMI and the AP now uses
+ * a bandwidth at or above what we last advertised, send an updated OMI
+ * so the limit is re-asserted against the AP's new bandwidth.
+ */
+void iwl_mld_omi_ap_changed_bw(struct iwl_mld *mld,
+                              struct ieee80211_bss_conf *link_conf,
+                              enum ieee80211_sta_rx_bandwidth bw)
+{
+       struct ieee80211_link_sta *link_sta;
+       struct iwl_mld_link *mld_link;
+       struct ieee80211_vif *vif;
+
+       vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
+       if (!vif)
+               return;
+
+       /* the notified link must belong to the single BSS vif found above */
+       if (WARN_ON(link_conf->vif != vif))
+               return;
+
+       /* This is 0 if we requested an OMI BW reduction and don't want to
+        * be sending an OMI when the AP's bandwidth changes.
+        */
+       if (!mld_link->rx_omi.last_max_bw)
+               return;
+
+       /* We only need to tell the AP if it increases BW over what we last
+        * told it we were using, if it reduces then our last OMI to it will
+        * not get used anyway (e.g. we said we want 160 but it's doing 80.)
+        */
+       if (bw < mld_link->rx_omi.last_max_bw)
+               return;
+
+       iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta, bw, true);
+}
+
+/* Handle the firmware's OMI status notification.
+ *
+ * The firmware tells us the OMI was applied; schedule the finish work
+ * after a settle delay so the AP has time to adopt the new bandwidth.
+ */
+void iwl_mld_handle_omi_status_notif(struct iwl_mld *mld,
+                                    struct iwl_rx_packet *pkt)
+{
+       struct ieee80211_link_sta *link_sta;
+       struct iwl_mld_link *mld_link;
+       struct ieee80211_vif *vif;
+
+       vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
+       if (IWL_FW_CHECK(mld, !vif, "unexpected OMI notification\n"))
+               return;
+
+       /* a notification is only valid while an update is in flight */
+       if (IWL_FW_CHECK(mld, !mld_link->rx_omi.bw_in_progress,
+                        "OMI notification when not requested\n"))
+               return;
+
+       wiphy_delayed_work_queue(mld->hw->wiphy,
+                                &mld_link->rx_omi.finished_work,
+                                msecs_to_jiffies(IWL_MLD_OMI_AP_SETTLE_DELAY));
+}
+
+/* Leave OMI bandwidth reduction: request the maximum RX bandwidth again.
+ *
+ * Records the exit timestamp, which is later used as a protection period
+ * before entering BW reduction again (see iwl_mld_check_omi_bw_reduction).
+ */
+void iwl_mld_leave_omi_bw_reduction(struct iwl_mld *mld)
+{
+       struct ieee80211_bss_conf *link_conf;
+       struct ieee80211_link_sta *link_sta;
+       struct iwl_mld_link *mld_link;
+       struct ieee80211_vif *vif;
+
+       vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
+       if (!vif)
+               return;
+
+       link_conf = link_conf_dereference_protected(vif, link_sta->link_id);
+       if (WARN_ON_ONCE(!link_conf))
+               return;
+
+       /* OMI is an HE feature; nothing to undo without HE support */
+       if (!link_conf->he_support)
+               return;
+
+       mld_link->rx_omi.exit_ts = jiffies;
+
+       iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta,
+                             IEEE80211_STA_RX_BW_MAX, false);
+}
+
+/* Periodically evaluate entering/leaving OMI bandwidth reduction.
+ *
+ * Reduces RX bandwidth to 80 MHz (via OMI) to save power when our own
+ * channel load is low, with hysteresis between the enter and exit
+ * thresholds and a protection period after a previous exit. Any of the
+ * disqualifying conditions falls through to "apply" with the maximum
+ * bandwidth, which restores/keeps full BW.
+ */
+void iwl_mld_check_omi_bw_reduction(struct iwl_mld *mld)
+{
+       enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_MAX;
+       struct ieee80211_chanctx_conf *chanctx;
+       struct ieee80211_bss_conf *link_conf;
+       struct ieee80211_link_sta *link_sta;
+       struct cfg80211_chan_def chandef;
+       struct iwl_mld_link *mld_link;
+       struct iwl_mld_vif *mld_vif;
+       struct ieee80211_vif *vif;
+       struct iwl_mld_phy *phy;
+       u16 punctured;
+       int exit_thr;
+
+       /* not allowed in CAM mode */
+       if (iwlmld_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+               return;
+
+       /* must have one BSS connection (no P2P), no TDLS, nor EMLSR */
+       vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
+       if (!vif)
+               return;
+
+       link_conf = link_conf_dereference_protected(vif, link_sta->link_id);
+       if (WARN_ON_ONCE(!link_conf))
+               return;
+
+       /* OMI is an HE feature */
+       if (!link_conf->he_support)
+               return;
+
+       chanctx = wiphy_dereference(mld->wiphy, mld_link->chan_ctx);
+       if (WARN_ON(!chanctx))
+               return;
+
+       mld_vif = iwl_mld_vif_from_mac80211(vif);
+       if (!mld_vif->authorized)
+               goto apply;
+
+       /* must not be in low-latency mode */
+       if (iwl_mld_vif_low_latency(mld_vif))
+               goto apply;
+
+       chandef = link_conf->chanreq.oper;
+
+       /* exit threshold depends on how much BW the reduction gives up */
+       switch (chandef.width) {
+       case NL80211_CHAN_WIDTH_320:
+               exit_thr = IWL_MLD_OMI_EXIT_CHAN_LOAD_320;
+               break;
+       case NL80211_CHAN_WIDTH_160:
+               exit_thr = IWL_MLD_OMI_EXIT_CHAN_LOAD_160;
+               break;
+       default:
+               /* since we reduce to 80 MHz, must have more to start with */
+               goto apply;
+       }
+
+       /* not to be done if primary 80 MHz is punctured */
+       if (cfg80211_chandef_primary(&chandef, NL80211_CHAN_WIDTH_80,
+                                    &punctured) < 0 ||
+           punctured != 0)
+               goto apply;
+
+       phy = iwl_mld_phy_from_mac80211(chanctx);
+
+       if (phy->channel_load_by_us > exit_thr) {
+               /* send OMI for max bandwidth */
+               goto apply;
+       }
+
+       if (phy->channel_load_by_us > IWL_MLD_OMI_ENTER_CHAN_LOAD) {
+               /* no changes between enter/exit thresholds */
+               return;
+       }
+
+       /* don't re-enter too soon after a previous exit */
+       if (time_is_before_jiffies(mld_link->rx_omi.exit_ts +
+                                  msecs_to_jiffies(IWL_MLD_OMI_EXIT_PROTECTION)))
+               return;
+
+       /* reduce bandwidth to 80 MHz to save power */
+       bw = IEEE80211_STA_RX_BW_80;
+apply:
+       iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta, bw, false);
+}
+
+/* Instantiates iwl_mld_allocate_link_fw_id() used below — NOTE(review):
+ * presumably allocates a free fw link id and maps it to the bss_conf;
+ * confirm against the IWL_MLD_ALLOC_FN macro definition.
+ */
+IWL_MLD_ALLOC_FN(link, bss_conf)
+
+/* Constructor function for struct iwl_mld_link */
+static int
+iwl_mld_init_link(struct iwl_mld *mld, struct ieee80211_bss_conf *link,
+                 struct iwl_mld_link *mld_link)
+{
+       mld_link->vif = link->vif;
+       mld_link->link_id = link->link_id;
+
+       /* mark internal stations as unused until they are allocated */
+       iwl_mld_init_internal_sta(&mld_link->bcast_sta);
+       iwl_mld_init_internal_sta(&mld_link->mcast_sta);
+       iwl_mld_init_internal_sta(&mld_link->aux_sta);
+
+       wiphy_delayed_work_init(&mld_link->rx_omi.finished_work,
+                               iwl_mld_omi_bw_finished_work);
+
+       return iwl_mld_allocate_link_fw_id(mld, &mld_link->fw_id, link);
+}
+
+/* Initializes the link structure, maps fw id to the ieee80211_bss_conf, and
+ * adds a link to the fw.
+ *
+ * For the default link the embedded mld_vif->deflink is used; other links
+ * are allocated here and freed on failure (or later by iwl_mld_remove_link).
+ * Returns 0 on success or a negative error code.
+ */
+int iwl_mld_add_link(struct iwl_mld *mld,
+                    struct ieee80211_bss_conf *bss_conf)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);
+       struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf);
+       bool is_deflink = bss_conf == &bss_conf->vif->bss_conf;
+       int ret;
+
+       if (!link) {
+               if (is_deflink)
+                       link = &mld_vif->deflink;
+               else
+                       link = kzalloc(sizeof(*link), GFP_KERNEL);
+
+               /* don't dereference a failed allocation below */
+               if (!link)
+                       return -ENOMEM;
+       } else {
+               /* a pre-existing link is only expected during fw restart */
+               WARN_ON(!mld->fw_status.in_hw_restart);
+       }
+
+       ret = iwl_mld_init_link(mld, bss_conf, link);
+       if (ret)
+               goto free;
+
+       rcu_assign_pointer(mld_vif->link[bss_conf->link_id], link);
+
+       ret = iwl_mld_add_link_to_fw(mld, bss_conf);
+       if (ret) {
+               /* undo both mappings made above */
+               RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL);
+               RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL);
+               goto free;
+       }
+
+       return ret;
+
+free:
+       if (!is_deflink)
+               kfree(link);
+       return ret;
+}
+
+/* Remove link from fw, unmap the bss_conf, and destroy the link structure.
+ *
+ * The link must already be inactive. Cleanup continues even if the fw
+ * removal command fails; the error is still returned to the caller.
+ */
+int iwl_mld_remove_link(struct iwl_mld *mld,
+                       struct ieee80211_bss_conf *bss_conf)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);
+       struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf);
+       bool is_deflink = link == &mld_vif->deflink;
+       int ret;
+
+       if (WARN_ON(!link || link->active))
+               return -EINVAL;
+
+       ret = iwl_mld_rm_link_from_fw(mld, bss_conf);
+       /* Continue cleanup on failure */
+
+       /* stop the pending OMI work before the link can go away */
+       wiphy_delayed_work_cancel(mld->wiphy, &link->rx_omi.finished_work);
+
+       RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL);
+
+       if (WARN_ON(link->fw_id >= mld->fw->ucode_capa.num_links))
+               ret = -EINVAL;
+       else
+               RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL);
+
+       /* free last: @link must not be dereferenced after this point */
+       if (!is_deflink)
+               kfree_rcu(link, rcu_head);
+
+       return ret;
+}
+
+/* Handle the firmware's missed-beacons notification.
+ *
+ * Depending on how many consecutive beacons were missed (and whether we
+ * still receive data), this triggers debug collection, CQM beacon-loss
+ * reporting / link re-selection, full connection loss, or - when in
+ * EMLSR - exiting EMLSR back to the primary link.
+ */
+void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
+                                       struct iwl_rx_packet *pkt)
+{
+       const struct iwl_missed_beacons_notif *notif = (const void *)pkt->data;
+       union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
+       u32 link_id = le32_to_cpu(notif->link_id);
+       u32 missed_bcon = le32_to_cpu(notif->consec_missed_beacons);
+       u32 missed_bcon_since_rx =
+               le32_to_cpu(notif->consec_missed_beacons_since_last_rx);
+       u32 scnd_lnk_bcn_lost =
+               le32_to_cpu(notif->consec_missed_beacons_other_link);
+       struct ieee80211_bss_conf *link_conf =
+               iwl_mld_fw_id_to_link_conf(mld, link_id);
+       u32 bss_param_ch_cnt_link_id;
+       struct ieee80211_vif *vif;
+
+       if (WARN_ON(!link_conf))
+               return;
+
+       vif = link_conf->vif;
+       bss_param_ch_cnt_link_id = link_conf->bss_param_ch_cnt_link_id;
+
+       IWL_DEBUG_INFO(mld,
+                      "missed bcn link_id=%u, %u consecutive=%u\n",
+                      link_id, missed_bcon, missed_bcon_since_rx);
+
+       if (WARN_ON(!vif))
+               return;
+
+       /* name the debug dump after the affected link/MAC */
+       mld->trans->dbg.dump_file_name_ext_valid = true;
+       snprintf(mld->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
+                "LinkId_%d_MacType_%d", link_id,
+                iwl_mld_mac80211_iftype_to_fw(vif));
+
+       iwl_dbg_tlv_time_point(&mld->fwrt,
+                              IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
+
+       if (missed_bcon >= IWL_MLD_MISSED_BEACONS_THRESHOLD_LONG) {
+               if (missed_bcon_since_rx >=
+                   IWL_MLD_MISSED_BEACONS_SINCE_RX_THOLD) {
+                       ieee80211_connection_loss(vif);
+                       return;
+               }
+               IWL_WARN(mld,
+                        "missed beacons exceeds threshold, but receiving data. Stay connected, Expect bugs.\n");
+               return;
+       }
+
+       if (missed_bcon_since_rx > IWL_MLD_MISSED_BEACONS_THRESHOLD) {
+               ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
+
+               /* try to switch links, no-op if we don't have MLO */
+               iwl_mld_trigger_link_selection(mld, vif);
+       }
+
+       /* no more logic if we're not in EMLSR */
+       if (hweight16(vif->active_links) <= 1)
+               return;
+
+       /* We are processing a notification before link activation */
+       if (le32_to_cpu(notif->other_link_id) == FW_CTXT_ID_INVALID)
+               return;
+
+       /* Exit EMLSR if we lost more than
+        * IWL_MLD_MISSED_BEACONS_EXIT_ESR_THRESH beacons on boths links
+        * OR more than IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH on current link.
+        * OR more than IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED
+        * on current link and the link's bss_param_ch_count has changed on
+        * the other link's beacon.
+        */
+       if ((missed_bcon >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS &&
+            scnd_lnk_bcn_lost >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS) ||
+           missed_bcon >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH ||
+           (bss_param_ch_cnt_link_id != link_id &&
+            missed_bcon >=
+            IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED)) {
+               iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_MISSED_BEACON,
+                                  iwl_mld_get_primary_link(vif));
+       }
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_handle_missed_beacon_notif);
+
+/* Fix up a queued missed-beacons notification when a link is removed.
+ *
+ * Returns true if the notification should be dropped entirely (the
+ * primary link in it was removed); otherwise it may edit the
+ * notification in place to invalidate a removed secondary link.
+ */
+bool iwl_mld_cancel_missed_beacon_notif(struct iwl_mld *mld,
+                                       struct iwl_rx_packet *pkt,
+                                       u32 removed_link_id)
+{
+       struct iwl_missed_beacons_notif *notif = (void *)pkt->data;
+       __le32 removed = cpu_to_le32(removed_link_id);
+
+       /* Second link is being removed. Don't cancel the notification,
+        * but mark second link as invalid.
+        */
+       if (notif->other_link_id == removed)
+               notif->other_link_id = cpu_to_le32(FW_CTXT_ID_INVALID);
+
+       /* If the primary link is removed, cancel the notification */
+       return notif->link_id == removed;
+}
+
+/* Push the full (post-association) link configuration to the firmware,
+ * except the activation state and the EHT parameters.
+ */
+int iwl_mld_link_set_associated(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                               struct ieee80211_bss_conf *link)
+{
+       u32 changes = LINK_CONTEXT_MODIFY_ALL;
+
+       changes &= ~LINK_CONTEXT_MODIFY_ACTIVE;
+       changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
+
+       return iwl_mld_change_link_in_fw(mld, link, changes);
+}
+
+/* One line of the RSSI-to-grade mapping below */
+struct iwl_mld_rssi_to_grade {
+       s8 rssi[2];     /* [0] low band (2.4 GHz), [1] high/ultra-high band */
+       u16 grade;      /* estimated throughput, fixed-point, scale 1/10 */
+};
+
+#define RSSI_TO_GRADE_LINE(_lb, _hb_uhb, _grade) \
+       { \
+               .rssi = {_lb, _hb_uhb}, \
+               .grade = _grade \
+       }
+
+/*
+ * This array must be sorted by increasing RSSI for proper functionality.
+ * The grades are actually estimated throughput, represented as fixed-point
+ * with a scale factor of 1/10.
+ */
+static const struct iwl_mld_rssi_to_grade rssi_to_grade_map[] = {
+       RSSI_TO_GRADE_LINE(-85, -89, 172),
+       RSSI_TO_GRADE_LINE(-83, -86, 344),
+       RSSI_TO_GRADE_LINE(-82, -85, 516),
+       RSSI_TO_GRADE_LINE(-80, -83, 688),
+       RSSI_TO_GRADE_LINE(-77, -79, 1032),
+       RSSI_TO_GRADE_LINE(-73, -76, 1376),
+       RSSI_TO_GRADE_LINE(-70, -74, 1548),
+       RSSI_TO_GRADE_LINE(-69, -72, 1720),
+       RSSI_TO_GRADE_LINE(-65, -68, 2064),
+       RSSI_TO_GRADE_LINE(-61, -66, 2294),
+       RSSI_TO_GRADE_LINE(-58, -61, 2580),
+       RSSI_TO_GRADE_LINE(-55, -58, 2868),
+       RSSI_TO_GRADE_LINE(-46, -55, 3098),
+       RSSI_TO_GRADE_LINE(-43, -54, 3442)
+};
+
+/* grade used when the RSSI is above every line in the map */
+#define MAX_GRADE (rssi_to_grade_map[ARRAY_SIZE(rssi_to_grade_map) - 1].grade)
+
+/* per-band default channel load (percent), used without a QBSS element */
+#define DEFAULT_CHAN_LOAD_2GHZ 30
+#define DEFAULT_CHAN_LOAD_5GHZ 15
+#define DEFAULT_CHAN_LOAD_6GHZ 0
+
+/* Factors calculation is done with fixed-point with a scaling factor of 1/256 */
+#define SCALE_FACTOR 256
+#define MAX_CHAN_LOAD 256
+
+/* Number of usable 20 MHz subchannels of the link's operating channel,
+ * after subtracting punctured subchannels (puncturing is only possible
+ * at 80 MHz and above). Returns 1 on an invalid width.
+ */
+static unsigned int
+iwl_mld_get_n_subchannels(const struct ieee80211_bss_conf *link_conf)
+{
+       int width_mhz =
+               nl80211_chan_width_to_mhz(link_conf->chanreq.oper.width);
+       unsigned int n_subch;
+
+       if (WARN_ONCE(width_mhz < 20 || width_mhz > 320,
+                     "Invalid channel width : (%d)\n", width_mhz))
+               return 1;
+
+       /* total number of subchannels */
+       n_subch = width_mhz / 20;
+
+       /* No puncturing if less than 80 MHz */
+       if (width_mhz >= 80)
+               n_subch -= hweight16(link_conf->chanreq.oper.punctured);
+
+       return n_subch;
+}
+
+/* Read the AP's channel utilization from its QBSS Load element.
+ *
+ * Uses the beacon IEs for an active link and the generic IEs otherwise.
+ * Returns the element's channel_util field, or -EINVAL if the element
+ * is absent or malformed.
+ */
+static int
+iwl_mld_get_chan_load_from_element(struct iwl_mld *mld,
+                                  struct ieee80211_bss_conf *link_conf)
+{
+       struct ieee80211_vif *vif = link_conf->vif;
+       const struct cfg80211_bss_ies *ies;
+       const struct element *bss_load_elem = NULL;
+       const struct ieee80211_bss_load_elem *bss_load;
+
+       /* the IES pointers below are RCU-protected */
+       guard(rcu)();
+
+       if (ieee80211_vif_link_active(vif, link_conf->link_id))
+               ies = rcu_dereference(link_conf->bss->beacon_ies);
+       else
+               ies = rcu_dereference(link_conf->bss->ies);
+
+       if (ies)
+               bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD,
+                                                  ies->data, ies->len);
+
+       if (!bss_load_elem ||
+           bss_load_elem->datalen != sizeof(*bss_load))
+               return -EINVAL;
+
+       bss_load = (const void *)bss_load_elem->data;
+
+       return bss_load->channel_util;
+}
+
+/* Channel load generated by us on the link's channel context.
+ *
+ * Returns the firmware-reported channel_load_by_us for an active link
+ * with an assigned channel context, and 0 otherwise. When
+ * @expect_active_link is set, an inactive link triggers a WARN.
+ */
+static unsigned int
+iwl_mld_get_chan_load_by_us(struct iwl_mld *mld,
+                           struct ieee80211_bss_conf *link_conf,
+                           bool expect_active_link)
+{
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf);
+       struct ieee80211_chanctx_conf *chan_ctx;
+       struct iwl_mld_phy *phy;
+
+       if (!mld_link || !mld_link->active) {
+               WARN_ON(expect_active_link);
+               return 0;
+       }
+
+       if (WARN_ONCE(!rcu_access_pointer(mld_link->chan_ctx),
+                     "Active link (%u) without channel ctxt assigned!\n",
+                     link_conf->link_id))
+               return 0;
+
+       chan_ctx = wiphy_dereference(mld->wiphy, mld_link->chan_ctx);
+       phy = iwl_mld_phy_from_mac80211(chan_ctx);
+
+       return phy->channel_load_by_us;
+}
+
+/* Returns error if the channel utilization element is invalid/unavailable.
+ *
+ * Otherwise returns the AP-reported channel load minus our own
+ * contribution, i.e. the load caused by others on the channel. Our own
+ * load is reported as a percentage, so it is scaled to the element's
+ * 0-255 range before subtracting.
+ */
+int iwl_mld_get_chan_load_by_others(struct iwl_mld *mld,
+                                   struct ieee80211_bss_conf *link_conf,
+                                   bool expect_active_link)
+{
+       int chan_load;
+       unsigned int chan_load_by_us;
+
+       /* get overall load */
+       chan_load = iwl_mld_get_chan_load_from_element(mld, link_conf);
+       if (chan_load < 0)
+               return chan_load;
+
+       chan_load_by_us = iwl_mld_get_chan_load_by_us(mld, link_conf,
+                                                     expect_active_link);
+
+       /* channel load by us is given in percentage */
+       chan_load_by_us =
+               NORMALIZE_PERCENT_TO_255(chan_load_by_us);
+
+       /* Use only values that firmware sends that can possibly be valid */
+       if (chan_load_by_us <= chan_load)
+               chan_load -= chan_load_by_us;
+
+       return chan_load;
+}
+
+/* Per-band fallback channel load (percent), used when the AP does not
+ * provide a valid QBSS Load element.
+ */
+static unsigned int
+iwl_mld_get_default_chan_load(struct ieee80211_bss_conf *link_conf)
+{
+       switch (link_conf->chanreq.oper.chan->band) {
+       case NL80211_BAND_2GHZ:
+               return DEFAULT_CHAN_LOAD_2GHZ;
+       case NL80211_BAND_5GHZ:
+               return DEFAULT_CHAN_LOAD_5GHZ;
+       case NL80211_BAND_6GHZ:
+               return DEFAULT_CHAN_LOAD_6GHZ;
+       default:
+               WARN_ON(1);
+               return 0;
+       }
+}
+
+/* Channel load on the link's channel, on the 0-256 scale.
+ *
+ * Prefers the load derived from the AP's QBSS Load element; falls back
+ * to the per-band default (a percentage, scaled here) when the element
+ * is missing or invalid.
+ */
+unsigned int iwl_mld_get_chan_load(struct iwl_mld *mld,
+                                  struct ieee80211_bss_conf *link_conf)
+{
+       int load = iwl_mld_get_chan_load_by_others(mld, link_conf, false);
+
+       if (load >= 0)
+               return load;
+
+       /* No information from the element, take the scaled defaults */
+       load = iwl_mld_get_default_chan_load(link_conf);
+       return NORMALIZE_PERCENT_TO_255(load);
+}
+
+/* Remaining (unused) portion of the channel load, out of MAX_CHAN_LOAD */
+static unsigned int
+iwl_mld_get_avail_chan_load(struct iwl_mld *mld,
+                           struct ieee80211_bss_conf *link_conf)
+{
+       unsigned int used = iwl_mld_get_chan_load(mld, link_conf);
+
+       return MAX_CHAN_LOAD - used;
+}
+
+/* This function calculates the grade of a link. Returns 0 in error case.
+ *
+ * The grade is the RSSI-based estimated throughput (rssi_to_grade_map),
+ * scaled by the available channel load and multiplied by the number of
+ * usable 20 MHz subchannels.
+ */
+unsigned int iwl_mld_get_link_grade(struct iwl_mld *mld,
+                                   struct ieee80211_bss_conf *link_conf)
+{
+       enum nl80211_band band;
+       int rssi_idx;
+       s32 link_rssi;
+       unsigned int grade = MAX_GRADE;
+
+       if (WARN_ON_ONCE(!link_conf))
+               return 0;
+
+       band = link_conf->chanreq.oper.chan->band;
+       if (WARN_ONCE(band != NL80211_BAND_2GHZ &&
+                     band != NL80211_BAND_5GHZ &&
+                     band != NL80211_BAND_6GHZ,
+                     "Invalid band (%u)\n", band))
+               return 0;
+
+       link_rssi = MBM_TO_DBM(link_conf->bss->signal);
+       /*
+        * For 6 GHz the RSSI of the beacons is lower than
+        * the RSSI of the data.
+        */
+       if (band == NL80211_BAND_6GHZ && link_rssi)
+               link_rssi += 4;
+
+       /* column 0 of the map is low band, column 1 is HB/UHB */
+       rssi_idx = band == NL80211_BAND_2GHZ ? 0 : 1;
+
+       /* No valid RSSI - take the lowest grade */
+       if (!link_rssi)
+               link_rssi = rssi_to_grade_map[0].rssi[rssi_idx];
+
+       IWL_DEBUG_EHT(mld,
+                     "Calculating grade of link %d: band = %d, bandwidth = %d, punctured subchannels =0x%x RSSI = %d\n",
+                     link_conf->link_id, band,
+                     link_conf->chanreq.oper.width,
+                     link_conf->chanreq.oper.punctured, link_rssi);
+
+       /* Get grade based on RSSI */
+       for (int i = 0; i < ARRAY_SIZE(rssi_to_grade_map); i++) {
+               const struct iwl_mld_rssi_to_grade *line =
+                       &rssi_to_grade_map[i];
+
+               if (link_rssi > line->rssi[rssi_idx])
+                       continue;
+               grade = line->grade;
+               break;
+       }
+
+       /* Apply the channel load and puncturing factors */
+       grade = grade * iwl_mld_get_avail_chan_load(mld, link_conf) / SCALE_FACTOR;
+       grade = grade * iwl_mld_get_n_subchannels(link_conf);
+
+       IWL_DEBUG_EHT(mld, "Link %d's grade: %d\n", link_conf->link_id, grade);
+
+       return grade;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_get_link_grade);
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_link_h__
+#define __iwl_mld_link_h__
+
+#include <net/mac80211.h>
+
+#include "mld.h"
+#include "sta.h"
+
+/**
+ * struct iwl_probe_resp_data - data for NoA/CSA updates
+ * @rcu_head: used for freeing the data on update
+ * @notif: notification data
+ * @noa_len: length of NoA attribute, calculated from the notification
+ */
+struct iwl_probe_resp_data {
+       struct rcu_head rcu_head;
+       struct iwl_probe_resp_data_notif notif;
+       int noa_len;
+};
+
+/**
+ * struct iwl_mld_link - link configuration parameters
+ *
+ * @rcu_head: RCU head for freeing this data.
+ * @fw_id: the fw id of the link.
+ * @active: if the link is active or not.
+ * @queue_params: QoS data from mac80211. This is updated with a call to
+ *     drv_conf_tx per each AC, and then notified once with BSS_CHANGED_QOS.
+ *     So we store it here and then send one link cmd for all the ACs.
+ * @chan_ctx: pointer to the channel context assigned to the link. If a link
+ *     has an assigned channel context it means that it is active.
+ * @he_ru_2mhz_block: 26-tone RU OFDMA transmissions should be blocked.
+ * @igtk: fw can only have one IGTK at a time, whereas mac80211 can have two.
+ *     This tracks the one IGTK that currently exists in FW.
+ * @vif: the vif this link belongs to
+ * @bcast_sta: station used for broadcast packets. Used in AP, GO and IBSS.
+ * @mcast_sta: station used for multicast packets. Used in AP, GO and IBSS.
+ * @aux_sta: station used for remain on channel. Used in P2P device.
+ * @link_id: over the air link ID
+ * @ap_early_keys: The firmware cannot install keys before bcast/mcast STAs,
+ *     but higher layers work differently, so we store the keys here for
+ *     later installation.
+ * @silent_deactivation: next deactivation needs to be silent.
+ * @probe_resp_data: data from FW notification to store NOA related data to be
+ *     inserted into probe response.
+ * @rx_omi: data for BW reduction with OMI
+ * @rx_omi.bw_in_progress: update is in progress (indicates target BW)
+ * @rx_omi.exit_ts: timestamp of last exit
+ * @rx_omi.finished_work: work for the delayed reaction to the firmware saying
+ *     the change was applied, and for then applying a new mode if it was
+ *     updated while waiting for firmware/AP settle delay.
+ * @rx_omi.desired_bw: desired bandwidth
+ * @rx_omi.last_max_bw: last maximum BW used by firmware, for AP BW changes
+ */
+struct iwl_mld_link {
+       struct rcu_head rcu_head;
+
+       /* Add here fields that need clean up on restart */
+       struct_group(zeroed_on_hw_restart,
+               u8 fw_id;
+               bool active;
+               struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+               struct ieee80211_chanctx_conf __rcu *chan_ctx;
+               bool he_ru_2mhz_block;
+               struct ieee80211_key_conf *igtk;
+       );
+       /* And here fields that survive a fw restart */
+       struct ieee80211_vif *vif;
+       struct iwl_mld_int_sta bcast_sta;
+       struct iwl_mld_int_sta mcast_sta;
+       struct iwl_mld_int_sta aux_sta;
+       u8 link_id;
+
+       struct {
+               struct wiphy_delayed_work finished_work;
+               unsigned long exit_ts;
+               enum ieee80211_sta_rx_bandwidth bw_in_progress,
+                                               desired_bw,
+                                               last_max_bw;
+       } rx_omi;
+
+       /* we can only have 2 GTK + 2 IGTK + 2 BIGTK active at a time */
+       struct ieee80211_key_conf *ap_early_keys[6];
+       bool silent_deactivation;
+       struct iwl_probe_resp_data __rcu *probe_resp_data;
+};
+
+/* Cleanup function for struct iwl_mld_link, will be called in restart:
+ * frees the probe-response data, zeroes the restart-scoped fields, and
+ * releases any internal stations that were allocated.
+ */
+static inline void
+iwl_mld_cleanup_link(struct iwl_mld *mld, struct iwl_mld_link *link)
+{
+       struct iwl_probe_resp_data *probe_data;
+
+       probe_data = wiphy_dereference(mld->wiphy, link->probe_resp_data);
+       RCU_INIT_POINTER(link->probe_resp_data, NULL);
+       if (probe_data)
+               kfree_rcu(probe_data, rcu_head);
+
+       CLEANUP_STRUCT(link);
+       if (link->bcast_sta.sta_id != IWL_INVALID_STA)
+               iwl_mld_free_internal_sta(mld, &link->bcast_sta);
+       if (link->mcast_sta.sta_id != IWL_INVALID_STA)
+               iwl_mld_free_internal_sta(mld, &link->mcast_sta);
+       if (link->aux_sta.sta_id != IWL_INVALID_STA)
+               iwl_mld_free_internal_sta(mld, &link->aux_sta);
+}
+
+/* Scale a percentage in [0,100] onto the 0-256 load scale (100% -> 256) */
+#define NORMALIZE_PERCENT_TO_255(percentage) ((percentage) * 256 / 100)
+
+int iwl_mld_add_link(struct iwl_mld *mld,
+                    struct ieee80211_bss_conf *bss_conf);
+int iwl_mld_remove_link(struct iwl_mld *mld,
+                       struct ieee80211_bss_conf *bss_conf);
+int iwl_mld_activate_link(struct iwl_mld *mld,
+                         struct ieee80211_bss_conf *link);
+void iwl_mld_deactivate_link(struct iwl_mld *mld,
+                            struct ieee80211_bss_conf *link);
+int iwl_mld_change_link_omi_bw(struct iwl_mld *mld,
+                              struct ieee80211_bss_conf *link,
+                              enum ieee80211_sta_rx_bandwidth bw);
+int iwl_mld_change_link_in_fw(struct iwl_mld *mld,
+                             struct ieee80211_bss_conf *link, u32 changes);
+void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
+                                       struct iwl_rx_packet *pkt);
+bool iwl_mld_cancel_missed_beacon_notif(struct iwl_mld *mld,
+                                       struct iwl_rx_packet *pkt,
+                                       u32 removed_link_id);
+int iwl_mld_link_set_associated(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                               struct ieee80211_bss_conf *link);
+
+unsigned int iwl_mld_get_link_grade(struct iwl_mld *mld,
+                                   struct ieee80211_bss_conf *link_conf);
+
+unsigned int iwl_mld_get_chan_load(struct iwl_mld *mld,
+                                  struct ieee80211_bss_conf *link_conf);
+
+int iwl_mld_get_chan_load_by_others(struct iwl_mld *mld,
+                                   struct ieee80211_bss_conf *link_conf,
+                                   bool expect_active_link);
+void iwl_mld_handle_omi_status_notif(struct iwl_mld *mld,
+                                    struct iwl_rx_packet *pkt);
+void iwl_mld_leave_omi_bw_reduction(struct iwl_mld *mld);
+void iwl_mld_check_omi_bw_reduction(struct iwl_mld *mld);
+void iwl_mld_omi_ap_changed_bw(struct iwl_mld *mld,
+                              struct ieee80211_bss_conf *link_conf,
+                              enum ieee80211_sta_rx_bandwidth bw);
+
+#endif /* __iwl_mld_link_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include "mld.h"
+#include "iface.h"
+#include "low_latency.h"
+#include "hcmd.h"
+#include "power.h"
+
+/* minimum interval between traffic-triggered low-latency evaluations */
+#define MLD_LL_WK_INTERVAL_MSEC 500
+#define MLD_LL_PERIOD (HZ * MLD_LL_WK_INTERVAL_MSEC / 1000)
+#define MLD_LL_ACTIVE_WK_PERIOD (HZ * 10)
+
+/* vo/vi packets accumulated over (up to) MLD_LL_ACTIVE_WK_PERIOD */
+#define MLD_LL_ENABLE_THRESH 100
+
+/* Re-evaluate the per-MAC low-latency state from the vo/vi counters.
+ *
+ * For each MAC, sums the vo/vi packet counters across all RX queues.
+ * Low latency is enabled immediately once the count crosses the
+ * threshold, but is only disabled after the active window expired with
+ * a count below it (the counters are reset on window expiry).
+ * Returns true if any MAC ends up in low-latency mode.
+ */
+static bool iwl_mld_calc_low_latency(struct iwl_mld *mld,
+                                    unsigned long timestamp)
+{
+       struct iwl_mld_low_latency *ll = &mld->low_latency;
+       bool global_low_latency = false;
+       u8 num_rx_q = mld->trans->num_rx_queues;
+
+       for (int mac_id = 0; mac_id < NUM_MAC_INDEX_DRIVER; mac_id++) {
+               u32 total_vo_vi_pkts = 0;
+               bool ll_period_expired;
+
+               /* If it's not initialized yet, it means we have not yet
+                * received/transmitted any vo/vi packet on this MAC.
+                */
+               if (!ll->window_start[mac_id])
+                       continue;
+
+               ll_period_expired =
+                       time_after(timestamp, ll->window_start[mac_id] +
+                                  MLD_LL_ACTIVE_WK_PERIOD);
+
+               if (ll_period_expired)
+                       ll->window_start[mac_id] = timestamp;
+
+               for (int q = 0; q < num_rx_q; q++) {
+                       struct iwl_mld_low_latency_packets_counters *counters =
+                               &mld->low_latency.pkts_counters[q];
+
+                       /* counters are also updated from the RX path */
+                       spin_lock_bh(&counters->lock);
+
+                       total_vo_vi_pkts += counters->vo_vi[mac_id];
+
+                       if (ll_period_expired)
+                               counters->vo_vi[mac_id] = 0;
+
+                       spin_unlock_bh(&counters->lock);
+               }
+
+               /* enable immediately with enough packets but defer
+                * disabling only if the low-latency period expired and
+                * below threshold.
+                */
+               if (total_vo_vi_pkts > MLD_LL_ENABLE_THRESH)
+                       mld->low_latency.result[mac_id] = true;
+               else if (ll_period_expired)
+                       mld->low_latency.result[mac_id] = false;
+
+               global_low_latency |= mld->low_latency.result[mac_id];
+       }
+
+       return global_low_latency;
+}
+
+/* Interface iterator: apply the computed low-latency result to one vif.
+ *
+ * Only notifies (via iwl_mld_vif_update_low_latency) when the
+ * traffic-based low-latency state actually changed for this vif.
+ */
+static void iwl_mld_low_latency_iter(void *_data, u8 *mac,
+                                    struct ieee80211_vif *vif)
+{
+       struct iwl_mld *mld = _data;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       bool prev = mld_vif->low_latency_causes & LOW_LATENCY_TRAFFIC;
+       bool low_latency;
+
+       if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->low_latency.result)))
+               return;
+
+       low_latency = mld->low_latency.result[mld_vif->fw_id];
+
+       if (prev != low_latency)
+               iwl_mld_vif_update_low_latency(mld, vif, low_latency,
+                                              LOW_LATENCY_TRAFFIC);
+}
+
+/* Delayed work: recalculate low latency and propagate it to all vifs.
+ *
+ * Re-arms itself while low latency is active so the state can be turned
+ * off once the vo/vi traffic stops. Skipped during fw restart.
+ */
+static void iwl_mld_low_latency_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+       struct iwl_mld *mld = container_of(wk, struct iwl_mld,
+                                          low_latency.work.work);
+       unsigned long timestamp = jiffies;
+       bool low_latency_active;
+
+       if (mld->fw_status.in_hw_restart)
+               return;
+
+       /* It is assumed that the work was scheduled only after checking
+        * at least MLD_LL_PERIOD has passed since the last update.
+        */
+
+       low_latency_active = iwl_mld_calc_low_latency(mld, timestamp);
+
+       /* Update the timestamp now after the low-latency calculation */
+       mld->low_latency.timestamp = timestamp;
+
+       /* If low-latency is active we need to force re-evaluation after
+        * 10 seconds, so that we can disable low-latency when
+        * the low-latency traffic ends.
+        *
+        * Otherwise, we don't need to run the work because there is nothing to
+        * disable.
+        *
+        * Note that this has no impact on the regular scheduling of the
+        * updates triggered by traffic - those happen whenever the
+        * MLD_LL_PERIOD timeout expire.
+        */
+       if (low_latency_active)
+               wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
+                                        MLD_LL_ACTIVE_WK_PERIOD);
+
+       ieee80211_iterate_active_interfaces(mld->hw,
+                                           IEEE80211_IFACE_ITER_NORMAL,
+                                           iwl_mld_low_latency_iter, mld);
+}
+
+/* Allocate and initialize the low-latency detection state.
+ * Allocates one (zeroed) packets-counters struct per RX queue and arms the
+ * detection worker. Returns 0 on success or -ENOMEM.
+ */
+int iwl_mld_low_latency_init(struct iwl_mld *mld)
+{
+       struct iwl_mld_low_latency *ll = &mld->low_latency;
+       unsigned long ts = jiffies;
+
+       /* kcalloc() zero-initializes all the vo_vi counters */
+       ll->pkts_counters = kcalloc(mld->trans->num_rx_queues,
+                                   sizeof(*ll->pkts_counters), GFP_KERNEL);
+       if (!ll->pkts_counters)
+               return -ENOMEM;
+
+       for (int q = 0; q < mld->trans->num_rx_queues; q++)
+               spin_lock_init(&ll->pkts_counters[q].lock);
+
+       wiphy_delayed_work_init(&ll->work, iwl_mld_low_latency_wk);
+
+       ll->timestamp = ts;
+
+       /* The low-latency window_start will be initialized per-MAC on
+        * the first vo/vi packet received/transmitted.
+        */
+
+       return 0;
+}
+
+/* Free the per-queue counters allocated by iwl_mld_low_latency_init().
+ * The pointer is NULLed so a late iwl_mld_low_latency_update_counters()
+ * call trips its WARN_ON instead of using freed memory.
+ */
+void iwl_mld_low_latency_free(struct iwl_mld *mld)
+{
+       struct iwl_mld_low_latency *ll = &mld->low_latency;
+
+       kfree(ll->pkts_counters);
+       ll->pkts_counters = NULL;
+}
+
+/* Reset all low-latency detection state for a firmware restart:
+ * fresh timestamp, cleared per-MAC windows/results and zeroed counters.
+ */
+void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld)
+{
+       struct iwl_mld_low_latency *ll = &mld->low_latency;
+
+       ll->timestamp = jiffies;
+
+       memset(ll->window_start, 0, sizeof(ll->window_start));
+       memset(ll->result, 0, sizeof(ll->result));
+
+       for (int q = 0; q < mld->trans->num_rx_queues; q++)
+               memset(ll->pkts_counters[q].vo_vi, 0,
+                      sizeof(ll->pkts_counters[q].vo_vi));
+}
+
+/* Tell the firmware to enable/disable low-latency mode for the given MAC.
+ * Returns the iwl_mld_send_cmd_pdu() result (0 on success).
+ */
+static int iwl_mld_send_low_latency_cmd(struct iwl_mld *mld, bool low_latency,
+                                       u16 mac_id)
+{
+       struct iwl_mac_low_latency_cmd cmd = {
+               .mac_id = cpu_to_le32(mac_id)
+       };
+       u16 cmd_id = WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD);
+       int ret;
+
+       if (low_latency) {
+               /* Currently we don't care about the direction */
+               cmd.low_latency_rx = 1;
+               cmd.low_latency_tx = 1;
+       }
+
+       ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
+       if (ret)
+               IWL_ERR(mld, "Failed to send low latency command\n");
+
+       return ret;
+}
+
+/* Set or clear a single cause bit in the vif's low_latency_causes mask. */
+static void iwl_mld_vif_set_low_latency(struct iwl_mld_vif *mld_vif, bool set,
+                                       enum iwl_mld_low_latency_cause cause)
+{
+       if (set)
+               mld_vif->low_latency_causes |= cause;
+       else
+               mld_vif->low_latency_causes &= ~cause;
+}
+
+/* Update one low-latency cause for a vif and, if the effective (OR of all
+ * causes) state changed, notify the firmware. On command failure the cause
+ * bit is reverted so driver state stays in sync with the firmware.
+ */
+void iwl_mld_vif_update_low_latency(struct iwl_mld *mld,
+                                   struct ieee80211_vif *vif,
+                                   bool low_latency,
+                                   enum iwl_mld_low_latency_cause cause)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       bool prev;
+
+       prev = iwl_mld_vif_low_latency(mld_vif);
+       iwl_mld_vif_set_low_latency(mld_vif, low_latency, cause);
+
+       /* effective state is the OR of all causes; other causes may still
+        * keep it unchanged
+        */
+       low_latency = iwl_mld_vif_low_latency(mld_vif);
+       if (low_latency == prev)
+               return;
+
+       if (iwl_mld_send_low_latency_cmd(mld, low_latency, mld_vif->fw_id)) {
+               /* revert to previous low-latency state */
+               iwl_mld_vif_set_low_latency(mld_vif, prev, cause);
+               return;
+       }
+
+       if (low_latency)
+               iwl_mld_leave_omi_bw_reduction(mld);
+
+       /* power settings depend on low-latency only for P2P client vifs */
+       if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_P2P_CLIENT)
+               return;
+
+       iwl_mld_update_mac_power(mld, vif, false);
+}
+
+/* Classify a QoS-data frame by TID and decide whether it should be counted
+ * for low-latency traffic detection.
+ */
+static bool iwl_mld_is_vo_vi_pkt(struct ieee80211_hdr *hdr)
+{
+       u8 tid;
+       static const u8 tid_to_mac80211_ac[] = {
+               IEEE80211_AC_BE,
+               IEEE80211_AC_BK,
+               IEEE80211_AC_BK,
+               IEEE80211_AC_BE,
+               IEEE80211_AC_VI,
+               IEEE80211_AC_VI,
+               IEEE80211_AC_VO,
+               IEEE80211_AC_VO,
+       };
+
+       if (!hdr || !ieee80211_is_data_qos(hdr->frame_control))
+               return false;
+
+       tid = ieee80211_get_tid(hdr);
+       if (tid >= IWL_MAX_TID_COUNT)
+               return false;
+
+       /* mac80211 orders ACs VO(0) < VI(1) < BE(2) < BK(3), so the strict
+        * '<' below matches VO (TIDs 6-7) only.
+        * NOTE(review): given the function name and the vo_vi counters it
+        * feeds, confirm whether '<=' (VO and VI) was intended.
+        */
+       return tid_to_mac80211_ac[tid] < IEEE80211_AC_VI;
+}
+
+/* Data-path hook (per RX queue): count a vo/vi packet for the station's MAC
+ * and kick the detection worker once MLD_LL_PERIOD has elapsed since the
+ * last evaluation.
+ */
+void iwl_mld_low_latency_update_counters(struct iwl_mld *mld,
+                                        struct ieee80211_hdr *hdr,
+                                        struct ieee80211_sta *sta,
+                                        u8 queue)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
+       struct iwl_mld_low_latency_packets_counters *counters;
+       /* 0 is reserved to mean "window not started", so never store it */
+       unsigned long ts = jiffies ? jiffies : 1;
+       u8 fw_id = mld_vif->fw_id;
+
+       /* we should have failed op mode init if NULL */
+       if (WARN_ON_ONCE(!mld->low_latency.pkts_counters))
+               return;
+
+       if (WARN_ON_ONCE(fw_id >= ARRAY_SIZE(counters->vo_vi) ||
+                        queue >= mld->trans->num_rx_queues))
+               return;
+
+       if (mld->low_latency.stopped)
+               return;
+
+       if (!iwl_mld_is_vo_vi_pkt(hdr))
+               return;
+
+       counters = &mld->low_latency.pkts_counters[queue];
+
+       spin_lock_bh(&counters->lock);
+       counters->vo_vi[fw_id]++;
+       spin_unlock_bh(&counters->lock);
+
+       /* Initialize the window_start on the first vo/vi packet
+        * (written without the counters lock; presumably a benign race
+        * between queues since all writers store a current timestamp -
+        * NOTE(review): confirm)
+        */
+       if (!mld->low_latency.window_start[fw_id])
+               mld->low_latency.window_start[fw_id] = ts;
+
+       if (time_is_before_jiffies(mld->low_latency.timestamp + MLD_LL_PERIOD))
+               wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
+                                        0);
+}
+
+/* Pause low-latency detection: further counter updates are ignored and any
+ * pending detection work is canceled. Requires the wiphy mutex.
+ */
+void iwl_mld_low_latency_stop(struct iwl_mld *mld)
+{
+       lockdep_assert_wiphy(mld->wiphy);
+
+       mld->low_latency.stopped = true;
+
+       wiphy_delayed_work_cancel(mld->wiphy, &mld->low_latency.work);
+}
+
+/* Resume low-latency detection after iwl_mld_low_latency_stop(): reset
+ * windows and counters (keeping the last per-MAC results) and re-schedule
+ * the worker if any MAC was in low-latency, so it can be turned off even
+ * with no new traffic. Requires the wiphy mutex.
+ */
+void iwl_mld_low_latency_restart(struct iwl_mld *mld)
+{
+       struct iwl_mld_low_latency *ll = &mld->low_latency;
+       bool low_latency = false;
+       unsigned long ts = jiffies;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       ll->timestamp = ts;
+       mld->low_latency.stopped = false;
+
+       for (int mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+               ll->window_start[mac] = 0;
+               low_latency |= ll->result[mac];
+
+               for (int q = 0; q < mld->trans->num_rx_queues; q++) {
+                       spin_lock_bh(&ll->pkts_counters[q].lock);
+                       ll->pkts_counters[q].vo_vi[mac] = 0;
+                       spin_unlock_bh(&ll->pkts_counters[q].lock);
+               }
+       }
+
+       /* if low latency is active, force re-evaluation to cover the case of
+        * no traffic.
+        */
+       if (low_latency)
+               wiphy_delayed_work_queue(mld->wiphy, &ll->work, MLD_LL_PERIOD);
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_low_latency_h__
+#define __iwl_mld_low_latency_h__
+
+/* Low-latency (voice/video traffic) detection: state and entry points used
+ * by the data path and the detection worker in low_latency.c.
+ */
+
+/**
+ * struct iwl_mld_low_latency_packets_counters - Packets counters
+ * @lock: synchronize the counting in data path against the worker
+ * @vo_vi: per-mac, counts the number of TX and RX voice and video packets
+ */
+struct iwl_mld_low_latency_packets_counters {
+       spinlock_t lock;
+       u32 vo_vi[NUM_MAC_INDEX_DRIVER];
+} ____cacheline_aligned_in_smp;
+
+/**
+ * enum iwl_mld_low_latency_cause - low-latency set causes
+ *
+ * The effective low-latency state of a vif is the OR of all causes.
+ *
+ * @LOW_LATENCY_TRAFFIC: indicates low-latency traffic was detected
+ * @LOW_LATENCY_DEBUGFS: low-latency mode set from debugfs
+ * @LOW_LATENCY_VIF_TYPE: low-latency mode set because of vif type (AP)
+ */
+enum iwl_mld_low_latency_cause {
+       LOW_LATENCY_TRAFFIC     = BIT(0),
+       LOW_LATENCY_DEBUGFS     = BIT(1),
+       LOW_LATENCY_VIF_TYPE    = BIT(2),
+};
+
+/**
+ * struct iwl_mld_low_latency - Manage low-latency detection and activation.
+ * @work: this work is used to detect low-latency by monitoring the number of
+ *     voice and video packets transmitted in a period of time. If the
+ *     threshold is reached, low-latency is activated. When active,
+ *     it is deactivated if the threshold is not reached within a
+ *     10-second period.
+ * @timestamp: timestamp of the last update.
+ * @window_start: per-mac, timestamp of the start of the current window. when
+ *     the window is over, the counters are reset. 0 means the window has
+ *     not started yet (no vo/vi packet seen).
+ * @pkts_counters: per-queue array voice/video packet counters
+ * @result: per-mac latest low-latency result
+ * @stopped: if true, ignore the requests to update the counters
+ */
+struct iwl_mld_low_latency {
+       struct wiphy_delayed_work work;
+       unsigned long timestamp;
+       unsigned long window_start[NUM_MAC_INDEX_DRIVER];
+       struct iwl_mld_low_latency_packets_counters *pkts_counters;
+       bool result[NUM_MAC_INDEX_DRIVER];
+       bool stopped;
+};
+
+int iwl_mld_low_latency_init(struct iwl_mld *mld);
+void iwl_mld_low_latency_free(struct iwl_mld *mld);
+void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld);
+void iwl_mld_vif_update_low_latency(struct iwl_mld *mld,
+                                   struct ieee80211_vif *vif,
+                                   bool low_latency,
+                                   enum iwl_mld_low_latency_cause cause);
+void iwl_mld_low_latency_update_counters(struct iwl_mld *mld,
+                                        struct ieee80211_hdr *hdr,
+                                        struct ieee80211_sta *sta,
+                                        u8 queue);
+void iwl_mld_low_latency_stop(struct iwl_mld *mld);
+void iwl_mld_low_latency_restart(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_low_latency_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <net/mac80211.h>
+#include <linux/ip.h>
+
+#include "mld.h"
+#include "mac80211.h"
+#include "phy.h"
+#include "iface.h"
+#include "power.h"
+#include "sta.h"
+#include "agg.h"
+#include "scan.h"
+#include "d3.h"
+#include "tlc.h"
+#include "key.h"
+#include "ap.h"
+#include "tx.h"
+#include "roc.h"
+#include "mlo.h"
+#include "stats.h"
+#include "ftm-initiator.h"
+#include "low_latency.h"
+#include "fw/api/scan.h"
+#include "fw/api/context.h"
+#include "fw/api/filter.h"
+#include "fw/api/sta.h"
+#include "fw/api/tdls.h"
+#ifdef CONFIG_PM_SLEEP
+#include "fw/api/d3.h"
+#endif /* CONFIG_PM_SLEEP */
+#include "iwl-trans.h"
+
+/* Common interface-limit triplet: up to 2 stations, one of {ap-type, P2P
+ * client, P2P GO}, and one P2P device. @ap lets the two tables below differ
+ * only in whether an AP interface is allowed.
+ */
+#define IWL_MLD_LIMITS(ap)                                     \
+       {                                                       \
+               .max = 2,                                       \
+               .types = BIT(NL80211_IFTYPE_STATION),           \
+       },                                                      \
+       {                                                       \
+               .max = 1,                                       \
+               .types = ap |                                   \
+                        BIT(NL80211_IFTYPE_P2P_CLIENT) |       \
+                        BIT(NL80211_IFTYPE_P2P_GO),            \
+       },                                                      \
+       {                                                       \
+               .max = 1,                                       \
+               .types = BIT(NL80211_IFTYPE_P2P_DEVICE),        \
+       }
+
+/* limits without AP support */
+static const struct ieee80211_iface_limit iwl_mld_limits[] = {
+       IWL_MLD_LIMITS(0)
+};
+
+/* limits that also allow an AP interface */
+static const struct ieee80211_iface_limit iwl_mld_limits_ap[] = {
+       IWL_MLD_LIMITS(BIT(NL80211_IFTYPE_AP))
+};
+
+/* Advertised combinations: 2 channels without AP, or 1 channel with AP */
+static const struct ieee80211_iface_combination
+iwl_mld_iface_combinations[] = {
+       {
+               .num_different_channels = 2,
+               .max_interfaces = 4,
+               .limits = iwl_mld_limits,
+               .n_limits = ARRAY_SIZE(iwl_mld_limits),
+       },
+       {
+               .num_different_channels = 1,
+               .max_interfaces = 4,
+               .limits = iwl_mld_limits_ap,
+               .n_limits = ARRAY_SIZE(iwl_mld_limits_ap),
+       },
+};
+
+/* Extended-capabilities bitmap advertised for station interfaces
+ * (indices are octets of the Extended Capabilities element)
+ */
+static const u8 if_types_ext_capa_sta[] = {
+        [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+        [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+        [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF |
+              WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB,
+        [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB,
+        [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
+};
+
+/* EMLSR capability: supported, 32us padding delay, 64us transition delay */
+#define IWL_MLD_EMLSR_CAPA     (IEEE80211_EML_CAP_EMLSR_SUPP | \
+                                IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US << \
+                                       __bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \
+                                IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \
+                                       __bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY))
+/* MLD capabilities: same-link TID-to-link mapping negotiation and link
+ * reconfiguration support
+ */
+#define IWL_MLD_CAPA_OPS (FIELD_PREP_CONST( \
+                       IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
+                       IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) | \
+                       IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT)
+
+static const struct wiphy_iftype_ext_capab iftypes_ext_capa[] = {
+       {
+               .iftype = NL80211_IFTYPE_STATION,
+               .extended_capabilities = if_types_ext_capa_sta,
+               .extended_capabilities_mask = if_types_ext_capa_sta,
+               .extended_capabilities_len = sizeof(if_types_ext_capa_sta),
+               /* relevant only if EHT is supported */
+               .eml_capabilities = IWL_MLD_EMLSR_CAPA,
+               .mld_capa_and_ops = IWL_MLD_CAPA_OPS,
+       },
+};
+
+/* Publish the NVM MAC address(es) on the wiphy. Additional addresses (up to
+ * min(n_hw_addrs, IWL_MLD_MAX_ADDRESSES)) are derived by incrementing the
+ * last octet of the previous address.
+ */
+static void iwl_mld_hw_set_addresses(struct iwl_mld *mld)
+{
+       struct wiphy *wiphy = mld->wiphy;
+       int num_addrs = 1;
+
+       /* Extract MAC address */
+       memcpy(mld->addresses[0].addr, mld->nvm_data->hw_addr, ETH_ALEN);
+       wiphy->addresses = mld->addresses;
+       wiphy->n_addresses = 1;
+
+       /* Extract additional MAC addresses if available */
+       if (mld->nvm_data->n_hw_addrs > 1)
+               num_addrs = min(mld->nvm_data->n_hw_addrs,
+                               IWL_MLD_MAX_ADDRESSES);
+
+       for (int i = 1; i < num_addrs; i++) {
+               memcpy(mld->addresses[i].addr,
+                      mld->addresses[i - 1].addr,
+                      ETH_ALEN);
+               mld->addresses[i].addr[ETH_ALEN - 1]++;
+               wiphy->n_addresses++;
+       }
+}
+
+/* Hook up the NVM-provided band data; 6 GHz only if it has channels */
+static void iwl_mld_hw_set_channels(struct iwl_mld *mld)
+{
+       struct wiphy *wiphy = mld->wiphy;
+       struct ieee80211_supported_band *bands = mld->nvm_data->bands;
+
+       wiphy->bands[NL80211_BAND_2GHZ] = &bands[NL80211_BAND_2GHZ];
+       wiphy->bands[NL80211_BAND_5GHZ] = &bands[NL80211_BAND_5GHZ];
+
+       if (bands[NL80211_BAND_6GHZ].n_channels)
+               wiphy->bands[NL80211_BAND_6GHZ] = &bands[NL80211_BAND_6GHZ];
+}
+
+/* Advertise the supported cipher suites, MFP and beacon protection */
+static void iwl_mld_hw_set_security(struct iwl_mld *mld)
+{
+       struct ieee80211_hw *hw = mld->hw;
+       static const u32 mld_ciphers[] = {
+               WLAN_CIPHER_SUITE_WEP40,
+               WLAN_CIPHER_SUITE_WEP104,
+               WLAN_CIPHER_SUITE_TKIP,
+               WLAN_CIPHER_SUITE_CCMP,
+               WLAN_CIPHER_SUITE_GCMP,
+               WLAN_CIPHER_SUITE_GCMP_256,
+               WLAN_CIPHER_SUITE_AES_CMAC,
+               WLAN_CIPHER_SUITE_BIP_GMAC_128,
+               WLAN_CIPHER_SUITE_BIP_GMAC_256
+       };
+
+       hw->wiphy->n_cipher_suites = ARRAY_SIZE(mld_ciphers);
+       hw->wiphy->cipher_suites = mld_ciphers;
+
+       ieee80211_hw_set(hw, MFP_CAPABLE);
+       wiphy_ext_feature_set(hw->wiphy,
+                             NL80211_EXT_FEATURE_BEACON_PROTECTION);
+}
+
+/* Regulatory is self-managed (LAR firmware) with relaxed NO-IR handling */
+static void iwl_mld_hw_set_regulatory(struct iwl_mld *mld)
+{
+       struct wiphy *wiphy = mld->wiphy;
+
+       wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
+       wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
+}
+
+/* Report the valid TX/RX antenna masks from the device configuration */
+static void iwl_mld_hw_set_antennas(struct iwl_mld *mld)
+{
+       struct wiphy *wiphy = mld->wiphy;
+
+       wiphy->available_antennas_tx = iwl_mld_get_valid_tx_ant(mld);
+       wiphy->available_antennas_rx = iwl_mld_get_valid_rx_ant(mld);
+}
+
+/* Advertise WoWLAN support, but only when the platform can wake the host
+ * from this device; no-op without CONFIG_PM_SLEEP.
+ */
+static void iwl_mld_hw_set_pm(struct iwl_mld *mld)
+{
+#ifdef CONFIG_PM_SLEEP
+       struct wiphy *wiphy = mld->wiphy;
+
+       if (!device_can_wakeup(mld->trans->dev))
+               return;
+
+       mld->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
+                            WIPHY_WOWLAN_DISCONNECT |
+                            WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+                            WIPHY_WOWLAN_RFKILL_RELEASE |
+                            WIPHY_WOWLAN_NET_DETECT |
+                            WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+                            WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+                            WIPHY_WOWLAN_4WAY_HANDSHAKE;
+
+       mld->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
+       mld->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
+       mld->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
+       mld->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES_V2;
+
+       wiphy->wowlan = &mld->wowlan;
+#endif /* CONFIG_PM_SLEEP */
+}
+
+/* Declare which radiotap MCS/VHT/timestamp details the RX path provides */
+static void iwl_mac_hw_set_radiotap(struct iwl_mld *mld)
+{
+       struct ieee80211_hw *hw = mld->hw;
+
+       hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
+                                   IEEE80211_RADIOTAP_MCS_HAVE_STBC;
+
+       hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
+                                   IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
+
+       hw->radiotap_timestamp.units_pos =
+               IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
+               IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
+
+       /* this is the case for CCK frames, it's better (only 8) for OFDM */
+       hw->radiotap_timestamp.accuracy = 22;
+}
+
+/* Set the unconditional mac80211 hardware capability flags for mld devices */
+static void iwl_mac_hw_set_flags(struct iwl_mld *mld)
+{
+       struct ieee80211_hw *hw = mld->hw;
+
+       ieee80211_hw_set(hw, USES_RSS);
+       ieee80211_hw_set(hw, HANDLES_QUIET_CSA);
+       ieee80211_hw_set(hw, AP_LINK_PS);
+       ieee80211_hw_set(hw, SIGNAL_DBM);
+       ieee80211_hw_set(hw, SPECTRUM_MGMT);
+       ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+       ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+       ieee80211_hw_set(hw, SUPPORTS_PS);
+       ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+       ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+       ieee80211_hw_set(hw, CONNECTION_MONITOR);
+       ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+       ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+       ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+       ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+       ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+       ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
+       ieee80211_hw_set(hw, STA_MMPDU_TXQ);
+       ieee80211_hw_set(hw, TX_AMSDU);
+       ieee80211_hw_set(hw, TX_FRAG_LIST);
+       ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+       ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+       ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+       ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ);
+       ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
+       ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+       ieee80211_hw_set(hw, TDLS_WIDER_BW);
+}
+
+/* Fill in all wiphy-level capabilities: interface modes, features/flags,
+ * scan limits, combinations and extended features, plus the HE/EHT
+ * dependent bits (MLO, iftype ext capa) and the power-save default.
+ */
+static void iwl_mac_hw_set_wiphy(struct iwl_mld *mld)
+{
+       struct ieee80211_hw *hw = mld->hw;
+       struct wiphy *wiphy = hw->wiphy;
+       const struct iwl_ucode_capabilities *ucode_capa = &mld->fw->ucode_capa;
+
+       snprintf(wiphy->fw_version,
+                sizeof(wiphy->fw_version),
+                "%.31s", mld->fw->fw_version);
+
+       wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                BIT(NL80211_IFTYPE_P2P_CLIENT) |
+                                BIT(NL80211_IFTYPE_AP) |
+                                BIT(NL80211_IFTYPE_P2P_GO) |
+                                BIT(NL80211_IFTYPE_P2P_DEVICE) |
+                                BIT(NL80211_IFTYPE_ADHOC);
+
+       wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+                          NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+                          NL80211_FEATURE_ND_RANDOM_MAC_ADDR |
+                          NL80211_FEATURE_HT_IBSS |
+                          NL80211_FEATURE_P2P_GO_CTWIN |
+                          NL80211_FEATURE_LOW_PRIORITY_SCAN |
+                          NL80211_FEATURE_P2P_GO_OPPPS |
+                          NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+                          NL80211_FEATURE_SUPPORTS_WMM_ADMISSION |
+                          NL80211_FEATURE_TX_POWER_INSERTION |
+                          NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
+
+       wiphy->flags |= WIPHY_FLAG_IBSS_RSN |
+                       WIPHY_FLAG_AP_UAPSD |
+                       WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+                       WIPHY_FLAG_SPLIT_SCAN_6GHZ |
+                       WIPHY_FLAG_SUPPORTS_TDLS |
+                       WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
+
+       /* MLO requires 11be, which in turn requires 11ax to be enabled */
+       if (mld->nvm_data->sku_cap_11be_enable &&
+           !iwlwifi_mod_params.disable_11ax &&
+           !iwlwifi_mod_params.disable_11be)
+               wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+
+       /* the firmware uses u8 for num of iterations, but 0xff is saved for
+        * infinite loop, so the maximum number of iterations is actually 254.
+        */
+       wiphy->max_sched_scan_plan_iterations = 254;
+       wiphy->max_sched_scan_ie_len = iwl_mld_scan_max_template_size();
+       wiphy->max_scan_ie_len = iwl_mld_scan_max_template_size();
+       wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+       wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+       wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
+       wiphy->max_sched_scan_reqs = 1;
+       wiphy->max_sched_scan_plan_interval = U16_MAX;
+       wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES_V2;
+
+       wiphy->max_remain_on_channel_duration = 10000;
+
+       wiphy->hw_version = mld->trans->hw_id;
+
+       wiphy->hw_timestamp_max_peers = 1;
+
+       wiphy->iface_combinations = iwl_mld_iface_combinations;
+       wiphy->n_iface_combinations = ARRAY_SIZE(iwl_mld_iface_combinations);
+
+       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_CONCURRENT);
+       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY);
+       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_START_TIME);
+       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_PARENT_TSF);
+       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT);
+       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
+       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
+       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
+       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+       wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);
+
+       if (fw_has_capa(ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT))
+               wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_PROTECTED_TWT);
+
+       wiphy->iftype_ext_capab = NULL;
+       wiphy->num_iftype_ext_capab = 0;
+
+       if (!iwlwifi_mod_params.disable_11ax) {
+               wiphy->iftype_ext_capab = iftypes_ext_capa;
+               wiphy->num_iftype_ext_capab = ARRAY_SIZE(iftypes_ext_capa);
+
+               ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+               ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
+       }
+
+       /* power save defaults to on, unless the CAM (always-on) scheme was
+        * requested via module parameter
+        */
+       if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+               wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+       else
+               wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+}
+
+/* Remaining ieee80211_hw setup: queues, netdev features, UAPSD, the sizes
+ * of the driver-private per-object data, and aggregation limits.
+ */
+static void iwl_mac_hw_set_misc(struct iwl_mld *mld)
+{
+       struct ieee80211_hw *hw = mld->hw;
+
+       hw->queues = IEEE80211_NUM_ACS;
+
+       hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
+       hw->netdev_features |= mld->cfg->features;
+
+       hw->max_tx_fragments = mld->trans->max_skb_frags;
+       hw->max_listen_interval = IWL_MLD_CONN_LISTEN_INTERVAL;
+
+       hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
+       hw->uapsd_queues = IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |
+                          IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |
+                          IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |
+                          IEEE80211_WMM_IE_STA_QOSINFO_AC_BE;
+
+       hw->chanctx_data_size = sizeof(struct iwl_mld_phy);
+       hw->vif_data_size = sizeof(struct iwl_mld_vif);
+       hw->sta_data_size = sizeof(struct iwl_mld_sta);
+       hw->txq_data_size = sizeof(struct iwl_mld_txq);
+
+       /* TODO: Remove this division when IEEE80211_MAX_AMPDU_BUF_EHT size
+        * is supported.
+        * Note: ensure that IWL_DEFAULT_QUEUE_SIZE_EHT is updated accordingly.
+        */
+       hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT / 2;
+}
+
+/* Sanity-check the invariants the rest of the hw setup relies on.
+ * Returns 0 if all hold, -EINVAL (with a WARN) otherwise.
+ */
+static int iwl_mld_hw_verify_preconditions(struct iwl_mld *mld)
+{
+       /* 11ax is expected to be enabled for all supported devices */
+       if (WARN_ON(!mld->nvm_data->sku_cap_11ax_enable))
+               return -EINVAL;
+
+       /* LAR is expected to be enabled for all supported devices */
+       if (WARN_ON(!mld->nvm_data->lar_enabled))
+               return -EINVAL;
+
+       /* All supported devices are currently using version 3 of the cmd.
+        * Since version 3, IWL_SCAN_MAX_PROFILES_V2 shall be used where
+        * necessary.
+        */
+       if (WARN_ON(iwl_fw_lookup_cmd_ver(mld->fw,
+                                         SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+                                         IWL_FW_CMD_VER_UNKNOWN) != 3))
+               return -EINVAL;
+
+       return 0;
+}
+
+/* Populate all hardware/wiphy capabilities and register with mac80211.
+ * Returns 0 on success or a negative error.
+ */
+int iwl_mld_register_hw(struct iwl_mld *mld)
+{
+       /* verify once essential preconditions required for setting
+        * the hw capabilities
+        */
+       if (iwl_mld_hw_verify_preconditions(mld))
+               return -EINVAL;
+
+       iwl_mld_hw_set_addresses(mld);
+       iwl_mld_hw_set_channels(mld);
+       iwl_mld_hw_set_security(mld);
+       iwl_mld_hw_set_regulatory(mld);
+       iwl_mld_hw_set_pm(mld);
+       iwl_mld_hw_set_antennas(mld);
+       iwl_mac_hw_set_radiotap(mld);
+       iwl_mac_hw_set_flags(mld);
+       iwl_mac_hw_set_wiphy(mld);
+       iwl_mac_hw_set_misc(mld);
+
+       SET_IEEE80211_DEV(mld->hw, mld->trans->dev);
+
+       return ieee80211_register_hw(mld->hw);
+}
+
+/* mac80211 .tx callback: translate MLD addresses to link addresses for
+ * link-addressed mgmt frames (the FW can't do it for mgmt sent on the bcast
+ * station in AP mode), then hand the frame to the TX path.
+ */
+static void
+iwl_mld_mac80211_tx(struct ieee80211_hw *hw,
+                   struct ieee80211_tx_control *control, struct sk_buff *skb)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct ieee80211_sta *sta = control->sta;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (void *)skb->data;
+       u32 link_id = u32_get_bits(info->control.flags,
+                                  IEEE80211_TX_CTRL_MLO_LINK);
+
+       /* In AP mode, mgmt frames are sent on the bcast station,
+        * so the FW can't translate the MLD addr to the link addr. Do it here
+        */
+       if (ieee80211_is_mgmt(hdr->frame_control) && sta &&
+           link_id != IEEE80211_LINK_UNSPECIFIED &&
+           !ieee80211_is_probe_resp(hdr->frame_control)) {
+               /* translate MLD addresses to LINK addresses */
+               struct ieee80211_link_sta *link_sta =
+                       rcu_dereference(sta->link[link_id]);
+               struct ieee80211_bss_conf *link_conf =
+                       rcu_dereference(info->control.vif->link_conf[link_id]);
+               struct ieee80211_mgmt *mgmt;
+
+               if (WARN_ON(!link_sta || !link_conf)) {
+                       ieee80211_free_txskb(hw, skb);
+                       return;
+               }
+
+               mgmt = (void *)hdr;
+               memcpy(mgmt->da, link_sta->addr, ETH_ALEN);
+               memcpy(mgmt->sa, link_conf->addr, ETH_ALEN);
+               memcpy(mgmt->bssid, link_conf->bssid, ETH_ALEN);
+       }
+
+       iwl_mld_tx_skb(mld, skb, NULL);
+}
+
+/* Clean up all driver state (device-wide, per-vif, per-sta, FTM) before
+ * bringing the firmware back up in a hardware restart.
+ */
+static void
+iwl_mld_restart_cleanup(struct iwl_mld *mld)
+{
+       iwl_cleanup_mld(mld);
+
+       ieee80211_iterate_interfaces(mld->hw, IEEE80211_IFACE_ITER_ACTIVE,
+                                    iwl_mld_cleanup_vif, NULL);
+
+       ieee80211_iterate_stations_atomic(mld->hw,
+                                         iwl_mld_cleanup_sta, NULL);
+
+       iwl_mld_ftm_restart_cleanup(mld);
+}
+
+/* mac80211 .start callback: bring the firmware up, handling the d3-resume
+ * and hw-restart special cases. Returns 0 on success or a negative error.
+ */
+static
+int iwl_mld_mac80211_start(struct ieee80211_hw *hw)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       int ret;
+       bool in_d3 = false;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+#ifdef CONFIG_PM_SLEEP
+       /* Unless the host goes into hibernate the FW always stays on and
+        * the d3_resume flow is used. When wowlan is configured, mac80211
+        * would call it's resume callback and the wowlan_resume flow
+        * would be used.
+        */
+
+       in_d3 = mld->fw_status.in_d3;
+       if (in_d3) {
+               /* mac80211 already cleaned up the state, no need for cleanup */
+               ret = iwl_mld_no_wowlan_resume(mld);
+               if (ret)
+                       iwl_mld_stop_fw(mld);
+       }
+#endif /* CONFIG_PM_SLEEP */
+
+       if (mld->fw_status.in_hw_restart) {
+               iwl_mld_stop_fw(mld);
+               iwl_mld_restart_cleanup(mld);
+       }
+
+       /* full FW start if we weren't in d3, or if the d3 resume failed
+        * (ret is only read when in_d3 is true, i.e. after it was set above)
+        */
+       if (!in_d3 || ret) {
+               ret = iwl_mld_start_fw(mld);
+               if (ret)
+                       goto error;
+       }
+
+       mld->scan.last_start_time_jiffies = jiffies;
+
+       iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT,
+                              NULL);
+       iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
+                              NULL);
+
+       return 0;
+
+error:
+       /* If we failed to restart the hw, there is nothing useful
+        * we can do but indicate we are no longer in restart.
+        */
+       mld->fw_status.in_hw_restart = false;
+
+       return ret;
+}
+
+/* mac80211 .stop callback: stop (or suspend) the firmware, cancel pending
+ * work, flush async handlers and sanity-check that no scan UID is left set.
+ */
+static
+void iwl_mld_mac80211_stop(struct ieee80211_hw *hw, bool suspend)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       wiphy_work_cancel(mld->wiphy, &mld->add_txqs_wk);
+
+       /* if the suspend flow fails the fw is in error. Stop it here, and it
+        * will be started upon wakeup
+        */
+       if (!suspend || iwl_mld_no_wowlan_suspend(mld))
+               iwl_mld_stop_fw(mld);
+
+       /* HW is stopped, no more coming RX. OTOH, the worker can't run as the
+        * wiphy lock is held. Cancel it in case it was scheduled just before
+        * we stopped the HW.
+        */
+       wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk);
+
+       /* Empty out the list, as the worker won't do that */
+       iwl_mld_purge_async_handlers_list(mld);
+
+       /* Clear in_hw_restart flag when stopping the hw, as mac80211 won't
+        * execute the restart.
+        */
+       mld->fw_status.in_hw_restart = false;
+
+       /* We shouldn't have any UIDs still set. Loop over all the UIDs to
+        * make sure there's nothing left there and warn if any is found.
+        */
+       for (int i = 0; i < ARRAY_SIZE(mld->scan.uid_status); i++)
+               if (WARN_ONCE(mld->scan.uid_status[i],
+                             "UMAC scan UID %d status was not cleaned (0x%x 0x%x)\n",
+                             i, mld->scan.uid_status[i], mld->scan.status))
+                       mld->scan.uid_status[i] = 0;
+}
+
+/* mac80211 config() callback: intentionally a no-op — all configuration in
+ * this driver is applied through the link/vif change callbacks instead.
+ */
+static
+int iwl_mld_mac80211_config(struct ieee80211_hw *hw, u32 changed)
+{
+       return 0;
+}
+
+/* mac80211 add_interface() callback: allocate the driver/FW vif state, add
+ * the default (deflink) link for non-MLD vifs, and set the driver_flags
+ * that advertise per-iftype capabilities to mac80211.
+ *
+ * Returns 0 on success or a negative error; on link-add failure the vif is
+ * rolled back via iwl_mld_rm_vif().
+ */
+static
+int iwl_mld_mac80211_add_interface(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* Construct mld_vif, add it to fw, and map its ID to ieee80211_vif */
+       ret = iwl_mld_add_vif(mld, vif);
+       if (ret)
+               return ret;
+
+       /*
+        * Add the default link, but not if this is an MLD vif as that implies
+        * the HW is restarting and it will be configured by change_vif_links.
+        */
+       if (!ieee80211_vif_is_mld(vif))
+               ret = iwl_mld_add_link(mld, &vif->bss_conf);
+       if (ret)
+               goto err;
+
+       if (vif->type == NL80211_IFTYPE_STATION) {
+               vif->driver_flags |= IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC;
+               if (!vif->p2p)
+                       vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+                                            IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+       }
+
+       if (vif->p2p || iwl_fw_lookup_cmd_ver(mld->fw, PHY_CONTEXT_CMD, 0) < 5)
+               vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
+
+       /*
+        * For an MLD vif (in restart) we may not have a link; delay the call
+        * the initial change_vif_links.
+        */
+       if (vif->type == NL80211_IFTYPE_STATION &&
+           !ieee80211_vif_is_mld(vif))
+               iwl_mld_update_mac_power(mld, vif, false);
+
+       if (vif->type == NL80211_IFTYPE_MONITOR) {
+               mld->monitor.on = true;
+               ieee80211_hw_set(mld->hw, RX_INCLUDES_FCS);
+       }
+
+       if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+               mld->p2p_device_vif = vif;
+
+       return 0;
+
+err:
+       iwl_mld_rm_vif(mld, vif);
+       return ret;
+}
+
+/* mac80211 remove_interface() callback: undo add_interface — clear the
+ * driver_flags/monitor state set there, remove the default link, drop the
+ * debugfs symlink and finally free the driver/FW vif state.
+ */
+static
+void iwl_mld_mac80211_remove_interface(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
+               vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+                                      IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+
+       if (vif->type == NL80211_IFTYPE_MONITOR) {
+               __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mld->hw->flags);
+               mld->monitor.on = false;
+       }
+
+       if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+               mld->p2p_device_vif = NULL;
+
+       iwl_mld_remove_link(mld, &vif->bss_conf);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       debugfs_remove(iwl_mld_vif_from_mac80211(vif)->dbgfs_slink);
+#endif
+
+       iwl_mld_rm_vif(mld, vif);
+}
+
+/* Iterator context for iwl_mld_mc_iface_iterator():
+ * @mld: driver context
+ * @port_id: next free multicast-filter port id to hand out
+ */
+struct iwl_mld_mc_iter_data {
+       struct iwl_mld *mld;
+       int port_id;
+};
+
+/* Per-interface iterator: send the currently cached MCAST_FILTER_CMD for
+ * every associated station vif, assigning each one a distinct port id.
+ * Skips (with a warning) once the port ids are exhausted.
+ */
+static void iwl_mld_mc_iface_iterator(void *data, u8 *mac,
+                                     struct ieee80211_vif *vif)
+{
+       struct iwl_mld_mc_iter_data *mc_data = data;
+       struct iwl_mld *mld = mc_data->mld;
+       struct iwl_mcast_filter_cmd *cmd = mld->mcast_filter_cmd;
+       struct iwl_host_cmd hcmd = {
+               .id = MCAST_FILTER_CMD,
+               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+       };
+       int ret, len;
+
+       /* If we don't have free ports, mcast frames will be dropped */
+       if (WARN_ON_ONCE(mc_data->port_id >= MAX_PORT_ID_NUM))
+               return;
+
+       if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
+               return;
+
+       cmd->port_id = mc_data->port_id++;
+       ether_addr_copy(cmd->bssid, vif->bss_conf.bssid);
+       /* FW requires the command length to be a multiple of 4 */
+       len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
+
+       hcmd.len[0] = len;
+       hcmd.data[0] = cmd;
+
+       ret = iwl_mld_send_cmd(mld, &hcmd);
+       if (ret)
+               IWL_ERR(mld, "mcast filter cmd error. ret=%d\n", ret);
+}
+
+/* Re-send the cached multicast filter to the FW for all active interfaces.
+ * Requires that configure_filter already stored mld->mcast_filter_cmd.
+ */
+void iwl_mld_recalc_multicast_filter(struct iwl_mld *mld)
+{
+       struct iwl_mld_mc_iter_data iter_data = {
+               .mld = mld,
+       };
+
+       if (WARN_ON_ONCE(!mld->mcast_filter_cmd))
+               return;
+
+       ieee80211_iterate_active_interfaces(mld->hw,
+                                           IEEE80211_IFACE_ITER_NORMAL,
+                                           iwl_mld_mc_iface_iterator,
+                                           &iter_data);
+}
+
+/* mac80211 prepare_multicast() callback: build a MCAST_FILTER_CMD from the
+ * netdev multicast address list. May be called in atomic context, hence
+ * GFP_ATOMIC. If there are more addresses than the FW supports, fall back
+ * to pass_all. The allocated command pointer is smuggled to
+ * configure_filter() through the returned u64; returns 0 on allocation
+ * failure.
+ */
+static u64
+iwl_mld_mac80211_prepare_multicast(struct ieee80211_hw *hw,
+                                  struct netdev_hw_addr_list *mc_list)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mcast_filter_cmd *cmd;
+       struct netdev_hw_addr *addr;
+       int addr_count = netdev_hw_addr_list_count(mc_list);
+       bool pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES;
+       int len;
+
+       if (pass_all)
+               addr_count = 0;
+
+       /* len must be a multiple of 4 */
+       len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
+       cmd = kzalloc(len, GFP_ATOMIC);
+       if (!cmd)
+               return 0;
+
+       if (pass_all) {
+               cmd->pass_all = 1;
+               goto out;
+       }
+
+       netdev_hw_addr_list_for_each(addr, mc_list) {
+               IWL_DEBUG_MAC80211(mld, "mcast addr (%d): %pM\n",
+                                  cmd->count, addr->addr);
+               ether_addr_copy(&cmd->addr_list[cmd->count * ETH_ALEN],
+                               addr->addr);
+               cmd->count++;
+       }
+
+out:
+       return (u64)(unsigned long)cmd;
+}
+
+/* mac80211 configure_filter() callback: take ownership of the command built
+ * by prepare_multicast() (passed via @multicast), apply FIF_ALLMULTI, and
+ * push the new filter to the FW. *total_flags is cleared since the driver
+ * supports none of the mac80211 filter flags directly.
+ */
+static
+void iwl_mld_mac80211_configure_filter(struct ieee80211_hw *hw,
+                                      unsigned int changed_flags,
+                                      unsigned int *total_flags,
+                                      u64 multicast)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
+
+       /* Replace previous configuration */
+       kfree(mld->mcast_filter_cmd);
+       mld->mcast_filter_cmd = cmd;
+
+       if (!cmd)
+               goto out;
+
+       if (changed_flags & FIF_ALLMULTI)
+               cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
+
+       if (cmd->pass_all)
+               cmd->count = 0;
+
+       iwl_mld_recalc_multicast_filter(mld);
+out:
+       *total_flags = 0;
+}
+
+/* mac80211 wake_tx_queue() callback: transmit immediately if the HW queue
+ * is already allocated (or it's a non-sta queue); otherwise queue the txq
+ * for allocation by the add_txqs worker, which will drain it afterwards.
+ */
+static
+void iwl_mld_mac80211_wake_tx_queue(struct ieee80211_hw *hw,
+                                   struct ieee80211_txq *txq)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+
+       if (likely(mld_txq->status.allocated) || !txq->sta) {
+               iwl_mld_tx_from_txq(mld, txq);
+               return;
+       }
+
+       /* We don't support TSPEC tids. %IEEE80211_NUM_TIDS is for mgmt */
+       if (txq->tid != IEEE80211_NUM_TIDS && txq->tid >= IWL_MAX_TID_COUNT) {
+               IWL_DEBUG_MAC80211(mld, "TID %d is not supported\n", txq->tid);
+               return;
+       }
+
+       /* The worker will handle any packets we leave on the txq now */
+
+       spin_lock_bh(&mld->add_txqs_lock);
+       /* The list is being deleted only after the queue is fully allocated. */
+       if (list_empty(&mld_txq->list) &&
+           /* recheck under lock, otherwise it can be added twice */
+           !mld_txq->status.allocated) {
+               list_add_tail(&mld_txq->list, &mld->txqs_to_add);
+               wiphy_work_queue(mld->wiphy, &mld->add_txqs_wk);
+       }
+       spin_unlock_bh(&mld->add_txqs_lock);
+}
+
+/* Request teardown of all TDLS peer links. Walks the FW station table and
+ * asks mac80211 to tear down every TDLS station found; used e.g. when a
+ * second PHY context becomes active.
+ */
+static void iwl_mld_teardown_tdls_peers(struct iwl_mld *mld)
+{
+       lockdep_assert_wiphy(mld->wiphy);
+
+       for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
+               struct ieee80211_link_sta *link_sta;
+               struct iwl_mld_sta *mld_sta;
+
+               link_sta = wiphy_dereference(mld->wiphy,
+                                            mld->fw_id_to_link_sta[i]);
+               if (IS_ERR_OR_NULL(link_sta))
+                       continue;
+
+               if (!link_sta->sta->tdls)
+                       continue;
+
+               mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+
+               ieee80211_tdls_oper_request(mld_sta->vif, link_sta->addr,
+                                           NL80211_TDLS_TEARDOWN,
+                                           WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
+                                           GFP_KERNEL);
+       }
+}
+
+/* mac80211 add_chanctx() callback: allocate a FW PHY context id, record the
+ * channel definition and add the PHY context in FW. The used_phy_ids bit is
+ * released again if the FW command fails. TDLS is torn down once more than
+ * one PHY context is in use, as it is only supported on a single channel.
+ */
+static
+int iwl_mld_add_chanctx(struct ieee80211_hw *hw,
+                       struct ieee80211_chanctx_conf *ctx)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+       int fw_id = iwl_mld_allocate_fw_phy_id(mld);
+       int ret;
+
+       if (fw_id < 0)
+               return fw_id;
+
+       phy->fw_id = fw_id;
+       phy->chandef = *iwl_mld_get_chandef_from_chanctx(ctx);
+
+       ret = iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_ADD);
+       if (ret) {
+               mld->used_phy_ids &= ~BIT(phy->fw_id);
+               return ret;
+       }
+
+       if (hweight8(mld->used_phy_ids) > 1)
+               iwl_mld_teardown_tdls_peers(mld);
+
+       return 0;
+}
+
+/* mac80211 remove_chanctx() callback: remove the PHY context from FW and
+ * free its id for reuse.
+ */
+static
+void iwl_mld_remove_chanctx(struct ieee80211_hw *hw,
+                           struct ieee80211_chanctx_conf *ctx)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+
+       iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_REMOVE);
+       mld->used_phy_ids &= ~BIT(phy->fw_id);
+}
+
+/* mac80211 change_chanctx() callback: decide whether the channel change
+ * needs a PHY context update in FW, and if so record the new chandef and
+ * send FW_CTXT_ACTION_MODIFY.
+ */
+static
+void iwl_mld_change_chanctx(struct ieee80211_hw *hw,
+                           struct ieee80211_chanctx_conf *ctx, u32 changed)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+       struct cfg80211_chan_def *chandef =
+               iwl_mld_get_chandef_from_chanctx(ctx);
+
+       /* We don't care about these */
+       if (!(changed & ~(IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
+                         IEEE80211_CHANCTX_CHANGE_RADAR |
+                         IEEE80211_CHANCTX_CHANGE_CHANNEL)))
+               return;
+
+       /* Check if a FW update is required */
+
+       if (changed & IEEE80211_CHANCTX_CHANGE_AP)
+               goto update;
+
+       if (chandef->chan == phy->chandef.chan &&
+           chandef->center_freq1 == phy->chandef.center_freq1 &&
+           chandef->punctured == phy->chandef.punctured) {
+               /* Check if we are toggling between HT and non-HT, no-op */
+               if (phy->chandef.width == chandef->width ||
+                   (phy->chandef.width <= NL80211_CHAN_WIDTH_20 &&
+                    chandef->width <= NL80211_CHAN_WIDTH_20))
+                       return;
+       }
+update:
+       phy->chandef = *chandef;
+
+       iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_MODIFY);
+}
+
+/* Return the index (0-based, in 80 MHz units) of the 80 MHz sub-channel
+ * that contains the control channel within a 160/320 MHz chandef.
+ * Returns 0 for widths of 80 MHz or less (index is meaningless there).
+ */
+static u8
+iwl_mld_chandef_get_primary_80(struct cfg80211_chan_def *chandef)
+{
+       int data_start;
+       int control_start;
+       int bw;
+
+       if (chandef->width == NL80211_CHAN_WIDTH_320)
+               bw = 320;
+       else if (chandef->width == NL80211_CHAN_WIDTH_160)
+               bw = 160;
+       else
+               return 0;
+
+       /* data is bw wide so the start is half the width */
+       data_start = chandef->center_freq1 - bw / 2;
+       /* control is 20Mhz width */
+       control_start = chandef->chan->center_freq - 10;
+
+       return (control_start - data_start) / 80;
+}
+
+/* Return true if @link may be activated in FW right now. Activation must
+ * wait until the AP station's link_sta exists in FW (link switch/restart);
+ * during initial association the link is activated before the STA is added.
+ */
+static bool iwl_mld_can_activate_link(struct iwl_mld *mld,
+                                     struct ieee80211_vif *vif,
+                                     struct ieee80211_bss_conf *link)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_sta *mld_sta;
+       struct iwl_mld_link_sta *link_sta;
+
+       /* In association, we activate the assoc link before adding the STA. */
+       if (!mld_vif->ap_sta || !vif->cfg.assoc)
+               return true;
+
+       mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);
+
+       /* When switching links, we need to wait with the activation until the
+        * STA was added to the FW. It'll be activated in
+        * iwl_mld_update_link_stas
+        */
+       link_sta = wiphy_dereference(mld->wiphy, mld_sta->link[link->link_id]);
+
+       /* In restart we can have a link_sta that doesn't exist in FW yet */
+       return link_sta && link_sta->in_fw;
+}
+
+/* mac80211 assign_vif_chanctx() callback: bind @link to the PHY context
+ * @ctx, update the FW link (rates depend on the band, known only now),
+ * activate the link when allowed, and handle EMLSR bookkeeping when a
+ * second link becomes active.
+ *
+ * Returns 0 on success or a negative error; on failure the chan_ctx
+ * pointer is cleared again.
+ */
+static
+int iwl_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif,
+                              struct ieee80211_bss_conf *link,
+                              struct ieee80211_chanctx_conf *ctx)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+       unsigned int n_active = iwl_mld_count_active_links(mld, vif);
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (WARN_ON(!mld_link))
+               return -EINVAL;
+
+       /* if the assigned one was not counted yet, count it now */
+       if (!rcu_access_pointer(mld_link->chan_ctx)) {
+               n_active++;
+
+               /* Track addition of non-BSS link */
+               if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+                       ret = iwl_mld_emlsr_check_non_bss_block(mld, 1);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       /* for AP, mac parameters such as HE support are updated at this stage. */
+       if (vif->type == NL80211_IFTYPE_AP) {
+               ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_MODIFY);
+
+               if (ret) {
+                       IWL_ERR(mld, "failed to update MAC %pM\n", vif->addr);
+                       return -EINVAL;
+               }
+       }
+
+       rcu_assign_pointer(mld_link->chan_ctx, ctx);
+
+       if (n_active > 1) {
+               struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+               iwl_mld_leave_omi_bw_reduction(mld);
+
+               /* Indicate to mac80211 that EML is enabled */
+               vif->driver_flags |= IEEE80211_VIF_EML_ACTIVE;
+
+               /* Use the selected primary link if it is actually active;
+                * otherwise fall back to the lowest active link id.
+                * (BIT() must take the primary link *id*, not the selected
+                * links bitmap.)
+                */
+               if (vif->active_links & BIT(mld_vif->emlsr.selected_primary))
+                       mld_vif->emlsr.primary = mld_vif->emlsr.selected_primary;
+               else
+                       mld_vif->emlsr.primary = __ffs(vif->active_links);
+
+               iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_ESR_LINK_UP,
+                                      NULL);
+       }
+
+       /* First send the link command with the phy context ID.
+        * Now that we have the phy, we know the band so also the rates
+        */
+       ret = iwl_mld_change_link_in_fw(mld, link,
+                                       LINK_CONTEXT_MODIFY_RATES_INFO);
+       if (ret)
+               goto err;
+
+       /* TODO: Initialize rate control for the AP station, since we might be
+        * doing a link switch here - we cannot initialize it before since
+        * this needs the phy context assigned (and in FW?), and we cannot
+        * do it later because it needs to be initialized as soon as we're
+        * able to TX on the link, i.e. when active. (task=link-switch)
+        */
+
+       /* Now activate the link */
+       if (iwl_mld_can_activate_link(mld, vif, link)) {
+               ret = iwl_mld_activate_link(mld, link);
+               if (ret)
+                       goto err;
+       }
+
+       if (vif->type == NL80211_IFTYPE_STATION)
+               iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link);
+
+       if (vif->type == NL80211_IFTYPE_MONITOR) {
+               /* TODO: task=sniffer add sniffer station */
+               mld->monitor.p80 =
+                       iwl_mld_chandef_get_primary_80(&vif->bss_conf.chanreq.oper);
+       }
+
+       return 0;
+err:
+       RCU_INIT_POINTER(mld_link->chan_ctx, NULL);
+       return ret;
+}
+
<![CDATA[+/* mac80211 unassign_vif_chanctx() callback: deactivate @link, drop the
+ * chan_ctx binding and undo the EMLSR bookkeeping done in assign. In the
+ * non-MLO case the FW link is removed and re-added to reset its state.
+ */
+static
+void iwl_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
+                                 struct ieee80211_vif *vif,
+                                 struct ieee80211_bss_conf *link,
+                                 struct ieee80211_chanctx_conf *ctx)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+       unsigned int n_active = iwl_mld_count_active_links(mld, vif);
+
+       if (WARN_ON(!mld_link))
+               return;
+
+       /* Track removal of non-BSS link */
+       if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+               iwl_mld_emlsr_check_non_bss_block(mld, -1);
+
+       iwl_mld_deactivate_link(mld, link);
+
+       /* TODO: task=sniffer remove sniffer station */
+
+       if (n_active > 1) {
+               /* Indicate to mac80211 that EML is disabled */
+               vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
+
+               iwl_dbg_tlv_time_point(&mld->fwrt,
+                                      IWL_FW_INI_TIME_ESR_LINK_DOWN,
+                                      NULL);
+       }
+
+       RCU_INIT_POINTER(mld_link->chan_ctx, NULL);
+
+       /* in the non-MLO case, remove/re-add the link to clean up FW state.
+        * In MLO, it'll be done in drv_change_vif_link
+        */
+       if (!ieee80211_vif_is_mld(vif) && !mld_vif->ap_sta &&
+           !WARN_ON_ONCE(vif->cfg.assoc) &&
+           vif->type != NL80211_IFTYPE_AP && !mld->fw_status.in_hw_restart) {
+               iwl_mld_remove_link(mld, link);
+               iwl_mld_add_link(mld, link);
+       }
+}]]>
+
+/* mac80211 set_rts_threshold() callback: intentionally a no-op — the FW
+ * manages RTS protection on its own.
+ */
+static
+int iwl_mld_mac80211_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+       return 0;
+}
+
+/* Handle AP/IBSS link_info changes: map the BSS_CHANGED_* bits onto
+ * LINK_CONTEXT_MODIFY_* flags, push the link update to FW, and refresh
+ * the beacon template when the beacon changed.
+ */
+static void
+iwl_mld_link_info_changed_ap_ibss(struct iwl_mld *mld,
+                                 struct ieee80211_vif *vif,
+                                 struct ieee80211_bss_conf *link,
+                                 u64 changes)
+{
+       u32 link_changes = 0;
+
+       if (changes & BSS_CHANGED_ERP_SLOT)
+               link_changes |= LINK_CONTEXT_MODIFY_RATES_INFO;
+
+       if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT))
+               link_changes |= LINK_CONTEXT_MODIFY_PROTECT_FLAGS;
+
+       if (changes & (BSS_CHANGED_QOS | BSS_CHANGED_BANDWIDTH))
+               link_changes |= LINK_CONTEXT_MODIFY_QOS_PARAMS;
+
+       if (changes & BSS_CHANGED_HE_BSS_COLOR)
+               link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
+
+       if (link_changes)
+               iwl_mld_change_link_in_fw(mld, link, link_changes);
+
+       if (changes & BSS_CHANGED_BEACON)
+               iwl_mld_update_beacon_template(mld, vif, link);
+}
+
+/* Translate station-mode BSS_CHANGED_* bits into the corresponding
+ * LINK_CONTEXT_MODIFY_* flags for the FW link command. Returns the
+ * accumulated flags (0 if no FW update is needed).
+ */
+static
+u32 iwl_mld_link_changed_mapping(struct iwl_mld *mld,
+                                struct ieee80211_vif *vif,
+                                struct ieee80211_bss_conf *link_conf,
+                                u64 changes)
+{
+       u32 link_changes = 0;
+       bool has_he, has_eht;
+
+       if (changes & BSS_CHANGED_QOS && vif->cfg.assoc && link_conf->qos)
+               link_changes |= LINK_CONTEXT_MODIFY_QOS_PARAMS;
+
+       if (changes & (BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_BASIC_RATES |
+                      BSS_CHANGED_ERP_SLOT))
+               link_changes |= LINK_CONTEXT_MODIFY_RATES_INFO;
+
+       if (changes & (BSS_CHANGED_HT | BSS_CHANGED_ERP_CTS_PROT))
+               link_changes |= LINK_CONTEXT_MODIFY_PROTECT_FLAGS;
+
+       /* TODO: task=MLO check mac80211's HE flags and if command is needed
+        * every time there's a link change. Currently used flags are
+        * BSS_CHANGED_HE_OBSS_PD and BSS_CHANGED_HE_BSS_COLOR.
+        */
+       has_he = link_conf->he_support && !iwlwifi_mod_params.disable_11ax;
+       has_eht = link_conf->eht_support && !iwlwifi_mod_params.disable_11be;
+
+       if (vif->cfg.assoc && (has_he || has_eht)) {
+               IWL_DEBUG_MAC80211(mld, "Associated in HE mode\n");
+               link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
+       }
+
+       return link_changes;
+}
+
+/* Handle station-mode link_info changes: update the FW link context, TX
+ * power constraints, MAC power, beacon filtering and OMI/EMLSR bandwidth
+ * tracking according to the changed bits.
+ */
+static void
+iwl_mld_mac80211_link_info_changed_sta(struct iwl_mld *mld,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_bss_conf *link_conf,
+                                      u64 changes)
+{
+       u32 link_changes = iwl_mld_link_changed_mapping(mld, vif, link_conf,
+                                                       changes);
+
+       if (link_changes)
+               iwl_mld_change_link_in_fw(mld, link_conf, link_changes);
+
+       if (changes & BSS_CHANGED_TPE)
+               iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link_conf);
+
+       if (changes & BSS_CHANGED_BEACON_INFO)
+               iwl_mld_update_mac_power(mld, vif, false);
+
+       /* The firmware will wait quite a while after association before it
+        * starts filtering the beacons. We can safely enable beacon filtering
+        * upon CQM configuration, even if we didn't get a beacon yet.
+        */
+       if (changes & (BSS_CHANGED_CQM | BSS_CHANGED_BEACON_INFO))
+               iwl_mld_enable_beacon_filter(mld, link_conf, false);
+
+       /* If we have used OMI before to reduce bandwidth to 80 MHz and then
+        * increased to 160 MHz again, and then the AP changes to 320 MHz, it
+        * will think that we're limited to 160 MHz right now. Update it by
+        * requesting a new OMI bandwidth.
+        */
+       if (changes & BSS_CHANGED_BANDWIDTH) {
+               enum ieee80211_sta_rx_bandwidth bw;
+
+               bw = ieee80211_chan_width_to_rx_bw(link_conf->chanreq.oper.width);
+
+               iwl_mld_omi_ap_changed_bw(mld, link_conf, bw);
+       }
+
+       if (changes & BSS_CHANGED_BANDWIDTH)
+               iwl_mld_emlsr_check_equal_bw(mld, vif, link_conf);
+}
+
+/* Send the VHT MU-MIMO group membership/position from @link_conf to the FW
+ * via UPDATE_MU_GROUPS_CMD. Returns the command status.
+ */
+static int iwl_mld_update_mu_groups(struct iwl_mld *mld,
+                                   struct ieee80211_bss_conf *link_conf)
+{
+       struct iwl_mu_group_mgmt_cmd cmd = {};
+
+       /* the FW command buffers must match mac80211's layout exactly */
+       BUILD_BUG_ON(sizeof(cmd.membership_status) !=
+                    sizeof(link_conf->mu_group.membership));
+       BUILD_BUG_ON(sizeof(cmd.user_position) !=
+                    sizeof(link_conf->mu_group.position));
+
+       memcpy(cmd.membership_status, link_conf->mu_group.membership,
+              WLAN_MEMBERSHIP_LEN);
+       memcpy(cmd.user_position, link_conf->mu_group.position,
+              WLAN_USER_POSITION_LEN);
+
+       return iwl_mld_send_cmd_pdu(mld,
+                                   WIDE_ID(DATA_PATH_GROUP,
+                                           UPDATE_MU_GROUPS_CMD),
+                                   &cmd);
+}
+
+/* mac80211 link_info_changed() callback: dispatch to the per-iftype handler
+ * and apply the iftype-independent changes (BSSID, TX power).
+ */
+static void
+iwl_mld_mac80211_link_info_changed(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif,
+                                  struct ieee80211_bss_conf *link_conf,
+                                  u64 changes)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               iwl_mld_mac80211_link_info_changed_sta(mld, vif, link_conf,
+                                                      changes);
+               break;
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_ADHOC:
+               iwl_mld_link_info_changed_ap_ibss(mld, vif, link_conf,
+                                                 changes);
+               break;
+       case NL80211_IFTYPE_MONITOR:
+               /* The firmware tracks this on its own in STATION mode, but
+                * obviously not in sniffer mode.
+                */
+               if (changes & BSS_CHANGED_MU_GROUPS)
+                       iwl_mld_update_mu_groups(mld, link_conf);
+               break;
+       default:
+               /* shouldn't happen */
+               WARN_ON_ONCE(1);
+       }
+
+       /* We now know our BSSID, we can configure the MAC context with
+        * eht_support if needed.
+        */
+       if (changes & BSS_CHANGED_BSSID)
+               iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_MODIFY);
+
+       if (changes & BSS_CHANGED_TXPOWER)
+               iwl_mld_set_tx_power(mld, link_conf, link_conf->txpower);
+}
+
+/* mac80211 vif_cfg_changed() callback (station vifs only): update the FW
+ * MAC context on association changes, manage periodic FW statistics
+ * (needed for link selection / RX OMI), and refresh power settings on
+ * powersave changes.
+ */
+static
+void iwl_mld_mac80211_vif_cfg_changed(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif *vif,
+                                     u64 changes)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
+
+       if (changes & BSS_CHANGED_ASSOC) {
+               ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_MODIFY);
+               if (ret)
+                       IWL_ERR(mld, "failed to update context\n");
+
+               if (vif->cfg.assoc) {
+                       /* Clear statistics to get clean beacon counter, and
+                        * ask for periodic statistics, as they are needed for
+                        * link selection and RX OMI decisions.
+                        */
+                       iwl_mld_clear_stats_in_fw(mld);
+                       iwl_mld_request_periodic_fw_stats(mld, true);
+
+                       iwl_mld_set_vif_associated(mld, vif);
+               } else {
+                       iwl_mld_request_periodic_fw_stats(mld, false);
+               }
+       }
+
+       if (changes & BSS_CHANGED_PS) {
+               /* Send both device-level and MAC-level power commands since the
+                * firmware checks the POWER_TABLE_CMD's POWER_SAVE_EN bit to
+                * determine SMPS mode.
+                */
+               iwl_mld_update_device_power(mld, false);
+               iwl_mld_update_mac_power(mld, vif, false);
+       }
+
+       /* TODO: task=MLO BSS_CHANGED_MLD_VALID_LINKS/CHANGED_MLD_TTLM */
+}
+
+/* mac80211 hw_scan() callback: sanity-check the channel count against the
+ * FW capability and start a regular scan. Returns 0 or a negative error.
+ */
+static int
+iwl_mld_mac80211_hw_scan(struct ieee80211_hw *hw,
+                        struct ieee80211_vif *vif,
+                        struct ieee80211_scan_request *hw_req)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       if (WARN_ON(!hw_req->req.n_channels ||
+                   hw_req->req.n_channels >
+                   mld->fw->ucode_capa.n_scan_channels))
+               return -EINVAL;
+
+       return iwl_mld_regular_scan_start(mld, vif, &hw_req->req, &hw_req->ies);
+}
+
+/* mac80211 cancel_hw_scan() callback: stop the regular scan if one is
+ * actually running (see the race note below).
+ */
+static void
+iwl_mld_mac80211_cancel_hw_scan(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       /* Due to a race condition, it's possible that mac80211 asks
+        * us to stop a hw_scan when it's already stopped. This can
+        * happen, for instance, if we stopped the scan ourselves,
+        * called ieee80211_scan_completed() and the userspace called
+        * cancel scan before ieee80211_scan_work() could run.
+        * To handle that, simply return if the scan is not running.
+        */
+       if (mld->scan.status & IWL_MLD_SCAN_REGULAR)
+               iwl_mld_scan_stop(mld, IWL_MLD_SCAN_REGULAR, true);
+}
+
+/* mac80211 sched_scan_start() callback: delegate to the scheduled-scan
+ * implementation. Returns 0 or a negative error.
+ */
+static int
+iwl_mld_mac80211_sched_scan_start(struct ieee80211_hw *hw,
+                                 struct ieee80211_vif *vif,
+                                 struct cfg80211_sched_scan_request *req,
+                                 struct ieee80211_scan_ies *ies)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       return iwl_mld_sched_scan_start(mld, vif, req, ies, IWL_MLD_SCAN_SCHED);
+}
+
+/* mac80211 sched_scan_stop() callback: stop the scheduled scan if it is
+ * actually running (see the race note below). Returns 0 or a negative
+ * error from the stop flow.
+ */
+static int
+iwl_mld_mac80211_sched_scan_stop(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       /* Due to a race condition, it's possible that mac80211 asks
+        * us to stop a sched_scan when it's already stopped. This
+        * can happen, for instance, if we stopped the scan ourselves,
+        * called ieee80211_sched_scan_stopped() and the userspace called
+        * stop sched scan before ieee80211_sched_scan_stopped_work()
+        * could run. To handle this, simply return if the scan is
+        * not running.
+        */
+       if (!(mld->scan.status & IWL_MLD_SCAN_SCHED))
+               return 0;
+
+       return iwl_mld_scan_stop(mld, IWL_MLD_SCAN_SCHED, false);
+}
+
+/* Interface iterator used after a FW restart: re-apply any OMI bandwidth
+ * reduction that was in progress on each active link before the restart.
+ */
+static void
+iwl_mld_restart_complete_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct ieee80211_bss_conf *link_conf;
+       struct iwl_mld *mld = data;
+       int link_id;
+
+       for_each_vif_active_link(vif, link_conf, link_id) {
+               enum ieee80211_sta_rx_bandwidth bw;
+               struct iwl_mld_link *mld_link;
+
+               mld_link = wiphy_dereference(mld->wiphy,
+                                            mld_vif->link[link_id]);
+
+               if (WARN_ON_ONCE(!mld_link))
+                       continue;
+
+               bw = mld_link->rx_omi.bw_in_progress;
+               if (bw)
+                       iwl_mld_change_link_omi_bw(mld, link_conf, bw);
+       }
+}
+
+/* mac80211 reconfig_complete() callback: on restart completion, tell the FW
+ * recovery is done, restore per-vif state, finish the transport SW reset
+ * and kick the txq-add worker if needed. Suspend reconfig needs no action.
+ */
+static void
+iwl_mld_mac80211_reconfig_complete(struct ieee80211_hw *hw,
+                                  enum ieee80211_reconfig_type reconfig_type)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       switch (reconfig_type) {
+       case IEEE80211_RECONFIG_TYPE_RESTART:
+               mld->fw_status.in_hw_restart = false;
+               iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_END_OF_RECOVERY);
+
+               ieee80211_iterate_interfaces(mld->hw,
+                                            IEEE80211_IFACE_ITER_NORMAL,
+                                            iwl_mld_restart_complete_vif, mld);
+
+               iwl_trans_finish_sw_reset(mld->trans);
+               /* no need to lock, adding in parallel would schedule too */
+               if (!list_empty(&mld->txqs_to_add))
+                       wiphy_work_queue(mld->wiphy, &mld->add_txqs_wk);
+
+               IWL_INFO(mld, "restart completed\n");
+               break;
+       case IEEE80211_RECONFIG_TYPE_SUSPEND:
+               break;
+       }
+}
+
+/* mac80211 mgd_prepare_tx() callback: schedule FW session protection so
+ * management frames (auth/assoc) can be exchanged, using the larger of the
+ * default association time and the duration mac80211 requested.
+ */
+static
+void iwl_mld_mac80211_mgd_prepare_tx(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif,
+                                    struct ieee80211_prep_tx_info *info)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       u32 duration = IWL_MLD_SESSION_PROTECTION_ASSOC_TIME_MS;
+
+       /* After a successful association the connection is established
+        * and we can rely on the quota to send the disassociation frame.
+        */
+       if (info->was_assoc)
+               return;
+
+       if (info->duration > duration)
+               duration = info->duration;
+
+       iwl_mld_schedule_session_protection(mld, vif, duration,
+                                           IWL_MLD_SESSION_PROTECTION_MIN_TIME_MS,
+                                           info->link_id);
+}
+
+/* mac80211 mgd_complete_tx callback: decide whether the session protection
+ * scheduled in mgd_prepare_tx() must be kept or cancelled, based on the
+ * frame subtype and whether the exchange succeeded.
+ */
+static
+void iwl_mld_mac_mgd_complete_tx(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif,
+                                struct ieee80211_prep_tx_info *info)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       /* Successful authentication is the only case that requires to let
+        * the session protection go. We'll need it for the upcoming
+        * association. For all the other cases, we need to cancel the session
+        * protection.
+        * After successful association the connection is established and
+        * further mgd tx can rely on the quota.
+        */
+       if (info->success && info->subtype == IEEE80211_STYPE_AUTH)
+               return;
+
+       /* The firmware will be on medium after we configure the vif as
+        * associated. Removing the session protection allows the firmware
+        * to stop being on medium. In order to ensure the continuity of our
+        * presence on medium, we need first to configure the vif as associated
+        * and only then, remove the session protection.
+        * Currently, mac80211 calls vif_cfg_changed() first and then,
+        * drv_mgd_complete_tx(). Ensure that this assumption stays true by
+        * a warning.
+        */
+       WARN_ON(info->success &&
+               (info->subtype == IEEE80211_STYPE_ASSOC_REQ ||
+                info->subtype == IEEE80211_STYPE_REASSOC_REQ) &&
+               !vif->cfg.assoc);
+
+       iwl_mld_cancel_session_protection(mld, vif, info->link_id);
+}
+
+/* mac80211 conf_tx callback: store the per-AC queue parameters for a link.
+ * Returns 0 on success or -EINVAL for an unknown link.
+ */
+static int
+iwl_mld_mac80211_conf_tx(struct ieee80211_hw *hw,
+                        struct ieee80211_vif *vif,
+                        unsigned int link_id, u16 ac,
+                        const struct ieee80211_tx_queue_params *params)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_link *mld_link;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       mld_link = iwl_mld_link_dereference_check(mld_vif, link_id);
+       if (!mld_link)
+               return -EINVAL;
+
+       mld_link->queue_params[ac] = *params;
+
+       /* Defer the firmware update to BSS_CHANGED_QOS, except for P2P_DEVICE
+        * interfaces which need an immediate update.
+        */
+       if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+               iwl_mld_change_link_in_fw(mld, &vif->bss_conf,
+                                         LINK_CONTEXT_MODIFY_QOS_PARAMS);
+
+       return 0;
+}
+
+/* Recompute the U-APSD support flag of a vif from the module parameters.
+ * Only station-type vifs may advertise U-APSD support.
+ */
+static void iwl_mld_set_uapsd(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+       bool allow;
+
+       vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
+
+       /* P2P clients and regular BSS clients are disabled independently */
+       if (vif->p2p)
+               allow = !(iwlwifi_mod_params.uapsd_disable &
+                         IWL_DISABLE_UAPSD_P2P_CLIENT);
+       else
+               allow = !(iwlwifi_mod_params.uapsd_disable &
+                         IWL_DISABLE_UAPSD_BSS);
+
+       if (allow)
+               vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+}
+
+/* Count the TDLS stations currently known to the firmware.
+ * Must be called with the wiphy mutex held.
+ */
+int iwl_mld_tdls_sta_count(struct iwl_mld *mld)
+{
+       int n_tdls = 0;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
+               struct ieee80211_link_sta *link_sta =
+                       wiphy_dereference(mld->wiphy,
+                                         mld->fw_id_to_link_sta[i]);
+
+               if (!IS_ERR_OR_NULL(link_sta) && link_sta->sta->tdls)
+                       n_tdls++;
+       }
+
+       return n_tdls;
+}
+
+/* cfg80211_bss_iter() callback: clear *_data (tolerated) if this BSS does
+ * not advertise OBSS narrow-bandwidth-RU tolerance in its extended
+ * capabilities element.
+ */
+static void iwl_mld_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
+                                                   struct cfg80211_bss *bss,
+                                                   void *_data)
+{
+       bool *tolerated = _data;
+       const struct cfg80211_bss_ies *ies;
+       const struct element *elem;
+
+       rcu_read_lock();
+       ies = rcu_dereference(bss->ies);
+       elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
+                                 ies->len);
+
+       /* The capability bit is read from data[10], so the element needs at
+        * least 11 octets; "datalen < 10" allowed a one-byte out-of-bounds
+        * read when datalen == 10 — require "datalen <= 10" to fail instead.
+        */
+       if (!elem || elem->datalen <= 10 ||
+           !(elem->data[10] &
+             WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) {
+               *tolerated = false;
+       }
+       rcu_read_unlock();
+}
+
+/* Decide whether 26-tone RU transmissions must be blocked on this link:
+ * scan all known BSSes on the link's operating channel and block if any of
+ * them does not tolerate narrow-bandwidth RUs. Only relevant on radar
+ * channels; otherwise the block is cleared.
+ */
+static void
+iwl_mld_check_he_obss_narrow_bw_ru(struct iwl_mld *mld,
+                                  struct iwl_mld_link *mld_link,
+                                  struct ieee80211_bss_conf *link_conf)
+{
+       bool tolerated = true;
+
+       if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
+               return;
+
+       if (!(link_conf->chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR)) {
+               mld_link->he_ru_2mhz_block = false;
+               return;
+       }
+
+       cfg80211_bss_iter(mld->wiphy, &link_conf->chanreq.oper,
+                         iwl_mld_check_he_obss_narrow_bw_ru_iter, &tolerated);
+
+       /* Block 26-tone RU UL OFDMA (HE TB PPDU) if at least one AP on the
+        * radar channel cannot tolerate such transmissions.
+        */
+       mld_link->he_ru_2mhz_block = !tolerated;
+}
+
+/* Re-evaluate the 26-tone RU block on every active HE link of the station's
+ * vif (see iwl_mld_check_he_obss_narrow_bw_ru()).
+ */
+static void iwl_mld_link_set_2mhz_block(struct iwl_mld *mld,
+                                       struct ieee80211_vif *vif,
+                                       struct ieee80211_sta *sta)
+{
+       struct ieee80211_link_sta *link_sta;
+       unsigned int link_id;
+
+       for_each_sta_active_link(vif, sta, link_sta, link_id) {
+               struct ieee80211_bss_conf *link_conf =
+                       link_conf_dereference_protected(vif, link_id);
+               struct iwl_mld_link *mld_link;
+
+               /* Check link_conf before deriving mld_link from it; the
+                * previous code dereferenced a possibly-NULL link_conf in
+                * iwl_mld_link_from_mac80211() before the WARN_ON check.
+                */
+               if (WARN_ON(!link_conf))
+                       continue;
+
+               mld_link = iwl_mld_link_from_mac80211(link_conf);
+               if (WARN_ON(!mld_link))
+                       continue;
+
+               if (link_sta->he_cap.has_he)
+                       iwl_mld_check_he_obss_narrow_bw_ru(mld, mld_link,
+                                                          link_conf);
+       }
+}
+
+/* Handle a one-step "up" transition of a station's state
+ * (NOTEXIST->NONE->AUTH->ASSOC->AUTHORIZED). Any other combination is
+ * rejected with -EINVAL.
+ */
+static int iwl_mld_move_sta_state_up(struct iwl_mld *mld,
+                                    struct ieee80211_vif *vif,
+                                    struct ieee80211_sta *sta,
+                                    enum ieee80211_sta_state old_state,
+                                    enum ieee80211_sta_state new_state)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       int tdls_count = 0;
+       int ret;
+
+       if (old_state == IEEE80211_STA_NOTEXIST &&
+           new_state == IEEE80211_STA_NONE) {
+               /* TDLS is rejected on P2P vifs, when more than one PHY is in
+                * use, or when the TDLS station limit is already reached.
+                */
+               if (sta->tdls) {
+                       if (vif->p2p || hweight8(mld->used_phy_ids) != 1)
+                               return -EBUSY;
+
+                       tdls_count = iwl_mld_tdls_sta_count(mld);
+                       if (tdls_count >= IWL_TDLS_STA_COUNT)
+                               return -EBUSY;
+               }
+
+               /*
+                * If this is the first STA (i.e. the AP) it won't do
+                * anything, otherwise must leave for any new STA on
+                * any other interface, or for TDLS, etc.
+                * Need to call this _before_ adding the STA so it can
+                * look up the one STA to use to ask mac80211 to leave
+                * OMI; in the unlikely event that adding the new STA
+                * then fails we'll just re-enter OMI later (via the
+                * statistics notification handling.)
+                */
+               iwl_mld_leave_omi_bw_reduction(mld);
+
+               ret = iwl_mld_add_sta(mld, sta, vif, STATION_TYPE_PEER);
+               if (ret)
+                       return ret;
+
+               /* just added first TDLS STA, so disable PM */
+               if (sta->tdls && tdls_count == 0)
+                       iwl_mld_update_mac_power(mld, vif, false);
+
+               if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+                       mld_vif->ap_sta = sta;
+
+               /* Initialize TLC here already - this really tells
+                * the firmware only what the supported legacy rates are
+                * (may be) since it's initialized already from what the
+                * AP advertised in the beacon/probe response. This will
+                * allow the firmware to send auth/assoc frames with one
+                * of the supported rates already, rather than having to
+                * use a mandatory rate.
+                * If we're the AP, we'll just assume mandatory rates at
+                * this point, but we know nothing about the STA anyway.
+                */
+               iwl_mld_config_tlc(mld, vif, sta);
+
+               return ret;
+       } else if (old_state == IEEE80211_STA_NONE &&
+                  new_state == IEEE80211_STA_AUTH) {
+               iwl_mld_set_uapsd(mld, vif);
+               return 0;
+       } else if (old_state == IEEE80211_STA_AUTH &&
+                  new_state == IEEE80211_STA_ASSOC) {
+               /* Push the updated link-station info to the firmware */
+               ret = iwl_mld_update_all_link_stations(mld, sta);
+
+               if (vif->type == NL80211_IFTYPE_STATION)
+                       iwl_mld_link_set_2mhz_block(mld, vif, sta);
+               /* Now the link_sta's capabilities are set, update the FW */
+               iwl_mld_config_tlc(mld, vif, sta);
+
+               if (vif->type == NL80211_IFTYPE_AP) {
+                       /* Update MAC_CFG_FILTER_ACCEPT_BEACON if at least
+                        * one sta is associated
+                        */
+                       if (++mld_vif->num_associated_stas == 1)
+                               ret = iwl_mld_mac_fw_action(mld, vif,
+                                                           FW_CTXT_ACTION_MODIFY);
+               }
+
+               return ret;
+       } else if (old_state == IEEE80211_STA_ASSOC &&
+                  new_state == IEEE80211_STA_AUTHORIZED) {
+               ret = 0;
+
+               if (!sta->tdls) {
+                       mld_vif->authorized = true;
+
+                       /* Ensure any block due to a non-BSS link is synced */
+                       iwl_mld_emlsr_check_non_bss_block(mld, 0);
+
+                       /* Block EMLSR until a certain throughput it reached */
+                       if (!mld->fw_status.in_hw_restart &&
+                           IWL_MLD_ENTER_EMLSR_TPT_THRESH > 0)
+                               iwl_mld_block_emlsr(mld_vif->mld, vif,
+                                                   IWL_MLD_EMLSR_BLOCKED_TPT,
+                                                   0);
+
+                       /* Wait for the FW to send a recommendation */
+                       iwl_mld_block_emlsr(mld, vif,
+                                           IWL_MLD_EMLSR_BLOCKED_FW, 0);
+
+                       /* clear COEX_HIGH_PRIORITY_ENABLE */
+                       ret = iwl_mld_mac_fw_action(mld, vif,
+                                                   FW_CTXT_ACTION_MODIFY);
+                       if (ret)
+                               return ret;
+               }
+
+               /* MFP is set by default before the station is authorized.
+                * Clear it here in case it's not used.
+                */
+               if (!sta->mfp)
+                       ret = iwl_mld_update_all_link_stations(mld, sta);
+
+               /* We can use wide bandwidth now, not only 20 MHz */
+               iwl_mld_config_tlc(mld, vif, sta);
+
+               return ret;
+       } else {
+               return -EINVAL;
+       }
+}
+
+/* Handle a one-step "down" transition of a station's state
+ * (AUTHORIZED->ASSOC->AUTH->NONE->NOTEXIST). Any other combination is
+ * rejected with -EINVAL.
+ */
+static int iwl_mld_move_sta_state_down(struct iwl_mld *mld,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_sta *sta,
+                                      enum ieee80211_sta_state old_state,
+                                      enum ieee80211_sta_state new_state)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+       if (old_state == IEEE80211_STA_AUTHORIZED &&
+           new_state == IEEE80211_STA_ASSOC) {
+               if (!sta->tdls) {
+                       mld_vif->authorized = false;
+
+                       /* Reset the EMLSR state that only applies while the
+                        * station is authorized, and cancel its workers.
+                        */
+                       memset(&mld_vif->emlsr.zeroed_on_not_authorized, 0,
+                              sizeof(mld_vif->emlsr.zeroed_on_not_authorized));
+
+                       wiphy_delayed_work_cancel(mld->wiphy,
+                                                 &mld_vif->emlsr.prevent_done_wk);
+                       wiphy_delayed_work_cancel(mld->wiphy,
+                                                 &mld_vif->emlsr.tmp_non_bss_done_wk);
+                       wiphy_work_cancel(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk);
+                       wiphy_delayed_work_cancel(mld->wiphy,
+                                                 &mld_vif->emlsr.check_tpt_wk);
+
+                       iwl_mld_reset_cca_40mhz_workaround(mld, vif);
+               }
+
+               /* once we move into assoc state, need to update the FW to
+                * stop using wide bandwidth
+                */
+               iwl_mld_config_tlc(mld, vif, sta);
+       } else if (old_state == IEEE80211_STA_ASSOC &&
+                  new_state == IEEE80211_STA_AUTH) {
+               if (vif->type == NL80211_IFTYPE_AP &&
+                   !WARN_ON(!mld_vif->num_associated_stas)) {
+                       /* Update MAC_CFG_FILTER_ACCEPT_BEACON if the last sta
+                        * is disassociating
+                        */
+                       if (--mld_vif->num_associated_stas == 0)
+                               iwl_mld_mac_fw_action(mld, vif,
+                                                     FW_CTXT_ACTION_MODIFY);
+               }
+       } else if (old_state == IEEE80211_STA_AUTH &&
+                  new_state == IEEE80211_STA_NONE) {
+               /* nothing */
+       } else if (old_state == IEEE80211_STA_NONE &&
+                  new_state == IEEE80211_STA_NOTEXIST) {
+               iwl_mld_remove_sta(mld, sta);
+
+               if (sta->tdls && iwl_mld_tdls_sta_count(mld) == 0) {
+                       /* just removed last TDLS STA, so enable PM */
+                       iwl_mld_update_mac_power(mld, vif, false);
+               }
+       } else {
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/* mac80211 sta_state callback: record the new state and dispatch to the
+ * up/down transition handler.
+ */
+static int iwl_mld_mac80211_sta_state(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif *vif,
+                                     struct ieee80211_sta *sta,
+                                     enum ieee80211_sta_state old_state,
+                                     enum ieee80211_sta_state new_state)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+       IWL_DEBUG_MAC80211(mld, "station %pM state change %d->%d\n",
+                          sta->addr, old_state, new_state);
+
+       mld_sta->sta_state = new_state;
+
+       if (new_state > old_state)
+               return iwl_mld_move_sta_state_up(mld, vif, sta, old_state,
+                                                new_state);
+
+       return iwl_mld_move_sta_state_down(mld, vif, sta, old_state,
+                                          new_state);
+}
+
+/* mac80211 flush callback: flush (drop=true) or wait-drain (drop=false) the
+ * TX queues of every station, optionally restricted to one vif.
+ */
+static void iwl_mld_mac80211_flush(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif,
+                                  u32 queues, bool drop)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       int i;
+
+       /* Drain the deferred-TXQ list first so those frames are covered too */
+       iwl_mld_add_txq_list(mld);
+
+       for (i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
+               struct ieee80211_link_sta *link_sta;
+
+               link_sta = wiphy_dereference(mld->wiphy,
+                                            mld->fw_id_to_link_sta[i]);
+               if (IS_ERR_OR_NULL(link_sta))
+                       continue;
+
+               /* Skip stations that belong to a different vif */
+               if (vif &&
+                   iwl_mld_sta_from_mac80211(link_sta->sta)->vif != vif)
+                       continue;
+
+               if (drop)
+                       iwl_mld_flush_sta_txqs(mld, link_sta->sta);
+               else
+                       iwl_mld_wait_sta_txqs_empty(mld, link_sta->sta);
+       }
+}
+
+/* mac80211 flush_sta callback: drop everything queued for one station */
+static void iwl_mld_mac80211_flush_sta(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_sta *sta)
+{
+       iwl_mld_flush_sta_txqs(IWL_MAC80211_GET_MLD(hw), sta);
+}
+
+/* mac80211 ampdu_action callback: only RX aggregation start/stop is
+ * expected here; TX aggregation is offloaded to the hardware.
+ */
+static int
+iwl_mld_mac80211_ampdu_action(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_ampdu_params *params)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct ieee80211_sta *sta = params->sta;
+
+       IWL_DEBUG_HT(mld, "A-MPDU action on addr %pM tid: %d action: %d\n",
+                    sta->addr, params->tid, params->action);
+
+       switch (params->action) {
+       case IEEE80211_AMPDU_RX_START:
+               return iwl_mld_ampdu_rx_start(mld, sta, params->tid,
+                                             params->ssn, params->buf_size,
+                                             params->timeout);
+       case IEEE80211_AMPDU_RX_STOP:
+               return iwl_mld_ampdu_rx_stop(mld, sta, params->tid);
+       default:
+               /* The mac80211 TX_AMPDU_SETUP_IN_HW flag is set for all
+                * devices, since all support TX A-MPDU offload in hardware.
+                * Therefore, no TX action should be requested here.
+                */
+               WARN_ON_ONCE(1);
+               return -EINVAL;
+       }
+}
+
+/* Hardware checksum offload applies only to TCP and UDP payloads */
+static bool iwl_mld_can_hw_csum(struct sk_buff *skb)
+{
+       switch (ip_hdr(skb)->protocol) {
+       case IPPROTO_TCP:
+       case IPPROTO_UDP:
+               return true;
+       default:
+               return false;
+       }
+}
+
+/* mac80211 can_aggregate_in_amsdu callback: decide whether skb may join
+ * the A-MSDU that head starts.
+ */
+static bool iwl_mld_mac80211_can_aggregate(struct ieee80211_hw *hw,
+                                          struct sk_buff *head,
+                                          struct sk_buff *skb)
+{
+       if (!IS_ENABLED(CONFIG_INET))
+               return false;
+
+       /* A-MSDU aggregation is currently limited to IPv4 frames */
+       if (skb->protocol != htons(ETH_P_IP))
+               return false;
+
+       /* Mixing HW- and SW-checksummed frames in one A-MSDU is not allowed,
+        * so both frames must agree on the csum offload capability.
+        */
+       return iwl_mld_can_hw_csum(head) == iwl_mld_can_hw_csum(skb);
+}
+
+/* mac80211 sync_rx_queues callback: send an empty sync across all RX
+ * queues to wait for in-flight RX processing.
+ */
+static void iwl_mld_mac80211_sync_rx_queues(struct ieee80211_hw *hw)
+{
+       iwl_mld_sync_rx_queues(IWL_MAC80211_GET_MLD(hw), IWL_MLD_RXQ_EMPTY,
+                              NULL, 0);
+}
+
+/* mac80211 link_sta_rc_update callback: reconfigure TLC when a rate-control
+ * relevant attribute (bandwidth, supported rates, NSS) changed.
+ */
+static void iwl_mld_sta_rc_update(struct ieee80211_hw *hw,
+                                 struct ieee80211_vif *vif,
+                                 struct ieee80211_link_sta *link_sta,
+                                 u32 changed)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct ieee80211_bss_conf *link_conf;
+
+       if (!(changed & (IEEE80211_RC_BW_CHANGED |
+                        IEEE80211_RC_SUPP_RATES_CHANGED |
+                        IEEE80211_RC_NSS_CHANGED)))
+               return;
+
+       link_conf = link_conf_dereference_check(vif, link_sta->link_id);
+       if (WARN_ON(!link_conf))
+               return;
+
+       iwl_mld_config_tlc_link(mld, vif, link_conf, link_sta);
+}
+
+/* mac80211 set_wakeup callback: propagate the WoWLAN wakeup setting to the
+ * underlying transport device.
+ */
+static void iwl_mld_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+       device_set_wakeup_enable(IWL_MAC80211_GET_MLD(hw)->trans->dev,
+                                enabled);
+}
+
+/* Returns 0 on success. 1 if failed to suspend with wowlan:
+ * If the circumstances didn't satisfy the conditions for suspension
+ * with wowlan, mac80211 would use the no_wowlan flow.
+ * If an error had occurred we update the trans status and state here
+ * and the result will be stopping the FW.
+ */
+static int
+iwl_mld_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       int ret;
+
+       iwl_fw_runtime_suspend(&mld->fwrt);
+
+       ret = iwl_mld_wowlan_suspend(mld, wowlan);
+       if (ret) {
+               /* Negative means a real error: mark the FW as errored so it
+                * will be stopped. A positive value only means wowlan can't
+                * be used, and mac80211 falls back to the no_wowlan flow.
+                */
+               if (ret < 0) {
+                       mld->trans->state = IWL_TRANS_NO_FW;
+                       set_bit(STATUS_FW_ERROR, &mld->trans->status);
+               }
+               return 1;
+       }
+
+       if (iwl_mld_no_wowlan_suspend(mld))
+               return 1;
+
+       return 0;
+}
+
+/* mac80211 resume callback: resume from WoWLAN, then restore the FW runtime
+ * and low-latency state. Returns 0 on success or the wowlan-resume error.
+ */
+static int iwl_mld_resume(struct ieee80211_hw *hw)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       int ret;
+
+       ret = iwl_mld_wowlan_resume(mld);
+       if (ret)
+               return ret;
+
+       iwl_fw_runtime_resume(&mld->fwrt);
+
+       iwl_mld_low_latency_restart(mld);
+
+       return 0;
+}
+
+/* Allocate per-RX-queue packet-number (PN) tracking state for a pairwise
+ * key and publish it via RCU in mld_sta->ptk_pn[keyidx]. The initial PNs
+ * are taken from mac80211's current RX sequence counters for every TID and
+ * replicated to every RX queue.
+ * Returns 0 on success, -EINVAL for an out-of-range key index, -ENOMEM on
+ * allocation failure.
+ */
+static int iwl_mld_alloc_ptk_pn(struct iwl_mld *mld,
+                               struct iwl_mld_sta *mld_sta,
+                               struct ieee80211_key_conf *key,
+                               struct iwl_mld_ptk_pn **ptk_pn)
+{
+       u8 num_rx_queues = mld->trans->num_rx_queues;
+       int keyidx = key->keyidx;
+       struct ieee80211_key_seq seq;
+
+       if (WARN_ON(keyidx >= ARRAY_SIZE(mld_sta->ptk_pn)))
+               return -EINVAL;
+
+       /* there must not be an existing entry for this key index */
+       WARN_ON(rcu_access_pointer(mld_sta->ptk_pn[keyidx]));
+       *ptk_pn = kzalloc(struct_size(*ptk_pn, q, num_rx_queues),
+                         GFP_KERNEL);
+       if (!*ptk_pn)
+               return -ENOMEM;
+
+       for (u8 tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+               ieee80211_get_key_rx_seq(key, tid, &seq);
+               for (u8 q = 0; q < num_rx_queues; q++)
+                       memcpy((*ptk_pn)->q[q].pn[tid], seq.ccmp.pn,
+                              IEEE80211_CCMP_PN_LEN);
+       }
+
+       rcu_assign_pointer(mld_sta->ptk_pn[keyidx], *ptk_pn);
+
+       return 0;
+}
+
+/* Program a new key into the firmware (SET_KEY path).
+ * WEP, and TKIP on non-station vifs, are rejected with -EOPNOTSUPP so that
+ * mac80211 falls back to software encryption. Returns 0 on success.
+ */
+static int iwl_mld_set_key_add(struct iwl_mld *mld,
+                              struct ieee80211_vif *vif,
+                              struct ieee80211_sta *sta,
+                              struct ieee80211_key_conf *key)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_sta *mld_sta =
+               sta ? iwl_mld_sta_from_mac80211(sta) : NULL;
+       struct iwl_mld_ptk_pn *ptk_pn = NULL;
+       int keyidx = key->keyidx;
+       int ret;
+
+       /* Will be set to 0 if added successfully */
+       key->hw_key_idx = STA_KEY_IDX_INVALID;
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               IWL_DEBUG_MAC80211(mld, "Use SW encryption for WEP\n");
+               return -EOPNOTSUPP;
+       case WLAN_CIPHER_SUITE_TKIP:
+               if (vif->type == NL80211_IFTYPE_STATION) {
+                       key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE;
+                       break;
+               }
+               IWL_DEBUG_MAC80211(mld, "Use SW encryption for TKIP\n");
+               return -EOPNOTSUPP;
+       case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       /* track beacon-protection keys (BIGTK, key index 6/7) on a station */
+       if (vif->type == NL80211_IFTYPE_STATION &&
+           (keyidx == 6 || keyidx == 7))
+               rcu_assign_pointer(mld_vif->bigtks[keyidx - 6], key);
+
+       /* After exiting from RFKILL, hostapd configures GTK/ITGK before the
+        * AP is started, but those keys can't be sent to the FW before the
+        * MCAST/BCAST STAs are added to it (which happens upon AP start).
+        * Store it here to be sent later when the AP is started.
+        */
+       if ((vif->type == NL80211_IFTYPE_ADHOC ||
+            vif->type == NL80211_IFTYPE_AP) && !sta &&
+            !mld_vif->ap_ibss_active)
+               return iwl_mld_store_ap_early_key(mld, key, mld_vif);
+
+       /* Pairwise CCMP/GCMP keys need PN tracking state for RX reordering;
+        * during HW restart the previous state is still in place.
+        */
+       if (!mld->fw_status.in_hw_restart && mld_sta &&
+           key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
+           (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
+            key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+            key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
+               ret = iwl_mld_alloc_ptk_pn(mld, mld_sta, key, &ptk_pn);
+               if (ret)
+                       return ret;
+       }
+
+       IWL_DEBUG_MAC80211(mld, "set hwcrypto key (sta:%pM, id:%d)\n",
+                          sta ? sta->addr : NULL, keyidx);
+
+       ret = iwl_mld_add_key(mld, vif, sta, key);
+       if (ret) {
+               IWL_WARN(mld, "set key failed (%d)\n", ret);
+               /* roll back the PN state allocated above */
+               if (ptk_pn) {
+                       RCU_INIT_POINTER(mld_sta->ptk_pn[keyidx], NULL);
+                       kfree(ptk_pn);
+               }
+
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+/* Remove a key (DISABLE_KEY path): drop BIGTK tracking, free the
+ * RCU-published PN state of pairwise CCMP/GCMP keys, free a stored
+ * not-yet-installed AP key if applicable, and finally remove the key from
+ * the firmware (unless it was never installed there).
+ */
+static void iwl_mld_set_key_remove(struct iwl_mld *mld,
+                                  struct ieee80211_vif *vif,
+                                  struct ieee80211_sta *sta,
+                                  struct ieee80211_key_conf *key)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_sta *mld_sta =
+               sta ? iwl_mld_sta_from_mac80211(sta) : NULL;
+       int keyidx = key->keyidx;
+
+       /* key index 6/7 are the BIGTK slots tracked on a station vif */
+       if (vif->type == NL80211_IFTYPE_STATION &&
+           (keyidx == 6 || keyidx == 7))
+               RCU_INIT_POINTER(mld_vif->bigtks[keyidx - 6], NULL);
+
+       if (mld_sta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
+           (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
+            key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+            key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
+               struct iwl_mld_ptk_pn *ptk_pn;
+
+               if (WARN_ON(keyidx >= ARRAY_SIZE(mld_sta->ptk_pn)))
+                       return;
+
+               ptk_pn = wiphy_dereference(mld->wiphy,
+                                          mld_sta->ptk_pn[keyidx]);
+               RCU_INIT_POINTER(mld_sta->ptk_pn[keyidx], NULL);
+               /* free after a grace period - RX paths may still read it */
+               if (!WARN_ON(!ptk_pn))
+                       kfree_rcu(ptk_pn, rcu_head);
+       }
+
+       /* if this key was stored to be added later to the FW - free it here */
+       if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               iwl_mld_free_ap_early_key(mld, key, mld_vif);
+
+       /* We already removed it */
+       if (key->hw_key_idx == STA_KEY_IDX_INVALID)
+               return;
+
+       IWL_DEBUG_MAC80211(mld, "disable hwcrypto key\n");
+
+       iwl_mld_remove_key(mld, vif, sta, key);
+}
+
+/* mac80211 set_key callback: dispatch key add/remove to the helpers */
+static int iwl_mld_mac80211_set_key(struct ieee80211_hw *hw,
+                                   enum set_key_cmd cmd,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_sta *sta,
+                                   struct ieee80211_key_conf *key)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       switch (cmd) {
+       case SET_KEY:
+               /* Report any failure as -EOPNOTSUPP so that mac80211 falls
+                * back to software crypto.
+                */
+               if (iwl_mld_set_key_add(mld, vif, sta, key))
+                       return -EOPNOTSUPP;
+               return 0;
+       case DISABLE_KEY:
+               iwl_mld_set_key_remove(mld, vif, sta, key);
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+/* mac80211 pre_channel_switch callback: when EMLSR is active, exit EMLSR
+ * and keep the link that is not being quieted by the CSA.
+ */
+static int
+iwl_mld_pre_channel_switch(struct ieee80211_hw *hw,
+                          struct ieee80211_vif *vif,
+                          struct ieee80211_channel_switch *chsw)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_link *mld_link =
+               iwl_mld_link_dereference_check(mld_vif, chsw->link_id);
+       u8 primary;
+       int selected;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (WARN_ON(!mld_link))
+               return -EINVAL;
+
+       IWL_DEBUG_MAC80211(mld, "pre CSA to freq %d\n",
+                          chsw->chandef.center_freq1);
+
+       /* with a single active link, the firmware handles CSA by itself */
+       if (!iwl_mld_emlsr_active(vif))
+               return 0;
+
+       primary = iwl_mld_get_primary_link(vif);
+
+       /* stay on the primary link unless it is undergoing a CSA with quiet */
+       if (chsw->link_id == primary && chsw->block_tx)
+               selected = iwl_mld_get_other_link(vif, primary);
+       else
+               selected = primary;
+
+       /* Remember to tell the firmware that this link can't tx
+        * Note that this logic seems to be unrelated to emlsr, but it
+        * really is needed only when emlsr is active. When we have a
+        * single link, the firmware will handle all this on its own.
+        * In multi-link scenarios, we can learn about the CSA from
+        * another link and this logic is too complex for the firmware
+        * to track.
+        * Since we want to de-activate the link that got a CSA with mode=1,
+        * we need to tell the firmware not to send any frame on that link
+        * as the firmware may not be aware that link is under a CSA
+        * with mode=1 (no Tx allowed).
+        */
+       mld_link->silent_deactivation = chsw->block_tx;
+       iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_CSA, selected);
+
+       return 0;
+}
+
+/* mac80211 channel_switch callback: intentionally a no-op. Implementing it
+ * keeps mac80211 from starting its own channel-switch timer, so the driver
+ * can call ieee80211_chswitch_done() itself when the firmware sends the
+ * channel_switch_start notification.
+ */
+static void
+iwl_mld_channel_switch(struct ieee80211_hw *hw,
+                      struct ieee80211_vif *vif,
+                      struct ieee80211_channel_switch *chsw)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       IWL_DEBUG_MAC80211(mld, "dummy channel switch op\n");
+}
+
+/* mac80211 post_channel_switch callback: sanity-check that the
+ * silent-deactivation flag was consumed when the switch actually started.
+ */
+static int
+iwl_mld_post_channel_switch(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           struct ieee80211_bss_conf *link_conf)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf);
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* NOTE(review): unlike pre_channel_switch, mld_link is not
+        * NULL-checked here - presumably mac80211 guarantees the link exists
+        * at this point; confirm.
+        */
+       WARN_ON(mld_link->silent_deactivation);
+
+       return 0;
+}
+
+/* mac80211 abort_channel_switch callback: the CSA was cancelled, so the
+ * link must no longer be marked for silent deactivation.
+ */
+static void
+iwl_mld_abort_channel_switch(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_bss_conf *link_conf)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf);
+
+       IWL_DEBUG_MAC80211(mld, "abort channel switch op\n");
+
+       mld_link->silent_deactivation = false;
+}
+
+/* Switch a vif to a new channel context by swapping contexts: unassign and
+ * remove the old context, then add and assign the new one. On failure, make
+ * a best-effort attempt to re-add and re-assign the old context.
+ */
+static int
+iwl_mld_switch_vif_chanctx_swap(struct ieee80211_hw *hw,
+                               struct ieee80211_vif_chanctx_switch *vifs)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       int ret;
+
+       iwl_mld_unassign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+                                    vifs[0].old_ctx);
+       iwl_mld_remove_chanctx(hw, vifs[0].old_ctx);
+
+       ret = iwl_mld_add_chanctx(hw, vifs[0].new_ctx);
+       if (ret) {
+               IWL_ERR(mld, "failed to add new_ctx during channel switch\n");
+               goto out_reassign;
+       }
+
+       ret = iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+                                        vifs[0].new_ctx);
+       if (ret) {
+               IWL_ERR(mld,
+                       "failed to assign new_ctx during channel switch\n");
+               goto out_remove;
+       }
+
+       return 0;
+
+ out_remove:
+       iwl_mld_remove_chanctx(hw, vifs[0].new_ctx);
+ out_reassign:
+       /* the old context is already gone; if it can't be re-added there is
+        * nothing left to restore
+        */
+       if (iwl_mld_add_chanctx(hw, vifs[0].old_ctx)) {
+               IWL_ERR(mld, "failed to add old_ctx after failure\n");
+               return ret;
+       }
+
+       if (iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+                                      vifs[0].old_ctx))
+               IWL_ERR(mld, "failed to reassign old_ctx after failure\n");
+
+       return ret;
+}
+
+/* Switch a vif between two existing channel contexts by reassignment only:
+ * unassign from old_ctx, assign to new_ctx; on failure try to re-assign
+ * the old context, returning the original error either way.
+ */
+static int
+iwl_mld_switch_vif_chanctx_reassign(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif_chanctx_switch *vifs)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       int ret;
+
+       iwl_mld_unassign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+                                    vifs[0].old_ctx);
+       ret = iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+                                        vifs[0].new_ctx);
+       if (ret) {
+               IWL_ERR(mld,
+                       "failed to assign new_ctx during channel switch\n");
+               goto out_reassign;
+       }
+
+       return 0;
+
+out_reassign:
+       if (iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+                                      vifs[0].old_ctx))
+               IWL_ERR(mld, "failed to reassign old_ctx after failure\n");
+
+       return ret;
+}
+
+/* mac80211 switch_vif_chanctx op: dispatch on the requested switch mode */
+static int
+iwl_mld_switch_vif_chanctx(struct ieee80211_hw *hw,
+                          struct ieee80211_vif_chanctx_switch *vifs,
+                          int n_vifs,
+                          enum ieee80211_chanctx_switch_mode mode)
+{
+       int ret;
+
+       /* we only support a single-vif right now */
+       if (n_vifs > 1)
+               return -EOPNOTSUPP;
+
+       switch (mode) {
+       case CHANCTX_SWMODE_SWAP_CONTEXTS:
+               ret = iwl_mld_switch_vif_chanctx_swap(hw, vifs);
+               break;
+       case CHANCTX_SWMODE_REASSIGN_VIF:
+               ret = iwl_mld_switch_vif_chanctx_reassign(hw, vifs);
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       return ret;
+}
+
+/* Invalidate our RCU-visible references to a station about to be removed */
+static void iwl_mld_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_sta *sta)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_link_sta *mld_link_sta;
+       u8 link_id;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* This is called before mac80211 does RCU synchronisation,
+        * so here we already invalidate our internal RCU-protected
+        * station pointer. The rest of the code will thus no longer
+        * be able to find the station this way, and we don't rely
+        * on further RCU synchronisation after the sta_state()
+        * callback deleted the station.
+        */
+       for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
+               RCU_INIT_POINTER(mld->fw_id_to_link_sta[mld_link_sta->fw_id],
+                                NULL);
+
+       /* also drop the cached AP station pointer if it is this station */
+       if (sta == mld_vif->ap_sta)
+               mld_vif->ap_sta = NULL;
+}
+
+/* Protect the session (two DTIM intervals) so the TDLS setup response
+ * can be heard on the operating channel of the given link.
+ */
+static void
+iwl_mld_mac80211_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
+                                          struct ieee80211_vif *vif,
+                                          unsigned int link_id)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct ieee80211_bss_conf *link_conf;
+       u32 duration;
+       int ret;
+
+       link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
+       if (WARN_ON_ONCE(!link_conf))
+               return;
+
+       /* Protect the session to hear the TDLS setup response on the channel */
+
+       duration = 2 * link_conf->dtim_period * link_conf->beacon_int;
+
+       ret = iwl_mld_start_session_protection(mld, vif, duration, duration,
+                                              link_id, HZ / 5);
+       if (ret)
+               IWL_ERR(mld,
+                       "Failed to start session protection for TDLS: %d\n",
+                       ret);
+}
+
+/* mac80211 can_activate_links op: accept only as many simultaneously
+ * active links as the HW/FW combination supports.
+ */
+static bool iwl_mld_can_activate_links(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif,
+                                      u16 desired_links)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       int n_links = hweight16(desired_links);
+
+       /* Check if HW supports the wanted number of links */
+       return n_links <= iwl_mld_max_active_links(mld, vif);
+}
+
+/* mac80211 change_vif_links op: remove links that were dropped and add
+ * links that were gained, keeping firmware link objects in sync with
+ * the vif's valid_links bitmap. On failure while adding, all links that
+ * were just added are removed again and the error is returned.
+ */
+static int
+iwl_mld_change_vif_links(struct ieee80211_hw *hw,
+                        struct ieee80211_vif *vif,
+                        u16 old_links, u16 new_links,
+                        struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct ieee80211_bss_conf *link_conf;
+       u16 removed = old_links & ~new_links;
+       u16 added = new_links & ~old_links;
+       int err;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /*
+        * No bits designate non-MLO mode. We can handle MLO exit/enter by
+        * simply mapping that to link ID zero internally.
+        * Note that mac80211 does such a non-MLO to MLO switch during restart
+        * if it was in MLO before. In that case, we do not have a link to
+        * remove.
+        */
+       if (old_links == 0 && !mld->fw_status.in_hw_restart)
+               removed |= BIT(0);
+
+       if (new_links == 0)
+               added |= BIT(0);
+
+       for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+               if (removed & BIT(i)) {
+                       link_conf = old[i];
+
+                       err = iwl_mld_remove_link(mld, link_conf);
+                       if (err)
+                               return err;
+               }
+       }
+
+       for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+               if (added & BIT(i)) {
+                       link_conf = link_conf_dereference_protected(vif, i);
+                       if (WARN_ON(!link_conf))
+                               return -EINVAL;
+
+                       err = iwl_mld_add_link(mld, link_conf);
+                       if (err)
+                               goto remove_added_links;
+               }
+       }
+
+       /*
+        * Ensure we always have a valid primary_link. When using multiple
+        * links the proper value is set in assign_vif_chanctx.
+        */
+       mld_vif->emlsr.primary = new_links ? __ffs(new_links) : 0;
+
+       /*
+        * Special MLO restart case. We did not have a link when the interface
+        * was added, so do the power configuration now.
+        */
+       if (old_links == 0 && mld->fw_status.in_hw_restart)
+               iwl_mld_update_mac_power(mld, vif, false);
+
+       return 0;
+
+remove_added_links:
+       /* unwind: tear down every link we managed to add above */
+       for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+               if (!(added & BIT(i)))
+                       continue;
+
+               link_conf = link_conf_dereference_protected(vif, i);
+               if (!link_conf || !iwl_mld_link_from_mac80211(link_conf))
+                       continue;
+
+               iwl_mld_remove_link(mld, link_conf);
+       }
+
+       return err;
+}
+
+/* mac80211 change_sta_links op: sync per-link station objects with
+ * the new links bitmap.
+ */
+static int iwl_mld_change_sta_links(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_sta *sta,
+                                   u16 old_links, u16 new_links)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       return iwl_mld_update_link_stas(mld, vif, sta, old_links, new_links);
+}
+
+/* mac80211 join_ibss op: reuse the AP/IBSS start path on the default link */
+static int iwl_mld_mac80211_join_ibss(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif *vif)
+{
+       return iwl_mld_start_ap_ibss(hw, vif, &vif->bss_conf);
+}
+
+/* mac80211 leave_ibss op: reuse the AP/IBSS stop path on the default link.
+ * Note: no "return expr;" here - returning a (void) expression from a
+ * void function is an ISO C constraint violation (C11 6.8.6.4).
+ */
+static void iwl_mld_mac80211_leave_ibss(struct ieee80211_hw *hw,
+                                       struct ieee80211_vif *vif)
+{
+       iwl_mld_stop_ap_ibss(hw, vif, &vif->bss_conf);
+}
+
+/* mac80211 tx_last_beacon op: report whether we sent the last IBSS beacon */
+static int iwl_mld_mac80211_tx_last_beacon(struct ieee80211_hw *hw)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       return mld->ibss_manager;
+}
+
+/* How long EMLSR stays blocked after a non-BSS interface is prepared */
+#define IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT (5 * HZ)
+
+/* Iterator: temporarily block EMLSR on every EMLSR-capable vif and queue
+ * delayed work to lift the block after the timeout above.
+ */
+static void iwl_mld_vif_iter_emlsr_block_tmp_non_bss(void *_data, u8 *mac,
+                                                    struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       int ret;
+
+       if (!iwl_mld_vif_has_emlsr_cap(vif))
+               return;
+
+       ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
+                                      IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS,
+                                      iwl_mld_get_primary_link(vif));
+       if (ret)
+               return;
+
+       wiphy_delayed_work_queue(mld_vif->mld->wiphy,
+                                &mld_vif->emlsr.tmp_non_bss_done_wk,
+                                IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT);
+}
+
+/* mac80211 prep_add_interface op: when an AP / P2P GO / P2P client
+ * interface is about to be added, temporarily block EMLSR on all
+ * active EMLSR-capable interfaces.
+ */
+static void iwl_mld_prep_add_interface(struct ieee80211_hw *hw,
+                                      enum nl80211_iftype type)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       IWL_DEBUG_MAC80211(mld, "prep_add_interface: type=%u\n", type);
+
+       if (!(type == NL80211_IFTYPE_AP ||
+             type == NL80211_IFTYPE_P2P_GO ||
+             type == NL80211_IFTYPE_P2P_CLIENT))
+               return;
+
+       ieee80211_iterate_active_interfaces_mtx(mld->hw,
+                                               IEEE80211_IFACE_ITER_NORMAL,
+                                               iwl_mld_vif_iter_emlsr_block_tmp_non_bss,
+                                               NULL);
+}
+
+/* mac80211 set_hw_timestamp op: enable/disable HW timestamping of
+ * TM/FTM frames for a specific peer MAC address.
+ */
+static int iwl_mld_set_hw_timestamp(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif,
+                                   struct cfg80211_set_hw_timestamp *hwts)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       u32 protocols = 0;
+
+       /* HW timestamping is only supported for a specific station */
+       if (!hwts->macaddr)
+               return -EOPNOTSUPP;
+
+       if (hwts->enable)
+               protocols =
+                       IWL_TIME_SYNC_PROTOCOL_TM | IWL_TIME_SYNC_PROTOCOL_FTM;
+
+       return iwl_mld_time_sync_config(mld, hwts->macaddr, protocols);
+}
+
+/* mac80211 start_pmsr op: kick off an FTM peer measurement request */
+static int iwl_mld_start_pmsr(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             struct cfg80211_pmsr_request *request)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+       return iwl_mld_ftm_start(mld, vif, request);
+}
+
+/* mac80211 callbacks implemented by the iwlmld op-mode */
+const struct ieee80211_ops iwl_mld_hw_ops = {
+       .tx = iwl_mld_mac80211_tx,
+       .start = iwl_mld_mac80211_start,
+       .stop = iwl_mld_mac80211_stop,
+       .config = iwl_mld_mac80211_config,
+       .add_interface = iwl_mld_mac80211_add_interface,
+       .remove_interface = iwl_mld_mac80211_remove_interface,
+       .conf_tx = iwl_mld_mac80211_conf_tx,
+       .prepare_multicast = iwl_mld_mac80211_prepare_multicast,
+       .configure_filter = iwl_mld_mac80211_configure_filter,
+       .reconfig_complete = iwl_mld_mac80211_reconfig_complete,
+       .wake_tx_queue = iwl_mld_mac80211_wake_tx_queue,
+       .add_chanctx = iwl_mld_add_chanctx,
+       .remove_chanctx = iwl_mld_remove_chanctx,
+       .change_chanctx = iwl_mld_change_chanctx,
+       .assign_vif_chanctx = iwl_mld_assign_vif_chanctx,
+       .unassign_vif_chanctx = iwl_mld_unassign_vif_chanctx,
+       .set_rts_threshold = iwl_mld_mac80211_set_rts_threshold,
+       .link_info_changed = iwl_mld_mac80211_link_info_changed,
+       .vif_cfg_changed = iwl_mld_mac80211_vif_cfg_changed,
+       .set_key = iwl_mld_mac80211_set_key,
+       .hw_scan = iwl_mld_mac80211_hw_scan,
+       .cancel_hw_scan = iwl_mld_mac80211_cancel_hw_scan,
+       .sched_scan_start = iwl_mld_mac80211_sched_scan_start,
+       .sched_scan_stop = iwl_mld_mac80211_sched_scan_stop,
+       .mgd_prepare_tx = iwl_mld_mac80211_mgd_prepare_tx,
+       .mgd_complete_tx = iwl_mld_mac_mgd_complete_tx,
+       .sta_state = iwl_mld_mac80211_sta_state,
+       .sta_statistics = iwl_mld_mac80211_sta_statistics,
+       .flush = iwl_mld_mac80211_flush,
+       .flush_sta = iwl_mld_mac80211_flush_sta,
+       .ampdu_action = iwl_mld_mac80211_ampdu_action,
+       .can_aggregate_in_amsdu = iwl_mld_mac80211_can_aggregate,
+       .sync_rx_queues = iwl_mld_mac80211_sync_rx_queues,
+       .link_sta_rc_update = iwl_mld_sta_rc_update,
+       .start_ap = iwl_mld_start_ap_ibss,
+       .stop_ap = iwl_mld_stop_ap_ibss,
+       .pre_channel_switch = iwl_mld_pre_channel_switch,
+       .channel_switch = iwl_mld_channel_switch,
+       .post_channel_switch = iwl_mld_post_channel_switch,
+       .abort_channel_switch = iwl_mld_abort_channel_switch,
+       .switch_vif_chanctx = iwl_mld_switch_vif_chanctx,
+       .sta_pre_rcu_remove = iwl_mld_sta_pre_rcu_remove,
+       .remain_on_channel = iwl_mld_start_roc,
+       .cancel_remain_on_channel = iwl_mld_cancel_roc,
+       .can_activate_links = iwl_mld_can_activate_links,
+       .change_vif_links = iwl_mld_change_vif_links,
+       .change_sta_links = iwl_mld_change_sta_links,
+#ifdef CONFIG_PM_SLEEP
+       .suspend = iwl_mld_suspend,
+       .resume = iwl_mld_resume,
+       .set_wakeup = iwl_mld_set_wakeup,
+       .set_rekey_data = iwl_mld_set_rekey_data,
+#if IS_ENABLED(CONFIG_IPV6)
+       .ipv6_addr_change = iwl_mld_ipv6_addr_change,
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+#endif /* CONFIG_PM_SLEEP */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       .vif_add_debugfs = iwl_mld_add_vif_debugfs,
+       .link_add_debugfs = iwl_mld_add_link_debugfs,
+       .link_sta_add_debugfs = iwl_mld_add_link_sta_debugfs,
+#endif
+       .mgd_protect_tdls_discover = iwl_mld_mac80211_mgd_protect_tdls_discover,
+       .join_ibss = iwl_mld_mac80211_join_ibss,
+       .leave_ibss = iwl_mld_mac80211_leave_ibss,
+       .tx_last_beacon = iwl_mld_mac80211_tx_last_beacon,
+       .prep_add_interface = iwl_mld_prep_add_interface,
+       .set_hw_timestamp = iwl_mld_set_hw_timestamp,
+       .start_pmsr = iwl_mld_start_pmsr,
+};
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_mac80211_h__
+#define __iwl_mld_mac80211_h__
+
+#include "mld.h"
+
+/* Register the device with mac80211; returns 0 or a negative error */
+int iwl_mld_register_hw(struct iwl_mld *mld);
+/* Re-evaluate the multicast filter configuration (implemented in mac80211.c) */
+void iwl_mld_recalc_multicast_filter(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_mac80211_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+
+#include <fw/dbg.h>
+#include <iwl-nvm-parse.h>
+
+#include "mld.h"
+#include "hcmd.h"
+#include "mcc.h"
+
+/* It is the caller's responsibility to free the pointer returned here.
+ * Validates the packet payload length against the advertised channel
+ * count before duplicating the notification.
+ * Returns ERR_PTR(-EINVAL) on a malformed packet, ERR_PTR(-ENOMEM) on OOM.
+ */
+static struct iwl_mcc_update_resp_v8 *
+iwl_mld_parse_mcc_update_resp_v8(const struct iwl_rx_packet *pkt)
+{
+       const struct iwl_mcc_update_resp_v8 *mcc_resp_v8 = (const void *)pkt->data;
+       int n_channels = __le32_to_cpu(mcc_resp_v8->n_channels);
+       struct iwl_mcc_update_resp_v8 *resp_cp;
+       int notif_len = struct_size(resp_cp, channels, n_channels);
+
+       if (iwl_rx_packet_payload_len(pkt) != notif_len)
+               return ERR_PTR(-EINVAL);
+
+       resp_cp = kmemdup(mcc_resp_v8, notif_len, GFP_KERNEL);
+       if (!resp_cp)
+               return ERR_PTR(-ENOMEM);
+
+       return resp_cp;
+}
+
+/* It is the caller's responsibility to free the pointer returned here.
+ * Versions 5/6 share the v4 wire layout (see the version comment in
+ * iwl_mld_update_mcc), so the packet is parsed as v4 and converted
+ * field by field into a v8 structure.
+ */
+static struct iwl_mcc_update_resp_v8 *
+iwl_mld_parse_mcc_update_resp_v5_v6(const struct iwl_rx_packet *pkt)
+{
+       const struct iwl_mcc_update_resp_v4 *mcc_resp_v4 = (const void *)pkt->data;
+       struct iwl_mcc_update_resp_v8 *resp_cp;
+       int n_channels = __le32_to_cpu(mcc_resp_v4->n_channels);
+       int resp_len;
+
+       if (iwl_rx_packet_payload_len(pkt) !=
+           struct_size(mcc_resp_v4, channels, n_channels))
+               return ERR_PTR(-EINVAL);
+
+       resp_len = struct_size(resp_cp, channels, n_channels);
+       resp_cp = kzalloc(resp_len, GFP_KERNEL);
+       if (!resp_cp)
+               return ERR_PTR(-ENOMEM);
+
+       resp_cp->status = mcc_resp_v4->status;
+       resp_cp->mcc = mcc_resp_v4->mcc;
+       /* cap widens from __le16 (v4) to __le32 (v8) */
+       resp_cp->cap = cpu_to_le32(le16_to_cpu(mcc_resp_v4->cap));
+       resp_cp->source_id = mcc_resp_v4->source_id;
+       resp_cp->geo_info = mcc_resp_v4->geo_info;
+       resp_cp->n_channels = mcc_resp_v4->n_channels;
+       memcpy(resp_cp->channels, mcc_resp_v4->channels,
+              n_channels * sizeof(__le32));
+
+       return resp_cp;
+}
+
+/* It is the caller's responsibility to free the pointer returned here.
+ * Send MCC_UPDATE_CMD to the firmware with the given alpha2/source and
+ * parse the response according to the notification version the firmware
+ * advertises. Returns the response (normalized to v8 layout) or an ERR_PTR.
+ */
+static struct iwl_mcc_update_resp_v8 *
+iwl_mld_update_mcc(struct iwl_mld *mld, const char *alpha2,
+                  enum iwl_mcc_source src_id)
+{
+       int resp_ver = iwl_fw_lookup_notif_ver(mld->fw, LONG_GROUP,
+                                              MCC_UPDATE_CMD, 0);
+       struct iwl_mcc_update_cmd mcc_update_cmd = {
+               .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
+               .source_id = (u8)src_id,
+       };
+       struct iwl_mcc_update_resp_v8 *resp_cp;
+       struct iwl_rx_packet *pkt;
+       struct iwl_host_cmd cmd = {
+               .id = MCC_UPDATE_CMD,
+               .flags = CMD_WANT_SKB,
+               .data = { &mcc_update_cmd },
+               .len[0] = sizeof(mcc_update_cmd),
+       };
+       int ret;
+       u16 mcc;
+
+       IWL_DEBUG_LAR(mld, "send MCC update to FW with '%c%c' src = %d\n",
+                     alpha2[0], alpha2[1], src_id);
+
+       ret = iwl_mld_send_cmd(mld, &cmd);
+       if (ret)
+               return ERR_PTR(ret);
+
+       pkt = cmd.resp_pkt;
+
+       /* For Wifi-7 radios, we get version 8
+        * For Wifi-6E radios, we get version 6
+        * For Wifi-6 radios, we get version 5, but 5, 6, and 4 are compatible.
+        */
+       switch (resp_ver) {
+       case 5:
+       case 6:
+               resp_cp = iwl_mld_parse_mcc_update_resp_v5_v6(pkt);
+               break;
+       case 8:
+               resp_cp = iwl_mld_parse_mcc_update_resp_v8(pkt);
+               break;
+       default:
+               IWL_FW_CHECK_FAILED(mld, "Unknown MCC_UPDATE_CMD version %d\n", resp_ver);
+               resp_cp = ERR_PTR(-EINVAL);
+       }
+
+       if (IS_ERR(resp_cp))
+               goto exit;
+
+       mcc = le16_to_cpu(resp_cp->mcc);
+
+       IWL_FW_CHECK(mld, !mcc, "mcc can't be 0: %d\n", mcc);
+
+       IWL_DEBUG_LAR(mld,
+                     "MCC response status: 0x%x. new MCC: 0x%x ('%c%c')\n",
+                     le32_to_cpu(resp_cp->status), mcc, mcc >> 8, mcc & 0xff);
+
+exit:
+       /* the response packet is owned by the cmd; free it in all cases */
+       iwl_free_resp(&cmd);
+       return resp_cp;
+}
+
+/* It is the caller's responsibility to free the pointer returned here.
+ * Query the firmware for an MCC update and translate the channel info
+ * into an ieee80211_regdomain. Also updates mld->mcc_src and the
+ * DISALLOW_PUNCTURING HW flag per BIOS policy for the new MCC.
+ * @changed: optional out-param, set when the regdomain actually changed.
+ * Returns NULL / ERR_PTR on failure.
+ */
+struct ieee80211_regdomain *
+iwl_mld_get_regdomain(struct iwl_mld *mld,
+                     const char *alpha2,
+                     enum iwl_mcc_source src_id,
+                     bool *changed)
+{
+       struct ieee80211_regdomain *regd = NULL;
+       struct iwl_mcc_update_resp_v8 *resp;
+       u8 resp_ver = iwl_fw_lookup_notif_ver(mld->fw, IWL_ALWAYS_LONG_GROUP,
+                                             MCC_UPDATE_CMD, 0);
+
+       IWL_DEBUG_LAR(mld, "Getting regdomain data for %s from FW\n", alpha2);
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       resp = iwl_mld_update_mcc(mld, alpha2, src_id);
+       if (IS_ERR(resp)) {
+               IWL_DEBUG_LAR(mld, "Could not get update from FW %ld\n",
+                             PTR_ERR(resp));
+               /* NULL it so the kfree() at "out" is a no-op */
+               resp = NULL;
+               goto out;
+       }
+
+       if (changed) {
+               u32 status = le32_to_cpu(resp->status);
+
+               *changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
+                           status == MCC_RESP_ILLEGAL);
+       }
+       IWL_DEBUG_LAR(mld, "MCC update response version: %d\n", resp_ver);
+
+       regd = iwl_parse_nvm_mcc_info(mld->trans->dev, mld->cfg,
+                                     __le32_to_cpu(resp->n_channels),
+                                     resp->channels,
+                                     __le16_to_cpu(resp->mcc),
+                                     __le16_to_cpu(resp->geo_info),
+                                     le32_to_cpu(resp->cap), resp_ver);
+
+       if (IS_ERR(regd)) {
+               IWL_DEBUG_LAR(mld, "Could not get parse update from FW %ld\n",
+                             PTR_ERR(regd));
+               goto out;
+       }
+
+       IWL_DEBUG_LAR(mld, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
+                     regd->alpha2, regd->alpha2[0],
+                     regd->alpha2[1], resp->source_id);
+
+       mld->mcc_src = resp->source_id;
+
+       if (!iwl_puncturing_is_allowed_in_bios(mld->bios_enable_puncturing,
+                                              le16_to_cpu(resp->mcc)))
+               ieee80211_hw_set(mld->hw, DISALLOW_PUNCTURING);
+       else
+               __clear_bit(IEEE80211_HW_DISALLOW_PUNCTURING, mld->hw->flags);
+
+out:
+       kfree(resp);
+       return regd;
+}
+
+/* It is the caller's responsibility to free the pointer returned here.
+ * "ZZ" with MCC_SOURCE_GET_CURRENT asks the FW for its current regdomain.
+ */
+static struct ieee80211_regdomain *
+iwl_mld_get_current_regdomain(struct iwl_mld *mld,
+                             bool *changed)
+{
+       return iwl_mld_get_regdomain(mld, "ZZ",
+                                    MCC_SOURCE_GET_CURRENT, changed);
+}
+
+/* Fetch the current regdomain from FW and push it to cfg80211 if it changed */
+void iwl_mld_update_changed_regdomain(struct iwl_mld *mld)
+{
+       struct ieee80211_regdomain *regd;
+       bool changed;
+
+       regd = iwl_mld_get_current_regdomain(mld, &changed);
+
+       if (IS_ERR_OR_NULL(regd))
+               return;
+
+       if (changed)
+               regulatory_set_wiphy_regd(mld->wiphy, regd);
+       kfree(regd);
+}
+
+/* Replay the last-known MCC (alpha2 + source) to a freshly started FW,
+ * and sync cfg80211 if the resulting regdomain differs.
+ */
+static int iwl_mld_apply_last_mcc(struct iwl_mld *mld,
+                                 const char *alpha2)
+{
+       struct ieee80211_regdomain *regd;
+       u32 used_src;
+       bool changed;
+       int ret;
+
+       /* save the last source in case we overwrite it below */
+       used_src = mld->mcc_src;
+
+       /* Notify the firmware we support wifi location updates */
+       regd = iwl_mld_get_current_regdomain(mld, NULL);
+       if (!IS_ERR_OR_NULL(regd))
+               kfree(regd);
+
+       /* Now set our last stored MCC and source */
+       regd = iwl_mld_get_regdomain(mld, alpha2, used_src,
+                                    &changed);
+       if (IS_ERR_OR_NULL(regd))
+               return -EIO;
+
+       /* update cfg80211 if the regdomain was changed */
+       if (changed)
+               ret = regulatory_set_wiphy_regd_sync(mld->wiphy, regd);
+       else
+               ret = 0;
+
+       kfree(regd);
+       return ret;
+}
+
+/* Initialize MCC/regulatory state at FW startup: replay a previously set
+ * regdomain if one exists; otherwise query the FW's current regdomain,
+ * optionally override it with the BIOS-provided MCC, and sync cfg80211.
+ */
+int iwl_mld_init_mcc(struct iwl_mld *mld)
+{
+       const struct ieee80211_regdomain *r;
+       struct ieee80211_regdomain *regd;
+       char mcc[3];
+       int retval;
+
+       /* try to replay the last set MCC to FW */
+       r = wiphy_dereference(mld->wiphy, mld->wiphy->regd);
+
+       if (r)
+               return iwl_mld_apply_last_mcc(mld, r->alpha2);
+
+       regd = iwl_mld_get_current_regdomain(mld, NULL);
+       if (IS_ERR_OR_NULL(regd))
+               return -EIO;
+
+       if (!iwl_bios_get_mcc(&mld->fwrt, mcc)) {
+               /* BIOS provided an MCC: prefer it over the FW default */
+               kfree(regd);
+               regd = iwl_mld_get_regdomain(mld, mcc, MCC_SOURCE_BIOS, NULL);
+               if (IS_ERR_OR_NULL(regd))
+                       return -EIO;
+       }
+
+       retval = regulatory_set_wiphy_regd_sync(mld->wiphy, regd);
+
+       kfree(regd);
+       return retval;
+}
+
+/* Iterator: set *data (bool) if this vif is an associated station */
+static void iwl_mld_find_assoc_vif_iterator(void *data, u8 *mac,
+                                           struct ieee80211_vif *vif)
+{
+       bool *assoc = data;
+
+       if (vif->type == NL80211_IFTYPE_STATION &&
+           vif->cfg.assoc)
+               *assoc = true;
+}
+
+/* Return true if any active vif is an associated station */
+static bool iwl_mld_is_a_vif_assoc(struct iwl_mld *mld)
+{
+       bool assoc = false;
+
+       ieee80211_iterate_active_interfaces_atomic(mld->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  iwl_mld_find_assoc_vif_iterator,
+                                                  &assoc);
+       return assoc;
+}
+
+/* Handle an MCC chub notification from the FW: ignore WIFI-sourced
+ * updates while associated, otherwise fetch the new regdomain and
+ * push it to cfg80211 if it changed.
+ */
+void iwl_mld_handle_update_mcc(struct iwl_mld *mld, struct iwl_rx_packet *pkt)
+{
+       struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
+       enum iwl_mcc_source src;
+       char mcc[3];
+       struct ieee80211_regdomain *regd;
+       bool changed;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (iwl_mld_is_a_vif_assoc(mld) &&
+           notif->source_id == MCC_SOURCE_WIFI) {
+               IWL_DEBUG_LAR(mld, "Ignore mcc update while associated\n");
+               return;
+       }
+
+       /* decode the big-endian-in-u16 country code into an alpha2 string */
+       mcc[0] = le16_to_cpu(notif->mcc) >> 8;
+       mcc[1] = le16_to_cpu(notif->mcc) & 0xff;
+       mcc[2] = '\0';
+       src = notif->source_id;
+
+       IWL_DEBUG_LAR(mld,
+                     "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
+                     mcc, src);
+       regd = iwl_mld_get_regdomain(mld, mcc, src, &changed);
+       if (IS_ERR_OR_NULL(regd))
+               return;
+
+       if (changed)
+               regulatory_set_wiphy_regd(mld->hw->wiphy, regd);
+       kfree(regd);
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_mcc_h__
+#define __iwl_mld_mcc_h__
+
+/* Initialize MCC/regulatory state at FW startup */
+int iwl_mld_init_mcc(struct iwl_mld *mld);
+/* Handle an MCC chub update notification from the FW */
+void iwl_mld_handle_update_mcc(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
+/* Re-query the FW regdomain and sync cfg80211 if it changed */
+void iwl_mld_update_changed_regdomain(struct iwl_mld *mld);
+/* Query FW for the regdomain of @alpha2; caller frees the result */
+struct ieee80211_regdomain *
+iwl_mld_get_regdomain(struct iwl_mld *mld,
+                     const char *alpha2,
+                     enum iwl_mcc_source src_id,
+                     bool *changed);
+
+#endif /* __iwl_mld_mcc_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <net/mac80211.h>
+
+#include "fw/api/rx.h"
+#include "fw/api/datapath.h"
+#include "fw/api/commands.h"
+#include "fw/api/offload.h"
+#include "fw/api/coex.h"
+#include "fw/dbg.h"
+#include "fw/uefi.h"
+
+#include "mld.h"
+#include "mlo.h"
+#include "mac80211.h"
+#include "led.h"
+#include "scan.h"
+#include "tx.h"
+#include "sta.h"
+#include "regulatory.h"
+#include "thermal.h"
+#include "low_latency.h"
+#include "hcmd.h"
+#include "fw/api/location.h"
+
+#define DRV_DESCRIPTION "Intel(R) MLD wireless driver for Linux"
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IWLWIFI");
+
+static const struct iwl_op_mode_ops iwl_mld_ops;
+
+/* Module entry point: register the "iwlmld" op-mode with the iwlwifi core */
+static int __init iwl_mld_init(void)
+{
+       int ret = iwl_opmode_register("iwlmld", &iwl_mld_ops);
+
+       if (ret)
+               pr_err("Unable to register MLD op_mode: %d\n", ret);
+
+       return ret;
+}
+module_init(iwl_mld_init);
+
+/* Module exit point: deregister the op-mode */
+static void __exit iwl_mld_exit(void)
+{
+       iwl_opmode_deregister("iwlmld");
+}
+module_exit(iwl_mld_exit);
+
+/* Initialize the basic fields, locks, lists and work items of an iwl_mld
+ * instance. Exported to KUnit tests so they can build an mld without
+ * going through full op-mode start.
+ */
+VISIBLE_IF_IWLWIFI_KUNIT
+void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
+                      const struct iwl_cfg *cfg, const struct iwl_fw *fw,
+                      struct ieee80211_hw *hw, struct dentry *dbgfs_dir)
+{
+       mld->dev = trans->dev;
+       mld->trans = trans;
+       mld->cfg = cfg;
+       mld->fw = fw;
+       mld->hw = hw;
+       mld->wiphy = hw->wiphy;
+       mld->debugfs_dir = dbgfs_dir;
+
+       iwl_notification_wait_init(&mld->notif_wait);
+
+       /* Setup async RX handling */
+       spin_lock_init(&mld->async_handlers_lock);
+       INIT_LIST_HEAD(&mld->async_handlers_list);
+       wiphy_work_init(&mld->async_handlers_wk,
+                       iwl_mld_async_handlers_wk);
+
+       /* Dynamic Queue Allocation */
+       spin_lock_init(&mld->add_txqs_lock);
+       INIT_LIST_HEAD(&mld->txqs_to_add);
+       wiphy_work_init(&mld->add_txqs_wk, iwl_mld_add_txqs_wk);
+
+       /* Setup RX queues sync wait queue */
+       init_waitqueue_head(&mld->rxq_sync.waitq);
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_construct_mld);
+
+/* fw-runtime callback: take the wiphy mutex for the duration of a dump */
+static void __acquires(&mld->wiphy->mtx)
+iwl_mld_fwrt_dump_start(void *ctx)
+{
+       struct iwl_mld *mld = ctx;
+
+       wiphy_lock(mld->wiphy);
+}
+
+/* fw-runtime callback: release the wiphy mutex taken in dump_start */
+static void __releases(&mld->wiphy->mtx)
+iwl_mld_fwrt_dump_end(void *ctx)
+{
+       struct iwl_mld *mld = ctx;
+
+       wiphy_unlock(mld->wiphy);
+}
+
+/* fw-runtime callback: whether D3 debug data collection is enabled */
+static bool iwl_mld_d3_debug_enable(void *ctx)
+{
+       return IWL_MLD_D3_DEBUG;
+}
+
+/* fw-runtime callback: send a host command under the wiphy lock */
+static int iwl_mld_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
+{
+       struct iwl_mld *mld = (struct iwl_mld *)ctx;
+       int ret;
+
+       wiphy_lock(mld->wiphy);
+       ret = iwl_mld_send_cmd(mld, host_cmd);
+       wiphy_unlock(mld->wiphy);
+
+       return ret;
+}
+
+/* Callbacks the shared fw-runtime layer invokes into this op-mode */
+static const struct iwl_fw_runtime_ops iwl_mld_fwrt_ops = {
+       .dump_start = iwl_mld_fwrt_dump_start,
+       .dump_end = iwl_mld_fwrt_dump_end,
+       .send_hcmd = iwl_mld_fwrt_send_hcmd,
+       .d3_debug_enable = iwl_mld_d3_debug_enable,
+};
+
+/* Initialize the shared fw-runtime for this mld and select the regular
+ * (non-init) ucode image as the current one.
+ */
+static void
+iwl_mld_construct_fw_runtime(struct iwl_mld *mld, struct iwl_trans *trans,
+                            const struct iwl_fw *fw,
+                            struct dentry *debugfs_dir)
+{
+       iwl_fw_runtime_init(&mld->fwrt, trans, fw, &iwl_mld_fwrt_ops, mld,
+                           NULL, NULL, debugfs_dir);
+
+       iwl_fw_set_current_image(&mld->fwrt, IWL_UCODE_REGULAR);
+}
+
+/* Command/notification names for LEGACY_GROUP and LONG_GROUP
+ * (see iwl_mld_groups below).
+ * Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_legacy_names[] = {
+       HCMD_NAME(UCODE_ALIVE_NTFY),
+       HCMD_NAME(INIT_COMPLETE_NOTIF),
+       HCMD_NAME(PHY_CONTEXT_CMD),
+       HCMD_NAME(SCAN_CFG_CMD),
+       HCMD_NAME(SCAN_REQ_UMAC),
+       HCMD_NAME(SCAN_ABORT_UMAC),
+       HCMD_NAME(SCAN_COMPLETE_UMAC),
+       HCMD_NAME(TX_CMD),
+       HCMD_NAME(TXPATH_FLUSH),
+       HCMD_NAME(LEDS_CMD),
+       HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION),
+       HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION),
+       HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
+       HCMD_NAME(POWER_TABLE_CMD),
+       HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
+       HCMD_NAME(BEACON_NOTIFICATION),
+       HCMD_NAME(BEACON_TEMPLATE_CMD),
+       HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
+       HCMD_NAME(REDUCE_TX_POWER_CMD),
+       HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
+       HCMD_NAME(MAC_PM_POWER_TABLE),
+       HCMD_NAME(MFUART_LOAD_NOTIFICATION),
+       HCMD_NAME(RSS_CONFIG_CMD),
+       HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
+       HCMD_NAME(REPLY_RX_MPDU_CMD),
+       HCMD_NAME(BA_NOTIF),
+       HCMD_NAME(MCC_UPDATE_CMD),
+       HCMD_NAME(MCC_CHUB_UPDATE_CMD),
+       HCMD_NAME(MCAST_FILTER_CMD),
+       HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
+       HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
+       HCMD_NAME(MATCH_FOUND_NOTIFICATION),
+       HCMD_NAME(WOWLAN_PATTERNS),
+       HCMD_NAME(WOWLAN_CONFIGURATION),
+       HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
+       HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
+       HCMD_NAME(DEBUG_HOST_COMMAND),
+       HCMD_NAME(LDBG_CONFIG_CMD),
+};
+
+/* Command/notification names for SYSTEM_GROUP.
+ * Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_system_names[] = {
+       HCMD_NAME(SHARED_MEM_CFG_CMD),
+       HCMD_NAME(SOC_CONFIGURATION_CMD),
+       HCMD_NAME(INIT_EXTENDED_CFG_CMD),
+       HCMD_NAME(FW_ERROR_RECOVERY_CMD),
+       HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
+       HCMD_NAME(SYSTEM_STATISTICS_CMD),
+       HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF),
+};
+
+/* Command/notification names for REGULATORY_AND_NVM_GROUP.
+ * Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_reg_and_nvm_names[] = {
+       HCMD_NAME(LARI_CONFIG_CHANGE),
+       HCMD_NAME(NVM_GET_INFO),
+       HCMD_NAME(TAS_CONFIG),
+       HCMD_NAME(SAR_OFFSET_MAPPING_TABLE_CMD),
+       HCMD_NAME(MCC_ALLOWED_AP_TYPE_CMD),
+};
+
+/* Command/notification names for DEBUG_GROUP.
+ * Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_debug_names[] = {
+       HCMD_NAME(HOST_EVENT_CFG),
+       HCMD_NAME(DBGC_SUSPEND_RESUME),
+};
+
+/* Command/notification names for MAC_CONF_GROUP.
+ * Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_mac_conf_names[] = {
+       HCMD_NAME(LOW_LATENCY_CMD),
+       HCMD_NAME(SESSION_PROTECTION_CMD),
+       HCMD_NAME(MAC_CONFIG_CMD),
+       HCMD_NAME(LINK_CONFIG_CMD),
+       HCMD_NAME(STA_CONFIG_CMD),
+       HCMD_NAME(AUX_STA_CMD),
+       HCMD_NAME(STA_REMOVE_CMD),
+       HCMD_NAME(ROC_CMD),
+       HCMD_NAME(MISSED_BEACONS_NOTIF),
+       HCMD_NAME(EMLSR_TRANS_FAIL_NOTIF),
+       HCMD_NAME(ROC_NOTIF),
+       HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF),
+       HCMD_NAME(SESSION_PROTECTION_NOTIF),
+       HCMD_NAME(PROBE_RESPONSE_DATA_NOTIF),
+       HCMD_NAME(CHANNEL_SWITCH_START_NOTIF),
+};
+
+/* Command/notification names for DATA_PATH_GROUP.
+ * Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_data_path_names[] = {
+       HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+       HCMD_NAME(WNM_PLATFORM_PTM_REQUEST_CMD),
+       HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
+       HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
+       HCMD_NAME(TLC_MNG_CONFIG_CMD),
+       HCMD_NAME(RX_BAID_ALLOCATION_CONFIG_CMD),
+       HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
+       HCMD_NAME(OMI_SEND_STATUS_NOTIF),
+       HCMD_NAME(ESR_MODE_NOTIF),
+       HCMD_NAME(MONITOR_NOTIF),
+       HCMD_NAME(TLC_MNG_UPDATE_NOTIF),
+       HCMD_NAME(MU_GROUP_MGMT_NOTIF),
+};
+
+/* Command/notification names for LOCATION_GROUP (FTM/ToF).
+ * Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_location_names[] = {
+       HCMD_NAME(TOF_RANGE_REQ_CMD),
+       HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
+};
+
+/* Command/notification names for PHY_OPS_GROUP (thermal/DTS/power).
+ * Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_phy_names[] = {
+       HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
+       HCMD_NAME(CTDP_CONFIG_CMD),
+       HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
+       HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD),
+       HCMD_NAME(CT_KILL_NOTIFICATION),
+       HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
+};
+
+/* Command/notification names for STATISTICS_GROUP.
+ * Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_statistics_names[] = {
+       HCMD_NAME(STATISTICS_OPER_NOTIF),
+       HCMD_NAME(STATISTICS_OPER_PART1_NOTIF),
+};
+
+/* Command/notification names for PROT_OFFLOAD_GROUP.
+ * Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_prot_offload_names[] = {
+       HCMD_NAME(STORED_BEACON_NTF),
+};
+
+/* Command/notification names for BT_COEX_GROUP.
+ * Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_coex_names[] = {
+       HCMD_NAME(PROFILE_NOTIF),
+};
+
+/* Per-group command-name tables; registered with the transport in
+ * iwl_mld_configure_trans(). Exposed to kunit tests when enabled.
+ */
+VISIBLE_IF_IWLWIFI_KUNIT
+const struct iwl_hcmd_arr iwl_mld_groups[] = {
+       [LEGACY_GROUP] = HCMD_ARR(iwl_mld_legacy_names),
+       [LONG_GROUP] = HCMD_ARR(iwl_mld_legacy_names),
+       [SYSTEM_GROUP] = HCMD_ARR(iwl_mld_system_names),
+       [MAC_CONF_GROUP] = HCMD_ARR(iwl_mld_mac_conf_names),
+       [DATA_PATH_GROUP] = HCMD_ARR(iwl_mld_data_path_names),
+       [LOCATION_GROUP] = HCMD_ARR(iwl_mld_location_names),
+       [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mld_reg_and_nvm_names),
+       [DEBUG_GROUP] = HCMD_ARR(iwl_mld_debug_names),
+       [PHY_OPS_GROUP] = HCMD_ARR(iwl_mld_phy_names),
+       [STATISTICS_GROUP] = HCMD_ARR(iwl_mld_statistics_names),
+       [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mld_prot_offload_names),
+       [BT_COEX_GROUP] = HCMD_ARR(iwl_mld_coex_names),
+};
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_groups);
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+/* NOTE(review): "goups" is a typo, but this name is part of the interface
+ * the kunit tests link against — rename only together with all users.
+ */
+const unsigned int global_iwl_mld_goups_size = ARRAY_SIZE(iwl_mld_groups);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(global_iwl_mld_goups_size);
+#endif
+
+/* Register the op-mode-specific parameters with the transport layer:
+ * command-name tables, rx buffer size, no-reclaim commands, etc.
+ */
+static void
+iwl_mld_configure_trans(struct iwl_op_mode *op_mode)
+{
+       const struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+       /* Commands whose responses are consumed directly, not reclaimed
+        * through the TX queue machinery
+        */
+       static const u8 no_reclaim_cmds[] = {TX_CMD};
+       struct iwl_trans_config trans_cfg = {
+               .op_mode = op_mode,
+               /* Rx is not supported yet, but add it to avoid warnings */
+               .rx_buf_size = iwl_amsdu_size_to_rxb_size(),
+               .command_groups = iwl_mld_groups,
+               .command_groups_size = ARRAY_SIZE(iwl_mld_groups),
+               .fw_reset_handshake = true,
+               .queue_alloc_cmd_ver =
+                       iwl_fw_lookup_cmd_ver(mld->fw,
+                                             WIDE_ID(DATA_PATH_GROUP,
+                                                     SCD_QUEUE_CONFIG_CMD),
+                                             0),
+               .no_reclaim_cmds = no_reclaim_cmds,
+               .n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds),
+               /* mac80211 tx_info driver_data slots used by the op mode */
+               .cb_data_offs = offsetof(struct ieee80211_tx_info,
+                                        driver_data[2]),
+       };
+       struct iwl_trans *trans = mld->trans;
+
+       trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+       trans->iml = mld->fw->iml;
+       trans->iml_len = mld->fw->iml_len;
+       trans->wide_cmd_header = true;
+
+       iwl_trans_configure(trans, &trans_cfg);
+}
+
+/*
+ *****************************************************
+ * op mode ops functions
+ *****************************************************
+ */
+
+/* Number of attempts to boot the firmware before giving up */
+#define NUM_FW_LOAD_RETRIES    3
+
+/* Allocate and set up the op mode: allocate the mac80211 hw, read BIOS
+ * tables, configure the transport, boot (and stop) the firmware once to
+ * read its capabilities, then register with mac80211.
+ * Returns the new op_mode, or an ERR_PTR on failure.
+ */
+static struct iwl_op_mode *
+iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+                     const struct iwl_fw *fw, struct dentry *dbgfs_dir)
+{
+       struct ieee80211_hw *hw;
+       struct iwl_op_mode *op_mode;
+       struct iwl_mld *mld;
+       u32 eckv_value;
+       int ret;
+
+       /* Allocate and initialize a new hardware device */
+       hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
+                               sizeof(struct iwl_mld),
+                               &iwl_mld_hw_ops);
+       if (!hw)
+               return ERR_PTR(-ENOMEM);
+
+       /* hw->priv holds the op_mode; mld lives in op_mode_specific
+        * (see IWL_OP_MODE_GET_MLD / IWL_MAC80211_GET_MLD)
+        */
+       op_mode = hw->priv;
+
+       op_mode->ops = &iwl_mld_ops;
+
+       mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+       iwl_construct_mld(mld, trans, cfg, fw, hw, dbgfs_dir);
+
+       iwl_mld_construct_fw_runtime(mld, trans, fw, dbgfs_dir);
+
+       /* Read platform (BIOS/UEFI) configuration tables */
+       iwl_mld_get_bios_tables(mld);
+       iwl_uefi_get_sgom_table(trans, &mld->fwrt);
+       iwl_uefi_get_step_table(trans);
+       if (iwl_bios_get_eckv(&mld->fwrt, &eckv_value))
+               IWL_DEBUG_RADIO(mld, "ECKV table doesn't exist in BIOS\n");
+       else
+               trans->ext_32khz_clock_valid = !!eckv_value;
+       iwl_bios_setup_step(trans, &mld->fwrt);
+       mld->bios_enable_puncturing = iwl_uefi_get_puncturing(&mld->fwrt);
+
+       /* Configure transport layer with the opmode specific params */
+       iwl_mld_configure_trans(op_mode);
+
+       /* Needed for sending commands */
+       wiphy_lock(mld->wiphy);
+
+       /* Boot the fw; retry as the first attempt may fail transiently */
+       for (int i = 0; i < NUM_FW_LOAD_RETRIES; i++) {
+               ret = iwl_mld_load_fw(mld);
+               if (!ret)
+                       break;
+       }
+
+       if (ret) {
+               wiphy_unlock(mld->wiphy);
+               iwl_fw_flush_dumps(&mld->fwrt);
+               /* NOTE(review): this path skips free_nvm — verify that
+                * iwl_mld_load_fw frees any nvm_data it allocated on failure,
+                * otherwise it leaks here.
+                */
+               goto free_hw;
+       }
+
+       /* The fw was booted only to read its capabilities/NVM; stop it
+        * again — mac80211 will start it when the interface comes up.
+        */
+       iwl_mld_stop_fw(mld);
+
+       wiphy_unlock(mld->wiphy);
+
+       ret = iwl_mld_leds_init(mld);
+       if (ret)
+               goto free_nvm;
+
+       ret = iwl_mld_alloc_scan_cmd(mld);
+       if (ret)
+               goto leds_exit;
+
+       ret = iwl_mld_low_latency_init(mld);
+       if (ret)
+               goto free_scan_cmd;
+
+       ret = iwl_mld_register_hw(mld);
+       if (ret)
+               goto low_latency_free;
+
+       iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant);
+
+       iwl_mld_add_debugfs_files(mld, dbgfs_dir);
+       iwl_mld_thermal_initialize(mld);
+
+       iwl_mld_ptp_init(mld);
+
+       return op_mode;
+
+/* Error unwind: release resources in reverse order of acquisition */
+low_latency_free:
+       iwl_mld_low_latency_free(mld);
+free_scan_cmd:
+       kfree(mld->scan.cmd);
+leds_exit:
+       iwl_mld_leds_exit(mld);
+free_nvm:
+       kfree(mld->nvm_data);
+free_hw:
+       ieee80211_free_hw(mld->hw);
+       return ERR_PTR(ret);
+}
+
+/* Tear down the op mode: unregister from mac80211, leave the transport
+ * and free all op-mode-owned allocations (reverse of _start above).
+ */
+static void
+iwl_op_mode_mld_stop(struct iwl_op_mode *op_mode)
+{
+       struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+       iwl_mld_ptp_remove(mld);
+       iwl_mld_leds_exit(mld);
+
+       /* These teardown steps run under the wiphy lock */
+       wiphy_lock(mld->wiphy);
+       iwl_mld_thermal_exit(mld);
+       iwl_mld_low_latency_stop(mld);
+       iwl_mld_deinit_time_sync(mld);
+       wiphy_unlock(mld->wiphy);
+
+       ieee80211_unregister_hw(mld->hw);
+
+       iwl_fw_runtime_free(&mld->fwrt);
+       iwl_mld_low_latency_free(mld);
+
+       iwl_trans_op_mode_leave(mld->trans);
+
+       kfree(mld->nvm_data);
+       kfree(mld->scan.cmd);
+       kfree(mld->error_recovery_buf);
+       kfree(mld->mcast_filter_cmd);
+
+       /* Frees mld itself (it lives inside hw->priv) */
+       ieee80211_free_hw(mld->hw);
+}
+
+/* Handle a transport notification that a hw queue became full/not-full.
+ * Maps the hw queue to its mac80211 txq (under RCU) and updates the
+ * per-txq stop state; internal (unmapped) queues stop/wake all queues.
+ */
+static void iwl_mld_queue_state_change(struct iwl_op_mode *op_mode,
+                                      int hw_queue, bool queue_full)
+{
+       struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+       struct ieee80211_txq *txq;
+       struct iwl_mld_sta *mld_sta;
+       struct iwl_mld_txq *mld_txq;
+
+       rcu_read_lock();
+
+       txq = rcu_dereference(mld->fw_id_to_txq[hw_queue]);
+       if (!txq) {
+               /* Done with the RCU-protected map before the early return */
+               rcu_read_unlock();
+
+               if (queue_full) {
+                       /* An internal queue is not expected to become full */
+                       IWL_WARN(mld,
+                                "Internal hw_queue %d is full! stopping all queues\n",
+                                hw_queue);
+                       /* Stop all queues, as an internal queue is not
+                        * mapped to a mac80211 one
+                        */
+                       ieee80211_stop_queues(mld->hw);
+               } else {
+                       ieee80211_wake_queues(mld->hw);
+               }
+
+               return;
+       }
+
+       mld_txq = iwl_mld_txq_from_mac80211(txq);
+       mld_sta = txq->sta ? iwl_mld_sta_from_mac80211(txq->sta) : NULL;
+
+       mld_txq->status.stop_full = queue_full;
+
+       /* Queue was just un-stopped: push pending frames for a live station */
+       if (!queue_full && mld_sta &&
+           mld_sta->sta_state != IEEE80211_STA_NOTEXIST) {
+               local_bh_disable();
+               iwl_mld_tx_from_txq(mld, txq);
+               local_bh_enable();
+       }
+
+       rcu_read_unlock();
+}
+
+/* Transport callback: hw_queue can no longer accept frames */
+static void
+iwl_mld_queue_full(struct iwl_op_mode *op_mode, int hw_queue)
+{
+       iwl_mld_queue_state_change(op_mode, hw_queue, true);
+}
+
+/* Transport callback: hw_queue has room for frames again */
+static void
+iwl_mld_queue_not_full(struct iwl_op_mode *op_mode, int hw_queue)
+{
+       iwl_mld_queue_state_change(op_mode, hw_queue, false);
+}
+
+/* Transport callback: the hw rfkill switch changed state.
+ * Always returns false here; see struct iwl_op_mode_ops (iwl-op-mode.h)
+ * for the meaning of the return value.
+ */
+static bool
+iwl_mld_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+{
+       struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+       iwl_mld_set_hwkill(mld, state);
+
+       return false;
+}
+
+/* Transport callback: free an skb the transport could not transmit.
+ * driver_data[1] holds the transport tx command (presumably stored by the
+ * tx path — matches cb_data_offs in iwl_mld_configure_trans).
+ */
+static void
+iwl_mld_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+       struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+       iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
+       ieee80211_free_txskb(mld->hw, skb);
+}
+
+/* Read the firmware's error recovery buffer from device memory into a
+ * fresh allocation stored in mld->error_recovery_buf; it is sent back to
+ * the fw during the reconfig flow. Best-effort: failures leave the field
+ * untouched. GFP_ATOMIC since this runs in the error/restart path.
+ */
+static void iwl_mld_read_error_recovery_buffer(struct iwl_mld *mld)
+{
+       u32 src_size = mld->fw->ucode_capa.error_log_size;
+       u32 src_addr = mld->fw->ucode_capa.error_log_addr;
+       u8 *recovery_buf;
+       int ret;
+
+       /* no recovery buffer size defined in a TLV */
+       if (!src_size)
+               return;
+
+       recovery_buf = kzalloc(src_size, GFP_ATOMIC);
+       if (!recovery_buf)
+               return;
+
+       ret = iwl_trans_read_mem_bytes(mld->trans, src_addr,
+                                      recovery_buf, src_size);
+       if (ret) {
+               IWL_ERR(mld, "Failed to read error recovery buffer (%d)\n",
+                       ret);
+               kfree(recovery_buf);
+               return;
+       }
+
+       /* NOTE(review): if error_recovery_buf was already set (back-to-back
+        * errors), the previous buffer would leak here — verify callers
+        * free/clear it before a second error can be processed.
+        */
+       mld->error_recovery_buf = recovery_buf;
+}
+
+/* Kick off a full restart through mac80211 after a fw error: snapshot the
+ * fw recovery buffer first, then ask mac80211 to restart the hw.
+ */
+static void iwl_mld_restart_nic(struct iwl_mld *mld)
+{
+       iwl_mld_read_error_recovery_buffer(mld);
+
+       /* The restart is being handled now; clear the debug-TLV request */
+       mld->fwrt.trans->dbg.restart_required = false;
+
+       ieee80211_restart_hw(mld->hw);
+}
+
+/* Transport callback: the device hit a fatal error. Dump fw error logs
+ * (unless suppressed or the transport is already dead), abort any ongoing
+ * scan, and mark that a hw restart is in progress.
+ */
+static void
+iwl_mld_nic_error(struct iwl_op_mode *op_mode,
+                 enum iwl_fw_error_type type)
+{
+       struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+       bool trans_dead = test_bit(STATUS_TRANS_DEAD, &mld->trans->status);
+
+       if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL)
+               IWL_ERR(mld, "Command queue full!\n");
+       else if (!trans_dead && !mld->fw_status.do_not_dump_once)
+               iwl_fwrt_dump_error_logs(&mld->fwrt);
+
+       /* one-shot suppression flag — consume it */
+       mld->fw_status.do_not_dump_once = false;
+
+       /* It is necessary to abort any os scan here because mac80211 requires
+        * having the scan cleared before restarting.
+        * We'll reset the scan_status to NONE in restart cleanup in
+        * the next drv_start() call from mac80211. If ieee80211_hw_restart
+        * isn't called scan status will stay busy.
+        */
+       iwl_mld_report_scan_aborted(mld);
+
+       /*
+        * This should be first thing before trying to collect any
+        * data to avoid endless loops if any HW error happens while
+        * collecting debug data.
+        * It might not actually be true that we'll restart, but the
+        * setting doesn't matter if we're going to be unbound either.
+        * NOTE(review): the comment says "first thing" but this runs after
+        * the log dump above — confirm whether the intended ordering holds
+        * (debug collection proper happens in the dump_error op).
+        */
+       if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT)
+               mld->fw_status.in_hw_restart = true;
+}
+
+/* Transport callback: collect a fw error dump. Takes the wiphy lock
+ * unless the caller (op mode context) already holds it.
+ */
+static void iwl_mld_dump_error(struct iwl_op_mode *op_mode,
+                              struct iwl_fw_error_dump_mode *mode)
+{
+       struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+       /* if we come in from opmode we have the mutex held */
+       if (mode->context == IWL_ERR_CONTEXT_FROM_OPMODE) {
+               lockdep_assert_wiphy(mld->wiphy);
+               iwl_fw_error_collect(&mld->fwrt);
+       } else {
+               wiphy_lock(mld->wiphy);
+               /* an aborted dump collects nothing, but still syncs on the lock */
+               if (mode->context != IWL_ERR_CONTEXT_ABORT)
+                       iwl_fw_error_collect(&mld->fwrt);
+               wiphy_unlock(mld->wiphy);
+       }
+}
+
+/* Transport callback: decide whether to handle a fw error with a restart.
+ * Returns true if a restart was initiated, false otherwise.
+ */
+static bool iwl_mld_sw_reset(struct iwl_op_mode *op_mode,
+                            enum iwl_fw_error_type type)
+{
+       struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+       /* Do restart only if the following conditions are met:
+        * - we consider the FW as running
+        * - The trigger that brought us here is defined as one that requires
+        *   a restart (in the debug TLVs)
+        */
+       if (!mld->fw_status.running || !mld->fwrt.trans->dbg.restart_required)
+               return false;
+
+       iwl_mld_restart_nic(mld);
+       return true;
+}
+
+/* Transport callback: forward a debug time-point to the debug-TLV machinery */
+static void
+iwl_mld_time_point(struct iwl_op_mode *op_mode,
+                  enum iwl_fw_ini_time_point tp_id,
+                  union iwl_dbg_tlv_tp_data *tp_data)
+{
+       struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+       iwl_dbg_tlv_time_point(&mld->fwrt, tp_id, tp_data);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* Transport callback: the platform cut power during suspend, so the fw
+ * state is gone — disable platform PM and stop the (dead) firmware.
+ */
+static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode)
+{
+       struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+       wiphy_lock(mld->wiphy);
+       mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+       iwl_mld_stop_fw(mld);
+       mld->fw_status.in_d3 = false;
+       wiphy_unlock(mld->wiphy);
+}
+#else
+/* No-op stub when PM sleep support is compiled out */
+static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode)
+{}
+#endif
+
+/* Op mode ops registered with the iwlwifi core (see iwl-op-mode.h) */
+static const struct iwl_op_mode_ops iwl_mld_ops = {
+       .start = iwl_op_mode_mld_start,
+       .stop = iwl_op_mode_mld_stop,
+       .rx = iwl_mld_rx,
+       .rx_rss = iwl_mld_rx_rss,
+       .queue_full = iwl_mld_queue_full,
+       .queue_not_full = iwl_mld_queue_not_full,
+       .hw_rf_kill = iwl_mld_set_hw_rfkill_state,
+       .free_skb = iwl_mld_free_skb,
+       .nic_error = iwl_mld_nic_error,
+       .dump_error = iwl_mld_dump_error,
+       .sw_reset = iwl_mld_sw_reset,
+       .time_point = iwl_mld_time_point,
+       /* NULL when CONFIG_PM_SLEEP is off (stub compiled out via pm_sleep_ptr) */
+       .device_powered_off = pm_sleep_ptr(iwl_mld_device_powered_off),
+};
+
+/* Module parameters; power_scheme defaults to balanced power save (2) */
+struct iwl_mld_mod_params iwlmld_mod_params = {
+       .power_scheme = IWL_POWER_SCHEME_BPS,
+};
+
+module_param_named(power_scheme, iwlmld_mod_params.power_scheme, int, 0444);
+MODULE_PARM_DESC(power_scheme,
+                "power management scheme: 1-active, 2-balanced, default: 2");
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_h__
+#define __iwl_mld_h__
+
+#include <linux/leds.h>
+#include <net/mac80211.h>
+
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "fw/runtime.h"
+#include "fw/notif-wait.h"
+#include "fw/api/commands.h"
+#include "fw/api/scan.h"
+#include "fw/api/mac-cfg.h"
+#include "fw/api/mac.h"
+#include "fw/api/phy-ctxt.h"
+#include "fw/api/datapath.h"
+#include "fw/api/rx.h"
+#include "fw/api/rs.h"
+#include "fw/api/context.h"
+#include "fw/api/coex.h"
+#include "fw/api/location.h"
+
+#include "fw/dbg.h"
+
+#include "notif.h"
+#include "scan.h"
+#include "rx.h"
+#include "thermal.h"
+#include "low_latency.h"
+#include "constants.h"
+#include "ptp.h"
+#include "time_sync.h"
+
+/**
+ * DOC: Introduction
+ *
+ * iwlmld is an operation mode (a.k.a. op_mode) for Intel wireless devices.
+ * It is used for devices that ship after 2024 which typically support
+ * the WiFi-7 features. MLD stands for multi-link device. Note that there are
+ * devices that do not support WiFi-7 or even WiFi 6E and yet use iwlmld, but
+ * the firmware APIs used in this driver are WiFi-7 compatible.
+ *
+ * In the architecture of iwlwifi, an op_mode is a layer that translates
+ * mac80211's APIs into commands for the firmware and, of course, notifications
+ * from the firmware to mac80211's APIs. An op_mode must implement the
+ * interface defined in iwl-op-mode.h to interact with the transport layer
+ * which allows to send and receive data to the device, start the hardware,
+ * etc...
+ */
+
+/**
+ * DOC: Locking policy
+ *
+ * iwlmld has a very simple locking policy: it doesn't have any mutexes. It
+ * relies on cfg80211's wiphy->mtx and takes the lock when needed. All the
+ * control flows originating from mac80211 already acquired the lock, so that
+ * part is trivial, but also notifications that are received from the firmware
+ * and handled asynchronously are handled only after having taken the lock.
+ * This is described in notif.c.
+ * There are spin_locks needed to synchronize with the data path, around the
+ * allocation of the queues, for example.
+ */
+
+/**
+ * DOC: Debugfs
+ *
+ * iwlmld adds its share of debugfs hooks and its handlers are synchronized
+ * with the wiphy_lock using wiphy_locked_debugfs. This avoids races against
+ * resource deletion while a debugfs hook is in use.
+ */
+
+/**
+ * DOC: Main resources
+ *
+ * iwlmld is designed with the life cycle of the resource in mind. The
+ * resources are:
+ *
+ *  - struct iwl_mld (matches mac80211's struct ieee80211_hw)
+ *
+ *  - struct iwl_mld_vif (matches mac80211's struct ieee80211_vif)
+ *    iwl_mld_vif contains an array of pointers to struct iwl_mld_link
+ *    which describe the links for this vif.
+ *
+ *  - struct iwl_mld_sta (matches mac80211's struct ieee80211_sta)
+ *    iwl_mld_sta contains an array of pointers to struct iwl_mld_link_sta
+ *    which describes the link stations for this station
+ *
+ * Each object has properties that can survive a firmware reset or not.
+ * Asynchronous firmware notifications can declare themselves as dependent on a
+ * certain instance of those resources and that means that the notifications
+ * will be cancelled once the instance is destroyed.
+ */
+
+/* Max number of device MAC addresses exposed to mac80211 (see @addresses) */
+#define IWL_MLD_MAX_ADDRESSES          5
+
+/**
+ * struct iwl_mld - MLD op mode
+ *
+ * @fw_id_to_bss_conf: maps a fw id of a link to the corresponding
+ *     ieee80211_bss_conf.
+ * @fw_id_to_vif: maps a fw id of a MAC context to the corresponding
+ *     ieee80211_vif. Mapping is valid only when the MAC exists in the fw.
+ * @fw_id_to_txq: maps a fw id of a txq to the corresponding
+ *     ieee80211_txq.
+ * @used_phy_ids: a bitmap of the phy IDs used. If a bit is set, it means
+ *     that the index of this bit is already used as a PHY id.
+ * @num_igtks: the number of iGTKs that were sent to the FW.
+ * @monitor: monitor related data
+ * @monitor.on: does a monitor vif exist (singleton hence bool)
+ * @monitor.ampdu_ref: the id of the A-MPDU for sniffer
+ * @monitor.ampdu_toggle: the state of the previous packet to track A-MPDU
+ * @monitor.cur_aid: current association id tracked by the sniffer
+ * @monitor.cur_bssid: current bssid tracked by the sniffer
+ * @monitor.p80: primary channel position relative to the whole bandwidth, in
+ * steps of 80 MHz
+ * @fw_id_to_link_sta: maps a fw id of a sta to the corresponding
+ *     ieee80211_link_sta. This is not cleaned up on restart since we want to
+ *     preserve the fw sta ids during a restart (for SN/PN restoring).
+ *     FW ids of internal stations will be mapped to ERR_PTR, and will be
+ *     re-allocated during a restart, so make sure to free it in restart
+ *     cleanup using iwl_mld_free_internal_sta
+ * @netdetect: indicates the FW is in suspend mode with netdetect configured
+ * @p2p_device_vif: points to the p2p device vif if exists
+ * @bt_is_active: indicates that BT is active
+ * @dev: pointer to device struct. For printing purposes
+ * @trans: pointer to the transport layer
+ * @cfg: pointer to the device configuration
+ * @fw: a pointer to the fw object
+ * @hw: pointer to the hw object.
+ * @wiphy: a pointer to the wiphy struct, for easier access to it.
+ * @nvm_data: pointer to the nvm_data that includes all our capabilities
+ * @fwrt: fw runtime data
+ * @debugfs_dir: debugfs directory
+ * @notif_wait: notification wait related data.
+ * @async_handlers_list: a list of all async RX handlers. When a notification
+ *     with an async handler is received, it is added to this list.
+ *     When &async_handlers_wk runs - it runs these handlers one by one.
+ * @async_handlers_lock: a lock for &async_handlers_list. Sync
+ *     &async_handlers_wk and the RX notification path.
+ * @async_handlers_wk: A work to run all async RX handlers from
+ *     &async_handlers_list.
+ * @ct_kill_exit_wk: worker to exit thermal kill
+ * @fw_status: bitmap of fw status bits
+ * @running: true if the firmware is running
+ * @do_not_dump_once: true if firmware dump must be prevented once
+ * @in_d3: indicates FW is in suspend mode and should be resumed
+ * @in_hw_restart: indicates that we are currently in the restart flow;
+ *     should be unset once the restart completes.
+ * @radio_kill: bitmap of radio kill status
+ * @radio_kill.hw: radio is killed by hw switch
+ * @radio_kill.ct: radio is killed because the device is too hot
+ * @addresses: device MAC addresses.
+ * @scan: instance of the scan object
+ * @wowlan: WoWLAN support data.
+ * @led: the led device
+ * @mcc_src: the source id of the MCC, comes from the firmware
+ * @bios_enable_puncturing: is puncturing enabled by bios
+ * @fw_id_to_ba: maps a fw (BA) id to a corresponding Block Ack session data.
+ * @num_rx_ba_sessions: tracks the number of active Rx Block Ack (BA) sessions.
+ *     the driver ensures that new BA sessions are blocked once the maximum
+ *     supported by the firmware is reached, preventing firmware asserts.
+ * @rxq_sync: manages RX queue sync state
+ * @txqs_to_add: a list of &ieee80211_txq's to allocate in &add_txqs_wk
+ * @add_txqs_wk: a worker to allocate txqs.
+ * @add_txqs_lock: to lock the &txqs_to_add list.
+ * @error_recovery_buf: pointer to the recovery buffer that will be read
+ *     from firmware upon fw/hw error and sent back to the firmware in
+ *     reconfig flow (after NIC reset).
+ * @mcast_filter_cmd: pointer to the multicast filter command.
+ * @mgmt_tx_ant: stores the last TX antenna index; used for setting
+ *     TX rate_n_flags for non-STA mgmt frames (toggles on every TX failure).
+ * @low_latency: low-latency manager.
+ * @tzone: thermal zone device's data
+ * @cooling_dev: cooling device's related data
+ * @ibss_manager: in IBSS mode (only one vif can be active), indicates what
+ *     firmware indicated about having transmitted the last beacon, i.e.
+ *     being IBSS manager for that time and needing to respond to probe
+ *     requests
+ * @ptp_data: data of the PTP clock
+ * @time_sync: time sync data.
+ * @ftm_initiator: FTM initiator data
+ */
+struct iwl_mld {
+       /* Add here fields that need clean up on restart */
+       struct_group(zeroed_on_hw_restart,
+               struct ieee80211_bss_conf __rcu *fw_id_to_bss_conf[IWL_FW_MAX_LINK_ID + 1];
+               struct ieee80211_vif __rcu *fw_id_to_vif[NUM_MAC_INDEX_DRIVER];
+               struct ieee80211_txq __rcu *fw_id_to_txq[IWL_MAX_TVQM_QUEUES];
+               u8 used_phy_ids: NUM_PHY_CTX;
+               u8 num_igtks;
+               struct {
+                       bool on;
+                       u32 ampdu_ref;
+                       bool ampdu_toggle;
+                       u8 p80;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+                       __le16 cur_aid;
+                       u8 cur_bssid[ETH_ALEN];
+#endif
+               } monitor;
+#ifdef CONFIG_PM_SLEEP
+               bool netdetect;
+#endif /* CONFIG_PM_SLEEP */
+               struct ieee80211_vif *p2p_device_vif;
+               bool bt_is_active;
+       );
+       /* Deliberately outside the zeroed group: fw sta ids must be
+        * preserved across a restart (see the kernel-doc above)
+        */
+       struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX];
+       /* And here fields that survive a fw restart */
+       struct device *dev;
+       struct iwl_trans *trans;
+       const struct iwl_cfg *cfg;
+       const struct iwl_fw *fw;
+       struct ieee80211_hw *hw;
+       struct wiphy *wiphy;
+       struct iwl_nvm_data *nvm_data;
+       struct iwl_fw_runtime fwrt;
+       struct dentry *debugfs_dir;
+       struct iwl_notif_wait_data notif_wait;
+       struct list_head async_handlers_list;
+       spinlock_t async_handlers_lock;
+       struct wiphy_work async_handlers_wk;
+       struct wiphy_delayed_work ct_kill_exit_wk;
+
+       struct {
+               u32 running:1,
+                   do_not_dump_once:1,
+#ifdef CONFIG_PM_SLEEP
+                   in_d3:1,
+#endif
+                   in_hw_restart:1;
+
+       } fw_status;
+
+       struct {
+               u32 hw:1,
+                   ct:1;
+       } radio_kill;
+
+       struct mac_address addresses[IWL_MLD_MAX_ADDRESSES];
+       struct iwl_mld_scan scan;
+#ifdef CONFIG_PM_SLEEP
+       struct wiphy_wowlan_support wowlan;
+#endif /* CONFIG_PM_SLEEP */
+#ifdef CONFIG_IWLWIFI_LEDS
+       struct led_classdev led;
+#endif
+       enum iwl_mcc_source mcc_src;
+       bool bios_enable_puncturing;
+
+       struct iwl_mld_baid_data __rcu *fw_id_to_ba[IWL_MAX_BAID];
+       u8 num_rx_ba_sessions;
+
+       struct iwl_mld_rx_queues_sync rxq_sync;
+
+       struct list_head txqs_to_add;
+       struct wiphy_work add_txqs_wk;
+       spinlock_t add_txqs_lock;
+
+       u8 *error_recovery_buf;
+       struct iwl_mcast_filter_cmd *mcast_filter_cmd;
+
+       u8 mgmt_tx_ant;
+
+       struct iwl_mld_low_latency low_latency;
+
+       bool ibss_manager;
+#ifdef CONFIG_THERMAL
+       struct thermal_zone_device *tzone;
+       struct iwl_mld_cooling_device cooling_dev;
+#endif
+
+       struct ptp_data ptp_data;
+
+       struct iwl_mld_time_sync_data __rcu *time_sync;
+
+       struct {
+               struct cfg80211_pmsr_request *req;
+               struct wireless_dev *req_wdev;
+               int responses[IWL_TOF_MAX_APS];
+       } ftm_initiator;
+};
+
+/* memset the zeroed_on_hw_restart struct_group of _ptr; works for any
+ * struct declaring such a group (struct iwl_mld, struct iwl_mld_scan, ...)
+ */
+#define CLEANUP_STRUCT(_ptr)                             \
+       memset((void *)&(_ptr)->zeroed_on_hw_restart, 0, \
+              sizeof((_ptr)->zeroed_on_hw_restart))
+
+/* Cleanup function for struct iwl_mld, called in the restart flow */
+static inline void
+iwl_cleanup_mld(struct iwl_mld *mld)
+{
+       CLEANUP_STRUCT(mld);
+       CLEANUP_STRUCT(&mld->scan);
+
+       mld->fw_status.in_d3 = false;
+
+       iwl_mld_low_latency_restart_cleanup(mld);
+
+       /* Empty the list of async notification handlers so we won't process
+        * notifications from the dead fw after the reconfig flow.
+        */
+       iwl_mld_purge_async_handlers_list(mld);
+}
+
+/* Values for the power_scheme module parameter (see MODULE_PARM_DESC) */
+enum iwl_power_scheme {
+       IWL_POWER_SCHEME_CAM = 1, /* "active" (1) */
+       IWL_POWER_SCHEME_BPS,     /* "balanced" (2), the default */
+};
+
+/**
+ * struct iwl_mld_mod_params - module parameters for iwlmld
+ * @power_scheme: one of enum iwl_power_scheme
+ */
+struct iwl_mld_mod_params {
+       int power_scheme;
+};
+
+/* Single instance, defined in the op mode; exposed via module_param_named */
+extern struct iwl_mld_mod_params iwlmld_mod_params;
+
+/* Extract MLD priv from op_mode */
+#define IWL_OP_MODE_GET_MLD(_iwl_op_mode)              \
+       ((struct iwl_mld *)(_iwl_op_mode)->op_mode_specific)
+
+/* Extract MLD priv from a mac80211 hw (hw->priv holds the op_mode) */
+#define IWL_MAC80211_GET_MLD(_hw)                      \
+       IWL_OP_MODE_GET_MLD((struct iwl_op_mode *)((_hw)->priv))
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void
+iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir);
+#else
+/* No-op stub when debugfs support is compiled out */
+static inline void
+iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir)
+{}
+#endif
+
+/* Firmware load/start/stop entry points */
+int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld);
+int iwl_mld_load_fw(struct iwl_mld *mld);
+void iwl_mld_stop_fw(struct iwl_mld *mld);
+int iwl_mld_start_fw(struct iwl_mld *mld);
+void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags);
+
+/* Record the thermal (CT) kill state and report the combined hw/CT
+ * rfkill state to cfg80211.
+ */
+static inline void iwl_mld_set_ctkill(struct iwl_mld *mld, bool state)
+{
+       bool killed;
+
+       mld->radio_kill.ct = state;
+       killed = mld->radio_kill.hw || mld->radio_kill.ct;
+
+       wiphy_rfkill_set_hw_state(mld->wiphy, killed);
+}
+
+/* Record the hw rfkill switch state and report the combined hw/CT
+ * rfkill state to cfg80211.
+ */
+static inline void iwl_mld_set_hwkill(struct iwl_mld *mld, bool state)
+{
+       bool killed;
+
+       mld->radio_kill.hw = state;
+       killed = mld->radio_kill.hw || mld->radio_kill.ct;
+
+       wiphy_rfkill_set_hw_state(mld->wiphy, killed);
+}
+
+/* TX antenna mask: the fw's valid antennas, restricted by the NVM mask
+ * when NVM data is available and non-zero.
+ */
+static inline u8 iwl_mld_get_valid_tx_ant(const struct iwl_mld *mld)
+{
+       u8 ant = mld->fw->valid_tx_ant;
+
+       if (mld->nvm_data && mld->nvm_data->valid_tx_ant)
+               ant &= mld->nvm_data->valid_tx_ant;
+
+       return ant;
+}
+
+static inline u8 iwl_mld_get_valid_rx_ant(const struct iwl_mld *mld)
+{
+       u8 rx_ant = mld->fw->valid_rx_ant;
+
+       if (mld->nvm_data && mld->nvm_data->valid_rx_ant)
+               rx_ant &= mld->nvm_data->valid_rx_ant;
+
+       return rx_ant;
+}
+
+/* Convert an nl80211 band to the FW PHY band encoding. Unsupported
+ * bands warn once and fall back to PHY_BAND_5.
+ */
+static inline u8 iwl_mld_nl80211_band_to_fw(enum nl80211_band band)
+{
+       switch (band) {
+       case NL80211_BAND_2GHZ:
+               return PHY_BAND_24;
+       case NL80211_BAND_5GHZ:
+               return PHY_BAND_5;
+       case NL80211_BAND_6GHZ:
+               return PHY_BAND_6;
+       default:
+               WARN_ONCE(1, "Unsupported band (%u)\n", band);
+               return PHY_BAND_5;
+       }
+}
+
+/* Convert a FW PHY band encoding to the matching nl80211 band.
+ * Unknown values warn once and fall back to NL80211_BAND_5GHZ.
+ */
+static inline u8 iwl_mld_phy_band_to_nl80211(u8 phy_band)
+{
+       if (phy_band == PHY_BAND_24)
+               return NL80211_BAND_2GHZ;
+       if (phy_band == PHY_BAND_6)
+               return NL80211_BAND_6GHZ;
+
+       WARN_ONCE(phy_band != PHY_BAND_5,
+                 "Unsupported phy band (%u)\n", phy_band);
+       return NL80211_BAND_5GHZ;
+}
+
+/* Map a legacy (CCK/OFDM) HW rate index to a mac80211 rate index.
+ * Returns -1 for CCK on bands above 2.4 GHz, where CCK is not allowed.
+ * On 2.4 GHz OFDM entries are offset by IWL_FIRST_OFDM_RATE since the
+ * rate table there starts with the CCK rates.
+ */
+static inline int
+iwl_mld_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
+                                     enum nl80211_band band)
+{
+       int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+       int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
+       bool is_lb = band == NL80211_BAND_2GHZ;
+
+       if (format == RATE_MCS_LEGACY_OFDM_MSK)
+               return is_lb ? rate + IWL_FIRST_OFDM_RATE : rate;
+
+       /* CCK is not allowed in 5 GHz */
+       return is_lb ? rate : -1;
+}
+
+extern const struct ieee80211_ops iwl_mld_hw_ops;
+
+/**
+ * enum iwl_rx_handler_context: context for Rx handler
+ * @RX_HANDLER_SYNC: this means that it will be called in the Rx path
+ *     which can't acquire the wiphy->mutex.
+ * @RX_HANDLER_ASYNC: If the handler needs to hold wiphy->mutex
+ *     (and only in this case!), it should be set as ASYNC. In that case,
+ *     it will be called from a worker with wiphy->mutex held.
+ */
+enum iwl_rx_handler_context {
+       RX_HANDLER_SYNC,
+       RX_HANDLER_ASYNC,
+};
+
+/**
+ * struct iwl_rx_handler: handler for FW notification
+ * @val_fn: input validation function.
+ * @sizes: an array that maps a version to the expected size.
+ * @fn: the function is called when notification is handled
+ * @cmd_id: command id
+ * @n_sizes: number of elements in &sizes.
+ * @context: see &iwl_rx_handler_context
+ * @obj_type: the type of the object that this handler is related to.
+ *     See &iwl_mld_object_type. Use IWL_MLD_OBJECT_TYPE_NONE if not related.
+ * @cancel: function to cancel the notification. valid only if obj_type is not
+ *     IWL_MLD_OBJECT_TYPE_NONE.
+ */
+struct iwl_rx_handler {
+       /* NOTE(review): which union member is valid presumably depends on
+        * @n_sizes (sizes[] used when n_sizes != 0) - confirm against the
+        * handler registration macros.
+        */
+       union {
+               bool (*val_fn)(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
+               const struct iwl_notif_struct_size *sizes;
+       };
+       void (*fn)(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
+       u16 cmd_id;
+       u8 n_sizes;
+       u8 context;
+       enum iwl_mld_object_type obj_type;
+       bool (*cancel)(struct iwl_mld *mld, struct iwl_rx_packet *pkt,
+                      u32 obj_id);
+};
+
+/**
+ * struct iwl_notif_struct_size: map a notif ver to the expected size
+ *
+ * @size: the size to expect
+ * @ver: the version of the notification
+ */
+struct iwl_notif_struct_size {
+       u32 size:24, ver:8;
+};
+
+/* Exported only so KUnit tests can reach internal tables/helpers */
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+extern const struct iwl_hcmd_arr iwl_mld_groups[];
+/* NOTE(review): "goups" typo is part of the exported identifier; renaming
+ * would break its users, so it is kept as-is.
+ */
+extern const unsigned int global_iwl_mld_goups_size;
+extern const struct iwl_rx_handler iwl_mld_rx_handlers[];
+extern const unsigned int iwl_mld_rx_handlers_num;
+
+bool
+iwl_mld_is_dup(struct iwl_mld *mld, struct ieee80211_sta *sta,
+              struct ieee80211_hdr *hdr,
+              const struct iwl_rx_mpdu_desc *mpdu_desc,
+              struct ieee80211_rx_status *rx_status, int queue);
+
+void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
+                      const struct iwl_cfg *cfg, const struct iwl_fw *fw,
+                      struct ieee80211_hw *hw, struct dentry *dbgfs_dir);
+#endif
+
+#define IWL_MLD_INVALID_FW_ID 0xff
+
+/* Generate an allocator for a FW id -> mac80211 object map. Starting from
+ * a random index (unless IWL_MLD_DIS_RANDOM_FW_ID), find a free slot,
+ * publish the pointer with RCU and return its index in *fw_id.
+ * __builtin_types_compatible_p() is a compile-time constant, so only one
+ * of the arr_sz assignments survives for each instantiation.
+ */
+#define IWL_MLD_ALLOC_FN(_type, _mac80211_type)                                                \
+static int                                                                             \
+iwl_mld_allocate_##_type##_fw_id(struct iwl_mld *mld,                                  \
+                                u8 *fw_id,                             \
+                                struct ieee80211_##_mac80211_type *mac80211_ptr)       \
+{                                                                                      \
+       u8 rand = IWL_MLD_DIS_RANDOM_FW_ID ? 0 : get_random_u8();                       \
+       u8 arr_sz = ARRAY_SIZE(mld->fw_id_to_##_mac80211_type);                         \
+       if (__builtin_types_compatible_p(typeof(*mac80211_ptr),                         \
+                                        struct ieee80211_link_sta))                    \
+               arr_sz = mld->fw->ucode_capa.num_stations;                              \
+       if (__builtin_types_compatible_p(typeof(*mac80211_ptr),                         \
+                                        struct ieee80211_bss_conf))                    \
+               arr_sz = mld->fw->ucode_capa.num_links;                                 \
+       for (int i = 0; i < arr_sz; i++) {                                              \
+               u8 idx = (i + rand) % arr_sz;                                           \
+               if (rcu_access_pointer(mld->fw_id_to_##_mac80211_type[idx]))            \
+                       continue;                                                       \
+               IWL_DEBUG_INFO(mld, "Allocated at index %d / %d\n", idx, arr_sz);       \
+               *fw_id = idx;                                                           \
+               rcu_assign_pointer(mld->fw_id_to_##_mac80211_type[idx], mac80211_ptr);  \
+               return 0;                                                               \
+       }                                                                               \
+       return -ENOSPC;                                                                 \
+}
+
+/* Look up the mac80211 link conf mapped to a FW link id, or NULL (with
+ * a FW-check warning) for out-of-range ids. Requires wiphy mutex held
+ * (wiphy_dereference).
+ */
+static inline struct ieee80211_bss_conf *
+iwl_mld_fw_id_to_link_conf(struct iwl_mld *mld, u8 fw_link_id)
+{
+       if (IWL_FW_CHECK(mld, fw_link_id >= mld->fw->ucode_capa.num_links,
+                        "Invalid fw_link_id: %d\n", fw_link_id))
+               return NULL;
+
+       return wiphy_dereference(mld->wiphy,
+                                mld->fw_id_to_bss_conf[fw_link_id]);
+}
+
+/* One TU is 1024 usec */
+#define MSEC_TO_TU(_msec)      ((_msec) * 1000 / 1024)
+
+void iwl_mld_add_vif_debugfs(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif);
+void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_bss_conf *link_conf,
+                             struct dentry *dir);
+void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw,
+                                 struct ieee80211_vif *vif,
+                                 struct ieee80211_link_sta *link_sta,
+                                 struct dentry *dir);
+
+/* Utilities */
+
+/* Map a mac80211 AC to the FW TX FIFO. No bounds check: the caller must
+ * pass a valid index. NOTE(review): the table also carries the trigger
+ * FIFO entries beyond the four EDCA ACs - presumably callers index past
+ * IEEE80211_NUM_ACS for those; confirm at the call sites.
+ */
+static inline u8 iwl_mld_mac80211_ac_to_fw_tx_fifo(enum ieee80211_ac_numbers ac)
+{
+       static const u8 mac80211_ac_to_fw_tx_fifo[] = {
+               IWL_BZ_EDCA_TX_FIFO_VO,
+               IWL_BZ_EDCA_TX_FIFO_VI,
+               IWL_BZ_EDCA_TX_FIFO_BE,
+               IWL_BZ_EDCA_TX_FIFO_BK,
+               IWL_BZ_TRIG_TX_FIFO_VO,
+               IWL_BZ_TRIG_TX_FIFO_VI,
+               IWL_BZ_TRIG_TX_FIFO_BE,
+               IWL_BZ_TRIG_TX_FIFO_BK,
+       };
+       return mac80211_ac_to_fw_tx_fifo[ac];
+}
+
+/* Pick the LMAC for a band: without CDB support (or on 2.4 GHz) there is
+ * only the 2.4 GHz-indexed LMAC.
+ */
+static inline u32
+iwl_mld_get_lmac_id(struct iwl_mld *mld, enum nl80211_band band)
+{
+       if (!fw_has_capa(&mld->fw->ucode_capa,
+                        IWL_UCODE_TLV_CAPA_CDB_SUPPORT) ||
+           band == NL80211_BAND_2GHZ)
+               return IWL_LMAC_24G_INDEX;
+       return IWL_LMAC_5G_INDEX;
+}
+
+/* Check if we had an error, but reconfig flow didn't start yet */
+static inline bool iwl_mld_error_before_recovery(struct iwl_mld *mld)
+{
+       return mld->fw_status.in_hw_restart &&
+               !iwl_trans_fw_running(mld->trans);
+}
+
+int iwl_mld_tdls_sta_count(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include "mlo.h"
+
+/* Block reasons helper: X-macro over all IWL_MLD_EMLSR_BLOCKED_* reasons.
+ * Applying HOW() per reason keeps the string helpers below in sync with
+ * the enum.
+ */
+#define HANDLE_EMLSR_BLOCKED_REASONS(HOW)      \
+       HOW(PREVENTION)                 \
+       HOW(WOWLAN)                     \
+       HOW(FW)                         \
+       HOW(ROC)                        \
+       HOW(NON_BSS)                    \
+       HOW(TMP_NON_BSS)                \
+       HOW(TPT)
+
+static const char *
+iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked)
+{
+       /* Using switch without "default" will warn about missing entries */
+       switch (blocked) {
+#define REASON_CASE(x) case IWL_MLD_EMLSR_BLOCKED_##x: return #x;
+       HANDLE_EMLSR_BLOCKED_REASONS(REASON_CASE)
+#undef REASON_CASE
+       }
+
+       return "ERROR";
+}
+
+/* Dump all block reasons set in @mask. NAME_FMT expands to one "%s" per
+ * reason; NAME_PR yields the matching argument for each.
+ */
+static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask)
+{
+#define NAME_FMT(x) "%s"
+#define NAME_PR(x) (mask & IWL_MLD_EMLSR_BLOCKED_##x) ? "[" #x "]" : "",
+       IWL_DEBUG_INFO(mld,
+                      "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT)
+                      " (0x%x)\n",
+                      HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR)
+                      mask);
+#undef NAME_FMT
+#undef NAME_PR
+}
+
+/* Exit reasons helper: X-macro over all IWL_MLD_EMLSR_EXIT_* reasons,
+ * mirroring the blocked-reasons helper above.
+ */
+#define HANDLE_EMLSR_EXIT_REASONS(HOW) \
+       HOW(BLOCK)                      \
+       HOW(MISSED_BEACON)              \
+       HOW(FAIL_ENTRY)                 \
+       HOW(CSA)                        \
+       HOW(EQUAL_BAND)                 \
+       HOW(BANDWIDTH)                  \
+       HOW(LOW_RSSI)                   \
+       HOW(LINK_USAGE)                 \
+       HOW(BT_COEX)                    \
+       HOW(CHAN_LOAD)                  \
+       HOW(RFI)
+
+static const char *
+iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit)
+{
+       /* Using switch without "default" will warn about missing entries */
+       switch (exit) {
+#define REASON_CASE(x) case IWL_MLD_EMLSR_EXIT_##x: return #x;
+       HANDLE_EMLSR_EXIT_REASONS(REASON_CASE)
+#undef REASON_CASE
+       }
+
+       return "ERROR";
+}
+
+/* Dump all exit reasons set in @mask (same technique as the blocked dump) */
+static void iwl_mld_print_emlsr_exit(struct iwl_mld *mld, u32 mask)
+{
+#define NAME_FMT(x) "%s"
+#define NAME_PR(x) (mask & IWL_MLD_EMLSR_EXIT_##x) ? "[" #x "]" : "",
+       IWL_DEBUG_INFO(mld,
+                      "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT)
+                      " (0x%x)\n",
+                      HANDLE_EMLSR_EXIT_REASONS(NAME_PR)
+                      mask);
+#undef NAME_FMT
+#undef NAME_PR
+}
+
+/* Wiphy work: runs when the PREVENTION block delay elapses; lifts the
+ * PREVENTION block so EMLSR may be re-entered.
+ */
+void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+       struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
+                                                  emlsr.prevent_done_wk.work);
+       struct ieee80211_vif *vif =
+               container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
+
+       /* This work is only ever queued together with setting the bit */
+       if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
+                     IWL_MLD_EMLSR_BLOCKED_PREVENTION)))
+               return;
+
+       iwl_mld_unblock_emlsr(mld_vif->mld, vif,
+                             IWL_MLD_EMLSR_BLOCKED_PREVENTION);
+}
+
+/* Wiphy work: lifts the temporary non-BSS block when its delay elapses */
+void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
+                                      struct wiphy_work *wk)
+{
+       /* This callback is driven by emlsr.tmp_non_bss_done_wk - using
+        * prevent_done_wk here (copy-paste from the handler above) would
+        * make container_of() compute a bogus mld_vif pointer.
+        */
+       struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
+                                                  emlsr.tmp_non_bss_done_wk.work);
+       struct ieee80211_vif *vif =
+               container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
+
+       if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
+                     IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS)))
+               return;
+
+       iwl_mld_unblock_emlsr(mld_vif->mld, vif,
+                             IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS);
+}
+
+#define IWL_MLD_TRIGGER_LINK_SEL_TIME  (HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC)
+#define IWL_MLD_SCAN_EXPIRE_TIME       (HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC)
+
+/* Exit reasons that can cause longer EMLSR prevention */
+#define IWL_MLD_PREVENT_EMLSR_REASONS  (IWL_MLD_EMLSR_EXIT_MISSED_BEACON | \
+                                        IWL_MLD_EMLSR_EXIT_LINK_USAGE)
+/* Window for counting repeated exits (must exceed PREVENT_LONG below) */
+#define IWL_MLD_PREVENT_EMLSR_TIMEOUT  (HZ * 400)
+
+/* Prevention delays for the 2nd and 3rd repeated exit, respectively */
+#define IWL_MLD_EMLSR_PREVENT_SHORT    (HZ * 300)
+#define IWL_MLD_EMLSR_PREVENT_LONG     (HZ * 600)
+
+/* Record an EMLSR exit and, if it wasn't just a block, arm the PREVENTION
+ * block with a delay that escalates on repeated exits for @reason.
+ */
+static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld,
+                                          struct iwl_mld_vif *mld_vif,
+                                          enum iwl_mld_emlsr_exit reason)
+{
+       unsigned long delay;
+
+       /*
+        * Reset the counter if more than 400 seconds have passed between one
+        * exit and the other, or if we exited due to a different reason.
+        * Will also reset the counter after the long prevention is done.
+        */
+       if (time_after(jiffies, mld_vif->emlsr.last_exit_ts +
+                               IWL_MLD_PREVENT_EMLSR_TIMEOUT) ||
+           mld_vif->emlsr.last_exit_reason != reason)
+               mld_vif->emlsr.exit_repeat_count = 0;
+
+       mld_vif->emlsr.last_exit_reason = reason;
+       mld_vif->emlsr.last_exit_ts = jiffies;
+       mld_vif->emlsr.exit_repeat_count++;
+
+       /*
+        * Do not add a prevention when the reason was a block. For a block,
+        * EMLSR will be enabled again on unblock.
+        */
+       if (reason == IWL_MLD_EMLSR_EXIT_BLOCK)
+               return;
+
+       /* Set prevention for a minimum of 30 seconds */
+       mld_vif->emlsr.blocked_reasons |= IWL_MLD_EMLSR_BLOCKED_PREVENTION;
+       delay = IWL_MLD_TRIGGER_LINK_SEL_TIME;
+
+       /* Handle repeats for reasons that can cause long prevention */
+       if (mld_vif->emlsr.exit_repeat_count > 1 &&
+           reason & IWL_MLD_PREVENT_EMLSR_REASONS) {
+               if (mld_vif->emlsr.exit_repeat_count == 2)
+                       delay = IWL_MLD_EMLSR_PREVENT_SHORT;
+               else
+                       delay = IWL_MLD_EMLSR_PREVENT_LONG;
+
+               /*
+                * The timeouts are chosen so that this will not happen, i.e.
+                * IWL_MLD_EMLSR_PREVENT_LONG > IWL_MLD_PREVENT_EMLSR_TIMEOUT
+                */
+               WARN_ON(mld_vif->emlsr.exit_repeat_count > 3);
+       }
+
+       IWL_DEBUG_INFO(mld,
+                      "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
+                      delay / HZ, mld_vif->emlsr.exit_repeat_count,
+                      iwl_mld_get_emlsr_exit_string(reason), reason);
+
+       wiphy_delayed_work_queue(mld->wiphy,
+                                &mld_vif->emlsr.prevent_done_wk, delay);
+}
+
+/* Leave EMLSR, keeping only @link_to_keep active. When @sync, the link
+ * change is applied synchronously and its return value is propagated;
+ * otherwise it is requested asynchronously and 0 is returned. Also
+ * updates the exit bookkeeping/prevention state.
+ */
+static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                              enum iwl_mld_emlsr_exit exit, u8 link_to_keep,
+                              bool sync)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       u16 new_active_links;
+       int ret = 0;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* On entry failure need to exit anyway, even if entered from debugfs */
+       if (exit != IWL_MLD_EMLSR_EXIT_FAIL_ENTRY && !IWL_MLD_AUTO_EML_ENABLE)
+               return 0;
+
+       /* Ignore exit request if EMLSR is not active */
+       if (!iwl_mld_emlsr_active(vif))
+               return 0;
+
+       if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mld_vif->authorized))
+               return 0;
+
+       /* Fall back to the lowest active link if the requested one is gone */
+       if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
+               link_to_keep = __ffs(vif->active_links);
+
+       new_active_links = BIT(link_to_keep);
+       IWL_DEBUG_INFO(mld,
+                      "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
+                      iwl_mld_get_emlsr_exit_string(exit), exit,
+                      vif->active_links, new_active_links);
+
+       if (sync)
+               ret = ieee80211_set_active_links(vif, new_active_links);
+       else
+               ieee80211_set_active_links_async(vif, new_active_links);
+
+       /* Update latest exit reason and check EMLSR prevention */
+       iwl_mld_check_emlsr_prevention(mld, mld_vif, exit);
+
+       return ret;
+}
+
+/* Asynchronous EMLSR exit (fire-and-forget wrapper) */
+void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                       enum iwl_mld_emlsr_exit exit, u8 link_to_keep)
+{
+       _iwl_mld_exit_emlsr(mld, vif, exit, link_to_keep, false);
+}
+
+/* Add @reason to the EMLSR block mask and exit EMLSR (sync or async).
+ * No-op if the reason is already set or the vif has no EMLSR capability.
+ */
+static int _iwl_mld_emlsr_block(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                               enum iwl_mld_emlsr_blocked reason,
+                               u8 link_to_keep, bool sync)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
+               return 0;
+
+       if (mld_vif->emlsr.blocked_reasons & reason)
+               return 0;
+
+       mld_vif->emlsr.blocked_reasons |= reason;
+
+       IWL_DEBUG_INFO(mld,
+                      "Blocking EMLSR mode. reason = %s (0x%x)\n",
+                      iwl_mld_get_emlsr_blocked_string(reason), reason);
+       iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);
+
+       /* While blocked on TPT the periodic TPT check is pointless */
+       if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
+               wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
+                                         &mld_vif->emlsr.check_tpt_wk);
+
+       return _iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BLOCK,
+                                  link_to_keep, sync);
+}
+
+/* Asynchronous EMLSR block */
+void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                        enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
+{
+       _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, false);
+}
+
+/* Synchronous EMLSR block; returns the result of the link change */
+int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                            enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
+{
+       return iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, true);
+}
+
+static void _iwl_mld_select_links(struct iwl_mld *mld,
+                                 struct ieee80211_vif *vif);
+
+/* Run link selection, going through an MLO scan first unless one finished
+ * recently enough to be trusted.
+ */
+void iwl_mld_trigger_link_selection(struct iwl_mld *mld,
+                                   struct ieee80211_vif *vif)
+{
+       bool last_scan_was_recent =
+               time_before(jiffies, mld->scan.last_mlo_scan_jiffies +
+                                    IWL_MLD_SCAN_EXPIRE_TIME);
+
+       if (last_scan_was_recent) {
+               IWL_DEBUG_EHT(mld, "MLO scan was recent, skip.\n");
+               _iwl_mld_select_links(mld, vif);
+       } else {
+               IWL_DEBUG_EHT(mld, "Doing link selection after MLO scan\n");
+               iwl_mld_int_mlo_scan(mld, vif);
+       }
+}
+
+/* Clear @reason from the EMLSR block mask; when the mask drops to zero,
+ * trigger link selection so EMLSR can be re-entered.
+ */
+void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                          enum iwl_mld_emlsr_blocked reason)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
+               return;
+
+       if (!(mld_vif->emlsr.blocked_reasons & reason))
+               return;
+
+       mld_vif->emlsr.blocked_reasons &= ~reason;
+
+       IWL_DEBUG_INFO(mld,
+                      "Unblocking EMLSR mode. reason = %s (0x%x)\n",
+                      iwl_mld_get_emlsr_blocked_string(reason), reason);
+       iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);
+
+       /* TPT unblocked: re-arm the periodic TPT check */
+       if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
+               wiphy_delayed_work_queue(mld_vif->mld->wiphy,
+                                        &mld_vif->emlsr.check_tpt_wk,
+                                        round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
+
+       if (mld_vif->emlsr.blocked_reasons)
+               return;
+
+       IWL_DEBUG_INFO(mld, "EMLSR is unblocked\n");
+       iwl_mld_trigger_link_selection(mld, vif);
+}
+
+/* Interface iterator: apply a FW EMLSR-mode recommendation to one vif */
+static void
+iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
+                                 struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_esr_mode_notif *notif = (void *)data;
+
+       if (!iwl_mld_vif_has_emlsr_cap(vif))
+               return;
+
+       switch (le32_to_cpu(notif->action)) {
+       case ESR_RECOMMEND_ENTER:
+               iwl_mld_unblock_emlsr(mld_vif->mld, vif,
+                                     IWL_MLD_EMLSR_BLOCKED_FW);
+               break;
+       case ESR_RECOMMEND_LEAVE:
+               iwl_mld_block_emlsr(mld_vif->mld, vif,
+                                   IWL_MLD_EMLSR_BLOCKED_FW,
+                                   iwl_mld_get_primary_link(vif));
+               break;
+       case ESR_FORCE_LEAVE:
+       default:
+               /* ESR_FORCE_LEAVE should not happen at this point */
+               IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n",
+                        le32_to_cpu(notif->action));
+       }
+}
+
+/* FW notification handler: fan the EMLSR mode recommendation out to all
+ * active interfaces.
+ */
+void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
+                                    struct iwl_rx_packet *pkt)
+{
+       ieee80211_iterate_active_interfaces_mtx(mld->hw,
+                                               IEEE80211_IFACE_ITER_NORMAL,
+                                               iwl_mld_vif_iter_emlsr_mode_notif,
+                                               pkt->data);
+}
+
+/* Interface iterator: report connection loss on EMLSR-capable vifs */
+static void
+iwl_mld_vif_iter_disconnect_emlsr(void *data, u8 *mac,
+                                 struct ieee80211_vif *vif)
+{
+       if (!iwl_mld_vif_has_emlsr_cap(vif))
+               return;
+
+       ieee80211_connection_loss(vif);
+}
+
+/* FW notification handler for a failed EMLSR transition. Depending on
+ * what failed: unknown link -> disconnect all EMLSR vifs; failed link
+ * deactivation -> disconnect that vif; failed activation -> fall back to
+ * the single link the FW says is still valid.
+ */
+void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
+                                          struct iwl_rx_packet *pkt)
+{
+       const struct iwl_esr_trans_fail_notif *notif = (const void *)pkt->data;
+       u32 fw_link_id = le32_to_cpu(notif->link_id);
+       struct ieee80211_bss_conf *bss_conf =
+               iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
+
+       IWL_DEBUG_INFO(mld, "Failed to %s EMLSR on link %d (FW: %d), reason %d\n",
+                      le32_to_cpu(notif->activation) ? "enter" : "exit",
+                      bss_conf ? bss_conf->link_id : -1,
+                      le32_to_cpu(notif->link_id),
+                      le32_to_cpu(notif->err_code));
+
+       if (IWL_FW_CHECK(mld, !bss_conf,
+                        "FW reported failure to %sactivate EMLSR on a non-existing link: %d\n",
+                        le32_to_cpu(notif->activation) ? "" : "de",
+                        fw_link_id)) {
+               ieee80211_iterate_active_interfaces_mtx(
+                       mld->hw, IEEE80211_IFACE_ITER_NORMAL,
+                       iwl_mld_vif_iter_disconnect_emlsr, NULL);
+               return;
+       }
+
+       /* Disconnect if we failed to deactivate a link */
+       if (!le32_to_cpu(notif->activation)) {
+               ieee80211_connection_loss(bss_conf->vif);
+               return;
+       }
+
+       /*
+        * We failed to activate the second link, go back to the link specified
+        * by the firmware as that is the one that is still valid now.
+        */
+       iwl_mld_exit_emlsr(mld, bss_conf->vif, IWL_MLD_EMLSR_EXIT_FAIL_ENTRY,
+                          bss_conf->link_id);
+}
+
+/* Active non-station link tracking */
+
+/* Interface iterator: accumulate active link count of non-station vifs */
+static void iwl_mld_count_non_bss_links(void *_data, u8 *mac,
+                                       struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       int *count = _data;
+
+       if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
+               return;
+
+       *count += iwl_mld_count_active_links(mld_vif->mld, vif);
+}
+
+/* Shared state for the block/unblock iteration below.
+ * @block: whether to block (true) or unblock
+ * @result: first non-zero error from a synchronous block, if any
+ */
+struct iwl_mld_update_emlsr_block_data {
+       bool block;
+       int result;
+};
+
+/* Interface iterator: apply/remove the NON_BSS EMLSR block on one vif */
+static void
+iwl_mld_vif_iter_update_emlsr_non_bss_block(void *_data, u8 *mac,
+                                           struct ieee80211_vif *vif)
+{
+       struct iwl_mld_update_emlsr_block_data *data = _data;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       int ret;
+
+       if (data->block) {
+               ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
+                                              IWL_MLD_EMLSR_BLOCKED_NON_BSS,
+                                              iwl_mld_get_primary_link(vif));
+               if (ret)
+                       data->result = ret;
+       } else {
+               iwl_mld_unblock_emlsr(mld_vif->mld, vif,
+                                     IWL_MLD_EMLSR_BLOCKED_NON_BSS);
+       }
+}
+
+int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
+                                     int pending_link_changes)
+{
+       /* An active link of a non-station vif blocks EMLSR. Upon activation
+        * block EMLSR on the bss vif. Upon deactivation, check if this link
+        * was the last non-station link active, and if so unblock the bss vif
+        */
+       struct iwl_mld_update_emlsr_block_data block_data = {};
+       int count = pending_link_changes;
+
+       /* No need to count if we are activating a non-BSS link */
+       if (count <= 0)
+               ieee80211_iterate_active_interfaces_mtx(mld->hw,
+                                                       IEEE80211_IFACE_ITER_NORMAL,
+                                                       iwl_mld_count_non_bss_links,
+                                                       &count);
+
+       /*
+        * We could skip updating it if the block change did not change (and
+        * pending_link_changes is non-zero).
+        */
+       block_data.block = !!count;
+
+       ieee80211_iterate_active_interfaces_mtx(mld->hw,
+                                               IEEE80211_IFACE_ITER_NORMAL,
+                                               iwl_mld_vif_iter_update_emlsr_non_bss_block,
+                                               &block_data);
+
+       return block_data.result;
+}
+
+/* Minimum share of traffic the secondary link must carry, and the total
+ * MPDU counts above which that share is enforced.
+ */
+#define EMLSR_SEC_LINK_MIN_PERC 10
+#define EMLSR_MIN_TX 3000
+#define EMLSR_MIN_RX 400
+
+/* Periodic wiphy work: verify that throughput still justifies EMLSR.
+ * Blocks EMLSR on low overall traffic, exits it when the secondary link
+ * carries too small a share, otherwise re-arms itself.
+ */
+void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+       struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
+                                                  emlsr.check_tpt_wk.work);
+       struct ieee80211_vif *vif =
+               container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
+       struct iwl_mld *mld = mld_vif->mld;
+       struct iwl_mld_sta *mld_sta;
+       struct iwl_mld_link *sec_link;
+       unsigned long total_tx = 0, total_rx = 0;
+       unsigned long sec_link_tx = 0, sec_link_rx = 0;
+       u8 sec_link_tx_perc, sec_link_rx_perc;
+       s8 sec_link_id;
+
+       if (!iwl_mld_vif_has_emlsr_cap(vif) || !mld_vif->ap_sta)
+               return;
+
+       mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);
+
+       /* We only count for the AP sta in a MLO connection */
+       if (!mld_sta->mpdu_counters)
+               return;
+
+       /* This wk should only run when the TPT blocker isn't set.
+        * When the blocker is set, the decision to remove it, as well as
+        * clearing the counters is done in DP (to avoid having a wk every
+        * 5 seconds when idle. When the blocker is unset, we are not idle anyway)
+        */
+       if (WARN_ON(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
+               return;
+       /*
+        * TPT is unblocked, need to check if the TPT criteria is still met.
+        *
+        * If EMLSR is active, then we also need to check the secondary link
+        * requirements.
+        */
+       if (iwl_mld_emlsr_active(vif)) {
+               sec_link_id = iwl_mld_get_other_link(vif, iwl_mld_get_primary_link(vif));
+               sec_link = iwl_mld_link_dereference_check(mld_vif, sec_link_id);
+               if (WARN_ON_ONCE(!sec_link))
+                       return;
+               /* We need the FW ID here */
+               sec_link_id = sec_link->fw_id;
+       } else {
+               sec_link_id = -1;
+       }
+
+       /* Sum up RX and TX MPDUs from the different queues/links */
+       for (int q = 0; q < mld->trans->num_rx_queues; q++) {
+               struct iwl_mld_per_q_mpdu_counter *queue_counter =
+                       &mld_sta->mpdu_counters[q];
+
+               spin_lock_bh(&queue_counter->lock);
+
+               /* Link IDs that don't exist will contain 0 */
+               for (int link = 0;
+                    link < ARRAY_SIZE(queue_counter->per_link);
+                    link++) {
+                       total_tx += queue_counter->per_link[link].tx;
+                       total_rx += queue_counter->per_link[link].rx;
+               }
+
+               if (sec_link_id != -1) {
+                       sec_link_tx += queue_counter->per_link[sec_link_id].tx;
+                       sec_link_rx += queue_counter->per_link[sec_link_id].rx;
+               }
+
+               /* Reset the counters for the next window */
+               memset(queue_counter->per_link, 0,
+                      sizeof(queue_counter->per_link));
+
+               spin_unlock_bh(&queue_counter->lock);
+       }
+
+       IWL_DEBUG_INFO(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
+                      total_tx, total_rx);
+
+       /* If we don't have enough MPDUs - exit EMLSR */
+       if (total_tx < IWL_MLD_ENTER_EMLSR_TPT_THRESH &&
+           total_rx < IWL_MLD_ENTER_EMLSR_TPT_THRESH) {
+               iwl_mld_block_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT,
+                                   iwl_mld_get_primary_link(vif));
+               return;
+       }
+
+       /* EMLSR is not active */
+       if (sec_link_id == -1)
+               return;
+
+       IWL_DEBUG_INFO(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
+                      sec_link_id, sec_link_tx, sec_link_rx);
+
+       /* Calculate the percentage of the secondary link TX/RX */
+       sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
+       sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;
+
+       /*
+        * The TX/RX percentage is checked only if it exceeds the required
+        * minimum. In addition, RX is checked only if the TX check failed.
+        */
+       if ((total_tx > EMLSR_MIN_TX &&
+            sec_link_tx_perc < EMLSR_SEC_LINK_MIN_PERC) ||
+           (total_rx > EMLSR_MIN_RX &&
+            sec_link_rx_perc < EMLSR_SEC_LINK_MIN_PERC)) {
+               iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LINK_USAGE,
+                                  iwl_mld_get_primary_link(vif));
+               return;
+       }
+
+       /* Check again when the next window ends */
+       wiphy_delayed_work_queue(mld_vif->mld->wiphy,
+                                &mld_vif->emlsr.check_tpt_wk,
+                                round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
+}
+
+/* Delayed-work handler that lifts the IWL_MLD_EMLSR_BLOCKED_TPT block
+ * (set by the TPT check when MPDU counts were too low), allowing link
+ * selection to consider EMLSR again.
+ */
+void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+       struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
+                                                  emlsr.unblock_tpt_wk);
+       /* drv_priv is the first member storage of the vif, so recover it */
+       struct ieee80211_vif *vif =
+               container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
+
+       iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT);
+}
+
+/*
+ * Link selection
+ */
+/* Per-link data collected for the link selection algorithm below */
+struct iwl_mld_link_sel_data {
+       u8 link_id;
+       /* operating channel definition of the link */
+       const struct cfg80211_chan_def *chandef;
+       /* BSS signal in dBm (converted from the mBm value cfg80211 tracks) */
+       s32 signal;
+       /* link grade from iwl_mld_get_link_grade(); higher is better */
+       u16 grade;
+};
+
+/*
+ * Return the RSSI threshold (dBm) used to judge whether a link is good
+ * enough for EMLSR, per band/bandwidth.
+ * @low selects the lower of the two per-bandwidth thresholds.
+ * Returns S8_MAX (a threshold that can never be met) on unsupported
+ * bands/widths, after warning.
+ */
+s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
+                                const struct cfg80211_chan_def *chandef,
+                                bool low)
+{
+       if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
+                   chandef->chan->band != NL80211_BAND_5GHZ &&
+                   chandef->chan->band != NL80211_BAND_6GHZ))
+               return S8_MAX;
+
+#define RSSI_THRESHOLD(_low, _bw)                      \
+       (_low) ? IWL_MLD_LOW_RSSI_THRESH_##_bw##MHZ     \
+              : IWL_MLD_HIGH_RSSI_THRESH_##_bw##MHZ
+
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_20_NOHT:
+       case NL80211_CHAN_WIDTH_20:
+       /* 320 MHz has the same thresholds as 20 MHz */
+       case NL80211_CHAN_WIDTH_320:
+               return RSSI_THRESHOLD(low, 20);
+       case NL80211_CHAN_WIDTH_40:
+               return RSSI_THRESHOLD(low, 40);
+       case NL80211_CHAN_WIDTH_80:
+               return RSSI_THRESHOLD(low, 80);
+       case NL80211_CHAN_WIDTH_160:
+               return RSSI_THRESHOLD(low, 160);
+       default:
+               WARN_ON(1);
+               return S8_MAX;
+       }
+#undef RSSI_THRESHOLD
+}
+
+/*
+ * Check whether @link may participate in EMLSR, as primary or secondary.
+ *
+ * Returns a bitmask of iwl_mld_emlsr_exit reasons that forbid EMLSR with
+ * this link, or 0 if the link is acceptable.
+ */
+static u32
+iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
+                                  struct ieee80211_vif *vif,
+                                  struct iwl_mld_link_sel_data *link,
+                                  bool primary)
+{
+       struct wiphy *wiphy = mld->wiphy;
+       struct ieee80211_bss_conf *conf;
+       enum iwl_mld_emlsr_exit ret = 0;
+
+       conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
+       if (WARN_ON_ONCE(!conf))
+               return 0;
+
+       /* Avoid EMLSR on 2.4 GHz while BT is active (coex) */
+       if (link->chandef->chan->band == NL80211_BAND_2GHZ && mld->bt_is_active)
+               ret |= IWL_MLD_EMLSR_EXIT_BT_COEX;
+
+       if (link->signal <
+           iwl_mld_get_emlsr_rssi_thresh(mld, link->chandef, false))
+               ret |= IWL_MLD_EMLSR_EXIT_LOW_RSSI;
+
+       if (conf->csa_active)
+               ret |= IWL_MLD_EMLSR_EXIT_CSA;
+
+       if (ret) {
+               IWL_DEBUG_INFO(mld,
+                              "Link %d is not allowed for EMLSR as %s\n",
+                              link->link_id,
+                              primary ? "primary" : "secondary");
+               iwl_mld_print_emlsr_exit(mld, ret);
+       }
+
+       return ret;
+}
+
+/*
+ * Fill @data with one entry per usable link that was seen recently, and
+ * record the index of the highest-graded entry in @best_link_idx.
+ * Returns the number of entries filled.
+ */
+static u8
+iwl_mld_set_link_sel_data(struct iwl_mld *mld,
+                         struct ieee80211_vif *vif,
+                         struct iwl_mld_link_sel_data *data,
+                         unsigned long usable_links,
+                         u8 *best_link_idx)
+{
+       u8 n_data = 0;
+       u16 max_grade = 0;
+       unsigned long link_id;
+
+       /*
+        * TODO: don't select links that weren't discovered in the last scan
+        * This requires mac80211 (or cfg80211) changes to forward/track when
+        * a BSS was last updated. cfg80211 already tracks this information but
+        * it is not exposed within the kernel.
+        */
+       for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+               struct ieee80211_bss_conf *link_conf =
+                       link_conf_dereference_protected(vif, link_id);
+
+               if (WARN_ON_ONCE(!link_conf))
+                       continue;
+
+               /* Ignore any BSS that was not seen in the last 5 seconds */
+               if (ktime_before(link_conf->bss->ts_boottime,
+                                ktime_sub_ns(ktime_get_boottime_ns(),
+                                             (u64)5 * NSEC_PER_SEC)))
+                       continue;
+
+               data[n_data].link_id = link_id;
+               data[n_data].chandef = &link_conf->chanreq.oper;
+               data[n_data].signal = MBM_TO_DBM(link_conf->bss->signal);
+               data[n_data].grade = iwl_mld_get_link_grade(mld, link_conf);
+
+               /* first entry always seeds max_grade/best index */
+               if (n_data == 0 || data[n_data].grade > max_grade) {
+                       max_grade = data[n_data].grade;
+                       *best_link_idx = n_data;
+               }
+               n_data++;
+       }
+
+       return n_data;
+}
+
+/*
+ * Check whether links @a and @b can be used together for EMLSR:
+ * both must individually allow EMLSR, and the pair must be on different
+ * bands with equal bandwidth.
+ */
+static bool
+iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif,
+                        struct iwl_mld_link_sel_data *a,
+                        struct iwl_mld_link_sel_data *b)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld *mld = mld_vif->mld;
+       enum iwl_mld_emlsr_exit ret = 0;
+
+       /* Per-link considerations */
+       if (iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true) ||
+           iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false))
+               return false;
+
+       if (a->chandef->chan->band == b->chandef->chan->band) {
+               ret |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
+       } else if (a->chandef->width != b->chandef->width) {
+               /* TODO: task=EMLSR task=statistics
+                * replace BANDWIDTH exit reason with channel load criteria
+                */
+               ret |= IWL_MLD_EMLSR_EXIT_BANDWIDTH;
+       }
+
+       if (ret) {
+               IWL_DEBUG_INFO(mld,
+                              "Links %d and %d are not a valid pair for EMLSR\n",
+                              a->link_id, b->link_id);
+               IWL_DEBUG_INFO(mld,
+                              "Links bandwidth are: %d and %d\n",
+                              nl80211_chan_width_to_mhz(a->chandef->width),
+                              nl80211_chan_width_to_mhz(b->chandef->width));
+               iwl_mld_print_emlsr_exit(mld, ret);
+               return false;
+       }
+
+       return true;
+}
+
+/* Calculation is done with fixed-point with a scaling factor of 1/256 */
+#define SCALE_FACTOR 256
+
+/*
+ * Returns the combined grade of two given links.
+ * Returns 0 if EMLSR is not allowed with these 2 links.
+ */
+static
+unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld,
+                                    struct ieee80211_vif *vif,
+                                    struct iwl_mld_link_sel_data *a,
+                                    struct iwl_mld_link_sel_data *b,
+                                    u8 *primary_id)
+{
+       struct ieee80211_bss_conf *primary_conf;
+       struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
+       unsigned int primary_load;
+
+       lockdep_assert_wiphy(wiphy);
+
+       /* a is always primary, b is always secondary */
+       if (b->grade > a->grade)
+               swap(a, b);
+
+       *primary_id = a->link_id;
+
+       if (!iwl_mld_valid_emlsr_pair(vif, a, b))
+               return 0;
+
+       primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);
+
+       if (WARN_ON_ONCE(!primary_conf))
+               return 0;
+
+       /* primary_load is on the same 1/256 fixed-point scale */
+       primary_load = iwl_mld_get_chan_load(mld, primary_conf);
+
+       /* The more the primary link is loaded, the more worthwhile EMLSR becomes */
+       return a->grade + ((b->grade * primary_load) / SCALE_FACTOR);
+}
+
+/*
+ * Select and activate the best link(s) for @vif.
+ *
+ * Defaults to activating the single best-graded link; if EMLSR is possible
+ * (capable vif, nothing blocked, >1 usable link), picks the link pair with
+ * the highest combined EMLSR grade instead. Requires a recent MLO scan.
+ */
+static void _iwl_mld_select_links(struct iwl_mld *mld,
+                                 struct ieee80211_vif *vif)
+{
+       struct iwl_mld_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
+       struct iwl_mld_link_sel_data *best_link;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       int max_active_links = iwl_mld_max_active_links(mld, vif);
+       u16 new_active, usable_links = ieee80211_vif_usable_links(vif);
+       u8 best_idx, new_primary, n_data;
+       u16 max_grade;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (!mld_vif->authorized || hweight16(usable_links) <= 1)
+               return;
+
+       if (WARN(time_before(mld->scan.last_mlo_scan_jiffies,
+                            jiffies - IWL_MLD_SCAN_EXPIRE_TIME),
+               "Last MLO scan was too long ago, can't select links\n"))
+               return;
+
+       /* The logic below is simple and not suited for more than 2 links */
+       WARN_ON_ONCE(max_active_links > 2);
+
+       n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links,
+                                          &best_idx);
+
+       if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
+               return;
+
+       /* Default to selecting the single best link */
+       best_link = &data[best_idx];
+       new_primary = best_link->link_id;
+       new_active = BIT(best_link->link_id);
+       max_grade = best_link->grade;
+
+       /* If EMLSR is not possible, activate the best link */
+       if (max_active_links == 1 || n_data == 1 ||
+           !iwl_mld_vif_has_emlsr_cap(vif) || !IWL_MLD_AUTO_EML_ENABLE ||
+           mld_vif->emlsr.blocked_reasons)
+               goto set_active;
+
+       /* Try to find the best link combination */
+       for (u8 a = 0; a < n_data; a++) {
+               for (u8 b = a + 1; b < n_data; b++) {
+                       u8 best_in_pair;
+                       /* NOTE(review): iwl_mld_get_emlsr_grade() returns
+                        * unsigned int but is stored in u16 — confirm combined
+                        * grades cannot exceed U16_MAX.
+                        */
+                       u16 emlsr_grade =
+                               iwl_mld_get_emlsr_grade(mld, vif,
+                                                       &data[a], &data[b],
+                                                       &best_in_pair);
+
+                       /*
+                        * Prefer (new) EMLSR combination to prefer EMLSR over
+                        * a single link.
+                        */
+                       if (emlsr_grade < max_grade)
+                               continue;
+
+                       max_grade = emlsr_grade;
+                       new_primary = best_in_pair;
+                       new_active = BIT(data[a].link_id) |
+                                    BIT(data[b].link_id);
+               }
+       }
+
+set_active:
+       IWL_DEBUG_INFO(mld, "Link selection result: 0x%x. Primary = %d\n",
+                      new_active, new_primary);
+
+       mld_vif->emlsr.selected_primary = new_primary;
+       mld_vif->emlsr.selected_links = new_active;
+
+       ieee80211_set_active_links_async(vif, new_active);
+}
+
+/* Interface-iterator callback: run link selection on a single vif */
+static void iwl_mld_vif_iter_select_links(void *_data, u8 *mac,
+                                         struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+       _iwl_mld_select_links(mld_vif->mld, vif);
+}
+
+/* Run link selection on all active interfaces */
+void iwl_mld_select_links(struct iwl_mld *mld)
+{
+       ieee80211_iterate_active_interfaces_mtx(mld->hw,
+                                               IEEE80211_IFACE_ITER_NORMAL,
+                                               iwl_mld_vif_iter_select_links,
+                                               NULL);
+}
+
+/*
+ * Exit EMLSR if @link's bandwidth no longer matches the other active
+ * link's. Called when a link's chanreq changes while EMLSR is active.
+ */
+void iwl_mld_emlsr_check_equal_bw(struct iwl_mld *mld,
+                                 struct ieee80211_vif *vif,
+                                 struct ieee80211_bss_conf *link)
+{
+       u8 other_link_id = iwl_mld_get_other_link(vif, link->link_id);
+       struct ieee80211_bss_conf *other_link =
+               link_conf_dereference_check(vif, other_link_id);
+
+       if (!ieee80211_vif_link_active(vif, link->link_id) ||
+           !iwl_mld_emlsr_active(vif) ||
+           WARN_ON(link->link_id == other_link_id || !other_link))
+               return;
+
+       if (link->chanreq.oper.width != other_link->chanreq.oper.width)
+               iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BANDWIDTH,
+                                  iwl_mld_get_primary_link(vif));
+}
+
+/*
+ * Per-vif BT-coex check: with BT off, retry EMLSR entry; with BT on and
+ * EMLSR active, exit EMLSR if any active link is on 2.4 GHz.
+ */
+static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac,
+                                       struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld *mld = mld_vif->mld;
+       struct ieee80211_bss_conf *link;
+       unsigned int link_id;
+
+       if (!mld->bt_is_active) {
+               iwl_mld_retry_emlsr(mld, vif);
+               return;
+       }
+
+       /* BT is turned ON but we are not in EMLSR, nothing to do */
+       if (!iwl_mld_emlsr_active(vif))
+               return;
+
+       /* In EMLSR and BT is turned ON */
+
+       for_each_vif_active_link(vif, link, link_id) {
+               if (WARN_ON(!link->chanreq.oper.chan))
+                       continue;
+
+               if (link->chanreq.oper.chan->band == NL80211_BAND_2GHZ) {
+                       iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BT_COEX,
+                                          iwl_mld_get_primary_link(vif));
+                       return;
+               }
+       }
+}
+
+/* Re-evaluate the BT-coex EMLSR criterion on all active interfaces */
+void iwl_mld_emlsr_check_bt(struct iwl_mld *mld)
+{
+       ieee80211_iterate_active_interfaces_mtx(mld->hw,
+                                               IEEE80211_IFACE_ITER_NORMAL,
+                                               iwl_mld_emlsr_check_bt_iter,
+                                               NULL);
+}
+
+/*
+ * Per-vif channel-load check: if EMLSR is active but the primary channel's
+ * load caused by others is below the threshold, EMLSR is not worthwhile —
+ * exit it and keep the primary link.
+ */
+static void iwl_mld_emlsr_check_chan_load_iter(void *_data, u8 *mac,
+                                              struct ieee80211_vif *vif)
+{
+       struct iwl_mld *mld = (struct iwl_mld *)_data;
+       struct ieee80211_bss_conf *prim_link;
+       unsigned int prim_link_id;
+       int chan_load;
+
+       if (!iwl_mld_emlsr_active(vif))
+               return;
+
+       prim_link_id = iwl_mld_get_primary_link(vif);
+       prim_link = link_conf_dereference_protected(vif, prim_link_id);
+       if (WARN_ON(!prim_link))
+               return;
+
+       chan_load = iwl_mld_get_chan_load_by_others(mld, prim_link, true);
+
+       /* negative value means the load is unknown — keep EMLSR as-is */
+       if (chan_load < 0)
+               return;
+
+       /* chan_load is in range [0,255] */
+       if (chan_load < NORMALIZE_PERCENT_TO_255(IWL_MLD_CHAN_LOAD_THRESH))
+               iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
+                                  prim_link_id);
+}
+
+/* Re-evaluate the channel-load EMLSR criterion on all active interfaces */
+void iwl_mld_emlsr_check_chan_load(struct iwl_mld *mld)
+{
+       /* mld is the iterator's _data; pointers convert to void * implicitly,
+        * so the previous (void *)(uintptr_t) double cast was unnecessary.
+        */
+       ieee80211_iterate_active_interfaces_mtx(mld->hw,
+                                               IEEE80211_IFACE_ITER_NORMAL,
+                                               iwl_mld_emlsr_check_chan_load_iter,
+                                               mld);
+}
+
+/*
+ * Re-attempt EMLSR entry by triggering link selection, but only if the vif
+ * is EMLSR-capable, EMLSR is not already active, and nothing blocks it.
+ */
+void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+       if (!iwl_mld_vif_has_emlsr_cap(vif) || iwl_mld_emlsr_active(vif) ||
+           mld_vif->emlsr.blocked_reasons)
+               return;
+
+       iwl_mld_trigger_link_selection(mld, vif);
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_mlo_h__
+#define __iwl_mld_mlo_h__
+
+#include <linux/ieee80211.h>
+#include <linux/types.h>
+#include <net/mac80211.h>
+#include "iwl-config.h"
+#include "iwl-trans.h"
+#include "iface.h"
+
+struct iwl_mld;
+
+void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk);
+void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
+                                      struct wiphy_work *wk);
+
+/* Whether EMLSR is currently active on @vif */
+static inline bool iwl_mld_emlsr_active(struct ieee80211_vif *vif)
+{
+       /* Set on phy context activation, so should be a good proxy */
+       return !!(vif->driver_flags & IEEE80211_VIF_EML_ACTIVE);
+}
+
+/*
+ * Whether @vif may ever use EMLSR: authorized MLD station, EMLSR advertised
+ * in the EML capabilities, and not a CDB (dual-radio) device.
+ */
+static inline bool iwl_mld_vif_has_emlsr_cap(struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+       /* We only track/permit EMLSR state once authorized */
+       if (!mld_vif->authorized)
+               return false;
+
+       /* No EMLSR on dual radio devices */
+       return ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION &&
+              ieee80211_vif_is_mld(vif) &&
+              vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP &&
+              !CSR_HW_RFID_IS_CDB(mld_vif->mld->trans->hw_rf_id);
+}
+
+/* Maximum number of simultaneously active links allowed for @vif's type */
+static inline int
+iwl_mld_max_active_links(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+       /* AP is limited by how many beacons the firmware can handle */
+       if (vif->type == NL80211_IFTYPE_AP)
+               return mld->fw->ucode_capa.num_beacons;
+
+       if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
+               return IWL_FW_MAX_ACTIVE_LINKS_NUM;
+
+       /* For now, do not accept more links on other interface types */
+       return 1;
+}
+
+/*
+ * Count links that have a channel context assigned.
+ * @mld is currently unused here — presumably kept for API symmetry with the
+ * other helpers; confirm before removing.
+ */
+static inline int
+iwl_mld_count_active_links(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_link *mld_link;
+       int n_active = 0;
+
+       for_each_mld_vif_valid_link(mld_vif, mld_link) {
+               if (rcu_access_pointer(mld_link->chan_ctx))
+                       n_active++;
+       }
+
+       return n_active;
+}
+
+/*
+ * Return the primary link id of @vif: the tracked EMLSR primary when EMLSR
+ * is active (and still among the active links), otherwise the lowest active
+ * link id; 0 for non-MLD vifs.
+ */
+static inline u8 iwl_mld_get_primary_link(struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+       lockdep_assert_wiphy(mld_vif->mld->wiphy);
+
+       if (!ieee80211_vif_is_mld(vif) || WARN_ON(!vif->active_links))
+               return 0;
+
+       /* In AP mode, there is no primary link */
+       if (vif->type == NL80211_IFTYPE_AP)
+               return __ffs(vif->active_links);
+
+       if (iwl_mld_emlsr_active(vif) &&
+           !WARN_ON(!(BIT(mld_vif->emlsr.primary) & vif->active_links)))
+               return mld_vif->emlsr.primary;
+
+       return __ffs(vif->active_links);
+}
+
+/*
+ * For non-MLO/single link, this will return the deflink/single active link,
+ * respectively
+ */
+static inline u8 iwl_mld_get_other_link(struct ieee80211_vif *vif, u8 link_id)
+{
+       switch (hweight16(vif->active_links)) {
+       case 0:
+               return 0;
+       /* more than 2 active links is unexpected; warn and treat as one */
+       default:
+               WARN_ON(1);
+               fallthrough;
+       case 1:
+               return __ffs(vif->active_links);
+       case 2:
+               /* the active link that is not @link_id */
+               return __ffs(vif->active_links & ~BIT(link_id));
+       }
+}
+
+s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
+                                const struct cfg80211_chan_def *chandef,
+                                bool low);
+
+/* EMLSR block/unblock and exit */
+void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                        enum iwl_mld_emlsr_blocked reason, u8 link_to_keep);
+int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                            enum iwl_mld_emlsr_blocked reason, u8 link_to_keep);
+void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                          enum iwl_mld_emlsr_blocked reason);
+void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                       enum iwl_mld_emlsr_exit exit, u8 link_to_keep);
+
+int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
+                                     int pending_link_changes);
+
+void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
+                                    struct iwl_rx_packet *pkt);
+void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
+                                          struct iwl_rx_packet *pkt);
+
+void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk);
+void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk);
+
+void iwl_mld_select_links(struct iwl_mld *mld);
+
+void iwl_mld_emlsr_check_equal_bw(struct iwl_mld *mld,
+                                 struct ieee80211_vif *vif,
+                                 struct ieee80211_bss_conf *link);
+
+void iwl_mld_emlsr_check_bt(struct iwl_mld *mld);
+
+void iwl_mld_emlsr_check_chan_load(struct iwl_mld *mld);
+
+void iwl_mld_trigger_link_selection(struct iwl_mld *mld,
+                                   struct ieee80211_vif *vif);
+
+/**
+ * iwl_mld_retry_emlsr - Retry entering EMLSR
+ * @mld: MLD context
+ * @vif: VIF to retry EMLSR on
+ *
+ * Retry entering EMLSR on the given VIF.
+ * Use this if one of the parameters that can prevent EMLSR has changed.
+ */
+void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif);
+
+#endif /* __iwl_mld_mlo_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "notif.h"
+#include "scan.h"
+#include "iface.h"
+#include "mlo.h"
+#include "iwl-trans.h"
+#include "fw/file.h"
+#include "fw/dbg.h"
+#include "fw/api/cmdhdr.h"
+#include "fw/api/mac-cfg.h"
+#include "session-protect.h"
+#include "fw/api/time-event.h"
+#include "fw/api/tx.h"
+#include "fw/api/rs.h"
+#include "fw/api/offload.h"
+#include "fw/api/stats.h"
+#include "fw/api/rfi.h"
+#include "fw/api/coex.h"
+
+#include "mcc.h"
+#include "link.h"
+#include "tx.h"
+#include "rx.h"
+#include "tlc.h"
+#include "agg.h"
+#include "mac80211.h"
+#include "thermal.h"
+#include "roc.h"
+#include "stats.h"
+#include "coex.h"
+#include "time_sync.h"
+#include "ftm-initiator.h"
+
+/* Please use this in an increasing order of the versions */
+#define CMD_VER_ENTRY(_ver, _struct)                   \
+       { .size = sizeof(struct _struct), .ver = _ver },
+#define CMD_VERSIONS(name, ...)                                \
+       static const struct iwl_notif_struct_size       \
+       iwl_notif_struct_sizes_##name[] = { __VA_ARGS__ };
+
+#define RX_HANDLER_NO_OBJECT(_grp, _cmd, _name, _context)              \
+       {.cmd_id = WIDE_ID(_grp, _cmd),                                 \
+        .context = _context,                                           \
+        .fn = iwl_mld_handle_##_name,                                  \
+        .sizes = iwl_notif_struct_sizes_##_name,                       \
+        .n_sizes = ARRAY_SIZE(iwl_notif_struct_sizes_##_name),         \
+       },
+
+/* Use this for Rx handlers that do not need notification validation */
+#define RX_HANDLER_NO_VAL(_grp, _cmd, _name, _context)                 \
+       {.cmd_id = WIDE_ID(_grp, _cmd),                                 \
+        .context = _context,                                           \
+        .fn = iwl_mld_handle_##_name,                                  \
+       },
+
+#define RX_HANDLER_VAL_FN(_grp, _cmd, _name, _context)                 \
+       { .cmd_id = WIDE_ID(_grp, _cmd),                                \
+         .context = _context,                                          \
+         .fn = iwl_mld_handle_##_name,                                 \
+         .val_fn = iwl_mld_validate_##_name,                           \
+       },
+
+/*
+ * Define iwl_mld_cancel_<name>_notif(): returns true (cancel the queued
+ * notification) when the object id carried in the notification's @id_member
+ * field (__le32/__le16/u8, handled via _Generic) matches @obj_id.
+ */
+#define DEFINE_SIMPLE_CANCELLATION(name, notif_struct, id_member)              \
+static bool iwl_mld_cancel_##name##_notif(struct iwl_mld *mld,                 \
+                                         struct iwl_rx_packet *pkt,            \
+                                         u32 obj_id)                           \
+{                                                                              \
+       const struct notif_struct *notif = (const void *)pkt->data;             \
+                                                                               \
+       return obj_id == _Generic((notif)->id_member,                           \
+                                 __le32: le32_to_cpu((notif)->id_member),      \
+                                 __le16: le16_to_cpu((notif)->id_member),      \
+                                 u8: (notif)->id_member);                      \
+}
+
+/* Cancellation callback for notifications that are always canceled when
+ * their object goes away, regardless of the id in the packet.
+ */
+static bool iwl_mld_always_cancel(struct iwl_mld *mld,
+                                 struct iwl_rx_packet *pkt,
+                                 u32 obj_id)
+{
+       return true;
+}
+
+/* Currently only defined for the RX_HANDLER_SIZES options. Use this for
+ * notifications that belong to a specific object, and that should be
+ * canceled when the object is removed
+ */
+#define RX_HANDLER_OF_OBJ(_grp, _cmd, _name, _obj_type)                        \
+       {.cmd_id = WIDE_ID(_grp, _cmd),                                 \
+       /* Only async handlers can be canceled */                       \
+        .context = RX_HANDLER_ASYNC,                                   \
+        .fn = iwl_mld_handle_##_name,                                  \
+        .sizes = iwl_notif_struct_sizes_##_name,                       \
+        .n_sizes = ARRAY_SIZE(iwl_notif_struct_sizes_##_name),         \
+        .obj_type = IWL_MLD_OBJECT_TYPE_##_obj_type,                   \
+        .cancel = iwl_mld_cancel_##_name,                              \
+        },
+
+#define RX_HANDLER_OF_LINK(_grp, _cmd, _name)                          \
+       RX_HANDLER_OF_OBJ(_grp, _cmd, _name, LINK)                      \
+
+#define RX_HANDLER_OF_VIF(_grp, _cmd, _name)                           \
+       RX_HANDLER_OF_OBJ(_grp, _cmd, _name, VIF)                       \
+
+#define RX_HANDLER_OF_STA(_grp, _cmd, _name)                           \
+       RX_HANDLER_OF_OBJ(_grp, _cmd, _name, STA)                       \
+
+#define RX_HANDLER_OF_ROC(_grp, _cmd, _name)                           \
+       RX_HANDLER_OF_OBJ(_grp, _cmd, _name, ROC)
+
+#define RX_HANDLER_OF_SCAN(_grp, _cmd, _name)                          \
+       RX_HANDLER_OF_OBJ(_grp, _cmd, _name, SCAN)
+
+#define RX_HANDLER_OF_FTM_REQ(_grp, _cmd, _name)                               \
+       RX_HANDLER_OF_OBJ(_grp, _cmd, _name, FTM_REQ)
+
+/* Log the MFUART load notification contents (debug only, no state change) */
+static void iwl_mld_handle_mfuart_notif(struct iwl_mld *mld,
+                                       struct iwl_rx_packet *pkt)
+{
+       struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
+
+       IWL_DEBUG_INFO(mld,
+                      "MFUART: installed ver: 0x%08x, external ver: 0x%08x\n",
+                      le32_to_cpu(mfuart_notif->installed_ver),
+                      le32_to_cpu(mfuart_notif->external_ver));
+       IWL_DEBUG_INFO(mld,
+                      "MFUART: status: 0x%08x, duration: 0x%08x image size: 0x%08x\n",
+                      le32_to_cpu(mfuart_notif->status),
+                      le32_to_cpu(mfuart_notif->duration),
+                      le32_to_cpu(mfuart_notif->image_size));
+}
+
+/*
+ * Per-vif iterator for the MU-MIMO group notification: if this vif is the
+ * MU-MIMO owner, forward the firmware-provided membership/user-position
+ * data to mac80211. Not supported while EMLSR (more than one active link).
+ *
+ * Fix: "(u8 *)&notif->..." had been corrupted to "(u8 *)¬if->..."
+ * (HTML-entity mangling of "&not"), which does not compile.
+ */
+static void iwl_mld_mu_mimo_iface_iterator(void *_data, u8 *mac,
+                                          struct ieee80211_vif *vif)
+{
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+       unsigned int link_id = 0;
+
+       if (WARN(hweight16(vif->active_links) > 1,
+                "no support for this notif while in EMLSR 0x%x\n",
+                vif->active_links))
+               return;
+
+       if (ieee80211_vif_is_mld(vif)) {
+               link_id = __ffs(vif->active_links);
+               bss_conf = link_conf_dereference_check(vif, link_id);
+       }
+
+       if (!WARN_ON(!bss_conf) && bss_conf->mu_mimo_owner) {
+               const struct iwl_mu_group_mgmt_notif *notif = _data;
+
+               BUILD_BUG_ON(sizeof(notif->membership_status) !=
+                            WLAN_MEMBERSHIP_LEN);
+               BUILD_BUG_ON(sizeof(notif->user_position) !=
+                            WLAN_USER_POSITION_LEN);
+
+               /* MU-MIMO Group Id action frame is little endian. We treat
+                * the data received from firmware as if it came from the
+                * action frame, so no conversion is needed.
+                */
+               ieee80211_update_mu_groups(vif, link_id,
+                                          (u8 *)&notif->membership_status,
+                                          (u8 *)&notif->user_position);
+       }
+}
+
+/* This handler is called in SYNC mode because it needs to be serialized with
+ * Rx as specified in ieee80211_update_mu_groups()'s documentation.
+ */
+static void iwl_mld_handle_mu_mimo_grp_notif(struct iwl_mld *mld,
+                                            struct iwl_rx_packet *pkt)
+{
+       struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
+
+       /* notif is passed as the iterator's _data */
+       ieee80211_iterate_active_interfaces_atomic(mld->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  iwl_mld_mu_mimo_iface_iterator,
+                                                  notif);
+}
+
+/*
+ * Rebuild a beacon frame stored by the firmware into an skb with rx_status
+ * metadata taken from the notification, and inject it into mac80211 as a
+ * regular received frame. Silently ignores empty or truncated payloads.
+ */
+static void
+iwl_mld_handle_stored_beacon_notif(struct iwl_mld *mld,
+                                  struct iwl_rx_packet *pkt)
+{
+       unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+       struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
+       struct ieee80211_rx_status rx_status = {};
+       struct sk_buff *skb;
+       u32 size = le32_to_cpu(sb->common.byte_count);
+
+       if (size == 0)
+               return;
+
+       /* make sure the firmware actually sent `size` bytes of frame data */
+       if (pkt_len < struct_size(sb, data, size))
+               return;
+
+       skb = alloc_skb(size, GFP_ATOMIC);
+       if (!skb) {
+               IWL_ERR(mld, "alloc_skb failed\n");
+               return;
+       }
+
+       /* update rx_status according to the notification's metadata */
+       rx_status.mactime = le64_to_cpu(sb->common.tsf);
+       /* TSF as indicated by the firmware  is at INA time */
+       rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
+       rx_status.device_timestamp = le32_to_cpu(sb->common.system_time);
+       rx_status.band =
+               iwl_mld_phy_band_to_nl80211(le16_to_cpu(sb->common.band));
+       rx_status.freq =
+               ieee80211_channel_to_frequency(le16_to_cpu(sb->common.channel),
+                                              rx_status.band);
+
+       /* copy the data */
+       skb_put_data(skb, sb->data, size);
+       memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+       /* pass it as regular rx to mac80211 */
+       ieee80211_rx_napi(mld->hw, NULL, skb, NULL);
+}
+
+/*
+ * Handle the firmware's CSA-start notification: for AP vifs, finalize the
+ * CSA in mac80211; for stations, complete the channel switch, or ask the
+ * firmware to cancel it when mac80211 has no CSA in progress.
+ */
+static void
+iwl_mld_handle_channel_switch_start_notif(struct iwl_mld *mld,
+                                         struct iwl_rx_packet *pkt)
+{
+       struct iwl_channel_switch_start_notif *notif = (void *)pkt->data;
+       u32 link_id = le32_to_cpu(notif->link_id);
+       struct ieee80211_bss_conf *link_conf =
+               iwl_mld_fw_id_to_link_conf(mld, link_id);
+       struct ieee80211_vif *vif;
+
+       if (WARN_ON(!link_conf))
+               return;
+
+       vif = link_conf->vif;
+
+       IWL_DEBUG_INFO(mld,
+                      "CSA Start Notification with vif type: %d, link_id: %d\n",
+                      vif->type,
+                      link_conf->link_id);
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_AP:
+               /* We don't support canceling a CSA as it was advertised
+                * by the AP itself
+                */
+               if (!link_conf->csa_active)
+                       return;
+
+               ieee80211_csa_finish(vif, link_conf->link_id);
+               break;
+       case NL80211_IFTYPE_STATION:
+               if (!link_conf->csa_active) {
+                       /* Either unexpected cs notif or mac80211 chose to
+                        * ignore, for example in channel switch to same channel
+                        */
+                       struct iwl_cancel_channel_switch_cmd cmd = {
+                               .id = cpu_to_le32(link_id),
+                       };
+
+                       if (iwl_mld_send_cmd_pdu(mld,
+                                                WIDE_ID(MAC_CONF_GROUP,
+                                                        CANCEL_CHANNEL_SWITCH_CMD),
+                                                &cmd))
+                               IWL_ERR(mld,
+                                       "Failed to cancel the channel switch\n");
+                       return;
+               }
+
+               ieee80211_chswitch_done(vif, true, link_conf->link_id);
+               break;
+
+       default:
+               WARN(1, "CSA on invalid vif type: %d", vif->type);
+       }
+}
+
+/*
+ * Handle a CSA error reported by the firmware; on fatal error bits,
+ * disconnect via mac80211.
+ */
+static void
+iwl_mld_handle_channel_switch_error_notif(struct iwl_mld *mld,
+                                         struct iwl_rx_packet *pkt)
+{
+       struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
+       struct ieee80211_bss_conf *link_conf;
+       struct ieee80211_vif *vif;
+       u32 link_id = le32_to_cpu(notif->link_id);
+       u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);
+
+       link_conf = iwl_mld_fw_id_to_link_conf(mld, link_id);
+       if (WARN_ON(!link_conf))
+               return;
+
+       vif = link_conf->vif;
+
+       IWL_DEBUG_INFO(mld, "FW reports CSA error: id=%u, csa_err_mask=%u\n",
+                      link_id, csa_err_mask);
+
+       /* only these errors are considered fatal enough to disconnect */
+       if (csa_err_mask & (CS_ERR_COUNT_ERROR |
+                           CS_ERR_LONG_DELAY_AFTER_CS |
+                           CS_ERR_TX_BLOCK_TIMER_EXPIRED))
+               ieee80211_channel_switch_disconnect(vif);
+}
+
+/* Track whether we are currently the IBSS manager, as reported by the
+ * firmware in the extended beacon notification.
+ */
+static void iwl_mld_handle_beacon_notification(struct iwl_mld *mld,
+                                              struct iwl_rx_packet *pkt)
+{
+       struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
+
+       mld->ibss_manager = !!beacon->ibss_mgr_status;
+}
+
+/**
+ * DOC: Notification versioning
+ *
+ * The firmware's notifications change from time to time. In order to
+ * differentiate between different versions of the same notification, the
+ * firmware advertises the version of each notification.
+ * Here are listed all the notifications that are supported. Several versions
+ * of the same notification can be allowed at the same time:
+ *
+ * CMD_VERSIONS(my_multi_version_notif,
+ *             CMD_VER_ENTRY(1, iwl_my_multi_version_notif_ver1)
+ *             CMD_VER_ENTRY(2, iwl_my_multi_version_notif_ver2))
+ *
+ * etc...
+ *
+ * The driver will enforce that the notification coming from the firmware
+ * has its version listed here and it'll also enforce that the firmware sent
+ * at least enough bytes to cover the structure listed in the CMD_VER_ENTRY.
+ */
+
+/* Supported version(s), and the minimal payload struct, per notification.
+ * See the "Notification versioning" DOC section above.
+ */
+CMD_VERSIONS(scan_complete_notif,
+            CMD_VER_ENTRY(1, iwl_umac_scan_complete))
+CMD_VERSIONS(scan_iter_complete_notif,
+            CMD_VER_ENTRY(2, iwl_umac_scan_iter_complete_notif))
+CMD_VERSIONS(mfuart_notif,
+            CMD_VER_ENTRY(2, iwl_mfuart_load_notif))
+CMD_VERSIONS(update_mcc,
+            CMD_VER_ENTRY(1, iwl_mcc_chub_notif))
+CMD_VERSIONS(session_prot_notif,
+            CMD_VER_ENTRY(3, iwl_session_prot_notif))
+CMD_VERSIONS(missed_beacon_notif,
+            CMD_VER_ENTRY(5, iwl_missed_beacons_notif))
+CMD_VERSIONS(tx_resp_notif,
+            CMD_VER_ENTRY(8, iwl_tx_resp))
+CMD_VERSIONS(compressed_ba_notif,
+            CMD_VER_ENTRY(5, iwl_compressed_ba_notif))
+CMD_VERSIONS(tlc_notif,
+            CMD_VER_ENTRY(3, iwl_tlc_update_notif))
+CMD_VERSIONS(mu_mimo_grp_notif,
+            CMD_VER_ENTRY(1, iwl_mu_group_mgmt_notif))
+CMD_VERSIONS(channel_switch_start_notif,
+            CMD_VER_ENTRY(3, iwl_channel_switch_start_notif))
+CMD_VERSIONS(channel_switch_error_notif,
+            CMD_VER_ENTRY(2, iwl_channel_switch_error_notif))
+CMD_VERSIONS(ct_kill_notif,
+            CMD_VER_ENTRY(2, ct_kill_notif))
+CMD_VERSIONS(temp_notif,
+            CMD_VER_ENTRY(2, iwl_dts_measurement_notif))
+CMD_VERSIONS(stored_beacon_notif,
+            CMD_VER_ENTRY(4, iwl_stored_beacon_notif))
+CMD_VERSIONS(roc_notif,
+            CMD_VER_ENTRY(1, iwl_roc_notif))
+CMD_VERSIONS(probe_resp_data_notif,
+            CMD_VER_ENTRY(1, iwl_probe_resp_data_notif))
+CMD_VERSIONS(datapath_monitor_notif,
+            CMD_VER_ENTRY(1, iwl_datapath_monitor_notif))
+CMD_VERSIONS(stats_oper_notif,
+            CMD_VER_ENTRY(3, iwl_system_statistics_notif_oper))
+CMD_VERSIONS(stats_oper_part1_notif,
+            CMD_VER_ENTRY(4, iwl_system_statistics_part1_notif_oper))
+CMD_VERSIONS(bt_coex_notif,
+            CMD_VER_ENTRY(1, iwl_bt_coex_profile_notif))
+CMD_VERSIONS(beacon_notification,
+            CMD_VER_ENTRY(6, iwl_extended_beacon_notif))
+CMD_VERSIONS(emlsr_mode_notif,
+            CMD_VER_ENTRY(1, iwl_esr_mode_notif))
+CMD_VERSIONS(emlsr_trans_fail_notif,
+            CMD_VER_ENTRY(1, iwl_esr_trans_fail_notif))
+CMD_VERSIONS(uapsd_misbehaving_ap_notif,
+            CMD_VER_ENTRY(1, iwl_uapsd_misbehaving_ap_notif))
+CMD_VERSIONS(time_msmt_notif,
+            CMD_VER_ENTRY(1, iwl_time_msmt_notify))
+CMD_VERSIONS(time_sync_confirm_notif,
+            CMD_VER_ENTRY(1, iwl_time_msmt_cfm_notify))
+CMD_VERSIONS(omi_status_notif,
+            CMD_VER_ENTRY(1, iwl_omi_send_status_notif))
+CMD_VERSIONS(ftm_resp_notif, CMD_VER_ENTRY(9, iwl_tof_range_rsp_ntfy))
+
+/* Cancellation functions: each returns true when the notification's object
+ * id field (last macro argument) matches the id of the object being removed,
+ * so the queued notification can be dropped.
+ */
+DEFINE_SIMPLE_CANCELLATION(session_prot, iwl_session_prot_notif, mac_link_id)
+DEFINE_SIMPLE_CANCELLATION(tlc, iwl_tlc_update_notif, sta_id)
+DEFINE_SIMPLE_CANCELLATION(channel_switch_start,
+                          iwl_channel_switch_start_notif, link_id)
+DEFINE_SIMPLE_CANCELLATION(channel_switch_error,
+                          iwl_channel_switch_error_notif, link_id)
+DEFINE_SIMPLE_CANCELLATION(datapath_monitor, iwl_datapath_monitor_notif,
+                          link_id)
+DEFINE_SIMPLE_CANCELLATION(roc, iwl_roc_notif, activity)
+DEFINE_SIMPLE_CANCELLATION(scan_complete, iwl_umac_scan_complete, uid)
+DEFINE_SIMPLE_CANCELLATION(probe_resp_data, iwl_probe_resp_data_notif,
+                          mac_id)
+DEFINE_SIMPLE_CANCELLATION(uapsd_misbehaving_ap, iwl_uapsd_misbehaving_ap_notif,
+                          mac_id)
+/* OMI status notifications are cancelled regardless of the object id */
+#define iwl_mld_cancel_omi_status_notif iwl_mld_always_cancel
+DEFINE_SIMPLE_CANCELLATION(ftm_resp, iwl_tof_range_rsp_ntfy, request_id)
+
+/**
+ * DOC: Handlers for fw notifications
+ *
+ * Here are listed the notifications IDs (including the group ID), the handler
+ * of the notification and how it should be called:
+ *
+ *  - RX_HANDLER_SYNC: will be called as part of the Rx path
+ *  - RX_HANDLER_ASYNC: will be handled in a worker, with the wiphy_lock held
+ *
+ * This means that if the firmware sends two notifications A and B in that
+ * order and notification A is RX_HANDLER_ASYNC and notification B is
+ * RX_HANDLER_SYNC, the handler of B will likely be called before the handler
+ * of A.
+ *
+ * This list should be in order of frequency for performance purposes.
+ * The handler can be one from two contexts, see &iwl_rx_handler_context
+ *
+ * A handler can declare that it relies on a specific object in which case it
+ * can be cancelled in case the object is deleted. In order to use this
+ * mechanism, a cancellation function is needed. The cancellation function must
+ * receive an object id (the index of that object in the firmware) and a
+ * notification payload. It'll return true if that specific notification should
+ * be cancelled upon the obliteration of the specific instance of the object.
+ *
+ * DEFINE_SIMPLE_CANCELLATION makes it easy to create a cancellation function
+ * that will simply return true if a given object id matches the object id in
+ * the firmware notification.
+ */
+
+/* The notification handlers table; kept in order of expected frequency,
+ * see the DOC section above.
+ */
+VISIBLE_IF_IWLWIFI_KUNIT
+const struct iwl_rx_handler iwl_mld_rx_handlers[] = {
+       RX_HANDLER_NO_OBJECT(LEGACY_GROUP, TX_CMD, tx_resp_notif,
+                            RX_HANDLER_SYNC)
+       RX_HANDLER_NO_OBJECT(LEGACY_GROUP, BA_NOTIF, compressed_ba_notif,
+                            RX_HANDLER_SYNC)
+       RX_HANDLER_OF_SCAN(LEGACY_GROUP, SCAN_COMPLETE_UMAC,
+                          scan_complete_notif)
+       RX_HANDLER_NO_OBJECT(LEGACY_GROUP, SCAN_ITERATION_COMPLETE_UMAC,
+                            scan_iter_complete_notif,
+                            RX_HANDLER_SYNC)
+       RX_HANDLER_NO_VAL(LEGACY_GROUP, MATCH_FOUND_NOTIFICATION,
+                         match_found_notif, RX_HANDLER_SYNC)
+
+       RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
+                            stats_oper_notif, RX_HANDLER_ASYNC)
+       RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
+                            stats_oper_part1_notif, RX_HANDLER_ASYNC)
+
+       RX_HANDLER_NO_OBJECT(LEGACY_GROUP, MFUART_LOAD_NOTIFICATION,
+                            mfuart_notif, RX_HANDLER_SYNC)
+
+       RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
+                            temp_notif, RX_HANDLER_ASYNC)
+       RX_HANDLER_OF_LINK(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
+                          session_prot_notif)
+       RX_HANDLER_OF_LINK(MAC_CONF_GROUP, MISSED_BEACONS_NOTIF,
+                          missed_beacon_notif)
+       RX_HANDLER_OF_STA(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, tlc_notif)
+       RX_HANDLER_OF_LINK(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
+                          channel_switch_start_notif)
+       RX_HANDLER_OF_LINK(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
+                          channel_switch_error_notif)
+       RX_HANDLER_OF_ROC(MAC_CONF_GROUP, ROC_NOTIF, roc_notif)
+       RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
+                            mu_mimo_grp_notif, RX_HANDLER_SYNC)
+       RX_HANDLER_NO_OBJECT(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
+                            stored_beacon_notif, RX_HANDLER_SYNC)
+       RX_HANDLER_OF_VIF(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
+                         probe_resp_data_notif)
+       RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
+                            ct_kill_notif, RX_HANDLER_ASYNC)
+       RX_HANDLER_OF_LINK(DATA_PATH_GROUP, MONITOR_NOTIF,
+                          datapath_monitor_notif)
+       RX_HANDLER_NO_OBJECT(LEGACY_GROUP, MCC_CHUB_UPDATE_CMD, update_mcc,
+                            RX_HANDLER_ASYNC)
+       RX_HANDLER_NO_OBJECT(BT_COEX_GROUP, PROFILE_NOTIF,
+                            bt_coex_notif, RX_HANDLER_ASYNC)
+       RX_HANDLER_NO_OBJECT(LEGACY_GROUP, BEACON_NOTIFICATION,
+                            beacon_notification, RX_HANDLER_ASYNC)
+       RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, ESR_MODE_NOTIF,
+                            emlsr_mode_notif, RX_HANDLER_ASYNC)
+       RX_HANDLER_NO_OBJECT(MAC_CONF_GROUP, EMLSR_TRANS_FAIL_NOTIF,
+                            emlsr_trans_fail_notif, RX_HANDLER_ASYNC)
+       RX_HANDLER_OF_VIF(LEGACY_GROUP, PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
+                         uapsd_misbehaving_ap_notif)
+       RX_HANDLER_NO_OBJECT(LEGACY_GROUP,
+                            WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION,
+                            time_msmt_notif, RX_HANDLER_SYNC)
+       RX_HANDLER_NO_OBJECT(LEGACY_GROUP,
+                            WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION,
+                            time_sync_confirm_notif, RX_HANDLER_ASYNC)
+       RX_HANDLER_OF_LINK(DATA_PATH_GROUP, OMI_SEND_STATUS_NOTIF,
+                          omi_status_notif)
+       RX_HANDLER_OF_FTM_REQ(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
+                             ftm_resp_notif)
+};
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers);
+
+/* Exposed for KUnit tests only */
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+const unsigned int iwl_mld_rx_handlers_num = ARRAY_SIZE(iwl_mld_rx_handlers);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers_num);
+#endif
+
+/* Validate a firmware notification before handling it: the advertised
+ * notification version must be listed in the handler's sizes table (or a
+ * custom/absent validation applies), and the payload must be at least as
+ * large as the struct registered for that version.
+ *
+ * Returns true if the notification may be handled, false to drop it.
+ */
+static bool
+iwl_mld_notif_is_valid(struct iwl_mld *mld, struct iwl_rx_packet *pkt,
+                      const struct iwl_rx_handler *handler)
+{
+       unsigned int size = iwl_rx_packet_payload_len(pkt);
+       size_t notif_ver;
+
+       /* If n_sizes == 0, it indicates that a validation function may be used
+        * or that no validation is required.
+        */
+       if (!handler->n_sizes) {
+               if (handler->val_fn)
+                       return handler->val_fn(mld, pkt);
+               return true;
+       }
+
+       notif_ver = iwl_fw_lookup_notif_ver(mld->fw,
+                                           iwl_cmd_groupid(handler->cmd_id),
+                                           iwl_cmd_opcode(handler->cmd_id),
+                                           IWL_FW_CMD_VER_UNKNOWN);
+
+       for (int i = 0; i < handler->n_sizes; i++) {
+               if (handler->sizes[i].ver != notif_ver)
+                       continue;
+
+               if (IWL_FW_CHECK(mld, size < handler->sizes[i].size,
+                                "unexpected notification 0x%04x size %d, need %d\n",
+                                handler->cmd_id, size, handler->sizes[i].size))
+                       return false;
+               return true;
+       }
+
+       IWL_FW_CHECK_FAILED(mld,
+                           "notif 0x%04x ver %zu missing expected size, use version %u size\n",
+                           handler->cmd_id, notif_ver,
+                           handler->sizes[handler->n_sizes - 1].ver);
+
+       /* Unknown version: fall back to requiring the size of the last
+        * (highest) listed version. Note this check was inverted ('<')
+        * before, which accepted truncated payloads and rejected properly
+        * sized ones - valid means size is at least the expected size.
+        */
+       return size >= handler->sizes[handler->n_sizes - 1].size;
+}
+
+/* An async notification queued for later processing under the wiphy lock */
+struct iwl_async_handler_entry {
+       struct list_head list;
+       /* holds the page stolen from the Rx path; freed after handling */
+       struct iwl_rx_cmd_buffer rxb;
+       const struct iwl_rx_handler *rx_h;
+};
+
+/* Log an operation ("Queued"/"Handle"/"Delete"/...) performed on an async
+ * handler entry, including the notification's name, ids and sequence.
+ */
+static void
+iwl_mld_log_async_handler_op(struct iwl_mld *mld, const char *op,
+                            struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+       /* %.2x (zero-padded) for both ids; the command id previously used
+        * "%2x", which space-pads single-digit values instead.
+        */
+       IWL_DEBUG_HC(mld,
+                    "%s async handler for notif %s (%.2x.%.2x, seq 0x%x)\n",
+                    op, iwl_get_cmd_string(mld->trans,
+                    WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
+                    pkt->hdr.group_id, pkt->hdr.cmd,
+                    le16_to_cpu(pkt->hdr.sequence));
+}
+
+/* Dispatch a notification to its registered handler: SYNC handlers run
+ * inline in the Rx path; ASYNC ones are queued to the wiphy worker. In
+ * either case, pending notification waiters are woken afterwards.
+ */
+static void iwl_mld_rx_notif(struct iwl_mld *mld,
+                            struct iwl_rx_cmd_buffer *rxb,
+                            struct iwl_rx_packet *pkt)
+{
+       for (int i = 0; i < ARRAY_SIZE(iwl_mld_rx_handlers); i++) {
+               const struct iwl_rx_handler *rx_h = &iwl_mld_rx_handlers[i];
+               struct iwl_async_handler_entry *entry;
+
+               /* at most one handler matches a given (group, cmd) pair */
+               if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
+                       continue;
+
+               if (!iwl_mld_notif_is_valid(mld, pkt, rx_h))
+                       return;
+
+               if (rx_h->context == RX_HANDLER_SYNC) {
+                       rx_h->fn(mld, pkt);
+                       break;
+               }
+
+               entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+               /* we can't do much... */
+               if (!entry)
+                       return;
+
+               /* Set the async handler entry */
+               entry->rxb._page = rxb_steal_page(rxb);
+               entry->rxb._offset = rxb->_offset;
+               entry->rxb._rx_page_order = rxb->_rx_page_order;
+
+               entry->rx_h = rx_h;
+
+               /* Add it to the list and queue the work */
+               spin_lock(&mld->async_handlers_lock);
+               list_add_tail(&entry->list, &mld->async_handlers_list);
+               spin_unlock(&mld->async_handlers_lock);
+
+               wiphy_work_queue(mld->hw->wiphy,
+                                &mld->async_handlers_wk);
+
+               iwl_mld_log_async_handler_op(mld, "Queued", rxb);
+               break;
+       }
+
+       /* wake anyone waiting for this notification (notif_wait) */
+       iwl_notification_wait_notify(&mld->notif_wait, pkt);
+}
+
+/* op_mode Rx entry point for the default queue: the frequent data path
+ * notifications are dispatched directly; everything else goes through the
+ * generic handlers table (iwl_mld_rx_notif).
+ */
+void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+               struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+       u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+       if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+               iwl_mld_rx_mpdu(mld, napi, rxb, 0);
+       else if (cmd_id == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
+               iwl_mld_handle_frame_release_notif(mld, napi, pkt, 0);
+       else if (cmd_id == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE))
+               iwl_mld_handle_bar_frame_release_notif(mld, napi, pkt, 0);
+       else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP,
+                                           RX_QUEUES_NOTIFICATION)))
+               iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, 0);
+       else if (cmd_id == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
+               iwl_mld_rx_monitor_no_data(mld, napi, pkt, 0);
+       else
+               iwl_mld_rx_notif(mld, rxb, pkt);
+}
+
+/* op_mode Rx entry point for RSS queues (queue > 0): only the multi-queue
+ * data path notifications are expected here.
+ */
+void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+                   struct iwl_rx_cmd_buffer *rxb, unsigned int queue)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+       u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+       /* ignore anything for a queue we don't actually have */
+       if (unlikely(queue >= mld->trans->num_rx_queues))
+               return;
+
+       if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+               iwl_mld_rx_mpdu(mld, napi, rxb, queue);
+       else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP,
+                                           RX_QUEUES_NOTIFICATION)))
+               iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, queue);
+       else if (unlikely(cmd_id == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
+               iwl_mld_handle_frame_release_notif(mld, napi, pkt, queue);
+}
+
+/* Remove - without running - any queued async handler entry whose command
+ * id appears in @cmds (array of n_cmds wide ids).
+ */
+void iwl_mld_delete_handlers(struct iwl_mld *mld, const u16 *cmds, int n_cmds)
+{
+       struct iwl_async_handler_entry *entry, *tmp;
+
+       spin_lock_bh(&mld->async_handlers_lock);
+       list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
+               bool match = false;
+
+               for (int i = 0; i < n_cmds; i++) {
+                       if (entry->rx_h->cmd_id == cmds[i]) {
+                               match = true;
+                               break;
+                       }
+               }
+
+               if (!match)
+                       continue;
+
+               iwl_mld_log_async_handler_op(mld, "Delete", &entry->rxb);
+               iwl_free_rxb(&entry->rxb);
+               list_del(&entry->list);
+               kfree(entry);
+       }
+       spin_unlock_bh(&mld->async_handlers_lock);
+}
+
+/* wiphy worker (runs with the wiphy lock held) that executes all pending
+ * async notification handlers and frees their entries.
+ */
+void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+       struct iwl_mld *mld =
+               container_of(wk, struct iwl_mld, async_handlers_wk);
+       struct iwl_async_handler_entry *entry, *tmp;
+       LIST_HEAD(local_list);
+
+       /* Sync with Rx path with a lock. Remove all the entries from this
+        * list, add them to a local one (lock free), and then handle them.
+        */
+       spin_lock_bh(&mld->async_handlers_lock);
+       list_splice_init(&mld->async_handlers_list, &local_list);
+       spin_unlock_bh(&mld->async_handlers_lock);
+
+       list_for_each_entry_safe(entry, tmp, &local_list, list) {
+               iwl_mld_log_async_handler_op(mld, "Handle", &entry->rxb);
+               entry->rx_h->fn(mld, rxb_addr(&entry->rxb));
+               iwl_free_rxb(&entry->rxb);
+               list_del(&entry->list);
+               kfree(entry);
+       }
+}
+
+/* Drop all pending async handler entries without running them */
+void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld)
+{
+       struct iwl_async_handler_entry *entry, *tmp;
+
+       spin_lock_bh(&mld->async_handlers_lock);
+       list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
+               iwl_mld_log_async_handler_op(mld, "Purged", &entry->rxb);
+               iwl_free_rxb(&entry->rxb);
+               list_del(&entry->list);
+               kfree(entry);
+       }
+       spin_unlock_bh(&mld->async_handlers_lock);
+}
+
+/* Cancel queued async notifications that rely on a specific object (link,
+ * sta, vif, ...) that is about to be removed, so a handler never runs on a
+ * stale object. The handler's ->cancel() decides, per notification payload,
+ * whether it refers to @obj_id.
+ */
+void iwl_mld_cancel_notifications_of_object(struct iwl_mld *mld,
+                                           enum iwl_mld_object_type obj_type,
+                                           u32 obj_id)
+{
+       struct iwl_async_handler_entry *entry, *tmp;
+       LIST_HEAD(cancel_list);
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (WARN_ON(obj_type == IWL_MLD_OBJECT_TYPE_NONE))
+               return;
+
+       /* Sync with RX path and remove matching entries from the async list */
+       spin_lock_bh(&mld->async_handlers_lock);
+       list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
+               const struct iwl_rx_handler *rx_h = entry->rx_h;
+
+               /* a handler with an obj_type must provide a cancel function */
+               if (rx_h->obj_type != obj_type || WARN_ON(!rx_h->cancel))
+                       continue;
+
+               if (rx_h->cancel(mld, rxb_addr(&entry->rxb), obj_id)) {
+                       iwl_mld_log_async_handler_op(mld, "Cancel", &entry->rxb);
+                       list_del(&entry->list);
+                       list_add_tail(&entry->list, &cancel_list);
+               }
+       }
+
+       spin_unlock_bh(&mld->async_handlers_lock);
+
+       /* Free the matching entries outside of the spinlock */
+       list_for_each_entry_safe(entry, tmp, &cancel_list, list) {
+               iwl_free_rxb(&entry->rxb);
+               list_del(&entry->list);
+               kfree(entry);
+       }
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_notif_h__
+#define __iwl_mld_notif_h__
+
+struct iwl_mld;
+
+/* op_mode Rx entry points for the default queue and RSS queues */
+void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+               struct iwl_rx_cmd_buffer *rxb);
+
+void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+                   struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
+
+/* wiphy worker running the queued async notification handlers */
+void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk);
+
+/* drop all queued async handlers without running them */
+void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld);
+
+/**
+ * enum iwl_mld_object_type - type of object a queued notification relies on
+ * @IWL_MLD_OBJECT_TYPE_NONE: no object dependency
+ * @IWL_MLD_OBJECT_TYPE_LINK: a link, identified by its fw link id
+ * @IWL_MLD_OBJECT_TYPE_STA: a station, identified by its fw sta id
+ * @IWL_MLD_OBJECT_TYPE_VIF: an interface, identified by its fw mac id
+ * @IWL_MLD_OBJECT_TYPE_ROC: a remain-on-channel activity
+ * @IWL_MLD_OBJECT_TYPE_SCAN: a scan, identified by its uid
+ * @IWL_MLD_OBJECT_TYPE_FTM_REQ: an FTM request, identified by its request id
+ */
+enum iwl_mld_object_type {
+       IWL_MLD_OBJECT_TYPE_NONE,
+       IWL_MLD_OBJECT_TYPE_LINK,
+       IWL_MLD_OBJECT_TYPE_STA,
+       IWL_MLD_OBJECT_TYPE_VIF,
+       IWL_MLD_OBJECT_TYPE_ROC,
+       IWL_MLD_OBJECT_TYPE_SCAN,
+       IWL_MLD_OBJECT_TYPE_FTM_REQ,
+};
+
+/* cancel queued notifications that rely on the given object instance */
+void iwl_mld_cancel_notifications_of_object(struct iwl_mld *mld,
+                                           enum iwl_mld_object_type obj_type,
+                                           u32 obj_id);
+/* remove queued async handlers matching any command id in @cmds */
+void iwl_mld_delete_handlers(struct iwl_mld *mld, const u16 *cmds, int n_cmds);
+
+#endif /* __iwl_mld_notif_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <net/mac80211.h>
+
+#include "phy.h"
+#include "hcmd.h"
+#include "fw/api/phy-ctxt.h"
+
+/* Allocate a free firmware PHY context id.
+ *
+ * Returns the allocated id (now marked as used in mld->used_phy_ids), or
+ * -ENOSPC if all NUM_PHY_CTX ids are already in use.
+ */
+int iwl_mld_allocate_fw_phy_id(struct iwl_mld *mld)
+{
+       int id;
+       /* local copy: for_each_clear_bit() wants an unsigned long bitmap */
+       unsigned long used = mld->used_phy_ids;
+
+       for_each_clear_bit(id, &used, NUM_PHY_CTX) {
+               mld->used_phy_ids |= BIT(id);
+               return id;
+       }
+
+       return -ENOSPC;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_allocate_fw_phy_id);
+
+/* Pick which chandef of the channel context to configure the PHY with:
+ * normally the minimal one (min_def), but for a PSC channel, or a 6 GHz
+ * channel that is at least 80 MHz wide, use the full chandef.
+ */
+struct cfg80211_chan_def *
+iwl_mld_get_chandef_from_chanctx(struct ieee80211_chanctx_conf *ctx)
+{
+       bool use_def = cfg80211_channel_is_psc(ctx->def.chan) ||
+               (ctx->def.chan->band == NL80211_BAND_6GHZ &&
+                ctx->def.width >= NL80211_CHAN_WIDTH_80);
+
+       return use_def ? &ctx->def : &ctx->min_def;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_get_chandef_from_chanctx);
+
+/* Map an nl80211 channel width to the fw's IWL_PHY_CHANNEL_MODE* value.
+ * WARNs and falls back to 20 MHz for widths the fw API doesn't cover.
+ */
+static u8
+iwl_mld_nl80211_width_to_fw(enum nl80211_chan_width width)
+{
+       switch (width) {
+       case NL80211_CHAN_WIDTH_20_NOHT:
+       case NL80211_CHAN_WIDTH_20:
+               return IWL_PHY_CHANNEL_MODE20;
+       case NL80211_CHAN_WIDTH_40:
+               return IWL_PHY_CHANNEL_MODE40;
+       case NL80211_CHAN_WIDTH_80:
+               return IWL_PHY_CHANNEL_MODE80;
+       case NL80211_CHAN_WIDTH_160:
+               return IWL_PHY_CHANNEL_MODE160;
+       case NL80211_CHAN_WIDTH_320:
+               return IWL_PHY_CHANNEL_MODE320;
+       default:
+               WARN(1, "Invalid channel width=%u", width);
+               return IWL_PHY_CHANNEL_MODE20;
+       }
+}
+
+/* Maps the driver specific control channel position (relative to the center
+ * freq) definitions to the fw values
+ */
+u8 iwl_mld_get_fw_ctrl_pos(const struct cfg80211_chan_def *chandef)
+{
+       /* positive offs means the control channel is above the center freq */
+       int offs = chandef->chan->center_freq - chandef->center_freq1;
+       int abs_offs = abs(offs);
+       u8 ret;
+
+       if (offs == 0) {
+               /* The FW is expected to check the control channel position only
+                * when in HT/VHT and the channel width is not 20MHz. Return
+                * this value as the default one.
+                */
+               return 0;
+       }
+
+       /* this results in a value 0-7, i.e. fitting into 0b0111 */
+       ret = (abs_offs - 10) / 20;
+       /* But we need the value to be in 0b1011 because 0b0100 is
+        * IWL_PHY_CTRL_POS_ABOVE, so shift bit 2 up to land in
+        * IWL_PHY_CTRL_POS_OFFS_EXT (0b1000)
+        */
+       ret = (ret & IWL_PHY_CTRL_POS_OFFS_MSK) |
+             ((ret & BIT(2)) << 1);
+       /* and add the above bit */
+       ret |= (offs > 0) * IWL_PHY_CTRL_POS_ABOVE;
+
+       return ret;
+}
+
+/* Send PHY_CONTEXT_CMD for the given channel context with @action
+ * (add/modify/remove). Returns the result of sending the command.
+ */
+int iwl_mld_phy_fw_action(struct iwl_mld *mld,
+                         struct ieee80211_chanctx_conf *ctx, u32 action)
+{
+       struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+       struct cfg80211_chan_def *chandef = &phy->chandef;
+       struct iwl_phy_context_cmd cmd = {
+               .id_and_color = cpu_to_le32(phy->fw_id),
+               .action = cpu_to_le32(action),
+               .puncture_mask = cpu_to_le16(chandef->punctured),
+               /* Channel info */
+               .ci.channel = cpu_to_le32(chandef->chan->hw_value),
+               .ci.band = iwl_mld_nl80211_band_to_fw(chandef->chan->band),
+               .ci.width = iwl_mld_nl80211_width_to_fw(chandef->width),
+               .ci.ctrl_pos = iwl_mld_get_fw_ctrl_pos(chandef),
+       };
+       int ret;
+
+       /* NOTE(review): ctx->ap presumably carries the AP's (wider)
+        * bandwidth for the "sbb" fields - confirm against the fw API docs.
+        */
+       if (ctx->ap.chan) {
+               cmd.sbb_bandwidth =
+                       iwl_mld_nl80211_width_to_fw(ctx->ap.width);
+               cmd.sbb_ctrl_channel_loc = iwl_mld_get_fw_ctrl_pos(&ctx->ap);
+       }
+
+       ret = iwl_mld_send_cmd_pdu(mld, PHY_CONTEXT_CMD, &cmd);
+       if (ret)
+               IWL_ERR(mld, "Failed to send PHY_CONTEXT_CMD ret = %d\n", ret);
+
+       return ret;
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_phy_h__
+#define __iwl_mld_phy_h__
+
+#include "mld.h"
+
+/**
+ * struct iwl_mld_phy - PHY configuration parameters
+ *
+ * @fw_id: fw id of the phy.
+ * @chandef: the last chandef that mac80211 configured the driver
+ *     with. Used to detect a no-op when the chanctx changes.
+ * @channel_load_by_us: channel load on this channel caused by
+ *     the NIC itself, as indicated by firmware
+ */
+struct iwl_mld_phy {
+       /* Add here fields that need clean up on hw restart */
+       struct_group(zeroed_on_hw_restart,
+               u8 fw_id;
+               struct cfg80211_chan_def chandef;
+       );
+       /* And here fields that survive a hw restart */
+       u32 channel_load_by_us;
+};
+
+/* Retrieve the driver-private data embedded in a mac80211 channel context */
+static inline struct iwl_mld_phy *
+iwl_mld_phy_from_mac80211(struct ieee80211_chanctx_conf *channel)
+{
+       return (void *)channel->drv_priv;
+}
+
+/* Cleanup function for struct iwl_mld_phy, will be called in restart */
+static inline void
+iwl_mld_cleanup_phy(struct iwl_mld *mld, struct iwl_mld_phy *phy)
+{
+       CLEANUP_STRUCT(phy);
+}
+
+int iwl_mld_allocate_fw_phy_id(struct iwl_mld *mld);
+int iwl_mld_phy_fw_action(struct iwl_mld *mld,
+                         struct ieee80211_chanctx_conf *ctx, u32 action);
+struct cfg80211_chan_def *
+iwl_mld_get_chandef_from_chanctx(struct ieee80211_chanctx_conf *ctx);
+u8 iwl_mld_get_fw_ctrl_pos(const struct cfg80211_chan_def *chandef);
+
+#endif /* __iwl_mld_phy_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <net/mac80211.h>
+
+#include "mld.h"
+#include "hcmd.h"
+#include "power.h"
+#include "iface.h"
+#include "link.h"
+#include "constants.h"
+
+/* Interface iterator: AND the power-save setting of every station vif into
+ * *data, so the result is true only if all station vifs have PS enabled.
+ */
+static void iwl_mld_vif_ps_iterator(void *data, u8 *mac,
+                                   struct ieee80211_vif *vif)
+{
+       bool *ps_enable = (bool *)data;
+
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
+
+       *ps_enable &= vif->cfg.ps;
+}
+
+/* Send the device-wide power command (POWER_TABLE_CMD).
+ *
+ * Power save is enabled unless the power_scheme module parameter selects
+ * CAM, or any station interface has power save turned off.
+ * @d3: also set the "no sleep till D3" flag (suspend flow).
+ */
+int iwl_mld_update_device_power(struct iwl_mld *mld, bool d3)
+{
+       struct iwl_device_power_cmd cmd = {};
+       bool enable_ps = false;
+
+       if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) {
+               enable_ps = true;
+
+               /* Disable power save if any STA interface has
+                * power save turned off
+                */
+               ieee80211_iterate_active_interfaces_mtx(mld->hw,
+                                                       IEEE80211_IFACE_ITER_NORMAL,
+                                                       iwl_mld_vif_ps_iterator,
+                                                       &enable_ps);
+       }
+
+       if (enable_ps)
+               cmd.flags |=
+                       cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+       if (d3)
+               cmd.flags |=
+                       cpu_to_le16(DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK);
+
+       /* cmd.flags is __le16 - convert to CPU order for printing instead
+        * of passing the restricted type straight to the format string
+        * (wrong value on big-endian, and a sparse warning).
+        */
+       IWL_DEBUG_POWER(mld,
+                       "Sending device power command with flags = 0x%X\n",
+                       le16_to_cpu(cmd.flags));
+
+       return iwl_mld_send_cmd_pdu(mld, POWER_TABLE_CMD, &cmd);
+}
+
+/* Enable firmware beacon filtering (and beacon abort) for a station link.
+ *
+ * Returns 0 without doing anything for non-station interfaces, or when
+ * beacon filtering was disabled via debugfs.
+ * @d3: use the longer D3 beacon-abort escape timer.
+ */
+int iwl_mld_enable_beacon_filter(struct iwl_mld *mld,
+                                const struct ieee80211_bss_conf *link_conf,
+                                bool d3)
+{
+       struct iwl_beacon_filter_cmd cmd = {
+               IWL_BF_CMD_CONFIG_DEFAULTS,
+               .bf_enable_beacon_filter = cpu_to_le32(1),
+               .ba_enable_beacon_abort = cpu_to_le32(1),
+       };
+
+       if (ieee80211_vif_type_p2p(link_conf->vif) != NL80211_IFTYPE_STATION)
+               return 0;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (iwl_mld_vif_from_mac80211(link_conf->vif)->disable_bf)
+               return 0;
+#endif
+
+       /* pass the CQM RSSI thresholds to the fw so it can filter beacons
+        * that don't cross them
+        */
+       if (link_conf->cqm_rssi_thold) {
+               cmd.bf_energy_delta =
+                       cpu_to_le32(link_conf->cqm_rssi_hyst);
+               /* fw uses an absolute value for this */
+               cmd.bf_roaming_state =
+                       cpu_to_le32(-link_conf->cqm_rssi_thold);
+       }
+
+       if (d3)
+               cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
+
+       return iwl_mld_send_cmd_pdu(mld, REPLY_BEACON_FILTERING_CMD,
+                                   &cmd);
+}
+
+/* Disable firmware beacon filtering by sending a zeroed command.
+ * Returns 0 without doing anything for non-station interfaces.
+ */
+int iwl_mld_disable_beacon_filter(struct iwl_mld *mld,
+                                 struct ieee80211_vif *vif)
+{
+       struct iwl_beacon_filter_cmd cmd = {};
+
+       if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+               return 0;
+
+       return iwl_mld_send_cmd_pdu(mld, REPLY_BEACON_FILTERING_CMD,
+                                   &cmd);
+}
+
+/* Return true if the link's current channel is radar-flagged (DFS) */
+static bool iwl_mld_power_is_radar(struct iwl_mld *mld,
+                                  const struct ieee80211_bss_conf *link_conf)
+{
+       const struct ieee80211_chanctx_conf *chanctx_conf;
+
+       chanctx_conf = wiphy_dereference(mld->wiphy, link_conf->chanctx_conf);
+
+       /* a link in use is expected to have a channel context assigned */
+       if (WARN_ON(!chanctx_conf))
+               return false;
+
+       return chanctx_conf->def.chan->flags & IEEE80211_CHAN_RADAR;
+}
+
+/* Fill the U-APSD related fields of the MAC power command.
+ *
+ * @ps_poll: use PS-Poll instead of U-APSD, i.e. advanced PM with no
+ *     trigger-enabled ACs.
+ */
+static void iwl_mld_power_configure_uapsd(struct iwl_mld *mld,
+                                         struct iwl_mld_link *link,
+                                         struct iwl_mac_power_cmd *cmd,
+                                         bool ps_poll)
+{
+       bool tid_found = false;
+
+       cmd->rx_data_timeout_uapsd =
+               cpu_to_le32(IWL_MLD_UAPSD_RX_DATA_TIMEOUT);
+       cmd->tx_data_timeout_uapsd =
+               cpu_to_le32(IWL_MLD_UAPSD_TX_DATA_TIMEOUT);
+
+       /* set advanced pm flag with no uapsd ACs to enable ps-poll */
+       if (ps_poll) {
+               cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+               return;
+       }
+
+       /* mark each AC with U-APSD enabled as trigger/delivery enabled */
+       for (enum ieee80211_ac_numbers ac = IEEE80211_AC_VO;
+            ac <= IEEE80211_AC_BK;
+            ac++) {
+               if (!link->queue_params[ac].uapsd)
+                       continue;
+
+               cmd->flags |=
+                       cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK |
+                                   POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
+
+               cmd->uapsd_ac_flags |= BIT(ac);
+
+               /* QNDP TID - the highest TID with no admission control */
+               if (!tid_found && !link->queue_params[ac].acm) {
+                       tid_found = true;
+                       switch (ac) {
+                       case IEEE80211_AC_VO:
+                               cmd->qndp_tid = 6;
+                               break;
+                       case IEEE80211_AC_VI:
+                               cmd->qndp_tid = 5;
+                               break;
+                       case IEEE80211_AC_BE:
+                               cmd->qndp_tid = 0;
+                               break;
+                       case IEEE80211_AC_BK:
+                               cmd->qndp_tid = 1;
+                               break;
+                       }
+               }
+       }
+
+       /* all four ACs are trigger-enabled: allow snoozing */
+       if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
+                                   BIT(IEEE80211_AC_VI) |
+                                   BIT(IEEE80211_AC_BE) |
+                                   BIT(IEEE80211_AC_BK))) {
+               cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+               cmd->snooze_interval = cpu_to_le16(IWL_MLD_PS_SNOOZE_INTERVAL);
+               cmd->snooze_window = cpu_to_le16(IWL_MLD_PS_SNOOZE_WINDOW);
+       }
+
+       cmd->uapsd_max_sp = mld->hw->uapsd_max_sp_len;
+}
+
+/* Enable skip-over-DTIM in @cmd when it is safe to do so: never on radar
+ * channels and only for DTIM periods shorter than 10 beacons. The skip
+ * count is sized so roughly one DTIM per 900 TU is still received.
+ */
+static void
+iwl_mld_power_config_skip_dtim(struct iwl_mld *mld,
+                              const struct ieee80211_bss_conf *link_conf,
+                              struct iwl_mac_power_cmd *cmd)
+{
+       unsigned int dtimper_tu;
+       unsigned int dtimper;
+       unsigned int skip;
+
+       /* treat an unknown (0) DTIM period as 1 */
+       dtimper = link_conf->dtim_period ?: 1;
+       dtimper_tu = dtimper * link_conf->beacon_int;
+
+       if (dtimper >= 10 || iwl_mld_power_is_radar(mld, link_conf))
+               return;
+
+       if (WARN_ON(!dtimper_tu))
+               return;
+
+       /* configure skip over dtim up to 900 TU DTIM interval */
+       skip = max_t(int, 1, 900 / dtimper_tu);
+
+       cmd->skip_dtim_periods = skip;
+       cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+}
+
+#define POWER_KEEP_ALIVE_PERIOD_SEC    25
+/* Fill a MAC_PM_POWER_TABLE command for @vif.
+ * The fw takes a single power configuration per vif; for an MLD vif the
+ * lowest active link_id's configuration is used. Sets keep-alive,
+ * power-save and LPRX flags, the rx/tx data timeouts (d3 / low-latency-p2p
+ * / default variants) and the uAPSD parameters.
+ */
+static void iwl_mld_power_build_cmd(struct iwl_mld *mld,
+                                   struct ieee80211_vif *vif,
+                                   struct iwl_mac_power_cmd *cmd,
+                                   bool d3)
+{
+       int dtimper, bi;
+       int keep_alive;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct ieee80211_bss_conf *link_conf = &vif->bss_conf;
+       struct iwl_mld_link *link = &mld_vif->deflink;
+       bool ps_poll = false;
+
+       cmd->id_and_color = cpu_to_le32(mld_vif->fw_id);
+
+       if (ieee80211_vif_is_mld(vif)) {
+               int link_id;
+
+               if (WARN_ON(!vif->active_links))
+                       return;
+
+               /* The firmware consumes one single configuration for the vif
+                * and can't differentiate between links, just pick the lowest
+                * link_id's configuration and use that.
+                */
+               link_id = __ffs(vif->active_links);
+               link_conf = link_conf_dereference_check(vif, link_id);
+               link = iwl_mld_link_dereference_check(mld_vif, link_id);
+
+               if (WARN_ON(!link_conf || !link))
+                       return;
+       }
+       dtimper = link_conf->dtim_period;
+       bi = link_conf->beacon_int;
+
+       /* Regardless of power management state the driver must set
+        * keep alive period. FW will use it for sending keep alive NDPs
+        * immediately after association. Check that keep alive period
+        * is at least 3 * DTIM
+        */
+       keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi),
+                                 USEC_PER_SEC);
+       keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC);
+       cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
+
+       if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+               cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+       /* no power management when PS is disabled or TDLS peers exist */
+       if (!vif->cfg.ps || iwl_mld_tdls_sta_count(mld) > 0)
+               return;
+
+       cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+       /* firmware supports LPRX for beacons at rate 1 Mbps or 6 Mbps only */
+       if (link_conf->beacon_rate &&
+           (link_conf->beacon_rate->bitrate == 10 ||
+            link_conf->beacon_rate->bitrate == 60)) {
+               cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+               cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
+       }
+
+       if (d3) {
+               iwl_mld_power_config_skip_dtim(mld, link_conf, cmd);
+               cmd->rx_data_timeout =
+                       cpu_to_le32(IWL_MLD_WOWLAN_PS_RX_DATA_TIMEOUT);
+               cmd->tx_data_timeout =
+                       cpu_to_le32(IWL_MLD_WOWLAN_PS_TX_DATA_TIMEOUT);
+       } else if (iwl_mld_vif_low_latency(mld_vif) && vif->p2p) {
+               cmd->tx_data_timeout =
+                       cpu_to_le32(IWL_MLD_SHORT_PS_TX_DATA_TIMEOUT);
+               cmd->rx_data_timeout =
+                       cpu_to_le32(IWL_MLD_SHORT_PS_RX_DATA_TIMEOUT);
+       } else {
+               cmd->rx_data_timeout =
+                       cpu_to_le32(IWL_MLD_DEFAULT_PS_RX_DATA_TIMEOUT);
+               cmd->tx_data_timeout =
+                       cpu_to_le32(IWL_MLD_DEFAULT_PS_TX_DATA_TIMEOUT);
+       }
+
+       /* uAPSD is only enabled for specific certifications. For those cases,
+        * mac80211 will allow uAPSD. Always call iwl_mld_power_configure_uapsd
+        * which will look at what mac80211 is saying.
+        */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       ps_poll = mld_vif->use_ps_poll;
+#endif
+       iwl_mld_power_configure_uapsd(mld, link, cmd, ps_poll);
+}
+
+/* Build and send the MAC_PM_POWER_TABLE command for @vif.
+ * @d3 selects the suspend (wowlan) timeout/skip-dtim configuration.
+ */
+int iwl_mld_update_mac_power(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                            bool d3)
+{
+       struct iwl_mac_power_cmd cmd = {};
+
+       iwl_mld_power_build_cmd(mld, vif, &cmd, d3);
+
+       return iwl_mld_send_cmd_pdu(mld, MAC_PM_POWER_TABLE, &cmd);
+}
+
+/* Fill the PSD/EIRP limits of a tx-power constraints command from the
+ * link's TPE (transmit power envelope) data: each entry is the minimum of
+ * the local AP and regulatory-client values. Entries mac80211 did not
+ * parse hold the maximum value, so taking min() is always safe.
+ */
+static void
+iwl_mld_tpe_sta_cmd_data(struct iwl_txpower_constraints_cmd *cmd,
+                        const struct ieee80211_bss_conf *link)
+{
+       u8 i;
+
+       /* NOTE: the 0 here is IEEE80211_TPE_CAT_6GHZ_DEFAULT,
+        * we fully ignore IEEE80211_TPE_CAT_6GHZ_SUBORDINATE
+        */
+
+       BUILD_BUG_ON(ARRAY_SIZE(cmd->psd_pwr) !=
+                    ARRAY_SIZE(link->tpe.psd_local[0].power));
+
+       /* if not valid, mac80211 puts default (max value) */
+       for (i = 0; i < ARRAY_SIZE(cmd->psd_pwr); i++)
+               cmd->psd_pwr[i] = min(link->tpe.psd_local[0].power[i],
+                                     link->tpe.psd_reg_client[0].power[i]);
+
+       BUILD_BUG_ON(ARRAY_SIZE(cmd->eirp_pwr) !=
+                    ARRAY_SIZE(link->tpe.max_local[0].power));
+
+       for (i = 0; i < ARRAY_SIZE(cmd->eirp_pwr); i++)
+               cmd->eirp_pwr[i] = min(link->tpe.max_local[0].power[i],
+                                      link->tpe.max_reg_client[0].power[i]);
+}
+
+/* Send AP_TX_POWER_CONSTRAINTS_CMD for a 6 GHz link.
+ * For AP interfaces the VLP AP type is used; for clients the fw AP type is
+ * derived from the link's power_type and the parsed TPE data is included.
+ * No-op when the link is inactive, not on 6 GHz, or (clients only) when
+ * the AP power type is still unknown. Failure is only logged.
+ */
+void
+iwl_mld_send_ap_tx_power_constraint_cmd(struct iwl_mld *mld,
+                                       struct ieee80211_vif *vif,
+                                       struct ieee80211_bss_conf *link)
+{
+       struct iwl_txpower_constraints_cmd cmd = {};
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (!mld_link->active)
+               return;
+
+       if (link->chanreq.oper.chan->band != NL80211_BAND_6GHZ)
+               return;
+
+       cmd.link_id = cpu_to_le16(mld_link->fw_id);
+       /* start from the "no constraint" default power */
+       memset(cmd.psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.psd_pwr));
+       memset(cmd.eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.eirp_pwr));
+
+       if (vif->type == NL80211_IFTYPE_AP) {
+               cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP);
+       } else if (link->power_type == IEEE80211_REG_UNSET_AP) {
+               return;
+       } else {
+               cmd.ap_type = cpu_to_le16(link->power_type - 1);
+               iwl_mld_tpe_sta_cmd_data(&cmd, link);
+       }
+
+       ret = iwl_mld_send_cmd_pdu(mld,
+                                  WIDE_ID(PHY_OPS_GROUP,
+                                          AP_TX_POWER_CONSTRAINTS_CMD),
+                                  &cmd);
+       if (ret)
+               IWL_ERR(mld,
+                       "failed to send AP_TX_POWER_CONSTRAINTS_CMD (%d)\n",
+                       ret);
+}
+
+/* Set the tx-power restriction for a link via REDUCE_TX_POWER_CMD.
+ * @tx_power is in dBm; IWL_DEFAULT_MAX_TX_POWER means "no restriction".
+ * Supports command versions 9 and 10 (common part only otherwise).
+ */
+int iwl_mld_set_tx_power(struct iwl_mld *mld,
+                        struct ieee80211_bss_conf *link_conf,
+                        s16 tx_power)
+{
+       u32 cmd_id = REDUCE_TX_POWER_CMD;
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf);
+       /* the 8x scaling suggests the fw value is in 1/8 dBm units —
+        * NOTE(review): confirm against the fw API definition
+        */
+       u16 u_tx_power = tx_power == IWL_DEFAULT_MAX_TX_POWER ?
+               IWL_DEV_MAX_TX_POWER : 8 * tx_power;
+       struct iwl_dev_tx_power_cmd cmd = {
+               /* Those fields sit on the same place for v9 and v10 */
+               .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK),
+               .common.pwr_restriction = cpu_to_le16(u_tx_power),
+       };
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id,
+                                          IWL_FW_CMD_VER_UNKNOWN);
+       int len = sizeof(cmd.common);
+
+       if (WARN_ON(!mld_link))
+               return -ENODEV;
+
+       cmd.common.link_id = cpu_to_le32(mld_link->fw_id);
+
+       if (cmd_ver == 10)
+               len += sizeof(cmd.v10);
+       else if (cmd_ver == 9)
+               len += sizeof(cmd.v9);
+
+       return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len);
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_power_h__
+#define __iwl_mld_power_h__
+
+#include <net/mac80211.h>
+
+#include "mld.h"
+
+/* Send the device-wide power configuration (@d3 selects suspend config) */
+int iwl_mld_update_device_power(struct iwl_mld *mld, bool d3);
+
+/* Enable fw beacon filtering for a station link */
+int iwl_mld_enable_beacon_filter(struct iwl_mld *mld,
+                                const struct ieee80211_bss_conf *link_conf,
+                                bool d3);
+
+/* Disable fw beacon filtering (no-op on non-station vifs) */
+int iwl_mld_disable_beacon_filter(struct iwl_mld *mld,
+                                 struct ieee80211_vif *vif);
+
+/* Build and send the per-MAC power command for @vif */
+int iwl_mld_update_mac_power(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                            bool d3);
+
+/* Send the 6 GHz tx-power constraints (TPE) for a link */
+void
+iwl_mld_send_ap_tx_power_constraint_cmd(struct iwl_mld *mld,
+                                       struct ieee80211_vif *vif,
+                                       struct ieee80211_bss_conf *link);
+
+/* Set the tx-power restriction (dBm) for a link */
+int iwl_mld_set_tx_power(struct iwl_mld *mld,
+                        struct ieee80211_bss_conf *link_conf,
+                        s16 tx_power);
+
+#endif /* __iwl_mld_power_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "iwl-debug.h"
+#include "hcmd.h"
+#include "ptp.h"
+#include <linux/timekeeping.h>
+
+/* The scaled_ppm parameter is ppm (parts per million) with a 16-bit fractional
+ * part, which means that a value of 1 in one of those fields actually means
+ * 2^-16 ppm, and 2^16=65536 is 1 ppm.
+ */
+#define PTP_SCALE_FACTOR       65536000000ULL
+
+#define IWL_PTP_GP2_WRAP       0x100000000ULL
+#define IWL_PTP_WRAP_TIME      (3600 * HZ)
+#define IWL_PTP_WRAP_THRESHOLD_USEC    (5000)
+
+/* Read the current GP2 (device system time, in usec) register.
+ * Returns -EINVAL when the read yields the 0x5a5a5a5a pattern —
+ * presumably the poison value of a failed PRPH read (device asleep or
+ * unreachable); NOTE(review): confirm against the trans layer.
+ */
+static int iwl_mld_get_systime(struct iwl_mld *mld, u32 *gp2)
+{
+       *gp2 = iwl_read_prph(mld->trans, mld->trans->cfg->gp2_reg_addr);
+
+       if (*gp2 == 0x5a5a5a5a)
+               return -EINVAL;
+
+       return 0;
+}
+
+/* Track a new GP2 read: a value smaller than the previous read is either a
+ * 32-bit GP2 wraparound (count it) or a stale out-of-order read (ignore
+ * it). Re-arms the periodic worker so a read happens at least once per
+ * IWL_PTP_WRAP_TIME.
+ */
+static void iwl_mld_ptp_update_new_read(struct iwl_mld *mld, u32 gp2)
+{
+       IWL_DEBUG_PTP(mld, "PTP: last_gp2=%u, new gp2 read=%u\n",
+                     mld->ptp_data.last_gp2, gp2);
+
+       /* If the difference is above the threshold, assume it's a wraparound.
+        * Otherwise assume it's an old read and ignore it.
+        */
+       if (gp2 < mld->ptp_data.last_gp2) {
+               if (mld->ptp_data.last_gp2 - gp2 <
+                   IWL_PTP_WRAP_THRESHOLD_USEC) {
+                       IWL_DEBUG_PTP(mld,
+                                     "PTP: ignore old read (gp2=%u, last_gp2=%u)\n",
+                                     gp2, mld->ptp_data.last_gp2);
+                       return;
+               }
+
+               mld->ptp_data.wrap_counter++;
+               IWL_DEBUG_PTP(mld,
+                             "PTP: wraparound detected (new counter=%u)\n",
+                             mld->ptp_data.wrap_counter);
+       }
+
+       mld->ptp_data.last_gp2 = gp2;
+       schedule_delayed_work(&mld->ptp_data.dwork, IWL_PTP_WRAP_TIME);
+}
+
+/* Translate a GP2-based timestamp (ns) into the adjusted PTP clock time
+ * (ns), applying the accumulated wraparound count, the frequency scaling
+ * set via adjfine and the offset set via adjtime.
+ * Must be called with data->lock held.
+ */
+u64 iwl_mld_ptp_get_adj_time(struct iwl_mld *mld, u64 base_time_ns)
+{
+       struct ptp_data *data = &mld->ptp_data;
+       /* widen before multiplying: scale_update_gp2 is u32 and
+        * NSEC_PER_USEC is a plain long, so on 32-bit platforms the
+        * product would otherwise be computed in 32 bits and overflow
+        */
+       u64 scale_time_gp2_ns =
+               (u64)mld->ptp_data.scale_update_gp2 * NSEC_PER_USEC;
+       u64 res;
+       u64 diff;
+       s64 scaled_diff;
+
+       lockdep_assert_held(&data->lock);
+
+       iwl_mld_ptp_update_new_read(mld,
+                                   div64_u64(base_time_ns, NSEC_PER_USEC));
+
+       base_time_ns = base_time_ns +
+               (data->wrap_counter * IWL_PTP_GP2_WRAP * NSEC_PER_USEC);
+
+       /* It is possible that a GP2 timestamp was received from fw before the
+        * last scale update.
+        */
+       if (base_time_ns < scale_time_gp2_ns) {
+               diff = scale_time_gp2_ns - base_time_ns;
+               scaled_diff = -mul_u64_u64_div_u64(diff,
+                                                  data->scaled_freq,
+                                                  PTP_SCALE_FACTOR);
+       } else {
+               diff = base_time_ns - scale_time_gp2_ns;
+               scaled_diff = mul_u64_u64_div_u64(diff,
+                                                 data->scaled_freq,
+                                                 PTP_SCALE_FACTOR);
+       }
+
+       IWL_DEBUG_PTP(mld, "base_time=%llu diff ns=%llu scaled_diff_ns=%lld\n",
+                     (unsigned long long)base_time_ns,
+                     (unsigned long long)diff, (long long)scaled_diff);
+
+       res = data->scale_update_adj_time_ns + data->delta + scaled_diff;
+
+       IWL_DEBUG_PTP(mld, "scale_update_ns=%llu delta=%lld adj=%llu\n",
+                     (unsigned long long)data->scale_update_adj_time_ns,
+                     (long long)data->delta, (unsigned long long)res);
+       return res;
+}
+
+/* PTP gettime64 op: read GP2 and convert it to the adjusted PTP clock
+ * time. Returns -EIO when the GP2 register cannot be read.
+ */
+static int iwl_mld_ptp_gettime(struct ptp_clock_info *ptp,
+                              struct timespec64 *ts)
+{
+       struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
+                                          ptp_data.ptp_clock_info);
+       struct ptp_data *data = &mld->ptp_data;
+       u32 gp2;
+       u64 ns;
+
+       if (iwl_mld_get_systime(mld, &gp2)) {
+               IWL_DEBUG_PTP(mld, "PTP: gettime: failed to read systime\n");
+               return -EIO;
+       }
+
+       spin_lock_bh(&data->lock);
+       ns = iwl_mld_ptp_get_adj_time(mld, (u64)gp2 * NSEC_PER_USEC);
+       spin_unlock_bh(&data->lock);
+
+       *ts = ns_to_timespec64(ns);
+       return 0;
+}
+
+/* PTP adjtime op: accumulate @delta (ns) into the clock offset. */
+static int iwl_mld_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+       struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
+                                          ptp_data.ptp_clock_info);
+       struct ptp_data *data = &mld->ptp_data;
+
+       spin_lock_bh(&data->lock);
+       data->delta += delta;
+       IWL_DEBUG_PTP(mld, "delta=%lld, new delta=%lld\n", (long long)delta,
+                     (long long)data->delta);
+       spin_unlock_bh(&data->lock);
+       return 0;
+}
+
+/* PTP adjfine op: update the clock frequency scaling.
+ * @scaled_ppm is ppm with a 16-bit fractional part; it is folded into
+ * data->scaled_freq relative to PTP_SCALE_FACTOR (== exactly 1x).
+ */
+static int iwl_mld_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+       struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
+                                          ptp_data.ptp_clock_info);
+       struct ptp_data *data = &mld->ptp_data;
+       u32 gp2;
+
+       /* Must call iwl_mld_ptp_get_adj_time() before updating
+        * data->scale_update_gp2 or data->scaled_freq since
+        * scale_update_adj_time_ns should reflect the previous scaled_freq.
+        */
+       if (iwl_mld_get_systime(mld, &gp2)) {
+               IWL_DEBUG_PTP(mld, "adjfine: failed to read systime\n");
+               return -EBUSY;
+       }
+
+       spin_lock_bh(&data->lock);
+       /* cast to u64 as in iwl_mld_ptp_gettime(): gp2 * 1000 would
+        * overflow in 32 bits on platforms where long is 32 bits
+        */
+       data->scale_update_adj_time_ns =
+               iwl_mld_ptp_get_adj_time(mld, (u64)gp2 * NSEC_PER_USEC);
+       data->scale_update_gp2 = gp2;
+
+       /* scale_update_adj_time_ns now reflects the configured delta, the
+        * wrap_counter and the previous scaled frequency. Thus delta and
+        * wrap_counter should be reset, and the scale frequency is updated
+        * to the new frequency.
+        */
+       data->delta = 0;
+       data->wrap_counter = 0;
+       data->scaled_freq = PTP_SCALE_FACTOR + scaled_ppm;
+       IWL_DEBUG_PTP(mld, "adjfine: scaled_ppm=%ld new=%llu\n",
+                     scaled_ppm, (unsigned long long)data->scaled_freq);
+       spin_unlock_bh(&data->lock);
+       return 0;
+}
+
+/* Periodic worker: read GP2 so a 32-bit wraparound cannot go unnoticed
+ * between consumer reads (see iwl_mld_ptp_update_new_read()).
+ */
+static void iwl_mld_ptp_work(struct work_struct *wk)
+{
+       struct iwl_mld *mld = container_of(wk, struct iwl_mld,
+                                          ptp_data.dwork.work);
+       struct ptp_data *data = &mld->ptp_data;
+       u32 gp2;
+
+       spin_lock_bh(&data->lock);
+       if (!iwl_mld_get_systime(mld, &gp2))
+               iwl_mld_ptp_update_new_read(mld, gp2);
+       else
+               IWL_DEBUG_PTP(mld, "PTP work: failed to read GP2\n");
+       spin_unlock_bh(&data->lock);
+}
+
+/* Ask the fw for a synced (GP2, platform time) timestamp pair via the
+ * WNM_PLATFORM_PTM_REQUEST command. On success *gp2 is in usec and
+ * *sys_time is the raw 64-bit platform timestamp from the response.
+ */
+static int
+iwl_mld_get_crosstimestamp_fw(struct iwl_mld *mld, u32 *gp2, u64 *sys_time)
+{
+       struct iwl_synced_time_cmd synced_time_cmd = {
+               .operation = cpu_to_le32(IWL_SYNCED_TIME_OPERATION_READ_BOTH)
+       };
+       struct iwl_host_cmd cmd = {
+               .id = WIDE_ID(DATA_PATH_GROUP, WNM_PLATFORM_PTM_REQUEST_CMD),
+               .flags = CMD_WANT_SKB,
+               .data[0] = &synced_time_cmd,
+               .len[0] = sizeof(synced_time_cmd),
+       };
+       struct iwl_synced_time_rsp *resp;
+       struct iwl_rx_packet *pkt;
+       int ret;
+       u64 gp2_10ns;
+
+       wiphy_lock(mld->wiphy);
+       ret = iwl_mld_send_cmd(mld, &cmd);
+       wiphy_unlock(mld->wiphy);
+       if (ret)
+               return ret;
+
+       pkt = cmd.resp_pkt;
+
+       if (iwl_rx_packet_payload_len(pkt) != sizeof(*resp)) {
+               IWL_DEBUG_PTP(mld, "PTP: Invalid PTM command response\n");
+               iwl_free_resp(&cmd);
+               return -EIO;
+       }
+
+       resp = (void *)pkt->data;
+
+       gp2_10ns = (u64)le32_to_cpu(resp->gp2_timestamp_hi) << 32 |
+               le32_to_cpu(resp->gp2_timestamp_lo);
+       /* fw reports GP2 in 10 ns units; convert to usec */
+       *gp2 = div_u64(gp2_10ns, 100);
+
+       *sys_time = (u64)le32_to_cpu(resp->platform_timestamp_hi) << 32 |
+               le32_to_cpu(resp->platform_timestamp_lo);
+
+       iwl_free_resp(&cmd);
+       return ret;
+}
+
+/* PTP getcrosststamp op: obtain a fw-synced (device, system) timestamp
+ * pair and convert the device part to adjusted PTP time.
+ */
+static int
+iwl_mld_phc_get_crosstimestamp(struct ptp_clock_info *ptp,
+                              struct system_device_crosststamp *xtstamp)
+{
+       struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
+                                          ptp_data.ptp_clock_info);
+       struct ptp_data *data = &mld->ptp_data;
+       int ret = 0;
+       /* Raw value read from GP2 register in usec */
+       u32 gp2;
+       /* GP2 value in ns*/
+       s64 gp2_ns;
+       /* System (wall) time */
+       ktime_t sys_time;
+
+       memset(xtstamp, 0, sizeof(struct system_device_crosststamp));
+
+       ret = iwl_mld_get_crosstimestamp_fw(mld, &gp2, &sys_time);
+       if (ret) {
+               IWL_DEBUG_PTP(mld,
+                             "PTP: fw get_crosstimestamp failed (ret=%d)\n",
+                             ret);
+               return ret;
+       }
+
+       spin_lock_bh(&data->lock);
+       gp2_ns = iwl_mld_ptp_get_adj_time(mld, (u64)gp2 * NSEC_PER_USEC);
+       spin_unlock_bh(&data->lock);
+
+       IWL_DEBUG_PTP(mld,
+                     "Got Sync Time: GP2:%u, last_GP2: %u, GP2_ns: %lld, sys_time: %lld\n",
+                     gp2, mld->ptp_data.last_gp2, gp2_ns, (s64)sys_time);
+
+       /* System monotonic raw time is not used */
+       xtstamp->device = ns_to_ktime(gp2_ns);
+       xtstamp->sys_realtime = sys_time;
+
+       return ret;
+}
+
+/* Register the PHC clock and initialize the PTP state. On registration
+ * failure only an error is logged and ptp_clock stays NULL — the driver
+ * continues without PTP support.
+ */
+void iwl_mld_ptp_init(struct iwl_mld *mld)
+{
+       if (WARN_ON(mld->ptp_data.ptp_clock))
+               return;
+
+       spin_lock_init(&mld->ptp_data.lock);
+       INIT_DELAYED_WORK(&mld->ptp_data.dwork, iwl_mld_ptp_work);
+
+       mld->ptp_data.ptp_clock_info.owner = THIS_MODULE;
+       mld->ptp_data.ptp_clock_info.gettime64 = iwl_mld_ptp_gettime;
+       mld->ptp_data.ptp_clock_info.max_adj = 0x7fffffff;
+       mld->ptp_data.ptp_clock_info.adjtime = iwl_mld_ptp_adjtime;
+       mld->ptp_data.ptp_clock_info.adjfine = iwl_mld_ptp_adjfine;
+       /* start with a 1:1 frequency scale */
+       mld->ptp_data.scaled_freq = PTP_SCALE_FACTOR;
+       mld->ptp_data.ptp_clock_info.getcrosststamp =
+                                       iwl_mld_phc_get_crosstimestamp;
+
+       /* Give a short 'friendly name' to identify the PHC clock */
+       snprintf(mld->ptp_data.ptp_clock_info.name,
+                sizeof(mld->ptp_data.ptp_clock_info.name),
+                "%s", "iwlwifi-PTP");
+
+       mld->ptp_data.ptp_clock =
+               ptp_clock_register(&mld->ptp_data.ptp_clock_info, mld->dev);
+
+       if (IS_ERR_OR_NULL(mld->ptp_data.ptp_clock)) {
+               IWL_ERR(mld, "Failed to register PHC clock (%ld)\n",
+                       PTR_ERR(mld->ptp_data.ptp_clock))                ;
+               mld->ptp_data.ptp_clock = NULL;
+       } else {
+               IWL_INFO(mld, "Registered PHC clock: %s, with index: %d\n",
+                        mld->ptp_data.ptp_clock_info.name,
+                        ptp_clock_index(mld->ptp_data.ptp_clock));
+       }
+}
+
+/* Unregister the PHC clock (if registered), reset the GP2 wraparound
+ * tracking state and cancel the periodic GP2-read worker.
+ */
+void iwl_mld_ptp_remove(struct iwl_mld *mld)
+{
+       if (mld->ptp_data.ptp_clock) {
+               IWL_INFO(mld, "Unregistering PHC clock: %s, with index: %d\n",
+                        mld->ptp_data.ptp_clock_info.name,
+                        ptp_clock_index(mld->ptp_data.ptp_clock));
+
+               ptp_clock_unregister(mld->ptp_data.ptp_clock);
+               mld->ptp_data.ptp_clock = NULL;
+               mld->ptp_data.last_gp2 = 0;
+               mld->ptp_data.wrap_counter = 0;
+               cancel_delayed_work_sync(&mld->ptp_data.dwork);
+       }
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#ifndef __iwl_mld_ptp_h__
+#define __iwl_mld_ptp_h__
+
+#include <linux/ptp_clock_kernel.h>
+
+/**
+ * struct ptp_data - PTP hardware clock data
+ *
+ * @ptp_clock: struct ptp_clock pointer returned by the ptp_clock_register()
+ *     function.
+ * @ptp_clock_info: struct ptp_clock_info that describes a PTP hardware clock
+ * @lock: protects the time adjustments data
+ * @delta: delta between hardware clock and ptp clock in nanoseconds
+ * @scale_update_gp2: GP2 time when the scale was last updated
+ * @scale_update_adj_time_ns: adjusted time when the scale was last updated,
+ *     in nanoseconds
+ * @scaled_freq: clock frequency offset, scaled to 65536000000
+ * @last_gp2: the last GP2 reading from the hardware, used for tracking GP2
+ *     wraparounds
+ * @wrap_counter: number of wraparounds since scale_update_adj_time_ns
+ * @dwork: worker scheduled every 1 hour to detect wraparounds
+ */
+struct ptp_data {
+       struct ptp_clock *ptp_clock;
+       struct ptp_clock_info ptp_clock_info;
+
+       spinlock_t lock;
+       s64 delta;
+       u32 scale_update_gp2;
+       u64 scale_update_adj_time_ns;
+       u64 scaled_freq;
+       u32 last_gp2;
+       u32 wrap_counter;
+       struct delayed_work dwork;
+};
+
+/* Register the PHC clock and initialize the PTP state */
+void iwl_mld_ptp_init(struct iwl_mld *mld);
+/* Unregister the PHC clock and reset the PTP state */
+void iwl_mld_ptp_remove(struct iwl_mld *mld);
+/* Translate a GP2-based ns timestamp to adjusted PTP time (lock held) */
+u64 iwl_mld_ptp_get_adj_time(struct iwl_mld *mld, u64 base_time_ns);
+
+#endif /* __iwl_mld_ptp_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <linux/dmi.h>
+
+#include "fw/regulatory.h"
+#include "fw/acpi.h"
+#include "fw/uefi.h"
+
+#include "regulatory.h"
+#include "mld.h"
+#include "hcmd.h"
+
+/* Read the power/regulatory BIOS (ACPI/UEFI) tables into the fw runtime:
+ * PPAG, the WRDS/EWRD/WGDS SAR tables and the UATS table. A missing or
+ * invalid table is logged but never fatal.
+ */
+void iwl_mld_get_bios_tables(struct iwl_mld *mld)
+{
+       int ret;
+
+       iwl_acpi_get_guid_lock_status(&mld->fwrt);
+
+       ret = iwl_bios_get_ppag_table(&mld->fwrt);
+       if (ret < 0) {
+               IWL_DEBUG_RADIO(mld,
+                               "PPAG BIOS table invalid or unavailable. (%d)\n",
+                               ret);
+       }
+
+       ret = iwl_bios_get_wrds_table(&mld->fwrt);
+       if (ret < 0) {
+               IWL_DEBUG_RADIO(mld,
+                               "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
+                               ret);
+
+               /* If not available, don't fail and don't bother with EWRD and
+                * WGDS
+                */
+
+               if (!iwl_bios_get_wgds_table(&mld->fwrt)) {
+                       /* If basic SAR is not available, we check for WGDS,
+                        * which should *not* be available either. If it is
+                        * available, issue an error, because we can't use SAR
+                        * Geo without basic SAR.
+                        */
+                       IWL_ERR(mld, "BIOS contains WGDS but no WRDS\n");
+               }
+
+       } else {
+               ret = iwl_bios_get_ewrd_table(&mld->fwrt);
+               /* If EWRD is not available, we can still use
+                * WRDS, so don't fail.
+                */
+               if (ret < 0)
+                       IWL_DEBUG_RADIO(mld,
+                                       "EWRD SAR BIOS table invalid or unavailable. (%d)\n",
+                                       ret);
+
+               ret = iwl_bios_get_wgds_table(&mld->fwrt);
+               if (ret < 0)
+                       IWL_DEBUG_RADIO(mld,
+                                       "Geo SAR BIOS table invalid or unavailable. (%d)\n",
+                                       ret);
+               /* we don't fail if the table is not available */
+       }
+
+       ret = iwl_uefi_get_uats_table(mld->trans, &mld->fwrt);
+       if (ret)
+               IWL_DEBUG_RADIO(mld, "failed to read UATS table (%d)\n", ret);
+}
+
+/* Send PER_CHAIN_LIMIT_OFFSET_CMD with the geographic SAR (WGDS) tables.
+ * Supports command versions 4 and 5 only (-EOPNOTSUPP otherwise); returns
+ * 0 without sending when the BIOS tables are absent or invalid, which is
+ * a valid scenario.
+ */
+static int iwl_mld_geo_sar_init(struct iwl_mld *mld)
+{
+       u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
+       union iwl_geo_tx_power_profiles_cmd cmd;
+       u16 len;
+       u32 n_bands;
+       __le32 sk = cpu_to_le32(0);
+       int ret;
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id,
+                                          IWL_FW_CMD_VER_UNKNOWN);
+
+       BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) !=
+                    offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops));
+
+       cmd.v4.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
+
+       /* Only set to South Korea if the table revision is 1 */
+       if (mld->fwrt.geo_rev == 1)
+               sk = cpu_to_le32(1);
+
+       if (cmd_ver == 5) {
+               len = sizeof(cmd.v5);
+               n_bands = ARRAY_SIZE(cmd.v5.table[0]);
+               cmd.v5.table_revision = sk;
+       } else if (cmd_ver == 4) {
+               len = sizeof(cmd.v4);
+               n_bands = ARRAY_SIZE(cmd.v4.table[0]);
+               cmd.v4.table_revision = sk;
+       } else {
+               return -EOPNOTSUPP;
+       }
+
+       BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
+                    offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
+       /* the table is at the same position for all versions, so set use v4 */
+       ret = iwl_sar_geo_fill_table(&mld->fwrt, &cmd.v4.table[0][0],
+                                    n_bands, BIOS_GEO_MAX_PROFILE_NUM);
+
+       /* It is a valid scenario to not support SAR, or miss wgds table,
+        * but in that case there is no need to send the command.
+        */
+       if (ret)
+               return 0;
+
+       return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len);
+}
+
+/* Send REDUCE_TX_POWER_CMD with the per-chain SAR limits for the given
+ * profile pair. Supports command versions 9 and 10 only (-EOPNOTSUPP
+ * otherwise). Returns a positive value when the profile is disabled, in
+ * which case the command is not sent.
+ */
+int iwl_mld_config_sar_profile(struct iwl_mld *mld, int prof_a, int prof_b)
+{
+       u32 cmd_id = REDUCE_TX_POWER_CMD;
+       struct iwl_dev_tx_power_cmd cmd = {
+               .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+       };
+       __le16 *per_chain;
+       int ret;
+       u16 len = sizeof(cmd.common);
+       u32 n_subbands;
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id,
+                                          IWL_FW_CMD_VER_UNKNOWN);
+
+       if (cmd_ver == 10) {
+               len += sizeof(cmd.v10);
+               n_subbands = IWL_NUM_SUB_BANDS_V2;
+               per_chain = &cmd.v10.per_chain[0][0][0];
+               cmd.v10.flags =
+                       cpu_to_le32(mld->fwrt.reduced_power_flags);
+       } else if (cmd_ver == 9) {
+               len += sizeof(cmd.v9);
+               n_subbands = IWL_NUM_SUB_BANDS_V1;
+               per_chain = &cmd.v9.per_chain[0][0];
+       } else {
+               return -EOPNOTSUPP;
+       }
+
+       /* TODO: CDB - support IWL_NUM_CHAIN_TABLES_V2 */
+       ret = iwl_sar_fill_profile(&mld->fwrt, per_chain,
+                                  IWL_NUM_CHAIN_TABLES,
+                                  n_subbands, prof_a, prof_b);
+       /* return on error or if the profile is disabled (positive number) */
+       if (ret)
+               return ret;
+
+       return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len);
+}
+
+/* Configure SAR at init time: use profile 1 (WRDS) for both chains unless
+ * the user already chose profiles, then configure geographic SAR only when
+ * basic SAR was actually enabled (config returned 0).
+ */
+int iwl_mld_init_sar(struct iwl_mld *mld)
+{
+       int chain_a_prof = 1;
+       int chain_b_prof = 1;
+       int ret;
+
+       /* If no profile was chosen by the user yet, choose profile 1 (WRDS) as
+        * default for both chains
+        */
+       if (mld->fwrt.sar_chain_a_profile && mld->fwrt.sar_chain_b_profile) {
+               chain_a_prof = mld->fwrt.sar_chain_a_profile;
+               chain_b_prof = mld->fwrt.sar_chain_b_profile;
+       }
+
+       ret = iwl_mld_config_sar_profile(mld, chain_a_prof, chain_b_prof);
+       if (ret < 0)
+               return ret;
+
+       /* positive return means SAR profile disabled - skip geo SAR */
+       if (ret)
+               return 0;
+
+       return iwl_mld_geo_sar_init(mld);
+}
+
+/* Send the SGOM (SAR GEO offset mapping) table to the fw; a no-op when
+ * the table was not enabled by BIOS.
+ */
+int iwl_mld_init_sgom(struct iwl_mld *mld)
+{
+       int ret;
+       struct iwl_host_cmd cmd = {
+               .id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
+                             SAR_OFFSET_MAPPING_TABLE_CMD),
+               .data[0] = &mld->fwrt.sgom_table,
+               .len[0] =  sizeof(mld->fwrt.sgom_table),
+               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+       };
+
+       if (!mld->fwrt.sgom_enabled) {
+               IWL_DEBUG_RADIO(mld, "SGOM table is disabled\n");
+               return 0;
+       }
+
+       ret = iwl_mld_send_cmd(mld, &cmd);
+       if (ret)
+               IWL_ERR(mld,
+                       "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret);
+
+       return ret;
+}
+
+/* Fill and send PER_PLATFORM_ANT_GAIN_CMD. Not having a PPAG table is a
+ * valid scenario and returns 0 without sending anything.
+ */
+static int iwl_mld_ppag_send_cmd(struct iwl_mld *mld)
+{
+       union iwl_ppag_table_cmd cmd = {};
+       int ret, len;
+
+       ret = iwl_fill_ppag_table(&mld->fwrt, &cmd, &len);
+       /* Not supporting PPAG table is a valid scenario */
+       if (ret < 0)
+               return 0;
+
+       IWL_DEBUG_RADIO(mld, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
+       ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP,
+                                               PER_PLATFORM_ANT_GAIN_CMD),
+                                  &cmd, len);
+       if (ret < 0)
+               IWL_ERR(mld, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
+                       ret);
+
+       return ret;
+}
+
+/* Send the PPAG table to the firmware if the platform is approved.
+ *
+ * The table itself was already read during the INIT stage; returns 0
+ * when PPAG is not applicable on this platform.
+ */
+int iwl_mld_init_ppag(struct iwl_mld *mld)
+{
+       if (iwl_is_ppag_approved(&mld->fwrt))
+               return iwl_mld_ppag_send_cmd(mld);
+
+       return 0;
+}
+
+/* Configure LARI (regulatory overrides) in the firmware.
+ *
+ * Collects the OEM/DSM enablement bitmaps from the BIOS and sends one
+ * LARI_CONFIG_CHANGE command; nothing is sent when every bitmap is
+ * zero.  Failures are only logged - LARI is best-effort.
+ */
+void iwl_mld_configure_lari(struct iwl_mld *mld)
+{
+       struct iwl_fw_runtime *fwrt = &mld->fwrt;
+       struct iwl_lari_config_change_cmd cmd = {
+               .config_bitmap = iwl_get_lari_config_bitmap(fwrt),
+       };
+       int ret;
+       u32 value;
+
+       ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
+       if (!ret)
+               cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
+
+       ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
+       if (!ret)
+               /* forward only the known UNII4 bits; use a plain mask
+                * instead of a compound assignment inside the expression,
+                * the modified 'value' was never reused anyway
+                */
+               cmd.oem_unii4_allow_bitmap =
+                       cpu_to_le32(value & DSM_UNII4_ALLOW_BITMAP);
+
+       ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
+       if (!ret)
+               cmd.chan_state_active_bitmap = cpu_to_le32(value);
+
+       ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value);
+       if (!ret)
+               cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
+
+       ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value);
+       if (!ret)
+               cmd.force_disable_channels_bitmap = cpu_to_le32(value);
+
+       ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
+                              &value);
+       if (!ret)
+               cmd.edt_bitmap = cpu_to_le32(value);
+
+       ret = iwl_bios_get_wbem(fwrt, &value);
+       if (!ret)
+               cmd.oem_320mhz_allow_bitmap = cpu_to_le32(value);
+
+       ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value);
+       if (!ret)
+               cmd.oem_11be_allow_bitmap = cpu_to_le32(value);
+
+       /* nothing to configure - don't bother the firmware */
+       if (!cmd.config_bitmap &&
+           !cmd.oem_uhb_allow_bitmap &&
+           !cmd.oem_11ax_allow_bitmap &&
+           !cmd.oem_unii4_allow_bitmap &&
+           !cmd.chan_state_active_bitmap &&
+           !cmd.force_disable_channels_bitmap &&
+           !cmd.edt_bitmap &&
+           !cmd.oem_320mhz_allow_bitmap &&
+           !cmd.oem_11be_allow_bitmap)
+               return;
+
+       IWL_DEBUG_RADIO(mld,
+                       "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
+                       le32_to_cpu(cmd.config_bitmap),
+                       le32_to_cpu(cmd.oem_11ax_allow_bitmap));
+       IWL_DEBUG_RADIO(mld,
+                       "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x\n",
+                       le32_to_cpu(cmd.oem_unii4_allow_bitmap),
+                       le32_to_cpu(cmd.chan_state_active_bitmap));
+       IWL_DEBUG_RADIO(mld,
+                       "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
+                       le32_to_cpu(cmd.oem_uhb_allow_bitmap),
+                       le32_to_cpu(cmd.force_disable_channels_bitmap));
+       IWL_DEBUG_RADIO(mld,
+                       "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n",
+                       le32_to_cpu(cmd.edt_bitmap),
+                       le32_to_cpu(cmd.oem_320mhz_allow_bitmap));
+       IWL_DEBUG_RADIO(mld,
+                       "sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n",
+                       le32_to_cpu(cmd.oem_11be_allow_bitmap));
+
+       ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+                                               LARI_CONFIG_CHANGE), &cmd);
+       if (ret)
+               IWL_DEBUG_RADIO(mld,
+                               "Failed to send LARI_CONFIG_CHANGE (%d)\n",
+                               ret);
+}
+
+/* Send the UATS table (MCC_ALLOWED_AP_TYPE_CMD) if it was read and is
+ * valid; a failure is only logged.
+ */
+void iwl_mld_init_uats(struct iwl_mld *mld)
+{
+       int ret;
+       struct iwl_host_cmd cmd = {
+               .id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
+                             MCC_ALLOWED_AP_TYPE_CMD),
+               /* table lives in fwrt and outlives the command - NOCOPY */
+               .data[0] = &mld->fwrt.uats_table,
+               .len[0] =  sizeof(mld->fwrt.uats_table),
+               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+       };
+
+       if (!mld->fwrt.uats_valid)
+               return;
+
+       ret = iwl_mld_send_cmd(mld, &cmd);
+       if (ret)
+               IWL_ERR(mld, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n",
+                       ret);
+}
+
+/* Read the TAS table from the BIOS and send TAS_CONFIG to the firmware.
+ *
+ * Best-effort: every failure is only logged.  When the system vendor is
+ * not on the approved list, US and Canada are force-added to the TAS
+ * block list (and TAS is dropped entirely if that fails).
+ */
+void iwl_mld_init_tas(struct iwl_mld *mld)
+{
+       int ret;
+       struct iwl_tas_data data = {};
+       struct iwl_tas_config_cmd cmd = {};
+       u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
+
+       /* the BIOS-side and FW-side block lists must stay in sync */
+       BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) !=
+                    IWL_WTAS_BLACK_LIST_MAX);
+       BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) !=
+                    IWL_WTAS_BLACK_LIST_MAX);
+
+       if (!fw_has_capa(&mld->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
+               IWL_DEBUG_RADIO(mld, "TAS not enabled in FW\n");
+               return;
+       }
+
+       ret = iwl_bios_get_tas_table(&mld->fwrt, &data);
+       if (ret < 0) {
+               IWL_DEBUG_RADIO(mld,
+                               "TAS table invalid or unavailable. (%d)\n",
+                               ret);
+               return;
+       }
+
+       if (!iwl_is_tas_approved()) {
+               IWL_DEBUG_RADIO(mld,
+                               "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
+                               dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+               if ((!iwl_add_mcc_to_tas_block_list(data.block_list_array,
+                                                   &data.block_list_size,
+                                                   IWL_MCC_US)) ||
+                   (!iwl_add_mcc_to_tas_block_list(data.block_list_array,
+                                                   &data.block_list_size,
+                                                   IWL_MCC_CANADA))) {
+                       IWL_DEBUG_RADIO(mld,
+                                       "Unable to add US/Canada to TAS block list, disabling TAS\n");
+                       return;
+               }
+       } else {
+               IWL_DEBUG_RADIO(mld,
+                               "System vendor '%s' is in the approved list.\n",
+                               dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+       }
+
+       /* convert the host-order BIOS data into the FW command layout */
+       cmd.block_list_size = cpu_to_le16(data.block_list_size);
+       for (u8 i = 0; i < data.block_list_size; i++)
+               cmd.block_list_array[i] =
+                       cpu_to_le16(data.block_list_array[i]);
+       cmd.tas_config_info.table_source = data.table_source;
+       cmd.tas_config_info.table_revision = data.table_revision;
+       cmd.tas_config_info.value = cpu_to_le32(data.tas_selection);
+
+       ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
+       if (ret)
+               IWL_DEBUG_RADIO(mld, "failed to send TAS_CONFIG (%d)\n", ret);
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_regulatory_h__
+#define __iwl_mld_regulatory_h__
+
+#include "mld.h"
+
+/* Load the regulatory/power BIOS tables into mld->fwrt */
+void iwl_mld_get_bios_tables(struct iwl_mld *mld);
+/* Send LARI_CONFIG_CHANGE built from the BIOS/DSM bitmaps (best-effort) */
+void iwl_mld_configure_lari(struct iwl_mld *mld);
+/* Send the UATS (MCC_ALLOWED_AP_TYPE) table, if valid */
+void iwl_mld_init_uats(struct iwl_mld *mld);
+/* Read and send the TAS configuration (best-effort) */
+void iwl_mld_init_tas(struct iwl_mld *mld);
+
+/* Send the PPAG table if the platform is approved; 0 when not applicable */
+int iwl_mld_init_ppag(struct iwl_mld *mld);
+
+/* Send the SGOM (SAR offset mapping) table, if enabled */
+int iwl_mld_init_sgom(struct iwl_mld *mld);
+
+/* Apply the SAR profiles (and geo SAR when the profile is enabled) */
+int iwl_mld_init_sar(struct iwl_mld *mld);
+
+/* Configure the per-chain SAR profiles in the firmware */
+int iwl_mld_config_sar_profile(struct iwl_mld *mld, int prof_a, int prof_b);
+
+#endif /* __iwl_mld_regulatory_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 - 2025 Intel Corporation
+ */
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+
+#include "mld.h"
+#include "roc.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "sta.h"
+#include "mlo.h"
+
+#include "fw/api/context.h"
+#include "fw/api/time-event.h"
+
+#define AUX_ROC_MAX_DELAY MSEC_TO_TU(200)
+
+/* Interface iterator: block EMLSR on this vif for the duration of a ROC.
+ * Any failure is reported back through *data (an int *); note that a
+ * later failure overwrites an earlier one.
+ */
+static void
+iwl_mld_vif_iter_emlsr_block_roc(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       int *result = data;
+       int ret;
+
+       ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
+                                      IWL_MLD_EMLSR_BLOCKED_ROC,
+                                      iwl_mld_get_primary_link(vif));
+       if (ret)
+               *result = ret;
+}
+
+/* Start a remain-on-channel session on the given channel.
+ *
+ * Only P2P device interfaces are supported for now.  EMLSR is blocked
+ * on all active interfaces first, an AUX station is added, and then
+ * ROC_CMD is sent to the firmware.
+ *
+ * NOTE(review): the EMLSR ROC block taken below is not lifted on the
+ * error paths of this function - confirm it is released elsewhere.
+ */
+int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                     struct ieee80211_channel *channel, int duration,
+                     enum ieee80211_roc_type type)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_int_sta *aux_sta;
+       struct iwl_roc_req cmd = {
+               .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+       };
+       /* older FW API versions use the shorter v5 command layout */
+       u8 ver = iwl_fw_lookup_cmd_ver(mld->fw,
+                                      WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+       u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd);
+       enum iwl_roc_activity activity;
+       int ret = 0;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       ieee80211_iterate_active_interfaces_mtx(mld->hw,
+                                               IEEE80211_IFACE_ITER_NORMAL,
+                                               iwl_mld_vif_iter_emlsr_block_roc,
+                                               &ret);
+       if (ret)
+               return ret;
+
+       /* TODO: task=Hotspot 2.0 */
+       if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
+               IWL_ERR(mld, "NOT SUPPORTED: ROC on vif->type %d\n",
+                       vif->type);
+
+               return -EOPNOTSUPP;
+       }
+
+       switch (type) {
+       case IEEE80211_ROC_TYPE_NORMAL:
+               activity = ROC_ACTIVITY_P2P_DISC;
+               break;
+       case IEEE80211_ROC_TYPE_MGMT_TX:
+               activity = ROC_ACTIVITY_P2P_NEG;
+               break;
+       default:
+               WARN_ONCE(1, "Got an invalid P2P ROC type\n");
+               return -EINVAL;
+       }
+
+       /* ROC_NUM_ACTIVITIES means "no ROC running" on this vif */
+       if (WARN_ON(mld_vif->roc_activity != ROC_NUM_ACTIVITIES))
+               return -EBUSY;
+
+       /* No MLO on P2P device */
+       aux_sta = &mld_vif->deflink.aux_sta;
+
+       ret = iwl_mld_add_aux_sta(mld, aux_sta);
+       if (ret)
+               return ret;
+
+       cmd.activity = cpu_to_le32(activity);
+       cmd.sta_id = cpu_to_le32(aux_sta->sta_id);
+       cmd.channel_info.channel = cpu_to_le32(channel->hw_value);
+       cmd.channel_info.band = iwl_mld_nl80211_band_to_fw(channel->band);
+       cmd.channel_info.width = IWL_PHY_CHANNEL_MODE20;
+       /* TODO: task=Hotspot 2.0, revisit those parameters when we add an ROC
+        * on the BSS vif
+        */
+       cmd.max_delay = cpu_to_le32(AUX_ROC_MAX_DELAY);
+       cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
+
+       memcpy(cmd.node_addr, vif->addr, ETH_ALEN);
+
+       ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
+                                  &cmd, cmd_len);
+       if (ret) {
+               IWL_ERR(mld, "Couldn't send the ROC_CMD\n");
+               return ret;
+       }
+       mld_vif->roc_activity = activity;
+
+       return 0;
+}
+
+/* Interface iterator: lift the EMLSR block that was placed on this vif
+ * when the ROC started.
+ */
+static void
+iwl_mld_vif_iter_emlsr_unblock_roc(void *data, u8 *mac,
+                                  struct ieee80211_vif *vif)
+{
+       iwl_mld_unblock_emlsr(iwl_mld_vif_from_mac80211(vif)->mld, vif,
+                             IWL_MLD_EMLSR_BLOCKED_ROC);
+}
+
+/* Tear down a finished/canceled ROC: reset the vif's ROC activity,
+ * lift the EMLSR blocks, flush the AUX station queues and remove the
+ * AUX station.  The ordering here is deliberate - see the comments.
+ */
+static void iwl_mld_destroy_roc(struct iwl_mld *mld,
+                               struct ieee80211_vif *vif,
+                               struct iwl_mld_vif *mld_vif)
+{
+       mld_vif->roc_activity = ROC_NUM_ACTIVITIES;
+
+       ieee80211_iterate_active_interfaces_mtx(mld->hw,
+                                               IEEE80211_IFACE_ITER_NORMAL,
+                                               iwl_mld_vif_iter_emlsr_unblock_roc,
+                                               NULL);
+
+       /* wait until every tx has seen that roc_activity has been reset */
+       synchronize_net();
+       /* from here, no new tx will be added
+        * we can flush the Tx on the queues
+        */
+
+       iwl_mld_flush_link_sta_txqs(mld, mld_vif->deflink.aux_sta.sta_id);
+
+       iwl_mld_remove_aux_sta(mld, vif, &vif->bss_conf);
+}
+
+/* Stop an ongoing remain-on-channel session.
+ *
+ * Sends the ROC removal command (best-effort - teardown proceeds even
+ * if the command fails), cancels any ROC notification still queued for
+ * async processing, and destroys the local ROC state.
+ */
+int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
+                      struct ieee80211_vif *vif)
+{
+       struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_roc_req cmd = {
+               .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+       };
+       /* older FW API versions use the shorter v5 command layout */
+       u8 ver = iwl_fw_lookup_cmd_ver(mld->fw,
+                                      WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+       u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd);
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* TODO: task=Hotspot 2.0 */
+       if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE))
+               return -EOPNOTSUPP;
+
+       /* No roc activity running it's probably already done */
+       if (mld_vif->roc_activity == ROC_NUM_ACTIVITIES)
+               return 0;
+
+       cmd.activity = cpu_to_le32(mld_vif->roc_activity);
+
+       ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
+                                  &cmd, cmd_len);
+       if (ret)
+               IWL_ERR(mld, "Couldn't send the command to cancel the ROC\n");
+
+       /* We may have raced with the firmware expiring the ROC instance at
+        * this very moment. In that case, we can have a notification in the
+        * async processing queue. However, none can arrive _after_ this as
+        * ROC_CMD was sent synchronously, i.e. we waited for a response and
+        * the firmware cannot refer to this ROC after the response. Thus,
+        * if we just cancel the notification (if there's one) we'll be at a
+        * clean state for any possible next ROC.
+        */
+       iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_ROC,
+                                              mld_vif->roc_activity);
+
+       iwl_mld_destroy_roc(mld, vif, mld_vif);
+
+       return 0;
+}
+
+/* Handle the firmware ROC notification.
+ *
+ * On a successful start, tell mac80211 the channel is ready; otherwise
+ * clean up the ROC (removing it from the firmware first if it actually
+ * started) and report the ROC as expired.
+ */
+void iwl_mld_handle_roc_notif(struct iwl_mld *mld,
+                             struct iwl_rx_packet *pkt)
+{
+       const struct iwl_roc_notif *notif = (void *)pkt->data;
+       u32 activity = le32_to_cpu(notif->activity);
+       /* TODO: task=Hotspot 2.0 - roc can run on BSS */
+       struct ieee80211_vif *vif = mld->p2p_device_vif;
+       struct iwl_mld_vif *mld_vif;
+
+       if (WARN_ON(!vif))
+               return;
+
+       mld_vif = iwl_mld_vif_from_mac80211(vif);
+       /* It is possible that the ROC was canceled
+        * but the notification was already fired.
+        */
+       if (mld_vif->roc_activity != activity)
+               return;
+
+       if (le32_to_cpu(notif->success) &&
+           le32_to_cpu(notif->started)) {
+               /* We had a successful start */
+               ieee80211_ready_on_channel(mld->hw);
+       } else {
+               /* ROC was not successful, tell the firmware to remove it */
+               if (le32_to_cpu(notif->started))
+                       iwl_mld_cancel_roc(mld->hw, vif);
+               else
+                       iwl_mld_destroy_roc(mld, vif, mld_vif);
+               /* we need to let know mac80211 about end OR
+                * an unsuccessful start
+                */
+               ieee80211_remain_on_channel_expired(mld->hw);
+       }
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_roc_h__
+#define __iwl_mld_roc_h__
+
+#include <net/mac80211.h>
+
+/* Start a remain-on-channel session (P2P device only) */
+int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                     struct ieee80211_channel *channel, int duration,
+                     enum ieee80211_roc_type type);
+
+/* Cancel an ongoing remain-on-channel session */
+int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
+                      struct ieee80211_vif *vif);
+
+/* Handle the firmware ROC notification */
+void iwl_mld_handle_roc_notif(struct iwl_mld *mld,
+                             struct iwl_rx_packet *pkt);
+
+#endif /* __iwl_mld_roc_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <net/mac80211.h>
+#include <kunit/static_stub.h>
+
+#include "mld.h"
+#include "sta.h"
+#include "agg.h"
+#include "rx.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "time_sync.h"
+#include "fw/dbg.h"
+#include "fw/api/rx.h"
+
+/* stores relevant PHY data fields extracted from iwl_rx_mpdu_desc */
+struct iwl_mld_rx_phy_data {
+       /* selects how the opaque data0..data5 words are decoded */
+       enum iwl_rx_phy_info_type info_type;
+       __le32 data0;
+       __le32 data1;
+       __le32 data2;
+       __le32 data3;
+       __le32 eht_data4;
+       __le32 data5;
+       __le16 data4;
+       /* NOTE(review): presumably marks the first subframe of an
+        * aggregate - confirm against the RX path that sets it
+        */
+       bool first_subframe;
+       /* true when the fields were filled from an MPDU descriptor
+        * (see iwl_mld_fill_phy_data)
+        */
+       bool with_data;
+       __le32 rx_vec[4];
+       u32 rate_n_flags;
+       u32 gp2_on_air_rise;
+       u16 phy_info;
+       u8 energy_a, energy_b;
+       u8 channel;
+};
+
+/* Copy the PHY-related fields out of the (v3) MPDU descriptor into the
+ * driver-local phy_data, converting the host-order fields up front.
+ */
+static void
+iwl_mld_fill_phy_data(struct iwl_rx_mpdu_desc *desc,
+                     struct iwl_mld_rx_phy_data *phy_data)
+{
+       phy_data->phy_info = le16_to_cpu(desc->phy_info);
+       phy_data->rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+       phy_data->gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+       phy_data->channel = desc->v3.channel;
+       phy_data->energy_a = desc->v3.energy_a;
+       phy_data->energy_b = desc->v3.energy_b;
+       phy_data->data0 = desc->v3.phy_data0;
+       phy_data->data1 = desc->v3.phy_data1;
+       phy_data->data2 = desc->v3.phy_data2;
+       phy_data->data3 = desc->v3.phy_data3;
+       phy_data->data4 = desc->phy_data4;
+       phy_data->eht_data4 = desc->phy_eht_data4;
+       phy_data->data5 = desc->v3.phy_data5;
+       phy_data->with_data = true;
+}
+
+/* Replay-protection check for HW-decrypted CCMP/GCMP unicast data.
+ *
+ * Compares the frame's packet number (PN) against the last PN accepted
+ * on this RX queue/TID and updates it on success.  Returns 0 when the
+ * frame may be passed up, -1 when it must be dropped (stale/duplicate
+ * PN, no station/key state, or invalid TID).
+ */
+static inline int iwl_mld_check_pn(struct iwl_mld *mld, struct sk_buff *skb,
+                                  int queue, struct ieee80211_sta *sta)
+{
+       struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
+       struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
+       struct iwl_mld_sta *mld_sta;
+       struct iwl_mld_ptk_pn *ptk_pn;
+       int res;
+       u8 tid, keyidx;
+       u8 pn[IEEE80211_CCMP_PN_LEN];
+       u8 *extiv;
+
+       /* multicast and non-data only arrives on default queue; avoid checking
+        * for default queue - we don't want to replicate all the logic that's
+        * necessary for checking the PN on fragmented frames, leave that
+        * to mac80211
+        */
+       if (queue == 0 || !ieee80211_is_data(hdr->frame_control) ||
+           is_multicast_ether_addr(hdr->addr1))
+               return 0;
+
+       if (!(stats->flag & RX_FLAG_DECRYPTED))
+               return 0;
+
+       /* if we are here - this for sure is either CCMP or GCMP */
+       if (!sta) {
+               IWL_DEBUG_DROP(mld,
+                              "expected hw-decrypted unicast frame for station\n");
+               return -1;
+       }
+
+       mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+       /* the extended IV follows the 802.11 header; the key index is in
+        * the two MSBs of its 4th octet
+        */
+       extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
+       keyidx = extiv[3] >> 6;
+
+       ptk_pn = rcu_dereference(mld_sta->ptk_pn[keyidx]);
+       if (!ptk_pn)
+               return -1;
+
+       if (ieee80211_is_data_qos(hdr->frame_control))
+               tid = ieee80211_get_tid(hdr);
+       else
+               tid = 0;
+
+       /* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
+       if (tid >= IWL_MAX_TID_COUNT)
+               return -1;
+
+       /* load pn - reorder the IV octets most-significant first so that
+        * memcmp() below compares PNs numerically
+        */
+       pn[0] = extiv[7];
+       pn[1] = extiv[6];
+       pn[2] = extiv[5];
+       pn[3] = extiv[4];
+       pn[4] = extiv[1];
+       pn[5] = extiv[0];
+
+       res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
+       if (res < 0)
+               return -1;
+       if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
+               return -1;
+
+       memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
+       stats->flag |= RX_FLAG_PN_VALIDATED;
+
+       return 0;
+}
+
+/* iwl_mld_pass_packet_to_mac80211 - passes the packet for mac80211
+ *
+ * Drops (and frees) the skb if the PN replay check fails; otherwise
+ * hands it to mac80211 via NAPI.  Redirectable under KUnit so tests
+ * can intercept the delivered frames.
+ */
+void iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld,
+                                    struct napi_struct *napi,
+                                    struct sk_buff *skb, int queue,
+                                    struct ieee80211_sta *sta)
+{
+       KUNIT_STATIC_STUB_REDIRECT(iwl_mld_pass_packet_to_mac80211,
+                                  mld, napi, skb, queue, sta);
+
+       if (unlikely(iwl_mld_check_pn(mld, skb, queue, sta))) {
+               kfree_skb(skb);
+               return;
+       }
+
+       ieee80211_rx_napi(mld->hw, sta, skb, napi);
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_pass_packet_to_mac80211);
+
+/* Fill the signal-strength fields of the rx status from the per-chain
+ * energy readings.  A zero reading means "no measurement" and is mapped
+ * to S8_MIN so it never wins the max().
+ * NOTE(review): non-zero readings are negated, i.e. they appear to be
+ * dBm magnitudes from the firmware - confirm against the FW API.
+ */
+static void iwl_mld_fill_signal(struct iwl_mld *mld,
+                               struct ieee80211_rx_status *rx_status,
+                               struct iwl_mld_rx_phy_data *phy_data)
+{
+       u32 rate_n_flags = phy_data->rate_n_flags;
+       int energy_a = phy_data->energy_a;
+       int energy_b = phy_data->energy_b;
+       int max_energy;
+
+       energy_a = energy_a ? -energy_a : S8_MIN;
+       energy_b = energy_b ? -energy_b : S8_MIN;
+       max_energy = max(energy_a, energy_b);
+
+       IWL_DEBUG_STATS(mld, "energy in A %d B %d, and max %d\n",
+                       energy_a, energy_b, max_energy);
+
+       rx_status->signal = max_energy;
+       /* the antenna bits of rate_n_flags double as the chains bitmap */
+       rx_status->chains =
+           (rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
+       rx_status->chain_signal[0] = energy_a;
+       rx_status->chain_signal[1] = energy_b;
+}
+
+/* Translate the firmware's HE RU allocation index into the rx-status
+ * RU size/offset and the corresponding radiotap HE (and HE-MU/TB)
+ * bandwidth fields.
+ */
+static void
+iwl_mld_decode_he_phy_ru_alloc(struct iwl_mld_rx_phy_data *phy_data,
+                              struct ieee80211_radiotap_he *he,
+                              struct ieee80211_radiotap_he_mu *he_mu,
+                              struct ieee80211_rx_status *rx_status)
+{
+       /* Unfortunately, we have to leave the mac80211 data
+        * incorrect for the case that we receive an HE-MU
+        * transmission and *don't* have the HE phy data (due
+        * to the bits being used for TSF). This shouldn't
+        * happen though as management frames where we need
+        * the TSF/timers are not be transmitted in HE-MU.
+        */
+       u8 ru = le32_get_bits(phy_data->data1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
+       u32 rate_n_flags = phy_data->rate_n_flags;
+       u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+       u8 offs = 0;
+
+       rx_status->bw = RATE_INFO_BW_HE_RU;
+
+       he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
+       /* map the FW RU index ranges to RU size + offset within the range */
+       switch (ru) {
+       case 0 ... 36:
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+               offs = ru;
+               break;
+       case 37 ... 52:
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+               offs = ru - 37;
+               break;
+       case 53 ... 60:
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+               offs = ru - 53;
+               break;
+       case 61 ... 64:
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+               offs = ru - 61;
+               break;
+       case 65 ... 66:
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+               offs = ru - 65;
+               break;
+       case 67:
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+               break;
+       case 68:
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+               break;
+       }
+       he->data2 |= le16_encode_bits(offs,
+                                     IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+       he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
+                                IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
+       if (phy_data->data1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80))
+               he->data2 |=
+                       cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+
+/* compile-time proof that the radiotap BW encodings match the FW's
+ * rate_n_flags channel-width encoding, so the bits can be copied raw
+ */
+#define CHECK_BW(bw) \
+       BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
+                    RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \
+       BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \
+                    RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
+       CHECK_BW(20);
+       CHECK_BW(40);
+       CHECK_BW(80);
+       CHECK_BW(160);
+
+       if (he_mu)
+               he_mu->flags2 |=
+                       le16_encode_bits(u32_get_bits(rate_n_flags,
+                                                     RATE_MCS_CHAN_WIDTH_MSK),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
+       else if (he_type == RATE_MCS_HE_TYPE_TRIG)
+               he->data6 |=
+                       cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
+                       le16_encode_bits(u32_get_bits(rate_n_flags,
+                                                     RATE_MCS_CHAN_WIDTH_MSK),
+                                        IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
+}
+
+/* Decode the HE-MU extended PHY data (per-channel RU allocations) into
+ * the radiotap HE-MU structure.  Each channel's info is taken only when
+ * its SIG-B CRC was OK; channel 2 additionally requires a bandwidth
+ * above 20 MHz.
+ */
+static void
+iwl_mld_decode_he_mu_ext(struct iwl_mld_rx_phy_data *phy_data,
+                        struct ieee80211_radiotap_he_mu *he_mu)
+{
+       u32 phy_data2 = le32_to_cpu(phy_data->data2);
+       u32 phy_data3 = le32_to_cpu(phy_data->data3);
+       u16 phy_data4 = le16_to_cpu(phy_data->data4);
+       u32 rate_n_flags = phy_data->rate_n_flags;
+
+       if (u32_get_bits(phy_data4, IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK)) {
+               he_mu->flags1 |=
+                       cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN |
+                                   IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN);
+
+               he_mu->flags1 |=
+                       le16_encode_bits(u32_get_bits(phy_data4,
+                                                     IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU);
+
+               he_mu->ru_ch1[0] = u32_get_bits(phy_data2,
+                                               IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0);
+               he_mu->ru_ch1[1] = u32_get_bits(phy_data3,
+                                               IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1);
+               he_mu->ru_ch1[2] = u32_get_bits(phy_data2,
+                                               IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2);
+               he_mu->ru_ch1[3] = u32_get_bits(phy_data3,
+                                               IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3);
+       }
+
+       if (u32_get_bits(phy_data4, IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK) &&
+           (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) {
+               he_mu->flags1 |=
+                       cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN |
+                                   IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN);
+
+               he_mu->flags2 |=
+                       le16_encode_bits(u32_get_bits(phy_data4,
+                                                     IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU);
+
+               he_mu->ru_ch2[0] = u32_get_bits(phy_data2,
+                                               IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0);
+               he_mu->ru_ch2[1] = u32_get_bits(phy_data3,
+                                               IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1);
+               he_mu->ru_ch2[2] = u32_get_bits(phy_data2,
+                                               IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2);
+               he_mu->ru_ch2[3] = u32_get_bits(phy_data3,
+                                               IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3);
+       }
+}
+
+static void
+iwl_mld_decode_he_phy_data(struct iwl_mld_rx_phy_data *phy_data,
+                          struct ieee80211_radiotap_he *he,
+                          struct ieee80211_radiotap_he_mu *he_mu,
+                          struct ieee80211_rx_status *rx_status,
+                          int queue)
+{
+       switch (phy_data->info_type) {
+       case IWL_RX_PHY_INFO_TYPE_NONE:
+       case IWL_RX_PHY_INFO_TYPE_CCK:
+       case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY:
+       case IWL_RX_PHY_INFO_TYPE_HT:
+       case IWL_RX_PHY_INFO_TYPE_VHT_SU:
+       case IWL_RX_PHY_INFO_TYPE_VHT_MU:
+       case IWL_RX_PHY_INFO_TYPE_EHT_MU:
+       case IWL_RX_PHY_INFO_TYPE_EHT_TB:
+       case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
+       case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
+               return;
+       case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+               he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
+                                        IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN |
+                                        IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN |
+                                        IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN);
+               he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
+                                                           IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1),
+                                             IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1);
+               he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
+                                                           IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2),
+                                             IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2);
+               he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
+                                                           IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3),
+                                             IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3);
+               he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
+                                                           IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4),
+                                             IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4);
+               fallthrough;
+       case IWL_RX_PHY_INFO_TYPE_HE_SU:
+       case IWL_RX_PHY_INFO_TYPE_HE_MU:
+       case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+       case IWL_RX_PHY_INFO_TYPE_HE_TB:
+               /* HE common */
+               he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+                                        IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
+                                        IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN);
+               he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+                                        IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+                                        IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+                                        IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+               he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+                                                           IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK),
+                                             IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
+               if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB &&
+                   phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) {
+                       he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
+                       he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+                                                           IWL_RX_PHY_DATA0_HE_UPLINK),
+                                                     IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+               }
+               he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+                                                           IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM),
+                                             IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
+               he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+                                                           IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK),
+                                             IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
+               he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+                                                           IWL_RX_PHY_DATA0_HE_PE_DISAMBIG),
+                                             IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
+               he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data1,
+                                                           IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK),
+                                             IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+               he->data6 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+                                                           IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK),
+                                             IEEE80211_RADIOTAP_HE_DATA6_TXOP);
+               he->data6 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+                                                           IWL_RX_PHY_DATA0_HE_DOPPLER),
+                                             IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
+               break;
+       }
+
+       switch (phy_data->info_type) {
+       case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+       case IWL_RX_PHY_INFO_TYPE_HE_MU:
+       case IWL_RX_PHY_INFO_TYPE_HE_SU:
+               he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN);
+               he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+                                                           IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK),
+                                             IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
+               break;
+       default:
+               /* nothing here */
+               break;
+       }
+
+       switch (phy_data->info_type) {
+       case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+               he_mu->flags1 |=
+                       le16_encode_bits(le16_get_bits(phy_data->data4,
+                                                      IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+               he_mu->flags1 |=
+                       le16_encode_bits(le16_get_bits(phy_data->data4,
+                                                      IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+               he_mu->flags2 |=
+                       le16_encode_bits(le16_get_bits(phy_data->data4,
+                                                      IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
+               iwl_mld_decode_he_mu_ext(phy_data, he_mu);
+               fallthrough;
+       case IWL_RX_PHY_INFO_TYPE_HE_MU:
+               he_mu->flags2 |=
+                       le16_encode_bits(le32_get_bits(phy_data->data1,
+                                                      IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+               he_mu->flags2 |=
+                       le16_encode_bits(le32_get_bits(phy_data->data1,
+                                                      IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION),
+                                        IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+               fallthrough;
+       case IWL_RX_PHY_INFO_TYPE_HE_TB:
+       case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+               iwl_mld_decode_he_phy_ru_alloc(phy_data, he, he_mu, rx_status);
+               break;
+       case IWL_RX_PHY_INFO_TYPE_HE_SU:
+               he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
+               he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+                                                           IWL_RX_PHY_DATA0_HE_BEAM_CHNG),
+                                             IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
+               break;
+       default:
+               /* nothing */
+               break;
+       }
+}
+
+/* Fill in the radiotap HE (and, for MU PPDUs, HE-MU) information for a
+ * frame received in an HE PPDU, and derive the rx_status rate fields
+ * (GI, LTF size, RU-based bandwidth) from the FW rate/PHY data.
+ */
+static void iwl_mld_rx_he(struct iwl_mld *mld, struct sk_buff *skb,
+                         struct iwl_mld_rx_phy_data *phy_data,
+                         int queue)
+{
+       struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_radiotap_he *he = NULL;
+       struct ieee80211_radiotap_he_mu *he_mu = NULL;
+       u32 rate_n_flags = phy_data->rate_n_flags;
+       u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+       u8 ltf;
+       /* fields that can always be reported as known from rate_n_flags */
+       static const struct ieee80211_radiotap_he known = {
+               .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+                                    IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+                                    IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN     |
+                                    IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
+               .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+                                    IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
+       };
+       static const struct ieee80211_radiotap_he_mu mu_known = {
+               .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+                                     IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+                                     IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+                                     IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
+               .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN |
+                                     IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
+       };
+       u16 phy_info = phy_data->phy_info;
+
+       /* the HE radiotap field is always emitted for HE frames */
+       he = skb_put_data(skb, &known, sizeof(known));
+       rx_status->flag |= RX_FLAG_RADIOTAP_HE;
+
+       /* MU PPDUs additionally carry the HE-MU radiotap field */
+       if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU ||
+           phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) {
+               he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
+               rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+       }
+
+       /* report the AMPDU-EOF bit on single frames */
+       if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+               rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+               rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+               if (phy_data->data0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
+                       rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+       }
+
+       /* only decode the PHY data fields when the FW reported them
+        * (signalled by the TSF-overload flag)
+        */
+       if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
+               iwl_mld_decode_he_phy_data(phy_data, he, he_mu, rx_status,
+                                          queue);
+
+       /* update aggregation data for monitor sake on default queue */
+       if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
+           (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) {
+               rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+               /* NOTE(review): this tests the EHT delimiter-EOF bit in the
+                * HE path - presumably the bit position matches the HE one;
+                * confirm against the FW API definitions
+                */
+               if (phy_data->data0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
+                       rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+       }
+
+       if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
+           rate_n_flags & RATE_MCS_HE_106T_MSK) {
+               rx_status->bw = RATE_INFO_BW_HE_RU;
+               rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+       }
+
+       /* actually data is filled in mac80211 */
+       if (he_type == RATE_MCS_HE_TYPE_SU ||
+           he_type == RATE_MCS_HE_TYPE_EXT_SU)
+               he->data1 |=
+                       cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
+/* compile-time proof that the radiotap format values equal the FW encoding,
+ * so he_type can be copied into data1 directly below
+ */
+#define CHECK_TYPE(F)                                                  \
+       BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F !=        \
+                    (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+       CHECK_TYPE(SU);
+       CHECK_TYPE(EXT_SU);
+       CHECK_TYPE(MU);
+       CHECK_TYPE(TRIG);
+
+       he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
+
+       if (rate_n_flags & RATE_MCS_BF_MSK)
+               he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
+
+       /* translate the FW GI/LTF encoding into radiotap/nl80211 values;
+        * the mapping differs for trigger-based (TB) PPDUs
+        */
+       switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
+               RATE_MCS_HE_GI_LTF_POS) {
+       case 0:
+               if (he_type == RATE_MCS_HE_TYPE_TRIG)
+                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+               else
+                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+               if (he_type == RATE_MCS_HE_TYPE_MU)
+                       ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+               else
+                       ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
+               break;
+       case 1:
+               if (he_type == RATE_MCS_HE_TYPE_TRIG)
+                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+               else
+                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+               ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+               break;
+       case 2:
+               if (he_type == RATE_MCS_HE_TYPE_TRIG) {
+                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+                       ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+               } else {
+                       rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+                       ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+               }
+               break;
+       case 3:
+               rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+               ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+               break;
+       case 4:
+               rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+               ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+               break;
+       default:
+               ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
+       }
+
+       he->data5 |= le16_encode_bits(ltf,
+                                     IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+}
+
+/* Append a radiotap L-SIG field carrying the L-SIG length for PPDU formats
+ * that include an L-SIG; other formats are left untouched.
+ */
+static void iwl_mld_decode_lsig(struct sk_buff *skb,
+                               struct iwl_mld_rx_phy_data *phy_data)
+{
+       struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_radiotap_lsig *lsig;
+
+       /* bail out for PHY formats that don't report an L-SIG */
+       switch (phy_data->info_type) {
+       case IWL_RX_PHY_INFO_TYPE_HT:
+       case IWL_RX_PHY_INFO_TYPE_VHT_SU:
+       case IWL_RX_PHY_INFO_TYPE_VHT_MU:
+       case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+       case IWL_RX_PHY_INFO_TYPE_HE_SU:
+       case IWL_RX_PHY_INFO_TYPE_HE_MU:
+       case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+       case IWL_RX_PHY_INFO_TYPE_HE_TB:
+       case IWL_RX_PHY_INFO_TYPE_EHT_MU:
+       case IWL_RX_PHY_INFO_TYPE_EHT_TB:
+       case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
+       case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
+               break;
+       default:
+               return;
+       }
+
+       lsig = skb_put(skb, sizeof(*lsig));
+       lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN);
+       lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->data1,
+                                                    IWL_RX_PHY_DATA1_LSIG_LEN_MASK),
+                                      IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH);
+       rx_status->flag |= RX_FLAG_RADIOTAP_LSIG;
+}
+
+/* Append a radiotap TLV header of @type to the skb and reserve @len bytes
+ * of payload, padded up to a multiple of 4 and zero-filled.
+ *
+ * Returns a pointer to the zeroed payload area.
+ */
+static void *
+iwl_mld_radiotap_put_tlv(struct sk_buff *skb, u16 type, u16 len)
+{
+       struct ieee80211_radiotap_tlv *hdr = skb_put(skb, sizeof(*hdr));
+
+       hdr->type = cpu_to_le16(type);
+       hdr->len = cpu_to_le16(len);
+
+       return skb_put_zero(skb, ALIGN(len, 4));
+}
+
+/* Decode a bitfield from a __le32 @value and re-encode it into the radiotap
+ * field bits given by @enc_bits (result is again __le32).
+ */
+#define LE32_DEC_ENC(value, dec_bits, enc_bits) \
+       le32_encode_bits(le32_get_bits(value, dec_bits), enc_bits)
+
+/* Mark @enc_bits as known in the USIG "mask" and copy the value decoded
+ * from @in_value (bits @dec_bits) into the USIG "value".
+ */
+#define IWL_MLD_ENC_USIG_VALUE_MASK(usig, in_value, dec_bits, enc_bits) do { \
+       typeof(enc_bits) _enc_bits = enc_bits; \
+       typeof(usig) _usig = usig; \
+       (_usig)->mask |= cpu_to_le32(_enc_bits); \
+       (_usig)->value |= LE32_DEC_ENC(in_value, dec_bits, _enc_bits); \
+} while (0)
+
+/* Mark the content-channel RU allocation rt_ru as known in
+ * eht->data[rt_data] and copy it from FW PHY data word data<fw_data>,
+ * field <fw_ru> (relies on data1..data5 locals at the expansion site).
+ */
+#define __IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \
+       eht->data[(rt_data)] |= \
+               (cpu_to_le32 \
+                (IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru ## _KNOWN) | \
+                LE32_DEC_ENC(data ## fw_data, \
+                             IWL_RX_PHY_DATA ## fw_data ## _EHT_MU_EXT_RU_ALLOC_ ## fw_ru, \
+                             IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru))
+
+/* extra expansion level so the IEEE80211_RADIOTAP_RU_DATA_* /
+ * IWL_RX_RU_DATA_* arguments below are macro-expanded before pasting
+ */
+#define _IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru)    \
+       __IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru)
+
+/* radiotap EHT data word that holds content channel/RU position cc_ru */
+#define IEEE80211_RADIOTAP_RU_DATA_1_1_1       1
+#define IEEE80211_RADIOTAP_RU_DATA_2_1_1       2
+#define IEEE80211_RADIOTAP_RU_DATA_1_1_2       2
+#define IEEE80211_RADIOTAP_RU_DATA_2_1_2       2
+#define IEEE80211_RADIOTAP_RU_DATA_1_2_1       3
+#define IEEE80211_RADIOTAP_RU_DATA_2_2_1       3
+#define IEEE80211_RADIOTAP_RU_DATA_1_2_2       3
+#define IEEE80211_RADIOTAP_RU_DATA_2_2_2       4
+
+/* FW PHY data word that holds RU allocation value A1..D2 */
+#define IWL_RX_RU_DATA_A1                      2
+#define IWL_RX_RU_DATA_A2                      2
+#define IWL_RX_RU_DATA_B1                      2
+#define IWL_RX_RU_DATA_B2                      4
+#define IWL_RX_RU_DATA_C1                      3
+#define IWL_RX_RU_DATA_C2                      3
+#define IWL_RX_RU_DATA_D1                      4
+#define IWL_RX_RU_DATA_D2                      4
+
+/* Copy content-channel RU allocation rt_ru from FW field fw_ru, looking up
+ * the radiotap data word and FW data word via the tables above.
+ */
+#define IWL_MLD_ENC_EHT_RU(rt_ru, fw_ru)                               \
+       _IWL_MLD_ENC_EHT_RU(IEEE80211_RADIOTAP_RU_DATA_ ## rt_ru,       \
+                           rt_ru,                                      \
+                           IWL_RX_RU_DATA_ ## fw_ru,                   \
+                           fw_ru)
+
+/* Decode the MU-specific EHT PHY information into the radiotap EHT and
+ * USIG fields.
+ *
+ * When the PPDU came with data, the information is taken from the FW RX
+ * PHY data words; otherwise it is taken from the captured USIG-A words
+ * in rx_vec.
+ */
+static void iwl_mld_decode_eht_ext_mu(struct iwl_mld *mld,
+                                     struct iwl_mld_rx_phy_data *phy_data,
+                                     struct ieee80211_rx_status *rx_status,
+                                     struct ieee80211_radiotap_eht *eht,
+                                     struct ieee80211_radiotap_eht_usig *usig)
+{
+       if (phy_data->with_data) {
+               __le32 data1 = phy_data->data1;
+               __le32 data2 = phy_data->data2;
+               __le32 data3 = phy_data->data3;
+               __le32 data4 = phy_data->eht_data4;
+               __le32 data5 = phy_data->data5;
+               u32 phy_bw = phy_data->rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+                                           IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+                                           IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, data4,
+                                           IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS);
+               IWL_MLD_ENC_USIG_VALUE_MASK
+                       (usig, data1, IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2,
+                        IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS);
+
+               /* one user_info entry was reserved for the addressed STA */
+               eht->user_info[0] |=
+                       cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN) |
+                       LE32_DEC_ENC(data5, IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR,
+                                    IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID);
+
+               eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M);
+               eht->data[7] |= LE32_DEC_ENC
+                       (data5, IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA,
+                        IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS);
+
+               /*
+                * Hardware labels the content channels/RU allocation values
+                * as follows:
+                *           Content Channel 1          Content Channel 2
+                *   20 MHz: A1
+                *   40 MHz: A1                         B1
+                *   80 MHz: A1 C1                      B1 D1
+                *  160 MHz: A1 C1 A2 C2                B1 D1 B2 D2
+                *  320 MHz: A1 C1 A2 C2 A3 C3 A4 C4    B1 D1 B2 D2 B3 D3 B4 D4
+                *
+                * However firmware can only give us A1-D2, so the higher
+                * frequencies are missing.
+                */
+
+               /* each wider bandwidth adds RU values on top of the
+                * narrower ones, hence the fallthroughs
+                */
+               switch (phy_bw) {
+               case RATE_MCS_CHAN_WIDTH_320:
+                       /* additional values are missing in RX metadata */
+                       fallthrough;
+               case RATE_MCS_CHAN_WIDTH_160:
+                       /* content channel 1 */
+                       IWL_MLD_ENC_EHT_RU(1_2_1, A2);
+                       IWL_MLD_ENC_EHT_RU(1_2_2, C2);
+                       /* content channel 2 */
+                       IWL_MLD_ENC_EHT_RU(2_2_1, B2);
+                       IWL_MLD_ENC_EHT_RU(2_2_2, D2);
+                       fallthrough;
+               case RATE_MCS_CHAN_WIDTH_80:
+                       /* content channel 1 */
+                       IWL_MLD_ENC_EHT_RU(1_1_2, C1);
+                       /* content channel 2 */
+                       IWL_MLD_ENC_EHT_RU(2_1_2, D1);
+                       fallthrough;
+               case RATE_MCS_CHAN_WIDTH_40:
+                       /* content channel 2 */
+                       IWL_MLD_ENC_EHT_RU(2_1_1, B1);
+                       fallthrough;
+               case RATE_MCS_CHAN_WIDTH_20:
+                       IWL_MLD_ENC_EHT_RU(1_1_1, A1);
+                       break;
+               }
+       } else {
+               /* no accompanying data - use the captured USIG-A words */
+               __le32 usig_a1 = phy_data->rx_vec[0];
+               __le32 usig_a2 = phy_data->rx_vec[1];
+
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1,
+                                           IWL_RX_USIG_A1_DISREGARD,
+                                           IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1,
+                                           IWL_RX_USIG_A1_VALIDATE,
+                                           IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+                                           IWL_RX_USIG_A2_EHT_PPDU_TYPE,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+                                           IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+                                           IWL_RX_USIG_A2_EHT_PUNC_CHANNEL,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+                                           IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B8,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+                                           IWL_RX_USIG_A2_EHT_SIG_MCS,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS);
+               IWL_MLD_ENC_USIG_VALUE_MASK
+                       (usig, usig_a2, IWL_RX_USIG_A2_EHT_SIG_SYM_NUM,
+                        IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+                                           IWL_RX_USIG_A2_EHT_CRC_OK,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC);
+       }
+}
+
+/* Decode the trigger-based (TB) specific EHT PHY information into the
+ * radiotap USIG field.
+ *
+ * When the PPDU came with data, the information is taken from the FW RX
+ * PHY data words; otherwise it is taken from the captured USIG-A words
+ * in rx_vec.
+ */
+static void iwl_mld_decode_eht_ext_tb(struct iwl_mld *mld,
+                                     struct iwl_mld_rx_phy_data *phy_data,
+                                     struct ieee80211_rx_status *rx_status,
+                                     struct ieee80211_radiotap_eht *eht,
+                                     struct ieee80211_radiotap_eht_usig *usig)
+{
+       if (phy_data->with_data) {
+               __le32 data5 = phy_data->data5;
+
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+                                           IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+                                           IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1);
+
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+                                           IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2);
+       } else {
+               /* no accompanying data - use the captured USIG-A words */
+               __le32 usig_a1 = phy_data->rx_vec[0];
+               __le32 usig_a2 = phy_data->rx_vec[1];
+
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1,
+                                           IWL_RX_USIG_A1_DISREGARD,
+                                           IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+                                           IWL_RX_USIG_A2_EHT_PPDU_TYPE,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+                                           IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+                                           IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_1,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+                                           IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_2,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+                                           IWL_RX_USIG_A2_EHT_TRIG_USIG2_DISREGARD,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD);
+               IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+                                           IWL_RX_USIG_A2_EHT_CRC_OK,
+                                           IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC);
+       }
+}
+
+/* Translate the TB-format RU allocation already stored in eht->data[8]
+ * into the nl80211 RU allocation and set the RU-based bandwidth in
+ * rx_status; unknown encodings leave rx_status untouched.
+ */
+static void iwl_mld_decode_eht_ru(struct iwl_mld *mld,
+                                 struct ieee80211_rx_status *rx_status,
+                                 struct ieee80211_radiotap_eht *eht)
+{
+       u32 ru = le32_get_bits(eht->data[8],
+                              IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1);
+       enum nl80211_eht_ru_alloc nl_ru;
+
+       /* Using D1.5 Table 9-53a - Encoding of PS160 and RU Allocation subfields
+        * in an EHT variant User Info field
+        */
+
+       if (ru <= 36)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+       else if (ru <= 52)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52;
+       else if (ru <= 60)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106;
+       else if (ru <= 64)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_242;
+       else if (ru <= 66)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484;
+       else if (ru == 67)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996;
+       else if (ru == 68)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996;
+       else if (ru == 69)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_4x996;
+       else if (ru <= 81)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52P26;
+       else if (ru <= 89)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106P26;
+       else if (ru <= 93)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484P242;
+       else if (ru <= 95)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484;
+       else if (ru <= 99)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242;
+       else if (ru <= 103)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484;
+       else if (ru == 104)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996;
+       else if (ru <= 106)
+               nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484;
+       else
+               return;
+
+       rx_status->bw = RATE_INFO_BW_EHT_RU;
+       rx_status->eht.ru = nl_ru;
+}
+
+/* Decode the EHT PHY data common to all EHT PPDU types into the radiotap
+ * EHT and USIG fields, then dispatch to the MU- or TB-specific decoder
+ * according to the info type.
+ */
+static void iwl_mld_decode_eht_phy_data(struct iwl_mld *mld,
+                                       struct iwl_mld_rx_phy_data *phy_data,
+                                       struct ieee80211_rx_status *rx_status,
+                                       struct ieee80211_radiotap_eht *eht,
+                                       struct ieee80211_radiotap_eht_usig *usig)
+
+{
+       __le32 data0 = phy_data->data0;
+       __le32 data1 = phy_data->data1;
+       __le32 usig_a1 = phy_data->rx_vec[0];
+       u8 info_type = phy_data->info_type;
+
+       /* Not in EHT range */
+       if (info_type < IWL_RX_PHY_INFO_TYPE_EHT_MU ||
+           info_type > IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT)
+               return;
+
+       /* UL/DL and BSS color come from the PHY data words when a frame
+        * was received, from the captured USIG-A words otherwise
+        */
+       usig->common |= cpu_to_le32
+               (IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN |
+                IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN);
+       if (phy_data->with_data) {
+               usig->common |= LE32_DEC_ENC(data0,
+                                            IWL_RX_PHY_DATA0_EHT_UPLINK,
+                                            IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL);
+               usig->common |= LE32_DEC_ENC(data0,
+                                            IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK,
+                                            IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR);
+       } else {
+               usig->common |= LE32_DEC_ENC(usig_a1,
+                                            IWL_RX_USIG_A1_UL_FLAG,
+                                            IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL);
+               usig->common |= LE32_DEC_ENC(usig_a1,
+                                            IWL_RX_USIG_A1_BSS_COLOR,
+                                            IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR);
+       }
+
+       usig->common |=
+               cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED);
+       usig->common |=
+               LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_VALIDATE,
+                            IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK);
+
+       eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE);
+       eht->data[0] |= LE32_DEC_ENC(data0,
+                                    IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK,
+                                    IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
+
+       /* All RU allocating size/index is in TB format */
+       eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT);
+       eht->data[8] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PS160,
+                                    IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_PS_160);
+       eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B0,
+                                    IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0);
+       eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7,
+                                    IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1);
+
+       /* consumes the RU allocation bits filled into data[8] just above */
+       iwl_mld_decode_eht_ru(mld, rx_status, eht);
+
+       /* We only get here in case of IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
+        * which is on only in case of monitor mode so no need to check monitor
+        * mode
+        */
+       eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80);
+       eht->data[1] |=
+               le32_encode_bits(mld->monitor.p80,
+                                IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80);
+
+       usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN);
+       if (phy_data->with_data)
+               usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK,
+                                            IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
+       else
+               usig->common |= LE32_DEC_ENC(usig_a1, IWL_RX_USIG_A1_TXOP_DURATION,
+                                            IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
+
+       eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM);
+       eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM,
+                                    IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM);
+
+       eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM);
+       eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK,
+                                   IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM);
+
+       eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM);
+       eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG,
+                                    IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM);
+
+       /* TODO: what about IWL_RX_PHY_DATA0_EHT_BW320_SLOT */
+
+       if (!le32_get_bits(data0, IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK))
+               usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
+
+       usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN);
+       usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PHY_VER,
+                                    IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER);
+
+       /*
+        * TODO: what about TB - IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE,
+        *                       IWL_RX_PHY_DATA1_EHT_TB_LOW_SS
+        */
+
+       eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF);
+       eht->data[0] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM,
+                                    IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
+
+       /* finally, add the PPDU-type-specific information */
+       if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT ||
+           info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB)
+               iwl_mld_decode_eht_ext_tb(mld, phy_data, rx_status, eht, usig);
+
+       if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT ||
+           info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU)
+               iwl_mld_decode_eht_ext_mu(mld, phy_data, rx_status, eht, usig);
+}
+
+/* Emit the radiotap EHT and U-SIG TLVs for an EHT-modulated frame and
+ * derive the GI/LTF rate info in rx_status from rate_n_flags.
+ * With-data frames carry one user_info entry in the EHT TLV; no-data
+ * (sniffer) frames take NSS/beamformed from the PHY rx_vec instead.
+ */
+static void iwl_mld_rx_eht(struct iwl_mld *mld, struct sk_buff *skb,
+                          struct iwl_mld_rx_phy_data *phy_data,
+                          int queue)
+{
+       struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_radiotap_eht *eht;
+       struct ieee80211_radiotap_eht_usig *usig;
+       size_t eht_len = sizeof(*eht);
+
+       u32 rate_n_flags = phy_data->rate_n_flags;
+       u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+       /* EHT and HE have the same values for LTF */
+       u8 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
+       u16 phy_info = phy_data->phy_info;
+       u32 bw;
+
+       /* u32 for 1 user_info */
+       if (phy_data->with_data)
+               eht_len += sizeof(u32);
+
+       eht = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT, eht_len);
+
+       usig = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG,
+                                       sizeof(*usig));
+       rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+       usig->common |=
+               cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN);
+
+       /* specific handling for 320MHz */
+       bw = u32_get_bits(rate_n_flags, RATE_MCS_CHAN_WIDTH_MSK);
+       if (bw == RATE_MCS_CHAN_WIDTH_320_VAL)
+               bw += le32_get_bits(phy_data->data0,
+                                   IWL_RX_PHY_DATA0_EHT_BW320_SLOT);
+
+       usig->common |= cpu_to_le32
+               (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW, bw));
+
+       /* report the AMPDU-EOF bit on single frames */
+       if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+               rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+               rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+               if (phy_data->data0 &
+                   cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
+                       rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+       }
+
+       if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
+               iwl_mld_decode_eht_phy_data(mld, phy_data, rx_status, eht, usig);
+
+/* compile-time proof that the FW HE-type encoding matches radiotap's */
+#define CHECK_TYPE(F)                                                  \
+       BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F !=        \
+                    (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+       CHECK_TYPE(SU);
+       CHECK_TYPE(EXT_SU);
+       CHECK_TYPE(MU);
+       CHECK_TYPE(TRIG);
+
+       /* map the FW GI/LTF code to nl80211 GI and radiotap LTF size;
+        * the mapping depends on whether this is a triggered (TB) PPDU
+        */
+       switch (u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK)) {
+       case 0:
+               if (he_type == RATE_MCS_HE_TYPE_TRIG) {
+                       rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6;
+                       ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
+               } else {
+                       rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;
+                       ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+               }
+               break;
+       case 1:
+               rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6;
+               ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+               break;
+       case 2:
+               ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+               if (he_type == RATE_MCS_HE_TYPE_TRIG)
+                       rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2;
+               else
+                       rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;
+               break;
+       case 3:
+               if (he_type != RATE_MCS_HE_TYPE_TRIG) {
+                       ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+                       rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2;
+               }
+               break;
+       default:
+               /* nothing here */
+               break;
+       }
+
+       if (ltf != IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN) {
+               eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI);
+               eht->data[0] |= cpu_to_le32
+                       (FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_LTF,
+                                   ltf) |
+                        FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_GI,
+                                   rx_status->eht.gi));
+       }
+
+       /* no-data (sniffer) frames: NSS/beamformed come from the PHY rx_vec */
+       if (!phy_data->with_data) {
+               eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S |
+                                         IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S);
+               eht->data[7] |=
+                       le32_encode_bits(le32_get_bits(phy_data->rx_vec[2],
+                                                      RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK),
+                                        IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
+               if (rate_n_flags & RATE_MCS_BF_MSK)
+                       eht->data[7] |=
+                               cpu_to_le32(IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S);
+       } else {
+               eht->user_info[0] |=
+                       cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+                                   IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
+                                   IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
+                                   IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O |
+                                   IEEE80211_RADIOTAP_EHT_USER_INFO_DATA_FOR_USER);
+
+               if (rate_n_flags & RATE_MCS_BF_MSK)
+                       eht->user_info[0] |=
+                               cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O);
+
+               if (rate_n_flags & RATE_MCS_LDPC_MSK)
+                       eht->user_info[0] |=
+                               cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING);
+
+               eht->user_info[0] |= cpu_to_le32
+                       (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS,
+                                   u32_get_bits(rate_n_flags,
+                                                RATE_VHT_MCS_RATE_CODE_MSK)) |
+                        FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O,
+                                   u32_get_bits(rate_n_flags,
+                                                RATE_MCS_NSS_MSK)));
+       }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/* Append the Intel vendor-namespace radiotap TLV carrying the current
+ * sniffer AID configuration (monitor.cur_aid); no-op when no AID is set.
+ */
+static void iwl_mld_add_rtap_sniffer_config(struct iwl_mld *mld,
+                                           struct sk_buff *skb)
+{
+       struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_radiotap_vendor_content *radiotap;
+       const u16 vendor_data_len = sizeof(mld->monitor.cur_aid);
+
+       if (!mld->monitor.cur_aid)
+               return;
+
+       radiotap =
+               iwl_mld_radiotap_put_tlv(skb,
+                                        IEEE80211_RADIOTAP_VENDOR_NAMESPACE,
+                                        sizeof(*radiotap) + vendor_data_len);
+
+       /* Intel OUI */
+       radiotap->oui[0] = 0xf6;
+       radiotap->oui[1] = 0x54;
+       radiotap->oui[2] = 0x25;
+       /* radiotap sniffer config sub-namespace */
+       radiotap->oui_subtype = 1;
+       radiotap->vendor_type = 0;
+
+       /* fill the data now */
+       memcpy(radiotap->data, &mld->monitor.cur_aid,
+              sizeof(mld->monitor.cur_aid));
+
+       rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+}
+#endif
+
+/* Fill mac80211 rx_status for a received frame (or a no-data PHY
+ * notification): CRC/overrun flags, timestamps, signal, bandwidth and
+ * encoding/rate, delegating HE/EHT radiotap details to the helpers.
+ * @mpdu_desc/@hdr may be NULL only when !phy_data->with_data.
+ */
+static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb,
+                                  struct iwl_mld_rx_phy_data *phy_data,
+                                  struct iwl_rx_mpdu_desc *mpdu_desc,
+                                  struct ieee80211_hdr *hdr,
+                                  int queue)
+{
+       struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+       u32 format = phy_data->rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+       u32 rate_n_flags = phy_data->rate_n_flags;
+       u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK);
+       bool is_sgi = rate_n_flags & RATE_MCS_SGI_MSK;
+
+       if (WARN_ON_ONCE(phy_data->with_data && (!mpdu_desc || !hdr)))
+               return;
+
+       /* Keep packets with CRC errors (and with overrun) for monitor mode
+        * (otherwise the firmware discards them) but mark them as bad.
+        */
+       if (phy_data->with_data &&
+           (!(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) ||
+            !(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK)))) {
+               IWL_DEBUG_RX(mld, "Bad CRC or FIFO: 0x%08X.\n",
+                            le32_to_cpu(mpdu_desc->status));
+               rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+       }
+
+       phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE;
+
+       /* with TSF overload, data1 carries the info type instead of
+        * a timestamp, so only read the TSF when it is not overloaded
+        */
+       if (phy_data->with_data &&
+           likely(!(phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
+               rx_status->mactime =
+                       le64_to_cpu(mpdu_desc->v3.tsf_on_air_rise);
+
+               /* TSF as indicated by the firmware is at INA time */
+               rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+       } else {
+               phy_data->info_type =
+                       le32_get_bits(phy_data->data1,
+                                     IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
+       }
+
+       /* management stuff on default queue */
+       if (!queue && phy_data->with_data &&
+           unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+                    ieee80211_is_probe_resp(hdr->frame_control))) {
+               rx_status->boottime_ns = ktime_get_boottime_ns();
+
+               if (mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_ENABLED)
+                       mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_FOUND;
+       }
+
+       /* set the preamble flag if appropriate */
+       if (format == RATE_MCS_CCK_MSK &&
+           phy_data->phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
+               rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+
+       iwl_mld_fill_signal(mld, rx_status, phy_data);
+
+       /* This may be overridden by iwl_mld_rx_he() to HE_RU */
+       switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+       case RATE_MCS_CHAN_WIDTH_20:
+               /* nothing to do - leave rx_status->bw at its default */
+               break;
+       case RATE_MCS_CHAN_WIDTH_40:
+               rx_status->bw = RATE_INFO_BW_40;
+               break;
+       case RATE_MCS_CHAN_WIDTH_80:
+               rx_status->bw = RATE_INFO_BW_80;
+               break;
+       case RATE_MCS_CHAN_WIDTH_160:
+               rx_status->bw = RATE_INFO_BW_160;
+               break;
+       case RATE_MCS_CHAN_WIDTH_320:
+               rx_status->bw = RATE_INFO_BW_320;
+               break;
+       }
+
+       /* must be before L-SIG data */
+       if (format == RATE_MCS_HE_MSK)
+               iwl_mld_rx_he(mld, skb, phy_data, queue);
+
+       iwl_mld_decode_lsig(skb, phy_data);
+
+       rx_status->device_timestamp = phy_data->gp2_on_air_rise;
+
+       /* using TLV format and must be after all fixed len fields */
+       if (format == RATE_MCS_EHT_MSK)
+               iwl_mld_rx_eht(mld, skb, phy_data, queue);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (unlikely(mld->monitor.on))
+               iwl_mld_add_rtap_sniffer_config(mld, skb);
+#endif
+
+       if (format != RATE_MCS_CCK_MSK && is_sgi)
+               rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+       if (rate_n_flags & RATE_MCS_LDPC_MSK)
+               rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+       switch (format) {
+       case RATE_MCS_HT_MSK:
+               rx_status->encoding = RX_ENC_HT;
+               rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
+               rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+               break;
+       case RATE_MCS_VHT_MSK:
+       case RATE_MCS_HE_MSK:
+       case RATE_MCS_EHT_MSK:
+               if (format == RATE_MCS_VHT_MSK) {
+                       rx_status->encoding = RX_ENC_VHT;
+               } else if (format == RATE_MCS_HE_MSK) {
+                       rx_status->encoding = RX_ENC_HE;
+                       rx_status->he_dcm =
+                               !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+               } else if (format == RATE_MCS_EHT_MSK) {
+                       rx_status->encoding = RX_ENC_EHT;
+               }
+
+               rx_status->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
+               rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
+               rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+               break;
+       default: {
+               /* legacy (CCK/OFDM) rates: translate the HW rate index */
+               int rate =
+                   iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
+                                                         rx_status->band);
+
+               /* valid rate */
+               if (rate >= 0 && rate <= 0xFF) {
+                       rx_status->rate_idx = rate;
+                       break;
+               }
+
+               /* invalid rate */
+               rx_status->rate_idx = 0;
+
+               if (net_ratelimit())
+                       IWL_ERR(mld, "invalid rate_n_flags=0x%x, band=%d\n",
+                               rate_n_flags, rx_status->band);
+               break;
+               }
+       }
+}
+
+/* iwl_mld_build_rx_skb - copy the 802.11 header (plus possibly the whole
+ * frame, if it fits in the skb head) into @skb and attach any remaining
+ * payload as a page fragment stolen from @rxb.
+ * Returns 0 on success or -EINVAL if the header is longer than the data.
+ */
+static int iwl_mld_build_rx_skb(struct iwl_mld *mld, struct sk_buff *skb,
+                               struct ieee80211_hdr *hdr, u16 len,
+                               u8 crypt_len, struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+       unsigned int headlen, fraglen, pad_len = 0;
+       unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+       /* MIC/CRC length is stored in 16-bit words, hence the << 1 */
+       u8 mic_crc_len = u8_get_bits(desc->mac_flags1,
+                                    IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1;
+
+       if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
+               len -= 2;
+               pad_len = 2;
+       }
+
+       /* For non monitor interface strip the bytes the RADA might not have
+        * removed (it might be disabled, e.g. for mgmt frames). As a monitor
+        * interface cannot exist with other interfaces, this removal is safe
+        * and sufficient, in monitor mode there's no decryption being done.
+        */
+       if (len > mic_crc_len && !ieee80211_hw_check(mld->hw, RX_INCLUDES_FCS))
+               len -= mic_crc_len;
+
+       /* If frame is small enough to fit in skb->head, pull it completely.
+        * If not, only pull ieee80211_hdr (including crypto if present, and
+        * an additional 8 bytes for SNAP/ethertype, see below) so that
+        * splice() or TCP coalesce are more efficient.
+        *
+        * Since, in addition, ieee80211_data_to_8023() always pull in at
+        * least 8 bytes (possibly more for mesh) we can do the same here
+        * to save the cost of doing it later. That still doesn't pull in
+        * the actual IP header since the typical case has a SNAP header.
+        * If the latter changes (there are efforts in the standards group
+        * to do so) we should revisit this and ieee80211_data_to_8023().
+        */
+       headlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
+
+       /* The firmware may align the packet to DWORD.
+        * The padding is inserted after the IV.
+        * After copying the header + IV skip the padding if
+        * present before copying packet data.
+        */
+       hdrlen += crypt_len;
+
+       if (unlikely(headlen < hdrlen))
+               return -EINVAL;
+
+       /* Since data doesn't move data while putting data on skb and that is
+        * the only way we use, data + len is the next place that hdr would
+        * be put
+        */
+       skb_set_mac_header(skb, skb->len);
+       skb_put_data(skb, hdr, hdrlen);
+       skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
+
+       /* Only keep CHECKSUM_COMPLETE for frames whose SNAP ethertype the
+        * hardware is known to checksum correctly; otherwise fall back to
+        * CHECKSUM_NONE so the stack verifies it in software.
+        */
+       if (skb->ip_summed == CHECKSUM_COMPLETE) {
+               struct {
+                       u8 hdr[6];
+                       __be16 type;
+               } __packed *shdr = (void *)((u8 *)hdr + hdrlen + pad_len);
+
+               if (unlikely(headlen - hdrlen < sizeof(*shdr) ||
+                            !ether_addr_equal(shdr->hdr, rfc1042_header) ||
+                            (shdr->type != htons(ETH_P_IP) &&
+                             shdr->type != htons(ETH_P_ARP) &&
+                             shdr->type != htons(ETH_P_IPV6) &&
+                             shdr->type != htons(ETH_P_8021Q) &&
+                             shdr->type != htons(ETH_P_PAE) &&
+                             shdr->type != htons(ETH_P_TDLS))))
+                       skb->ip_summed = CHECKSUM_NONE;
+       }
+
+       fraglen = len - headlen;
+
+       if (fraglen) {
+               int offset = (u8 *)hdr + headlen + pad_len -
+                            (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
+
+               skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+                               fraglen, rxb->truesize);
+       }
+
+       return 0;
+}
+
+/* returns true if a packet is a duplicate or invalid tid and
+ * should be dropped. Updates AMSDU PN tracking info
+ */
+VISIBLE_IF_IWLWIFI_KUNIT
+bool
+iwl_mld_is_dup(struct iwl_mld *mld, struct ieee80211_sta *sta,
+              struct ieee80211_hdr *hdr,
+              const struct iwl_rx_mpdu_desc *mpdu_desc,
+              struct ieee80211_rx_status *rx_status, int queue)
+{
+       struct iwl_mld_sta *mld_sta;
+       struct iwl_mld_rxq_dup_data *dup_data;
+       u8 tid, sub_frame_idx;
+
+       if (WARN_ON(!sta))
+               return false;
+
+       mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+       if (WARN_ON_ONCE(!mld_sta->dup_data))
+               return false;
+
+       /* duplicate state is tracked per RX queue */
+       dup_data = &mld_sta->dup_data[queue];
+
+       /* Drop duplicate 802.11 retransmissions
+        * (IEEE 802.11-2020: 10.3.2.14 "Duplicate detection and recovery")
+        */
+       if (ieee80211_is_ctl(hdr->frame_control) ||
+           ieee80211_is_any_nullfunc(hdr->frame_control) ||
+           is_multicast_ether_addr(hdr->addr1))
+               return false;
+
+       if (ieee80211_is_data_qos(hdr->frame_control)) {
+               /* frame has qos control */
+               tid = ieee80211_get_tid(hdr);
+               if (tid >= IWL_MAX_TID_COUNT)
+                       return true;
+       } else {
+               /* non-QoS frames share one tracking slot at this index */
+               tid = IWL_MAX_TID_COUNT;
+       }
+
+       /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
+       sub_frame_idx = mpdu_desc->amsdu_info &
+               IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+       if (IWL_FW_CHECK(mld,
+                        sub_frame_idx > 0 &&
+                        !(mpdu_desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU),
+                        "got sub_frame_idx=%d but A-MSDU flag is not set\n",
+                        sub_frame_idx))
+               return true;
+
+       /* retry of an already-seen seq_ctrl/sub-frame pair -> duplicate */
+       if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
+                    dup_data->last_seq[tid] == hdr->seq_ctrl &&
+                    dup_data->last_sub_frame_idx[tid] >= sub_frame_idx))
+               return true;
+
+       /* Allow same PN as the first subframe for following sub frames */
+       if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
+           sub_frame_idx > dup_data->last_sub_frame_idx[tid])
+               rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
+
+       dup_data->last_seq[tid] = hdr->seq_ctrl;
+       dup_data->last_sub_frame_idx[tid] = sub_frame_idx;
+
+       rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+
+       return false;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_is_dup);
+
+/* Refresh the BA session's last-RX timestamp for session-expiry tracking.
+ * Looks the session up by @baid; silently returns if the session has no
+ * timeout configured.
+ */
+static void iwl_mld_update_last_rx_timestamp(struct iwl_mld *mld, u8 baid)
+{
+       struct iwl_mld_baid_data *ba_data =
+               rcu_dereference(mld->fw_id_to_ba[baid]);
+       unsigned long now = jiffies;
+
+       if (!ba_data) {
+               IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid);
+               return;
+       }
+
+       if (!ba_data->timeout)
+               return;
+
+       /* Refresh the timestamp at most once per timeout period to keep
+        * this cache line from bouncing between RX queues. Worst case the
+        * session expires only after about 2 * timeout, which is
+        * negligible (the store itself is atomic).
+        */
+       if (time_is_before_jiffies(ba_data->last_rx_timestamp +
+                                  TU_TO_JIFFIES(ba_data->timeout)))
+               ba_data->last_rx_timestamp = now;
+}
+
+/* Processes received packets for a station.
+ * Sets *drop to true if the packet should be dropped.
+ * Returns the station if found, or NULL otherwise.
+ * Must be called under rcu_read_lock() (fw_id_to_link_sta is
+ * rcu_dereference'd here).
+ */
+static struct ieee80211_sta *
+iwl_mld_rx_with_sta(struct iwl_mld *mld, struct ieee80211_hdr *hdr,
+                   struct sk_buff *skb,
+                   const struct iwl_rx_mpdu_desc *mpdu_desc,
+                   const struct iwl_rx_packet *pkt, int queue, bool *drop)
+{
+       struct ieee80211_sta *sta = NULL;
+       struct ieee80211_link_sta *link_sta = NULL;
+       struct ieee80211_rx_status *rx_status;
+       u8 baid;
+
+       if (mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
+               u8 sta_id = le32_get_bits(mpdu_desc->status,
+                                         IWL_RX_MPDU_STATUS_STA_ID);
+
+               if (IWL_FW_CHECK(mld,
+                                sta_id >= mld->fw->ucode_capa.num_stations,
+                                "rx_mpdu: invalid sta_id %d\n", sta_id))
+                       return NULL;
+
+               link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+               if (!IS_ERR_OR_NULL(link_sta))
+                       sta = link_sta->sta;
+       } else if (!is_multicast_ether_addr(hdr->addr2)) {
+               /* Passing NULL is fine since we prevent two stations with the
+                * same address from being added.
+                */
+               sta = ieee80211_find_sta_by_ifaddr(mld->hw, hdr->addr2, NULL);
+       }
+
+       /* we may not have any station yet */
+       if (!sta)
+               return NULL;
+
+       rx_status = IEEE80211_SKB_RXCB(skb);
+
+       /* for MLO, tell mac80211 which link the frame arrived on */
+       if (link_sta && sta->valid_links) {
+               rx_status->link_valid = true;
+               rx_status->link_id = link_sta->link_id;
+       }
+
+       /* fill checksum */
+       if (ieee80211_is_data(hdr->frame_control) &&
+           pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) {
+               u16 hwsum = be16_to_cpu(mpdu_desc->v3.raw_xsum);
+
+               skb->ip_summed = CHECKSUM_COMPLETE;
+               skb->csum = csum_unfold(~(__force __sum16)hwsum);
+       }
+
+       if (iwl_mld_is_dup(mld, sta, hdr, mpdu_desc, rx_status, queue)) {
+               IWL_DEBUG_DROP(mld, "Dropping duplicate packet 0x%x\n",
+                              le16_to_cpu(hdr->seq_ctrl));
+               *drop = true;
+               return NULL;
+       }
+
+       /* keep the BA session alive if the frame belongs to one */
+       baid = le32_get_bits(mpdu_desc->reorder_data,
+                            IWL_RX_MPDU_REORDER_BAID_MASK);
+       if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
+               iwl_mld_update_last_rx_timestamp(mld, baid);
+
+       if (link_sta && ieee80211_is_data(hdr->frame_control)) {
+               u8 sub_frame_idx = mpdu_desc->amsdu_info &
+                       IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+               /* 0 means not an A-MSDU, and 1 means a new A-MSDU */
+               if (!sub_frame_idx || sub_frame_idx == 1)
+                       iwl_mld_count_mpdu_rx(link_sta, queue, 1);
+
+               if (!is_multicast_ether_addr(hdr->addr1))
+                       iwl_mld_low_latency_update_counters(mld, hdr, sta,
+                                                           queue);
+       }
+
+       return sta;
+}
+
+/* length of the key ID field at the end of a beacon's mgmt MIC element */
+#define KEY_IDX_LEN 2
+
+/* Validate beacon protection (BIGTK) status reported by the firmware.
+ * Returns 0 to accept the frame, -1 to drop it; dropped beacons are
+ * reported to userspace via cfg80211_rx_unprot_mlme_mgmt() where a
+ * netdev exists. Non-beacon management frames are accepted as-is.
+ */
+static int iwl_mld_rx_mgmt_prot(struct ieee80211_sta *sta,
+                               struct ieee80211_hdr *hdr,
+                               struct ieee80211_rx_status *rx_status,
+                               u32 mpdu_status,
+                               u32 mpdu_len)
+{
+       struct wireless_dev *wdev;
+       struct iwl_mld_sta *mld_sta;
+       struct iwl_mld_vif *mld_vif;
+       u8 keyidx;
+       struct ieee80211_key_conf *key;
+       const u8 *frame = (void *)hdr;
+
+       if ((mpdu_status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+            IWL_RX_MPDU_STATUS_SEC_NONE)
+               return 0;
+
+       /* For non-beacon, we don't really care. But beacons may
+        * be filtered out, and we thus need the firmware's replay
+        * detection, otherwise beacons the firmware previously
+        * filtered could be replayed, or something like that, and
+        * it can filter a lot - though usually only if nothing has
+        * changed.
+        */
+       if (!ieee80211_is_beacon(hdr->frame_control))
+               return 0;
+
+       if (!sta)
+               return -1;
+
+       mld_sta = iwl_mld_sta_from_mac80211(sta);
+       mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
+
+       /* key mismatch - will also report !MIC_OK but we shouldn't count it */
+       if (!(mpdu_status & IWL_RX_MPDU_STATUS_KEY_VALID))
+               goto report;
+
+       /* good cases */
+       if (likely(mpdu_status & IWL_RX_MPDU_STATUS_MIC_OK &&
+                  !(mpdu_status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) {
+               rx_status->flag |= RX_FLAG_DECRYPTED;
+               return 0;
+       }
+
+       /* both keys will have the same cipher and MIC length, use
+        * whichever one is available
+        */
+       key = rcu_dereference(mld_vif->bigtks[0]);
+       if (!key) {
+               key = rcu_dereference(mld_vif->bigtks[1]);
+               if (!key)
+                       goto report;
+       }
+
+       if (mpdu_len < key->icv_len + IEEE80211_GMAC_PN_LEN + KEY_IDX_LEN)
+               goto report;
+
+       /* get the real key ID */
+       keyidx = frame[mpdu_len - key->icv_len - IEEE80211_GMAC_PN_LEN - KEY_IDX_LEN];
+       /* and if that's the other key, look it up */
+       if (keyidx != key->keyidx) {
+               /* shouldn't happen since firmware checked, but be safe
+                * in case the MIC length is wrong too, for example
+                */
+               if (keyidx != 6 && keyidx != 7)
+                       return -1;
+
+               /* BIGTK key IDs 6 and 7 map to bigtks[0] and bigtks[1] */
+               key = rcu_dereference(mld_vif->bigtks[keyidx - 6]);
+               if (!key)
+                       goto report;
+       }
+
+       /* Report status to mac80211 */
+       if (!(mpdu_status & IWL_RX_MPDU_STATUS_MIC_OK))
+               ieee80211_key_mic_failure(key);
+       else if (mpdu_status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
+               ieee80211_key_replay(key);
+report:
+       wdev = ieee80211_vif_to_wdev(mld_sta->vif);
+       if (wdev->netdev)
+               cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr,
+                                            mpdu_len);
+
+       return -1;
+}
+
+/* Translate the firmware's decryption status into mac80211 rx_status
+ * flags and report (via @crypto_len) the crypto header length still
+ * present in the frame. Returns 0 to keep the frame, -1 to drop it
+ * (bad CCM/GCM MIC or bad TKIP ICV).
+ */
+static int iwl_mld_rx_crypto(struct iwl_mld *mld,
+                            struct ieee80211_sta *sta,
+                            struct ieee80211_hdr *hdr,
+                            struct ieee80211_rx_status *rx_status,
+                            struct iwl_rx_mpdu_desc *desc, int queue,
+                            u32 pkt_flags, u8 *crypto_len)
+{
+       u32 status = le32_to_cpu(desc->status);
+
+       /* unprotected management frames get the beacon-protection check */
+       if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
+                    !ieee80211_has_protected(hdr->frame_control)))
+               return iwl_mld_rx_mgmt_prot(sta, hdr, rx_status, status,
+                                           le16_to_cpu(desc->mpdu_len));
+
+       if (!ieee80211_has_protected(hdr->frame_control) ||
+           (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+           IWL_RX_MPDU_STATUS_SEC_NONE)
+               return 0;
+
+       switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
+       case IWL_RX_MPDU_STATUS_SEC_CCM:
+       case IWL_RX_MPDU_STATUS_SEC_GCM:
+               BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
+               if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) {
+                       IWL_DEBUG_DROP(mld,
+                                      "Dropping packet, bad MIC (CCM/GCM)\n");
+                       return -1;
+               }
+
+               rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
+               *crypto_len = IEEE80211_CCMP_HDR_LEN;
+               return 0;
+       case IWL_RX_MPDU_STATUS_SEC_TKIP:
+               if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
+                       return -1;
+
+               /* a TKIP Michael MIC failure is reported, not dropped, so
+                * mac80211 can run TKIP countermeasures
+                */
+               if (!(status & RX_MPDU_RES_STATUS_MIC_OK))
+                       rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+               /* RADA already stripped the ICV/MMIC in this case */
+               if (pkt_flags & FH_RSCSR_RADA_EN) {
+                       rx_status->flag |= RX_FLAG_ICV_STRIPPED;
+                       rx_status->flag |= RX_FLAG_MMIC_STRIPPED;
+               }
+
+               *crypto_len = IEEE80211_TKIP_IV_LEN;
+               rx_status->flag |= RX_FLAG_DECRYPTED;
+               return 0;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+/* Maintain a non-zero A-MPDU reference for radiotap reporting: bump it
+ * whenever the PHY toggle bit flips, i.e. a new aggregation started.
+ */
+static void iwl_mld_rx_update_ampdu_ref(struct iwl_mld *mld,
+                                       struct iwl_mld_rx_phy_data *phy_data,
+                                       struct ieee80211_rx_status *rx_status)
+{
+       bool toggle_bit =
+               phy_data->phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+
+       rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+       /* Toggle is switched whenever new aggregation starts. Make
+        * sure ampdu_reference is never 0 so we can later use it to
+        * see if the frame was really part of an A-MPDU or not.
+        */
+       if (toggle_bit != mld->monitor.ampdu_toggle) {
+               mld->monitor.ampdu_ref++;
+               if (mld->monitor.ampdu_ref == 0)
+                       mld->monitor.ampdu_ref++;
+               mld->monitor.ampdu_toggle = toggle_bit;
+               phy_data->first_subframe = true;
+       }
+       rx_status->ampdu_reference = mld->monitor.ampdu_ref;
+}
+
+/* Set rx_status band and center frequency from the band encoded in the
+ * MPDU descriptor and the channel recorded in @phy_data.
+ */
+static void
+iwl_mld_fill_rx_status_band_freq(struct iwl_mld_rx_phy_data *phy_data,
+                                struct iwl_rx_mpdu_desc *mpdu_desc,
+                                struct ieee80211_rx_status *rx_status)
+{
+       rx_status->band =
+               iwl_mld_phy_band_to_nl80211(BAND_IN_RX_STATUS(mpdu_desc->mac_phy_idx));
+       rx_status->freq =
+               ieee80211_channel_to_frequency(phy_data->channel,
+                                              rx_status->band);
+}
+
+/* Handle an RX MPDU notification from the FW: validate the sizes, build
+ * an skb around the 802.11 frame, fill mac80211's RX status (A-MPDU and
+ * crypto info included), run the frame through the reorder buffer and
+ * finally pass it to mac80211 - unless it is dropped or buffered along
+ * the way.
+ */
+void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
+                    struct iwl_rx_cmd_buffer *rxb, int queue)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_mld_rx_phy_data phy_data = {};
+       struct iwl_rx_mpdu_desc *mpdu_desc = (void *)pkt->data;
+       struct ieee80211_sta *sta;
+       struct ieee80211_hdr *hdr;
+       struct sk_buff *skb;
+       size_t mpdu_desc_size = sizeof(*mpdu_desc);
+       bool drop = false;
+       u8 crypto_len = 0;
+       u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+       u32 mpdu_len;
+       enum iwl_mld_reorder_result reorder_res;
+       struct ieee80211_rx_status *rx_status;
+
+       /* don't process frames while a HW restart is in progress */
+       if (unlikely(mld->fw_status.in_hw_restart))
+               return;
+
+       if (IWL_FW_CHECK(mld, pkt_len < mpdu_desc_size,
+                        "Bad REPLY_RX_MPDU_CMD size (%d)\n", pkt_len))
+               return;
+
+       mpdu_len = le16_to_cpu(mpdu_desc->mpdu_len);
+
+       if (IWL_FW_CHECK(mld, mpdu_len + mpdu_desc_size > pkt_len,
+                        "FW lied about packet len (%d)\n", pkt_len))
+               return;
+
+       /* Don't use dev_alloc_skb(), we'll have enough headroom once
+        * ieee80211_hdr pulled.
+        */
+       skb = alloc_skb(128, GFP_ATOMIC);
+       if (!skb) {
+               IWL_ERR(mld, "alloc_skb failed\n");
+               return;
+       }
+
+       /* the 802.11 header follows the MPDU descriptor */
+       hdr = (void *)(pkt->data + mpdu_desc_size);
+
+       iwl_mld_fill_phy_data(mpdu_desc, &phy_data);
+
+       if (mpdu_desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
+               /* If the device inserted padding it means that (it thought)
+                * the 802.11 header wasn't a multiple of 4 bytes long. In
+                * this case, reserve two bytes at the start of the SKB to
+                * align the payload properly in case we end up copying it.
+                */
+               skb_reserve(skb, 2);
+       }
+
+       rx_status = IEEE80211_SKB_RXCB(skb);
+
+       /* this is needed early */
+       iwl_mld_fill_rx_status_band_freq(&phy_data, mpdu_desc, rx_status);
+
+       rcu_read_lock();
+
+       sta = iwl_mld_rx_with_sta(mld, hdr, skb, mpdu_desc, pkt, queue, &drop);
+       if (drop)
+               goto drop;
+
+       /* update aggregation data for monitor sake on default queue */
+       if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU))
+               iwl_mld_rx_update_ampdu_ref(mld, &phy_data, rx_status);
+
+       iwl_mld_rx_fill_status(mld, skb, &phy_data, mpdu_desc, hdr, queue);
+
+       if (iwl_mld_rx_crypto(mld, sta, hdr, rx_status, mpdu_desc, queue,
+                             le32_to_cpu(pkt->len_n_flags), &crypto_len))
+               goto drop;
+
+       if (iwl_mld_build_rx_skb(mld, skb, hdr, mpdu_len, crypto_len, rxb))
+               goto drop;
+
+       /* time sync frame is saved and will be released later when the
+        * notification with the timestamps arrives.
+        */
+       if (iwl_mld_time_sync_frame(mld, skb, hdr->addr2))
+               goto out;
+
+       reorder_res = iwl_mld_reorder(mld, napi, queue, sta, skb, mpdu_desc);
+       switch (reorder_res) {
+       case IWL_MLD_PASS_SKB:
+               break;
+       case IWL_MLD_DROP_SKB:
+               goto drop;
+       case IWL_MLD_BUFFERED_SKB:
+               /* the reorder buffer took ownership of the skb */
+               goto out;
+       default:
+               WARN_ON(1);
+               goto drop;
+       }
+
+       iwl_mld_pass_packet_to_mac80211(mld, napi, skb, queue, sta);
+
+       goto out;
+
+drop:
+       kfree_skb(skb);
+out:
+       rcu_read_unlock();
+}
+
+/* how long to wait for all RX queues to handle the sync notification */
+#define SYNC_RX_QUEUE_TIMEOUT (HZ)
+/* Send TRIGGER_RX_QUEUES_NOTIF_CMD so that the FW echoes an internal
+ * notification (@type + cookie + optional @notif_payload) to every RSS
+ * queue, then wait up to SYNC_RX_QUEUE_TIMEOUT until all queues have
+ * handled it. The payload size must keep the total a DWORD multiple.
+ */
+void iwl_mld_sync_rx_queues(struct iwl_mld *mld,
+                           enum iwl_mld_internal_rxq_notif_type type,
+                           const void *notif_payload, u32 notif_payload_size)
+{
+       u8 num_rx_queues = mld->trans->num_rx_queues;
+       struct {
+               struct iwl_rxq_sync_cmd sync_cmd;
+               struct iwl_mld_internal_rxq_notif notif;
+       } __packed cmd = {
+               .sync_cmd.rxq_mask = cpu_to_le32(BIT(num_rx_queues) - 1),
+               .sync_cmd.count =
+                       cpu_to_le32(sizeof(struct iwl_mld_internal_rxq_notif) +
+                                   notif_payload_size),
+               .notif.type = type,
+               .notif.cookie = mld->rxq_sync.cookie,
+       };
+       struct iwl_host_cmd hcmd = {
+               .id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD),
+               .data[0] = &cmd,
+               .len[0] = sizeof(cmd),
+               .data[1] = notif_payload,
+               .len[1] = notif_payload_size,
+       };
+       int ret;
+
+       /* size must be a multiple of DWORD */
+       if (WARN_ON(cmd.sync_cmd.count & cpu_to_le32(3)))
+               return;
+
+       /* one bit per queue; each queue clears its bit when it responds */
+       mld->rxq_sync.state = (1 << num_rx_queues) - 1;
+
+       ret = iwl_mld_send_cmd(mld, &hcmd);
+       if (ret) {
+               IWL_ERR(mld, "Failed to trigger RX queues sync (%d)\n", ret);
+               goto out;
+       }
+
+       ret = wait_event_timeout(mld->rxq_sync.waitq,
+                                READ_ONCE(mld->rxq_sync.state) == 0,
+                                SYNC_RX_QUEUE_TIMEOUT);
+       WARN_ONCE(!ret, "RXQ sync failed: state=0x%lx, cookie=%d\n",
+                 mld->rxq_sync.state, mld->rxq_sync.cookie);
+
+out:
+       /* bump the cookie so late responses can be told apart */
+       mld->rxq_sync.state = 0;
+       mld->rxq_sync.cookie++;
+}
+
+/* Handle the FW echo of a RX queues sync notification on @queue:
+ * validate size and cookie, dispatch on the internal type, then clear
+ * this queue's bit in the sync state and wake the waiter once all
+ * queues have responded.
+ */
+void iwl_mld_handle_rx_queues_sync_notif(struct iwl_mld *mld,
+                                        struct napi_struct *napi,
+                                        struct iwl_rx_packet *pkt, int queue)
+{
+       struct iwl_rxq_sync_notification *notif;
+       struct iwl_mld_internal_rxq_notif *internal_notif;
+       u32 len = iwl_rx_packet_payload_len(pkt);
+       size_t combined_notif_len = sizeof(*notif) + sizeof(*internal_notif);
+
+       notif = (void *)pkt->data;
+       internal_notif = (void *)notif->payload;
+
+       if (IWL_FW_CHECK(mld, len < combined_notif_len,
+                        "invalid notification size %u (%zu)\n",
+                        len, combined_notif_len))
+               return;
+
+       /* from here on, len is the size of the type-specific payload */
+       len -= combined_notif_len;
+
+       if (IWL_FW_CHECK(mld, mld->rxq_sync.cookie != internal_notif->cookie,
+                        "received expired RX queue sync message (cookie=%d expected=%d q[%d])\n",
+                        internal_notif->cookie, mld->rxq_sync.cookie, queue))
+               return;
+
+       switch (internal_notif->type) {
+       case IWL_MLD_RXQ_EMPTY:
+               IWL_FW_CHECK(mld, len,
+                            "invalid empty notification size %d\n", len);
+               break;
+       case IWL_MLD_RXQ_NOTIF_DEL_BA:
+               if (IWL_FW_CHECK(mld, len != sizeof(struct iwl_mld_delba_data),
+                                "invalid delba notification size %u (%zu)\n",
+                                len, sizeof(struct iwl_mld_delba_data)))
+                       break;
+               iwl_mld_del_ba(mld, queue, (void *)internal_notif->payload);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+       }
+
+       IWL_FW_CHECK(mld, !test_and_clear_bit(queue, &mld->rxq_sync.state),
+                    "RXQ sync: queue %d responded a second time!\n", queue);
+
+       if (READ_ONCE(mld->rxq_sync.state) == 0)
+               wake_up(&mld->rxq_sync.waitq);
+}
+
+/* Handle RX_NO_DATA_NOTIF: build a 0-length-PSDU skb that carries only
+ * PHY/radiotap information (no frame data) and pass it to mac80211,
+ * e.g. for monitor interfaces.
+ */
+void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
+                               struct iwl_rx_packet *pkt, int queue)
+{
+       struct iwl_rx_no_data_ver_3 *desc;
+       struct iwl_mld_rx_phy_data phy_data;
+       struct ieee80211_rx_status *rx_status;
+       struct sk_buff *skb;
+       u32 format, rssi;
+
+       /* don't process notifications while a HW restart is in progress */
+       if (unlikely(mld->fw_status.in_hw_restart))
+               return;
+
+       if (IWL_FW_CHECK(mld, iwl_rx_packet_payload_len(pkt) < sizeof(*desc),
+                        "Bad RX_NO_DATA_NOTIF size (%d)\n",
+                        iwl_rx_packet_payload_len(pkt)))
+               return;
+
+       desc = (void *)pkt->data;
+
+       rssi = le32_to_cpu(desc->rssi);
+       phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK);
+       phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK);
+       phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK);
+       phy_data.data0 = desc->phy_info[0];
+       phy_data.data1 = desc->phy_info[1];
+       phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
+       phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
+       phy_data.rate_n_flags = le32_to_cpu(desc->rate);
+       phy_data.with_data = false;
+
+       BUILD_BUG_ON(sizeof(phy_data.rx_vec) != sizeof(desc->rx_vec));
+       memcpy(phy_data.rx_vec, desc->rx_vec, sizeof(phy_data.rx_vec));
+
+       format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+       /* Don't use dev_alloc_skb(), we'll have enough headroom once
+        * ieee80211_hdr pulled.
+        */
+       skb = alloc_skb(128, GFP_ATOMIC);
+       if (!skb) {
+               IWL_ERR(mld, "alloc_skb failed\n");
+               return;
+       }
+
+       rx_status = IEEE80211_SKB_RXCB(skb);
+
+       /* 0-length PSDU */
+       rx_status->flag |= RX_FLAG_NO_PSDU;
+
+       /* mark as failed PLCP on any errors to skip checks in mac80211 */
+       if (le32_get_bits(desc->info, RX_NO_DATA_INFO_ERR_MSK) !=
+           RX_NO_DATA_INFO_ERR_NONE)
+               rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
+
+       switch (le32_get_bits(desc->info, RX_NO_DATA_INFO_TYPE_MSK)) {
+       case RX_NO_DATA_INFO_TYPE_NDP:
+               rx_status->zero_length_psdu_type =
+                       IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
+               break;
+       case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED:
+       case RX_NO_DATA_INFO_TYPE_TB_UNMATCHED:
+               rx_status->zero_length_psdu_type =
+                       IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED;
+               break;
+       default:
+               rx_status->zero_length_psdu_type =
+                       IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR;
+               break;
+       }
+
+       /* only the channel number is available, infer the band from it */
+       rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
+               NL80211_BAND_2GHZ;
+
+       rx_status->freq = ieee80211_channel_to_frequency(phy_data.channel,
+                                                        rx_status->band);
+
+       iwl_mld_rx_fill_status(mld, skb, &phy_data, NULL, NULL, queue);
+
+       /* No more radiotap info should be added after this point.
+        * Mark it as mac header for upper layers to know where
+        * the radiotap header ends.
+        */
+       skb_set_mac_header(skb, skb->len);
+
+       /* Override the nss from the rx_vec since the rate_n_flags has
+        * only 1 bit for the nss which gives a max of 2 ss but there
+        * may be up to 8 spatial streams.
+        */
+       switch (format) {
+       case RATE_MCS_VHT_MSK:
+               rx_status->nss =
+                       le32_get_bits(desc->rx_vec[0],
+                                     RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
+               break;
+       case RATE_MCS_HE_MSK:
+               rx_status->nss =
+                       le32_get_bits(desc->rx_vec[0],
+                                     RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
+               break;
+       case RATE_MCS_EHT_MSK:
+               rx_status->nss =
+                       le32_get_bits(desc->rx_vec[2],
+                                     RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1;
+       }
+
+       /* pass the packet to mac80211 */
+       rcu_read_lock();
+       ieee80211_rx_napi(mld->hw, NULL, skb, napi);
+       rcu_read_unlock();
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_rx_h__
+#define __iwl_mld_rx_h__
+
+#include "mld.h"
+
+/**
+ * enum iwl_mld_internal_rxq_notif_type - RX queue sync notif types
+ *
+ * @IWL_MLD_RXQ_EMPTY: empty sync notification
+ * @IWL_MLD_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
+ */
+enum iwl_mld_internal_rxq_notif_type {
+       IWL_MLD_RXQ_EMPTY,
+       IWL_MLD_RXQ_NOTIF_DEL_BA,
+};
+
+/**
+ * struct iwl_mld_internal_rxq_notif - @iwl_rxq_sync_cmd internal data.
+ * This data is echoed by the firmware to all RSS queues and should be DWORD
+ * aligned. FW is agnostic to the data, so there are no endianness requirements
+ *
+ * @type: one of &iwl_mld_internal_rxq_notif_type
+ * @cookie: unique internal cookie to identify old notifications
+ * @reserved: reserved for alignment
+ * @payload: data to send to RX queues based on the type (may be empty)
+ */
+struct iwl_mld_internal_rxq_notif {
+       u8 type;
+       u8 reserved[3];
+       u32 cookie;
+       u8 payload[];
+} __packed;
+
+/**
+ * struct iwl_mld_rx_queues_sync - RX queues sync data
+ *
+ * @waitq: wait queue for RX queues sync completion
+ * @cookie: unique id to correlate sync requests with responses
+ * @state: bitmask representing the sync state of RX queues
+ *     all RX queues bits are set before sending the command, and the
+ *     corresponding queue bit cleared upon handling the notification
+ */
+struct iwl_mld_rx_queues_sync {
+       wait_queue_head_t waitq;
+       u32 cookie;
+       unsigned long state;
+};
+
+void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
+                    struct iwl_rx_cmd_buffer *rxb, int queue);
+
+void iwl_mld_sync_rx_queues(struct iwl_mld *mld,
+                           enum iwl_mld_internal_rxq_notif_type type,
+                           const void *notif_payload, u32 notif_payload_size);
+
+void iwl_mld_handle_rx_queues_sync_notif(struct iwl_mld *mld,
+                                        struct napi_struct *napi,
+                                        struct iwl_rx_packet *pkt, int queue);
+
+void iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld,
+                                    struct napi_struct *napi,
+                                    struct sk_buff *skb, int queue,
+                                    struct ieee80211_sta *sta);
+
+void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
+                               struct iwl_rx_packet *pkt, int queue);
+
+#endif /* __iwl_mld_rx_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <linux/crc32.h>
+
+#include "mld.h"
+#include "scan.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "phy.h"
+#include "mlo.h"
+
+#include "fw/api/scan.h"
+#include "fw/dbg.h"
+
+#define IWL_SCAN_DWELL_ACTIVE 10
+#define IWL_SCAN_DWELL_PASSIVE 110
+#define IWL_SCAN_NUM_OF_FRAGS 3
+
+/* adaptive dwell max budget time [TU] for full scan */
+#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
+
+/* adaptive dwell max budget time [TU] for directed scan */
+#define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
+
+/* adaptive dwell default high band APs number */
+#define IWL_SCAN_ADWELL_DEFAULT_HB_N_APS 8
+
+/* adaptive dwell default low band APs number */
+#define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2
+
+/* adaptive dwell default APs number for P2P social channels (1, 6, 11) */
+#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
+
+/* adaptive dwell number of APs override for P2P friendly GO channels */
+#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
+
+/* adaptive dwell number of APs override for P2P social channels */
+#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
+
+/* adaptive dwell number of APs override mask for p2p friendly GO */
+#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT BIT(20)
+
+/* adaptive dwell number of APs override mask for social channels */
+#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT BIT(21)
+
+#define SCAN_TIMEOUT_MSEC (30000 * HZ)
+
+/* minimal number of 2GHz and 5GHz channels in the regular scan request */
+#define IWL_MLD_6GHZ_PASSIVE_SCAN_MIN_CHANS 4
+
+/* Scan type - selects the dwell/out-of-channel timing profile in
+ * scan_timing[] based on the current traffic load and vif state.
+ */
+enum iwl_mld_scan_type {
+       IWL_SCAN_TYPE_NOT_SET,
+       IWL_SCAN_TYPE_UNASSOC,
+       IWL_SCAN_TYPE_WILD,
+       IWL_SCAN_TYPE_MILD,
+       IWL_SCAN_TYPE_FRAGMENTED,
+       IWL_SCAN_TYPE_FAST_BALANCE,
+};
+
+/* Out-of-channel timing parameters for one scan type.
+ * NOTE(review): units appear to be TU like the other dwell values in
+ * this file - confirm against the scan command API.
+ */
+struct iwl_mld_scan_timing_params {
+       u32 suspend_time;
+       u32 max_out_time;
+};
+
+/* Timing profiles, indexed by enum iwl_mld_scan_type */
+static const struct iwl_mld_scan_timing_params scan_timing[] = {
+       [IWL_SCAN_TYPE_UNASSOC] = {
+               .suspend_time = 0,
+               .max_out_time = 0,
+       },
+       [IWL_SCAN_TYPE_WILD] = {
+               .suspend_time = 30,
+               .max_out_time = 120,
+       },
+       [IWL_SCAN_TYPE_MILD] = {
+               .suspend_time = 120,
+               .max_out_time = 120,
+       },
+       [IWL_SCAN_TYPE_FRAGMENTED] = {
+               .suspend_time = 95,
+               .max_out_time = 44,
+       },
+       [IWL_SCAN_TYPE_FAST_BALANCE] = {
+               .suspend_time = 30,
+               .max_out_time = 37,
+       },
+};
+
+/* All parameters needed to build a scan request command, gathered from
+ * the mac80211/cfg80211 request plus driver-side decisions (scan type,
+ * FW link id, 6 GHz handling, ...).
+ */
+struct iwl_mld_scan_params {
+       enum iwl_mld_scan_type type;
+       u32 n_channels;
+       u16 delay;
+       int n_ssids;
+       struct cfg80211_ssid *ssids;
+       struct ieee80211_channel **channels;
+       u32 flags;
+       u8 *mac_addr;
+       u8 *mac_addr_mask;
+       bool no_cck;
+       bool pass_all;
+       int n_match_sets;
+       struct iwl_scan_probe_req preq;
+       struct cfg80211_match_set *match_sets;
+       int n_scan_plans;
+       struct cfg80211_sched_scan_plan *scan_plans;
+       bool iter_notif;
+       bool respect_p2p_go;
+       u8 fw_link_id;
+       struct cfg80211_scan_6ghz_params *scan_6ghz_params;
+       u32 n_6ghz_params;
+       bool scan_6ghz;
+       bool enable_6ghz_passive;
+       u8 bssid[ETH_ALEN] __aligned(2);
+};
+
+/* Iterator data for iwl_mld_scan_respect_p2p_go_iter() */
+struct iwl_mld_scan_respect_p2p_go_iter_data {
+       struct ieee80211_vif *current_vif;
+       bool p2p_go;
+};
+
+/* Interface iterator: set data->p2p_go if any vif other than the
+ * scanning one is an active P2P GO.
+ */
+static void iwl_mld_scan_respect_p2p_go_iter(void *_data, u8 *mac,
+                                            struct ieee80211_vif *vif)
+{
+       struct iwl_mld_scan_respect_p2p_go_iter_data *data = _data;
+
+       /* exclude the given vif */
+       if (vif == data->current_vif)
+               return;
+
+       /* TODO: CDB check the band of the GO */
+       if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO &&
+           iwl_mld_vif_from_mac80211(vif)->ap_ibss_active)
+               data->p2p_go = true;
+}
+
+/* Decide whether the scan should respect an active P2P GO: only when
+ * some vif is in low-latency mode and another vif is an active P2P GO.
+ */
+static bool iwl_mld_get_respect_p2p_go(struct iwl_mld *mld,
+                                      struct ieee80211_vif *vif,
+                                      bool low_latency)
+{
+       struct iwl_mld_scan_respect_p2p_go_iter_data iter_data = {
+               .current_vif = vif,
+       };
+
+       /* without low latency there is nothing to respect */
+       if (!low_latency)
+               return false;
+
+       ieee80211_iterate_active_interfaces_mtx(mld->hw,
+                                               IEEE80211_IFACE_ITER_NORMAL,
+                                               iwl_mld_scan_respect_p2p_go_iter,
+                                               &iter_data);
+
+       return iter_data.p2p_go;
+}
+
+/* Iterator data for iwl_mld_scan_iterator() */
+struct iwl_mld_scan_iter_data {
+       struct ieee80211_vif *current_vif;
+       bool active_vif;
+       bool is_dcm_with_p2p_go;
+       bool global_low_latency;
+};
+
+/* Interface iterator: collect global low-latency state, whether any vif
+ * is active, and whether the scanning vif is in DCM with a P2P GO (the
+ * GO uses a channel context different from every active link of the
+ * current vif).
+ */
+static void iwl_mld_scan_iterator(void *_data, u8 *mac,
+                                 struct ieee80211_vif *vif)
+{
+       struct iwl_mld_scan_iter_data *data = _data;
+       struct ieee80211_vif *curr_vif = data->current_vif;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_vif *curr_mld_vif;
+       unsigned long curr_vif_active_links;
+       u16 link_id;
+
+       data->global_low_latency |= iwl_mld_vif_low_latency(mld_vif);
+
+       if ((ieee80211_vif_is_mld(vif) && vif->active_links) ||
+           (vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+            mld_vif->deflink.active))
+               data->active_vif = true;
+
+       if (vif == curr_vif)
+               return;
+
+       if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_P2P_GO)
+               return;
+
+       /* Currently P2P GO can't be AP MLD so the logic below assumes that */
+       WARN_ON_ONCE(ieee80211_vif_is_mld(vif));
+
+       curr_vif_active_links =
+               ieee80211_vif_is_mld(curr_vif) ? curr_vif->active_links : 1;
+
+       curr_mld_vif = iwl_mld_vif_from_mac80211(curr_vif);
+
+       for_each_set_bit(link_id, &curr_vif_active_links,
+                        IEEE80211_MLD_MAX_NUM_LINKS) {
+               struct iwl_mld_link *curr_mld_link =
+                       iwl_mld_link_dereference_check(curr_mld_vif, link_id);
+
+               if (WARN_ON(!curr_mld_link))
+                       return;
+
+               if (rcu_access_pointer(curr_mld_link->chan_ctx) &&
+                   rcu_access_pointer(mld_vif->deflink.chan_ctx) !=
+                   rcu_access_pointer(curr_mld_link->chan_ctx)) {
+                       data->is_dcm_with_p2p_go = true;
+                       return;
+               }
+       }
+}
+
+/* Choose the scan type (timing profile) from the scanning vif's type,
+ * whether any vif is active, the traffic load, and the low-latency and
+ * DCM states collected by iwl_mld_scan_iterator().
+ */
+static enum
+iwl_mld_scan_type iwl_mld_get_scan_type(struct iwl_mld *mld,
+                                       struct ieee80211_vif *vif,
+                                       struct iwl_mld_scan_iter_data *data)
+{
+       enum iwl_mld_traffic_load load = mld->scan.traffic_load.status;
+
+       /* A scanning AP interface probably wants to generate a survey to do
+        * ACS (automatic channel selection).
+        * Force a non-fragmented scan in that case.
+        */
+       if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
+               return IWL_SCAN_TYPE_WILD;
+
+       if (!data->active_vif)
+               return IWL_SCAN_TYPE_UNASSOC;
+
+       if ((load == IWL_MLD_TRAFFIC_HIGH || data->global_low_latency) &&
+           vif->type != NL80211_IFTYPE_P2P_DEVICE)
+               return IWL_SCAN_TYPE_FRAGMENTED;
+
+       /* In case of DCM with P2P GO set all scan requests as
+        * fast-balance scan
+        */
+       if (vif->type == NL80211_IFTYPE_STATION &&
+           data->is_dcm_with_p2p_go)
+               return IWL_SCAN_TYPE_FAST_BALANCE;
+
+       if (load >= IWL_MLD_TRAFFIC_MEDIUM || data->global_low_latency)
+               return IWL_SCAN_TYPE_MILD;
+
+       return IWL_SCAN_TYPE_WILD;
+}
+
+/* Copy the 2.4 GHz IEs to @pos, splitting them so that a placeholder
+ * DS Parameter Set element (channel 0 - presumably filled per-channel
+ * by the FW, TODO confirm) is inserted at its correct position.
+ * Returns a pointer just past the last byte written.
+ */
+static u8 *
+iwl_mld_scan_add_2ghz_elems(struct iwl_mld *mld, const u8 *ies,
+                           size_t len, u8 *const pos)
+{
+       static const u8 before_ds_params[] = {
+           WLAN_EID_SSID,
+           WLAN_EID_SUPP_RATES,
+           WLAN_EID_REQUEST,
+           WLAN_EID_EXT_SUPP_RATES,
+       };
+       size_t offs;
+       u8 *newpos = pos;
+
+       offs = ieee80211_ie_split(ies, len,
+                                 before_ds_params,
+                                 ARRAY_SIZE(before_ds_params),
+                                 0);
+
+       memcpy(newpos, ies, offs);
+       newpos += offs;
+
+       /* Add a placeholder for DS Parameter Set element */
+       *newpos++ = WLAN_EID_DS_PARAMS;
+       *newpos++ = 1;
+       *newpos++ = 0;
+
+       memcpy(newpos, ies + offs, len - offs);
+       newpos += len - offs;
+
+       return newpos;
+}
+
+/* Write the Microsoft/WFA vendor-specific TPC report element at @pos;
+ * the TX power octet is left zero for the FW to fill in.
+ */
+static void
+iwl_mld_scan_add_tpc_report_elem(u8 *pos)
+{
+       *pos++ = WLAN_EID_VENDOR_SPECIFIC;
+       *pos++ = WFA_TPC_IE_LEN - 2;
+       *pos++ = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
+       *pos++ = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
+       *pos++ = WLAN_OUI_MICROSOFT & 0xff;
+       *pos++ = WLAN_OUI_TYPE_MICROSOFT_TPC;
+       *pos++ = 0;
+       /* tx power - will be inserted by the FW */
+       *pos++ = 0;
+       *pos = 0;
+}
+
+/* Out-of-channel priority for the scan, by scan status */
+static u32
+iwl_mld_scan_ooc_priority(enum iwl_mld_scan_status scan_status)
+{
+       switch (scan_status) {
+       case IWL_MLD_SCAN_REGULAR:
+               return IWL_SCAN_PRIORITY_EXT_6;
+       case IWL_MLD_SCAN_INT_MLO:
+               return IWL_SCAN_PRIORITY_EXT_4;
+       default:
+               return IWL_SCAN_PRIORITY_EXT_2;
+       }
+}
+
+/* A regular scan has exactly one plan with a single iteration */
+static bool
+iwl_mld_scan_is_regular(struct iwl_mld_scan_params *params)
+{
+       if (params->n_scan_plans != 1)
+               return false;
+
+       return params->scan_plans[0].iterations == 1;
+}
+
+/* Fragmented and fast-balance scans both use fragmentation */
+static bool
+iwl_mld_scan_is_fragmented(enum iwl_mld_scan_type type)
+{
+       switch (type) {
+       case IWL_SCAN_TYPE_FRAGMENTED:
+       case IWL_SCAN_TYPE_FAST_BALANCE:
+               return true;
+       default:
+               return false;
+       }
+}
+
+/* Find the first scan UID whose status matches, or -ENOENT */
+static int
+iwl_mld_scan_uid_by_status(struct iwl_mld *mld, int status)
+{
+       int uid;
+
+       for (uid = 0; uid < ARRAY_SIZE(mld->scan.uid_status); uid++)
+               if (mld->scan.uid_status[uid] == status)
+                       return uid;
+
+       return -ENOENT;
+}
+
+/* Human-readable EBS status, for debug prints */
+static const char *
+iwl_mld_scan_ebs_status_str(enum iwl_scan_ebs_status status)
+{
+       if (status == IWL_SCAN_EBS_SUCCESS)
+               return "successful";
+
+       if (status == IWL_SCAN_EBS_INACTIVE)
+               return "inactive";
+
+       /* IWL_SCAN_EBS_FAILED, IWL_SCAN_EBS_CHAN_NOT_FOUND or anything else */
+       return "failed";
+}
+
+/* Return the index of @ssid in @ssid_list, or -1 if it is not there.
+ * The list is terminated by the first zero-length entry.
+ */
+static int
+iwl_mld_scan_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
+{
+       int i;
+
+       for (i = 0; i < PROBE_OPTION_MAX; i++) {
+               if (!ssid_list[i].len)
+                       break;
+               if (ssid_list[i].len == ssid_len &&
+                   !memcmp(ssid, ssid_list[i].ssid, ssid_len))
+                       return i;
+       }
+
+       return -1;
+}
+
+/* Check that the scan request fits the FW limits: number of SSIDs,
+ * number of channels, and total probe request IE length.
+ */
+static bool
+iwl_mld_scan_fits(struct iwl_mld *mld, int n_ssids,
+                 struct ieee80211_scan_ies *ies, int n_channels)
+{
+       return ((n_ssids <= PROBE_OPTION_MAX) &&
+               (n_channels <= mld->fw->ucode_capa.n_scan_channels) &&
+               (ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
+                ies->len[NL80211_BAND_5GHZ] + ies->len[NL80211_BAND_6GHZ] <=
+                iwl_mld_scan_max_template_size()));
+}
+
+/* Build the probe request template in params->preq: 802.11 header plus
+ * per-band (2.4/5/6 GHz) and common IEs, recording offset/length of each
+ * part for the FW. A random source address is used when
+ * NL80211_SCAN_FLAG_RANDOM_ADDR is set.
+ */
+static void
+iwl_mld_scan_build_probe_req(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                            struct ieee80211_scan_ies *ies,
+                            struct iwl_mld_scan_params *params)
+{
+       struct ieee80211_mgmt *frame = (void *)params->preq.buf;
+       u8 *pos, *newpos;
+       const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+               params->mac_addr : NULL;
+
+       if (mac_addr)
+               get_random_mask_addr(frame->sa, mac_addr,
+                                    params->mac_addr_mask);
+       else
+               memcpy(frame->sa, vif->addr, ETH_ALEN);
+
+       frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+       eth_broadcast_addr(frame->da);
+       ether_addr_copy(frame->bssid, params->bssid);
+       frame->seq_ctrl = 0;
+
+       /* empty (wildcard) SSID element */
+       pos = frame->u.probe_req.variable;
+       *pos++ = WLAN_EID_SSID;
+       *pos++ = 0;
+
+       /* 24-byte mgmt header + the 2-byte wildcard SSID element */
+       params->preq.mac_header.offset = 0;
+       params->preq.mac_header.len = cpu_to_le16(24 + 2);
+
+       /* Insert DS parameter set element on 2.4 GHz band */
+       newpos = iwl_mld_scan_add_2ghz_elems(mld,
+                                            ies->ies[NL80211_BAND_2GHZ],
+                                            ies->len[NL80211_BAND_2GHZ],
+                                            pos);
+       params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
+       params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
+       pos = newpos;
+
+       memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
+              ies->len[NL80211_BAND_5GHZ]);
+       params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
+       params->preq.band_data[1].len =
+           cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
+       pos += ies->len[NL80211_BAND_5GHZ];
+
+       memcpy(pos, ies->ies[NL80211_BAND_6GHZ],
+              ies->len[NL80211_BAND_6GHZ]);
+       params->preq.band_data[2].offset = cpu_to_le16(pos - params->preq.buf);
+       params->preq.band_data[2].len =
+               cpu_to_le16(ies->len[NL80211_BAND_6GHZ]);
+       pos += ies->len[NL80211_BAND_6GHZ];
+
+       /* common IEs, followed by the TPC report element */
+       memcpy(pos, ies->common_ies, ies->common_ie_len);
+       params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
+
+       iwl_mld_scan_add_tpc_report_elem(pos + ies->common_ie_len);
+       params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
+                                                  WFA_TPC_IE_LEN);
+}
+
+/* Compute the general flags (IWL_UMAC_SCAN_GEN_FLAGS_V2_*) of the scan
+ * command from the request parameters and the scan status.
+ */
+static u16
+iwl_mld_scan_get_cmd_gen_flags(struct iwl_mld *mld,
+                              struct iwl_mld_scan_params *params,
+                              struct ieee80211_vif *vif,
+                              enum iwl_mld_scan_status scan_status)
+{
+       u16 flags = 0;
+
+       /* If no direct SSIDs are provided perform a passive scan. Otherwise,
+        * if there is a single SSID which is not the broadcast SSID, assume
+        * that the scan is intended for roaming purposes and thus enable Rx on
+        * all chains to improve chances of hearing the beacons/probe responses.
+        */
+       if (params->n_ssids == 0)
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
+       else if (params->n_ssids == 1 && params->ssids[0].ssid_len)
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS;
+
+       if (params->pass_all)
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
+       else
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH;
+
+       if (iwl_mld_scan_is_fragmented(params->type))
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1;
+
+       if (!iwl_mld_scan_is_regular(params))
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC;
+
+       if (params->iter_notif ||
+           mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_ENABLED)
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
+
+       if (scan_status == IWL_MLD_SCAN_SCHED ||
+           scan_status == IWL_MLD_SCAN_NETDETECT)
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE;
+
+       if (params->flags & (NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP |
+                            NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE |
+                            NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME))
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE;
+
+       if ((scan_status == IWL_MLD_SCAN_SCHED ||
+            scan_status == IWL_MLD_SCAN_NETDETECT) &&
+           params->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ)
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN;
+
+       if (params->enable_6ghz_passive)
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN;
+
+       flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
+
+       return flags;
+}
+
+/* Compute the second general flags byte (FLAGS2) of the scan command */
+static u8
+iwl_mld_scan_get_cmd_gen_flags2(struct iwl_mld *mld,
+                               struct iwl_mld_scan_params *params,
+                               struct ieee80211_vif *vif, u16 gen_flags)
+{
+       u8 flags = 0;
+
+       /* TODO: CDB */
+       if (params->respect_p2p_go)
+               flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB |
+                       IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
+
+       if (params->scan_6ghz)
+               flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT;
+
+       return flags;
+}
+
+/* Fill the adaptive-dwell and out-of-channel timing fields of the scan
+ * command's general params, based on the scan type's timing profile.
+ */
+static void
+iwl_mld_scan_cmd_set_dwell(struct iwl_mld *mld,
+                          struct iwl_scan_general_params_v11 *gp,
+                          struct iwl_mld_scan_params *params)
+{
+       const struct iwl_mld_scan_timing_params *timing =
+               &scan_timing[params->type];
+
+       gp->adwell_default_social_chn =
+           IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
+       gp->adwell_default_2g = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
+       gp->adwell_default_5g = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
+
+       /* a directed scan (first SSID non-broadcast) gets a smaller budget */
+       if (params->n_ssids && params->ssids[0].ssid_len)
+               gp->adwell_max_budget =
+                       cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+       else
+               gp->adwell_max_budget =
+                       cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
+
+       gp->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+
+       gp->max_out_of_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->max_out_time);
+       gp->suspend_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->suspend_time);
+
+       gp->active_dwell[SCAN_LB_LMAC_IDX] = IWL_SCAN_DWELL_ACTIVE;
+       gp->passive_dwell[SCAN_LB_LMAC_IDX] = IWL_SCAN_DWELL_PASSIVE;
+       gp->active_dwell[SCAN_HB_LMAC_IDX] = IWL_SCAN_DWELL_ACTIVE;
+       gp->passive_dwell[SCAN_HB_LMAC_IDX] = IWL_SCAN_DWELL_PASSIVE;
+
+       /* NOTE(review): the values below are printed in their stored
+        * little-endian form - confirm this is intended for debug output.
+        */
+       IWL_DEBUG_SCAN(mld,
+                      "Scan: adwell_max_budget=%d max_out_of_time=%d suspend_time=%d\n",
+                      gp->adwell_max_budget,
+                      gp->max_out_of_time[SCAN_LB_LMAC_IDX],
+                      gp->suspend_time[SCAN_LB_LMAC_IDX]);
+}
+
+/* Fill the general scan parameters of the UMAC scan command: the two
+ * flag fields, dwell timings, fragmentation count and the FW link id
+ * the scan starts on.
+ */
+static void
+iwl_mld_scan_cmd_set_gen_params(struct iwl_mld *mld,
+                               struct iwl_mld_scan_params *params,
+                               struct ieee80211_vif *vif,
+                               struct iwl_scan_general_params_v11 *gp,
+                               enum iwl_mld_scan_status scan_status)
+{
+       u16 gen_flags = iwl_mld_scan_get_cmd_gen_flags(mld, params, vif,
+                                                      scan_status);
+       u8 gen_flags2 = iwl_mld_scan_get_cmd_gen_flags2(mld, params, vif,
+                                                       gen_flags);
+
+       IWL_DEBUG_SCAN(mld, "General: flags=0x%x, flags2=0x%x\n",
+                      gen_flags, gen_flags2);
+
+       gp->flags = cpu_to_le16(gen_flags);
+       gp->flags2 = gen_flags2;
+
+       iwl_mld_scan_cmd_set_dwell(mld, gp, params);
+
+       /* fragmented scan on LMAC1 needs the number of fragments set */
+       if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
+               gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
+
+       if (params->fw_link_id != IWL_MLD_INVALID_FW_ID)
+               gp->scan_start_mac_or_link_id = params->fw_link_id;
+}
+
+/* Fill the scheduled-scan plans (iteration count and interval) and the
+ * initial delay of the UMAC scan command.
+ *
+ * Returns 0 on success, -EINVAL if the number of scan plans is invalid.
+ */
+static int
+iwl_mld_scan_cmd_set_sched_params(struct iwl_mld_scan_params *params,
+                                 struct iwl_scan_umac_schedule *schedule,
+                                 __le16 *delay)
+{
+       if (WARN_ON(!params->n_scan_plans ||
+                   params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+               return -EINVAL;
+
+       for (int i = 0; i < params->n_scan_plans; i++) {
+               /* fix mis-encoded "&para;ms" artifact: take the address of
+                * the i-th scan plan
+                */
+               struct cfg80211_sched_scan_plan *scan_plan =
+                   &params->scan_plans[i];
+
+               schedule[i].iter_count = scan_plan->iterations;
+               schedule[i].interval =
+                   cpu_to_le16(scan_plan->interval);
+       }
+
+       /* If the number of iterations of the last scan plan is set to zero,
+        * it should run infinitely. However, this is not always the case.
+        * For example, when regular scan is requested the driver sets one scan
+        * plan with one iteration.
+        */
+       if (!schedule[params->n_scan_plans - 1].iter_count)
+               schedule[params->n_scan_plans - 1].iter_count = 0xff;
+
+       *delay = cpu_to_le16(params->delay);
+
+       return 0;
+}
+
+/* We insert the SSIDs in an inverted order, because the FW will
+ * invert it back.
+ *
+ * On return, *ssid_bitmap (if non-NULL) marks only the entries that came
+ * from the scan-request SSID list; match-set entries are not marked.
+ */
+static void
+iwl_mld_scan_cmd_build_ssids(struct iwl_mld_scan_params *params,
+                            struct iwl_ssid_ie *ssids, u32 *ssid_bitmap)
+{
+       int i, j;
+       int index;
+       u32 tmp_bitmap = 0;
+
+       /* copy SSIDs from match list. iwl_config_sched_scan_profiles()
+        * uses the order of these ssids to config match list.
+        */
+       for (i = 0, j = params->n_match_sets - 1;
+            j >= 0 && i < PROBE_OPTION_MAX;
+            i++, j--) {
+               /* skip empty SSID match_sets */
+               /* NOTE(review): i still advances on continue, leaving a
+                * zeroed hole so profile indices stay aligned with the
+                * match-set order — confirm this is intended
+                */
+               if (!params->match_sets[j].ssid.ssid_len)
+                       continue;
+
+               ssids[i].id = WLAN_EID_SSID;
+               ssids[i].len = params->match_sets[j].ssid.ssid_len;
+               memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
+                      ssids[i].len);
+       }
+
+       /* add SSIDs from scan SSID list */
+       for (j = params->n_ssids - 1;
+            j >= 0 && i < PROBE_OPTION_MAX;
+            i++, j--) {
+               /* deduplicate against entries already placed above */
+               index = iwl_mld_scan_ssid_exist(params->ssids[j].ssid,
+                                               params->ssids[j].ssid_len,
+                                               ssids);
+               if (index < 0) {
+                       ssids[i].id = WLAN_EID_SSID;
+                       ssids[i].len = params->ssids[j].ssid_len;
+                       memcpy(ssids[i].ssid, params->ssids[j].ssid,
+                              ssids[i].len);
+                       tmp_bitmap |= BIT(i);
+               } else {
+                       tmp_bitmap |= BIT(index);
+               }
+       }
+
+       if (ssid_bitmap)
+               *ssid_bitmap = tmp_bitmap;
+}
+
+/* Build the short-SSID and BSSID arrays of the probe parameters for a
+ * 6 GHz scan, from the request SSIDs and the collocated-AP information.
+ */
+static void
+iwl_mld_scan_fill_6g_chan_list(struct iwl_mld_scan_params *params,
+                              struct iwl_scan_probe_params_v4 *pp)
+{
+       int j, idex_s = 0, idex_b = 0;
+       struct cfg80211_scan_6ghz_params *scan_6ghz_params =
+               params->scan_6ghz_params;
+
+       /* first, derive short SSIDs (CRC32 of the SSID) from the scan
+        * request's own SSID list
+        */
+       for (j = 0;
+            j < params->n_ssids && idex_s < SCAN_SHORT_SSID_MAX_SIZE;
+            j++) {
+               if (!params->ssids[j].ssid_len)
+                       continue;
+
+               pp->short_ssid[idex_s] =
+                       cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid,
+                                             params->ssids[j].ssid_len));
+
+               /* hidden 6ghz scan */
+               pp->direct_scan[idex_s].id = WLAN_EID_SSID;
+               pp->direct_scan[idex_s].len = params->ssids[j].ssid_len;
+               memcpy(pp->direct_scan[idex_s].ssid, params->ssids[j].ssid,
+                      params->ssids[j].ssid_len);
+               idex_s++;
+       }
+
+       /* Populate the arrays of the short SSIDs and the BSSIDs using the 6GHz
+        * collocated parameters. This might not be optimal, as this processing
+        * does not (yet) correspond to the actual channels, so it is possible
+        * that some entries would be left out.
+        */
+       for (j = 0; j < params->n_6ghz_params; j++) {
+               int k;
+
+               /* First, try to place the short SSID */
+               if (scan_6ghz_params[j].short_ssid_valid) {
+                       /* skip duplicates already in the array */
+                       for (k = 0; k < idex_s; k++) {
+                               if (pp->short_ssid[k] ==
+                                   cpu_to_le32(scan_6ghz_params[j].short_ssid))
+                                       break;
+                       }
+
+                       if (k == idex_s && idex_s < SCAN_SHORT_SSID_MAX_SIZE) {
+                               pp->short_ssid[idex_s++] =
+                                       cpu_to_le32(scan_6ghz_params[j].short_ssid);
+                       }
+               }
+
+               /* try to place BSSID for the same entry */
+               for (k = 0; k < idex_b; k++) {
+                       if (!memcmp(&pp->bssid_array[k],
+                                   scan_6ghz_params[j].bssid, ETH_ALEN))
+                               break;
+               }
+
+               /* only add new, valid unicast BSSIDs while there is room */
+               if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
+                   !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
+                              "scan: invalid BSSID at index %u, index_b=%u\n",
+                              j, idex_b)) {
+                       memcpy(&pp->bssid_array[idex_b++],
+                              scan_6ghz_params[j].bssid, ETH_ALEN);
+               }
+       }
+
+       pp->short_ssid_num = idex_s;
+       pp->bssid_num = idex_b;
+}
+
+/* Set the probe request template and, per band, either the 6 GHz
+ * short-SSID/BSSID lists or the 2.4/5 GHz direct-scan SSIDs.
+ */
+static void
+iwl_mld_scan_cmd_set_probe_params(struct iwl_mld_scan_params *params,
+                                 struct iwl_scan_probe_params_v4 *pp,
+                                 u32 *bitmap_ssid)
+{
+       pp->preq = params->preq;
+
+       if (!params->scan_6ghz) {
+               /* relevant only for 2.4 GHz /5 GHz scan */
+               iwl_mld_scan_cmd_build_ssids(params, pp->direct_scan,
+                                            bitmap_ssid);
+               return;
+       }
+
+       iwl_mld_scan_fill_6g_chan_list(params, pp);
+}
+
+/* Decide whether energy-based scan (EBS) may be used for this scan. */
+static bool
+iwl_mld_scan_use_ebs(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                    bool low_latency)
+{
+       const struct iwl_ucode_capabilities *capa = &mld->fw->ucode_capa;
+
+       /* the feature must be supported and the last EBS successful */
+       if (!(capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) ||
+           mld->scan.last_ebs_failed)
+               return false;
+
+       /* not for p2p find operations */
+       if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+               return false;
+
+       /* in low latency mode, only if fragmented EBS is supported by FW */
+       if (low_latency && !fw_has_api(capa, IWL_UCODE_TLV_API_FRAG_EBS))
+               return false;
+
+       /* not on an AP interface (scan wants survey results) */
+       return ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_AP;
+}
+
+/* Build the channel-parameters flag byte (channel ordering and EBS). */
+static u8
+iwl_mld_scan_cmd_set_chan_flags(struct iwl_mld *mld,
+                               struct iwl_mld_scan_params *params,
+                               struct ieee80211_vif *vif,
+                               bool low_latency)
+{
+       bool fragmented = iwl_mld_scan_is_fragmented(params->type);
+       u8 flags = IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
+
+       if (iwl_mld_scan_use_ebs(mld, vif, low_latency))
+               flags |= IWL_SCAN_CHANNEL_FLAG_EBS |
+                        IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+                        IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+
+       if (fragmented) {
+               /* set fragmented ebs for fragmented scan */
+               flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
+
+               /* Force EBS in case the scan is a fragmented and there is
+                * a need to take P2P GO operation into consideration during
+                * scan operation.
+                */
+               /* TODO: CDB */
+               if (params->respect_p2p_go) {
+                       IWL_DEBUG_SCAN(mld, "Respect P2P GO. Force EBS\n");
+                       flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS;
+               }
+       }
+
+       return flags;
+}
+
+/* 5 GHz channels a P2P GO is likely to operate on */
+static const u8 p2p_go_friendly_chs[] = {
+       36, 40, 44, 48, 149, 153, 157, 161, 165,
+};
+
+/* 2.4 GHz social channels */
+static const u8 social_chs[] = {
+       1, 6, 11
+};
+
+/* Return the adaptive-dwell number-of-APs flag for a channel, relevant
+ * only on a P2P device interface.
+ */
+static u32 iwl_mld_scan_ch_n_aps_flag(enum nl80211_iftype vif_type, u8 ch_id)
+{
+       int i;
+
+       if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
+               return 0;
+
+       for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++)
+               if (p2p_go_friendly_chs[i] == ch_id)
+                       return IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT;
+
+       for (i = 0; i < ARRAY_SIZE(social_chs); i++)
+               if (social_chs[i] == ch_id)
+                       return IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT;
+
+       return 0;
+}
+
+/* Fill the per-channel configuration entries of the scan command for a
+ * 2.4/5 GHz scan (with possible appended 6 GHz MLO channels).
+ */
+static void
+iwl_mld_scan_cmd_set_channels(struct iwl_mld *mld,
+                             struct ieee80211_channel **channels,
+                             struct iwl_scan_channel_params_v7 *cp,
+                             int n_channels, u32 flags,
+                             enum nl80211_iftype vif_type)
+{
+       for (int i = 0; i < n_channels; i++) {
+               enum nl80211_band band = channels[i]->band;
+               struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
+               u8 iwl_band = iwl_mld_nl80211_band_to_fw(band);
+               u32 n_aps_flag =
+                       iwl_mld_scan_ch_n_aps_flag(vif_type,
+                                                  channels[i]->hw_value);
+
+               /* debug/tuning override of the adaptive-dwell APs flag */
+               if (IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE)
+                       n_aps_flag = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT;
+
+               cfg->flags = cpu_to_le32(flags | n_aps_flag);
+               cfg->channel_num = channels[i]->hw_value;
+               /* PSC channels get their flags cleared before the band bits
+                * are OR'ed in below
+                */
+               if (cfg80211_channel_is_psc(channels[i]))
+                       cfg->flags = 0;
+
+               if (band == NL80211_BAND_6GHZ) {
+                       /* 6 GHz channels should only appear in a scan request
+                        * that has scan_6ghz set. The only exception is MLO
+                        * scan, which has to be passive.
+                        */
+                       WARN_ON_ONCE(cfg->flags != 0);
+                       cfg->flags =
+                               cpu_to_le32(IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE);
+               }
+
+               cfg->v2.iter_count = 1;
+               cfg->v2.iter_interval = 0;
+               cfg->flags |= cpu_to_le32(iwl_band <<
+                                         IWL_CHAN_CFG_FLAGS_BAND_POS);
+       }
+}
+
+/* Configure the 6 GHz channel entries of the scan command using the
+ * collocated-AP information gathered in the 2.4/5 GHz scan phase.
+ *
+ * Returns the number of channel entries actually configured, which may
+ * be smaller than params->n_channels when channels are skipped.
+ */
+static u8
+iwl_mld_scan_cfg_channels_6g(struct iwl_mld *mld,
+                            struct iwl_mld_scan_params *params,
+                            u32 n_channels,
+                            struct iwl_scan_probe_params_v4 *pp,
+                            struct iwl_scan_channel_params_v7 *cp,
+                            enum nl80211_iftype vif_type)
+{
+       struct cfg80211_scan_6ghz_params *scan_6ghz_params =
+               params->scan_6ghz_params;
+       u32 i;
+       u8 ch_cnt;
+
+       for (i = 0, ch_cnt = 0; i < params->n_channels; i++) {
+               struct iwl_scan_channel_cfg_umac *cfg =
+                       &cp->channel_config[ch_cnt];
+
+               u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
+               u8 k, n_s_ssids = 0, n_bssids = 0;
+               u8 max_s_ssids, max_bssids;
+               bool force_passive = false, found = false, allow_passive = true,
+                    unsolicited_probe_on_chan = false, psc_no_listen = false;
+               s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
+
+               /* Avoid performing passive scan on non PSC channels unless the
+                * scan is specifically a passive scan, i.e., no SSIDs
+                * configured in the scan command.
+                */
+               if (!cfg80211_channel_is_psc(params->channels[i]) &&
+                   !params->n_6ghz_params && params->n_ssids)
+                       continue;
+
+               cfg->channel_num = params->channels[i]->hw_value;
+               cfg->flags |=
+                       cpu_to_le32(PHY_BAND_6 << IWL_CHAN_CFG_FLAGS_BAND_POS);
+
+               cfg->v5.iter_count = 1;
+               cfg->v5.iter_interval = 0;
+
+               for (u32 j = 0; j < params->n_6ghz_params; j++) {
+                       s8 tmp_psd_20;
+
+                       if (!(scan_6ghz_params[j].channel_idx == i))
+                               continue;
+
+                       unsolicited_probe_on_chan |=
+                               scan_6ghz_params[j].unsolicited_probe;
+
+                       /* Use the highest PSD value allowed as advertised by
+                        * APs for this channel
+                        */
+                       tmp_psd_20 = scan_6ghz_params[j].psd_20;
+                       if (tmp_psd_20 !=
+                           IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED &&
+                           (psd_20 ==
+                            IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED ||
+                            psd_20 < tmp_psd_20))
+                               psd_20 = tmp_psd_20;
+
+                       psc_no_listen |= scan_6ghz_params[j].psc_no_listen;
+               }
+
+               /* In the following cases apply passive scan:
+                * 1. Non fragmented scan:
+                *      - PSC channel with NO_LISTEN_FLAG on should be treated
+                *        like non PSC channel
+                *      - Non PSC channel with more than 3 short SSIDs or more
+                *        than 9 BSSIDs.
+                *      - Non PSC Channel with unsolicited probe response and
+                *        more than 2 short SSIDs or more than 6 BSSIDs.
+                *      - PSC channel with more than 2 short SSIDs or more than
+                *        6 BSSIDs.
+                * 2. Fragmented scan:
+                *      - PSC channel with more than 1 SSID or 3 BSSIDs.
+                *      - Non PSC channel with more than 2 SSIDs or 6 BSSIDs.
+                *      - Non PSC channel with unsolicited probe response and
+                *        more than 1 SSID or more than 3 BSSIDs.
+                */
+               if (!iwl_mld_scan_is_fragmented(params->type)) {
+                       if (!cfg80211_channel_is_psc(params->channels[i]) ||
+                           psc_no_listen) {
+                               if (unsolicited_probe_on_chan) {
+                                       max_s_ssids = 2;
+                                       max_bssids = 6;
+                               } else {
+                                       max_s_ssids = 3;
+                                       max_bssids = 9;
+                               }
+                       } else {
+                               max_s_ssids = 2;
+                               max_bssids = 6;
+                       }
+               } else if (cfg80211_channel_is_psc(params->channels[i])) {
+                       max_s_ssids = 1;
+                       max_bssids = 3;
+               } else {
+                       if (unsolicited_probe_on_chan) {
+                               max_s_ssids = 1;
+                               max_bssids = 3;
+                       } else {
+                               max_s_ssids = 2;
+                               max_bssids = 6;
+                       }
+               }
+
+               /* To optimize the scan time, i.e., reduce the scan dwell time
+                * on each channel, the below logic tries to set 3 direct BSSID
+                * probe requests for each broadcast probe request with a short
+                * SSID.
+                */
+               for (u32 j = 0; j < params->n_6ghz_params; j++) {
+                       if (!(scan_6ghz_params[j].channel_idx == i))
+                               continue;
+
+                       found = false;
+
+                       for (k = 0;
+                            k < pp->short_ssid_num && n_s_ssids < max_s_ssids;
+                            k++) {
+                               if (!scan_6ghz_params[j].unsolicited_probe &&
+                                   le32_to_cpu(pp->short_ssid[k]) ==
+                                   scan_6ghz_params[j].short_ssid) {
+                                       /* Relevant short SSID bit set */
+                                       if (s_ssid_bitmap & BIT(k)) {
+                                               found = true;
+                                               break;
+                                       }
+
+                                       /* Prefer creating BSSID entries unless
+                                        * the short SSID probe can be done in
+                                        * the same channel dwell iteration.
+                                        *
+                                        * We also need to create a short SSID
+                                        * entry for any hidden AP.
+                                        */
+                                       if (3 * n_s_ssids > n_bssids &&
+                                           !pp->direct_scan[k].len)
+                                               break;
+
+                                       /* Hidden AP, cannot do passive scan */
+                                       if (pp->direct_scan[k].len)
+                                               allow_passive = false;
+
+                                       s_ssid_bitmap |= BIT(k);
+                                       n_s_ssids++;
+                                       found = true;
+                                       break;
+                               }
+                       }
+
+                       if (found)
+                               continue;
+
+                       for (k = 0; k < pp->bssid_num; k++) {
+                               if (memcmp(&pp->bssid_array[k],
+                                          scan_6ghz_params[j].bssid,
+                                          ETH_ALEN))
+                                       continue;
+
+                               if (bssid_bitmap & BIT(k))
+                                       break;
+
+                               if (n_bssids < max_bssids) {
+                                       bssid_bitmap |= BIT(k);
+                                       n_bssids++;
+                               } else {
+                                       /* out of room for directed BSSID
+                                        * probes on this channel; use the
+                                        * kernel boolean 'true' (not the
+                                        * non-standard TRUE macro)
+                                        */
+                                       force_passive = true;
+                               }
+
+                               break;
+                       }
+               }
+
+               if (cfg80211_channel_is_psc(params->channels[i]) &&
+                   psc_no_listen)
+                       flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN;
+
+               if (unsolicited_probe_on_chan)
+                       flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES;
+
+               if ((allow_passive && force_passive) ||
+                   (!(bssid_bitmap | s_ssid_bitmap) &&
+                    !cfg80211_channel_is_psc(params->channels[i])))
+                       flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE;
+               else
+                       flags |= bssid_bitmap | (s_ssid_bitmap << 16);
+
+               cfg->flags |= cpu_to_le32(flags);
+               cfg->v5.psd_20 = psd_20;
+
+               ch_cnt++;
+       }
+
+       if (params->n_channels > ch_cnt)
+               IWL_DEBUG_SCAN(mld,
+                              "6GHz: reducing number channels: (%u->%u)\n",
+                              params->n_channels, ch_cnt);
+
+       return ch_cnt;
+}
+
+/* Fill the channel parameters of the scan command for a 6 GHz scan.
+ *
+ * Returns 0 on success, -EINVAL when no 6 GHz channel entries could be
+ * configured.
+ */
+static int
+iwl_mld_scan_cmd_set_6ghz_chan_params(struct iwl_mld *mld,
+                                     struct iwl_mld_scan_params *params,
+                                     struct ieee80211_vif *vif,
+                                     struct iwl_scan_req_params_v17 *scan_p,
+                                     enum iwl_mld_scan_status scan_status)
+{
+       struct iwl_scan_channel_params_v7 *chan_p = &scan_p->channel_params;
+       struct iwl_scan_probe_params_v4 *probe_p = &scan_p->probe_params;
+
+       /* NOTE(review): the u16 general flags are stored into the channel
+        * params flags field here — confirm against the FW API that this
+        * assignment (and any truncation) is intended
+        */
+       chan_p->flags = iwl_mld_scan_get_cmd_gen_flags(mld, params, vif,
+                                                      scan_status);
+       chan_p->count = iwl_mld_scan_cfg_channels_6g(mld, params,
+                                                    params->n_channels,
+                                                    probe_p, chan_p,
+                                                    vif->type);
+       if (!chan_p->count)
+               return -EINVAL;
+
+       /* a wildcard scan may report any PSC-channel result unfiltered */
+       if (!params->n_ssids ||
+           (params->n_ssids == 1 && !params->ssids[0].ssid_len))
+               chan_p->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
+
+       return 0;
+}
+
+/* Fill the channel parameters of the scan command: dispatches to the
+ * 6 GHz path for scan_6ghz requests, otherwise configures the 2.4/5 GHz
+ * channels and optionally appends 6 GHz PSC channels for passive scan.
+ *
+ * Returns 0 on success or a negative error from the 6 GHz path.
+ */
+static int
+iwl_mld_scan_cmd_set_chan_params(struct iwl_mld *mld,
+                                struct iwl_mld_scan_params *params,
+                                struct ieee80211_vif *vif,
+                                struct iwl_scan_req_params_v17 *scan_p,
+                                bool low_latency,
+                                enum iwl_mld_scan_status scan_status,
+                                u32 channel_cfg_flags)
+{
+       struct iwl_scan_channel_params_v7 *cp = &scan_p->channel_params;
+       struct ieee80211_supported_band *sband =
+               &mld->nvm_data->bands[NL80211_BAND_6GHZ];
+
+       cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
+       cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
+
+       /* debug/tuning override of the GO-friendly APs threshold */
+       if (IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE)
+               cp->n_aps_override[0] = IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE;
+
+       if (params->scan_6ghz)
+               return iwl_mld_scan_cmd_set_6ghz_chan_params(mld, params,
+                                                            vif, scan_p,
+                                                            scan_status);
+
+       /* relevant only for 2.4 GHz/5 GHz scan */
+       cp->flags = iwl_mld_scan_cmd_set_chan_flags(mld, params, vif,
+                                                   low_latency);
+       cp->count = params->n_channels;
+
+       iwl_mld_scan_cmd_set_channels(mld, params->channels, cp,
+                                     params->n_channels, channel_cfg_flags,
+                                     vif->type);
+
+       if (!params->enable_6ghz_passive)
+               return 0;
+
+       /* fill 6 GHz passive scan cfg */
+       /* NOTE(review): entries are appended past cp->count — assumes the
+        * channel_config array has room for the extra PSC channels; confirm
+        * against the command allocation size
+        */
+       for (int i = 0; i < sband->n_channels; i++) {
+               struct ieee80211_channel *channel =
+                       &sband->channels[i];
+               struct iwl_scan_channel_cfg_umac *cfg =
+                       &cp->channel_config[cp->count];
+
+               if (!cfg80211_channel_is_psc(channel))
+                       continue;
+
+               cfg->channel_num = channel->hw_value;
+               cfg->v5.iter_count = 1;
+               cfg->v5.iter_interval = 0;
+               cfg->v5.psd_20 =
+                       IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
+               cfg->flags = cpu_to_le32(PHY_BAND_6 <<
+                                        IWL_CHAN_CFG_FLAGS_BAND_POS);
+               cp->count++;
+       }
+
+       return 0;
+}
+
+/* Build the complete v17 UMAC scan command in mld->scan.cmd.
+ *
+ * Returns the allocated scan UID (>= 0) on success, or a negative error
+ * if no UID slot is free or a sub-section could not be filled.
+ */
+static int
+iwl_mld_scan_build_cmd(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                      struct iwl_mld_scan_params *params,
+                      enum iwl_mld_scan_status scan_status,
+                      bool low_latency)
+{
+       struct iwl_scan_req_umac_v17 *cmd = mld->scan.cmd;
+       struct iwl_scan_req_params_v17 *scan_p = &cmd->scan_params;
+       u32 bitmap_ssid = 0;
+       int uid, ret;
+
+       memset(mld->scan.cmd, 0, mld->scan.cmd_size);
+
+       /* find a free UID entry */
+       uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_NONE);
+       if (uid < 0)
+               return uid;
+
+       cmd->uid = cpu_to_le32(uid);
+       cmd->ooc_priority =
+               cpu_to_le32(iwl_mld_scan_ooc_priority(scan_status));
+
+       iwl_mld_scan_cmd_set_gen_params(mld, params, vif,
+                                       &scan_p->general_params, scan_status);
+
+       ret = iwl_mld_scan_cmd_set_sched_params(params,
+                                               scan_p->periodic_params.schedule,
+                                               &scan_p->periodic_params.delay);
+       if (ret)
+               return ret;
+
+       /* probe params must be set before channel params: the 6 GHz
+        * channel configuration consumes the short-SSID/BSSID lists
+        */
+       iwl_mld_scan_cmd_set_probe_params(params, &scan_p->probe_params,
+                                         &bitmap_ssid);
+
+       ret = iwl_mld_scan_cmd_set_chan_params(mld, params, vif, scan_p,
+                                              low_latency, scan_status,
+                                              bitmap_ssid);
+       if (ret)
+               return ret;
+
+       return uid;
+}
+
+/* Decide whether all scheduled-scan results are passed to userspace or
+ * filtered by the firmware match-sets; records the decision in
+ * mld->scan.pass_all_sched_res and returns it.
+ */
+static bool
+iwl_mld_scan_pass_all(struct iwl_mld *mld,
+                     struct cfg80211_sched_scan_request *req)
+{
+       bool filtering = req->n_match_sets &&
+               req->match_sets[0].ssid.ssid_len;
+
+       if (!filtering) {
+               IWL_DEBUG_SCAN(mld, "Sending Scheduled scan without filtering\n");
+               mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_ENABLED;
+               return true;
+       }
+
+       IWL_DEBUG_SCAN(mld,
+                      "Sending scheduled scan with filtering, n_match_sets %d\n",
+                      req->n_match_sets);
+       mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+       return false;
+}
+
+/* Send the scheduled-scan profile configuration to the firmware.
+ *
+ * The command buffer layout is a (zeroed, i.e. empty) blocklist array
+ * followed by the profile configuration. One profile is created per
+ * match set, accepting any cipher/auth/network type.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int
+iwl_mld_config_sched_scan_profiles(struct iwl_mld *mld,
+                                  struct cfg80211_sched_scan_request *req)
+{
+       struct iwl_host_cmd hcmd = {
+               .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+       };
+       struct iwl_scan_offload_profile *profile;
+       struct iwl_scan_offload_profile_cfg_data *cfg_data;
+       struct iwl_scan_offload_profile_cfg *profile_cfg;
+       struct iwl_scan_offload_blocklist *blocklist;
+       u32 blocklist_size = IWL_SCAN_MAX_BLACKLIST_LEN * sizeof(*blocklist);
+       u32 cmd_size = blocklist_size + sizeof(*profile_cfg);
+       u8 *cmd;
+       int ret;
+
+       if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES_V2))
+               return -EIO;
+
+       cmd = kzalloc(cmd_size, GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       hcmd.data[0] = cmd;
+       hcmd.len[0] = cmd_size;
+
+       blocklist = (struct iwl_scan_offload_blocklist *)cmd;
+       profile_cfg = (struct iwl_scan_offload_profile_cfg *)(cmd + blocklist_size);
+
+       /* No blocklist configuration */
+       cfg_data = &profile_cfg->data;
+       cfg_data->num_profiles = req->n_match_sets;
+       cfg_data->active_clients = SCAN_CLIENT_SCHED_SCAN;
+       cfg_data->pass_match = SCAN_CLIENT_SCHED_SCAN;
+       cfg_data->match_notify = SCAN_CLIENT_SCHED_SCAN;
+
+       /* no filtering requested: notify on any beacon as well */
+       if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
+               cfg_data->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
+
+       for (int i = 0; i < req->n_match_sets; i++) {
+               profile = &profile_cfg->profiles[i];
+
+               /* Support any cipher and auth algorithm */
+               profile->unicast_cipher = 0xff;
+               profile->auth_alg = IWL_AUTH_ALGO_UNSUPPORTED |
+                       IWL_AUTH_ALGO_NONE | IWL_AUTH_ALGO_PSK |
+                       IWL_AUTH_ALGO_8021X | IWL_AUTH_ALGO_SAE |
+                       IWL_AUTH_ALGO_8021X_SHA384 | IWL_AUTH_ALGO_OWE;
+               profile->network_type = IWL_NETWORK_TYPE_ANY;
+               profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
+               profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
+               profile->ssid_index = i;
+       }
+
+       IWL_DEBUG_SCAN(mld,
+                      "Sending scheduled scan profile config (n_match_sets=%u)\n",
+                      req->n_match_sets);
+
+       ret = iwl_mld_send_cmd(mld, &hcmd);
+
+       kfree(cmd);
+
+       return ret;
+}
+
+/* For the 6 GHz band only PSC channels may be scanned in a scheduled
+ * scan. When non-PSC 6 GHz channels are present, replace the channel
+ * list with a filtered duplicate and set *non_psc_included so the
+ * caller knows it must free the duplicated list.
+ *
+ * Returns 0 on success, -ENOMEM if the list could not be duplicated.
+ */
+static int
+iwl_mld_sched_scan_handle_non_psc_channels(struct iwl_mld_scan_params *params,
+                                          bool *non_psc_included)
+{
+       struct ieee80211_channel **channels;
+       int i, j;
+
+       *non_psc_included = false;
+       /* for 6 GHZ band only PSC channels need to be added */
+       for (i = 0; i < params->n_channels; i++) {
+               struct ieee80211_channel *channel = params->channels[i];
+
+               if (channel->band == NL80211_BAND_6GHZ &&
+                   !cfg80211_channel_is_psc(channel)) {
+                       *non_psc_included = true;
+                       break;
+               }
+       }
+
+       if (!*non_psc_included)
+               return 0;
+
+       /* duplicate into a temporary first so the original channel list
+        * isn't lost if the allocation fails
+        */
+       channels = kmemdup(params->channels,
+                          sizeof(params->channels[0]) * params->n_channels,
+                          GFP_KERNEL);
+       if (!channels)
+               return -ENOMEM;
+
+       params->channels = channels;
+
+       /* compact the copy in place, dropping non-PSC 6 GHz channels */
+       for (i = j = 0; i < params->n_channels; i++) {
+               if (params->channels[i]->band == NL80211_BAND_6GHZ &&
+                   !cfg80211_channel_is_psc(params->channels[i]))
+                       continue;
+               params->channels[j++] = params->channels[i];
+       }
+
+       params->n_channels = j;
+
+       return 0;
+}
+
+/* Decide whether 6 GHz passive scanning should be added to the first
+ * 2.4/5 GHz scan phase, setting params->enable_6ghz_passive accordingly.
+ * All of the conditions checked below must hold: not already in the 6 GHz
+ * phase, station interface, rate-limit timeouts expired (or not
+ * associated), enough requested channels, a wildcard SSID present, and a
+ * 6 GHz band that exists but has all of its channels disabled.
+ */
+static void
+iwl_mld_scan_6ghz_passive_scan(struct iwl_mld *mld,
+                              struct iwl_mld_scan_params *params,
+                              struct ieee80211_vif *vif)
+{
+       struct ieee80211_supported_band *sband =
+               &mld->nvm_data->bands[NL80211_BAND_6GHZ];
+       u32 n_disabled, i;
+
+       params->enable_6ghz_passive = false;
+
+       /* 6 GHz passive scan may be enabled in the first 2.4 GHz/5 GHz scan
+        * phase to discover geo location if no AP's are found. Skip it when
+        * we're in the 6 GHz scan phase.
+        */
+       if (params->scan_6ghz)
+               return;
+
+       /* 6 GHz passive scan allowed only on station interface */
+       if (vif->type != NL80211_IFTYPE_STATION) {
+               IWL_DEBUG_SCAN(mld,
+                              "6GHz passive scan: not station interface\n");
+               return;
+       }
+
+       /* 6 GHz passive scan is allowed in a defined time interval following
+        * HW reset or resume flow, or while not associated and a large
+        * interval has passed since the last 6 GHz passive scan.
+        */
+       if ((vif->cfg.assoc ||
+            time_after(mld->scan.last_6ghz_passive_jiffies +
+                       (IWL_MLD_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) &&
+           (time_before(mld->scan.last_start_time_jiffies +
+                        (IWL_MLD_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ),
+                        jiffies))) {
+               IWL_DEBUG_SCAN(mld, "6GHz passive scan: %s\n",
+                              vif->cfg.assoc ? "associated" :
+                              "timeout did not expire");
+               return;
+       }
+
+       /* not enough channels in the regular scan request */
+       if (params->n_channels < IWL_MLD_6GHZ_PASSIVE_SCAN_MIN_CHANS) {
+               IWL_DEBUG_SCAN(mld,
+                              "6GHz passive scan: not enough channels %d\n",
+                              params->n_channels);
+               return;
+       }
+
+       /* a zero-length SSID entry means a wildcard (broadcast) probe */
+       for (i = 0; i < params->n_ssids; i++) {
+               if (!params->ssids[i].ssid_len)
+                       break;
+       }
+
+       /* not a wildcard scan, so cannot enable passive 6 GHz scan */
+       if (i == params->n_ssids) {
+               IWL_DEBUG_SCAN(mld,
+                              "6GHz passive scan: no wildcard SSID\n");
+               return;
+       }
+
+       if (!sband || !sband->n_channels) {
+               IWL_DEBUG_SCAN(mld,
+                              "6GHz passive scan: no 6GHz channels\n");
+               return;
+       }
+
+       for (i = 0, n_disabled = 0; i < sband->n_channels; i++) {
+               if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED))
+                       n_disabled++;
+       }
+
+       /* Not all the 6 GHz channels are disabled, so no need for 6 GHz
+        * passive scan
+        */
+       if (n_disabled != sband->n_channels) {
+               IWL_DEBUG_SCAN(mld,
+                              "6GHz passive scan: 6GHz channels enabled\n");
+               return;
+       }
+
+       /* all conditions to enable 6 GHz passive scan are satisfied */
+       IWL_DEBUG_SCAN(mld, "6GHz passive scan: can be enabled\n");
+       params->enable_6ghz_passive = true;
+}
+
+/* Determine the link whose TSF the FW should use for scan reporting and
+ * store its FW id in params->fw_link_id. A negative tsf_report_link_id
+ * means no explicit request: default to the first active link, or link 0
+ * when there are no active links.
+ */
+static void
+iwl_mld_scan_set_link_id(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                        struct iwl_mld_scan_params *params,
+                        s8 tsf_report_link_id,
+                        enum iwl_mld_scan_status scan_status)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_link *link;
+
+       if (tsf_report_link_id < 0) {
+               if (vif->active_links)
+                       tsf_report_link_id = __ffs(vif->active_links);
+               else
+                       tsf_report_link_id = 0;
+       }
+
+       link = iwl_mld_link_dereference_check(mld_vif, tsf_report_link_id);
+       if (!WARN_ON(!link)) {
+               params->fw_link_id = link->fw_id;
+               /* we need to store fw_link_id only for regular scan,
+                * and use it in scan complete notif
+                */
+               if (scan_status == IWL_MLD_SCAN_REGULAR)
+                       mld->scan.fw_link_id = link->fw_id;
+       } else {
+               mld->scan.fw_link_id = IWL_MLD_INVALID_FW_ID;
+               params->fw_link_id = IWL_MLD_INVALID_FW_ID;
+       }
+}
+
+/* Build and send a single (one-shot) scan request to the FW.
+ *
+ * Used for both regular mac80211 scans and internal MLO scans; on success
+ * the scan status/uid bookkeeping in mld->scan is updated for the given
+ * scan_status.
+ */
+static int
+_iwl_mld_single_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                          struct cfg80211_scan_request *req,
+                          struct ieee80211_scan_ies *ies,
+                          enum iwl_mld_scan_status scan_status)
+{
+       struct iwl_host_cmd hcmd = {
+               .id = WIDE_ID(LONG_GROUP, SCAN_REQ_UMAC),
+               .len = { mld->scan.cmd_size, },
+               .data = { mld->scan.cmd, },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+       };
+       struct iwl_mld_scan_iter_data scan_iter_data = {
+               .current_vif = vif,
+       };
+       struct cfg80211_sched_scan_plan scan_plan = {.iterations = 1};
+       struct iwl_mld_scan_params params = {};
+       int ret, uid;
+
+       /* we should have failed registration if scan_cmd was NULL */
+       if (WARN_ON(!mld->scan.cmd))
+               return -ENOMEM;
+
+       if (!iwl_mld_scan_fits(mld, req->n_ssids, ies, req->n_channels))
+               return -ENOBUFS;
+
+       ieee80211_iterate_active_interfaces_mtx(mld->hw,
+                                               IEEE80211_IFACE_ITER_NORMAL,
+                                               iwl_mld_scan_iterator,
+                                               &scan_iter_data);
+
+       params.type = iwl_mld_get_scan_type(mld, vif, &scan_iter_data);
+       params.n_ssids = req->n_ssids;
+       params.flags = req->flags;
+       params.n_channels = req->n_channels;
+       params.delay = 0;
+       params.ssids = req->ssids;
+       params.channels = req->channels;
+       params.mac_addr = req->mac_addr;
+       params.mac_addr_mask = req->mac_addr_mask;
+       params.no_cck = req->no_cck;
+       params.pass_all = true;
+       params.n_match_sets = 0;
+       params.match_sets = NULL;
+       /* a one-shot scan is a single-iteration "plan" */
+       params.scan_plans = &scan_plan;
+       params.n_scan_plans = 1;
+
+       params.n_6ghz_params = req->n_6ghz_params;
+       params.scan_6ghz_params = req->scan_6ghz_params;
+       params.scan_6ghz = req->scan_6ghz;
+
+       ether_addr_copy(params.bssid, req->bssid);
+       /* TODO: CDB - per-band flag */
+       params.respect_p2p_go =
+               iwl_mld_get_respect_p2p_go(mld, vif,
+                                          scan_iter_data.global_low_latency);
+
+       if (req->duration)
+               params.iter_notif = true;
+
+       iwl_mld_scan_set_link_id(mld, vif, &params, req->tsf_report_link_id,
+                                scan_status);
+
+       iwl_mld_scan_build_probe_req(mld, vif, ies, &params);
+
+       iwl_mld_scan_6ghz_passive_scan(mld, &params, vif);
+
+       uid = iwl_mld_scan_build_cmd(mld, vif, &params, scan_status,
+                                    scan_iter_data.global_low_latency);
+       if (uid < 0)
+               return uid;
+
+       ret = iwl_mld_send_cmd(mld, &hcmd);
+       if (ret) {
+               IWL_ERR(mld, "Scan failed! ret %d\n", ret);
+               return ret;
+       }
+
+       IWL_DEBUG_SCAN(mld, "Scan request send success: status=%u, uid=%u\n",
+                      scan_status, uid);
+
+       mld->scan.uid_status[uid] = scan_status;
+       mld->scan.status |= scan_status;
+
+       if (params.enable_6ghz_passive)
+               mld->scan.last_6ghz_passive_jiffies = jiffies;
+
+       return 0;
+}
+
+/* Send SCAN_ABORT_UMAC for the given uid and return the FW abort status
+ * in *status. Returns 0 only when the command was sent and a response of
+ * the expected length was received; -EIO on an unexpected response
+ * length, or the send error otherwise.
+ */
+static int
+iwl_mld_scan_send_abort_cmd_status(struct iwl_mld *mld, int uid, u32 *status)
+{
+       struct iwl_umac_scan_abort abort_cmd = {
+               .uid = cpu_to_le32(uid),
+       };
+       struct iwl_host_cmd cmd = {
+               .id = WIDE_ID(LONG_GROUP, SCAN_ABORT_UMAC),
+               .flags = CMD_WANT_SKB,
+               .data = { &abort_cmd },
+               .len[0] = sizeof(abort_cmd),
+       };
+       struct iwl_rx_packet *pkt;
+       struct iwl_cmd_response *resp;
+       u32 resp_len;
+       int ret;
+
+       ret = iwl_mld_send_cmd(mld, &cmd);
+       if (ret)
+               return ret;
+
+       pkt = cmd.resp_pkt;
+
+       resp_len = iwl_rx_packet_payload_len(pkt);
+       if (IWL_FW_CHECK(mld, resp_len != sizeof(*resp),
+                        "Scan Abort: unexpected response length %d\n",
+                        resp_len)) {
+               ret = -EIO;
+               goto out;
+       }
+
+       resp = (void *)pkt->data;
+       *status = le32_to_cpu(resp->status);
+
+out:
+       /* CMD_WANT_SKB responses must be freed by the caller */
+       iwl_free_resp(&cmd);
+       return ret;
+}
+
+/* Send a scan abort for @uid and tell the caller (via *wait) whether it
+ * still needs to wait for the scan complete notification.
+ */
+static int
+iwl_mld_scan_abort(struct iwl_mld *mld, int type, int uid, bool *wait)
+{
+       /* Initialize so the debug print below doesn't read an
+        * uninitialized value when sending the abort command fails.
+        */
+       enum iwl_umac_scan_abort_status status = 0;
+       int ret;
+
+       *wait = true;
+
+       IWL_DEBUG_SCAN(mld, "Sending scan abort, uid %u\n", uid);
+
+       ret = iwl_mld_scan_send_abort_cmd_status(mld, uid, &status);
+
+       IWL_DEBUG_SCAN(mld, "Scan abort: ret=%d status=%u\n", ret, status);
+
+       /* We don't need to wait to scan complete in the following cases:
+        * 1. Driver failed to send the scan abort cmd.
+        * 2. The FW is no longer familiar with the scan that needs to be
+        *    stopped. It is expected that the scan complete notification was
+        *    already received but not yet processed.
+        *
+        * In both cases the flow should continue similar to the case that the
+        * scan was really aborted.
+        */
+       if (ret || status == IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND)
+               *wait = false;
+
+       return ret;
+}
+
+/* Abort the scan identified by @uid and, unless the abort flow indicates
+ * waiting isn't needed, wait up to 1 second (HZ) for the
+ * SCAN_COMPLETE_UMAC notification from the FW.
+ */
+static int
+iwl_mld_scan_stop_wait(struct iwl_mld *mld, int type, int uid)
+{
+       struct iwl_notification_wait wait_scan_done;
+       static const u16 scan_comp_notif[] = { SCAN_COMPLETE_UMAC };
+       bool wait = true;
+       int ret;
+
+       /* register for the notification before aborting, so it can't race */
+       iwl_init_notification_wait(&mld->notif_wait, &wait_scan_done,
+                                  scan_comp_notif,
+                                  ARRAY_SIZE(scan_comp_notif),
+                                  NULL, NULL);
+
+       IWL_DEBUG_SCAN(mld, "Preparing to stop scan, type=%x\n", type);
+
+       ret = iwl_mld_scan_abort(mld, type, uid, &wait);
+       if (ret) {
+               IWL_DEBUG_SCAN(mld, "couldn't stop scan type=%d\n", type);
+               goto return_no_wait;
+       }
+
+       if (!wait) {
+               IWL_DEBUG_SCAN(mld, "no need to wait for scan type=%d\n", type);
+               goto return_no_wait;
+       }
+
+       /* iwl_wait_notification removes the wait entry in all outcomes */
+       return iwl_wait_notification(&mld->notif_wait, &wait_scan_done, HZ);
+
+return_no_wait:
+       iwl_remove_notification(&mld->notif_wait, &wait_scan_done);
+       return ret;
+}
+
+/* Start a scheduled (periodic) scan of the given @type.
+ *
+ * The FW supports only a single periodic scan, so this fails with -EBUSY
+ * if a scheduled or net-detect scan is already running. Non-PSC 6 GHz
+ * channels are filtered out of the request before building the command.
+ */
+int iwl_mld_sched_scan_start(struct iwl_mld *mld,
+                            struct ieee80211_vif *vif,
+                            struct cfg80211_sched_scan_request *req,
+                            struct ieee80211_scan_ies *ies,
+                            int type)
+{
+       struct iwl_host_cmd hcmd = {
+               .id = WIDE_ID(LONG_GROUP, SCAN_REQ_UMAC),
+               .len = { mld->scan.cmd_size, },
+               .data = { mld->scan.cmd, },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+       };
+       struct iwl_mld_scan_params params = {};
+       struct iwl_mld_scan_iter_data scan_iter_data = {
+               .current_vif = vif,
+       };
+       bool non_psc_included = false;
+       int ret, uid;
+
+       /* we should have failed registration if scan_cmd was NULL */
+       if (WARN_ON(!mld->scan.cmd))
+               return -ENOMEM;
+
+       /* FW supports only a single periodic scan */
+       if (mld->scan.status & (IWL_MLD_SCAN_SCHED | IWL_MLD_SCAN_NETDETECT))
+               return -EBUSY;
+
+       ieee80211_iterate_active_interfaces_mtx(mld->hw,
+                                               IEEE80211_IFACE_ITER_NORMAL,
+                                               iwl_mld_scan_iterator,
+                                               &scan_iter_data);
+
+       params.type = iwl_mld_get_scan_type(mld, vif, &scan_iter_data);
+       params.flags = req->flags;
+       params.n_ssids = req->n_ssids;
+       params.ssids = req->ssids;
+       params.n_channels = req->n_channels;
+       params.channels = req->channels;
+       params.mac_addr = req->mac_addr;
+       params.mac_addr_mask = req->mac_addr_mask;
+       params.no_cck = false;
+       params.pass_all = iwl_mld_scan_pass_all(mld, req);
+       params.n_match_sets = req->n_match_sets;
+       params.match_sets = req->match_sets;
+       params.n_scan_plans = req->n_scan_plans;
+       params.scan_plans = req->scan_plans;
+       /* TODO: CDB - per-band flag */
+       params.respect_p2p_go =
+               iwl_mld_get_respect_p2p_go(mld, vif,
+                                          scan_iter_data.global_low_latency);
+
+       /* UMAC scan supports up to 16-bit delays, trim it down to 16-bits */
+       params.delay = req->delay > U16_MAX ? U16_MAX : req->delay;
+
+       eth_broadcast_addr(params.bssid);
+
+       ret = iwl_mld_config_sched_scan_profiles(mld, req);
+       if (ret)
+               return ret;
+
+       iwl_mld_scan_build_probe_req(mld, vif, ies, &params);
+
+       ret = iwl_mld_sched_scan_handle_non_psc_channels(&params,
+                                                        &non_psc_included);
+       if (ret)
+               goto out;
+
+       /* re-check the fit with the possibly reduced channel list */
+       if (!iwl_mld_scan_fits(mld, req->n_ssids, ies, params.n_channels)) {
+               ret = -ENOBUFS;
+               goto out;
+       }
+
+       uid = iwl_mld_scan_build_cmd(mld, vif, &params, type,
+                                    scan_iter_data.global_low_latency);
+       if (uid < 0) {
+               ret = uid;
+               goto out;
+       }
+
+       ret = iwl_mld_send_cmd(mld, &hcmd);
+       if (!ret) {
+               IWL_DEBUG_SCAN(mld,
+                              "Sched scan request send success: type=%u, uid=%u\n",
+                              type, uid);
+               mld->scan.uid_status[uid] = type;
+               mld->scan.status |= type;
+       } else {
+               IWL_ERR(mld, "Sched scan failed! ret %d\n", ret);
+               mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+       }
+
+out:
+       /* free the duplicated channel array made by the non-PSC filter */
+       if (non_psc_included)
+               kfree(params.channels);
+       return ret;
+}
+
+/* Stop a running scan of the given @type (no-op if none is running),
+ * clear the related state, and optionally notify mac80211 of the stop.
+ * Returns 0 when nothing was running, otherwise the stop-wait result.
+ */
+int iwl_mld_scan_stop(struct iwl_mld *mld, int type, bool notify)
+{
+       int uid, ret;
+
+       IWL_DEBUG_SCAN(mld,
+                      "Request to stop scan: type=0x%x, status=0x%x\n",
+                      type, mld->scan.status);
+
+       if (!(mld->scan.status & type))
+               return 0;
+
+       uid = iwl_mld_scan_uid_by_status(mld, type);
+       /* must be valid, we just checked it's running */
+       if (WARN_ON_ONCE(uid < 0))
+               return uid;
+
+       ret = iwl_mld_scan_stop_wait(mld, type, uid);
+       if (ret)
+               IWL_DEBUG_SCAN(mld, "Failed to stop scan\n");
+
+       /* Clear the scan status so the next scan requests will
+        * succeed and mark the scan as stopping, so that the Rx
+        * handler doesn't do anything, as the scan was stopped from
+        * above. Also remove the handler to not notify mac80211
+        * erroneously after a new scan starts, for example.
+        */
+       mld->scan.status &= ~type;
+       mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+       iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_SCAN,
+                                              uid);
+
+       if (type == IWL_MLD_SCAN_REGULAR) {
+               if (notify) {
+                       struct cfg80211_scan_info info = {
+                               .aborted = true,
+                       };
+
+                       ieee80211_scan_completed(mld->hw, &info);
+               }
+       } else if (notify) {
+               ieee80211_sched_scan_stopped(mld->hw);
+               mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+       }
+
+       return ret;
+}
+
+/* Start a regular (mac80211-requested) one-shot scan. */
+int iwl_mld_regular_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                              struct cfg80211_scan_request *req,
+                              struct ieee80211_scan_ies *ies)
+{
+       return _iwl_mld_single_scan_start(mld, vif, req, ies,
+                                         IWL_MLD_SCAN_REGULAR);
+}
+
+/* Build a synthetic cfg80211 scan request covering the given channels and
+ * start an internal MLO scan with it. The request is freed automatically
+ * on return (__free(kfree)); failures are logged but not reported.
+ */
+static void iwl_mld_int_mlo_scan_start(struct iwl_mld *mld,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_channel **channels,
+                                      size_t n_channels)
+{
+       struct cfg80211_scan_request *req __free(kfree) = NULL;
+       struct ieee80211_scan_ies ies = {};
+       size_t size;
+       int ret;
+
+       IWL_DEBUG_SCAN(mld, "Starting Internal MLO scan: n_channels=%zu\n",
+                      n_channels);
+
+       size = struct_size(req, channels, n_channels);
+       req = kzalloc(size, GFP_KERNEL);
+       if (!req)
+               return;
+
+       /* set the requested channels */
+       for (int i = 0; i < n_channels; i++)
+               req->channels[i] = channels[i];
+
+       req->n_channels = n_channels;
+
+       /* set the rates: all bitrates of every available band */
+       for (int i = 0; i < NUM_NL80211_BANDS; i++)
+               if (mld->wiphy->bands[i])
+                       req->rates[i] =
+                               (1 << mld->wiphy->bands[i]->n_bitrates) - 1;
+
+       req->wdev = ieee80211_vif_to_wdev(vif);
+       req->wiphy = mld->wiphy;
+       req->scan_start = jiffies;
+       /* -1: let iwl_mld_scan_set_link_id pick the TSF reporting link */
+       req->tsf_report_link_id = -1;
+
+       ret = _iwl_mld_single_scan_start(mld, vif, req, &ies,
+                                        IWL_MLD_SCAN_INT_MLO);
+
+       IWL_DEBUG_SCAN(mld, "Internal MLO scan: ret=%d\n", ret);
+}
+
+/* Trigger an internal MLO scan on the operating channels of all usable
+ * links. Only runs for an associated MLD vif with more than one valid
+ * link, and only if no internal MLO scan is already in progress.
+ */
+void iwl_mld_int_mlo_scan(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+       struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS];
+       unsigned long usable_links = ieee80211_vif_usable_links(vif);
+       size_t n_channels = 0;
+       u8 link_id;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif) ||
+           hweight16(vif->valid_links) == 1)
+               return;
+
+       if (mld->scan.status & IWL_MLD_SCAN_INT_MLO) {
+               IWL_DEBUG_SCAN(mld, "Internal MLO scan is already running\n");
+               return;
+       }
+
+       /* collect the operating channel of each usable link */
+       for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+               struct ieee80211_bss_conf *link_conf =
+                       link_conf_dereference_check(vif, link_id);
+
+               if (WARN_ON_ONCE(!link_conf))
+                       continue;
+
+               channels[n_channels++] = link_conf->chanreq.oper.chan;
+       }
+
+       if (!n_channels)
+               return;
+
+       iwl_mld_int_mlo_scan_start(mld, vif, channels, n_channels);
+}
+
+/* Handle the UMAC scan-iteration-complete notification: record the scan
+ * start TSF for a regular scan, and forward scheduled scan results to
+ * mac80211 when pass-all results were found.
+ */
+void iwl_mld_handle_scan_iter_complete_notif(struct iwl_mld *mld,
+                                            struct iwl_rx_packet *pkt)
+{
+       struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
+       u32 uid = __le32_to_cpu(notif->uid);
+
+       /* validate the FW-provided uid before using it as an array index */
+       if (IWL_FW_CHECK(mld, uid >= ARRAY_SIZE(mld->scan.uid_status),
+                        "FW reports out-of-range scan UID %d\n", uid))
+               return;
+
+       if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_REGULAR)
+               mld->scan.start_tsf = le64_to_cpu(notif->start_tsf);
+
+       IWL_DEBUG_SCAN(mld,
+                      "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n",
+                      notif->status, notif->scanned_channels);
+
+       if (mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_FOUND) {
+               IWL_DEBUG_SCAN(mld, "Pass all scheduled scan results found\n");
+               ieee80211_sched_scan_results(mld->hw);
+               mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_ENABLED;
+       }
+
+       IWL_DEBUG_SCAN(mld,
+                      "UMAC Scan iteration complete: scan started at %llu (TSF)\n",
+                      le64_to_cpu(notif->start_tsf));
+}
+
+/* Handle the scheduled-scan match-found notification by informing
+ * mac80211 that results are available.
+ */
+void iwl_mld_handle_match_found_notif(struct iwl_mld *mld,
+                                     struct iwl_rx_packet *pkt)
+{
+       IWL_DEBUG_SCAN(mld, "Scheduled scan results\n");
+       ieee80211_sched_scan_results(mld->hw);
+}
+
+/* Handle the UMAC scan-complete notification: validate the uid, notify
+ * mac80211 according to the completed scan's type, update the scan
+ * status bookkeeping, and record EBS failures.
+ */
+void iwl_mld_handle_scan_complete_notif(struct iwl_mld *mld,
+                                       struct iwl_rx_packet *pkt)
+{
+       struct iwl_umac_scan_complete *notif = (void *)pkt->data;
+       bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+       u32 uid = __le32_to_cpu(notif->uid);
+
+       /* validate the FW-provided uid before using it as an array index */
+       if (IWL_FW_CHECK(mld, uid >= ARRAY_SIZE(mld->scan.uid_status),
+                        "FW reports out-of-range scan UID %d\n", uid))
+               return;
+
+       IWL_DEBUG_SCAN(mld,
+                      "Scan completed: uid=%u type=%u, status=%s, EBS=%s\n",
+                      uid, mld->scan.uid_status[uid],
+                      notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+                               "completed" : "aborted",
+                      iwl_mld_scan_ebs_status_str(notif->ebs_status));
+       IWL_DEBUG_SCAN(mld, "Scan completed: scan_status=0x%x\n",
+                      mld->scan.status);
+       IWL_DEBUG_SCAN(mld,
+                      "Scan completed: line=%u, iter=%u, elapsed time=%u\n",
+                      notif->last_schedule, notif->last_iter,
+                      __le32_to_cpu(notif->time_from_last_iter));
+
+       if (IWL_FW_CHECK(mld, !(mld->scan.uid_status[uid] & mld->scan.status),
+                        "FW reports scan UID %d we didn't trigger\n", uid))
+               return;
+
+       /* if the scan is already stopping, we don't need to notify mac80211 */
+       if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_REGULAR) {
+               struct cfg80211_scan_info info = {
+                       .aborted = aborted,
+                       .scan_start_tsf = mld->scan.start_tsf,
+               };
+               int fw_link_id = mld->scan.fw_link_id;
+               struct ieee80211_bss_conf *link_conf = NULL;
+
+               if (fw_link_id != IWL_MLD_INVALID_FW_ID)
+                       link_conf =
+                               wiphy_dereference(mld->wiphy,
+                                                 mld->fw_id_to_bss_conf[fw_link_id]);
+
+               /* It is possible that by the time the scan is complete the
+                * link was already removed and is not valid.
+                */
+               if (link_conf)
+                       ether_addr_copy(info.tsf_bssid, link_conf->bssid);
+               else
+                       IWL_DEBUG_SCAN(mld, "Scan link is no longer valid\n");
+
+               ieee80211_scan_completed(mld->hw, &info);
+       } else if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_SCHED) {
+               ieee80211_sched_scan_stopped(mld->hw);
+               mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+       } else if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_INT_MLO) {
+               IWL_DEBUG_SCAN(mld, "Internal MLO scan completed\n");
+               mld->scan.last_mlo_scan_jiffies = jiffies;
+
+               /*
+                * We limit link selection to internal MLO scans as otherwise
+                * we do not know whether all channels were covered.
+                */
+               iwl_mld_select_links(mld);
+       }
+
+       /* clear the scan type bit before clearing the per-uid status */
+       mld->scan.status &= ~mld->scan.uid_status[uid];
+
+       IWL_DEBUG_SCAN(mld, "Scan completed: after update: scan_status=0x%x\n",
+                      mld->scan.status);
+
+       mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+
+       if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
+           notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
+               mld->scan.last_ebs_failed = true;
+}
+
+/* This function is used in nic restart flow, to inform mac80211 about scans
+ * that were aborted by restart flow or by an assert.
+ */
+void iwl_mld_report_scan_aborted(struct iwl_mld *mld)
+{
+       int uid;
+
+       uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_REGULAR);
+       if (uid >= 0) {
+               struct cfg80211_scan_info info = {
+                       .aborted = true,
+               };
+
+               ieee80211_scan_completed(mld->hw, &info);
+               mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+       }
+
+       uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_SCHED);
+       if (uid >= 0) {
+               mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+               mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+
+               /* sched scan will be restarted by mac80211 in reconfig.
+                * report to mac80211 that sched scan stopped only if we won't
+                * restart the firmware.
+                */
+               if (!iwlwifi_mod_params.fw_restart)
+                       ieee80211_sched_scan_stopped(mld->hw);
+       }
+
+       uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_INT_MLO);
+       if (uid >= 0) {
+               IWL_DEBUG_SCAN(mld, "Internal MLO scan aborted\n");
+               mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+       }
+
+       /* IWL_MLD_SCAN_NONE must be zero so that zeroing the whole array
+        * marks every uid as idle
+        */
+       BUILD_BUG_ON(IWL_MLD_SCAN_NONE != 0);
+       memset(mld->scan.uid_status, 0, sizeof(mld->scan.uid_status));
+}
+
+/* Allocate the scan command buffer (kept in mld->scan.cmd for the op mode
+ * lifetime), sized according to the FW's SCAN_REQ_UMAC command version.
+ * Returns 0 on success, -EINVAL on an unsupported version, -ENOMEM on
+ * allocation failure.
+ */
+int iwl_mld_alloc_scan_cmd(struct iwl_mld *mld)
+{
+       size_t scan_cmd_size;
+       u8 scan_cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, SCAN_REQ_UMAC,
+                                               IWL_FW_CMD_VER_UNKNOWN);
+
+       /* only scan command version 17 is supported by this driver */
+       switch (scan_cmd_ver) {
+       case 17:
+               scan_cmd_size = sizeof(struct iwl_scan_req_umac_v17);
+               break;
+       default:
+               IWL_ERR(mld, "Unexpected scan cmd version %d\n", scan_cmd_ver);
+               return -EINVAL;
+       }
+
+       mld->scan.cmd = kmalloc(scan_cmd_size, GFP_KERNEL);
+       if (!mld->scan.cmd)
+               return -ENOMEM;
+
+       mld->scan.cmd_size = scan_cmd_size;
+
+       return 0;
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_scan_h__
+#define __iwl_mld_scan_h__
+
+int iwl_mld_alloc_scan_cmd(struct iwl_mld *mld);
+
+int iwl_mld_regular_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                              struct cfg80211_scan_request *req,
+                              struct ieee80211_scan_ies *ies);
+
+void iwl_mld_int_mlo_scan(struct iwl_mld *mld, struct ieee80211_vif *vif);
+
+void iwl_mld_handle_scan_iter_complete_notif(struct iwl_mld *mld,
+                                            struct iwl_rx_packet *pkt);
+
+int iwl_mld_scan_stop(struct iwl_mld *mld, int type, bool notify);
+
+int iwl_mld_sched_scan_start(struct iwl_mld *mld,
+                            struct ieee80211_vif *vif,
+                            struct cfg80211_sched_scan_request *req,
+                            struct ieee80211_scan_ies *ies,
+                            int type);
+
+void iwl_mld_handle_match_found_notif(struct iwl_mld *mld,
+                                     struct iwl_rx_packet *pkt);
+
+void iwl_mld_handle_scan_complete_notif(struct iwl_mld *mld,
+                                       struct iwl_rx_packet *pkt);
+
+#define WFA_TPC_IE_LEN 9
+
+/* Maximum probe request template size the driver can accept from
+ * mac80211: the FW probe request buffer minus the 802.11 header and the
+ * IEs the driver adds itself (WFA TPC, DS parameter set, SSID).
+ */
+static inline int iwl_mld_scan_max_template_size(void)
+{
+#define MAC_HDR_LEN 24
+#define DS_IE_LEN 3
+#define SSID_IE_LEN 2
+
+/* driver create the 802.11 header, WFA TPC IE, DS parameter and SSID IE */
+#define DRIVER_TOTAL_IES_LEN \
+       (MAC_HDR_LEN + WFA_TPC_IE_LEN + DS_IE_LEN + SSID_IE_LEN)
+
+       BUILD_BUG_ON(SCAN_OFFLOAD_PROBE_REQ_SIZE < DRIVER_TOTAL_IES_LEN);
+
+       return SCAN_OFFLOAD_PROBE_REQ_SIZE - DRIVER_TOTAL_IES_LEN;
+}
+
+void iwl_mld_report_scan_aborted(struct iwl_mld *mld);
+
+/**
+ * enum iwl_mld_scan_status - scan type bits tracked in mld->scan.status
+ * @IWL_MLD_SCAN_NONE: no scan
+ * @IWL_MLD_SCAN_REGULAR: regular (one-shot) scan
+ * @IWL_MLD_SCAN_SCHED: scheduled scan
+ * @IWL_MLD_SCAN_NETDETECT: net-detect scan
+ * @IWL_MLD_SCAN_INT_MLO: internal MLO scan
+ */
+enum iwl_mld_scan_status {
+       IWL_MLD_SCAN_NONE               = 0,
+       IWL_MLD_SCAN_REGULAR            = BIT(0),
+       IWL_MLD_SCAN_SCHED              = BIT(1),
+       IWL_MLD_SCAN_NETDETECT          = BIT(2),
+       IWL_MLD_SCAN_INT_MLO            = BIT(3),
+};
+
+/* enum iwl_mld_pass_all_sched_results_states - Defines the states for
+ * handling/passing scheduled scan results to mac80211
+ * @SCHED_SCAN_PASS_ALL_STATE_DISABLED: Don't pass all scan results, only when
+ *     a match found.
+ * @SCHED_SCAN_PASS_ALL_STATE_ENABLED: Pass all scan results is enabled
+ *     (no filtering).
+ * @SCHED_SCAN_PASS_ALL_STATE_FOUND: A scan result is found, pass it on the
+ *     next scan iteration complete notification.
+ */
+enum iwl_mld_pass_all_sched_results_states {
+       SCHED_SCAN_PASS_ALL_STATE_DISABLED,
+       SCHED_SCAN_PASS_ALL_STATE_ENABLED,
+       SCHED_SCAN_PASS_ALL_STATE_FOUND,
+};
+
+/**
+ * enum iwl_mld_traffic_load - Levels of traffic load
+ *
+ * @IWL_MLD_TRAFFIC_LOW: low traffic load
+ * @IWL_MLD_TRAFFIC_MEDIUM: medium traffic load
+ * @IWL_MLD_TRAFFIC_HIGH: high traffic load
+ */
+enum iwl_mld_traffic_load {
+       IWL_MLD_TRAFFIC_LOW,
+       IWL_MLD_TRAFFIC_MEDIUM,
+       IWL_MLD_TRAFFIC_HIGH,
+};
+
+/**
+ * struct iwl_mld_scan - Scan data
+ * @status: scan status, a combination of %enum iwl_mld_scan_status,
+ *     reflects the %scan.uid_status array.
+ * @uid_status: array to track the scan status per uid.
+ * @start_tsf: start time of last scan in TSF of the link that requested
+ *     the scan.
+ * @last_ebs_failed: true if the last EBS (Energy Based Scan) failed.
+ * @pass_all_sched_res: see %enum iwl_mld_pass_all_sched_results_states.
+ * @fw_link_id: the current (regular) scan fw link id, used by scan
+ *     complete notif.
+ * @traffic_load: traffic load related data
+ * @traffic_load.last_stats_ts_usec: The timestamp of the last statistics
+ *     notification, used to calculate the elapsed time between two
+ *     notifications and determine the traffic load
+ * @traffic_load.status: The current traffic load status, see
+ *     &enum iwl_mld_traffic_load
+ * @cmd_size: size of %cmd.
+ * @cmd: pointer to scan cmd buffer (allocated once in op mode start).
+ * @last_6ghz_passive_jiffies: stores the last 6GHz passive scan time
+ *     in jiffies.
+ * @last_start_time_jiffies: stores the last start time in jiffies
+ *     (interface up/reset/resume).
+ * @last_mlo_scan_jiffies: end time of the last MLO scan in jiffies.
+ */
+struct iwl_mld_scan {
+       /* Add here fields that need clean up on restart */
+       struct_group(zeroed_on_hw_restart,
+               unsigned int status;
+               u32 uid_status[IWL_MAX_UMAC_SCANS];
+               u64 start_tsf;
+               bool last_ebs_failed;
+               enum iwl_mld_pass_all_sched_results_states pass_all_sched_res;
+               u8 fw_link_id;
+               struct {
+                       u32 last_stats_ts_usec;
+                       enum iwl_mld_traffic_load status;
+               } traffic_load;
+       );
+       /* And here fields that survive a fw restart */
+       size_t cmd_size;
+       void *cmd;
+       unsigned long last_6ghz_passive_jiffies;
+       unsigned long last_start_time_jiffies;
+       unsigned long last_mlo_scan_jiffies;
+};
+
+#endif /* __iwl_mld_scan_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include "session-protect.h"
+#include "fw/api/time-event.h"
+#include "fw/api/context.h"
+#include "iface.h"
+#include <net/mac80211.h>
+
+/* Handle the FW session protection notification: clear the vif's session
+ * protection state when the FW reports it inactive (status == 0) or when
+ * it ended, or record its end time when it started.
+ */
+void iwl_mld_handle_session_prot_notif(struct iwl_mld *mld,
+                                      struct iwl_rx_packet *pkt)
+{
+       struct iwl_session_prot_notif *notif = (void *)pkt->data;
+       int fw_link_id = le32_to_cpu(notif->mac_link_id);
+       struct ieee80211_bss_conf *link_conf =
+               iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
+       struct ieee80211_vif *vif;
+       struct iwl_mld_vif *mld_vif;
+       struct iwl_mld_session_protect *session_protect;
+
+       if (WARN_ON(!link_conf))
+               return;
+
+       vif = link_conf->vif;
+       mld_vif = iwl_mld_vif_from_mac80211(vif);
+       session_protect = &mld_vif->session_protect;
+
+       if (!le32_to_cpu(notif->status)) {
+               memset(session_protect, 0, sizeof(*session_protect));
+       } else if (le32_to_cpu(notif->start)) {
+               /* End_jiffies indicates an active session */
+               session_protect->session_requested = false;
+               session_protect->end_jiffies =
+                       TU_TO_EXP_TIME(session_protect->duration);
+               /* !session_protect->end_jiffies means inactive session */
+               if (!session_protect->end_jiffies)
+                       session_protect->end_jiffies = 1;
+       } else {
+               /* session ended */
+               memset(session_protect, 0, sizeof(*session_protect));
+       }
+}
+
+/* Send SESSION_PROTECTION_CMD to add a protection on @link_id.
+ *
+ * Returns -EALREADY if an active session already covers @min_duration,
+ * -EINVAL if the link doesn't exist in the driver, otherwise 0 or the
+ * command-send error.
+ */
+static int _iwl_mld_schedule_session_protection(struct iwl_mld *mld,
+                                               struct ieee80211_vif *vif,
+                                               u32 duration, u32 min_duration,
+                                               int link_id)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_link *link =
+               iwl_mld_link_dereference_check(mld_vif, link_id);
+       struct iwl_mld_session_protect *session_protect =
+               &mld_vif->session_protect;
+       struct iwl_session_prot_cmd cmd = {
+               .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+               .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
+               .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
+       };
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* Don't dereference a link that isn't known to the driver;
+        * iwl_mld_cancel_session_protection() guards the same way.
+        */
+       if (WARN_ON(!link))
+               return -EINVAL;
+
+       cmd.id_and_color = cpu_to_le32(link->fw_id);
+
+       WARN(hweight16(vif->active_links) > 1,
+            "Session protection isn't allowed with more than one active link");
+
+       /* An active session that still covers min_duration is good enough */
+       if (session_protect->end_jiffies &&
+           time_after(session_protect->end_jiffies,
+                      TU_TO_EXP_TIME(min_duration))) {
+               IWL_DEBUG_TE(mld, "We have ample in the current session: %u\n",
+                            jiffies_to_msecs(session_protect->end_jiffies -
+                                             jiffies));
+               return -EALREADY;
+       }
+
+       IWL_DEBUG_TE(mld, "Add a new session protection, duration %d TU\n",
+                    le32_to_cpu(cmd.duration_tu));
+
+       ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP,
+                                               SESSION_PROTECTION_CMD), &cmd);
+
+       if (ret)
+               return ret;
+
+       /* end_jiffies will be updated when handling session_prot_notif */
+       session_protect->end_jiffies = 0;
+       session_protect->duration = duration;
+       session_protect->session_requested = true;
+
+       return 0;
+}
+
+/* Fire-and-forget wrapper: schedule a session protection and only log on
+ * failure (-EALREADY, i.e. an already-sufficient session, isn't an error).
+ */
+void iwl_mld_schedule_session_protection(struct iwl_mld *mld,
+                                        struct ieee80211_vif *vif,
+                                        u32 duration, u32 min_duration,
+                                        int link_id)
+{
+       int ret;
+
+       ret = _iwl_mld_schedule_session_protection(mld, vif, duration,
+                                                  min_duration, link_id);
+       if (ret && ret != -EALREADY)
+               IWL_ERR(mld,
+                       "Couldn't send the SESSION_PROTECTION_CMD (%d)\n",
+                       ret);
+}
+
+/* Context passed to iwl_mld_session_start_fn() via the notification wait;
+ * @success is set when the firmware reports the session protection started
+ * on @link_conf.
+ */
+struct iwl_mld_session_start_data {
+       struct iwl_mld *mld;
+       struct ieee80211_bss_conf *link_conf;
+       bool success;
+};
+
+/* Notification-wait matcher for SESSION_PROTECTION_NOTIF.
+ * Returns true to end the wait: either the session started (sets
+ * data->success) or the firmware reported status == 0 (failure).
+ * Returns false to keep waiting (wrong link, short/unrelated notif).
+ */
+static bool iwl_mld_session_start_fn(struct iwl_notif_wait_data *notif_wait,
+                                    struct iwl_rx_packet *pkt, void *_data)
+{
+       struct iwl_session_prot_notif *notif = (void *)pkt->data;
+       unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+       struct iwl_mld_session_start_data *data = _data;
+       struct ieee80211_bss_conf *link_conf;
+       struct iwl_mld *mld = data->mld;
+       int fw_link_id;
+
+       if (IWL_FW_CHECK(mld, pkt_len < sizeof(*notif),
+                        "short session prot notif (%d)\n",
+                        pkt_len))
+               return false;
+
+       fw_link_id = le32_to_cpu(notif->mac_link_id);
+       link_conf = iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
+
+       /* notification for another link - keep waiting */
+       if (link_conf != data->link_conf)
+               return false;
+
+       /* status == 0: the request failed, stop waiting without success */
+       if (!le32_to_cpu(notif->status))
+               return true;
+
+       if (notif->start) {
+               data->success = true;
+               return true;
+       }
+
+       return false;
+}
+
+/* Schedule a session protection and synchronously wait (up to @timeout)
+ * for the firmware to confirm it started. Returns 0 on success, -EIO if
+ * the notification arrived but reported failure, or a wait/send error.
+ */
+int iwl_mld_start_session_protection(struct iwl_mld *mld,
+                                    struct ieee80211_vif *vif,
+                                    u32 duration, u32 min_duration,
+                                    int link_id, unsigned long timeout)
+{
+       static const u16 start_notif[] = { SESSION_PROTECTION_NOTIF };
+       struct iwl_notification_wait start_wait;
+       struct iwl_mld_session_start_data data = {
+               .mld = mld,
+               .link_conf = wiphy_dereference(mld->wiphy,
+                                              vif->link_conf[link_id]),
+       };
+       int ret;
+
+       if (WARN_ON(!data.link_conf))
+               return -EINVAL;
+
+       iwl_init_notification_wait(&mld->notif_wait, &start_wait,
+                                  start_notif, ARRAY_SIZE(start_notif),
+                                  iwl_mld_session_start_fn, &data);
+
+       ret = _iwl_mld_schedule_session_protection(mld, vif, duration,
+                                                  min_duration, link_id);
+
+       if (ret) {
+               iwl_remove_notification(&mld->notif_wait, &start_wait);
+               /* -EALREADY: an active session already covers min_duration */
+               return ret == -EALREADY ? 0 : ret;
+       }
+
+       ret = iwl_wait_notification(&mld->notif_wait, &start_wait, timeout);
+       if (ret)
+               return ret;
+       return data.success ? 0 : -EIO;
+}
+
+/* Cancel the session protection on @link_id by sending
+ * SESSION_PROTECTION_CMD with FW_CTXT_ACTION_REMOVE, and clear the
+ * local state. A no-op (returns 0) when nothing is active or requested.
+ */
+int iwl_mld_cancel_session_protection(struct iwl_mld *mld,
+                                     struct ieee80211_vif *vif,
+                                     int link_id)
+{
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       struct iwl_mld_link *link =
+               iwl_mld_link_dereference_check(mld_vif, link_id);
+       struct iwl_mld_session_protect *session_protect =
+               &mld_vif->session_protect;
+       struct iwl_session_prot_cmd cmd = {
+               .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+               .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
+       };
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* If there isn't an active session or a requested one for this
+        * link do nothing
+        */
+       if (!session_protect->session_requested &&
+           !session_protect->end_jiffies)
+               return 0;
+
+       if (WARN_ON(!link))
+               return -EINVAL;
+
+       cmd.id_and_color = cpu_to_le32(link->fw_id);
+
+       ret = iwl_mld_send_cmd_pdu(mld,
+                                  WIDE_ID(MAC_CONF_GROUP,
+                                          SESSION_PROTECTION_CMD), &cmd);
+       if (ret) {
+               IWL_ERR(mld,
+                       "Couldn't send the SESSION_PROTECTION_CMD\n");
+               return ret;
+       }
+
+       memset(session_protect, 0, sizeof(*session_protect));
+
+       return 0;
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#ifndef __session_protect_h__
+#define __session_protect_h__
+
+#include "mld.h"
+#include "hcmd.h"
+#include <net/mac80211.h>
+#include "fw/api/mac-cfg.h"
+
+/**
+ * DOC: session protection
+ *
+ * Session protection is an API from the firmware that allows the driver to
+ * request time on medium. This is needed before the association when we need
+ * to be on medium for the association frame exchange. Once we configure the
+ * firmware as 'associated', the firmware will allocate time on medium without
+ * needing a session protection.
+ *
+ * TDLS discovery uses this API as well even after association to ensure that
+ * other activities internal to the firmware will not interrupt our presence
+ * on medium.
+ */
+
+/**
+ * struct iwl_mld_session_protect - session protection parameters
+ * @end_jiffies: expected end_jiffies of current session protection.
+ *     0 if not active
+ * @duration: the duration of the current session, as passed to
+ *     iwl_mld_schedule_session_protection() (NOTE(review): the scheduling
+ *     code stores the caller's msec value, while the notification handler
+ *     expands it with TU_TO_EXP_TIME() - confirm the intended unit)
+ * @session_requested: A session protection command was sent and wasn't yet
+ *     answered
+ */
+struct iwl_mld_session_protect {
+       unsigned long end_jiffies;
+       u32 duration;
+       bool session_requested;
+};
+
+#define IWL_MLD_SESSION_PROTECTION_ASSOC_TIME_MS 900
+#define IWL_MLD_SESSION_PROTECTION_MIN_TIME_MS 400
+
+/**
+ * iwl_mld_handle_session_prot_notif - handles %SESSION_PROTECTION_NOTIF
+ * @mld: the mld component
+ * @pkt: the RX packet containing the notification
+ */
+void iwl_mld_handle_session_prot_notif(struct iwl_mld *mld,
+                                      struct iwl_rx_packet *pkt);
+
+/**
+ * iwl_mld_schedule_session_protection - schedule a session protection
+ * @mld: the mld component
+ * @vif: the virtual interface for which the protection issued
+ * @duration: the requested duration of the protection
+ * @min_duration: the minimum duration of the protection
+ * @link_id: The link to schedule a session protection for
+ */
+void iwl_mld_schedule_session_protection(struct iwl_mld *mld,
+                                        struct ieee80211_vif *vif,
+                                        u32 duration, u32 min_duration,
+                                        int link_id);
+
+/**
+ * iwl_mld_start_session_protection - start a session protection
+ * @mld: the mld component
+ * @vif: the virtual interface for which the protection issued
+ * @duration: the requested duration of the protection
+ * @min_duration: the minimum duration of the protection
+ * @link_id: The link to schedule a session protection for
+ * @timeout: timeout for waiting
+ *
+ * This schedules the session protection, and waits for it to start
+ * (with timeout)
+ *
+ * Returns: 0 if successful, error code otherwise
+ */
+int iwl_mld_start_session_protection(struct iwl_mld *mld,
+                                    struct ieee80211_vif *vif,
+                                    u32 duration, u32 min_duration,
+                                    int link_id, unsigned long timeout);
+
+/**
+ * iwl_mld_cancel_session_protection - cancel the session protection.
+ * @mld: the mld component
+ * @vif: the virtual interface for which the session is issued
+ * @link_id: cancel the session protection for given link
+ *
+ * This function cancels the session protection which is an act of good
+ * citizenship. If it is not needed any more it should be canceled because
+ * the other mac contexts wait for the medium during that time.
+ *
+ * Returns: 0 if successful, error code otherwise
+ *
+ */
+int iwl_mld_cancel_session_protection(struct iwl_mld *mld,
+                                     struct ieee80211_vif *vif,
+                                     int link_id);
+
+#endif /* __session_protect_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <linux/ieee80211.h>
+#include <kunit/static_stub.h>
+
+#include "sta.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "mlo.h"
+#include "key.h"
+#include "agg.h"
+#include "tlc.h"
+#include "fw/api/sta.h"
+#include "fw/api/mac.h"
+#include "fw/api/rx.h"
+
+/* Map a mac80211 link_sta to its firmware station id.
+ * Returns the FW sta id, or -ENOENT if the link_sta is NULL or unknown
+ * to the driver (only expected before recovery from a FW error).
+ */
+int iwl_mld_fw_sta_id_from_link_sta(struct iwl_mld *mld,
+                                   struct ieee80211_link_sta *link_sta)
+{
+       struct iwl_mld_link_sta *mld_link_sta;
+
+       /* This function should only be used with the wiphy lock held,
+        * In other cases, it is not guaranteed that the link_sta will exist
+        * in the driver too, and it is checked here.
+        */
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* This is not meant to be called with a NULL pointer */
+       if (WARN_ON(!link_sta))
+               return -ENOENT;
+
+       mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+       if (!mld_link_sta) {
+               WARN_ON(!iwl_mld_error_before_recovery(mld));
+               return -ENOENT;
+       }
+
+       return mld_link_sta->fw_id;
+}
+
+/* Compute the TX A-MPDU max-size exponent and min spacing (density) for
+ * STA_CONFIG_CMD from the peer's HT/VHT/HE/EHT (and HE 6 GHz band)
+ * capabilities, capped at the FW-supported 4M aggregation size.
+ */
+static void
+iwl_mld_fill_ampdu_size_and_dens(struct ieee80211_link_sta *link_sta,
+                                struct ieee80211_bss_conf *link,
+                                __le32 *tx_ampdu_max_size,
+                                __le32 *tx_ampdu_spacing)
+{
+       u32 agg_size = 0, mpdu_dens = 0;
+
+       if (WARN_ON(!link_sta || !link))
+               return;
+
+       /* Note that we always use only legacy & highest supported PPDUs, so
+        * of Draft P802.11be D.30 Table 10-12a--Fields used for calculating
+        * the maximum A-MPDU size of various PPDU types in different bands,
+        * we only need to worry about the highest supported PPDU type here.
+        */
+
+       if (link_sta->ht_cap.ht_supported) {
+               agg_size = link_sta->ht_cap.ampdu_factor;
+               mpdu_dens = link_sta->ht_cap.ampdu_density;
+       }
+
+       if (link->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
+               /* overwrite HT values on 6 GHz */
+               mpdu_dens =
+                       le16_get_bits(link_sta->he_6ghz_capa.capa,
+                                     IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
+               agg_size =
+                       le16_get_bits(link_sta->he_6ghz_capa.capa,
+                                     IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+       } else if (link_sta->vht_cap.vht_supported) {
+               /* if VHT supported overwrite HT value */
+               agg_size =
+                       u32_get_bits(link_sta->vht_cap.cap,
+                                    IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
+       }
+
+       /* D6.0 10.12.2 A-MPDU length limit rules
+        * A STA indicates the maximum length of the A-MPDU preEOF padding
+        * that it can receive in an HE PPDU in the Maximum A-MPDU Length
+        * Exponent field in its HT Capabilities, VHT Capabilities,
+        * and HE 6 GHz Band Capabilities elements (if present) and the
+        * Maximum AMPDU Length Exponent Extension field in its HE
+        * Capabilities element
+        */
+       if (link_sta->he_cap.has_he)
+               agg_size +=
+                       u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3],
+                                   IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
+
+       if (link_sta->eht_cap.has_eht)
+               agg_size +=
+                       u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1],
+                                   IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK);
+
+       /* Limit to max A-MPDU supported by FW */
+       agg_size = min_t(u32, agg_size,
+                        STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT);
+
+       *tx_ampdu_max_size = cpu_to_le32(agg_size);
+       *tx_ampdu_spacing = cpu_to_le32(mpdu_dens);
+}
+
+/* Build the U-APSD AC bitmask from the peer's WMM QoS-info ACs.
+ * The low nibble is duplicated into the high nibble; presumably one
+ * nibble is trigger-enabled and the other delivery-enabled ACs - confirm
+ * against the STA_CONFIG_CMD API definition.
+ */
+static u8 iwl_mld_get_uapsd_acs(struct ieee80211_sta *sta)
+{
+       u8 uapsd_acs = 0;
+
+       if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+               uapsd_acs |= BIT(AC_BK);
+       if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+               uapsd_acs |= BIT(AC_BE);
+       if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+               uapsd_acs |= BIT(AC_VI);
+       if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+               uapsd_acs |= BIT(AC_VO);
+
+       return uapsd_acs | uapsd_acs << 4;
+}
+
+/* Extract one PPE threshold value (IEEE80211_PPE_THRES_INFO_PPET_SIZE
+ * bits wide) starting at bit position @ppe_pos_bit within the @ppe byte
+ * array, handling values that straddle a byte boundary.
+ */
+static u8 iwl_mld_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
+{
+       u8 byte_num = ppe_pos_bit / 8;
+       u8 bit_num = ppe_pos_bit % 8;
+       u8 residue_bits;
+       u8 res;
+
+       if (bit_num <= 5)
+               return (ppe[byte_num] >> bit_num) &
+                      (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
+
+       /* If bit_num > 5, we have to combine bits with next byte.
+        * Calculate how many bits we need to take from current byte (called
+        * here "residue_bits"), and add them to bits from next byte.
+        */
+
+       residue_bits = 8 - bit_num;
+
+       res = (ppe[byte_num + 1] &
+              (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
+             residue_bits;
+       res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
+
+       return res;
+}
+
+/* Translate a PPE Thresholds field into the FW's pkt_ext_qam_th layout,
+ * per NSS and bandwidth. @inheritance selects the 11be rule of inheriting
+ * thresholds from the last BW that carried them (see loop comment).
+ */
+static void iwl_mld_parse_ppe(struct iwl_mld *mld,
+                             struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
+                             u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit,
+                             bool inheritance)
+{
+       /* FW currently supports only nss == MAX_HE_SUPP_NSS
+        *
+        * If nss > MAX: we can ignore values we don't support
+        * If nss < MAX: we can set zeros in other streams
+        */
+       if (nss > MAX_HE_SUPP_NSS) {
+               IWL_DEBUG_INFO(mld, "Got NSS = %d - trimming to %d\n", nss,
+                              MAX_HE_SUPP_NSS);
+               nss = MAX_HE_SUPP_NSS;
+       }
+
+       for (int i = 0; i < nss; i++) {
+               u8 ru_index_tmp = ru_index_bitmap << 1;
+               u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE;
+
+               for (u8 bw = 0;
+                    bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+                    bw++) {
+                       ru_index_tmp >>= 1;
+
+                       /* According to the 11be spec, if for a specific BW the PPE Thresholds
+                        * isn't present - it should inherit the thresholds from the last
+                        * BW for which we had PPE Thresholds. In 11ax though, we don't have
+                        * this inheritance - continue in this case
+                        */
+                       if (!(ru_index_tmp & 1)) {
+                               if (inheritance)
+                                       goto set_thresholds;
+                               else
+                                       continue;
+                       }
+
+                       high_th = iwl_mld_he_get_ppe_val(ppe, ppe_pos_bit);
+                       ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+                       low_th = iwl_mld_he_get_ppe_val(ppe, ppe_pos_bit);
+                       ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+
+set_thresholds:
+                       pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+                       pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+               }
+       }
+}
+
+/* Parse the HE PPE Thresholds element of @link_sta into the FW
+ * packet-extension format.
+ */
+static void iwl_mld_set_pkt_ext_from_he_ppe(struct iwl_mld *mld,
+                                           struct ieee80211_link_sta *link_sta,
+                                           struct iwl_he_pkt_ext_v2 *pkt_ext,
+                                           bool inheritance)
+{
+       u8 nss = (link_sta->he_cap.ppe_thres[0] &
+                 IEEE80211_PPE_THRES_NSS_MASK) + 1;
+       u8 *ppe = &link_sta->he_cap.ppe_thres[0];
+       u8 ru_index_bitmap =
+               u8_get_bits(*ppe,
+                           IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+       /* Starting after PPE header */
+       u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;
+
+       iwl_mld_parse_ppe(mld, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit,
+                         inheritance);
+}
+
+/* Derive the PPE thresholds from the Common Nominal Packet Padding field
+ * and apply them to every NSS/BW entry.
+ * Returns -EINVAL for a reserved/unknown padding value, 0 otherwise.
+ */
+static int
+iwl_mld_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
+                                        u8 nominal_padding)
+{
+       int low_th = -1;
+       int high_th = -1;
+
+       /* all the macros are the same for EHT and HE */
+       switch (nominal_padding) {
+       case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US:
+               low_th = IWL_HE_PKT_EXT_NONE;
+               high_th = IWL_HE_PKT_EXT_NONE;
+               break;
+       case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US:
+               low_th = IWL_HE_PKT_EXT_BPSK;
+               high_th = IWL_HE_PKT_EXT_NONE;
+               break;
+       case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US:
+       case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US:
+               low_th = IWL_HE_PKT_EXT_NONE;
+               high_th = IWL_HE_PKT_EXT_BPSK;
+               break;
+       }
+
+       if (low_th < 0 || high_th < 0)
+               return -EINVAL;
+
+       /* Set the PPE thresholds accordingly */
+       for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
+               for (u8 bw = 0;
+                       bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+                       bw++) {
+                       pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+                       pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+               }
+       }
+
+       return 0;
+}
+
+/* Raise "None" thresholds to 4096-QAM according to the nominal padding,
+ * so the padding requirement isn't lost for high MCSs (see the caller's
+ * comment about taking the max of PPE thresholds and nominal padding).
+ */
+static void iwl_mld_get_optimal_ppe_info(struct iwl_he_pkt_ext_v2 *pkt_ext,
+                                        u8 nominal_padding)
+{
+       for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
+               for (u8 bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+                    bw++) {
+                       u8 *qam_th = &pkt_ext->pkt_ext_qam_th[i][bw][0];
+
+                       if (nominal_padding >
+                           IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
+                           qam_th[1] == IWL_HE_PKT_EXT_NONE)
+                               qam_th[1] = IWL_HE_PKT_EXT_4096QAM;
+                       else if (nominal_padding ==
+                                IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
+                                qam_th[0] == IWL_HE_PKT_EXT_NONE &&
+                                qam_th[1] == IWL_HE_PKT_EXT_NONE)
+                               qam_th[0] = IWL_HE_PKT_EXT_4096QAM;
+               }
+       }
+}
+
+/* Fill the FW packet-extension (PPE thresholds) data from the peer's
+ * EHT/HE capabilities, falling back to the Common Nominal Packet Padding
+ * field when no PPE Thresholds element is present. Finishes with a debug
+ * dump of the resulting table.
+ */
+static void iwl_mld_fill_pkt_ext(struct iwl_mld *mld,
+                                struct ieee80211_link_sta *link_sta,
+                                struct iwl_he_pkt_ext_v2 *pkt_ext)
+{
+       if (WARN_ON(!link_sta))
+               return;
+
+       /* Initialize the PPE thresholds to "None" (7), as described in Table
+        * 9-262ac of 80211.ax/D3.0.
+        */
+       memset(pkt_ext, IWL_HE_PKT_EXT_NONE, sizeof(*pkt_ext));
+
+       if (link_sta->eht_cap.has_eht) {
+               u8 nominal_padding =
+                       u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
+                                   IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+
+               /* If PPE Thresholds exists, parse them into a FW-familiar
+                * format.
+                */
+               if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] &
+                   IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+                       u8 nss = (link_sta->eht_cap.eht_ppe_thres[0] &
+                               IEEE80211_EHT_PPE_THRES_NSS_MASK) + 1;
+                       u8 *ppe = &link_sta->eht_cap.eht_ppe_thres[0];
+                       u8 ru_index_bitmap =
+                               u16_get_bits(*ppe,
+                                            IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+                        /* Starting after PPE header */
+                       u8 ppe_pos_bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+
+                       iwl_mld_parse_ppe(mld, pkt_ext, nss, ru_index_bitmap,
+                                         ppe, ppe_pos_bit, true);
+               /* EHT PPE Thresholds doesn't exist - set the API according to
+                * HE PPE Tresholds
+                */
+               } else if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
+                          IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+                       /* Even though HE Capabilities IE doesn't contain PPE
+                        * Thresholds for BW 320Mhz, thresholds for this BW will
+                        * be filled in with the same values as 160Mhz, due to
+                        * the inheritance, as required.
+                        */
+                       iwl_mld_set_pkt_ext_from_he_ppe(mld, link_sta, pkt_ext,
+                                                       true);
+
+                       /* According to the requirements, for MCSs 12-13 the
+                        * maximum value between HE PPE Threshold and Common
+                        * Nominal Packet Padding needs to be taken
+                        */
+                       iwl_mld_get_optimal_ppe_info(pkt_ext, nominal_padding);
+
+               /* if PPE Thresholds doesn't present in both EHT IE and HE IE -
+                * take the Thresholds from Common Nominal Packet Padding field
+                */
+               } else {
+                       iwl_mld_set_pkt_ext_from_nominal_padding(pkt_ext,
+                                                                nominal_padding);
+               }
+       } else if (link_sta->he_cap.has_he) {
+               /* If PPE Thresholds exist, parse them into a FW-familiar format. */
+               if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
+                       IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+                       iwl_mld_set_pkt_ext_from_he_ppe(mld, link_sta, pkt_ext,
+                                                       false);
+               /* PPE Thresholds doesn't exist - set the API PPE values
+                * according to Common Nominal Packet Padding field.
+                */
+               } else {
+                       u8 nominal_padding =
+                               u8_get_bits(link_sta->he_cap.he_cap_elem.phy_cap_info[9],
+                                           IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+                       if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
+                               iwl_mld_set_pkt_ext_from_nominal_padding(pkt_ext,
+                                                                        nominal_padding);
+               }
+       }
+
+       for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
+               for (int bw = 0;
+                    bw < ARRAY_SIZE(*pkt_ext->pkt_ext_qam_th[i]);
+                    bw++) {
+                       u8 *qam_th =
+                               &pkt_ext->pkt_ext_qam_th[i][bw][0];
+
+                       IWL_DEBUG_HT(mld,
+                                    "PPE table: nss[%d] bw[%d] PPET8 = %d, PPET16 = %d\n",
+                                    i, bw, qam_th[0], qam_th[1]);
+               }
+       }
+}
+
+/* Convert the peer's HE MAC capability bits into IWL_HE_HTC_* flags for
+ * the FW (HTC support, link adaptation mode, BSR/OMI/BQR support).
+ */
+static u32 iwl_mld_get_htc_flags(struct ieee80211_link_sta *link_sta)
+{
+       u8 *mac_cap_info =
+               &link_sta->he_cap.he_cap_elem.mac_cap_info[0];
+       u32 htc_flags = 0;
+
+       if (mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE)
+               htc_flags |= IWL_HE_HTC_SUPPORT;
+       if ((mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
+           (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
+               /* Link adaptation is a 2-bit field split across the two
+                * capability bytes; combine it to get the mode.
+                */
+               u8 link_adap =
+                       ((mac_cap_info[2] &
+                         IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
+                        (mac_cap_info[1] &
+                         IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
+
+               if (link_adap == 2)
+                       htc_flags |=
+                               IWL_HE_HTC_LINK_ADAP_UNSOLICITED;
+               else if (link_adap == 3)
+                       htc_flags |= IWL_HE_HTC_LINK_ADAP_BOTH;
+       }
+       if (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+               htc_flags |= IWL_HE_HTC_BSR_SUPP;
+       if (mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+               htc_flags |= IWL_HE_HTC_OMI_SUPP;
+       if (mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+               htc_flags |= IWL_HE_HTC_BQR_SUPP;
+
+       return htc_flags;
+}
+
+/* Send STA_CONFIG_CMD to the firmware, logging on failure. */
+static int iwl_mld_send_sta_cmd(struct iwl_mld *mld,
+                               const struct iwl_sta_cfg_cmd *cmd)
+{
+       int ret = iwl_mld_send_cmd_pdu(mld,
+                                      WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
+                                      cmd);
+       if (ret)
+               IWL_ERR(mld, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
+       return ret;
+}
+
+/* Build and send STA_CONFIG_CMD for @link_sta: identifiers, MIMO/SMPS,
+ * A-MPDU parameters, U-APSD, and HE-specific fields.
+ */
+static int
+iwl_mld_add_modify_sta_cmd(struct iwl_mld *mld,
+                          struct ieee80211_link_sta *link_sta)
+{
+       struct ieee80211_sta *sta = link_sta->sta;
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+       struct ieee80211_bss_conf *link;
+       struct iwl_mld_link *mld_link;
+       struct iwl_sta_cfg_cmd cmd = {};
+       int fw_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       link = link_conf_dereference_protected(mld_sta->vif,
+                                              link_sta->link_id);
+
+       mld_link = iwl_mld_link_from_mac80211(link);
+
+       if (WARN_ON(!link || !mld_link) || fw_id < 0)
+               return -EINVAL;
+
+       cmd.sta_id = cpu_to_le32(fw_id);
+       cmd.station_type = cpu_to_le32(mld_sta->sta_type);
+       cmd.link_id = cpu_to_le32(mld_link->fw_id);
+
+       memcpy(&cmd.peer_mld_address, sta->addr, ETH_ALEN);
+       memcpy(&cmd.peer_link_address, link_sta->addr, ETH_ALEN);
+
+       if (mld_sta->sta_state >= IEEE80211_STA_ASSOC)
+               cmd.assoc_id = cpu_to_le32(sta->aid);
+
+       /* mfp is also set before authorization; presumably to have the FW
+        * protect frames until keys are installed - confirm
+        */
+       if (sta->mfp || mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)
+               cmd.mfp = cpu_to_le32(1);
+
+       switch (link_sta->rx_nss) {
+       case 1:
+               cmd.mimo = cpu_to_le32(0);
+               break;
+       case 2 ... 8:
+               cmd.mimo = cpu_to_le32(1);
+               break;
+       }
+
+       switch (link_sta->smps_mode) {
+       case IEEE80211_SMPS_AUTOMATIC:
+       case IEEE80211_SMPS_NUM_MODES:
+               WARN_ON(1);
+               break;
+       case IEEE80211_SMPS_STATIC:
+               /* override NSS */
+               cmd.mimo = cpu_to_le32(0);
+               break;
+       case IEEE80211_SMPS_DYNAMIC:
+               cmd.mimo_protection = cpu_to_le32(1);
+               break;
+       case IEEE80211_SMPS_OFF:
+               /* nothing */
+               break;
+       }
+
+       iwl_mld_fill_ampdu_size_and_dens(link_sta, link,
+                                        &cmd.tx_ampdu_max_size,
+                                        &cmd.tx_ampdu_spacing);
+
+       if (sta->wme) {
+               /* max_sp == 0 means "all buffered frames"; 128 here -
+                * presumably the FW encoding for unlimited - confirm
+                */
+               cmd.sp_length =
+                       cpu_to_le32(sta->max_sp ? sta->max_sp * 2 : 128);
+               cmd.uapsd_acs = cpu_to_le32(iwl_mld_get_uapsd_acs(sta));
+       }
+
+       if (link_sta->he_cap.has_he) {
+               cmd.trig_rnd_alloc =
+                       cpu_to_le32(link->uora_exists ? 1 : 0);
+
+               /* PPE Thresholds */
+               iwl_mld_fill_pkt_ext(mld, link_sta, &cmd.pkt_ext);
+
+               /* HTC flags */
+               cmd.htc_flags =
+                       cpu_to_le32(iwl_mld_get_htc_flags(link_sta));
+
+               if (link_sta->he_cap.he_cap_elem.mac_cap_info[2] &
+                   IEEE80211_HE_MAC_CAP2_ACK_EN)
+                       cmd.ack_enabled = cpu_to_le32(1);
+       }
+
+       return iwl_mld_send_sta_cmd(mld, &cmd);
+}
+
+/* Presumably generates iwl_mld_allocate_link_sta_fw_id() used below -
+ * see the IWL_MLD_ALLOC_FN definition.
+ */
+IWL_MLD_ALLOC_FN(link_sta, link_sta)
+
+/* Allocate (or, on restart, reuse) a FW station id for @link_sta and add
+ * it to the firmware. On failure all driver-side mappings are rolled back.
+ */
+static int
+iwl_mld_add_link_sta(struct iwl_mld *mld, struct ieee80211_link_sta *link_sta)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+       struct iwl_mld_link_sta *mld_link_sta;
+       int ret;
+       u8 fw_id;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* We will fail to add it to the FW anyway */
+       if (iwl_mld_error_before_recovery(mld))
+               return -ENODEV;
+
+       mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+
+       /* We need to preserve the fw sta ids during a restart, since the fw
+        * will recover SN/PN for them, this is why the mld_link_sta exists.
+        */
+       if (mld_link_sta) {
+               /* But if we are not restarting, this is not OK */
+               WARN_ON(!mld->fw_status.in_hw_restart);
+
+               /* Avoid adding a STA that is already in FW to avoid an assert */
+               if (WARN_ON(mld_link_sta->in_fw))
+                       return -EINVAL;
+
+               fw_id = mld_link_sta->fw_id;
+               goto add_to_fw;
+       }
+
+       /* Allocate a fw id and map it to the link_sta */
+       ret = iwl_mld_allocate_link_sta_fw_id(mld, &fw_id, link_sta);
+       if (ret)
+               return ret;
+
+       /* The deflink's driver data is embedded in mld_sta; only non-default
+        * links need a separate allocation.
+        */
+       if (link_sta == &link_sta->sta->deflink) {
+               mld_link_sta = &mld_sta->deflink;
+       } else {
+               mld_link_sta = kzalloc(sizeof(*mld_link_sta), GFP_KERNEL);
+               if (!mld_link_sta)
+                       return -ENOMEM;
+       }
+
+       mld_link_sta->fw_id = fw_id;
+       rcu_assign_pointer(mld_sta->link[link_sta->link_id], mld_link_sta);
+
+add_to_fw:
+       ret = iwl_mld_add_modify_sta_cmd(mld, link_sta);
+       if (ret) {
+               RCU_INIT_POINTER(mld->fw_id_to_link_sta[fw_id], NULL);
+               RCU_INIT_POINTER(mld_sta->link[link_sta->link_id], NULL);
+               if (link_sta != &link_sta->sta->deflink)
+                       kfree(mld_link_sta);
+               return ret;
+       }
+       mld_link_sta->in_fw = true;
+
+       return 0;
+}
+
+/* Remove the station with the given FW id from the firmware. */
+static int iwl_mld_rm_sta_from_fw(struct iwl_mld *mld, u8 fw_sta_id)
+{
+       struct iwl_remove_sta_cmd rm_cmd = {
+               .sta_id = cpu_to_le32(fw_sta_id),
+       };
+       int ret = iwl_mld_send_cmd_pdu(mld,
+                                      WIDE_ID(MAC_CONF_GROUP, STA_REMOVE_CMD),
+                                      &rm_cmd);
+
+       if (ret)
+               IWL_ERR(mld, "Failed to remove station. Id=%d\n", fw_sta_id);
+
+       return ret;
+}
+
+/* Remove a single link STA.
+ *
+ * Removes it from the FW first, then cancels any notifications still
+ * pending for its FW id, and finally drops the driver mappings
+ * (fw_id_to_link_sta[] and mld_sta->link[]). Non-deflink entries are
+ * freed with an RCU grace period.
+ */
+static void
+iwl_mld_remove_link_sta(struct iwl_mld *mld,
+                       struct ieee80211_link_sta *link_sta)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+       struct iwl_mld_link_sta *mld_link_sta =
+               iwl_mld_link_sta_from_mac80211(link_sta);
+
+       if (WARN_ON(!mld_link_sta))
+               return;
+
+       iwl_mld_rm_sta_from_fw(mld, mld_link_sta->fw_id);
+       mld_link_sta->in_fw = false;
+
+       /* Now that the STA doesn't exist in FW, we don't expect any new
+        * notifications for it. Cancel the ones that are already pending
+        */
+       iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_STA,
+                                              mld_link_sta->fw_id);
+
+       /* This will not be done upon reconfig, so do it also when
+        * failed to remove from fw
+        */
+       RCU_INIT_POINTER(mld->fw_id_to_link_sta[mld_link_sta->fw_id], NULL);
+       RCU_INIT_POINTER(mld_sta->link[link_sta->link_id], NULL);
+       if (mld_link_sta != &mld_sta->deflink)
+               kfree_rcu(mld_link_sta, rcu_head);
+}
+
+/* Re-send the STA config command for every active link STA of @sta.
+ * Stops at, and returns, the first error; 0 if all succeeded.
+ */
+int iwl_mld_update_all_link_stations(struct iwl_mld *mld,
+                                    struct ieee80211_sta *sta)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+       struct ieee80211_link_sta *link_sta;
+       int link_id;
+       int ret = 0;
+
+       for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
+               ret = iwl_mld_add_modify_sta_cmd(mld, link_sta);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+/* Free the per-STA driver allocations (duplicate-detection data and
+ * MPDU counters).
+ */
+static void iwl_mld_destroy_sta(struct ieee80211_sta *sta)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+       kfree(mld_sta->mpdu_counters);
+       kfree(mld_sta->dup_data);
+}
+
+/* Allocate the per-RX-queue duplicate-detection data for @mld_sta.
+ * Skipped during a HW restart, in which case the existing data is kept.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int
+iwl_mld_alloc_dup_data(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta)
+{
+       struct iwl_mld_rxq_dup_data *dup_data;
+
+       if (mld->fw_status.in_hw_restart)
+               return 0;
+
+       dup_data = kcalloc(mld->trans->num_rx_queues, sizeof(*dup_data),
+                          GFP_KERNEL);
+       if (!dup_data)
+               return -ENOMEM;
+
+       /* Initialize all the last_seq values to 0xffff which can never
+        * compare equal to the frame's seq_ctrl in the check in
+        * iwl_mld_is_dup() since the lower 4 bits are the fragment
+        * number and fragmented packets don't reach that function.
+        *
+        * This thus allows receiving a packet with seqno 0 and the
+        * retry bit set as the very first packet on a new TID.
+        */
+       for (int q = 0; q < mld->trans->num_rx_queues; q++)
+               memset(dup_data[q].last_seq, 0xff,
+                      sizeof(dup_data[q].last_seq));
+       mld_sta->dup_data = dup_data;
+
+       return 0;
+}
+
+/* Allocate the per-queue MPDU counters used for EMLSR throughput tracking.
+ * Only allocated for a non-TDLS station on an MLD station vif; allocation
+ * failure is tolerated silently (MPDU counting is then simply disabled,
+ * see the NULL check in iwl_mld_count_mpdu()).
+ */
+static void iwl_mld_alloc_mpdu_counters(struct iwl_mld *mld,
+                                       struct ieee80211_sta *sta)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+       struct ieee80211_vif *vif = mld_sta->vif;
+
+       if (mld->fw_status.in_hw_restart)
+               return;
+
+       /* MPDUs are counted only when EMLSR is possible */
+       if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION ||
+           sta->tdls || !ieee80211_vif_is_mld(vif))
+               return;
+
+       mld_sta->mpdu_counters = kcalloc(mld->trans->num_rx_queues,
+                                        sizeof(*mld_sta->mpdu_counters),
+                                        GFP_KERNEL);
+       if (!mld_sta->mpdu_counters)
+               return;
+
+       for (int q = 0; q < mld->trans->num_rx_queues; q++)
+               spin_lock_init(&mld_sta->mpdu_counters[q].lock);
+}
+
+/* Initialize the driver-level STA data: back-pointers, TXQs (skipped on
+ * HW restart), MPDU counters, the data TX antenna toggle and the
+ * duplicate-detection data.
+ * Returns 0 on success or -ENOMEM (from the dup-data allocation).
+ */
+static int
+iwl_mld_init_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
+                struct ieee80211_vif *vif, enum iwl_fw_sta_type type)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+       mld_sta->vif = vif;
+       mld_sta->sta_type = type;
+       mld_sta->mld = mld;
+
+       if (!mld->fw_status.in_hw_restart)
+               for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
+                       iwl_mld_init_txq(iwl_mld_txq_from_mac80211(sta->txq[i]));
+
+       iwl_mld_alloc_mpdu_counters(mld, sta);
+
+       iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant);
+
+       return iwl_mld_alloc_dup_data(mld, mld_sta);
+}
+
+/* Add a STA: initialize the driver-level data and add all its active
+ * link STAs to the FW. On failure, the per-STA allocations are freed.
+ */
+int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
+                   struct ieee80211_vif *vif, enum iwl_fw_sta_type type)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+       struct ieee80211_link_sta *link_sta;
+       int link_id;
+       int ret;
+
+       ret = iwl_mld_init_sta(mld, sta, vif, type);
+       if (ret)
+               return ret;
+
+       /* We could have add only the deflink link_sta, but it will not work
+        * in the restart case if the single link that is active during
+        * reconfig is not the deflink one.
+        */
+       for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
+               ret = iwl_mld_add_link_sta(mld, link_sta);
+               if (ret)
+                       goto destroy_sta;
+       }
+
+       return 0;
+
+destroy_sta:
+       iwl_mld_destroy_sta(sta);
+
+       return ret;
+}
+
+/* Flush the TX queues of every active link STA of @sta in the FW. */
+void iwl_mld_flush_sta_txqs(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+       struct ieee80211_link_sta *link_sta;
+       int link_id;
+
+       for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
+               int fw_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+
+               /* Skip link STAs that don't exist in the driver */
+               if (fw_id >= 0)
+                       iwl_mld_flush_link_sta_txqs(mld, fw_id);
+       }
+}
+
+/* Wait until the transport has drained all the allocated TXQs of @sta. */
+void iwl_mld_wait_sta_txqs_empty(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+       /* Avoid a warning in iwl_trans_wait_txq_empty if we are anyway on
+        * the way to a restart.
+        */
+       if (iwl_mld_error_before_recovery(mld))
+               return;
+
+       for (int i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+               struct iwl_mld_txq *mld_txq =
+                       iwl_mld_txq_from_mac80211(sta->txq[i]);
+
+               if (!mld_txq->status.allocated)
+                       continue;
+
+               iwl_trans_wait_txq_empty(mld->trans, mld_txq->fw_id);
+       }
+}
+
+/* Remove a STA completely: flush and remove its queues, remove its keys
+ * and link STAs from the FW, and free the driver-level STA data.
+ */
+void iwl_mld_remove_sta(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+       struct ieee80211_vif *vif = mld_sta->vif;
+       struct ieee80211_link_sta *link_sta;
+       u8 link_id;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* Tell the HW to flush the queues */
+       iwl_mld_flush_sta_txqs(mld, sta);
+
+       /* Wait for trans to empty its queues */
+       iwl_mld_wait_sta_txqs_empty(mld, sta);
+
+       /* Now we can remove the queues */
+       for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
+               iwl_mld_remove_txq(mld, sta->txq[i]);
+
+       for_each_sta_active_link(vif, sta, link_sta, link_id) {
+               /* mac80211 will remove the groupwise keys after the sta is
+                * removed, but FW expects all the keys to be removed before
+                * the STA is, so remove them all here.
+                */
+               if (vif->type == NL80211_IFTYPE_STATION)
+                       iwl_mld_remove_ap_keys(mld, vif, sta, link_id);
+
+               /* Remove the link_sta */
+               iwl_mld_remove_link_sta(mld, link_sta);
+       }
+
+       iwl_mld_destroy_sta(sta);
+}
+
+/* Build the bitmask of FW station ids of all active link STAs of @sta.
+ *
+ * Must be called with the wiphy lock held; in other cases it is not
+ * guaranteed that the link_sta exists in the driver too, which is
+ * checked in iwl_mld_fw_sta_id_from_link_sta.
+ */
+u32 iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+       struct ieee80211_vif *vif = iwl_mld_sta_from_mac80211(sta)->vif;
+       struct ieee80211_link_sta *link_sta;
+       unsigned int link_id;
+       u32 sta_id_mask = 0;
+
+       KUNIT_STATIC_STUB_REDIRECT(iwl_mld_fw_sta_id_mask, mld, sta);
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       for_each_sta_active_link(vif, sta, link_sta, link_id) {
+               int fw_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+
+               if (fw_id >= 0)
+                       sta_id_mask |= BIT(fw_id);
+       }
+
+       return sta_id_mask;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_fw_sta_id_mask);
+
+/* Account @count TX or RX MPDUs on @queue for the EMLSR TPT window of the
+ * link this link STA belongs to, and queue an unblock work the first time
+ * the total crosses IWL_MLD_ENTER_EMLSR_TPT_THRESH while blocked by TPT.
+ */
+static void iwl_mld_count_mpdu(struct ieee80211_link_sta *link_sta, int queue,
+                              u32 count, bool tx)
+{
+       struct iwl_mld_per_q_mpdu_counter *queue_counter;
+       struct iwl_mld_per_link_mpdu_counter *link_counter;
+       struct iwl_mld_vif *mld_vif;
+       struct iwl_mld_sta *mld_sta;
+       struct iwl_mld_link *mld_link;
+       struct iwl_mld *mld;
+       int total_mpdus = 0;
+
+       if (WARN_ON(!link_sta))
+               return;
+
+       mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+       if (!mld_sta->mpdu_counters)
+               return;
+
+       mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
+       mld_link = iwl_mld_link_dereference_check(mld_vif, link_sta->link_id);
+
+       if (WARN_ON_ONCE(!mld_link))
+               return;
+
+       queue_counter = &mld_sta->mpdu_counters[queue];
+
+       mld = mld_vif->mld;
+
+       /* Take the lock before the window check so that resetting the
+        * counters cannot race with a concurrent update done under the
+        * lock from another context.
+        */
+       spin_lock_bh(&queue_counter->lock);
+
+       /* If the window is over, first clear the counters.
+        * When we are not blocked by TPT, the window is managed by check_tpt_wk
+        */
+       if ((mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT) &&
+           time_is_before_jiffies(queue_counter->window_start_time +
+                                       IWL_MLD_TPT_COUNT_WINDOW)) {
+               memset(queue_counter->per_link, 0,
+                      sizeof(queue_counter->per_link));
+               queue_counter->window_start_time = jiffies;
+
+               IWL_DEBUG_INFO(mld, "MPDU counters are cleared\n");
+       }
+
+       link_counter = &queue_counter->per_link[mld_link->fw_id];
+
+       /* Update the statistics for this TPT measurement window */
+       if (tx)
+               link_counter->tx += count;
+       else
+               link_counter->rx += count;
+
+       /*
+        * Next, evaluate whether we should queue an unblock,
+        * skip this if we are not blocked due to low throughput.
+        */
+       if (!(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
+               goto unlock;
+
+       for (int i = 0; i <= IWL_FW_MAX_LINK_ID; i++)
+               total_mpdus += tx ? queue_counter->per_link[i].tx :
+                                   queue_counter->per_link[i].rx;
+
+       /* Unblock is already queued if the threshold was reached before */
+       if (total_mpdus - count >= IWL_MLD_ENTER_EMLSR_TPT_THRESH)
+               goto unlock;
+
+       if (total_mpdus >= IWL_MLD_ENTER_EMLSR_TPT_THRESH)
+               wiphy_work_queue(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk);
+
+unlock:
+       spin_unlock_bh(&queue_counter->lock);
+}
+
+/* Count @count received MPDUs on Rx @queue for EMLSR TPT tracking.
+ * must be called under rcu_read_lock()
+ */
+void iwl_mld_count_mpdu_rx(struct ieee80211_link_sta *link_sta, int queue,
+                          u32 count)
+{
+       iwl_mld_count_mpdu(link_sta, queue, count, false);
+}
+
+/* Count @count transmitted MPDUs for EMLSR TPT tracking.
+ * must be called under rcu_read_lock()
+ */
+void iwl_mld_count_mpdu_tx(struct ieee80211_link_sta *link_sta, u32 count)
+{
+       /* use queue 0 for all TX */
+       iwl_mld_count_mpdu(link_sta, 0, count, true);
+}
+
+/* Allocate a transport TX queue for an internal (driver-owned) station.
+ * Returns the queue id on success or a negative error code from
+ * iwl_trans_txq_alloc().
+ */
+static int iwl_mld_allocate_internal_txq(struct iwl_mld *mld,
+                                        struct iwl_mld_int_sta *internal_sta,
+                                        u8 tid)
+{
+       u32 sta_mask = BIT(internal_sta->sta_id);
+       int queue, size;
+
+       size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
+                    mld->trans->cfg->min_txq_size);
+
+       queue = iwl_trans_txq_alloc(mld->trans, 0, sta_mask, tid, size,
+                                   IWL_WATCHDOG_DISABLED);
+
+       if (queue >= 0)
+               IWL_DEBUG_TX_QUEUES(mld,
+                                   "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
+                                   queue, sta_mask, tid);
+       return queue;
+}
+
+/* Send the AUX_STA_CMD for the given internal station. */
+static int iwl_mld_send_aux_sta_cmd(struct iwl_mld *mld,
+                                   const struct iwl_mld_int_sta *internal_sta)
+{
+       /* TODO: CDB - properly set the lmac_id */
+       struct iwl_aux_sta_cmd aux_cmd = {
+               .sta_id = cpu_to_le32(internal_sta->sta_id),
+               .lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX),
+       };
+
+       return iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, AUX_STA_CMD),
+                                   &aux_cmd);
+}
+
+/* Add an internal station to the FW. AUX stations use the dedicated
+ * AUX_STA_CMD; all the other types use the regular STA config command
+ * with the given link and address (when provided).
+ */
+static int
+iwl_mld_add_internal_sta_to_fw(struct iwl_mld *mld,
+                              const struct iwl_mld_int_sta *internal_sta,
+                              u8 fw_link_id,
+                              const u8 *addr)
+{
+       struct iwl_sta_cfg_cmd cmd = {};
+
+       if (internal_sta->sta_type == STATION_TYPE_AUX)
+               return iwl_mld_send_aux_sta_cmd(mld, internal_sta);
+
+       cmd.sta_id = cpu_to_le32((u8)internal_sta->sta_id);
+       cmd.link_id = cpu_to_le32(fw_link_id);
+       cmd.station_type = cpu_to_le32(internal_sta->sta_type);
+
+       /* FW doesn't allow to add a IGTK/BIGTK if the sta isn't marked as MFP.
+        * On the other hand, FW will never check this flag during RX since
+        * an AP/GO doesn't receive protected broadcast management frames.
+        * So, we can set it unconditionally.
+        */
+       if (internal_sta->sta_type == STATION_TYPE_BCAST_MGMT)
+               cmd.mfp = cpu_to_le32(1);
+
+       if (addr) {
+               memcpy(cmd.peer_mld_address, addr, ETH_ALEN);
+               memcpy(cmd.peer_link_address, addr, ETH_ALEN);
+       }
+
+       return iwl_mld_send_sta_cmd(mld, &cmd);
+}
+
+/* Add an internal station: allocate a FW station id for it, add it to
+ * the FW and allocate a TX queue for it. On failure, everything done so
+ * far is rolled back.
+ * Returns 0 on success or a negative error code.
+ */
+static int iwl_mld_add_internal_sta(struct iwl_mld *mld,
+                                   struct iwl_mld_int_sta *internal_sta,
+                                   enum iwl_fw_sta_type sta_type,
+                                   u8 fw_link_id, const u8 *addr, u8 tid)
+{
+       int ret, queue_id;
+
+       /* Internal stations have no mac80211 link_sta; reserve the id with
+        * an error-pointer placeholder in fw_id_to_link_sta.
+        */
+       ret = iwl_mld_allocate_link_sta_fw_id(mld,
+                                             &internal_sta->sta_id,
+                                             ERR_PTR(-EINVAL));
+       if (ret)
+               return ret;
+
+       internal_sta->sta_type = sta_type;
+
+       ret = iwl_mld_add_internal_sta_to_fw(mld, internal_sta, fw_link_id,
+                                            addr);
+       if (ret)
+               goto err;
+
+       queue_id = iwl_mld_allocate_internal_txq(mld, internal_sta, tid);
+       if (queue_id < 0) {
+               iwl_mld_rm_sta_from_fw(mld, internal_sta->sta_id);
+               ret = queue_id;
+               goto err;
+       }
+
+       internal_sta->queue_id = queue_id;
+
+       return 0;
+err:
+       iwl_mld_free_internal_sta(mld, internal_sta);
+       return ret;
+}
+
+/* Add the broadcast/management internal station for an AP or ADHOC link.
+ * For ADHOC the link's BSSID is used as the station address.
+ */
+int iwl_mld_add_bcast_sta(struct iwl_mld *mld,
+                         struct ieee80211_vif *vif,
+                         struct ieee80211_bss_conf *link)
+{
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+       const u8 bcast_addr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+       const u8 *addr;
+
+       if (WARN_ON(!mld_link))
+               return -EINVAL;
+
+       if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+                   vif->type != NL80211_IFTYPE_ADHOC))
+               return -EINVAL;
+
+       addr = vif->type == NL80211_IFTYPE_ADHOC ? link->bssid : bcast_addr;
+
+       return iwl_mld_add_internal_sta(mld, &mld_link->bcast_sta,
+                                       STATION_TYPE_BCAST_MGMT,
+                                       mld_link->fw_id, addr,
+                                       IWL_MGMT_TID);
+}
+
+/* Add the multicast internal station for an AP or ADHOC link */
+int iwl_mld_add_mcast_sta(struct iwl_mld *mld,
+                         struct ieee80211_vif *vif,
+                         struct ieee80211_bss_conf *link)
+{
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+       const u8 mcast_addr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+       if (WARN_ON(!mld_link))
+               return -EINVAL;
+
+       if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+                   vif->type != NL80211_IFTYPE_ADHOC))
+               return -EINVAL;
+
+       return iwl_mld_add_internal_sta(mld, &mld_link->mcast_sta,
+                                       STATION_TYPE_MCAST,
+                                       mld_link->fw_id, mcast_addr, 0);
+}
+
+/* Add an AUX internal station (no link, no address) */
+int iwl_mld_add_aux_sta(struct iwl_mld *mld,
+                       struct iwl_mld_int_sta *internal_sta)
+{
+       return iwl_mld_add_internal_sta(mld, internal_sta, STATION_TYPE_AUX,
+                                       0, NULL, IWL_MAX_TID_COUNT);
+}
+
+/* Remove an internal station: optionally flush its TXQs, free its TX
+ * queue, remove it from the FW and release the driver resources.
+ */
+static void iwl_mld_remove_internal_sta(struct iwl_mld *mld,
+                                       struct iwl_mld_int_sta *internal_sta,
+                                       bool flush, u8 tid)
+{
+       if (WARN_ON_ONCE(internal_sta->sta_id == IWL_INVALID_STA ||
+                        internal_sta->queue_id == IWL_MLD_INVALID_QUEUE))
+               return;
+
+       if (flush)
+               iwl_mld_flush_link_sta_txqs(mld, internal_sta->sta_id);
+
+       iwl_mld_free_txq(mld, BIT(internal_sta->sta_id),
+                        tid, internal_sta->queue_id);
+
+       iwl_mld_rm_sta_from_fw(mld, internal_sta->sta_id);
+
+       iwl_mld_free_internal_sta(mld, internal_sta);
+}
+
+/* Remove the broadcast/management internal station of an AP/ADHOC link */
+void iwl_mld_remove_bcast_sta(struct iwl_mld *mld,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_bss_conf *link)
+{
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+       if (WARN_ON(!mld_link))
+               return;
+
+       if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+                   vif->type != NL80211_IFTYPE_ADHOC))
+               return;
+
+       iwl_mld_remove_internal_sta(mld, &mld_link->bcast_sta, true,
+                                   IWL_MGMT_TID);
+}
+
+/* Remove the multicast internal station of an AP/ADHOC link */
+void iwl_mld_remove_mcast_sta(struct iwl_mld *mld,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_bss_conf *link)
+{
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+       if (WARN_ON(!mld_link))
+               return;
+
+       if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+                   vif->type != NL80211_IFTYPE_ADHOC))
+               return;
+
+       iwl_mld_remove_internal_sta(mld, &mld_link->mcast_sta, true, 0);
+}
+
+/* Remove the AUX internal station of a P2P device link (no flush) */
+void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
+                           struct ieee80211_vif *vif,
+                           struct ieee80211_bss_conf *link)
+{
+       struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+       if (WARN_ON(!mld_link))
+               return;
+
+       /* TODO: Hotspot 2.0 */
+       if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE))
+               return;
+
+       iwl_mld_remove_internal_sta(mld, &mld_link->aux_sta, false,
+                                   IWL_MAX_TID_COUNT);
+}
+
+/* Move the STA's TXQs, keys and BAIDs from the old FW station id mask
+ * to the new one. Returns the first error encountered, or 0.
+ */
+static int iwl_mld_update_sta_resources(struct iwl_mld *mld,
+                                       struct ieee80211_vif *vif,
+                                       struct ieee80211_sta *sta,
+                                       u32 old_sta_mask,
+                                       u32 new_sta_mask)
+{
+       int ret = iwl_mld_update_sta_txqs(mld, sta, old_sta_mask,
+                                         new_sta_mask);
+
+       if (!ret)
+               ret = iwl_mld_update_sta_keys(mld, vif, sta, old_sta_mask,
+                                             new_sta_mask);
+       if (!ret)
+               ret = iwl_mld_update_sta_baids(mld, old_sta_mask,
+                                              new_sta_mask);
+
+       return ret;
+}
+
+/* Handle a change in the set of links of @sta.
+ *
+ * Link STAs of removed links are torn down (after moving the TXQ/key/BAID
+ * resources away from their FW ids), link STAs of added links are created
+ * (with the resources then extended to their FW ids) and finally the newly
+ * added links are activated. On failure, the link STAs added here are
+ * rolled back.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int iwl_mld_update_link_stas(struct iwl_mld *mld,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_sta *sta,
+                            u16 old_links, u16 new_links)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+       struct iwl_mld_link_sta *mld_link_sta;
+       unsigned long links_to_add = ~old_links & new_links;
+       unsigned long links_to_rem = old_links & ~new_links;
+       unsigned long old_links_long = old_links;
+       unsigned long sta_mask_added = 0;
+       u32 current_sta_mask = 0, sta_mask_to_rem = 0;
+       unsigned int link_id, sta_id;
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* Compute the current FW sta id mask and the part of it to remove */
+       for_each_set_bit(link_id, &old_links_long,
+                        IEEE80211_MLD_MAX_NUM_LINKS) {
+               mld_link_sta =
+                       iwl_mld_link_sta_dereference_check(mld_sta, link_id);
+
+               if (WARN_ON(!mld_link_sta))
+                       return -EINVAL;
+
+               current_sta_mask |= BIT(mld_link_sta->fw_id);
+               if (links_to_rem & BIT(link_id))
+                       sta_mask_to_rem |= BIT(mld_link_sta->fw_id);
+       }
+
+       /* Move the resources away from the removed ids before tearing the
+        * link STAs down
+        */
+       if (sta_mask_to_rem) {
+               ret = iwl_mld_update_sta_resources(mld, vif, sta,
+                                                  current_sta_mask,
+                                                  current_sta_mask &
+                                                       ~sta_mask_to_rem);
+               if (ret)
+                       return ret;
+
+               current_sta_mask &= ~sta_mask_to_rem;
+       }
+
+       for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) {
+               struct ieee80211_link_sta *link_sta =
+                       link_sta_dereference_protected(sta, link_id);
+
+               if (WARN_ON(!link_sta))
+                       return -EINVAL;
+
+               iwl_mld_remove_link_sta(mld, link_sta);
+       }
+
+       for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+               struct ieee80211_link_sta *link_sta =
+                       link_sta_dereference_protected(sta, link_id);
+               struct ieee80211_bss_conf *link;
+
+               if (WARN_ON(!link_sta))
+                       return -EINVAL;
+
+               ret = iwl_mld_add_link_sta(mld, link_sta);
+               if (ret)
+                       goto remove_added_link_stas;
+
+               mld_link_sta =
+                       iwl_mld_link_sta_dereference_check(mld_sta,
+                                                          link_id);
+
+               link = link_conf_dereference_protected(mld_sta->vif,
+                                                      link_sta->link_id);
+               iwl_mld_config_tlc_link(mld, vif, link, link_sta);
+
+               sta_mask_added |= BIT(mld_link_sta->fw_id);
+       }
+
+       /* Extend the resources to cover the newly added FW sta ids */
+       if (sta_mask_added) {
+               ret = iwl_mld_update_sta_resources(mld, vif, sta,
+                                                  current_sta_mask,
+                                                  current_sta_mask |
+                                                       sta_mask_added);
+               if (ret)
+                       goto remove_added_link_stas;
+       }
+
+       /* We couldn't activate the links before it has a STA. Now we can */
+       for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+               struct ieee80211_bss_conf *link =
+                       link_conf_dereference_protected(mld_sta->vif, link_id);
+
+               if (WARN_ON(!link))
+                       continue;
+
+               iwl_mld_activate_link(mld, link);
+       }
+
+       return 0;
+
+remove_added_link_stas:
+       /* Tear down only the link STAs created in this call */
+       for_each_set_bit(sta_id, &sta_mask_added, mld->fw->ucode_capa.num_stations) {
+               struct ieee80211_link_sta *link_sta =
+                       wiphy_dereference(mld->wiphy,
+                                         mld->fw_id_to_link_sta[sta_id]);
+
+               if (WARN_ON(!link_sta))
+                       continue;
+
+               iwl_mld_remove_link_sta(mld, link_sta);
+       }
+
+       return ret;
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#ifndef __iwl_mld_sta_h__
+#define __iwl_mld_sta_h__
+
+#include <net/mac80211.h>
+
+#include "mld.h"
+#include "tx.h"
+
+/**
+ * struct iwl_mld_rxq_dup_data - Duplication detection data, per STA & Rx queue
+ * @last_seq: last sequence per tid.
+ * @last_sub_frame_idx: the index of the last subframe in an A-MSDU. This value
+ *     will be zero if the packet is not part of an A-MSDU.
+ *
+ * Cacheline-aligned so that each Rx queue's entry sits on its own cache
+ * line(s).
+ */
+struct iwl_mld_rxq_dup_data {
+       __le16 last_seq[IWL_MAX_TID_COUNT + 1];
+       u8 last_sub_frame_idx[IWL_MAX_TID_COUNT + 1];
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mld_link_sta - link-level station
+ *
+ * This represents the link-level sta - the driver level equivalent to the
+ * ieee80211_link_sta
+ *
+ * @last_rate_n_flags: rate_n_flags from the last &iwl_tlc_update_notif
+ * @signal_avg: the signal average coming from the firmware
+ * @in_fw: whether the link STA is uploaded to the FW (false during restart)
+ * @rcu_head: RCU head for freeing this object
+ * @fw_id: the FW id of this link sta. Preserved across a FW restart so the
+ *     FW can recover the SN/PN state for it.
+ */
+struct iwl_mld_link_sta {
+       /* Add here fields that need clean up on restart */
+       struct_group(zeroed_on_hw_restart,
+               u32 last_rate_n_flags;
+               bool in_fw;
+               s8 signal_avg;
+       );
+       /* And here fields that survive a fw restart */
+       struct rcu_head rcu_head;
+       u32 fw_id;
+};
+
+#define iwl_mld_link_sta_dereference_check(mld_sta, link_id)           \
+       rcu_dereference_check((mld_sta)->link[link_id],                 \
+                             lockdep_is_held(&mld_sta->mld->wiphy->mtx))
+
+#define for_each_mld_link_sta(mld_sta, link_sta, link_id)              \
+       for (link_id = 0; link_id < ARRAY_SIZE((mld_sta)->link);        \
+            link_id++)                                                 \
+               if ((link_sta =                                         \
+                       iwl_mld_link_sta_dereference_check(mld_sta, link_id)))
+
+/* Number of PTK key slots tracked per STA (see &struct iwl_mld_sta.ptk_pn) */
+#define IWL_NUM_DEFAULT_KEYS 4
+
+/* struct iwl_mld_ptk_pn - Holds Packet Number (PN) per TID.
+ * @rcu_head: RCU head for freeing this data.
+ * @q: per-Rx-queue PN data (flexible array); each entry holds a PN per TID,
+ *     cacheline-aligned so queues don't share cache lines.
+ */
+struct iwl_mld_ptk_pn {
+       struct rcu_head rcu_head;
+       struct {
+               u8 pn[IWL_MAX_TID_COUNT][IEEE80211_CCMP_PN_LEN];
+       } ____cacheline_aligned_in_smp q[];
+};
+
+/**
+ * struct iwl_mld_per_link_mpdu_counter - per-link TX/RX MPDU counters
+ *
+ * Used for the EMLSR throughput-window accounting.
+ *
+ * @tx: Number of TX MPDUs.
+ * @rx: Number of RX MPDUs.
+ */
+struct iwl_mld_per_link_mpdu_counter {
+       u32 tx;
+       u32 rx;
+};
+
+/**
+ * struct iwl_mld_per_q_mpdu_counter - per-queue MPDU counter
+ *
+ * @lock: Needed to protect the counters when modified from statistics.
+ * @per_link: per-link counters.
+ * @window_start_time: timestamp (jiffies) of the counting-window start
+ */
+struct iwl_mld_per_q_mpdu_counter {
+       spinlock_t lock;
+       struct iwl_mld_per_link_mpdu_counter per_link[IWL_FW_MAX_LINK_ID + 1];
+       unsigned long window_start_time;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mld_sta - representation of a station in the driver.
+ *
+ * This represent the MLD-level sta, and will not be added to the FW.
+ * Embedded in ieee80211_sta.
+ *
+ * @vif: pointer the vif object.
+ * @sta_state: station state according to enum %ieee80211_sta_state
+ * @sta_type: type of this station. See &enum iwl_fw_sta_type
+ * @mld: a pointer to the iwl_mld object
+ * @dup_data: per queue duplicate packet detection data (NULL until
+ *     allocated; kept across a HW restart)
+ * @data_tx_ant: stores the last TX antenna index; used for setting
+ *     TX rate_n_flags for injected data frames (toggles on every TX failure).
+ * @tid_to_baid: a simple map of TID to Block-Ack fw id
+ * @deflink: This holds the default link STA information, for non MLO STA all
+ *     link specific STA information is accessed through @deflink or through
+ *     link[0] which points to address of @deflink. For MLO Link STA
+ *     the first added link STA will point to deflink.
+ * @link: reference to Link Sta entries. For Non MLO STA, except 1st link,
+ *     i.e link[0] all links would be assigned to NULL by default and
+ *     would access link information via @deflink or link[0]. For MLO
+ *     STA, first link STA being added will point its link pointer to
+ *     @deflink address and remaining would be allocated and the address
+ *     would be assigned to link[link_id] where link_id is the id assigned
+ *     by the AP.
+ * @ptk_pn: Array of pointers to PTK PN data, used to track the Packet Number
+ *     per key index and per queue (TID).
+ * @mpdu_counters: RX/TX MPDUs counters for each queue. NULL when EMLSR MPDU
+ *     counting is not applicable for this station.
+ */
+struct iwl_mld_sta {
+       /* Add here fields that need clean up on restart */
+       struct_group(zeroed_on_hw_restart,
+               enum ieee80211_sta_state sta_state;
+               enum iwl_fw_sta_type sta_type;
+       );
+       /* And here fields that survive a fw restart */
+       struct iwl_mld *mld;
+       struct ieee80211_vif *vif;
+       struct iwl_mld_rxq_dup_data *dup_data;
+       u8 tid_to_baid[IWL_MAX_TID_COUNT];
+       u8 data_tx_ant;
+
+       struct iwl_mld_link_sta deflink;
+       struct iwl_mld_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+       struct iwl_mld_ptk_pn __rcu *ptk_pn[IWL_NUM_DEFAULT_KEYS];
+       struct iwl_mld_per_q_mpdu_counter *mpdu_counters;
+};
+
+/* Get the driver-level STA embedded in a mac80211 STA's drv_priv */
+static inline struct iwl_mld_sta *
+iwl_mld_sta_from_mac80211(struct ieee80211_sta *sta)
+{
+       return (void *)sta->drv_priv;
+}
+
+/* Cleanup callback for a STA on reconfig/restart: reset the TXQ state and
+ * the restart-zeroed part of each link STA, and drop link STAs of links
+ * that are no longer active (which should not normally happen).
+ */
+static inline void
+iwl_mld_cleanup_sta(void *data, struct ieee80211_sta *sta)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+       struct iwl_mld_link_sta *mld_link_sta;
+       u8 link_id;
+
+       for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
+               CLEANUP_STRUCT(iwl_mld_txq_from_mac80211(sta->txq[i]));
+
+       for_each_mld_link_sta(mld_sta, mld_link_sta, link_id) {
+               CLEANUP_STRUCT(mld_link_sta);
+
+               if (!ieee80211_vif_is_mld(mld_sta->vif)) {
+                       /* not an MLD STA; only has the deflink with ID zero */
+                       WARN_ON(link_id);
+                       continue;
+               }
+
+               if (mld_sta->vif->active_links & BIT(link_id))
+                       continue;
+
+               /* Should not happen as link removal should always succeed */
+               WARN_ON(1);
+               RCU_INIT_POINTER(mld_sta->link[link_id], NULL);
+               RCU_INIT_POINTER(mld_sta->mld->fw_id_to_link_sta[mld_link_sta->fw_id],
+                                NULL);
+               if (mld_link_sta != &mld_sta->deflink)
+                       kfree_rcu(mld_link_sta, rcu_head);
+       }
+
+       CLEANUP_STRUCT(mld_sta);
+}
+
+/* Return the driver-private link STA matching a mac80211 link STA.
+ * May return NULL (callers check) if the link was not added.
+ */
+static inline struct iwl_mld_link_sta *
+iwl_mld_link_sta_from_mac80211(struct ieee80211_link_sta *link_sta)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+
+       return iwl_mld_link_sta_dereference_check(mld_sta, link_sta->link_id);
+}
+
+int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
+                   struct ieee80211_vif *vif, enum iwl_fw_sta_type type);
+void iwl_mld_remove_sta(struct iwl_mld *mld, struct ieee80211_sta *sta);
+int iwl_mld_fw_sta_id_from_link_sta(struct iwl_mld *mld,
+                                   struct ieee80211_link_sta *link_sta);
+u32 iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta);
+int iwl_mld_update_all_link_stations(struct iwl_mld *mld,
+                                    struct ieee80211_sta *sta);
+void iwl_mld_flush_sta_txqs(struct iwl_mld *mld, struct ieee80211_sta *sta);
+void iwl_mld_wait_sta_txqs_empty(struct iwl_mld *mld,
+                                struct ieee80211_sta *sta);
+void iwl_mld_count_mpdu_rx(struct ieee80211_link_sta *link_sta, int queue,
+                          u32 count);
+void iwl_mld_count_mpdu_tx(struct ieee80211_link_sta *link_sta, u32 count);
+
+/**
+ * struct iwl_mld_int_sta - representation of an internal station
+ * (a station that exists in FW and in driver, but not in mac80211)
+ *
+ * @sta_id: the index of the station in the fw
+ * @queue_id: the id of the queue used by the station
+ * @sta_type: station type. One of &iwl_fw_sta_type
+ */
+struct iwl_mld_int_sta {
+       u8 sta_id;
+       u32 queue_id;
+       enum iwl_fw_sta_type sta_type;
+};
+
+/* Mark an internal station as unused: both the fw station id and the
+ * queue id are set to their invalid sentinels.
+ */
+static inline void
+iwl_mld_init_internal_sta(struct iwl_mld_int_sta *internal_sta)
+{
+       internal_sta->queue_id = IWL_MLD_INVALID_QUEUE;
+       internal_sta->sta_id = IWL_INVALID_STA;
+}
+
+/* Release an internal station: detach it from the fw-id lookup map and
+ * mark it unused again. WARNs (and bails) if it was never allocated.
+ */
+static inline void
+iwl_mld_free_internal_sta(struct iwl_mld *mld,
+                         struct iwl_mld_int_sta *internal_sta)
+{
+       if (WARN_ON(internal_sta->sta_id == IWL_INVALID_STA))
+               return;
+
+       RCU_INIT_POINTER(mld->fw_id_to_link_sta[internal_sta->sta_id], NULL);
+       iwl_mld_init_internal_sta(internal_sta);
+}
+
+int iwl_mld_add_bcast_sta(struct iwl_mld *mld,
+                         struct ieee80211_vif *vif,
+                         struct ieee80211_bss_conf *link);
+
+int iwl_mld_add_mcast_sta(struct iwl_mld *mld,
+                         struct ieee80211_vif *vif,
+                         struct ieee80211_bss_conf *link);
+
+int iwl_mld_add_aux_sta(struct iwl_mld *mld,
+                       struct iwl_mld_int_sta *internal_sta);
+
+void iwl_mld_remove_bcast_sta(struct iwl_mld *mld,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_bss_conf *link);
+
+void iwl_mld_remove_mcast_sta(struct iwl_mld *mld,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_bss_conf *link);
+
+void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
+                           struct ieee80211_vif *vif,
+                           struct ieee80211_bss_conf *link);
+
+int iwl_mld_update_link_stas(struct iwl_mld *mld,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_sta *sta,
+                            u16 old_links, u16 new_links);
+#endif /* __iwl_mld_sta_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <linux/math64.h>
+
+#include "mld.h"
+#include "stats.h"
+#include "sta.h"
+#include "mlo.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "scan.h"
+#include "phy.h"
+#include "fw/api/stats.h"
+
+/* Build and send a SYSTEM_STATISTICS_CMD with the given configuration
+ * flags, reporting period (seconds) and notification type mask.
+ */
+static int iwl_mld_send_fw_stats_cmd(struct iwl_mld *mld, u32 cfg_mask,
+                                    u32 cfg_time, u32 type_mask)
+{
+       struct iwl_system_statistics_cmd stats_cmd = {
+               .type_id_mask = cpu_to_le32(type_mask),
+               .config_time_sec = cpu_to_le32(cfg_time),
+               .cfg_mask = cpu_to_le32(cfg_mask),
+       };
+
+       return iwl_mld_send_cmd_pdu(mld,
+                                   WIDE_ID(SYSTEM_GROUP,
+                                           SYSTEM_STATISTICS_CMD),
+                                   &stats_cmd);
+}
+
+/* Request a single on-demand notification, which also resets the fw's
+ * statistics database for the operational types.
+ */
+int iwl_mld_clear_stats_in_fw(struct iwl_mld *mld)
+{
+       return iwl_mld_send_fw_stats_cmd(mld,
+                                        IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK,
+                                        0,
+                                        IWL_STATS_NTFY_TYPE_ID_OPER |
+                                        IWL_STATS_NTFY_TYPE_ID_OPER_PART1);
+}
+
+/* Fill @sinfo's signal average for @fw_sta_id from an operational
+ * statistics notification. FW may report 0 (not a valid value); in that
+ * case the last non-zero value stored in the link STA is reported.
+ */
+static void
+iwl_mld_fill_stats_from_oper_notif(struct iwl_mld *mld,
+                                  struct iwl_rx_packet *pkt,
+                                  u8 fw_sta_id, struct station_info *sinfo)
+{
+       const struct iwl_system_statistics_notif_oper *notif =
+               (void *)&pkt->data;
+       const struct iwl_stats_ntfy_per_sta *per_sta =
+               &notif->per_sta[fw_sta_id];
+       struct ieee80211_link_sta *link_sta;
+       struct iwl_mld_link_sta *mld_link_sta;
+
+       /* 0 isn't a valid value, but FW might send 0.
+        * In that case, set the latest non-zero value we stored
+        */
+       rcu_read_lock();
+
+       link_sta = rcu_dereference(mld->fw_id_to_link_sta[fw_sta_id]);
+       if (IS_ERR_OR_NULL(link_sta))
+               goto unlock;
+
+       mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+       if (WARN_ON(!mld_link_sta))
+               goto unlock;
+
+       /* FW reports energy as a positive number; store it negated */
+       if (per_sta->average_energy)
+               mld_link_sta->signal_avg =
+                       -(s8)le32_to_cpu(per_sta->average_energy);
+
+       sinfo->signal_avg = mld_link_sta->signal_avg;
+       sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+
+unlock:
+       rcu_read_unlock();
+}
+
+/* Context passed to the statistics notification-wait handler */
+struct iwl_mld_stats_data {
+       u8 fw_sta_id; /* fw id of the STA whose stats are requested */
+       struct station_info *sinfo; /* output, filled from the OPER notif */
+       struct iwl_mld *mld;
+};
+
+/* Notification-wait handler for the on-demand statistics request.
+ * The END notif terminates the wait (returns true); the OPER notif fills
+ * @sinfo; the OPER_PART1 notif is received but not parsed.
+ */
+static bool iwl_mld_wait_stats_handler(struct iwl_notif_wait_data *notif_data,
+                                      struct iwl_rx_packet *pkt, void *data)
+{
+       struct iwl_mld_stats_data *stats_data = data;
+       u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+       if (cmd == WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF))
+               return true;
+
+       if (cmd == WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_NOTIF))
+               iwl_mld_fill_stats_from_oper_notif(stats_data->mld, pkt,
+                                                  stats_data->fw_sta_id,
+                                                  stats_data->sinfo);
+
+       return false;
+}
+
+/* Send an on-demand statistics request and synchronously wait (up to
+ * 500ms) for the OPER/PART1/END notifications, filling @sinfo for
+ * @mld_sta's deflink. Returns 0 on success, or a negative error on
+ * command failure or wait timeout.
+ */
+static int
+iwl_mld_fw_stats_to_mac80211(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta,
+                            struct station_info *sinfo)
+{
+       u32 cfg_mask = IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK |
+                      IWL_STATS_CFG_FLG_RESET_MSK;
+       u32 type_mask = IWL_STATS_NTFY_TYPE_ID_OPER |
+                       IWL_STATS_NTFY_TYPE_ID_OPER_PART1;
+       static const u16 notifications[] = {
+               WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_NOTIF),
+               WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF),
+               WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF),
+       };
+       struct iwl_mld_stats_data wait_stats_data = {
+               /* We don't support drv_sta_statistics in EMLSR */
+               .fw_sta_id = mld_sta->deflink.fw_id,
+               .sinfo = sinfo,
+               .mld = mld,
+       };
+       struct iwl_notification_wait stats_wait;
+       int ret;
+
+       iwl_init_notification_wait(&mld->notif_wait, &stats_wait,
+                                  notifications, ARRAY_SIZE(notifications),
+                                  iwl_mld_wait_stats_handler,
+                                  &wait_stats_data);
+
+       ret = iwl_mld_send_fw_stats_cmd(mld, cfg_mask, 0, type_mask);
+       if (ret) {
+               /* the command never went out - stop waiting for replies */
+               iwl_remove_notification(&mld->notif_wait, &stats_wait);
+               return ret;
+       }
+
+       /* Wait 500ms for OPERATIONAL, PART1, and END notifications,
+        * which should be sufficient for the firmware to gather data
+        * from all LMACs and send notifications to the host.
+        */
+       ret = iwl_wait_notification(&mld->notif_wait, &stats_wait, HZ / 2);
+       if (ret)
+               return ret;
+
+       /* When periodic statistics are sent, FW will clear its statistics DB.
+        * If the statistics request here happens shortly afterwards,
+        * the response will contain data collected over a short time
+        * interval. The response we got here shouldn't be processed by
+        * the general statistics processing because it's incomplete.
+        * So, we delete it from the list so it won't be processed.
+        */
+       iwl_mld_delete_handlers(mld, notifications, ARRAY_SIZE(notifications));
+
+       return 0;
+}
+
+#define PERIODIC_STATS_SECONDS 5
+
+/* Enable or disable periodic fw statistics notifications.
+ * Enable: request OPER + OPER_PART1 every PERIODIC_STATS_SECONDS.
+ * Disable: send only the disable flag, everything else zeroed.
+ */
+int iwl_mld_request_periodic_fw_stats(struct iwl_mld *mld, bool enable)
+{
+       u32 cfg_mask = 0, cfg_time = 0, type_mask = 0;
+
+       if (enable) {
+               cfg_time = PERIODIC_STATS_SECONDS;
+               type_mask = IWL_STATS_NTFY_TYPE_ID_OPER |
+                           IWL_STATS_NTFY_TYPE_ID_OPER_PART1;
+       } else {
+               cfg_mask = IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK;
+       }
+
+       return iwl_mld_send_fw_stats_cmd(mld, cfg_mask, cfg_time, type_mask);
+}
+
+/* Translate the deflink's last TX rate_n_flags into mac80211's
+ * struct rate_info in @sinfo->txrate.
+ */
+static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta,
+                                         struct station_info *sinfo)
+{
+       struct rate_info *rinfo = &sinfo->txrate;
+       u32 rate_n_flags = mld_sta->deflink.last_rate_n_flags;
+       u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+       u32 gi_ltf;
+
+       sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+       /* channel width */
+       switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+       case RATE_MCS_CHAN_WIDTH_20:
+               rinfo->bw = RATE_INFO_BW_20;
+               break;
+       case RATE_MCS_CHAN_WIDTH_40:
+               rinfo->bw = RATE_INFO_BW_40;
+               break;
+       case RATE_MCS_CHAN_WIDTH_80:
+               rinfo->bw = RATE_INFO_BW_80;
+               break;
+       case RATE_MCS_CHAN_WIDTH_160:
+               rinfo->bw = RATE_INFO_BW_160;
+               break;
+       case RATE_MCS_CHAN_WIDTH_320:
+               rinfo->bw = RATE_INFO_BW_320;
+               break;
+       }
+
+       /* legacy (CCK / OFDM) rates; rinfo->legacy is in 100 kbps units */
+       if (format == RATE_MCS_CCK_MSK || format == RATE_MCS_LEGACY_OFDM_MSK) {
+               int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK);
+
+               /* add the offset needed to get to the legacy ofdm indices */
+               if (format == RATE_MCS_LEGACY_OFDM_MSK)
+                       rate += IWL_FIRST_OFDM_RATE;
+
+               switch (rate) {
+               case IWL_RATE_1M_INDEX:
+                       rinfo->legacy = 10;
+                       break;
+               case IWL_RATE_2M_INDEX:
+                       rinfo->legacy = 20;
+                       break;
+               case IWL_RATE_5M_INDEX:
+                       rinfo->legacy = 55;
+                       break;
+               case IWL_RATE_11M_INDEX:
+                       rinfo->legacy = 110;
+                       break;
+               case IWL_RATE_6M_INDEX:
+                       rinfo->legacy = 60;
+                       break;
+               case IWL_RATE_9M_INDEX:
+                       rinfo->legacy = 90;
+                       break;
+               case IWL_RATE_12M_INDEX:
+                       rinfo->legacy = 120;
+                       break;
+               case IWL_RATE_18M_INDEX:
+                       rinfo->legacy = 180;
+                       break;
+               case IWL_RATE_24M_INDEX:
+                       rinfo->legacy = 240;
+                       break;
+               case IWL_RATE_36M_INDEX:
+                       rinfo->legacy = 360;
+                       break;
+               case IWL_RATE_48M_INDEX:
+                       rinfo->legacy = 480;
+                       break;
+               case IWL_RATE_54M_INDEX:
+                       rinfo->legacy = 540;
+               }
+               return;
+       }
+
+       /* MCS-based formats (HT/VHT/HE/EHT) */
+       rinfo->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
+
+       if (format == RATE_MCS_HT_MSK)
+               rinfo->mcs = RATE_HT_MCS_INDEX(rate_n_flags);
+       else
+               rinfo->mcs = u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK);
+
+       if (rate_n_flags & RATE_MCS_SGI_MSK)
+               rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+       switch (format) {
+       case RATE_MCS_EHT_MSK:
+               rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS;
+               break;
+       case RATE_MCS_HE_MSK:
+               gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK);
+
+               rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+
+               if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
+                       rinfo->bw = RATE_INFO_BW_HE_RU;
+                       rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+               }
+
+               /* map the gi_ltf field to nl80211 HE GI per HE PPDU type */
+               switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
+               case RATE_MCS_HE_TYPE_SU:
+               case RATE_MCS_HE_TYPE_EXT_SU:
+                       if (gi_ltf == 0 || gi_ltf == 1)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+                       else if (gi_ltf == 2)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+                       else if (gi_ltf == 3)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+                       else
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+                       break;
+               case RATE_MCS_HE_TYPE_MU:
+                       if (gi_ltf == 0 || gi_ltf == 1)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+                       else if (gi_ltf == 2)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+                       else
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+                       break;
+               case RATE_MCS_HE_TYPE_TRIG:
+                       if (gi_ltf == 0 || gi_ltf == 1)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+                       else
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+                       break;
+               }
+
+               if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
+                       rinfo->he_dcm = 1;
+               break;
+       case RATE_MCS_HT_MSK:
+               rinfo->flags |= RATE_INFO_FLAGS_MCS;
+               break;
+       case RATE_MCS_VHT_MSK:
+               rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+               break;
+       }
+}
+
+/* mac80211 sta_statistics() callback: fill @sinfo from fw statistics */
+void iwl_mld_mac80211_sta_statistics(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif,
+                                    struct ieee80211_sta *sta,
+                                    struct station_info *sinfo)
+{
+       struct iwl_mld_sta *sta_priv = iwl_mld_sta_from_mac80211(sta);
+       bool multi_link_active = hweight16(vif->active_links) > 1;
+
+       /* This API is not EMLSR ready; with more than one active link we
+        * cannot provide complete information, so report nothing.
+        */
+       if (multi_link_active)
+               return;
+
+       /* On cmd failure or timeout, leave sinfo untouched */
+       if (iwl_mld_fw_stats_to_mac80211(sta_priv->mld, sta_priv, sinfo))
+               return;
+
+       iwl_mld_sta_stats_fill_txrate(sta_priv, sinfo);
+
+       /* TODO: NL80211_STA_INFO_BEACON_RX */
+
+       /* TODO: NL80211_STA_INFO_BEACON_SIGNAL_AVG */
+}
+
+#define IWL_MLD_TRAFFIC_LOAD_MEDIUM_THRESH     10 /* percentage */
+#define IWL_MLD_TRAFFIC_LOAD_HIGH_THRESH       50 /* percentage */
+#define IWL_MLD_TRAFFIC_LOAD_MIN_WINDOW_USEC   (500 * 1000)
+
+/* Return the airtime load as a percentage of the elapsed window, or 0
+ * when the window is too short to be meaningful.
+ */
+static u8 iwl_mld_stats_load_percentage(u32 last_ts_usec, u32 curr_ts_usec,
+                                       u32 total_airtime_usec)
+{
+       u32 elapsed_usec = curr_ts_usec - last_ts_usec;
+
+       if (elapsed_usec < IWL_MLD_TRAFFIC_LOAD_MIN_WINDOW_USEC)
+               return 0;
+
+       /* total_airtime_usec is summed over all links, so 100 * airtime
+        * can overflow u32 for long windows; do the math in 64 bits.
+        */
+       return div_u64(100ULL * total_airtime_usec, elapsed_usec);
+}
+
+/* Classify the global traffic load (LOW/MEDIUM/HIGH) from the airtime
+ * used since the previous statistics notification, then remember the
+ * current timestamp. The very first notification only records the time.
+ */
+static void iwl_mld_stats_recalc_traffic_load(struct iwl_mld *mld,
+                                             u32 total_airtime_usec,
+                                             u32 curr_ts_usec)
+{
+       u32 last_ts_usec = mld->scan.traffic_load.last_stats_ts_usec;
+
+       if (last_ts_usec) {
+               u8 load_pct =
+                       iwl_mld_stats_load_percentage(last_ts_usec,
+                                                     curr_ts_usec,
+                                                     total_airtime_usec);
+
+               if (load_pct > IWL_MLD_TRAFFIC_LOAD_HIGH_THRESH)
+                       mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_HIGH;
+               else if (load_pct > IWL_MLD_TRAFFIC_LOAD_MEDIUM_THRESH)
+                       mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_MEDIUM;
+               else
+                       mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_LOW;
+       }
+
+       mld->scan.traffic_load.last_stats_ts_usec = curr_ts_usec;
+}
+
+/* Act on a new averaged beacon signal value for a link:
+ * - if EMLSR is supported but inactive, low RSSI triggers link selection
+ * - if EMLSR is active, RSSI below the exit threshold leaves EMLSR
+ */
+static void iwl_mld_update_link_sig(struct ieee80211_vif *vif, int sig,
+                                   struct ieee80211_bss_conf *bss_conf)
+{
+       struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
+       int exit_emlsr_thresh;
+
+       /* a value of 0 carries no information - skip */
+       if (sig == 0) {
+               IWL_DEBUG_RX(mld, "RSSI is 0 - skip signal based decision\n");
+               return;
+       }
+
+       /* TODO: task=statistics handle CQM notifications */
+
+       if (!iwl_mld_vif_has_emlsr_cap(vif))
+               return;
+
+       /* Handle inactive EMLSR, check whether to switch links */
+       if (!iwl_mld_emlsr_active(vif)) {
+               if (sig < IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH)
+                       iwl_mld_trigger_link_selection(mld, vif);
+               return;
+       }
+
+       /* We are in EMLSR, check if we need to exit */
+       exit_emlsr_thresh =
+               iwl_mld_get_emlsr_rssi_thresh(mld, &bss_conf->chanreq.oper,
+                                             true);
+
+       if (sig < exit_emlsr_thresh)
+               iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LOW_RSSI,
+                                  iwl_mld_get_other_link(vif,
+                                                         bss_conf->link_id));
+}
+
+/* Walk all fw link ids that have an active STA-mode bss_conf: accumulate
+ * their airtime, feed each link's averaged beacon energy into the
+ * signal-based EMLSR logic, and finally recompute the traffic load.
+ */
+static void
+iwl_mld_process_per_link_stats(struct iwl_mld *mld,
+                              const struct iwl_stats_ntfy_per_link *per_link,
+                              u32 curr_ts_usec)
+{
+       u32 total_airtime_usec = 0;
+
+       for (u32 fw_id = 0;
+            fw_id < ARRAY_SIZE(mld->fw_id_to_bss_conf);
+            fw_id++) {
+               const struct iwl_stats_ntfy_per_link *link_stats;
+               struct ieee80211_bss_conf *bss_conf;
+               int sig;
+
+               bss_conf = wiphy_dereference(mld->wiphy,
+                                            mld->fw_id_to_bss_conf[fw_id]);
+               if (!bss_conf || bss_conf->vif->type != NL80211_IFTYPE_STATION)
+                       continue;
+
+               /* @per_link is indexed by fw link id */
+               link_stats = &per_link[fw_id];
+
+               total_airtime_usec += le32_to_cpu(link_stats->air_time);
+
+               /* energy is positive; the signal value is its negation */
+               sig = -le32_to_cpu(link_stats->beacon_filter_average_energy);
+               iwl_mld_update_link_sig(bss_conf->vif, sig, bss_conf);
+
+               /* TODO: parse more fields here (task=statistics)*/
+       }
+
+       iwl_mld_stats_recalc_traffic_load(mld, total_airtime_usec,
+                                         curr_ts_usec);
+}
+
+/* Update the stored signal average of every link STA from the per-STA
+ * statistics. A reported energy of 0 is not a valid value and is
+ * skipped, preserving the previously stored average.
+ */
+static void
+iwl_mld_process_per_sta_stats(struct iwl_mld *mld,
+                             const struct iwl_stats_ntfy_per_sta *per_sta)
+{
+       for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
+               struct ieee80211_link_sta *link_sta =
+                       wiphy_dereference(mld->wiphy,
+                                         mld->fw_id_to_link_sta[i]);
+               struct iwl_mld_link_sta *mld_link_sta;
+               s8 avg_energy =
+                       -(s8)le32_to_cpu(per_sta[i].average_energy);
+
+               if (IS_ERR_OR_NULL(link_sta) || !avg_energy)
+                       continue;
+
+               mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+               if (WARN_ON(!mld_link_sta))
+                       continue;
+
+               mld_link_sta->signal_avg = avg_energy;
+       }
+}
+
+/* Chanctx iterator callback: copy this PHY's channel-load-by-us value
+ * from the per-PHY statistics array (@data).
+ */
+static void iwl_mld_fill_chanctx_stats(struct ieee80211_hw *hw,
+                                      struct ieee80211_chanctx_conf *ctx,
+                                      void *data)
+{
+       const struct iwl_stats_ntfy_per_phy *phy_stats = data;
+       struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+
+       /* fw only reports the first IWL_STATS_MAX_PHY_OPERATIONAL PHYs;
+        * a larger id would index out of the notification array
+        */
+       if (WARN_ON(phy->fw_id >= IWL_STATS_MAX_PHY_OPERATIONAL))
+               return;
+
+       phy->channel_load_by_us =
+               le32_to_cpu(phy_stats[phy->fw_id].channel_load_by_us);
+
+       /* TODO: channel load not by us (task=statistics) */
+}
+
+/* Push per-PHY statistics into each channel context, then re-evaluate
+ * the EMLSR channel-load condition with the fresh values.
+ */
+static void
+iwl_mld_process_per_phy_stats(struct iwl_mld *mld,
+                             const struct iwl_stats_ntfy_per_phy *per_phy)
+{
+       /* the (uintptr_t) round-trip drops const for the iterator's
+        * void *data argument; the callback only reads from it
+        */
+       ieee80211_iter_chan_contexts_mtx(mld->hw,
+                                        iwl_mld_fill_chanctx_stats,
+                                        (void *)(uintptr_t)per_phy);
+
+       /* channel_load_by_us may have been updated, so recheck */
+       iwl_mld_emlsr_check_chan_load(mld);
+}
+
+/* Main handler for the operational statistics notification: dispatches
+ * the per-link, per-STA and per-PHY sections to their processors.
+ */
+void iwl_mld_handle_stats_oper_notif(struct iwl_mld *mld,
+                                    struct iwl_rx_packet *pkt)
+{
+       const struct iwl_system_statistics_notif_oper *stats =
+               (void *)&pkt->data;
+       u32 curr_ts_usec = le32_to_cpu(stats->time_stamp);
+
+       /* the notif arrays must cover every possible fw sta/link id */
+       BUILD_BUG_ON(ARRAY_SIZE(stats->per_sta) != IWL_STATION_COUNT_MAX);
+       BUILD_BUG_ON(ARRAY_SIZE(stats->per_link) <
+                    ARRAY_SIZE(mld->fw_id_to_bss_conf));
+
+       iwl_mld_process_per_link_stats(mld, stats->per_link, curr_ts_usec);
+       iwl_mld_process_per_sta_stats(mld, stats->per_sta);
+       iwl_mld_process_per_phy_stats(mld, stats->per_phy);
+
+       iwl_mld_check_omi_bw_reduction(mld);
+}
+
+/* Handler for the OPER_PART1 statistics notification - not yet parsed */
+void iwl_mld_handle_stats_oper_part1_notif(struct iwl_mld *mld,
+                                          struct iwl_rx_packet *pkt)
+{
+       /* TODO */
+}
+
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_stats_h__
+#define __iwl_mld_stats_h__
+
+/* Enable/disable periodic fw statistics notifications */
+int iwl_mld_request_periodic_fw_stats(struct iwl_mld *mld, bool enable);
+
+/* mac80211 sta_statistics() callback: fill @sinfo from fw statistics */
+void iwl_mld_mac80211_sta_statistics(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif,
+                                    struct ieee80211_sta *sta,
+                                    struct station_info *sinfo);
+
+/* Handlers for the fw operational statistics notifications */
+void iwl_mld_handle_stats_oper_notif(struct iwl_mld *mld,
+                                    struct iwl_rx_packet *pkt);
+void iwl_mld_handle_stats_oper_part1_notif(struct iwl_mld *mld,
+                                          struct iwl_rx_packet *pkt);
+
+/* Ask the fw to clear its statistics database */
+int iwl_mld_clear_stats_in_fw(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_stats_h__ */
 
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# KUnit test objects for the iwlmld driver
+iwlmld-tests-y += module.o hcmd.o utils.o link.o rx.o agg.o link-selection.o
+
+# Let the tests include driver headers from the parent directory
+ccflags-y += -I$(src)/../
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmld-tests.o
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for channel helper functions
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <kunit/test.h>
+#include <kunit/static_stub.h>
+#include <kunit/skbuff.h>
+
+#include "utils.h"
+#include "mld.h"
+#include "sta.h"
+#include "agg.h"
+#include "rx.h"
+
+/* frame-control value of a QoS-Data frame */
+#define FC_QOS_DATA (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)
+/* Block-Ack reorder window size used by all test cases */
+#define BA_WINDOW_SIZE 64
+/* RX queue index used by the tests */
+#define QUEUE 0
+
+static const struct reorder_buffer_case {
+       const char *desc;
+       struct {
+               /* ieee80211_hdr fields */
+               u16 fc;
+               u8 tid;
+               bool multicast;
+               /* iwl_rx_mpdu_desc fields */
+               u16 nssn;
+               /* used also for setting hdr::seq_ctrl */
+               u16 sn;
+               u8 baid;
+               bool amsdu;
+               bool last_subframe;
+               bool old_sn;
+               bool dup;
+       } rx_pkt;
+       struct {
+               bool valid;
+               u16 head_sn;
+               u8 baid;
+               u16 num_entries;
+               /* The test prepares the reorder buffer with fake skbs based
+                * on the sequence numbers provided in @entries array.
+                */
+               struct {
+                       u16 sn;
+                       /* Set add_subframes > 0 to simulate an A-MSDU by
+                        * queueing additional @add_subframes skbs in the
+                        * appropriate reorder buffer entry (based on the @sn)
+                        */
+                       u8 add_subframes;
+               } entries[BA_WINDOW_SIZE];
+       } reorder_buf_state;
+       struct {
+               enum iwl_mld_reorder_result reorder_res;
+               u16 head_sn;
+               u16 num_stored;
+               u16 skb_release_order[BA_WINDOW_SIZE];
+               u16 skb_release_order_count;
+       } expected;
+} reorder_buffer_cases[] = {
+       {
+               .desc = "RX packet with invalid BAID",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .baid = IWL_RX_REORDER_DATA_INVALID_BAID,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+               },
+               .expected = {
+                       /* Invalid BAID should not be buffered. The frame is
+                        * passed to the network stack immediately.
+                        */
+                       .reorder_res = IWL_MLD_PASS_SKB,
+                       .num_stored = 0,
+               },
+       },
+       {
+               .desc = "RX Multicast packet",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .multicast = true,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+               },
+               .expected = {
+                       /* Multicast packets are not buffered. The packet is
+                        * passed to the network stack immediately.
+                        */
+                       .reorder_res = IWL_MLD_PASS_SKB,
+                       .num_stored = 0,
+               },
+       },
+       {
+               .desc = "RX non-QoS data",
+               .rx_pkt = {
+                       .fc = IEEE80211_FTYPE_DATA,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+               },
+               .expected = {
+                       /* non-QoS data frames do not require reordering.
+                        * The packet is passed to the network stack
+                        * immediately.
+                        */
+               .reorder_res = IWL_MLD_PASS_SKB,
+               },
+       },
+       {
+               .desc = "RX old SN packet, reorder buffer is not yet valid",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .old_sn = true,
+               },
+               .reorder_buf_state = {
+                       .valid = false,
+               },
+               .expected = {
+                       /* The buffer is invalid and the RX packet has an old
+                        * SN. The packet is passed to the network stack
+                        * immediately.
+                        */
+                       .reorder_res = IWL_MLD_PASS_SKB,
+               },
+       },
+       {
+               .desc = "RX old SN packet, reorder buffer valid",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .old_sn = true,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+                       .head_sn = 100,
+               },
+               .expected = {
+                       /* The buffer is valid and the RX packet has an old SN.
+                        * The packet should be dropped.
+                        */
+                       .reorder_res = IWL_MLD_DROP_SKB,
+                       .num_stored = 0,
+                       .head_sn = 100,
+               },
+       },
+       {
+               .desc = "RX duplicate packet",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .dup = true,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+                       .head_sn = 100,
+               },
+               .expected = {
+                       /* Duplicate packets should be dropped */
+                       .reorder_res = IWL_MLD_DROP_SKB,
+                       .num_stored = 0,
+                       .head_sn = 100,
+               },
+       },
+       {
+               .desc = "RX In-order packet, sn < nssn",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .sn = 100,
+                       .nssn = 101,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+                       .head_sn = 100,
+               },
+               .expected = {
+                       /* 1. Reorder buffer is empty.
+                        * 2. RX packet SN is in order and less than NSSN.
+                        * Packet is released to the network stack immediately
+                        * and buffer->head_sn is updated to NSSN.
+                        */
+                       .reorder_res = IWL_MLD_PASS_SKB,
+                       .num_stored = 0,
+                       .head_sn = 101,
+               },
+       },
+       {
+               .desc = "RX In-order packet, sn == head_sn",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .sn = 101,
+                       .nssn = 100,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+                       .head_sn = 101,
+               },
+               .expected = {
+                       /* 1. Reorder buffer is empty.
+                        * 2. RX packet SN is equal to buffer->head_sn.
+                        * Packet is released to the network stack immediately
+                        * and buffer->head_sn is incremented.
+                        */
+                       .reorder_res = IWL_MLD_PASS_SKB,
+                       .num_stored = 0,
+                       .head_sn = 102,
+               },
+       },
+       {
+               .desc = "RX In-order packet, IEEE80211_MAX_SN wrap around",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .sn = IEEE80211_MAX_SN,
+                       .nssn = IEEE80211_MAX_SN - 1,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+                       .head_sn = IEEE80211_MAX_SN,
+               },
+               .expected = {
+                       /* 1. Reorder buffer is empty.
+                        * 2. RX SN == buffer->head_sn == IEEE80211_MAX_SN
+                        * Packet is released to the network stack immediately
+                        * and buffer->head_sn is incremented correctly (wraps
+                        * around to 0).
+                        */
+                       .reorder_res = IWL_MLD_PASS_SKB,
+                       .num_stored = 0,
+                       .head_sn = 0,
+               },
+       },
+       {
+               .desc = "RX Out-of-order packet, pending packet in buffer",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .sn = 100,
+                       .nssn = 102,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+                       .head_sn = 100,
+                       .num_entries = 1,
+                       .entries[0].sn = 101,
+               },
+               .expected = {
+                       /* 1. Reorder buffer contains one packet with SN=101.
+                        * 2. RX packet SN = buffer->head_sn.
+                        * Both packets are released (in order) to the network
+                        * stack as there are no gaps.
+                        */
+                       .reorder_res = IWL_MLD_BUFFERED_SKB,
+                       .num_stored = 0,
+                       .head_sn = 102,
+                       .skb_release_order = {100, 101},
+                       .skb_release_order_count = 2,
+               },
+       },
+       {
+               .desc = "RX Out-of-order packet, pending packet in buffer (wrap around)",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .sn = 0,
+                       .nssn = 1,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+                       .head_sn = IEEE80211_MAX_SN - 1,
+                       .num_entries = 1,
+                       .entries[0].sn = IEEE80211_MAX_SN,
+               },
+               .expected = {
+                       /* 1. Reorder buffer contains one packet with
+                        *    SN=IEEE80211_MAX_SN.
+                        * 2. RX Packet SN = 0 (after wrap around)
+                        * Both packets are released (in order) to the network
+                        * stack as there are no gaps.
+                        */
+                       .reorder_res = IWL_MLD_BUFFERED_SKB,
+                       .num_stored = 0,
+                       .head_sn = 1,
+                       .skb_release_order = { 4095, 0 },
+                       .skb_release_order_count = 2,
+               },
+       },
+       {
+               .desc = "RX Out-of-order packet, filling 1/2 holes in buffer, release RX packet",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .sn = 100,
+                       .nssn = 101,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+                       .head_sn = 100,
+                       .num_entries = 1,
+                       .entries[0].sn = 102,
+               },
+               .expected = {
+                       /* 1. Reorder buffer contains one packet with SN=102.
+                        * 2. There are 2 holes at SN={100, 101}.
+                        * Only the RX packet (SN=100) is released, there is
+                        * still a hole at 101.
+                        */
+                       .reorder_res = IWL_MLD_BUFFERED_SKB,
+                       .num_stored = 1,
+                       .head_sn = 101,
+                       .skb_release_order = {100},
+                       .skb_release_order_count = 1,
+               },
+       },
+       {
+               .desc = "RX Out-of-order packet, filling 1/2 holes, release 2 packets",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .sn = 102,
+                       .nssn = 103,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+                       .head_sn = 100,
+                       .num_entries = 3,
+                       .entries[0].sn = 101,
+                       .entries[1].sn = 104,
+                       .entries[2].sn = 105,
+               },
+               .expected = {
+                       /* 1. Reorder buffer contains three packets.
+                        * 2. RX packet fills one of two holes (at SN=102).
+                        * Two packets are released (until the next hole at
+                        * SN=103).
+                        */
+                       .reorder_res = IWL_MLD_BUFFERED_SKB,
+                       .num_stored = 2,
+                       .head_sn = 103,
+                       .skb_release_order = {101, 102},
+                       .skb_release_order_count = 2,
+               },
+       },
+               {
+               .desc = "RX Out-of-order packet, filling 1/1 holes, no packets released",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .sn = 102,
+                       .nssn = 100,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+                       .head_sn = 100,
+                       .num_entries = 3,
+                       .entries[0].sn = 101,
+                       .entries[1].sn = 103,
+                       .entries[2].sn = 104,
+               },
+               .expected = {
+                       /* 1. Reorder buffer contains three packets:
+                        *    SN={101, 103, 104}.
+                        * 2. RX packet fills a hole (SN=102), but NSSN is
+                        *    smaller than buffered frames.
+                        * No packets can be released yet and buffer->head_sn
+                        * is not updated.
+                        */
+                       .reorder_res = IWL_MLD_BUFFERED_SKB,
+                       .num_stored = 4,
+                       .head_sn = 100,
+               },
+       },
+       {
+               .desc = "RX In-order A-MSDU, last subframe",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .sn = 100,
+                       .nssn = 101,
+                       .amsdu = true,
+                       .last_subframe = true,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+                       .head_sn = 100,
+                       .num_entries = 1,
+                       .entries[0] = {
+                               .sn = 100,
+                               .add_subframes = 1,
+                       },
+               },
+               .expected = {
+                       /* 1. Reorder buffer contains a 2-sub frames A-MSDU
+                        *    at SN=100.
+                        * 2. RX packet is the last SN=100 A-MSDU subframe
+                        * All packets are released in order (3 x SN=100).
+                        */
+                       .reorder_res = IWL_MLD_BUFFERED_SKB,
+                       .num_stored = 0,
+                       .head_sn = 101,
+                       .skb_release_order = {100, 100, 100},
+                       .skb_release_order_count = 3,
+               },
+       },
+       {
+               .desc = "RX In-order A-MSDU, not the last subframe",
+               .rx_pkt = {
+                       .fc = FC_QOS_DATA,
+                       .sn = 100,
+                       .nssn = 101,
+                       .amsdu = true,
+                       .last_subframe = false,
+               },
+               .reorder_buf_state = {
+                       .valid = true,
+                       .head_sn = 100,
+                       .num_entries = 1,
+                       .entries[0] = {
+                               .sn = 100,
+                               .add_subframes = 1,
+                       },
+               },
+               .expected = {
+                       /* 1. Reorder buffer contains a 2-sub frames A-MSDU
+                        *    at SN=100.
+                        * 2. RX packet additional SN=100 A-MSDU subframe,
+                        *    but not the last one
+                        * No packets are released and head_sn is not updated.
+                        */
+                       .reorder_res = IWL_MLD_BUFFERED_SKB,
+                       .num_stored = 3,
+                       .head_sn = 100,
+               },
+       },
+};
+
+/* Generate parameterized cases (named by .desc) from reorder_buffer_cases */
+KUNIT_ARRAY_PARAM_DESC(test_reorder_buffer, reorder_buffer_cases, desc);
+
+/* Global capture of skbs released by the stubbed mac80211 pass function,
+ * inspected by the test to verify release count and order.
+ */
+static struct sk_buff_head g_released_skbs;
+static u16 g_num_released_skbs;
+
+/* Stub for iwl_mld_pass_packet_to_mac80211: instead of handing the skb to
+ * mac80211, append it to a global list so that the test can verify the
+ * exact order in which the reorder logic released packets.
+ */
+static void
+fake_iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld,
+                                    struct napi_struct *napi,
+                                    struct sk_buff *skb, int queue,
+                                    struct ieee80211_sta *sta)
+{
+       g_num_released_skbs++;
+       __skb_queue_tail(&g_released_skbs, skb);
+}
+
+/* Stub for iwl_mld_fw_sta_id_mask: build the FW station-id mask by walking
+ * the station's link stations directly.
+ */
+static u32
+fake_iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+       struct iwl_mld_link_sta *link_sta;
+       u32 fw_sta_mask = 0;
+       u8 link_id;
+
+       /* The real implementation expects the wiphy lock to be held */
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* for_each_sta_active_link can't be used here, walk our own links */
+       for_each_mld_link_sta(mld_sta, link_sta, link_id)
+               fw_sta_mask |= BIT(link_sta->fw_id);
+
+       return fw_sta_mask;
+}
+
+/* Allocate a zeroed RX MPDU descriptor and fill it according to the current
+ * test case: BAID/SN/NSSN reorder data, duplicate status and A-MSDU flags.
+ */
+static struct iwl_rx_mpdu_desc *setup_mpdu_desc(void)
+{
+       struct kunit *test = kunit_get_current_test();
+       const struct reorder_buffer_case *param =
+               (const void *)(test->param_value);
+       struct iwl_rx_mpdu_desc *mpdu_desc;
+       u32 reorder = 0;
+
+       KUNIT_ALLOC_AND_ASSERT(test, mpdu_desc);
+
+       /* Accumulate all reorder fields in host order, store once */
+       reorder |= FIELD_PREP(IWL_RX_MPDU_REORDER_BAID_MASK,
+                             param->rx_pkt.baid);
+       reorder |= FIELD_PREP(IWL_RX_MPDU_REORDER_SN_MASK, param->rx_pkt.sn);
+       reorder |= FIELD_PREP(IWL_RX_MPDU_REORDER_NSSN_MASK,
+                             param->rx_pkt.nssn);
+       if (param->rx_pkt.old_sn)
+               reorder |= IWL_RX_MPDU_REORDER_BA_OLD_SN;
+
+       mpdu_desc->reorder_data = cpu_to_le32(reorder);
+
+       if (param->rx_pkt.dup)
+               mpdu_desc->status |= cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE);
+
+       if (param->rx_pkt.amsdu) {
+               mpdu_desc->mac_flags2 |= IWL_RX_MPDU_MFLG2_AMSDU;
+               if (param->rx_pkt.last_subframe)
+                       mpdu_desc->amsdu_info |=
+                               IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
+       }
+
+       return mpdu_desc;
+}
+
+/* Allocate a KUnit-managed skb and push a minimal 802.11 header built from
+ * the given frame control, sequence control, TID and multicast flag.
+ */
+static struct sk_buff *alloc_and_setup_skb(u16 fc, u16 seq_ctrl, u8 tid,
+                                          bool mcast)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct ieee80211_hdr hdr = {
+               .frame_control = cpu_to_le16(fc),
+               .seq_ctrl = cpu_to_le16(seq_ctrl),
+       };
+       struct sk_buff *skb;
+
+       skb = kunit_zalloc_skb(test, 128, GFP_KERNEL);
+       KUNIT_ASSERT_NOT_NULL(test, skb);
+
+       /* For QoS data frames, store the TID in the QoS control field */
+       if (ieee80211_is_data_qos(hdr.frame_control)) {
+               u8 *qc = ieee80211_get_qos_ctl(&hdr);
+
+               qc[0] = tid & IEEE80211_QOS_CTL_TID_MASK;
+       }
+
+       /* Setting the LSB of addr1 marks the destination as multicast */
+       if (mcast)
+               hdr.addr1[0] = 0x1;
+
+       skb_set_mac_header(skb, skb->len);
+       skb_put_data(skb, &hdr, ieee80211_hdrlen(hdr.frame_control));
+
+       return skb;
+}
+
+/* Initialize the reorder buffer of @baid_data for the current test case:
+ * set validity/head_sn/queue and pre-queue one skb per configured entry,
+ * plus extra skbs for any additional A-MSDU subframes requested.
+ */
+static struct iwl_mld_reorder_buffer *
+setup_reorder_buffer(struct iwl_mld_baid_data *baid_data)
+{
+       struct kunit *test = kunit_get_current_test();
+       const struct reorder_buffer_case *param =
+               (const void *)(test->param_value);
+       struct iwl_mld_reorder_buffer *buffer = baid_data->reorder_buf;
+       struct iwl_mld_reorder_buf_entry *entries = baid_data->entries;
+       struct sk_buff *fake_skb;
+
+       buffer->valid = param->reorder_buf_state.valid;
+       buffer->head_sn = param->reorder_buf_state.head_sn;
+       buffer->queue = QUEUE;
+
+       /* All frame queues must be initialized before queueing skbs */
+       for (int i = 0; i < baid_data->buf_size; i++)
+               __skb_queue_head_init(&entries[i].frames);
+
+       for (int i = 0; i < param->reorder_buf_state.num_entries; i++) {
+               u16 sn = param->reorder_buf_state.entries[i].sn;
+               /* Entries are indexed by SN modulo the BA window size */
+               int index = sn % baid_data->buf_size;
+               u8 add_subframes =
+                       param->reorder_buf_state.entries[i].add_subframes;
+               /* create 1 skb per entry + additional skbs per num of
+                * requested subframes
+                */
+               u8 num_skbs = 1 + add_subframes;
+
+               for (int j = 0; j < num_skbs; j++) {
+                       fake_skb = alloc_and_setup_skb(FC_QOS_DATA, sn, 0,
+                                                      false);
+                       __skb_queue_tail(&entries[index].frames, fake_skb);
+                       buffer->num_stored++;
+               }
+       }
+
+       return buffer;
+}
+
+/* Allocate BA (block-ack) data for @sta with a single-queue reorder buffer,
+ * populate it from the current test case and publish it in fw_id_to_ba.
+ * Returns the reorder buffer for later state assertions.
+ */
+static struct iwl_mld_reorder_buffer *setup_ba_data(struct ieee80211_sta *sta)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_mld *mld = test->priv;
+       const struct reorder_buffer_case *param =
+               (const void *)(test->param_value);
+       struct iwl_mld_baid_data *baid_data = NULL;
+       struct iwl_mld_reorder_buffer *buffer;
+       u32 reorder_buf_size = BA_WINDOW_SIZE * sizeof(baid_data->entries[0]);
+       u8 baid = param->reorder_buf_state.baid;
+
+       /* Assuming only 1 RXQ */
+       KUNIT_ALLOC_AND_ASSERT_SIZE(test, baid_data,
+                                   sizeof(*baid_data) + reorder_buf_size);
+
+       baid_data->baid = baid;
+       baid_data->tid = param->rx_pkt.tid;
+       baid_data->buf_size = BA_WINDOW_SIZE;
+
+       /* iwl_mld_fw_sta_id_mask (stubbed here) asserts the wiphy lock */
+       wiphy_lock(mld->wiphy);
+       baid_data->sta_mask = iwl_mld_fw_sta_id_mask(mld, sta);
+       wiphy_unlock(mld->wiphy);
+
+       baid_data->entries_per_queue = BA_WINDOW_SIZE;
+
+       buffer = setup_reorder_buffer(baid_data);
+
+       /* The BAID slot must be free before publishing our BA data there */
+       KUNIT_EXPECT_NULL(test, rcu_access_pointer(mld->fw_id_to_ba[baid]));
+       rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data);
+
+       return buffer;
+}
+
+/* Parameterized test: feed a single RX packet through iwl_mld_reorder()
+ * against a pre-populated reorder buffer, then verify the reorder result,
+ * the buffer state (num_stored/head_sn) and the exact order in which skbs
+ * were released to (the stubbed) mac80211.
+ */
+static void test_reorder_buffer(struct kunit *test)
+{
+       struct iwl_mld *mld = test->priv;
+       const struct reorder_buffer_case *param =
+               (const void *)(test->param_value);
+       struct iwl_rx_mpdu_desc *mpdu_desc;
+       struct ieee80211_vif *vif;
+       struct ieee80211_sta *sta;
+       struct sk_buff *skb;
+       struct iwl_mld_reorder_buffer *buffer;
+       enum iwl_mld_reorder_result reorder_res;
+       u16 skb_release_order_count = param->expected.skb_release_order_count;
+       u16 skb_idx = 0;
+
+       /* Init globals and activate stubs */
+       __skb_queue_head_init(&g_released_skbs);
+       g_num_released_skbs = 0;
+       kunit_activate_static_stub(test, iwl_mld_fw_sta_id_mask,
+                                  fake_iwl_mld_fw_sta_id_mask);
+       kunit_activate_static_stub(test, iwl_mld_pass_packet_to_mac80211,
+                                  fake_iwl_mld_pass_packet_to_mac80211);
+
+       vif = iwlmld_kunit_add_vif(false, NL80211_IFTYPE_STATION);
+       sta = iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, -1);
+
+       /* Prepare skb, mpdu_desc, BA data and the reorder buffer */
+       skb = alloc_and_setup_skb(param->rx_pkt.fc, param->rx_pkt.sn,
+                                 param->rx_pkt.tid, param->rx_pkt.multicast);
+       buffer = setup_ba_data(sta);
+       mpdu_desc = setup_mpdu_desc();
+
+       rcu_read_lock();
+       reorder_res = iwl_mld_reorder(mld, NULL, QUEUE, sta, skb, mpdu_desc);
+       rcu_read_unlock();
+
+       KUNIT_ASSERT_EQ(test, reorder_res, param->expected.reorder_res);
+       KUNIT_ASSERT_EQ(test, buffer->num_stored, param->expected.num_stored);
+       KUNIT_ASSERT_EQ(test, buffer->head_sn, param->expected.head_sn);
+
+       /* Verify skbs release order */
+       KUNIT_ASSERT_EQ(test, skb_release_order_count, g_num_released_skbs);
+       while ((skb = __skb_dequeue(&g_released_skbs))) {
+               struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
+
+               /* seq_ctrl carries the SN the test stored when creating it */
+               KUNIT_ASSERT_EQ(test, le16_to_cpu(hdr->seq_ctrl),
+                               param->expected.skb_release_order[skb_idx]);
+               skb_idx++;
+       }
+       KUNIT_ASSERT_EQ(test, skb_idx, skb_release_order_count);
+}
+
+/* Suite registration for the reorder-buffer tests */
+static struct kunit_case reorder_buffer_test_cases[] = {
+       KUNIT_CASE_PARAM(test_reorder_buffer, test_reorder_buffer_gen_params),
+       {},
+};
+
+static struct kunit_suite reorder_buffer = {
+       .name = "iwlmld-reorder-buffer",
+       .test_cases = reorder_buffer_test_cases,
+       .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(reorder_buffer);
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for the host command (hcmd) name tables
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <kunit/test.h>
+
+#include <iwl-trans.h>
+#include "mld.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+/* Verify that every command-group array is sorted by ascending cmd_id */
+static void test_hcmd_names_sorted(struct kunit *test)
+{
+       for (int i = 0; i < global_iwl_mld_goups_size; i++) {
+               const struct iwl_hcmd_arr *arr = &iwl_mld_groups[i];
+
+               if (!arr->arr)
+                       continue;
+
+               for (int j = 0; j < arr->size - 1; j++)
+                       KUNIT_EXPECT_LE(test, arr->arr[j].cmd_id,
+                                       arr->arr[j + 1].cmd_id);
+       }
+}
+
+/* Verify that every RX handler's command ID resolves to a known name */
+static void test_hcmd_names_for_rx(struct kunit *test)
+{
+       static struct iwl_trans trans = {
+               .command_groups = iwl_mld_groups,
+       };
+
+       trans.command_groups_size = global_iwl_mld_goups_size;
+
+       for (unsigned int i = 0; i < iwl_mld_rx_handlers_num; i++) {
+               const struct iwl_rx_handler *handler = &iwl_mld_rx_handlers[i];
+               const char *name;
+
+               name = iwl_get_cmd_string(&trans, handler->cmd_id);
+               KUNIT_EXPECT_NOT_NULL(test, name);
+               KUNIT_EXPECT_NE_MSG(test, strcmp(name, "UNKNOWN"), 0,
+                                   "ID 0x%04x is UNKNOWN", handler->cmd_id);
+       }
+}
+
+/* Suite registration for the hcmd-names tests */
+static struct kunit_case hcmd_names_cases[] = {
+       KUNIT_CASE(test_hcmd_names_sorted),
+       KUNIT_CASE(test_hcmd_names_for_rx),
+       {},
+};
+
+static struct kunit_suite hcmd_names = {
+       .name = "iwlmld-hcmd-names",
+       .test_cases = hcmd_names_cases,
+};
+
+kunit_test_suite(hcmd_names);
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for link selection functions
+ *
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include <kunit/static_stub.h>
+
+#include "utils.h"
+#include "mld.h"
+#include "link.h"
+#include "iface.h"
+#include "phy.h"
+
+static const struct link_grading_test_case {
+       const char *desc;
+       struct {
+               struct {
+                       u8 link_id;
+                       const struct cfg80211_chan_def *chandef;
+                       bool active;
+                       s32 signal;
+                       bool has_chan_util_elem;
+                       u8 chan_util; /* 0-255 , used only if has_chan_util_elem is true */
+                       u8 chan_load_by_us; /* 0-100, used only if active is true */;
+               } link;
+       } input;
+       unsigned int expected_grade;
+} link_grading_cases[] = {
+       {
+               .desc = "channel util of 128 (50%)",
+               .input.link = {
+                       .link_id = 0,
+                       .chandef = &chandef_2ghz,
+                       .active = false,
+                       .has_chan_util_elem = true,
+                       .chan_util = 128,
+               },
+               .expected_grade = 86,
+       },
+       {
+               .desc = "channel util of 180 (70%)",
+               .input.link = {
+                       .link_id = 0,
+                       .chandef = &chandef_2ghz,
+                       .active = false,
+                       .has_chan_util_elem = true,
+                       .chan_util = 180,
+               },
+               .expected_grade = 51,
+       },
+       {
+               .desc = "channel util of 180 (70%), channel load by us of 10%",
+               .input.link = {
+                       .link_id = 0,
+                       .chandef = &chandef_2ghz,
+                       .has_chan_util_elem = true,
+                       .chan_util = 180,
+                       .active = true,
+                       .chan_load_by_us = 10,
+               },
+               .expected_grade = 67,
+       },
+               {
+               .desc = "no channel util element",
+               .input.link = {
+                       .link_id = 0,
+                       .chandef = &chandef_2ghz,
+                       .active = true,
+               },
+               .expected_grade = 120,
+       },
+};
+
+KUNIT_ARRAY_PARAM_DESC(link_grading, link_grading_cases, desc);
+
+/* Configure @link for the current test case: fake BSS signal, an optional
+ * QBSS load element carrying the channel utilization, and - for active
+ * links - the channel load contributed by us on the link's PHY context.
+ */
+static void setup_link(struct ieee80211_bss_conf *link)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_mld *mld = test->priv;
+       const struct link_grading_test_case *test_param =
+               (const void *)(test->param_value);
+
+       KUNIT_ALLOC_AND_ASSERT(test, link->bss);
+
+       link->bss->signal = DBM_TO_MBM(test_param->input.link.signal);
+
+       link->chanreq.oper = *test_param->input.link.chandef;
+
+       if (test_param->input.link.has_chan_util_elem) {
+               struct cfg80211_bss_ies *ies;
+               struct ieee80211_bss_load_elem bss_load = {
+                       .channel_util = test_param->input.link.chan_util,
+               };
+               struct element *elem =
+                       iwlmld_kunit_gen_element(WLAN_EID_QBSS_LOAD,
+                                                &bss_load,
+                                                sizeof(bss_load));
+               unsigned int elem_len = sizeof(*elem) + sizeof(bss_load);
+
+               /* Attach the element as both beacon IEs and current IEs */
+               KUNIT_ALLOC_AND_ASSERT_SIZE(test, ies, sizeof(*ies) + elem_len);
+               memcpy(ies->data, elem, elem_len);
+               ies->len = elem_len;
+               rcu_assign_pointer(link->bss->beacon_ies, ies);
+               rcu_assign_pointer(link->bss->ies, ies);
+       }
+
+       if (test_param->input.link.active) {
+               struct ieee80211_chanctx_conf *chan_ctx =
+                       wiphy_dereference(mld->wiphy, link->chanctx_conf);
+               struct iwl_mld_phy *phy;
+
+               KUNIT_ASSERT_NOT_NULL(test, chan_ctx);
+
+               phy = iwl_mld_phy_from_mac80211(chan_ctx);
+
+               phy->channel_load_by_us = test_param->input.link.chan_load_by_us;
+       }
+}
+
+/* Parameterized test: set up an MLO association containing the link under
+ * test, configure it per the test case, and verify the computed grade.
+ */
+static void test_link_grading(struct kunit *test)
+{
+       struct iwl_mld *mld = test->priv;
+       const struct link_grading_test_case *test_param =
+               (const void *)(test->param_value);
+       struct ieee80211_vif *vif;
+       struct ieee80211_bss_conf *link;
+       unsigned int actual_grade;
+       u8 assoc_link_id;
+       /* Extract test case parameters */
+       u8 link_id = test_param->input.link.link_id;
+       enum nl80211_band band = test_param->input.link.chandef->chan->band;
+       bool active = test_param->input.link.active;
+       u16 valid_links;
+
+       /* If the link is not active, use a different link as the assoc link */
+       if (active) {
+               assoc_link_id = link_id;
+               valid_links = BIT(link_id);
+       } else {
+               /* NOTE(review): BIT(ffz(BIT(link_id))) yields a mask-like
+                * value used as a link id; works for link_id == 0 (gives 2),
+                * but verify if cases with other link ids are ever added.
+                */
+               assoc_link_id = BIT(ffz(BIT(link_id)));
+               valid_links = BIT(assoc_link_id) | BIT(link_id);
+       }
+
+       vif = iwlmld_kunit_setup_mlo_assoc(valid_links, assoc_link_id, band);
+
+       wiphy_lock(mld->wiphy);
+       link = wiphy_dereference(mld->wiphy, vif->link_conf[link_id]);
+       KUNIT_ASSERT_NOT_NULL(test, link);
+
+       setup_link(link);
+
+       actual_grade = iwl_mld_get_link_grade(mld, link);
+       wiphy_unlock(mld->wiphy);
+
+       /* Assert that the returned grade matches the expected grade */
+       KUNIT_EXPECT_EQ(test, actual_grade, test_param->expected_grade);
+}
+
+/* Suite registration for the link-selection tests */
+static struct kunit_case link_selection_cases[] = {
+       KUNIT_CASE_PARAM(test_link_grading, link_grading_gen_params),
+       {},
+};
+
+static struct kunit_suite link_selection = {
+       .name = "iwlmld-link-selection-tests",
+       .test_cases = link_selection_cases,
+       .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(link_selection);
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for link functions (missed-beacon handling)
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <kunit/static_stub.h>
+
+#include "utils.h"
+#include "mld.h"
+#include "link.h"
+#include "iface.h"
+#include "fw/api/mac-cfg.h"
+
+/* Test cases for missed-beacon handling: each case supplies a notification
+ * and states whether a disconnect (and EMLSR change) is expected.
+ */
+static const struct missed_beacon_test_case {
+       const char *desc;
+       struct {
+               struct iwl_missed_beacons_notif notif;
+               bool emlsr;
+       } input;
+       struct {
+               bool disconnected;
+               bool emlsr;
+       } output;
+} missed_beacon_cases[] = {
+       {
+               .desc = "no EMLSR, no disconnect",
+               .input.notif = {
+                       .consec_missed_beacons = cpu_to_le32(4),
+               },
+       },
+       {
+               .desc = "no EMLSR, no beacon loss since Rx, no disconnect",
+               .input.notif = {
+                       .consec_missed_beacons = cpu_to_le32(20),
+               },
+       },
+       {
+               .desc = "no EMLSR, beacon loss since Rx, disconnect",
+               .input.notif = {
+                       .consec_missed_beacons = cpu_to_le32(20),
+                       .consec_missed_beacons_since_last_rx =
+                               cpu_to_le32(10),
+               },
+               .output.disconnected = true,
+       },
+};
+
+KUNIT_ARRAY_PARAM_DESC(test_missed_beacon, missed_beacon_cases, desc);
+
+/* Stub: record the connection loss by clearing the association flag */
+static void fake_ieee80211_connection_loss(struct ieee80211_vif *vif)
+{
+       vif->cfg.assoc = false;
+}
+
+/* Parameterized test: inject a missed-beacons notification and verify
+ * whether the connection was dropped via the stubbed connection-loss hook.
+ */
+static void test_missed_beacon(struct kunit *test)
+{
+       struct iwl_mld *mld = test->priv;
+       struct iwl_missed_beacons_notif *notif;
+       const struct missed_beacon_test_case *test_param =
+               (const void *)(test->param_value);
+       struct ieee80211_vif *vif;
+       struct iwl_rx_packet *pkt;
+
+       kunit_activate_static_stub(test, ieee80211_connection_loss,
+                                  fake_ieee80211_connection_loss);
+       pkt = iwl_mld_kunit_create_pkt(test_param->input.notif);
+       notif = (void *)pkt->data;
+
+       if (test_param->input.emlsr) {
+               vif = iwlmld_kunit_assoc_emlsr(0x3, NL80211_BAND_5GHZ,
+                                              NL80211_BAND_6GHZ);
+       } else {
+               struct iwl_mld_vif *mld_vif;
+
+               vif = iwlmld_kunit_setup_non_mlo_assoc(NL80211_BAND_6GHZ);
+               mld_vif = iwl_mld_vif_from_mac80211(vif);
+               /* Point the notification at the vif's (only) link */
+               notif->link_id = cpu_to_le32(mld_vif->deflink.fw_id);
+       }
+
+       wiphy_lock(mld->wiphy);
+
+       iwl_mld_handle_missed_beacon_notif(mld, pkt);
+
+       wiphy_unlock(mld->wiphy);
+
+       /* assoc must end up as the inverse of the expected disconnect */
+       KUNIT_ASSERT_NE(test, vif->cfg.assoc, test_param->output.disconnected);
+
+       /* TODO: add test cases for esr and check */
+}
+
+/* Suite registration for the link (missed-beacon) tests */
+static struct kunit_case link_cases[] = {
+       KUNIT_CASE_PARAM(test_missed_beacon, test_missed_beacon_gen_params),
+       {},
+};
+
+static struct kunit_suite link = {
+       .name = "iwlmld-link",
+       .test_cases = link_cases,
+       .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(link);
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * This is just module boilerplate for the iwlmld kunit module.
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <linux/module.h>
+
+MODULE_IMPORT_NS("IWLWIFI");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("kunit tests for iwlmld");
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for duplicate packet detection (is_dup)
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <kunit/test.h>
+#include "utils.h"
+#include "iwl-trans.h"
+#include "mld.h"
+#include "sta.h"
+
+/*
+ * Table-driven parameters for the iwl_mld_is_dup() test below.
+ * Each entry describes one incoming frame (802.11 header fields plus
+ * the relevant iwl_rx_mpdu_desc bits), the per-TID dedup state that is
+ * pre-loaded into the station, and the expected duplicate verdict plus
+ * rx_status flags.
+ */
+static const struct is_dup_case {
+       const char *desc;
+       struct {
+               /* ieee80211_hdr fields */
+               __le16 fc;
+               __le16 seq;
+               u8 tid;
+               bool multicast;
+               /* iwl_rx_mpdu_desc fields */
+               bool is_amsdu;
+               u8 sub_frame_idx;
+       } rx_pkt;
+       struct {
+               __le16 last_seq;
+               u8 last_sub_frame_idx;
+               u8 tid;
+       } dup_data_state;
+       struct {
+               bool is_dup;
+               u32 rx_status_flag;
+       } result;
+} is_dup_cases[] = {
+       {
+               .desc = "Control frame",
+               .rx_pkt = {
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_CTL),
+               },
+               .result = {
+                       .is_dup = false,
+                       .rx_status_flag = 0,
+               }
+       },
+       {
+               .desc = "Null func frame",
+               .rx_pkt = {
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                           IEEE80211_STYPE_NULLFUNC),
+               },
+               .result = {
+                       .is_dup = false,
+                       .rx_status_flag = 0,
+               }
+       },
+       {
+               .desc = "Multicast data",
+               .rx_pkt = {
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA),
+                       .multicast = true,
+               },
+               .result = {
+                       .is_dup = false,
+                       .rx_status_flag = 0,
+               }
+       },
+       {
+               .desc = "QoS null func frame",
+               .rx_pkt = {
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                           IEEE80211_STYPE_QOS_NULLFUNC),
+               },
+               .result = {
+                       .is_dup = false,
+                       .rx_status_flag = 0,
+               }
+       },
+       {
+               .desc = "QoS data new sequence",
+               .rx_pkt = {
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                           IEEE80211_STYPE_QOS_DATA),
+                       .seq = __cpu_to_le16(0x101),
+               },
+               .dup_data_state = {
+                       .last_seq = __cpu_to_le16(0x100),
+                       .last_sub_frame_idx = 0,
+               },
+               .result = {
+                       .is_dup = false,
+                       .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+               },
+       },
+       {
+               .desc = "QoS data same sequence, no retry",
+               .rx_pkt = {
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                           IEEE80211_STYPE_QOS_DATA),
+                       .seq = __cpu_to_le16(0x100),
+               },
+               .dup_data_state = {
+                       .last_seq = __cpu_to_le16(0x100),
+                       .last_sub_frame_idx = 0,
+               },
+               .result = {
+                       .is_dup = false,
+                       .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+               },
+       },
+       {
+               .desc = "QoS data same sequence, has retry",
+               .rx_pkt = {
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                           IEEE80211_STYPE_QOS_DATA |
+                                           IEEE80211_FCTL_RETRY),
+                       .seq = __cpu_to_le16(0x100),
+               },
+               .dup_data_state = {
+                       .last_seq = __cpu_to_le16(0x100),
+                       .last_sub_frame_idx = 0,
+               },
+               .result = {
+                       .is_dup = true,
+                       .rx_status_flag = 0,
+               },
+       },
+       {
+               .desc = "QoS data invalid tid",
+               .rx_pkt = {
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                           IEEE80211_STYPE_QOS_DATA),
+                       .seq = __cpu_to_le16(0x100),
+                       .tid = IWL_MAX_TID_COUNT + 1,
+               },
+               .result = {
+                       .is_dup = true,
+                       .rx_status_flag = 0,
+               },
+       },
+       {
+               .desc = "non-QoS data, same sequence, same tid, no retry",
+               .rx_pkt = {
+                       /* Driver will use tid = IWL_MAX_TID_COUNT */
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA),
+                       .seq = __cpu_to_le16(0x100),
+               },
+               .dup_data_state = {
+                       .tid = IWL_MAX_TID_COUNT,
+                       .last_seq = __cpu_to_le16(0x100),
+                       .last_sub_frame_idx = 0,
+               },
+               .result = {
+                       .is_dup = false,
+                       .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+               },
+       },
+       {
+               .desc = "non-QoS data, same sequence, same tid, has retry",
+               .rx_pkt = {
+                       /* Driver will use tid = IWL_MAX_TID_COUNT */
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                           IEEE80211_FCTL_RETRY),
+                       .seq = __cpu_to_le16(0x100),
+               },
+               .dup_data_state = {
+                       .tid = IWL_MAX_TID_COUNT,
+                       .last_seq = __cpu_to_le16(0x100),
+                       .last_sub_frame_idx = 0,
+               },
+               .result = {
+                       .is_dup = true,
+                       .rx_status_flag = 0,
+               },
+       },
+       {
+               .desc = "non-QoS data, same sequence on different tid's",
+               .rx_pkt = {
+                       /* Driver will use tid = IWL_MAX_TID_COUNT */
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA),
+                       .seq = __cpu_to_le16(0x100),
+               },
+               .dup_data_state = {
+                       .tid = 7,
+                       .last_seq = __cpu_to_le16(0x100),
+                       .last_sub_frame_idx = 0,
+               },
+               .result = {
+                       .is_dup = false,
+                       .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+               },
+       },
+       {
+               .desc = "A-MSDU new subframe, allow same PN",
+               .rx_pkt = {
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                           IEEE80211_STYPE_QOS_DATA),
+                       .seq = __cpu_to_le16(0x100),
+                       .is_amsdu = true,
+                       .sub_frame_idx = 1,
+               },
+               .dup_data_state = {
+                       .last_seq = __cpu_to_le16(0x100),
+                       .last_sub_frame_idx = 0,
+               },
+               .result = {
+                       .is_dup = false,
+                       .rx_status_flag = RX_FLAG_ALLOW_SAME_PN |
+                                         RX_FLAG_DUP_VALIDATED,
+               },
+       },
+       {
+               .desc = "A-MSDU subframe with smaller idx, disallow same PN",
+               .rx_pkt = {
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                           IEEE80211_STYPE_QOS_DATA),
+                       .seq = __cpu_to_le16(0x100),
+                       .is_amsdu = true,
+                       .sub_frame_idx = 1,
+               },
+               .dup_data_state = {
+                       .last_seq = __cpu_to_le16(0x100),
+                       .last_sub_frame_idx = 2,
+               },
+               .result = {
+                       .is_dup = false,
+                       .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+               },
+       },
+       {
+               .desc = "A-MSDU same subframe, no retry, disallow same PN",
+               .rx_pkt = {
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                           IEEE80211_STYPE_QOS_DATA),
+                       .seq = __cpu_to_le16(0x100),
+                       .is_amsdu = true,
+                       .sub_frame_idx = 0,
+               },
+               .dup_data_state = {
+                       .last_seq = __cpu_to_le16(0x100),
+                       .last_sub_frame_idx = 0,
+               },
+               .result = {
+                       .is_dup = false,
+                       .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+               },
+       },
+       {
+               .desc = "A-MSDU same subframe, has retry",
+               .rx_pkt = {
+                       .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                           IEEE80211_STYPE_QOS_DATA |
+                                           IEEE80211_FCTL_RETRY),
+                       .seq = __cpu_to_le16(0x100),
+                       .is_amsdu = true,
+                       .sub_frame_idx = 0,
+               },
+               .dup_data_state = {
+                       .last_seq = __cpu_to_le16(0x100),
+                       .last_sub_frame_idx = 0,
+               },
+               .result = {
+                       .is_dup = true,
+                       .rx_status_flag = 0,
+               },
+       },
+};
+
+KUNIT_ARRAY_PARAM_DESC(test_is_dup, is_dup_cases, desc);
+
+/* Pre-load the station's per-TID dedup state from the current test case */
+static void setup_dup_data_state(struct ieee80211_sta *sta)
+{
+       struct kunit *test = kunit_get_current_test();
+       const struct is_dup_case *param = (const void *)(test->param_value);
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+       struct iwl_mld_rxq_dup_data *dup;
+       u8 tid = param->dup_data_state.tid;
+
+       /* A single queue's worth of dup_data is enough for these tests */
+       KUNIT_ALLOC_AND_ASSERT(test, dup);
+
+       /* Mark every TID "unseen" first, as iwl_mld_alloc_dup_data does */
+       memset(dup->last_seq, 0xff, sizeof(dup->last_seq));
+
+       /* Then seed the TID under test with the case's prior state */
+       dup->last_seq[tid] = param->dup_data_state.last_seq;
+       dup->last_sub_frame_idx[tid] =
+               param->dup_data_state.last_sub_frame_idx;
+
+       mld_sta->dup_data = dup;
+}
+
+/* Build the 802.11 header and rx descriptor for the current test case */
+static void setup_rx_pkt(const struct is_dup_case *param,
+                        struct ieee80211_hdr *hdr,
+                        struct iwl_rx_mpdu_desc *mpdu_desc)
+{
+       u8 tid = param->rx_pkt.tid;
+
+       /* Incoming frame header */
+       hdr->frame_control = param->rx_pkt.fc;
+       hdr->seq_ctrl = param->rx_pkt.seq;
+
+       /* QoS frames carry the TID in the QoS control field */
+       if (ieee80211_is_data_qos(hdr->frame_control)) {
+               u8 *qos_ctl = ieee80211_get_qos_ctl(hdr);
+
+               qos_ctl[0] = tid & IEEE80211_QOS_CTL_TID_MASK;
+       }
+
+       /* Group bit in addr1 makes the frame multicast */
+       if (param->rx_pkt.multicast)
+               hdr->addr1[0] = 0x1;
+
+       /* Driver rx descriptor: A-MSDU subframe index and flag */
+       mpdu_desc->amsdu_info = param->rx_pkt.sub_frame_idx &
+                               IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+       if (param->rx_pkt.is_amsdu)
+               mpdu_desc->mac_flags2 |= IWL_RX_MPDU_MFLG2_AMSDU;
+}
+
+/* Run iwl_mld_is_dup() on one is_dup_cases[] entry and check the verdict */
+static void test_is_dup(struct kunit *test)
+{
+       const struct is_dup_case *param = (const void *)(test->param_value);
+       struct iwl_mld *mld = test->priv;
+       struct ieee80211_rx_status rx_status = { };
+       struct iwl_rx_mpdu_desc mpdu_desc = { };
+       struct ieee80211_vif *vif;
+       struct ieee80211_sta *sta;
+       struct ieee80211_hdr hdr;
+       bool is_dup;
+
+       vif = iwlmld_kunit_add_vif(false, NL80211_IFTYPE_STATION);
+       sta = iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, -1);
+
+       /* Prepare the dedup state and the incoming frame */
+       setup_dup_data_state(sta);
+       setup_rx_pkt(param, &hdr, &mpdu_desc);
+
+       /* Queue 0: only one queue's dup_data was allocated above */
+       is_dup = iwl_mld_is_dup(mld, sta, &hdr, &mpdu_desc, &rx_status, 0);
+
+       KUNIT_EXPECT_EQ(test, is_dup, param->result.is_dup);
+       KUNIT_EXPECT_EQ(test, rx_status.flag, param->result.rx_status_flag);
+}
+
+/* One parameterized case, driven by every entry of is_dup_cases[] */
+static struct kunit_case is_dup_test_cases[] = {
+       KUNIT_CASE_PARAM(test_is_dup, test_is_dup_gen_params),
+       {},
+};
+
+static struct kunit_suite is_dup = {
+       .name = "iwlmld-rx-is-dup",
+       .test_cases = is_dup_test_cases,
+       /* Builds a minimal iwl_mld into test->priv before each case */
+       .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(is_dup);
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for channel helper functions
+ *
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include "utils.h"
+
+#include <linux/device.h>
+
+#include "fw/api/scan.h"
+#include "fw/api/mac-cfg.h"
+#include "iwl-trans.h"
+#include "mld.h"
+#include "iface.h"
+#include "link.h"
+#include "phy.h"
+#include "sta.h"
+
+/*
+ * Common suite init: allocate a skeleton transport/config/firmware/hw
+ * and construct a minimal iwl_mld in test->priv so individual tests can
+ * exercise driver code without real hardware.
+ */
+int iwlmld_kunit_test_init(struct kunit *test)
+{
+       struct iwl_mld *mld;
+       struct iwl_trans *trans;
+       const struct iwl_cfg *cfg;
+       struct iwl_fw *fw;
+       struct ieee80211_hw *hw;
+
+       KUNIT_ALLOC_AND_ASSERT(test, trans);
+       KUNIT_ALLOC_AND_ASSERT(test, trans->dev);
+       KUNIT_ALLOC_AND_ASSERT(test, cfg);
+       KUNIT_ALLOC_AND_ASSERT(test, fw);
+       KUNIT_ALLOC_AND_ASSERT(test, hw);
+       KUNIT_ALLOC_AND_ASSERT(test, hw->wiphy);
+
+       mutex_init(&hw->wiphy->mtx);
+
+       /* Allocate and initialize the mld structure */
+       KUNIT_ALLOC_AND_ASSERT(test, mld);
+       iwl_construct_mld(mld, trans, cfg, fw, hw, NULL);
+
+       /* Advertise maximum station/link capacity to the tests */
+       fw->ucode_capa.num_stations = IWL_STATION_COUNT_MAX;
+       fw->ucode_capa.num_links = IWL_FW_MAX_LINK_ID + 1;
+
+       /* fwrt fields are set after iwl_construct_mld() on purpose */
+       mld->fwrt.trans = trans;
+       mld->fwrt.fw = fw;
+       mld->fwrt.dev = trans->dev;
+
+       /* TODO: add priv_size to hw allocation and setup hw->priv to enable
+        * testing mac80211 callbacks
+        */
+
+       KUNIT_ALLOC_AND_ASSERT(test, mld->nvm_data);
+       KUNIT_ALLOC_AND_ASSERT_SIZE(test, mld->scan.cmd,
+                                   sizeof(struct iwl_scan_req_umac_v17));
+       mld->scan.cmd_size = sizeof(struct iwl_scan_req_umac_v17);
+
+       /* This is not the state at the end of the regular opmode_start,
+        * but it is more common to need it. Explicitly undo this if needed.
+        */
+       mld->trans->state = IWL_TRANS_FW_ALIVE;
+       mld->fw_status.running = true;
+
+       /* Avoid passing mld struct around */
+       test->priv = mld;
+       return 0;
+}
+
+IWL_MLD_ALLOC_FN(link, bss_conf)
+
+/*
+ * Initialize both the mac80211 and the driver side of a link and
+ * register it in the firmware link-id table.
+ *
+ * Note: the duplicated rcu_assign_pointer(vif->link_conf[link_id], ...)
+ * at the end of the original was redundant (same pointer assigned twice)
+ * and has been removed.
+ */
+static void iwlmld_kunit_init_link(struct ieee80211_vif *vif,
+                                  struct ieee80211_bss_conf *link,
+                                  struct iwl_mld_link *mld_link, int link_id)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_mld *mld = test->priv;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       int ret;
+
+       /* setup mac80211 link */
+       rcu_assign_pointer(vif->link_conf[link_id], link);
+       link->link_id = link_id;
+       link->vif = vif;
+       link->beacon_int = 100;
+       link->dtim_period = 3;
+       link->qos = true;
+
+       /* and mld_link */
+       ret = iwl_mld_allocate_link_fw_id(mld, &mld_link->fw_id, link);
+       KUNIT_ASSERT_EQ(test, ret, 0);
+       rcu_assign_pointer(mld_vif->link[link_id], mld_link);
+}
+
+IWL_MLD_ALLOC_FN(vif, vif)
+
+/* Allocate and initialize a vif (with embedded iwl_mld_vif) for tests */
+struct ieee80211_vif *iwlmld_kunit_add_vif(bool mlo, enum nl80211_iftype type)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_mld *mld = test->priv;
+       struct iwl_mld_vif *mld_vif;
+       struct ieee80211_vif *vif;
+       int ret;
+
+       /* TODO: support more types */
+       KUNIT_ASSERT_EQ(test, type, NL80211_IFTYPE_STATION);
+
+       /* The driver private data lives right after the vif */
+       KUNIT_ALLOC_AND_ASSERT_SIZE(test, vif,
+                                   sizeof(*vif) + sizeof(*mld_vif));
+
+       vif->type = type;
+       mld_vif = iwl_mld_vif_from_mac80211(vif);
+       mld_vif->mld = mld;
+
+       ret = iwl_mld_allocate_vif_fw_id(mld, &mld_vif->fw_id, vif);
+       KUNIT_ASSERT_EQ(test, ret, 0);
+
+       /* TODO: revisit (task=EHT) */
+       if (!mlo)
+               /* Non-MLO vifs use the embedded default link */
+               iwlmld_kunit_init_link(vif, &vif->bss_conf,
+                                      &mld_vif->deflink, 0);
+
+       return vif;
+}
+
+/* Allocate a non-default link and attach it to an MLO vif only */
+struct ieee80211_bss_conf *
+iwlmld_kunit_add_link(struct ieee80211_vif *vif, int link_id)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_mld_link *mld_link;
+       struct ieee80211_bss_conf *link;
+
+       KUNIT_ALLOC_AND_ASSERT(test, link);
+       KUNIT_ALLOC_AND_ASSERT(test, mld_link);
+
+       iwlmld_kunit_init_link(vif, link, mld_link, link_id);
+
+       /* Make mac80211 consider the new link valid */
+       vif->valid_links |= BIT(link_id);
+
+       return link;
+}
+
+/* Create a channel context (with embedded iwl_mld_phy) for a chandef */
+struct ieee80211_chanctx_conf *
+iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_mld *mld = test->priv;
+       struct ieee80211_chanctx_conf *ctx;
+       struct iwl_mld_phy *phy;
+       int fw_id;
+
+       /* The iwl_mld_phy is carved out right after the chanctx */
+       KUNIT_ALLOC_AND_ASSERT_SIZE(test, ctx, sizeof(*ctx) + sizeof(*phy));
+
+       /* mac80211 side: all three defs start out identical */
+       ctx->def = *def;
+       ctx->min_def = *def;
+       ctx->ap = *def;
+
+       /* driver side: assign a firmware phy id and cache the chandef */
+       phy = iwl_mld_phy_from_mac80211(ctx);
+
+       fw_id = iwl_mld_allocate_fw_phy_id(mld);
+       KUNIT_ASSERT_GE(test, fw_id, 0);
+
+       phy->fw_id = fw_id;
+       phy->chandef = *iwl_mld_get_chandef_from_chanctx(ctx);
+
+       return ctx;
+}
+
+/*
+ * Attach a channel context to a link, on both the mac80211 and the
+ * driver side, and mark the link active. Caller must hold the wiphy
+ * mutex (see the lockdep assertion below).
+ */
+void iwlmld_kunit_assign_chanctx_to_link(struct ieee80211_vif *vif,
+                                        struct ieee80211_bss_conf *link,
+                                        struct ieee80211_chanctx_conf *ctx)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_mld *mld = test->priv;
+       struct iwl_mld_link *mld_link;
+
+       /* The link must not already have a chanctx assigned */
+       KUNIT_EXPECT_NULL(test, rcu_access_pointer(link->chanctx_conf));
+       rcu_assign_pointer(link->chanctx_conf, ctx);
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       mld_link = iwl_mld_link_from_mac80211(link);
+
+       KUNIT_EXPECT_NULL(test, rcu_access_pointer(mld_link->chan_ctx));
+       KUNIT_EXPECT_FALSE(test, mld_link->active);
+
+       rcu_assign_pointer(mld_link->chan_ctx, ctx);
+       mld_link->active = true;
+
+       /* For an MLD vif, assigning a chanctx also activates the link */
+       if (ieee80211_vif_is_mld(vif))
+               vif->active_links |= BIT(link->link_id);
+}
+
+IWL_MLD_ALLOC_FN(link_sta, link_sta)
+
+/* Wire up one link station on both the mac80211 and the driver side */
+static void iwlmld_kunit_add_link_sta(struct ieee80211_sta *sta,
+                                     struct ieee80211_link_sta *link_sta,
+                                     struct iwl_mld_link_sta *mld_link_sta,
+                                     u8 link_id)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+       struct iwl_mld *mld = test->priv;
+       int ret;
+       u8 fw_id;
+
+       /* mac80211's view of the link station */
+       link_sta->link_id = link_id;
+       link_sta->sta = sta;
+       rcu_assign_pointer(sta->link[link_id], link_sta);
+
+       /* the driver's iwl_mld_link_sta, registered with a fw id */
+       ret = iwl_mld_allocate_link_sta_fw_id(mld, &fw_id, link_sta);
+       KUNIT_ASSERT_EQ(test, ret, 0);
+       mld_link_sta->fw_id = fw_id;
+
+       rcu_assign_pointer(mld_sta->link[link_id], mld_link_sta);
+}
+
+/* Allocate and attach an extra link station; MLO stations only */
+static struct ieee80211_link_sta *
+iwlmld_kunit_alloc_link_sta(struct ieee80211_sta *sta, int link_id)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_mld_link_sta *mld_link_sta;
+       struct ieee80211_link_sta *link_sta;
+
+       /* Secondary link stations only make sense when links are valid */
+       KUNIT_ASSERT_TRUE(test, sta->valid_links);
+
+       KUNIT_ALLOC_AND_ASSERT(test, link_sta);
+       KUNIT_ALLOC_AND_ASSERT(test, mld_link_sta);
+
+       iwlmld_kunit_add_link_sta(sta, link_sta, mld_link_sta, link_id);
+
+       sta->valid_links |= BIT(link_id);
+
+       return link_sta;
+}
+
+/* Allocate and initialize a STA together with its first link_sta */
+static struct ieee80211_sta *
+iwlmld_kunit_add_sta(struct ieee80211_vif *vif, int link_id)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_mld_sta *mld_sta;
+       struct ieee80211_sta *sta;
+       u8 first_link_id = link_id >= 0 ? link_id : 0;
+
+       /* The iwl_mld_sta is embedded right after ieee80211_sta */
+       KUNIT_ALLOC_AND_ASSERT_SIZE(test, sta, sizeof(*sta) + sizeof(*mld_sta));
+
+       /* TODO: allocate and initialize the TXQs ? */
+
+       mld_sta = iwl_mld_sta_from_mac80211(sta);
+       mld_sta->vif = vif;
+       mld_sta->mld = test->priv;
+
+       /* TODO: adjust for internal stations */
+       mld_sta->sta_type = STATION_TYPE_PEER;
+
+       iwlmld_kunit_add_link_sta(sta, &sta->deflink, &mld_sta->deflink,
+                                 first_link_id);
+
+       /* A negative link_id means non-MLO: leave valid_links at zero */
+       if (link_id >= 0)
+               sta->valid_links = BIT(link_id);
+
+       return sta;
+}
+
+/* Move a STA to the given state */
+static void iwlmld_kunit_move_sta_state(struct ieee80211_vif *vif,
+                                       struct ieee80211_sta *sta,
+                                       enum ieee80211_sta_state state)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_mld_vif *mld_vif;
+       struct iwl_mld_sta *mld_sta;
+
+       /* NOTEXIST would mean removal; kunit cleanup handles that itself */
+       KUNIT_ASSERT_NE(test, state, IEEE80211_STA_NOTEXIST);
+
+       mld_sta = iwl_mld_sta_from_mac80211(sta);
+       mld_sta->sta_state = state;
+
+       mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
+       mld_vif->authorized = state == IEEE80211_STA_AUTHORIZED;
+
+       /* On a managed (non-TDLS) interface this sta is the AP sta */
+       if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+               mld_vif->ap_sta = sta;
+}
+
+/*
+ * Allocate a sta, initialize it and step it through every intermediate
+ * state up to @state.
+ *
+ * Fix: the loop previously passed the final @state on every iteration,
+ * leaving the loop variable unused and skipping the intermediate
+ * transitions; pass _state so each state is visited in order.
+ */
+struct ieee80211_sta *iwlmld_kunit_setup_sta(struct ieee80211_vif *vif,
+                                            enum ieee80211_sta_state state,
+                                            int link_id)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct ieee80211_sta *sta;
+
+       /* The sta will be removed automatically at the end of the test */
+       KUNIT_ASSERT_NE(test, state, IEEE80211_STA_NOTEXIST);
+
+       /* First - allocate and init the STA */
+       sta = iwlmld_kunit_add_sta(vif, link_id);
+
+       /* Now move it through each state up to the wanted one */
+       for (enum ieee80211_sta_state _state = IEEE80211_STA_NONE;
+            _state <= state; _state++)
+               iwlmld_kunit_move_sta_state(vif, sta, _state);
+
+       return sta;
+}
+
+/* Mark the vif associated; currently only sets the assoc flag */
+static void iwlmld_kunit_set_vif_associated(struct ieee80211_vif *vif)
+{
+       /* TODO: setup chanreq */
+       /* TODO setup capabilities */
+
+       vif->cfg.assoc = 1;
+}
+
+/* Create a station vif, assign a chanctx on @band, add the AP sta and
+ * mark the vif associated.
+ */
+static struct ieee80211_vif *
+iwlmld_kunit_setup_assoc(bool mlo, u8 link_id, enum nl80211_band band)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_mld *mld = test->priv;
+       struct ieee80211_chanctx_conf *chan_ctx;
+       struct ieee80211_bss_conf *link;
+       struct ieee80211_vif *vif;
+
+       /* A non-zero link id is only meaningful for MLO */
+       KUNIT_ASSERT_TRUE(test, mlo || link_id == 0);
+
+       vif = iwlmld_kunit_add_vif(mlo, NL80211_IFTYPE_STATION);
+
+       link = mlo ? iwlmld_kunit_add_link(vif, link_id) : &vif->bss_conf;
+
+       chan_ctx = iwlmld_kunit_add_chanctx(band);
+
+       /* Chanctx assignment requires the wiphy mutex */
+       wiphy_lock(mld->wiphy);
+       iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx);
+       wiphy_unlock(mld->wiphy);
+
+       /* The AP sta will now be pointed to by mld_vif->ap_sta */
+       iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, link_id);
+
+       iwlmld_kunit_set_vif_associated(vif);
+
+       return vif;
+}
+
+/* Associate an MLO vif; valid_links must contain one or two links and
+ * include the assoc link.
+ */
+struct ieee80211_vif *iwlmld_kunit_setup_mlo_assoc(u16 valid_links,
+                                                  u8 assoc_link_id,
+                                                  enum nl80211_band band)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct ieee80211_vif *vif;
+       u8 n_links = hweight16(valid_links);
+
+       KUNIT_ASSERT_TRUE(test, n_links == 1 || n_links == 2);
+       KUNIT_ASSERT_TRUE(test, valid_links & BIT(assoc_link_id));
+
+       vif = iwlmld_kunit_setup_assoc(true, assoc_link_id, band);
+
+       /* Add the second (non-assoc) link, if applicable */
+       if (n_links > 1) {
+               u8 other_link_id = ffs(valid_links & ~BIT(assoc_link_id)) - 1;
+
+               iwlmld_kunit_add_link(vif, other_link_id);
+       }
+
+       return vif;
+}
+
+/* Convenience wrapper: associate a non-MLO vif on link 0 */
+struct ieee80211_vif *iwlmld_kunit_setup_non_mlo_assoc(enum nl80211_band band)
+{
+       return iwlmld_kunit_setup_assoc(false, 0, band);
+}
+
+/*
+ * Build a fake rx packet wrapping @notif as its payload.
+ *
+ * Fix: allocate sizeof(*pkt) (the packet header struct), not
+ * sizeof(pkt) which is the size of a pointer and under-allocates
+ * on 32-bit targets.
+ */
+struct iwl_rx_packet *
+_iwl_mld_kunit_create_pkt(const void *notif, size_t notif_sz)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_rx_packet *pkt;
+
+       KUNIT_ALLOC_AND_ASSERT_SIZE(test, pkt, sizeof(*pkt) + notif_sz);
+
+       memcpy(pkt->data, notif, notif_sz);
+       pkt->len_n_flags = cpu_to_le32(sizeof(pkt->hdr) + notif_sz);
+
+       return pkt;
+}
+
+/*
+ * Associate an MLO vif with both links active (EMLSR-like setup):
+ * the first link in @valid_links is the assoc link on @band1, the
+ * second is activated on @band2 and gets its own link station.
+ */
+struct ieee80211_vif *iwlmld_kunit_assoc_emlsr(u16 valid_links,
+                                              enum nl80211_band band1,
+                                              enum nl80211_band band2)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_mld *mld = test->priv;
+       struct ieee80211_vif *vif;
+       struct ieee80211_bss_conf *link;
+       struct ieee80211_chanctx_conf *chan_ctx;
+       struct ieee80211_sta *sta;
+       struct iwl_mld_vif *mld_vif;
+       u8 assoc_link_id, other_link_id;
+
+       /* Exactly two links are required here */
+       KUNIT_ASSERT_TRUE(test, hweight16(valid_links) == 2);
+
+       assoc_link_id = ffs(valid_links) - 1;
+       other_link_id = ffs(valid_links & ~BIT(assoc_link_id)) - 1;
+
+       vif = iwlmld_kunit_setup_mlo_assoc(valid_links, assoc_link_id, band1);
+       mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+       /* Activate second link */
+       wiphy_lock(mld->wiphy);
+
+       link = wiphy_dereference(mld->wiphy, vif->link_conf[other_link_id]);
+       KUNIT_EXPECT_NOT_NULL(test, link);
+
+       chan_ctx = iwlmld_kunit_add_chanctx(band2);
+       iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx);
+
+       wiphy_unlock(mld->wiphy);
+
+       /* And other link sta */
+       sta = mld_vif->ap_sta;
+       KUNIT_EXPECT_NOT_NULL(test, sta);
+
+       iwlmld_kunit_alloc_link_sta(sta, other_link_id);
+
+       return vif;
+}
+
+/*
+ * Allocate an information element with the given id and data.
+ *
+ * Fix: elem->datalen is a u8, so a @len above 255 used to be silently
+ * truncated, producing a corrupt element; assert it fits instead.
+ */
+struct element *iwlmld_kunit_gen_element(u8 id, const void *data, size_t len)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct element *elem;
+
+       /* datalen is a u8 - catch silent truncation */
+       KUNIT_ASSERT_LE(test, len, (size_t)U8_MAX);
+
+       KUNIT_ALLOC_AND_ASSERT_SIZE(test, elem, sizeof(*elem) + len);
+
+       elem->id = id;
+       elem->datalen = len;
+       memcpy(elem->data, data, len);
+
+       return elem;
+}
+
+struct iwl_mld_phy *iwlmld_kunit_get_phy_of_link(struct ieee80211_vif *vif,
+                                                u8 link_id)
+{
+       struct kunit *test = kunit_get_current_test();
+       struct iwl_mld *mld = test->priv;
+       struct ieee80211_chanctx_conf *chanctx;
+       struct ieee80211_bss_conf *link;
+
+       /* The link must exist and have a chanctx assigned */
+       link = wiphy_dereference(mld->wiphy, vif->link_conf[link_id]);
+       KUNIT_EXPECT_NOT_NULL(test, link);
+
+       chanctx = wiphy_dereference(mld->wiphy, link->chanctx_conf);
+       KUNIT_EXPECT_NOT_NULL(test, chanctx);
+
+       return iwl_mld_phy_from_mac80211(chanctx);
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#ifndef __iwl_mld_kunit_utils_h__
+#define __iwl_mld_kunit_utils_h__
+
+#include <net/mac80211.h>
+#include <kunit/test-bug.h>
+
+struct iwl_mld;
+
+int iwlmld_kunit_test_init(struct kunit *test);
+
+enum nl80211_iftype;
+
+struct ieee80211_vif *iwlmld_kunit_add_vif(bool mlo, enum nl80211_iftype type);
+
+struct ieee80211_bss_conf *
+iwlmld_kunit_add_link(struct ieee80211_vif *vif, int link_id);
+
+/* Allocate @size zeroed bytes with kunit_kzalloc() and assert success */
+#define KUNIT_ALLOC_AND_ASSERT_SIZE(test, ptr, size)                   \
+do {                                                                   \
+       (ptr) = kunit_kzalloc((test), (size), GFP_KERNEL);              \
+       KUNIT_ASSERT_NOT_NULL((test), (ptr));                           \
+} while (0)
+
+/* Allocate sizeof(*(ptr)) zeroed bytes and assert success */
+#define KUNIT_ALLOC_AND_ASSERT(test, ptr)                              \
+       KUNIT_ALLOC_AND_ASSERT_SIZE(test, ptr, sizeof(*(ptr)))
+
+/* Define a static ieee80211_channel for use in test chandefs */
+#define CHANNEL(_name, _band, _freq)                           \
+static struct ieee80211_channel _name = {                      \
+       .band = (_band),                                        \
+       .center_freq = (_freq),                                 \
+       .hw_value = (_freq),                                    \
+}
+
+/* Define a static cfg80211_chan_def on a previously defined channel */
+#define CHANDEF(_name, _channel, _freq1, _width)               \
+__maybe_unused static struct cfg80211_chan_def _name = {       \
+       .chan = &(_channel),                                    \
+       .center_freq1 = (_freq1),                               \
+       .width = (_width),                                      \
+}
+
+CHANNEL(chan_2ghz, NL80211_BAND_2GHZ, 2412);
+CHANNEL(chan_5ghz, NL80211_BAND_5GHZ, 5200);
+CHANNEL(chan_6ghz, NL80211_BAND_6GHZ, 6115);
+/* Feel free to add more */
+
+CHANDEF(chandef_2ghz, chan_2ghz, 2412, NL80211_CHAN_WIDTH_20);
+CHANDEF(chandef_5ghz, chan_5ghz, 5200, NL80211_CHAN_WIDTH_40);
+CHANDEF(chandef_6ghz, chan_6ghz, 6115, NL80211_CHAN_WIDTH_160);
+/* Feel free to add more */
+
+
+struct ieee80211_chanctx_conf *
+iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def);
+
+/* Create a chanctx on a canned chandef for @band (6 GHz for unknowns) */
+static inline struct ieee80211_chanctx_conf *
+iwlmld_kunit_add_chanctx(enum nl80211_band band)
+{
+       struct cfg80211_chan_def *def;
+
+       switch (band) {
+       case NL80211_BAND_2GHZ:
+               def = &chandef_2ghz;
+               break;
+       case NL80211_BAND_5GHZ:
+               def = &chandef_5ghz;
+               break;
+       case NL80211_BAND_6GHZ:
+       default:
+               def = &chandef_6ghz;
+               break;
+       }
+
+       return iwlmld_kunit_add_chanctx_from_def(def);
+}
+
+void iwlmld_kunit_assign_chanctx_to_link(struct ieee80211_vif *vif,
+                                        struct ieee80211_bss_conf *link,
+                                        struct ieee80211_chanctx_conf *ctx);
+
+/* Allocate a sta, initialize it and move it to the wanted state */
+struct ieee80211_sta *iwlmld_kunit_setup_sta(struct ieee80211_vif *vif,
+                                            enum ieee80211_sta_state state,
+                                            int link_id);
+
+struct ieee80211_vif *iwlmld_kunit_setup_mlo_assoc(u16 valid_links,
+                                                  u8 assoc_link_id,
+                                                  enum nl80211_band band);
+struct ieee80211_vif *iwlmld_kunit_setup_non_mlo_assoc(enum nl80211_band band);
+
+struct iwl_rx_packet *
+_iwl_mld_kunit_create_pkt(const void *notif, size_t notif_sz);
+
+#define iwl_mld_kunit_create_pkt(_notif)       \
+       _iwl_mld_kunit_create_pkt(&(_notif), sizeof(_notif))
+
+struct ieee80211_vif *iwlmld_kunit_assoc_emlsr(u16 valid_links,
+                                              enum nl80211_band band1,
+                                              enum nl80211_band band2);
+
+struct element *iwlmld_kunit_gen_element(u8 id, const void *data, size_t len);
+
+/**
+ * iwlmld_kunit_get_phy_of_link - Get the phy of a link
+ *
+ * @vif: The vif to get the phy from.
+ * @link_id: The id of the link to get the phy for.
+ *
+ * given a vif and link id, return the phy pointer of that link.
+ * This assumes that the link exists, and that it had a chanctx
+ * assigned.
+ * If this is not the case, the test will fail.
+ *
+ * Return: phy pointer.
+ */
+struct iwl_mld_phy *iwlmld_kunit_get_phy_of_link(struct ieee80211_vif *vif,
+                                                u8 link_id);
+
+#endif /* __iwl_mld_kunit_utils_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifdef CONFIG_THERMAL
+#include <linux/sort.h>
+#include <linux/thermal.h>
+#endif
+
+#include "fw/api/phy.h"
+
+#include "thermal.h"
+#include "mld.h"
+#include "hcmd.h"
+
+#define IWL_MLD_CT_KILL_DURATION (5 * HZ)
+
+/* Firmware reported thermal emergency: enter CT-kill and schedule exit */
+void iwl_mld_handle_ct_kill_notif(struct iwl_mld *mld,
+                                 struct iwl_rx_packet *pkt)
+{
+       const struct ct_kill_notif *notif = (const void *)pkt->data;
+       unsigned long delay = round_jiffies_relative(IWL_MLD_CT_KILL_DURATION);
+
+       IWL_ERR(mld,
+               "CT Kill notification: temp = %d, DTS = 0x%x, Scheme 0x%x - Enter CT Kill\n",
+               le16_to_cpu(notif->temperature), notif->dts,
+               notif->scheme);
+
+       iwl_mld_set_ctkill(mld, true);
+
+       /* Leave CT-kill again after the grace period */
+       wiphy_delayed_work_queue(mld->wiphy, &mld->ct_kill_exit_wk, delay);
+}
+
+/* Delayed-work handler: clear the CT-kill state */
+static void iwl_mld_exit_ctkill(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+       struct iwl_mld *mld = container_of(wk, struct iwl_mld,
+                                          ct_kill_exit_wk.work);
+
+       IWL_ERR(mld, "Exit CT Kill\n");
+       iwl_mld_set_ctkill(mld, false);
+}
+
+/* Handle a DTS temperature measurement notification from the firmware.
+ * Notifications not tied to a trip point (threshold_idx == 0xFF) are
+ * ignored; a valid trip crossing triggers a thermal zone update when
+ * CONFIG_THERMAL is enabled.
+ */
+void iwl_mld_handle_temp_notif(struct iwl_mld *mld, struct iwl_rx_packet *pkt)
+{
+       const struct iwl_dts_measurement_notif *notif =
+               (const void *)pkt->data;
+       int temp;
+       u32 ths_crossed;
+
+       temp = le32_to_cpu(notif->temp);
+
+       /* shouldn't be negative, but since it's s32, make sure it isn't */
+       if (IWL_FW_CHECK(mld, temp < 0, "negative temperature %d\n", temp))
+               return;
+
+       ths_crossed = le32_to_cpu(notif->threshold_idx);
+
+       /* 0xFF in ths_crossed means the notification is not related
+        * to a trip, so we can ignore it here.
+        */
+       if (ths_crossed == 0xFF)
+               return;
+
+       IWL_DEBUG_TEMP(mld, "Temp = %d Threshold crossed = %d\n",
+                      temp, ths_crossed);
+
+       /* Validate the firmware-supplied index before using it */
+       if (IWL_FW_CHECK(mld, ths_crossed >= IWL_MAX_DTS_TRIPS,
+                        "bad threshold: %d\n", ths_crossed))
+               return;
+
+#ifdef CONFIG_THERMAL
+       if (mld->tzone)
+               thermal_zone_device_update(mld->tzone, THERMAL_TRIP_VIOLATED);
+#endif /* CONFIG_THERMAL */
+}
+
+#ifdef CONFIG_THERMAL
+/* Synchronously read the current temperature from the firmware.
+ *
+ * Sends CMD_DTS_MEASUREMENT_TRIGGER_WIDE and parses the response into
+ * *temp (units as reported by the firmware - presumably degrees C, per
+ * the DTS API; confirm against fw/api/phy.h).  Returns 0 on success or a
+ * negative error code; *temp is valid only on success.
+ */
+static int iwl_mld_get_temp(struct iwl_mld *mld, s32 *temp)
+{
+       struct iwl_host_cmd cmd = {
+               .id = WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
+               .flags = CMD_WANT_SKB,
+       };
+       const struct iwl_dts_measurement_resp *resp;
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       ret = iwl_mld_send_cmd(mld, &cmd);
+       if (ret) {
+               IWL_ERR(mld,
+                       "Failed to send the temperature measurement command (err=%d)\n",
+                       ret);
+               return ret;
+       }
+
+       /* Reject a response too short to contain the measurement */
+       if (iwl_rx_packet_payload_len(cmd.resp_pkt) < sizeof(*resp)) {
+               IWL_ERR(mld,
+                       "Failed to get a valid response to DTS measurement\n");
+               ret = -EIO;
+               goto free_resp;
+       }
+
+       resp = (const void *)cmd.resp_pkt->data;
+       *temp = le32_to_cpu(resp->temp);
+
+       IWL_DEBUG_TEMP(mld,
+                      "Got temperature measurement response: temp=%d\n",
+                      *temp);
+
+free_resp:
+       /* The response skb was requested with CMD_WANT_SKB; release it */
+       iwl_free_resp(&cmd);
+       return ret;
+}
+
+/* sort() comparator: orders little-endian 16-bit temperatures ascending.
+ * Cast through const pointers so the const qualifier of sort()'s
+ * arguments is not silently dropped.
+ */
+static int compare_temps(const void *a, const void *b)
+{
+       return ((s16)le16_to_cpu(*(const __le16 *)a) -
+               (s16)le16_to_cpu(*(const __le16 *)b));
+}
+
+/* Walk context for iwl_trip_temp_iter():
+ * @thresholds: output array of valid trip temperatures (LE, degrees C)
+ * @count: number of entries filled in @thresholds so far
+ */
+struct iwl_trip_walk_data {
+       __le16 *thresholds;
+       int count;
+};
+
+/* for_each_thermal_trip() callback: skip uninitialized trips and convert
+ * the rest from millidegrees C to little-endian degrees C for the FW.
+ * Always returns 0 so the walk visits every trip.
+ */
+static int iwl_trip_temp_iter(struct thermal_trip *trip, void *arg)
+{
+       struct iwl_trip_walk_data *twd = arg;
+
+       if (trip->temperature == THERMAL_TEMP_INVALID)
+               return 0;
+
+       twd->thresholds[twd->count++] =
+               cpu_to_le16((s16)(trip->temperature / 1000));
+       return 0;
+}
+#endif
+
+/* Send the temperature reporting thresholds to the firmware.
+ *
+ * With CONFIG_THERMAL, the current thermal zone trip temperatures are
+ * collected, compressed (invalid entries dropped) and sorted into the
+ * command; otherwise an empty threshold list is sent.  Returns 0 on
+ * success or a negative error code.
+ */
+int iwl_mld_config_temp_report_ths(struct iwl_mld *mld)
+{
+       struct temp_report_ths_cmd cmd = {0};
+       int ret;
+#ifdef CONFIG_THERMAL
+       struct iwl_trip_walk_data twd = {
+               .thresholds = cmd.thresholds,
+               .count = 0
+       };
+
+       if (!mld->tzone)
+               goto send;
+
+       /* The thermal core holds an array of temperature trips that are
+        * unsorted and uncompressed, the FW should get it compressed and
+        * sorted.
+        */
+
+       /* compress trips to cmd array, remove uninitialized values */
+       for_each_thermal_trip(mld->tzone, iwl_trip_temp_iter, &twd);
+
+       cmd.num_temps = cpu_to_le32(twd.count);
+       if (twd.count)
+               sort(cmd.thresholds, twd.count, sizeof(s16),
+                    compare_temps, NULL);
+
+send:
+#endif
+       lockdep_assert_wiphy(mld->wiphy);
+
+       ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP,
+                                               TEMP_REPORTING_THRESHOLDS_CMD),
+                                  &cmd);
+       if (ret)
+               IWL_ERR(mld, "TEMP_REPORT_THS_CMD command failed (err=%d)\n",
+                       ret);
+
+       return ret;
+}
+
+#ifdef CONFIG_THERMAL
+/* Thermal core callback: report the current temperature in millidegrees C */
+static int iwl_mld_tzone_get_temp(struct thermal_zone_device *device,
+                                 int *temperature)
+{
+       struct iwl_mld *mld = thermal_zone_device_priv(device);
+       int ret = 0;
+       int temp;
+
+       wiphy_lock(mld->wiphy);
+
+       if (!mld->fw_status.running) {
+               /* Tell the core that there is no valid temperature value to
+                * return, but it need not worry about this.
+                */
+               *temperature = THERMAL_TEMP_INVALID;
+       } else {
+               ret = iwl_mld_get_temp(mld, &temp);
+               if (!ret)
+                       *temperature = temp * 1000;
+       }
+
+       wiphy_unlock(mld->wiphy);
+       return ret;
+}
+
+/* Thermal core callback: a trip point temperature was changed.
+ *
+ * The new value is only range-checked against the FW's s16 degree field;
+ * @trip and @temp are not stored here.  NOTE(review): this presumably
+ * relies on the thermal core having already written the new value into
+ * the trip table that iwl_mld_config_temp_report_ths() walks - confirm
+ * against the thermal core's set_trip_temp ordering.
+ */
+static int iwl_mld_tzone_set_trip_temp(struct thermal_zone_device *device,
+                                      const struct thermal_trip *trip,
+                                      int temp)
+{
+       struct iwl_mld *mld = thermal_zone_device_priv(device);
+       int ret;
+
+       wiphy_lock(mld->wiphy);
+
+       if (!mld->fw_status.running) {
+               ret = -EIO;
+               goto unlock;
+       }
+
+       /* temp is in mC; the FW command carries whole degrees C as s16 */
+       if ((temp / 1000) > S16_MAX) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       ret = iwl_mld_config_temp_report_ths(mld);
+unlock:
+       wiphy_unlock(mld->wiphy);
+       return ret;
+}
+
+/* Thermal zone callbacks; set_trip_temp re-sends the full threshold table */
+static  struct thermal_zone_device_ops tzone_ops = {
+       .get_temp = iwl_mld_tzone_get_temp,
+       .set_trip_temp = iwl_mld_tzone_set_trip_temp,
+};
+
+/* Register the device's thermal zone with all trips initially invalid.
+ * Registration failure is not fatal: mld->tzone stays NULL and thermal
+ * zone updates are simply skipped.
+ */
+static void iwl_mld_thermal_zone_register(struct iwl_mld *mld)
+{
+       int ret;
+       char name[16];
+       static atomic_t counter = ATOMIC_INIT(0);
+       struct thermal_trip trips[IWL_MAX_DTS_TRIPS] = {
+               [0 ... IWL_MAX_DTS_TRIPS - 1] = {
+                       .temperature = THERMAL_TEMP_INVALID,
+                       .type = THERMAL_TRIP_PASSIVE,
+                       .flags = THERMAL_TRIP_FLAG_RW_TEMP,
+               },
+       };
+
+       BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+       /* Unique zone name per device instance */
+       sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
+       mld->tzone =
+               thermal_zone_device_register_with_trips(name, trips,
+                                                       IWL_MAX_DTS_TRIPS,
+                                                       mld, &tzone_ops,
+                                                       NULL, 0, 0);
+       if (IS_ERR(mld->tzone)) {
+               IWL_DEBUG_TEMP(mld,
+                              "Failed to register to thermal zone (err = %ld)\n",
+                              PTR_ERR(mld->tzone));
+               mld->tzone = NULL;
+               return;
+       }
+
+       ret = thermal_zone_device_enable(mld->tzone);
+       if (ret) {
+               IWL_DEBUG_TEMP(mld, "Failed to enable thermal zone\n");
+               thermal_zone_device_unregister(mld->tzone);
+               /* Clear the pointer so temperature notifications and the
+                * later unregister path don't touch the freed device.
+                */
+               mld->tzone = NULL;
+       }
+}
+
+/* budget in mWatt, indexed by cooling state (values descend as the
+ * state rises); the array length defines the cooling device's max state.
+ */
+static const u32 iwl_mld_cdev_budgets[] = {
+       2400,   /* cooling state 0 */
+       2000,   /* cooling state 1 */
+       1800,   /* cooling state 2 */
+       1600,   /* cooling state 3 */
+       1400,   /* cooling state 4 */
+       1200,   /* cooling state 5 */
+       1000,   /* cooling state 6 */
+       900,    /* cooling state 7 */
+       800,    /* cooling state 8 */
+       700,    /* cooling state 9 */
+       650,    /* cooling state 10 */
+       600,    /* cooling state 11 */
+       550,    /* cooling state 12 */
+       500,    /* cooling state 13 */
+       450,    /* cooling state 14 */
+       400,    /* cooling state 15 */
+       350,    /* cooling state 16 */
+       300,    /* cooling state 17 */
+       250,    /* cooling state 18 */
+       200,    /* cooling state 19 */
+       150,    /* cooling state 20 */
+};
+
+/* Send a cTDP (power budget) configuration command to the firmware.
+ *
+ * @state: index into iwl_mld_cdev_budgets[] selecting the budget in mWatt
+ * @op: cTDP operation (e.g. start/stop)
+ *
+ * On a successful start, the current cooling state is cached in
+ * mld->cooling_dev.cur_state.  Returns 0 on success or a negative error.
+ */
+int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state,
+                       enum iwl_ctdp_cmd_operation op)
+{
+       struct iwl_ctdp_cmd cmd = {
+               .operation = cpu_to_le32(op),
+               .budget = cpu_to_le32(iwl_mld_cdev_budgets[state]),
+               .window_size = 0,
+       };
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP, CTDP_CONFIG_CMD),
+                                  &cmd);
+
+       if (ret) {
+               IWL_ERR(mld, "cTDP command failed (err=%d)\n", ret);
+               return ret;
+       }
+
+       if (op == CTDP_CMD_OPERATION_START)
+               mld->cooling_dev.cur_state = state;
+
+       return 0;
+}
+
+/* Cooling device callback: highest valid state is the last budget index */
+static int iwl_mld_tcool_get_max_state(struct thermal_cooling_device *cdev,
+                                      unsigned long *state)
+{
+       *state = ARRAY_SIZE(iwl_mld_cdev_budgets) - 1;
+       return 0;
+}
+
+/* Cooling device callback: report the cached cooling state */
+static int iwl_mld_tcool_get_cur_state(struct thermal_cooling_device *cdev,
+                                      unsigned long *state)
+{
+       /* devdata is a void *, so no cast is needed in C */
+       struct iwl_mld *mld = cdev->devdata;
+
+       *state = mld->cooling_dev.cur_state;
+
+       return 0;
+}
+
+/* Cooling device callback: apply a new cooling state by re-sending the
+ * corresponding cTDP budget to the firmware.
+ */
+static int iwl_mld_tcool_set_cur_state(struct thermal_cooling_device *cdev,
+                                      unsigned long new_state)
+{
+       struct iwl_mld *mld = (struct iwl_mld *)(cdev->devdata);
+       int ret = -EIO;
+
+       wiphy_lock(mld->wiphy);
+
+       /* Nothing to configure while the firmware isn't running */
+       if (!mld->fw_status.running)
+               goto unlock;
+
+       if (new_state >= ARRAY_SIZE(iwl_mld_cdev_budgets)) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       ret = iwl_mld_config_ctdp(mld, new_state, CTDP_CMD_OPERATION_START);
+
+unlock:
+       wiphy_unlock(mld->wiphy);
+       return ret;
+}
+
+/* Cooling device callbacks backed by the cTDP budget table */
+static const struct thermal_cooling_device_ops tcooling_ops = {
+       .get_max_state = iwl_mld_tcool_get_max_state,
+       .get_cur_state = iwl_mld_tcool_get_cur_state,
+       .set_cur_state = iwl_mld_tcool_set_cur_state,
+};
+
+/* Register the cTDP cooling device.  Failure is not fatal: the cdev
+ * pointer stays NULL and cooling is simply unavailable.
+ */
+static void iwl_mld_cooling_device_register(struct iwl_mld *mld)
+{
+       char name[] = "iwlwifi";
+
+       BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+       mld->cooling_dev.cdev =
+               thermal_cooling_device_register(name,
+                                               mld,
+                                               &tcooling_ops);
+
+       if (IS_ERR(mld->cooling_dev.cdev)) {
+               IWL_DEBUG_TEMP(mld,
+                              "Failed to register to cooling device (err = %ld)\n",
+                              PTR_ERR(mld->cooling_dev.cdev));
+               mld->cooling_dev.cdev = NULL;
+               return;
+       }
+}
+
+/* Unregister the thermal zone, if one was registered.  The early return
+ * already guarantees mld->tzone is non-NULL, so no second check is needed.
+ */
+static void iwl_mld_thermal_zone_unregister(struct iwl_mld *mld)
+{
+       if (!mld->tzone)
+               return;
+
+       IWL_DEBUG_TEMP(mld, "Thermal zone device unregister\n");
+       thermal_zone_device_unregister(mld->tzone);
+       mld->tzone = NULL;
+}
+
+/* Unregister the cooling device, if one was registered.  The early return
+ * already guarantees the cdev pointer is non-NULL, so no second check is
+ * needed.
+ */
+static void iwl_mld_cooling_device_unregister(struct iwl_mld *mld)
+{
+       if (!mld->cooling_dev.cdev)
+               return;
+
+       IWL_DEBUG_TEMP(mld, "Cooling device unregister\n");
+       thermal_cooling_device_unregister(mld->cooling_dev.cdev);
+       mld->cooling_dev.cdev = NULL;
+}
+#endif /* CONFIG_THERMAL */
+
+/* Set up CT-kill handling and (with CONFIG_THERMAL) register the cooling
+ * device and thermal zone.
+ */
+void iwl_mld_thermal_initialize(struct iwl_mld *mld)
+{
+       wiphy_delayed_work_init(&mld->ct_kill_exit_wk, iwl_mld_exit_ctkill);
+
+#ifdef CONFIG_THERMAL
+       iwl_mld_cooling_device_register(mld);
+       iwl_mld_thermal_zone_register(mld);
+#endif
+}
+
+/* Tear down thermal support: cancel a pending CT-kill exit and (with
+ * CONFIG_THERMAL) unregister the cooling device and thermal zone.
+ */
+void iwl_mld_thermal_exit(struct iwl_mld *mld)
+{
+       wiphy_delayed_work_cancel(mld->wiphy, &mld->ct_kill_exit_wk);
+
+#ifdef CONFIG_THERMAL
+       iwl_mld_cooling_device_unregister(mld);
+       iwl_mld_thermal_zone_unregister(mld);
+#endif
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_thermal_h__
+#define __iwl_mld_thermal_h__
+
+#include "iwl-trans.h"
+
+struct iwl_mld;
+
+#ifdef CONFIG_THERMAL
+#include <linux/thermal.h>
+
+/**
+ * struct iwl_mld_cooling_device - cTDP cooling device state
+ * @cur_state: current cooling state (index into the budget table)
+ * @cdev: struct thermal cooling device, or NULL if registration failed
+ */
+struct iwl_mld_cooling_device {
+       u32 cur_state;
+       struct thermal_cooling_device *cdev;
+};
+
+int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state,
+                       enum iwl_ctdp_cmd_operation op);
+#endif
+
+void iwl_mld_handle_temp_notif(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
+void iwl_mld_handle_ct_kill_notif(struct iwl_mld *mld,
+                                 struct iwl_rx_packet *pkt);
+int iwl_mld_config_temp_report_ths(struct iwl_mld *mld);
+void iwl_mld_thermal_initialize(struct iwl_mld *mld);
+void iwl_mld_thermal_exit(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_thermal_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "hcmd.h"
+#include "ptp.h"
+#include "time_sync.h"
+#include <linux/ieee80211.h>
+
+/* Allocate and publish (via RCU) the time sync state for one peer */
+static int iwl_mld_init_time_sync(struct iwl_mld *mld, u32 protocols,
+                                 const u8 *addr)
+{
+       struct iwl_mld_time_sync_data *time_sync;
+
+       time_sync = kzalloc(sizeof(*time_sync), GFP_KERNEL);
+       if (!time_sync)
+               return -ENOMEM;
+
+       ether_addr_copy(time_sync->peer_addr, addr);
+       time_sync->active_protocols = protocols;
+       skb_queue_head_init(&time_sync->frame_list);
+
+       rcu_assign_pointer(mld->time_sync, time_sync);
+       return 0;
+}
+
+/* Push the current time sync configuration (peer address and active
+ * protocols) to the firmware.  Returns -EINVAL if time sync was never
+ * configured, otherwise the command's status.
+ */
+int iwl_mld_time_sync_fw_config(struct iwl_mld *mld)
+{
+       struct iwl_time_sync_cfg_cmd cmd = {};
+       struct iwl_mld_time_sync_data *time_sync;
+       int err;
+
+       time_sync = wiphy_dereference(mld->wiphy, mld->time_sync);
+       if (!time_sync)
+               return -EINVAL;
+
+       cmd.protocols = cpu_to_le32(time_sync->active_protocols);
+       ether_addr_copy(cmd.peer_addr, time_sync->peer_addr);
+
+       err = iwl_mld_send_cmd_pdu(mld,
+                                  WIDE_ID(DATA_PATH_GROUP,
+                                          WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
+                                  &cmd);
+       if (err)
+               IWL_ERR(mld, "Failed to send time sync cfg cmd: %d\n", err);
+
+       return err;
+}
+
+/* Configure time sync (TM/FTM timing measurement) for a single peer.
+ *
+ * The firmware supports only one peer; reconfiguring the same peer is
+ * allowed (e.g. after a firmware reset).  Returns 0 on success or a
+ * negative error code.
+ */
+int iwl_mld_time_sync_config(struct iwl_mld *mld, const u8 *addr, u32 protocols)
+{
+       struct iwl_mld_time_sync_data *time_sync;
+       int err;
+
+       time_sync = wiphy_dereference(mld->wiphy, mld->time_sync);
+
+       /* The fw only supports one peer. We do allow reconfiguration of the
+        * same peer for cases of fw reset etc.
+        */
+       if (time_sync && time_sync->active_protocols &&
+           !ether_addr_equal(addr, time_sync->peer_addr)) {
+               IWL_DEBUG_INFO(mld, "Time sync: reject config for peer: %pM\n",
+                              addr);
+               return -ENOBUFS;
+       }
+
+       /* Reject protocols outside the supported TM/FTM set */
+       if (protocols & ~(IWL_TIME_SYNC_PROTOCOL_TM |
+                         IWL_TIME_SYNC_PROTOCOL_FTM))
+               return -EINVAL;
+
+       IWL_DEBUG_INFO(mld, "Time sync: set peer addr=%pM\n", addr);
+
+       iwl_mld_deinit_time_sync(mld);
+       err = iwl_mld_init_time_sync(mld, protocols, addr);
+       if (err)
+               return err;
+
+       return iwl_mld_time_sync_fw_config(mld);
+}
+
+/* Tear down the time sync state.
+ *
+ * The pointer is cleared first so new readers can't see it, queued frames
+ * are purged, and the struct itself is freed via kfree_rcu() so concurrent
+ * RCU readers remain safe.
+ */
+void iwl_mld_deinit_time_sync(struct iwl_mld *mld)
+{
+       struct iwl_mld_time_sync_data *time_sync =
+               wiphy_dereference(mld->wiphy, mld->time_sync);
+
+       if (!time_sync)
+               return;
+
+       RCU_INIT_POINTER(mld->time_sync, NULL);
+       skb_queue_purge(&time_sync->frame_list);
+       kfree_rcu(time_sync, rcu_head);
+}
+
+/* Queue a TM/FTM frame from the configured peer for later timestamping.
+ * Returns true if the skb was taken (queued), false if the caller keeps
+ * ownership.
+ */
+bool iwl_mld_time_sync_frame(struct iwl_mld *mld, struct sk_buff *skb, u8 *addr)
+{
+       struct iwl_mld_time_sync_data *time_sync;
+       bool queued = false;
+
+       rcu_read_lock();
+       time_sync = rcu_dereference(mld->time_sync);
+       if (time_sync && ether_addr_equal(time_sync->peer_addr, addr) &&
+           (ieee80211_is_timing_measurement(skb) || ieee80211_is_ftm(skb))) {
+               skb_queue_tail(&time_sync->frame_list, skb);
+               queued = true;
+       }
+       rcu_read_unlock();
+
+       return queued;
+}
+
+/* Check whether a queued frame matches a timestamp notification: the
+ * dialog token must match and the peer must be the frame's SA or DA.
+ */
+static bool iwl_mld_is_skb_match(struct sk_buff *skb, u8 *addr, u8 dialog_token)
+{
+       struct ieee80211_mgmt *mgmt = (void *)skb->data;
+       u8 skb_dialog_token;
+
+       /* TM and FTM action frames carry the token at different offsets */
+       if (ieee80211_is_timing_measurement(skb))
+               skb_dialog_token = mgmt->u.action.u.wnm_timing_msr.dialog_token;
+       else
+               skb_dialog_token = mgmt->u.action.u.ftm.dialog_token;
+
+       return skb_dialog_token == dialog_token &&
+              (ether_addr_equal(mgmt->sa, addr) ||
+               ether_addr_equal(mgmt->da, addr));
+}
+
+/* Find the queued frame matching a timestamp notification.
+ *
+ * Dequeues (and frees) non-matching frames along the way, relying on the
+ * notifications arriving in the same order as the frames.  Returns the
+ * matching skb (ownership passes to the caller) or NULL.
+ */
+static struct sk_buff *iwl_mld_time_sync_find_skb(struct iwl_mld *mld, u8 *addr,
+                                                 u8 dialog_token)
+{
+       struct iwl_mld_time_sync_data *time_sync;
+       struct sk_buff *skb;
+
+       rcu_read_lock();
+
+       time_sync = rcu_dereference(mld->time_sync);
+       if (IWL_FW_CHECK(mld, !time_sync,
+                        "Time sync notification but time sync is not initialized\n")) {
+               rcu_read_unlock();
+               return NULL;
+       }
+
+       /* The notifications are expected to arrive in the same order of the
+        * frames. If the incoming notification doesn't match the first SKB
+        * in the queue, it means there was no time sync notification for this
+        * SKB and it can be dropped.
+        */
+       while ((skb = skb_dequeue(&time_sync->frame_list))) {
+               if (iwl_mld_is_skb_match(skb, addr, dialog_token))
+                       break;
+
+               kfree_skb(skb);
+               skb = NULL;
+               IWL_DEBUG_DROP(mld,
+                              "Time sync: drop SKB without matching notification\n");
+       }
+       rcu_read_unlock();
+
+       return skb;
+}
+
+/* Combine two little-endian 32-bit halves into a host-order 64-bit value */
+static u64 iwl_mld_get_64_bit(__le32 high, __le32 low)
+{
+       u64 hi = le32_to_cpu(high);
+
+       return (hi << 32) | le32_to_cpu(low);
+}
+
+/* Handle an RX timing measurement notification (t2/t3 timestamps).
+ *
+ * Matches the notification to the queued RX frame by peer address and
+ * dialog token, converts the FW timestamps (10 ns units) to PTP-adjusted
+ * nanoseconds under ptp_data.lock, attaches them to the skb and hands the
+ * frame to mac80211.
+ */
+void iwl_mld_handle_time_msmt_notif(struct iwl_mld *mld,
+                                   struct iwl_rx_packet *pkt)
+{
+       struct ptp_data *data = &mld->ptp_data;
+       struct iwl_time_msmt_notify *notif = (void *)pkt->data;
+       struct ieee80211_rx_status *rx_status;
+       struct skb_shared_hwtstamps *shwt;
+       u64 ts_10ns;
+       struct sk_buff *skb =
+               iwl_mld_time_sync_find_skb(mld, notif->peer_addr,
+                                          le32_to_cpu(notif->dialog_token));
+       u64 adj_time;
+
+       if (IWL_FW_CHECK(mld, !skb, "Time sync event but no pending skb\n"))
+               return;
+
+       /* Both timestamp conversions must use a consistent PTP adjustment */
+       spin_lock_bh(&data->lock);
+       ts_10ns = iwl_mld_get_64_bit(notif->t2_hi, notif->t2_lo);
+       adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
+       shwt = skb_hwtstamps(skb);
+       shwt->hwtstamp = ktime_set(0, adj_time);
+
+       ts_10ns = iwl_mld_get_64_bit(notif->t3_hi, notif->t3_lo);
+       adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
+       rx_status = IEEE80211_SKB_RXCB(skb);
+       rx_status->ack_tx_hwtstamp = ktime_set(0, adj_time);
+       spin_unlock_bh(&data->lock);
+
+       IWL_DEBUG_INFO(mld,
+                      "Time sync: RX event - report frame t2=%llu t3=%llu\n",
+                      ktime_to_ns(shwt->hwtstamp),
+                      ktime_to_ns(rx_status->ack_tx_hwtstamp));
+       ieee80211_rx_napi(mld->hw, NULL, skb, NULL);
+}
+
+/* Handle a TX timing measurement confirmation (t1/t4 timestamps).
+ *
+ * Matches the confirmation to the queued TX frame, converts the FW
+ * timestamps (10 ns units) to PTP-adjusted nanoseconds under
+ * ptp_data.lock, and reports TX status with the timestamps to mac80211.
+ */
+void iwl_mld_handle_time_sync_confirm_notif(struct iwl_mld *mld,
+                                           struct iwl_rx_packet *pkt)
+{
+       struct ptp_data *data = &mld->ptp_data;
+       struct iwl_time_msmt_cfm_notify *notif = (void *)pkt->data;
+       struct ieee80211_tx_status status = {};
+       struct skb_shared_hwtstamps *shwt;
+       u64 ts_10ns, adj_time;
+
+       status.skb =
+               iwl_mld_time_sync_find_skb(mld, notif->peer_addr,
+                                          le32_to_cpu(notif->dialog_token));
+
+       if (IWL_FW_CHECK(mld, !status.skb,
+                        "Time sync confirm but no pending skb\n"))
+               return;
+
+       /* Both timestamp conversions must use a consistent PTP adjustment */
+       spin_lock_bh(&data->lock);
+       ts_10ns = iwl_mld_get_64_bit(notif->t1_hi, notif->t1_lo);
+       adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
+       shwt = skb_hwtstamps(status.skb);
+       shwt->hwtstamp = ktime_set(0, adj_time);
+
+       ts_10ns = iwl_mld_get_64_bit(notif->t4_hi, notif->t4_lo);
+       adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
+       status.info = IEEE80211_SKB_CB(status.skb);
+       status.ack_hwtstamp = ktime_set(0, adj_time);
+       spin_unlock_bh(&data->lock);
+
+       IWL_DEBUG_INFO(mld,
+                      "Time sync: TX event - report frame t1=%llu t4=%llu\n",
+                      ktime_to_ns(shwt->hwtstamp),
+                      ktime_to_ns(status.ack_hwtstamp));
+       ieee80211_tx_status_ext(mld->hw, &status);
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#ifndef __iwl_mld_time_sync_h__
+#define __iwl_mld_time_sync_h__
+
+/**
+ * struct iwl_mld_time_sync_data - per-peer time sync state
+ * @rcu_head: for freeing via kfree_rcu()
+ * @peer_addr: MAC address of the single supported peer
+ * @active_protocols: bitmap of enabled protocols (TM/FTM)
+ * @frame_list: frames awaiting their timestamp notification
+ */
+struct iwl_mld_time_sync_data {
+       struct rcu_head rcu_head;
+       u8 peer_addr[ETH_ALEN];
+       u32 active_protocols;
+       struct sk_buff_head frame_list;
+};
+
+int iwl_mld_time_sync_config(struct iwl_mld *mld, const u8 *addr,
+                            u32 protocols);
+int iwl_mld_time_sync_fw_config(struct iwl_mld *mld);
+void iwl_mld_deinit_time_sync(struct iwl_mld *mld);
+void iwl_mld_handle_time_msmt_notif(struct iwl_mld *mld,
+                                   struct iwl_rx_packet *pkt);
+bool iwl_mld_time_sync_frame(struct iwl_mld *mld, struct sk_buff *skb,
+                            u8 *addr);
+void iwl_mld_handle_time_sync_confirm_notif(struct iwl_mld *mld,
+                                           struct iwl_rx_packet *pkt);
+
+#endif /* __iwl_mld_time_sync_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <net/mac80211.h>
+
+#include "tlc.h"
+#include "hcmd.h"
+#include "sta.h"
+
+#include "fw/api/rs.h"
+#include "fw/api/context.h"
+#include "fw/api/dhc.h"
+
+/* Map mac80211's station RX bandwidth to the TLC manager's channel width
+ * enum; anything unrecognized falls back to 20 MHz.
+ */
+static u8 iwl_mld_fw_bw_from_sta_bw(const struct ieee80211_link_sta *link_sta)
+{
+       switch (link_sta->bandwidth) {
+       case IEEE80211_STA_RX_BW_320:
+               return IWL_TLC_MNG_CH_WIDTH_320MHZ;
+       case IEEE80211_STA_RX_BW_160:
+               return IWL_TLC_MNG_CH_WIDTH_160MHZ;
+       case IEEE80211_STA_RX_BW_80:
+               return IWL_TLC_MNG_CH_WIDTH_80MHZ;
+       case IEEE80211_STA_RX_BW_40:
+               return IWL_TLC_MNG_CH_WIDTH_40MHZ;
+       case IEEE80211_STA_RX_BW_20:
+       default:
+               return IWL_TLC_MNG_CH_WIDTH_20MHZ;
+       }
+}
+
+/* Build the TLC config flags for a station.
+ *
+ * Combines STBC, LDPC, HE DCM and extra-EHT-LTF flags from the
+ * intersection of our own capabilities (@own_he_cap/@own_eht_cap) and
+ * the peer's HT/VHT/HE/EHT capabilities in @link_sta.
+ */
+static __le16
+iwl_mld_get_tlc_cmd_flags(struct iwl_mld *mld,
+                         struct ieee80211_vif *vif,
+                         struct ieee80211_link_sta *link_sta,
+                         const struct ieee80211_sta_he_cap *own_he_cap,
+                         const struct ieee80211_sta_eht_cap *own_eht_cap)
+{
+       struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+       struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+       struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+       bool has_vht = vht_cap->vht_supported;
+       u16 flags = 0;
+
+       /* STBC flags: only when the device has >1 TX antenna, taking the
+        * most capable of the peer's HE/VHT/HT RX STBC support
+        */
+       if (mld->cfg->ht_params->stbc &&
+           (hweight8(iwl_mld_get_valid_tx_ant(mld)) > 1)) {
+               if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
+                                     IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+                       flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+               else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
+                       flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+               else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)
+                       flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+       }
+
+       /* LDPC */
+       if (mld->cfg->ht_params->ldpc &&
+           ((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) ||
+            (has_vht && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
+               flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+       if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] &
+           IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+               flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+       /* Drop LDPC again if our own HE capability lacks it */
+       if (own_he_cap &&
+           !(own_he_cap->he_cap_elem.phy_cap_info[1] &
+                       IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+               flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+       /* DCM: peer must support RX DCM and we must support TX DCM */
+       if (he_cap->has_he &&
+           (he_cap->he_cap_elem.phy_cap_info[3] &
+            IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK &&
+            own_he_cap &&
+            own_he_cap->he_cap_elem.phy_cap_info[3] &
+                       IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK))
+               flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
+
+       /* Extra EHT LTF: both sides must advertise support */
+       if (own_eht_cap &&
+           own_eht_cap->eht_cap_elem.phy_cap_info[5] &
+               IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF &&
+           link_sta->eht_cap.has_eht &&
+           link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] &
+           IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF) {
+               flags |= IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK;
+       }
+
+       return cpu_to_le16(flags);
+}
+
+/* Translate the driver's valid TX antenna bitmap to the TLC chain mask */
+static u8 iwl_mld_get_fw_chains(struct iwl_mld *mld)
+{
+       u8 valid_ant = iwl_mld_get_valid_tx_ant(mld);
+       u8 fw_chains = 0;
+
+       if (valid_ant & ANT_A)
+               fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
+       if (valid_ant & ANT_B)
+               fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
+
+       return fw_chains;
+}
+
+/* Build the per-channel-width short-GI support bitmap for the TLC cmd */
+static u8 iwl_mld_get_fw_sgi(struct ieee80211_link_sta *link_sta)
+{
+       struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+       struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+       struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+       u8 sgi_chwidths = 0;
+
+       /* When the association supports HE, HT/VHT rates are never used
+        * for Tx, so there is no need to report short-GI support per
+        * channel width.
+        */
+       if (he_cap->has_he)
+               return 0;
+
+       if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+               sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
+       if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+               sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ);
+       if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+               sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ);
+       if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+               sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ);
+
+       return sgi_chwidths;
+}
+
+/* Return the highest FW MCS the peer supports at @nss according to its
+ * VHT RX MCS map, or 0 (with a warning) for a reserved encoding.
+ */
+static int
+iwl_mld_get_highest_fw_mcs(const struct ieee80211_sta_vht_cap *vht_cap,
+                          int nss)
+{
+       /* Each NSS occupies a 2-bit field in the MCS map */
+       u16 rx_mcs = (le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
+                     (2 * (nss - 1))) & 0x3;
+
+       switch (rx_mcs) {
+       case IEEE80211_VHT_MCS_SUPPORT_0_7:
+               return IWL_TLC_MNG_HT_RATE_MCS7;
+       case IEEE80211_VHT_MCS_SUPPORT_0_8:
+               return IWL_TLC_MNG_HT_RATE_MCS8;
+       case IEEE80211_VHT_MCS_SUPPORT_0_9:
+               return IWL_TLC_MNG_HT_RATE_MCS9;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
+
+       return 0;
+}
+
+/* Fill the TLC command's per-NSS VHT rate maps.
+ *
+ * For each supported NSS (bounded by rx_nss, or 1 under static SMPS),
+ * builds the 80 MHz MCS bitmap and copies it to the 160 MHz entry when
+ * the VHT extended NSS capability confirms that bandwidth/NSS combination.
+ */
+static void
+iwl_mld_fill_vht_rates(const struct ieee80211_link_sta *link_sta,
+                      const struct ieee80211_sta_vht_cap *vht_cap,
+                      struct iwl_tlc_config_cmd_v4 *cmd)
+{
+       u16 supp;
+       int i, highest_mcs;
+       u8 max_nss = link_sta->rx_nss;
+       struct ieee80211_vht_cap ieee_vht_cap = {
+               .vht_cap_info = cpu_to_le32(vht_cap->cap),
+               .supp_mcs = vht_cap->vht_mcs,
+       };
+
+       /* the station support only a single receive chain */
+       if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+               max_nss = 1;
+
+       for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) {
+               int nss = i + 1;
+
+               highest_mcs = iwl_mld_get_highest_fw_mcs(vht_cap, nss);
+               if (!highest_mcs)
+                       continue;
+
+               /* Bitmap of all MCS up to and including highest_mcs */
+               supp = BIT(highest_mcs + 1) - 1;
+               if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
+                       supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
+
+               cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp);
+               /* Check if VHT extended NSS indicates that the bandwidth/NSS
+                * configuration is supported - only for MCS 0 since we already
+                * decoded the MCS bits anyway ourselves.
+                */
+               if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160 &&
+                   ieee80211_get_vht_max_nss(&ieee_vht_cap,
+                                             IEEE80211_VHT_CHANWIDTH_160MHZ,
+                                             0, true, nss) >= nss)
+                       cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
+                               cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80];
+       }
+}
+
+/* Translate mac80211's HE MCS support enum into a bitmap with one bit
+ * per supported FW rate (MCS 0..N).
+ */
+static u16 iwl_mld_he_mac80211_mcs_to_fw_mcs(u16 mcs)
+{
+       switch (mcs) {
+       case IEEE80211_HE_MCS_SUPPORT_0_7:
+               return GENMASK(IWL_TLC_MNG_HT_RATE_MCS7, 0);
+       case IEEE80211_HE_MCS_SUPPORT_0_9:
+               return GENMASK(IWL_TLC_MNG_HT_RATE_MCS9, 0);
+       case IEEE80211_HE_MCS_SUPPORT_0_11:
+               return GENMASK(IWL_TLC_MNG_HT_RATE_MCS11, 0);
+       case IEEE80211_HE_MCS_NOT_SUPPORTED:
+               return 0;
+       }
+
+       WARN(1, "invalid HE MCS %d\n", mcs);
+       return 0;
+}
+
+/* Fill the TLC command's per-NSS HE rate maps for 80 and 160 MHz.
+ *
+ * For each NSS (bounded by rx_nss, or 1 under static SMPS), the usable
+ * MCS set is the intersection of the peer's RX support and our own TX
+ * support; if either side doesn't support the width at that NSS, the
+ * entry is marked unsupported.
+ */
+static void
+iwl_mld_fill_he_rates(const struct ieee80211_link_sta *link_sta,
+                     const struct ieee80211_sta_he_cap *own_he_cap,
+                     struct iwl_tlc_config_cmd_v4 *cmd)
+{
+       const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+       u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+       u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+       u16 tx_mcs_80 = le16_to_cpu(own_he_cap->he_mcs_nss_supp.tx_mcs_80);
+       u16 tx_mcs_160 = le16_to_cpu(own_he_cap->he_mcs_nss_supp.tx_mcs_160);
+       int i;
+       u8 nss = link_sta->rx_nss;
+
+       /* the station support only a single receive chain */
+       if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+               nss = 1;
+
+       for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
+               /* Each NSS occupies a 2-bit field in the HE MCS maps */
+               u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
+               u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
+               u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3;
+               u16 _tx_mcs_80 = (tx_mcs_80 >> (2 * i)) & 0x3;
+
+               /* If one side doesn't support - mark both as not supporting */
+               if (_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+                   _tx_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+                       _mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+                       _tx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+               }
+               if (_mcs_80 > _tx_mcs_80)
+                       _mcs_80 = _tx_mcs_80;
+               cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] =
+                       cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_80));
+
+               /* If one side doesn't support - mark both as not supporting */
+               if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+                   _tx_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+                       _mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+                       _tx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+               }
+               if (_mcs_160 > _tx_mcs_160)
+                       _mcs_160 = _tx_mcs_160;
+               cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
+                       cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_160));
+       }
+}
+
+/* OR @mcs_msk into the EHT rate table for each NSS up to @max_nss.
+ * @max_nss is 1-based; 0 means nothing is set for this MCS group.
+ */
+static void iwl_mld_set_eht_mcs(__le16 ht_rates[][3],
+                               enum IWL_TLC_MCS_PER_BW bw,
+                               u8 max_nss, u16 mcs_msk)
+{
+       if (max_nss >= 2)
+               ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le16(mcs_msk);
+
+       if (max_nss >= 1)
+               ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le16(mcs_msk);
+}
+
+/* Map a TLC per-bandwidth index to the matching EHT MCS/NSS capability
+ * struct. Returns NULL for an index with no EHT capability entry.
+ */
+static const
+struct ieee80211_eht_mcs_nss_supp_bw *
+iwl_mld_get_eht_mcs_of_bw(enum IWL_TLC_MCS_PER_BW bw,
+                         const struct ieee80211_eht_mcs_nss_supp *eht_mcs)
+{
+       switch (bw) {
+       case IWL_TLC_MCS_PER_BW_80:
+               return &eht_mcs->bw._80;
+       case IWL_TLC_MCS_PER_BW_160:
+               return &eht_mcs->bw._160;
+       case IWL_TLC_MCS_PER_BW_320:
+               return &eht_mcs->bw._320;
+       default:
+               return NULL;
+       }
+}
+
+/* Return the usable NSS for an EHT MCS group: the minimum of our TX NSS
+ * capability and the peer's RX NSS capability, each extracted from its
+ * packed rx_tx_*_max_nss field.
+ */
+static u8 iwl_mld_get_eht_max_nss(u8 rx_nss, u8 tx_nss)
+{
+       u8 tx = u8_get_bits(tx_nss, IEEE80211_EHT_MCS_NSS_TX);
+       u8 rx = u8_get_bits(rx_nss, IEEE80211_EHT_MCS_NSS_RX);
+       /* the max nss that can be used,
+        * is the min with our tx capa and the peer rx capa.
+        */
+       return min(tx, rx);
+}
+
+/* Usable NSS for MCS group @mcs_num: min of peer RX capa and our TX capa */
+#define MAX_NSS_MCS(mcs_num, rx, tx) \
+       iwl_mld_get_eht_max_nss((rx)->rx_tx_mcs ##mcs_num## _max_nss, \
+                               (tx)->rx_tx_mcs ##mcs_num## _max_nss)
+
+/* Fill the EHT rate table in @cmd from the intersection of the peer's RX
+ * MCS/NSS capabilities and our own TX capabilities, per bandwidth.
+ * 20/40/80 MHz share one table entry; 160/320 MHz are filled only if
+ * the selected channel width allows them.
+ */
+static void
+iwl_mld_fill_eht_rates(struct ieee80211_vif *vif,
+                      const struct ieee80211_link_sta *link_sta,
+                      const struct ieee80211_sta_he_cap *own_he_cap,
+                      const struct ieee80211_sta_eht_cap *own_eht_cap,
+                      struct iwl_tlc_config_cmd_v4 *cmd)
+{
+       /* peer RX mcs capa */
+       const struct ieee80211_eht_mcs_nss_supp *eht_rx_mcs =
+               &link_sta->eht_cap.eht_mcs_nss_supp;
+       /* our TX mcs capa */
+       const struct ieee80211_eht_mcs_nss_supp *eht_tx_mcs =
+               &own_eht_cap->eht_mcs_nss_supp;
+
+       enum IWL_TLC_MCS_PER_BW bw;
+       struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_rx_20;
+       struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_tx_20;
+
+       /* peer is 20 MHz only */
+       if (vif->type == NL80211_IFTYPE_AP &&
+           !(link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
+             IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+               mcs_rx_20 = eht_rx_mcs->only_20mhz;
+       } else {
+               /* The >=80 MHz struct has no separate MCS 0-7 field, so the
+                * MCS 0-9 NSS value is reused for the MCS 7 slot as well.
+                */
+               mcs_rx_20.rx_tx_mcs7_max_nss =
+                       eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
+               mcs_rx_20.rx_tx_mcs9_max_nss =
+                       eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
+               mcs_rx_20.rx_tx_mcs11_max_nss =
+                       eht_rx_mcs->bw._80.rx_tx_mcs11_max_nss;
+               mcs_rx_20.rx_tx_mcs13_max_nss =
+                       eht_rx_mcs->bw._80.rx_tx_mcs13_max_nss;
+       }
+
+       /* NIC is capable of 20 MHz only */
+       if (!(own_he_cap->he_cap_elem.phy_cap_info[0] &
+             IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+               mcs_tx_20 = eht_tx_mcs->only_20mhz;
+       } else {
+               mcs_tx_20.rx_tx_mcs7_max_nss =
+                       eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
+               mcs_tx_20.rx_tx_mcs9_max_nss =
+                       eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
+               mcs_tx_20.rx_tx_mcs11_max_nss =
+                       eht_tx_mcs->bw._80.rx_tx_mcs11_max_nss;
+               mcs_tx_20.rx_tx_mcs13_max_nss =
+                       eht_tx_mcs->bw._80.rx_tx_mcs13_max_nss;
+       }
+
+       /* rates for 20/40/80 MHz */
+       bw = IWL_TLC_MCS_PER_BW_80;
+       iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+                           MAX_NSS_MCS(7, &mcs_rx_20, &mcs_tx_20),
+                           GENMASK(7, 0));
+       iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+                           MAX_NSS_MCS(9, &mcs_rx_20, &mcs_tx_20),
+                           GENMASK(9, 8));
+       iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+                           MAX_NSS_MCS(11, &mcs_rx_20, &mcs_tx_20),
+                           GENMASK(11, 10));
+       iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+                           MAX_NSS_MCS(13, &mcs_rx_20, &mcs_tx_20),
+                           GENMASK(13, 12));
+
+       /* rates for 160/320 MHz */
+       for (bw = IWL_TLC_MCS_PER_BW_160; bw <= IWL_TLC_MCS_PER_BW_320; bw++) {
+               const struct ieee80211_eht_mcs_nss_supp_bw *mcs_rx =
+                       iwl_mld_get_eht_mcs_of_bw(bw, eht_rx_mcs);
+               const struct ieee80211_eht_mcs_nss_supp_bw *mcs_tx =
+                       iwl_mld_get_eht_mcs_of_bw(bw, eht_tx_mcs);
+
+               /* got unsupported index for bw */
+               if (!mcs_rx || !mcs_tx)
+                       continue;
+
+               /* break out if we don't support the bandwidth */
+               if (cmd->max_ch_width < (bw + IWL_TLC_MNG_CH_WIDTH_80MHZ))
+                       break;
+
+               iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+                                   MAX_NSS_MCS(9, mcs_rx, mcs_tx),
+                                   GENMASK(9, 0));
+               iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+                                   MAX_NSS_MCS(11, mcs_rx, mcs_tx),
+                                   GENMASK(11, 10));
+               iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+                                   MAX_NSS_MCS(13, mcs_rx, mcs_tx),
+                                   GENMASK(13, 12));
+       }
+
+       /* the station support only a single receive chain */
+       if (link_sta->smps_mode == IEEE80211_SMPS_STATIC ||
+           link_sta->rx_nss < 2)
+               memset(cmd->ht_rates[IWL_TLC_NSS_2], 0,
+                      sizeof(cmd->ht_rates[IWL_TLC_NSS_2]));
+}
+
+/* Fill the supported-rates part of the TLC config command: always the
+ * legacy (non-HT) rate bitmap, then pick the highest mode both sides
+ * support (EHT > HE > VHT > HT) and fill its rate table.
+ */
+static void
+iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                       struct ieee80211_link_sta *link_sta,
+                       struct ieee80211_supported_band *sband,
+                       const struct ieee80211_sta_he_cap *own_he_cap,
+                       const struct ieee80211_sta_eht_cap *own_eht_cap,
+                       struct iwl_tlc_config_cmd_v4 *cmd)
+{
+       int i;
+       u16 non_ht_rates = 0;
+       unsigned long rates_bitmap;
+       const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+       const struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+       const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+
+       /* non HT rates */
+       rates_bitmap = link_sta->supp_rates[sband->band];
+       for_each_set_bit(i, &rates_bitmap, BITS_PER_LONG)
+               non_ht_rates |= BIT(sband->bitrates[i].hw_value);
+
+       cmd->non_ht_rates = cpu_to_le16(non_ht_rates);
+       cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
+
+       /* a mode is used only if both the peer and we support it */
+       if (link_sta->eht_cap.has_eht && own_he_cap && own_eht_cap) {
+               cmd->mode = IWL_TLC_MNG_MODE_EHT;
+               iwl_mld_fill_eht_rates(vif, link_sta, own_he_cap,
+                                      own_eht_cap, cmd);
+       } else if (he_cap->has_he && own_he_cap) {
+               cmd->mode = IWL_TLC_MNG_MODE_HE;
+               iwl_mld_fill_he_rates(link_sta, own_he_cap, cmd);
+       } else if (vht_cap->vht_supported) {
+               cmd->mode = IWL_TLC_MNG_MODE_VHT;
+               iwl_mld_fill_vht_rates(link_sta, vht_cap, cmd);
+       } else if (ht_cap->ht_supported) {
+               cmd->mode = IWL_TLC_MNG_MODE_HT;
+               cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] =
+                       cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+
+               /* the station support only a single receive chain */
+               if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+                       cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
+                               0;
+               else
+                       cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
+                               cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+       }
+}
+
+/* Build and send the TLC_MNG_CONFIG_CMD for one link STA. Sent async
+ * because this may run inside an RCU read-side section. Errors are only
+ * logged - the FW keeps its previous rate-scaling config on failure.
+ */
+static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
+                                struct ieee80211_vif *vif,
+                                struct ieee80211_link_sta *link_sta,
+                                enum nl80211_band band)
+{
+       struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+       struct ieee80211_supported_band *sband = mld->hw->wiphy->bands[band];
+       const struct ieee80211_sta_he_cap *own_he_cap =
+               ieee80211_get_he_iftype_cap_vif(sband, vif);
+       const struct ieee80211_sta_eht_cap *own_eht_cap =
+               ieee80211_get_eht_iftype_cap_vif(sband, vif);
+       struct iwl_tlc_config_cmd_v4 cmd = {
+               /* For AP mode, use 20 MHz until the STA is authorized */
+               .max_ch_width = mld_sta->sta_state > IEEE80211_STA_ASSOC ?
+                       iwl_mld_fw_bw_from_sta_bw(link_sta) :
+                       IWL_TLC_MNG_CH_WIDTH_20MHZ,
+               .flags = iwl_mld_get_tlc_cmd_flags(mld, vif, link_sta,
+                                                  own_he_cap, own_eht_cap),
+               .chains = iwl_mld_get_fw_chains(mld),
+               .sgi_ch_width_supp = iwl_mld_get_fw_sgi(link_sta),
+               .max_mpdu_len = cpu_to_le16(link_sta->agg.max_rc_amsdu_len),
+       };
+       int fw_sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+       int ret;
+
+       /* no FW station yet - nothing to configure */
+       if (fw_sta_id < 0)
+               return;
+
+       cmd.sta_id = fw_sta_id;
+
+       iwl_mld_fill_supp_rates(mld, vif, link_sta, sband,
+                               own_he_cap, own_eht_cap,
+                               &cmd);
+
+       IWL_DEBUG_RATE(mld,
+                      "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
+                      cmd.sta_id, cmd.max_ch_width, cmd.mode);
+
+       /* Send async since this can be called within a RCU-read section */
+       ret = iwl_mld_send_cmd_with_flags_pdu(mld, WIDE_ID(DATA_PATH_GROUP,
+                                                          TLC_MNG_CONFIG_CMD),
+                                             CMD_ASYNC, &cmd);
+       if (ret)
+               IWL_ERR(mld, "Failed to send TLC cmd (%d)\n", ret);
+}
+
+/* Recalculate the station's A-MSDU length limit and propagate it to
+ * mac80211. For HT-only stations, force the 4095-byte cap first.
+ */
+static void iwl_mld_recalc_amsdu_len(struct iwl_mld *mld,
+                                    struct ieee80211_link_sta *link_sta)
+{
+       const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+
+       /* For EHT, HE and VHT - we can use the value as it was calculated by
+        * mac80211.
+        */
+       if (link_sta->eht_cap.has_eht || link_sta->he_cap.has_he ||
+           link_sta->vht_cap.vht_supported)
+               goto recalc;
+
+       /* But for HT, mac80211 doesn't enforce to 4095, so force it here */
+       if (ht_cap->ht_supported && ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU)
+               /* Agg is offloaded, so we need to assume that agg are enabled
+                * and max mpdu in ampdu is 4095 (spec 802.11-2016 9.3.2.1)
+                */
+               link_sta->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;
+
+recalc:
+       link_sta->agg.max_rc_amsdu_len = link_sta->agg.max_amsdu_len;
+       ieee80211_sta_recalc_aggregates(link_sta->sta);
+}
+
+/* Send a TLC debug-host-command (DHC) to the FW for @sta_id, carrying a
+ * single (@type, @data) debug config pair. Sent async; the result is
+ * logged and also returned to the caller.
+ */
+int iwl_mld_send_tlc_dhc(struct iwl_mld *mld, u8 sta_id, u32 type, u32 data)
+{
+       struct {
+               struct iwl_dhc_cmd dhc;
+               struct iwl_dhc_tlc_cmd tlc;
+       } __packed cmd = {
+               .tlc.sta_id = sta_id,
+               .tlc.type = cpu_to_le32(type),
+               .tlc.data[0] = cpu_to_le32(data),
+               /* DHC length is in dwords */
+               .dhc.length = cpu_to_le32(sizeof(cmd.tlc) >> 2),
+               .dhc.index_and_mask =
+                       cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC |
+                                   DHC_INTEGRATION_TLC_DEBUG_CONFIG),
+       };
+       int ret;
+
+       ret = iwl_mld_send_cmd_with_flags_pdu(mld,
+                                             WIDE_ID(IWL_ALWAYS_LONG_GROUP,
+                                                     DEBUG_HOST_COMMAND),
+                                             CMD_ASYNC, &cmd);
+       IWL_DEBUG_RATE(mld, "sta_id %d, type: 0x%X, value: 0x%X, ret%d\n",
+                      sta_id, type, data, ret);
+       return ret;
+}
+
+/* Configure FW rate scaling for a single link: refresh the A-MSDU length
+ * limit, then send the TLC config command for the link's band.
+ */
+void iwl_mld_config_tlc_link(struct iwl_mld *mld,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_bss_conf *link_conf,
+                            struct ieee80211_link_sta *link_sta)
+{
+       enum nl80211_band band;
+
+       if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
+               return;
+
+       band = link_conf->chanreq.oper.chan->band;
+
+       iwl_mld_recalc_amsdu_len(mld, link_sta);
+
+       iwl_mld_send_tlc_cmd(mld, vif, link_sta, band);
+}
+
+/* Configure FW rate scaling for every active link of @sta on @vif */
+void iwl_mld_config_tlc(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta)
+{
+       struct ieee80211_bss_conf *link;
+       int link_id;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       for_each_vif_active_link(vif, link, link_id) {
+               struct ieee80211_link_sta *link_sta =
+                       link_sta_dereference_check(sta, link_id);
+
+               /* skip links the STA doesn't participate in */
+               if (!link || !link_sta)
+                       continue;
+
+               iwl_mld_config_tlc_link(mld, vif, link, link_sta);
+       }
+}
+
+/* Return the maximal A-MSDU size (bytes) usable for @tid, clamped so an
+ * A-MSDU always fits in the TX FIFO that serves the TID's AC on the
+ * relevant LMAC(s), with a 256-byte margin for the TX command + headers.
+ */
+static u16
+iwl_mld_get_amsdu_size_of_tid(struct iwl_mld *mld,
+                             struct ieee80211_link_sta *link_sta,
+                             unsigned int tid)
+{
+       struct ieee80211_sta *sta = link_sta->sta;
+       struct ieee80211_vif *vif = iwl_mld_sta_from_mac80211(sta)->vif;
+       const u8 tid_to_mac80211_ac[] = {
+               IEEE80211_AC_BE,
+               IEEE80211_AC_BK,
+               IEEE80211_AC_BK,
+               IEEE80211_AC_BE,
+               IEEE80211_AC_VI,
+               IEEE80211_AC_VI,
+               IEEE80211_AC_VO,
+               IEEE80211_AC_VO,
+       };
+       unsigned int result = link_sta->agg.max_rc_amsdu_len;
+       u8 ac, txf, lmac;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* Don't send an AMSDU that will be longer than the TXF.
+        * Add a security margin of 256 for the TX command + headers.
+        * We also want to have the start of the next packet inside the
+        * fifo to be able to send bursts.
+        */
+
+       if (WARN_ON(tid >= ARRAY_SIZE(tid_to_mac80211_ac)))
+               return 0;
+
+       ac = tid_to_mac80211_ac[tid];
+
+       /* For HE redirect to trigger based fifos */
+       if (link_sta->he_cap.has_he)
+               ac += 4;
+
+       txf = iwl_mld_mac80211_ac_to_fw_tx_fifo(ac);
+
+       /* Only one link: take the lmac according to the band */
+       if (hweight16(sta->valid_links) <= 1) {
+               enum nl80211_band band;
+               struct ieee80211_bss_conf *link =
+                       wiphy_dereference(mld->wiphy,
+                                         vif->link_conf[link_sta->link_id]);
+
+               if (WARN_ON(!link || !link->chanreq.oper.chan))
+                       band = NL80211_BAND_2GHZ;
+               else
+                       band = link->chanreq.oper.chan->band;
+               lmac = iwl_mld_get_lmac_id(mld, band);
+
+       /* More than one link but with 2 lmacs: take the minimum */
+       } else if (fw_has_capa(&mld->fw->ucode_capa,
+                              IWL_UCODE_TLV_CAPA_CDB_SUPPORT)) {
+               /* clamp against the 5 GHz FIFO here, 2.4 GHz one below */
+               lmac = IWL_LMAC_5G_INDEX;
+               result = min_t(unsigned int, result,
+                              mld->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+               lmac = IWL_LMAC_24G_INDEX;
+       /* More than one link but only one lmac */
+       } else {
+               lmac = IWL_LMAC_24G_INDEX;
+       }
+
+       return min_t(unsigned int, result,
+                    mld->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+}
+
+/* Handle a TLC update notification from the FW: record the new TX rate
+ * (if reported) and apply the FW's A-MSDU size/enable decision per TID.
+ */
+void iwl_mld_handle_tlc_notif(struct iwl_mld *mld,
+                             struct iwl_rx_packet *pkt)
+{
+       struct iwl_tlc_update_notif *notif = (void *)pkt->data;
+       struct ieee80211_link_sta *link_sta;
+       u32 flags = le32_to_cpu(notif->flags);
+       u32 enabled;
+       u16 size;
+
+       if (IWL_FW_CHECK(mld, notif->sta_id >= mld->fw->ucode_capa.num_stations,
+                        "Invalid sta id (%d) in TLC notification\n",
+                        notif->sta_id))
+               return;
+
+       link_sta = wiphy_dereference(mld->wiphy,
+                                    mld->fw_id_to_link_sta[notif->sta_id]);
+
+       if (WARN(IS_ERR_OR_NULL(link_sta),
+                "link_sta of sta id (%d) doesn't exist\n", notif->sta_id))
+               return;
+
+       if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
+               struct iwl_mld_link_sta *mld_link_sta =
+                       iwl_mld_link_sta_from_mac80211(link_sta);
+               char pretty_rate[100];
+
+               if (WARN_ON(!mld_link_sta))
+                       return;
+
+               mld_link_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
+
+               rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
+                                    mld_link_sta->last_rate_n_flags);
+               IWL_DEBUG_RATE(mld, "TLC notif: new rate = %s\n", pretty_rate);
+       }
+
+       /* We are done processing the notif */
+       if (!(flags & IWL_TLC_NOTIF_FLAG_AMSDU))
+               return;
+
+       enabled = le32_to_cpu(notif->amsdu_enabled);
+       size = le32_to_cpu(notif->amsdu_size);
+
+       /* below this size A-MSDU is not worth it - disable it entirely
+        * (threshold presumably chosen empirically - confirm)
+        */
+       if (size < 2000) {
+               size = 0;
+               enabled = 0;
+       }
+
+       if (IWL_FW_CHECK(mld, size > link_sta->agg.max_amsdu_len,
+                        "Invalid AMSDU len in TLC notif: %d (Max AMSDU len: %d)\n",
+                        size, link_sta->agg.max_amsdu_len))
+               return;
+
+       link_sta->agg.max_rc_amsdu_len = size;
+
+       for (int i = 0; i < IWL_MAX_TID_COUNT; i++) {
+               if (enabled & BIT(i))
+                       link_sta->agg.max_tid_amsdu_len[i] =
+                               iwl_mld_get_amsdu_size_of_tid(mld, link_sta, i);
+               else
+                       link_sta->agg.max_tid_amsdu_len[i] = 0;
+       }
+
+       ieee80211_sta_recalc_aggregates(link_sta->sta);
+
+       IWL_DEBUG_RATE(mld,
+                      "AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
+                      le32_to_cpu(notif->amsdu_size), size, enabled);
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_tlc_h__
+#define __iwl_mld_tlc_h__
+
+#include "mld.h"
+
+/* Configure FW rate scaling (TLC) for a single link STA */
+void iwl_mld_config_tlc_link(struct iwl_mld *mld,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_bss_conf *link_conf,
+                            struct ieee80211_link_sta *link_sta);
+
+/* Configure FW rate scaling for all active links of @sta */
+void iwl_mld_config_tlc(struct iwl_mld *mld, struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta);
+
+/* Handle a TLC update notification (rate / A-MSDU) from the FW */
+void iwl_mld_handle_tlc_notif(struct iwl_mld *mld,
+                             struct iwl_rx_packet *pkt);
+
+/* Send a TLC debug-host-command; returns 0 or a negative error */
+int iwl_mld_send_tlc_dhc(struct iwl_mld *mld, u8 sta_id, u32 type, u32 data);
+
+#endif /* __iwl_mld_tlc_h__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 - 2025 Intel Corporation
+ */
+#include <net/ip.h>
+
+#include "tx.h"
+#include "sta.h"
+#include "hcmd.h"
+#include "iwl-utils.h"
+#include "iface.h"
+
+#include "fw/dbg.h"
+
+#include "fw/api/tx.h"
+#include "fw/api/rs.h"
+#include "fw/api/txq.h"
+#include "fw/api/datapath.h"
+#include "fw/api/time-event.h"
+
+#define MAX_ANT_NUM 2
+
+/* Toggles between TX antennas. Receives the bitmask of valid TX antennas and
+ * the *index* used for the last TX, and returns the next valid *index* to use.
+ * In order to set it in the tx_cmd, must do BIT(idx).
+ */
+static u8 iwl_mld_next_ant(u8 valid, u8 last_idx)
+{
+       u8 index = last_idx;
+
+       /* scan at most MAX_ANT_NUM positions, wrapping around */
+       for (int i = 0; i < MAX_ANT_NUM; i++) {
+               index = (index + 1) % MAX_ANT_NUM;
+               if (valid & BIT(index))
+                       return index;
+       }
+
+       WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
+
+       return last_idx;
+}
+
+/* Advance *ant to the next valid TX antenna index for this device */
+void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant)
+{
+       *ant = iwl_mld_next_ant(iwl_mld_get_valid_tx_ant(mld), *ant);
+}
+
+/* Return the TX queue size to allocate for @txq: the largest size
+ * required by any active link of the STA (EHT > HE > default).
+ */
+static int
+iwl_mld_get_queue_size(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+       struct ieee80211_sta *sta = txq->sta;
+       struct ieee80211_link_sta *link_sta;
+       unsigned int link_id;
+       int max_size = IWL_DEFAULT_QUEUE_SIZE;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       for_each_sta_active_link(txq->vif, sta, link_sta, link_id) {
+               /* EHT is the largest - no need to keep looking */
+               if (link_sta->eht_cap.has_eht) {
+                       max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;
+                       break;
+               }
+
+               if (link_sta->he_cap.has_he)
+                       max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
+       }
+
+       return max_size;
+}
+
+/* Allocate a FW TX queue for @txq via the transport layer.
+ * Returns the FW queue id on success, or a negative error.
+ */
+static int iwl_mld_allocate_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+       /* mac80211 uses IEEE80211_NUM_TIDS for the management TID */
+       u8 tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID : txq->tid;
+       u32 fw_sta_mask = iwl_mld_fw_sta_id_mask(mld, txq->sta);
+       /* We can't know when the station is asleep or awake, so we
+        * must disable the queue hang detection.
+        */
+       unsigned int watchdog_timeout = txq->vif->type == NL80211_IFTYPE_AP ?
+                               IWL_WATCHDOG_DISABLED :
+                               mld->trans->trans_cfg->base_params->wd_timeout;
+       int queue, size;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (tid == IWL_MGMT_TID)
+               size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
+                            mld->trans->cfg->min_txq_size);
+       else
+               size = iwl_mld_get_queue_size(mld, txq);
+
+       queue = iwl_trans_txq_alloc(mld->trans, 0, fw_sta_mask, tid, size,
+                                   watchdog_timeout);
+
+       if (queue >= 0)
+               IWL_DEBUG_TX_QUEUES(mld,
+                                   "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
+                                   queue, fw_sta_mask, tid);
+       return queue;
+}
+
+/* Allocate the FW queue for @txq and publish the fw-id -> txq mapping.
+ * Returns 0 on success or a negative error from the allocation.
+ */
+static int iwl_mld_add_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+       struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+       int id;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* This will also send the SCD_QUEUE_CONFIG_CMD */
+       id = iwl_mld_allocate_txq(mld, txq);
+       if (id < 0)
+               return id;
+
+       mld_txq->fw_id = id;
+       mld_txq->status.allocated = true;
+
+       rcu_assign_pointer(mld->fw_id_to_txq[id], txq);
+
+       return 0;
+}
+
+/* Drain the pending-TXQ list: allocate a FW queue for each entry and
+ * start transmitting its queued frames. On allocation failure the frames
+ * stay on the txq for a later retry.
+ */
+void iwl_mld_add_txq_list(struct iwl_mld *mld)
+{
+       lockdep_assert_wiphy(mld->wiphy);
+
+       while (!list_empty(&mld->txqs_to_add)) {
+               struct ieee80211_txq *txq;
+               struct iwl_mld_txq *mld_txq =
+                       list_first_entry(&mld->txqs_to_add, struct iwl_mld_txq,
+                                        list);
+               int failed;
+
+               txq = container_of((void *)mld_txq, struct ieee80211_txq,
+                                  drv_priv);
+
+               failed = iwl_mld_add_txq(mld, txq);
+
+               /* removal from the list races with TX-path users of the
+                * list, hence the lock and BH disable
+                */
+               local_bh_disable();
+               spin_lock(&mld->add_txqs_lock);
+               list_del_init(&mld_txq->list);
+               spin_unlock(&mld->add_txqs_lock);
+               /* If the queue allocation failed, we can't transmit. Leave the
+                * frames on the txq, maybe the attempt to allocate the queue
+                * will succeed.
+                */
+               if (!failed)
+                       iwl_mld_tx_from_txq(mld, txq);
+               local_bh_enable();
+       }
+}
+
+/* wiphy work callback: process the pending-TXQ list (deferred from the
+ * TX path, where queue allocation can't be done directly).
+ */
+void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+       struct iwl_mld *mld = container_of(wk, struct iwl_mld,
+                                          add_txqs_wk);
+
+       /* will reschedule to run after restart */
+       if (mld->fw_status.in_hw_restart)
+               return;
+
+       iwl_mld_add_txq_list(mld);
+}
+
+/* Remove the FW scheduler queue (SCD_QUEUE_CONFIG_CMD with the REMOVE
+ * operation) and then free the transport-level queue resources.
+ */
+void
+iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id)
+{
+       struct iwl_scd_queue_cfg_cmd remove_cmd = {
+               .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
+               .u.remove.tid = cpu_to_le32(tid),
+               .u.remove.sta_mask = cpu_to_le32(fw_sta_mask),
+       };
+
+       iwl_mld_send_cmd_pdu(mld,
+                            WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD),
+                            &remove_cmd);
+
+       iwl_trans_txq_free(mld->trans, queue_id);
+}
+
+/* Tear down @txq: unlink it from the pending-add list, free its FW queue
+ * if one was allocated, and clear the fw-id -> txq mapping.
+ */
+void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+       struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+       u32 sta_msk, tid;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       /* it may still be pending allocation - remove it from the list */
+       spin_lock_bh(&mld->add_txqs_lock);
+       if (!list_empty(&mld_txq->list))
+               list_del_init(&mld_txq->list);
+       spin_unlock_bh(&mld->add_txqs_lock);
+
+       if (!mld_txq->status.allocated ||
+           WARN_ON(mld_txq->fw_id >= ARRAY_SIZE(mld->fw_id_to_txq)))
+               return;
+
+       sta_msk = iwl_mld_fw_sta_id_mask(mld, txq->sta);
+
+       tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID :
+                                              txq->tid;
+
+       iwl_mld_free_txq(mld, sta_msk, tid, mld_txq->fw_id);
+
+       RCU_INIT_POINTER(mld->fw_id_to_txq[mld_txq->fw_id], NULL);
+       mld_txq->status.allocated = false;
+}
+
+/* Pointer to an extension header of @type at offset @off from the network
+ * header of @skb
+ */
+#define OPT_HDR(type, skb, off) \
+       (type *)(skb_network_header(skb) + (off))
+
+/* Build the TX command's offload_assist word for @skb: checksum offload
+ * setup (L3/L4), 802.11 header length, and A-MSDU / padding flags.
+ * Unsupported checksum requests fall back to skb_checksum_help().
+ */
+static __le32
+iwl_mld_get_offload_assist(struct sk_buff *skb, bool amsdu)
+{
+       struct ieee80211_hdr *hdr = (void *)skb->data;
+       u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
+       u16 offload_assist = 0;
+#if IS_ENABLED(CONFIG_INET)
+       u8 protocol = 0;
+
+       /* Do not compute checksum if already computed */
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               goto out;
+
+       /* We do not expect to be requested to csum stuff we do not support */
+
+       /* TBD: do we also need to check
+        * !(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) now that all
+        * the devices we support has this flags?
+        */
+       if (WARN_ONCE(skb->protocol != htons(ETH_P_IP) &&
+                     skb->protocol != htons(ETH_P_IPV6),
+                     "No support for requested checksum\n")) {
+               skb_checksum_help(skb);
+               goto out;
+       }
+
+       if (skb->protocol == htons(ETH_P_IP)) {
+               protocol = ip_hdr(skb)->protocol;
+       } else {
+#if IS_ENABLED(CONFIG_IPV6)
+               struct ipv6hdr *ipv6h =
+                       (struct ipv6hdr *)skb_network_header(skb);
+               unsigned int off = sizeof(*ipv6h);
+
+               /* walk the IPv6 extension-header chain to find L4 */
+               protocol = ipv6h->nexthdr;
+               while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+                       struct ipv6_opt_hdr *hp;
+
+                       /* only supported extension headers */
+                       if (protocol != NEXTHDR_ROUTING &&
+                           protocol != NEXTHDR_HOP &&
+                           protocol != NEXTHDR_DEST) {
+                               skb_checksum_help(skb);
+                               goto out;
+                       }
+
+                       hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
+                       protocol = hp->nexthdr;
+                       off += ipv6_optlen(hp);
+               }
+               /* if we get here - protocol now should be TCP/UDP */
+#endif
+       }
+
+       if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
+               WARN_ON_ONCE(1);
+               skb_checksum_help(skb);
+               goto out;
+       }
+
+       /* enable L4 csum */
+       offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
+
+       /* Set offset to IP header (snap).
+        * We don't support tunneling so no need to take care of inner header.
+        * Size is in words.
+        */
+       offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
+
+       /* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */
+       if (skb->protocol == htons(ETH_P_IP) && amsdu) {
+               ip_hdr(skb)->check = 0;
+               offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
+       }
+
+       /* reset UDP/TCP header csum */
+       if (protocol == IPPROTO_TCP)
+               tcp_hdr(skb)->check = 0;
+       else
+               udp_hdr(skb)->check = 0;
+
+out:
+#endif
+       /* 802.11 header length is passed to the FW in words */
+       mh_len /= 2;
+       offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
+
+       if (amsdu)
+               offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
+       else if (ieee80211_hdrlen(hdr->frame_control) % 4)
+               /* padding is inserted later in transport */
+               offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+
+       return cpu_to_le32(offload_assist);
+}
+
+/* Resolve the basic-rates bitmap and band for a frame: prefer the
+ * per-link config when a link can be determined, otherwise fall back to
+ * the vif-level bss_conf and the frame's band.
+ */
+static void iwl_mld_get_basic_rates_and_band(struct iwl_mld *mld,
+                                            struct ieee80211_vif *vif,
+                                            struct ieee80211_tx_info *info,
+                                            unsigned long *basic_rates,
+                                            u8 *band)
+{
+       u32 link_id = u32_get_bits(info->control.flags,
+                                  IEEE80211_TX_CTRL_MLO_LINK);
+
+       *basic_rates = vif->bss_conf.basic_rates;
+       *band = info->band;
+
+       if (link_id == IEEE80211_LINK_UNSPECIFIED &&
+           ieee80211_vif_is_mld(vif)) {
+               /* shouldn't do this when >1 link is active */
+               WARN_ON(hweight16(vif->active_links) != 1);
+               link_id = __ffs(vif->active_links);
+       }
+
+       if (link_id < IEEE80211_LINK_UNSPECIFIED) {
+               struct ieee80211_bss_conf *link_conf;
+
+               rcu_read_lock();
+               link_conf = rcu_dereference(vif->link_conf[link_id]);
+               if (link_conf) {
+                       *basic_rates = link_conf->basic_rates;
+                       if (link_conf->chanreq.oper.chan)
+                               *band = link_conf->chanreq.oper.chan->band;
+               }
+               rcu_read_unlock();
+       }
+}
+
+/* Return the lowest usable legacy rate (FW rate index) for this frame:
+ * the lowest basic rate, preferring CCK on 2.4 GHz (unless CCK is
+ * excluded), with sensible first-rate fallbacks when no basic rate fits.
+ */
+u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
+                          struct ieee80211_tx_info *info,
+                          struct ieee80211_vif *vif)
+{
+       struct ieee80211_supported_band *sband;
+       u16 lowest_cck = IWL_RATE_COUNT, lowest_ofdm = IWL_RATE_COUNT;
+       unsigned long basic_rates;
+       u8 band, rate;
+       u32 i;
+
+       iwl_mld_get_basic_rates_and_band(mld, vif, info, &basic_rates, &band);
+
+       sband = mld->hw->wiphy->bands[band];
+       for_each_set_bit(i, &basic_rates, BITS_PER_LONG) {
+               u16 hw = sband->bitrates[i].hw_value;
+
+               if (hw >= IWL_FIRST_OFDM_RATE) {
+                       if (lowest_ofdm > hw)
+                               lowest_ofdm = hw;
+               } else if (lowest_cck > hw) {
+                       lowest_cck = hw;
+               }
+       }
+
+       /* CCK is an option only on 2.4 GHz, and not for P2P interfaces */
+       if (band == NL80211_BAND_2GHZ && !vif->p2p &&
+           vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+           !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) {
+               if (lowest_cck != IWL_RATE_COUNT)
+                       rate = lowest_cck;
+               else if (lowest_ofdm != IWL_RATE_COUNT)
+                       rate = lowest_ofdm;
+               else
+                       rate = IWL_FIRST_CCK_RATE;
+       } else if (lowest_ofdm != IWL_RATE_COUNT) {
+               rate = lowest_ofdm;
+       } else {
+               rate = IWL_FIRST_OFDM_RATE;
+       }
+
+       return rate;
+}
+
+/* Convert a mac80211 legacy rate index into a firmware rate_n_flags
+ * value: a PLCP rate code plus a CCK or legacy-OFDM modulation flag.
+ * An index that is not a well-known legacy rate (including -1) is
+ * replaced by the lowest usable rate for this frame.
+ */
+static u32 iwl_mld_mac80211_rate_idx_to_fw(struct iwl_mld *mld,
+                                          struct ieee80211_tx_info *info,
+                                          int rate_idx)
+{
+       u32 rate_flags = 0;
+       u8 rate_plcp;
+
+       /* if the rate isn't a well known legacy rate, take the lowest one */
+       if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
+               rate_idx = iwl_mld_get_lowest_rate(mld, info,
+                                                  info->control.vif);
+
+       WARN_ON_ONCE(rate_idx < 0);
+
+       /* Set CCK or OFDM flag */
+       if (rate_idx <= IWL_LAST_CCK_RATE)
+               rate_flags |= RATE_MCS_CCK_MSK;
+       else
+               rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
+
+       /* Legacy rates are indexed:
+        * 0 - 3 for CCK and 0 - 7 for OFDM
+        */
+       rate_plcp = (rate_idx >= IWL_FIRST_OFDM_RATE ?
+                    rate_idx - IWL_FIRST_OFDM_RATE : rate_idx);
+
+       return (u32)rate_plcp | rate_flags;
+}
+
+/* Select the TX antenna for a frame, encoded at RATE_MCS_ANT_POS:
+ * data frames to a known station use the per-station data antenna,
+ * everything else uses the driver's management antenna.
+ */
+static u32 iwl_mld_get_tx_ant(struct iwl_mld *mld,
+                             struct ieee80211_tx_info *info,
+                             struct ieee80211_sta *sta, __le16 fc)
+{
+       u8 ant = mld->mgmt_tx_ant;
+
+       if (sta && ieee80211_is_data(fc))
+               ant = iwl_mld_sta_from_mac80211(sta)->data_tx_ant;
+
+       return BIT(ant) << RATE_MCS_ANT_POS;
+}
+
+/* Build the firmware rate_n_flags for an injected frame with a fixed
+ * rate (IEEE80211_TX_CTRL_RATE_INJECT): legacy/HT/VHT rates are built
+ * in the v1 layout and converted, and the antenna mask is taken from
+ * the injection request when one was given.
+ */
+static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld,
+                                     struct ieee80211_tx_info *info,
+                                     struct ieee80211_sta *sta,
+                                     __le16 fc)
+{
+       struct ieee80211_tx_rate *rate = &info->control.rates[0];
+       u32 result;
+
+       /* we only care about legacy/HT/VHT so far, so we can
+        * build in v1 and use iwl_new_rate_from_v1()
+        * FIXME: in newer devices we only support the new rates, build
+        * the rate_n_flags in the new format here instead of using v1 and
+        * converting it.
+        */
+
+       if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+               u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+               u8 nss = ieee80211_rate_get_vht_nss(rate);
+
+               result = RATE_MCS_VHT_MSK_V1;
+               result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK);
+               result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
+
+               if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+                       result |= RATE_MCS_SGI_MSK_V1;
+
+               /* v1 channel width encoding: 1=40, 2=80, 3=160 MHz */
+               if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+                       result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+               else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+                       result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1);
+               else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+                       result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1);
+
+               result = iwl_new_rate_from_v1(result);
+       } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+               result = RATE_MCS_HT_MSK_V1;
+               result |= u32_encode_bits(rate->idx,
+                                         RATE_HT_MCS_RATE_CODE_MSK_V1 |
+                                         RATE_HT_MCS_NSS_MSK_V1);
+               if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+                       result |= RATE_MCS_SGI_MSK_V1;
+               if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+                       result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+               if (info->flags & IEEE80211_TX_CTL_LDPC)
+                       result |= RATE_MCS_LDPC_MSK_V1;
+               if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
+                       result |= RATE_MCS_STBC_MSK;
+
+               result = iwl_new_rate_from_v1(result);
+       } else {
+               result = iwl_mld_mac80211_rate_idx_to_fw(mld, info, rate->idx);
+       }
+
+       /* prefer the antennas requested by the injector, if any */
+       if (info->control.antennas)
+               result |= u32_encode_bits(info->control.antennas,
+                                         RATE_MCS_ANT_AB_MSK);
+       else
+               result |= iwl_mld_get_tx_ant(mld, info, sta, fc);
+
+       return result;
+}
+
+/* Compute the rate_n_flags the TX command should carry: the injected
+ * fixed rate when requested, otherwise the lowest usable legacy rate
+ * combined with the selected TX antenna.
+ */
+static u32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld,
+                                      struct ieee80211_tx_info *info,
+                                      struct ieee80211_sta *sta, __le16 fc)
+{
+       u32 rate_n_flags;
+
+       if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
+               return iwl_mld_get_inject_tx_rate(mld, info, sta, fc);
+
+       rate_n_flags = iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1);
+       rate_n_flags |= iwl_mld_get_tx_ant(mld, info, sta, fc);
+
+       return rate_n_flags;
+}
+
+/* Copy the frame's MAC header into the TX command, fixing up address 3
+ * for A-MSDU (TSO) frames.
+ */
+static void
+iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd_gen3 *tx_cmd,
+                       struct sk_buff *skb, bool amsdu)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (void *)skb->data;
+       struct ieee80211_vif *vif;
+
+       memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+
+       /* nothing more to do unless this is a TSO-generated A-MSDU */
+       if (!amsdu || !skb_is_gso(skb))
+               return;
+
+       /* Per IEEE 802.11-2020 table 9-30 (Address field contents),
+        * address 3 of an A-MSDU must carry the BSSID. The skb keeps the
+        * original address 3 so the A-MSDU subframe headers can be built
+        * from it; only the copy in the command header is overridden.
+        *
+        * Note: this fills in the MLD address; the firmware translates it
+        * to the link address after encryption.
+        */
+       vif = info->control.vif;
+       if (vif->type == NL80211_IFTYPE_STATION)
+               ether_addr_copy(tx_cmd->hdr->addr3, vif->cfg.ap_addr);
+       else if (vif->type == NL80211_IFTYPE_AP)
+               ether_addr_copy(tx_cmd->hdr->addr3, vif->addr);
+}
+
+/* Fill the device TX command for a frame: MAC header copy, checksum/
+ * A-MSDU offload assist, total length, flags, and - only when the
+ * driver picks the rate - rate_n_flags.
+ */
+static void
+iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb,
+                   struct iwl_device_tx_cmd *dev_tx_cmd,
+                   struct ieee80211_sta *sta)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (void *)skb->data;
+       struct iwl_mld_sta *mld_sta = sta ? iwl_mld_sta_from_mac80211(sta) :
+                                           NULL;
+       struct iwl_tx_cmd_gen3 *tx_cmd;
+       bool amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+                    (*ieee80211_get_qos_ctl(hdr) &
+                     IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+       u32 rate_n_flags = 0;
+       u16 flags = 0;
+
+       dev_tx_cmd->hdr.cmd = TX_CMD;
+
+       /* no hw_key means mac80211 did not ask for HW encryption */
+       if (!info->control.hw_key)
+               flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
+
+       /* For data and mgmt packets rate info comes from the fw.
+        * Only set rate/antenna for injected frames with fixed rate, or
+        * when no sta is given.
+        */
+       if (unlikely(!sta ||
+                    info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
+               flags |= IWL_TX_FLAGS_CMD_RATE;
+               rate_n_flags = iwl_mld_get_tx_rate_n_flags(mld, info, sta,
+                                                          hdr->frame_control);
+       } else if (!ieee80211_is_data(hdr->frame_control) ||
+                  (mld_sta &&
+                   mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)) {
+               /* These are important frames */
+               flags |= IWL_TX_FLAGS_HIGH_PRI;
+       }
+
+       tx_cmd = (void *)dev_tx_cmd->payload;
+
+       iwl_mld_fill_tx_cmd_hdr(tx_cmd, skb, amsdu);
+
+       tx_cmd->offload_assist = iwl_mld_get_offload_assist(skb, amsdu);
+
+       /* Total # bytes to be transmitted */
+       tx_cmd->len = cpu_to_le16((u16)skb->len);
+
+       tx_cmd->flags = cpu_to_le16(flags);
+
+       tx_cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+}
+
+/* Resolve the iwl_mld_link a frame is sent on, from the MLO link id in
+ * the TX control flags; when unspecified, the first active link (or
+ * link 0 if none is active) is used.
+ *
+ * Caller must ensure info->control.vif is not NULL.
+ */
+static struct iwl_mld_link *
+iwl_mld_get_link_from_tx_info(struct ieee80211_tx_info *info)
+{
+       struct ieee80211_vif *vif = info->control.vif;
+       struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+       u32 link_id = u32_get_bits(info->control.flags,
+                                  IEEE80211_TX_CTRL_MLO_LINK);
+
+       if (link_id == IEEE80211_LINK_UNSPECIFIED)
+               link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+
+       return rcu_dereference(mld_vif->link[link_id]);
+}
+
+/* Resolve the firmware TX queue id for a frame.
+ *
+ * Frames with a station TXQ use its firmware queue. Otherwise the
+ * queue is derived from the vif type: broadcast/multicast station
+ * queues for AP/ADHOC, and the aux station queue for a P2P device
+ * inside a ROC. Returns IWL_MLD_INVALID_QUEUE when no queue can be
+ * found, or IWL_MLD_INVALID_DROP_TX when the frame must be dropped.
+ */
+static int
+iwl_mld_get_tx_queue_id(struct iwl_mld *mld, struct ieee80211_txq *txq,
+                       struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (void *)skb->data;
+       __le16 fc = hdr->frame_control;
+       struct iwl_mld_vif *mld_vif;
+       struct iwl_mld_link *link;
+
+       if (txq && txq->sta)
+               return iwl_mld_txq_from_mac80211(txq)->fw_id;
+
+       if (!info->control.vif)
+               return IWL_MLD_INVALID_QUEUE;
+
+       switch (info->control.vif->type) {
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_ADHOC:
+               link = iwl_mld_get_link_from_tx_info(info);
+
+               if (WARN_ON(!link))
+                       break;
+
+               /* ucast disassociate/deauth frames without a station might
+                * happen, especially with reason 7 ("Class 3 frame received
+                * from nonassociated STA").
+                */
+               if (ieee80211_is_mgmt(fc) &&
+                   (!ieee80211_is_bufferable_mmpdu(skb) ||
+                    ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
+                       return link->bcast_sta.queue_id;
+
+               if (is_multicast_ether_addr(hdr->addr1) &&
+                   !ieee80211_has_order(fc))
+                       return link->mcast_sta.queue_id;
+
+               WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
+                         "Couldn't find a TXQ. fc=0x%02x", le16_to_cpu(fc));
+               return link->bcast_sta.queue_id;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
+
+               /* ROC_NUM_ACTIVITIES means no ROC is currently running */
+               if (mld_vif->roc_activity == ROC_NUM_ACTIVITIES) {
+                       IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n");
+                       return IWL_MLD_INVALID_DROP_TX;
+               }
+
+               WARN_ON(!ieee80211_is_mgmt(fc));
+
+               return mld_vif->deflink.aux_sta.queue_id;
+       default:
+               /* TODO: consider monitor (task=monitor) */
+               WARN_ONCE(1, "Unsupported vif type\n");
+               break;
+       }
+
+       return IWL_MLD_INVALID_QUEUE;
+}
+
+/* Append the P2P NoA (notice of absence) attribute, taken from the most
+ * recent probe response data notification, to a P2P probe response.
+ * May grow the skb (pskb_expand_head) when it lacks tailroom.
+ */
+static void iwl_mld_probe_resp_set_noa(struct iwl_mld *mld,
+                                      struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct iwl_mld_link *mld_link =
+               &iwl_mld_vif_from_mac80211(info->control.vif)->deflink;
+       struct iwl_probe_resp_data *resp_data;
+       u8 *pos;
+
+       if (!info->control.vif->p2p)
+               return;
+
+       rcu_read_lock();
+
+       resp_data = rcu_dereference(mld_link->probe_resp_data);
+       if (!resp_data)
+               goto out;
+
+       if (!resp_data->notif.noa_active)
+               goto out;
+
+       if (skb_tailroom(skb) < resp_data->noa_len) {
+               if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
+                       IWL_ERR(mld,
+                               "Failed to reallocate probe resp\n");
+                       goto out;
+               }
+       }
+
+       pos = skb_put(skb, resp_data->noa_len);
+
+       /* build the WFA P2P vendor-specific IE header by hand */
+       *pos++ = WLAN_EID_VENDOR_SPECIFIC;
+       /* Set length of IE body (not including ID and length itself) */
+       *pos++ = resp_data->noa_len - 2;
+       *pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
+       *pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
+       *pos++ = WLAN_OUI_WFA & 0xff;
+       *pos++ = WLAN_OUI_TYPE_WFA_P2P;
+
+       memcpy(pos, &resp_data->notif.noa_attr,
+              resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));
+
+out:
+       rcu_read_unlock();
+}
+
+/* Transmit a single MPDU.
+ *
+ * Resolves the destination firmware queue, builds the TX command and
+ * hands the frame to the transport. Returns 0 on success; on failure
+ * the skb is NOT consumed and the caller must free it.
+ *
+ * This function must be called with BHs disabled.
+ */
+static int iwl_mld_tx_mpdu(struct iwl_mld *mld, struct sk_buff *skb,
+                          struct ieee80211_txq *txq)
+{
+       struct ieee80211_hdr *hdr = (void *)skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_sta *sta = txq ? txq->sta : NULL;
+       struct iwl_device_tx_cmd *dev_tx_cmd;
+       int queue = iwl_mld_get_tx_queue_id(mld, txq, skb);
+       u8 tid = IWL_MAX_TID_COUNT;
+
+       if (WARN_ONCE(queue == IWL_MLD_INVALID_QUEUE, "Invalid TX Queue id") ||
+           queue == IWL_MLD_INVALID_DROP_TX)
+               return -1;
+
+       if (unlikely(ieee80211_is_any_nullfunc(hdr->frame_control)))
+               return -1;
+
+       /* Handle probe responses before allocating the device TX command,
+        * so that the non-transmitting-AP early return cannot leak it.
+        */
+       if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
+               if (IWL_MLD_NON_TRANSMITTING_AP)
+                       return -1;
+
+               iwl_mld_probe_resp_set_noa(mld, skb);
+
+               /* adding the NoA attribute may have reallocated skb->head,
+                * so the cached header pointer must be refreshed
+                */
+               hdr = (void *)skb->data;
+       }
+
+       dev_tx_cmd = iwl_trans_alloc_tx_cmd(mld->trans);
+       if (unlikely(!dev_tx_cmd))
+               return -1;
+
+       iwl_mld_fill_tx_cmd(mld, skb, dev_tx_cmd, sta);
+
+       if (ieee80211_is_data(hdr->frame_control)) {
+               if (ieee80211_is_data_qos(hdr->frame_control))
+                       tid = ieee80211_get_tid(hdr);
+               else
+                       tid = IWL_TID_NON_QOS;
+       }
+
+       IWL_DEBUG_TX(mld, "TX TID:%d from Q:%d len %d\n",
+                    tid, queue, skb->len);
+
+       /* From now on, we cannot access info->control */
+       memset(&info->status, 0, sizeof(info->status));
+       memset(info->driver_data, 0, sizeof(info->driver_data));
+
+       info->driver_data[1] = dev_tx_cmd;
+
+       if (iwl_trans_tx(mld->trans, skb, dev_tx_cmd, queue))
+               goto err;
+
+       /* Update low-latency counter when a packet is queued instead
+        * of after TX, it makes sense for early low-latency detection
+        */
+       if (sta)
+               iwl_mld_low_latency_update_counters(mld, hdr, sta, 0);
+
+       return 0;
+
+err:
+       iwl_trans_free_tx_cmd(mld->trans, dev_tx_cmd);
+       IWL_DEBUG_TX(mld, "TX from Q:%d dropped\n", queue);
+       return -1;
+}
+
+#ifdef CONFIG_INET
+
+/* This function handles the segmentation of a large TSO packet into multiple
+ * MPDUs, ensuring that the resulting segments conform to AMSDU limits and
+ * constraints.
+ *
+ * The resulting skbs are queued on @mpdus_skbs; returns 0 on success or
+ * a negative error.
+ */
+static int iwl_mld_tx_tso_segment(struct iwl_mld *mld, struct sk_buff *skb,
+                                 struct ieee80211_sta *sta,
+                                 struct sk_buff_head *mpdus_skbs)
+{
+       struct ieee80211_hdr *hdr = (void *)skb->data;
+       netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
+       unsigned int mss = skb_shinfo(skb)->gso_size;
+       unsigned int num_subframes, tcp_payload_len, subf_len;
+       u16 snap_ip_tcp, pad, max_tid_amsdu_len;
+       u8 tid;
+
+       snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);
+
+       /* no A-MSDU without QoS or without a rate-control A-MSDU limit:
+        * segment into single-MSDU MPDUs
+        */
+       if (!ieee80211_is_data_qos(hdr->frame_control) ||
+           !sta->cur->max_rc_amsdu_len)
+               return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
+
+       /* Do not build AMSDU for IPv6 with extension headers.
+        * Ask stack to segment and checksum the generated MPDUs for us.
+        */
+       if (skb->protocol == htons(ETH_P_IPV6) &&
+           ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
+           IPPROTO_TCP) {
+               netdev_flags &= ~NETIF_F_CSUM_MASK;
+               return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
+       }
+
+       tid = ieee80211_get_tid(hdr);
+       if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+               return -EINVAL;
+
+       max_tid_amsdu_len = sta->cur->max_tid_amsdu_len[tid];
+       if (!max_tid_amsdu_len)
+               return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
+
+       /* Sub frame header + SNAP + IP header + TCP header + MSS */
+       subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
+       /* subframes are padded to a 4-byte boundary */
+       pad = (4 - subf_len) & 0x3;
+
+       /* If we have N subframes in the A-MSDU, then the A-MSDU's size is
+        * N * subf_len + (N - 1) * pad.
+        */
+       num_subframes = (max_tid_amsdu_len + pad) / (subf_len + pad);
+
+       if (sta->max_amsdu_subframes &&
+           num_subframes > sta->max_amsdu_subframes)
+               num_subframes = sta->max_amsdu_subframes;
+
+       tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+               tcp_hdrlen(skb) + skb->data_len;
+
+       /* Make sure we have enough TBs for the A-MSDU:
+        *      2 for each subframe
+        *      1 more for each fragment
+        *      1 more for the potential data in the header
+        */
+       if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
+           mld->trans->max_skb_frags)
+               num_subframes = 1;
+
+       if (num_subframes > 1)
+               *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+       /* This skb fits in one single A-MSDU */
+       if (tcp_payload_len <= num_subframes * mss) {
+               __skb_queue_tail(mpdus_skbs, skb);
+               return 0;
+       }
+
+       /* Trick the segmentation function to make it create SKBs that can fit
+        * into one A-MSDU.
+        */
+       return iwl_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skbs);
+}
+
+/* Manages TSO (TCP Segmentation Offload) packet transmission by segmenting
+ * large packets when necessary and transmitting each segment as MPDU.
+ *
+ * Returns 0 when all segments were consumed (even if a later segment
+ * failed, since the skbs were freed here); a non-zero return means the
+ * original skb was NOT consumed and the caller must free it.
+ */
+static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
+                         struct ieee80211_txq *txq)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct sk_buff *orig_skb = skb;
+       struct sk_buff_head mpdus_skbs;
+       unsigned int payload_len;
+       int ret;
+
+       if (WARN_ON(!txq || !txq->sta))
+               return -1;
+
+       payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+               tcp_hdrlen(skb) + skb->data_len;
+
+       /* payload fits in a single MSS - no segmentation needed */
+       if (payload_len <= skb_shinfo(skb)->gso_size)
+               return iwl_mld_tx_mpdu(mld, skb, txq);
+
+       if (!info->control.vif)
+               return -1;
+
+       __skb_queue_head_init(&mpdus_skbs);
+
+       ret = iwl_mld_tx_tso_segment(mld, skb, txq->sta, &mpdus_skbs);
+       if (ret)
+               return ret;
+
+       WARN_ON(skb_queue_empty(&mpdus_skbs));
+
+       while (!skb_queue_empty(&mpdus_skbs)) {
+               skb = __skb_dequeue(&mpdus_skbs);
+
+               ret = iwl_mld_tx_mpdu(mld, skb, txq);
+               if (!ret)
+                       continue;
+
+               /* Free skbs created as part of TSO logic that have not yet
+                * been dequeued
+                */
+               __skb_queue_purge(&mpdus_skbs);
+
+               /* skb here is not necessarily same as skb that entered
+                * this method, so free it explicitly.
+                */
+               if (skb == orig_skb)
+                       ieee80211_free_txskb(mld->hw, skb);
+               else
+                       kfree_skb(skb);
+
+               /* there was error, but we consumed skb one way or
+                * another, so return 0
+                */
+               return 0;
+       }
+
+       return 0;
+}
+#else
+static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
+                         struct ieee80211_txq *txq)
+{
+       /* Impossible to get TSO without CONFIG_INET - mac80211 should
+        * never hand us a GSO skb in this configuration
+        */
+       WARN_ON(1);
+
+       return -1;
+}
+#endif /* CONFIG_INET */
+
+/* Transmit an skb: GSO frames go through the TSO path, everything else
+ * is sent as a single MPDU. The skb is always consumed - on failure it
+ * is freed here.
+ */
+void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
+                   struct ieee80211_txq *txq)
+{
+       int ret;
+
+       ret = skb_is_gso(skb) ? iwl_mld_tx_tso(mld, skb, txq) :
+                               iwl_mld_tx_mpdu(mld, skb, txq);
+
+       if (unlikely(ret))
+               ieee80211_free_txskb(mld->hw, skb);
+}
+
+/* Drain a mac80211 TXQ, making sure only a single thread transmits from
+ * a given queue at a time (see the tx_request logic below).
+ */
+void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+       struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+       struct sk_buff *skb = NULL;
+       u8 zero_addr[ETH_ALEN] = {};
+
+       /*
+        * No need for threads to be pending here, they can leave the first
+        * taker all the work.
+        *
+        * mld_txq->tx_request logic:
+        *
+        * If 0, no one is currently TXing, set to 1 to indicate current thread
+        * will now start TX and other threads should quit.
+        *
+        * If 1, another thread is currently TXing, set to 2 to indicate to
+        * that thread that there was another request. Since that request may
+        * have raced with the check whether the queue is empty, the TXing
+        * thread should check the queue's status one more time before leaving.
+        * This check is done in order to not leave any TX hanging in the queue
+        * until the next TX invocation (which may not even happen).
+        *
+        * If 2, another thread is currently TXing, and it will already double
+        * check the queue, so do nothing.
+        */
+       if (atomic_fetch_add_unless(&mld_txq->tx_request, 1, 2))
+               return;
+
+       rcu_read_lock();
+       do {
+               /* stop dequeuing when the queue is marked full */
+               while (likely(!mld_txq->status.stop_full) &&
+                      (skb = ieee80211_tx_dequeue(mld->hw, txq)))
+                       iwl_mld_tx_skb(mld, skb, txq);
+       } while (atomic_dec_return(&mld_txq->tx_request));
+
+       IWL_DEBUG_TX(mld, "TXQ of sta %pM tid %d is now empty\n",
+                    txq->sta ? txq->sta->addr : zero_addr, txq->tid);
+
+       rcu_read_unlock();
+}
+
+/* Translate a firmware rate_n_flags value into the mac80211 TX-rate
+ * (bandwidth/SGI flags and rate index) in info->status.rates[0], used
+ * for TX status reporting.
+ */
+static void iwl_mld_hwrate_to_tx_rate(u32 rate_n_flags,
+                                     struct ieee80211_tx_info *info)
+{
+       enum nl80211_band band = info->band;
+       struct ieee80211_tx_rate *tx_rate = &info->status.rates[0];
+       u32 sgi = rate_n_flags & RATE_MCS_SGI_MSK;
+       u32 chan_width = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+       u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+       if (sgi)
+               tx_rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+       switch (chan_width) {
+       case RATE_MCS_CHAN_WIDTH_20:
+               break;
+       case RATE_MCS_CHAN_WIDTH_40:
+               tx_rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+               break;
+       case RATE_MCS_CHAN_WIDTH_80:
+               tx_rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+               break;
+       case RATE_MCS_CHAN_WIDTH_160:
+               tx_rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
+               break;
+       default:
+               break;
+       }
+
+       switch (format) {
+       case RATE_MCS_HT_MSK:
+               tx_rate->flags |= IEEE80211_TX_RC_MCS;
+               tx_rate->idx = RATE_HT_MCS_INDEX(rate_n_flags);
+               break;
+       case RATE_MCS_VHT_MSK:
+               ieee80211_rate_set_vht(tx_rate,
+                                      rate_n_flags & RATE_MCS_CODE_MSK,
+                                      FIELD_GET(RATE_MCS_NSS_MSK,
+                                                rate_n_flags) + 1);
+               tx_rate->flags |= IEEE80211_TX_RC_VHT_MCS;
+               break;
+       case RATE_MCS_HE_MSK:
+               /* mac80211 cannot do this without ieee80211_tx_status_ext()
+                * but it only matters for radiotap
+                */
+               tx_rate->idx = 0;
+               break;
+       default:
+               /* legacy (CCK/OFDM) rate */
+               tx_rate->idx =
+                       iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
+                                                             band);
+               break;
+       }
+}
+
+/* Handle the firmware TX response notification for a single frame:
+ * reclaim the transmitted skb(s) up to the reported SSN, report their
+ * status to mac80211, and update antenna toggling and per-station MPDU
+ * counters.
+ */
+void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
+                                 struct iwl_rx_packet *pkt)
+{
+       struct iwl_tx_resp *tx_resp = (void *)pkt->data;
+       int txq_id = le16_to_cpu(tx_resp->tx_queue);
+       struct agg_tx_status *agg_status = &tx_resp->status;
+       u32 status = le16_to_cpu(agg_status->status);
+       u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+       size_t notif_size = sizeof(*tx_resp) + sizeof(u32);
+       int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid);
+       int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid);
+       struct ieee80211_link_sta *link_sta;
+       struct iwl_mld_sta *mld_sta;
+       u16 ssn;
+       struct sk_buff_head skbs;
+       u8 skb_freed = 0;
+       bool mgmt = false;
+       bool tx_failure = (status & TX_STATUS_MSK) != TX_STATUS_SUCCESS;
+
+       if (IWL_FW_CHECK(mld, tx_resp->frame_count != 1,
+                        "Invalid tx_resp notif frame_count (%d)\n",
+                        tx_resp->frame_count))
+               return;
+
+       /* validate the size of the variable part of the notif */
+       if (IWL_FW_CHECK(mld, notif_size != pkt_len,
+                        "Invalid tx_resp notif size (expected=%zu got=%u)\n",
+                        notif_size, pkt_len))
+               return;
+
+       /* the SSN follows the per-frame status array in the notif */
+       ssn = le32_to_cpup((__le32 *)agg_status +
+                          tx_resp->frame_count) & 0xFFFF;
+
+       __skb_queue_head_init(&skbs);
+
+       /* we can free until ssn % q.n_bd not inclusive */
+       iwl_trans_reclaim(mld->trans, txq_id, ssn, &skbs, false);
+
+       while (!skb_queue_empty(&skbs)) {
+               struct sk_buff *skb = __skb_dequeue(&skbs);
+               struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+               struct ieee80211_hdr *hdr = (void *)skb->data;
+
+               skb_freed++;
+
+               /* driver_data[1] holds the TX command set at TX time */
+               iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
+
+               memset(&info->status, 0, sizeof(info->status));
+
+               info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
+
+               /* inform mac80211 about what happened with the frame */
+               switch (status & TX_STATUS_MSK) {
+               case TX_STATUS_SUCCESS:
+               case TX_STATUS_DIRECT_DONE:
+                       info->flags |= IEEE80211_TX_STAT_ACK;
+                       break;
+               default:
+                       break;
+               }
+
+               /* If we are freeing multiple frames, mark all the frames
+                * but the first one as acked, since they were acknowledged
+                * before
+                */
+               if (skb_freed > 1)
+                       info->flags |= IEEE80211_TX_STAT_ACK;
+
+               if (tx_failure) {
+                       enum iwl_fw_ini_time_point tp =
+                               IWL_FW_INI_TIME_POINT_TX_FAILED;
+
+                       if (ieee80211_is_action(hdr->frame_control))
+                               tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
+                       else if (ieee80211_is_mgmt(hdr->frame_control))
+                               mgmt = true;
+
+                       iwl_dbg_tlv_time_point(&mld->fwrt, tp, NULL);
+               }
+
+               iwl_mld_hwrate_to_tx_rate(le32_to_cpu(tx_resp->initial_rate),
+                                         info);
+
+               if (likely(!iwl_mld_time_sync_frame(mld, skb, hdr->addr1)))
+                       ieee80211_tx_status_skb(mld->hw, skb);
+       }
+
+       IWL_DEBUG_TX_REPLY(mld,
+                          "TXQ %d status 0x%08x ssn=%d initial_rate 0x%x retries %d\n",
+                          txq_id, status, ssn, le32_to_cpu(tx_resp->initial_rate),
+                          tx_resp->failure_frame);
+
+       /* a failed mgmt frame triggers a management antenna toggle */
+       if (tx_failure && mgmt)
+               iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant);
+
+       if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
+                        "Got invalid sta_id (%d)\n", sta_id))
+               return;
+
+       rcu_read_lock();
+
+       link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+       if (!link_sta) {
+               /* This can happen if the TX cmd was sent before pre_rcu_remove
+                * but the TX response was received after
+                */
+               IWL_DEBUG_TX_REPLY(mld,
+                                  "Got valid sta_id (%d) but sta is NULL\n",
+                                  sta_id);
+               goto out;
+       }
+
+       if (IS_ERR(link_sta))
+               goto out;
+
+       mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+
+       if (tx_failure && mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)
+               iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant);
+
+       if (tid < IWL_MAX_TID_COUNT)
+               iwl_mld_count_mpdu_tx(link_sta, 1);
+
+out:
+       rcu_read_unlock();
+}
+
+/* Reclaim frames from a firmware TX queue up to @index and report their
+ * status to mac80211. During a flush, frames are reported as not acked.
+ */
+static void iwl_mld_tx_reclaim_txq(struct iwl_mld *mld, int txq, int index,
+                                  bool in_flush)
+{
+       struct sk_buff_head reclaimed_skbs;
+       struct sk_buff *skb;
+
+       __skb_queue_head_init(&reclaimed_skbs);
+
+       iwl_trans_reclaim(mld->trans, txq, index, &reclaimed_skbs, in_flush);
+
+       while ((skb = __skb_dequeue(&reclaimed_skbs)) != NULL) {
+               struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+               iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
+
+               memset(&info->status, 0, sizeof(info->status));
+
+               /* Packet was transmitted successfully, failures come as single
+                * frames because before failing a frame the firmware transmits
+                * it without aggregation at least once.
+                */
+               if (in_flush)
+                       info->flags &= ~IEEE80211_TX_STAT_ACK;
+               else
+                       info->flags |= IEEE80211_TX_STAT_ACK;
+
+               ieee80211_tx_status_skb(mld->hw, skb);
+       }
+}
+
+/* Flush all TX queues of the given firmware station: send TXPATH_FLUSH
+ * for all TIDs and reclaim the frames on every queue the firmware
+ * reports as flushed. Returns 0 on success or a negative error.
+ */
+int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id)
+{
+       struct iwl_tx_path_flush_cmd_rsp *rsp;
+       struct iwl_tx_path_flush_cmd flush_cmd = {
+               .sta_id = cpu_to_le32(fw_sta_id),
+               .tid_mask = cpu_to_le16(0xffff),
+       };
+       struct iwl_host_cmd cmd = {
+               .id = TXPATH_FLUSH,
+               .len = { sizeof(flush_cmd), },
+               .data = { &flush_cmd, },
+               .flags = CMD_WANT_SKB,
+       };
+       int ret, num_flushed_queues;
+       u32 resp_len;
+
+       IWL_DEBUG_TX_QUEUES(mld, "flush for sta id %d tid mask 0x%x\n",
+                           fw_sta_id, 0xffff);
+
+       ret = iwl_mld_send_cmd(mld, &cmd);
+       if (ret) {
+               IWL_ERR(mld, "Failed to send flush command (%d)\n", ret);
+               return ret;
+       }
+
+       /* validate the response before trusting its contents */
+       resp_len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+       if (IWL_FW_CHECK(mld, resp_len != sizeof(*rsp),
+                        "Invalid TXPATH_FLUSH response len: %d\n",
+                        resp_len)) {
+               ret = -EIO;
+               goto free_rsp;
+       }
+
+       rsp = (void *)cmd.resp_pkt->data;
+
+       if (IWL_FW_CHECK(mld, le16_to_cpu(rsp->sta_id) != fw_sta_id,
+                        "sta_id %d != rsp_sta_id %d\n", fw_sta_id,
+                        le16_to_cpu(rsp->sta_id))) {
+               ret = -EIO;
+               goto free_rsp;
+       }
+
+       num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
+       if (IWL_FW_CHECK(mld, num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
+                        "num_flushed_queues %d\n", num_flushed_queues)) {
+               ret = -EIO;
+               goto free_rsp;
+       }
+
+       for (int i = 0; i < num_flushed_queues; i++) {
+               struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
+               int read_after = le16_to_cpu(queue_info->read_after_flush);
+               int txq_id = le16_to_cpu(queue_info->queue_num);
+
+               if (IWL_FW_CHECK(mld,
+                                txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
+                                "Invalid txq id %d\n", txq_id))
+                       continue;
+
+               IWL_DEBUG_TX_QUEUES(mld,
+                                   "tid %d txq_id %d read-before %d read-after %d\n",
+                                   le16_to_cpu(queue_info->tid), txq_id,
+                                   le16_to_cpu(queue_info->read_before_flush),
+                                   read_after);
+
+               iwl_mld_tx_reclaim_txq(mld, txq_id, read_after, true);
+       }
+
+free_rsp:
+       iwl_free_resp(&cmd);
+       return ret;
+}
+
+/* Make sure the queue backing @txq is allocated, allocating it
+ * synchronously here if needed instead of leaving it to the deferred
+ * add_txqs work.
+ *
+ * Returns 0 on success or the queue allocation error.
+ */
+int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+       struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+       int ret;
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       if (likely(mld_txq->status.allocated))
+               return 0;
+
+       ret = iwl_mld_add_txq(mld, txq);
+
+       /* The txq may already be pending on the deferred-add list;
+        * remove it so the worker won't try to add it again.
+        */
+       spin_lock_bh(&mld->add_txqs_lock);
+       if (!list_empty(&mld_txq->list))
+               list_del_init(&mld_txq->list);
+       spin_unlock_bh(&mld->add_txqs_lock);
+
+       return ret;
+}
+
+/* Update the FW station mask (@old_sta_mask -> @new_sta_mask) on every
+ * allocated TX queue of @sta via SCD_QUEUE_CONFIG_CMD(MODIFY).
+ * NOTE(review): presumably used when the set of links/stations behind the
+ * queues changes - confirm against the callers.
+ *
+ * Returns 0 on success or the first command failure.
+ */
+int iwl_mld_update_sta_txqs(struct iwl_mld *mld,
+                           struct ieee80211_sta *sta,
+                           u32 old_sta_mask, u32 new_sta_mask)
+{
+       struct iwl_scd_queue_cfg_cmd cmd = {
+               .operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY),
+               .u.modify.old_sta_mask = cpu_to_le32(old_sta_mask),
+               .u.modify.new_sta_mask = cpu_to_le32(new_sta_mask),
+       };
+
+       lockdep_assert_wiphy(mld->wiphy);
+
+       for (int tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) {
+               /* mac80211 keeps the management txq at sta->txq[IEEE80211_NUM_TIDS],
+                * while the driver iterates it as tid == IWL_MAX_TID_COUNT
+                */
+               struct ieee80211_txq *txq =
+                       sta->txq[tid != IWL_MAX_TID_COUNT ?
+                                       tid : IEEE80211_NUM_TIDS];
+               struct iwl_mld_txq *mld_txq =
+                       iwl_mld_txq_from_mac80211(txq);
+               int ret;
+
+               if (!mld_txq->status.allocated)
+                       continue;
+
+               /* The FW identifies the management queue by IWL_MGMT_TID */
+               if (tid == IWL_MAX_TID_COUNT)
+                       cmd.u.modify.tid = cpu_to_le32(IWL_MGMT_TID);
+               else
+                       cmd.u.modify.tid = cpu_to_le32(tid);
+
+               ret = iwl_mld_send_cmd_pdu(mld,
+                                          WIDE_ID(DATA_PATH_GROUP,
+                                                  SCD_QUEUE_CONFIG_CMD),
+                                          &cmd);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Handle a compressed (multi-queue) BlockAck notification from the
+ * firmware: reclaim the acknowledged frames on each reported queue and
+ * update the TX MPDU counters of the transmitting station.
+ */
+void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld,
+                                       struct iwl_rx_packet *pkt)
+{
+       struct iwl_compressed_ba_notif *ba_res = (void *)pkt->data;
+       u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+       u16 tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
+       u8 sta_id = ba_res->sta_id;
+       struct ieee80211_link_sta *link_sta;
+
+       if (!tfd_cnt)
+               return;
+
+       /* The notification carries tfd_cnt trailing per-queue entries;
+        * make sure the packet is actually big enough to hold them.
+        */
+       if (IWL_FW_CHECK(mld, struct_size(ba_res, tfd, tfd_cnt) > pkt_len,
+                        "Short BA notif (tfd_cnt=%d, size:0x%x)\n",
+                        tfd_cnt, pkt_len))
+               return;
+
+       IWL_DEBUG_TX_REPLY(mld,
+                          "BA notif received from sta_id=%d, flags=0x%x, sent:%d, acked:%d\n",
+                          sta_id, le32_to_cpu(ba_res->flags),
+                          le16_to_cpu(ba_res->txed),
+                          le16_to_cpu(ba_res->done));
+
+       /* Reclaim (as acked) up to the reported TFD index on each queue */
+       for (int i = 0; i < tfd_cnt; i++) {
+               struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
+               int txq_id = le16_to_cpu(ba_tfd->q_num);
+               int index = le16_to_cpu(ba_tfd->tfd_index);
+
+               if (IWL_FW_CHECK(mld,
+                                txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
+                                "Invalid txq id %d\n", txq_id))
+                       continue;
+
+               iwl_mld_tx_reclaim_txq(mld, txq_id, index, false);
+       }
+
+       if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
+                        "Got invalid sta_id (%d)\n", sta_id))
+               return;
+
+       /* Account the transmitted MPDUs on the station's link */
+       rcu_read_lock();
+
+       link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+       if (IWL_FW_CHECK(mld, IS_ERR_OR_NULL(link_sta),
+                        "Got valid sta_id (%d) but link_sta is NULL\n",
+                        sta_id))
+               goto out;
+
+       iwl_mld_count_mpdu_tx(link_sta, le16_to_cpu(ba_res->txed));
+out:
+       rcu_read_unlock();
+}
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_tx_h__
+#define __iwl_mld_tx_h__
+
+#include "mld.h"
+
+#define IWL_MLD_INVALID_QUEUE          0xFFFF
+#define IWL_MLD_INVALID_DROP_TX                0xFFFE
+
+/**
+ * struct iwl_mld_txq - TX Queue data
+ *
+ * Stored in the driver-private area of a mac80211 TX queue
+ * (see iwl_mld_txq_from_mac80211()).
+ *
+ * @fw_id: the fw id of this txq. Only valid when &status.allocated is true.
+ * @status: bitmap of the txq status
+ * @status.allocated: Indicates that the queue was allocated.
+ * @status.stop_full: Indicates that the queue is full and should stop TXing.
+ * @list: list pointer, for &mld::txqs_to_add
+ * @tx_request: makes sure that if there are multiple threads that want to tx
+ *     from this txq, only one of them will do all the TXing.
+ *     This is needed to avoid spinning the trans txq lock, which is expensive
+ */
+struct iwl_mld_txq {
+       /* Add here fields that need clean up on restart */
+       struct_group(zeroed_on_hw_restart,
+               u16 fw_id;
+               struct {
+                       u8 allocated:1;
+                       u8 stop_full:1;
+               } status;
+       );
+       struct list_head list;
+       atomic_t tx_request;
+       /* And here fields that survive a fw restart */
+};
+
+/* Initialize the driver-internal state of @mld_txq: its pending-add list
+ * linkage and the TX request counter.
+ */
+static inline void iwl_mld_init_txq(struct iwl_mld_txq *mld_txq)
+{
+       INIT_LIST_HEAD(&mld_txq->list);
+       atomic_set(&mld_txq->tx_request, 0);
+}
+
+/* Return the iwl_mld_txq stored in the mac80211 txq's drv_priv area */
+static inline struct iwl_mld_txq *
+iwl_mld_txq_from_mac80211(struct ieee80211_txq *txq)
+{
+       return (void *)txq->drv_priv;
+}
+
+void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk);
+void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq);
+void iwl_mld_add_txq_list(struct iwl_mld *mld);
+void
+iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id);
+void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq);
+void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
+                                 struct iwl_rx_packet *pkt);
+int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id);
+int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq);
+
+int iwl_mld_update_sta_txqs(struct iwl_mld *mld,
+                           struct ieee80211_sta *sta,
+                           u32 old_sta_mask, u32 new_sta_mask);
+
+void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld,
+                                       struct iwl_rx_packet *pkt);
+void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant);
+
+u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
+                          struct ieee80211_tx_info *info,
+                          struct ieee80211_vif *vif);
+
+void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
+                   struct ieee80211_txq *txq);
+
+#endif /* __iwl_mld_tx_h__ */
 
 /* Ma devices */
        {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
        {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
-
+#endif /* CONFIG_IWLMVM */
+#if IS_ENABLED(CONFIG_IWLMLD)
 /* Bz devices */
        {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)},
        {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
 
 /* Dr devices */
        {IWL_PCI_DEVICE(0x272F, PCI_ANY_ID, iwl_dr_trans_cfg)},
-#endif /* CONFIG_IWLMVM */
+#endif /* CONFIG_IWLMLD */
 
        {0}
 };
                      80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
                      iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
 
+#endif /* CONFIG_IWLMVM */
+#if IS_ENABLED(CONFIG_IWLMLD)
 /* Bz */
        _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
                      IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
                      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
                      IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
                      iwl_cfg_br, iwl_br_name),
-#endif /* CONFIG_IWLMVM */
+#endif /* CONFIG_IWLMLD */
 };
 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);