*     it will be called from a worker with mvm->mutex held.
  * @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the
  *     mutex itself, it will be called from a worker without mvm->mutex held.
+ * @RX_HANDLER_ASYNC_LOCKED_WIPHY: in case the handler needs to hold the
+ *     wiphy lock and mvm->mutex, it will be run from a wiphy_work on the
+ *     wiphy work queue infrastructure instead of the regular work queue.
  */
 enum iwl_rx_handler_context {
        RX_HANDLER_SYNC,
        RX_HANDLER_ASYNC_LOCKED,
        RX_HANDLER_ASYNC_UNLOCKED,
+       /*
+        * NOTE: these values are used as BIT() positions in the u8
+        * contexts bitmap handled by iwl_mvm_async_handlers_by_context(),
+        * so the enum must not grow beyond value 7.
+        */
+       RX_HANDLER_ASYNC_LOCKED_WIPHY,
 };
 
 /**
 
 /* this forward declaration can avoid to export the function */
 static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+                                           struct wiphy_work *work);
 
 static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
 {
        INIT_LIST_HEAD(&mvm->add_stream_txqs);
        spin_lock_init(&mvm->add_stream_lock);
 
+       wiphy_work_init(&mvm->async_handlers_wiphy_wk,
+                       iwl_mvm_async_handlers_wiphy_wk);
        init_waitqueue_head(&mvm->rx_sync_waitq);
 
        mvm->queue_sync_state = 0;
        spin_unlock_bh(&mvm->async_handlers_lock);
 }
 
-static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+/*
+ * This function receives a bitmap of rx async handler contexts
+ * (&iwl_rx_handler_context) to handle, and runs only them
+ */
+static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm,
+                                             u8 contexts)
 {
-       struct iwl_mvm *mvm =
-               container_of(wk, struct iwl_mvm, async_handlers_wk);
        struct iwl_async_handler_entry *entry, *tmp;
        LIST_HEAD(local_list);
 
-       /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
-
        /*
-        * Sync with Rx path with a lock. Remove all the entries from this list,
-        * add them to a local one (lock free), and then handle them.
+        * Sync with Rx path with a lock. Remove all the entries of the
+        * wanted contexts from this list, add them to a local one (lock free),
+        * and then handle them.
         */
        spin_lock_bh(&mvm->async_handlers_lock);
-       list_splice_init(&mvm->async_handlers_list, &local_list);
+       list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+               if (!(BIT(entry->context) & contexts))
+                       continue;
+               list_del(&entry->list);
+               list_add_tail(&entry->list, &local_list);
+       }
        spin_unlock_bh(&mvm->async_handlers_lock);
 
        list_for_each_entry_safe(entry, tmp, &local_list, list) {
-               if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+               if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
                        mutex_lock(&mvm->mutex);
                entry->fn(mvm, &entry->rxb);
                iwl_free_rxb(&entry->rxb);
                list_del(&entry->list);
-               if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+               if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
                        mutex_unlock(&mvm->mutex);
                kfree(entry);
        }
 }
 
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+                                           struct wiphy_work *wk)
+{
+       struct iwl_mvm *mvm =
+               container_of(wk, struct iwl_mvm, async_handlers_wiphy_wk);
+       u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY);
+
+       iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+       struct iwl_mvm *mvm =
+               container_of(wk, struct iwl_mvm, async_handlers_wk);
+       u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED) |
+                     BIT(RX_HANDLER_ASYNC_UNLOCKED);
+
+       iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
 static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
                                            struct iwl_rx_packet *pkt)
 {
                spin_lock(&mvm->async_handlers_lock);
                list_add_tail(&entry->list, &mvm->async_handlers_list);
                spin_unlock(&mvm->async_handlers_lock);
-               schedule_work(&mvm->async_handlers_wk);
+               if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY)
+                       wiphy_work_queue(mvm->hw->wiphy,
+                                        &mvm->async_handlers_wiphy_wk);
+               else
+                       schedule_work(&mvm->async_handlers_wk);
                break;
        }
 }