return (ag->num_started == ag->num_devices);
 }
 
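+/* Kick the QMI host cap of the next partner device in the hw group so its
+ * bring-up can continue once this device's firmware is ready.
+ */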
+static void ath12k_core_trigger_partner(struct ath12k_base *ab)
+{
+       struct ath12k_hw_group *ag = ab->ag;
+       struct ath12k_base *partner_ab;
+       bool found = false;
+       int i;
+
+       for (i = 0; i < ag->num_devices; i++) {
+               partner_ab = ag->ab[i];
+               if (!partner_ab)
+                       continue;
+
+               if (found)
+                       ath12k_qmi_trigger_host_cap(partner_ab);
+
+               found = (partner_ab == ab);
+       }
+}
+
 int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
 {
        struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
                        goto err_core_stop;
                }
                ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
+       } else {
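+               /* Group is not ready to start yet; let the next partner
+                * device carry on with its host cap exchange.
+                */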
+               ath12k_core_trigger_partner(ab);
        }
 
        mutex_unlock(&ag->mutex);
 
        return 0;
 }
 
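+/* Unblock the device's QMI event queue, if it was blocked, and queue a
+ * HOST_CAP event so the host capability request is sent from the event
+ * worker.
+ */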
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab)
+{
+       struct ath12k_qmi *qmi = &ab->qmi;
+
+       spin_lock(&qmi->event_lock);
+
+       if (ath12k_qmi_get_event_block(qmi))
+               ath12k_qmi_set_event_block(qmi, false);
+
+       spin_unlock(&qmi->event_lock);
+
+       ath12k_dbg(ab, ATH12K_DBG_QMI, "trigger host cap for device id %d\n",
+                  ab->device_id);
+
+       ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_HOST_CAP, NULL);
+}
+
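+/* Return true once every device slot in the group is populated and each
+ * device has a valid radio count, i.e. the group is ready for the host cap
+ * exchange.
+ */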
+static bool ath12k_qmi_hw_group_host_cap_ready(struct ath12k_hw_group *ag)
+{
+       struct ath12k_base *ab;
+       int i;
+
+       for (i = 0; i < ag->num_devices; i++) {
+               ab = ag->ab[i];
+
+               if (!ab || ab->qmi.num_radios == U8_MAX)
+                       return false;
+       }
+
+       return true;
+}
+
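+/* Return the first device in the group whose QMI event processing is still
+ * blocked waiting for the host cap trigger, or NULL if none is blocked.
+ */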
+static struct ath12k_base *ath12k_qmi_hw_group_find_blocked(struct ath12k_hw_group *ag)
+{
+       struct ath12k_base *ab;
+       int i;
+
+       lockdep_assert_held(&ag->mutex);
+
+       for (i = 0; i < ag->num_devices; i++) {
+               ab = ag->ab[i];
+               if (!ab)
+                       continue;
+
+               spin_lock(&ab->qmi.event_lock);
+
+               if (ath12k_qmi_get_event_block(&ab->qmi)) {
+                       spin_unlock(&ab->qmi.event_lock);
+                       return ab;
+               }
+
+               spin_unlock(&ab->qmi.event_lock);
+       }
+
+       return NULL;
+}
+
 /* clang stack usage explodes if this is inlined */
 static noinline_for_stack
 int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
 {
-       struct ath12k_base *ab = qmi->ab;
+       struct ath12k_base *ab = qmi->ab, *block_ab;
+       struct ath12k_hw_group *ag = ab->ag;
        int ret;
 
        ath12k_qmi_phy_cap_send(ab);
                return ret;
        }
 
-       ret = ath12k_qmi_host_cap_send(ab);
-       if (ret < 0) {
-               ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret);
-               return ret;
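+       /* Defer the host cap request: block this device's QMI events until
+        * every device in the hw group has reported its capabilities.
+        */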
+       spin_lock(&qmi->event_lock);
+
+       ath12k_qmi_set_event_block(qmi, true);
+
+       spin_unlock(&qmi->event_lock);
+
+       mutex_lock(&ag->mutex);
+
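+       /* If the whole group is now ready, unblock the first waiting device
+        * so the host cap sequence can proceed.
+        */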
+       if (ath12k_qmi_hw_group_host_cap_ready(ag)) {
+               block_ab = ath12k_qmi_hw_group_find_blocked(ag);
+               if (block_ab)
+                       ath12k_qmi_trigger_host_cap(block_ab);
        }
 
+       mutex_unlock(&ag->mutex);
+
        return ret;
 }
 
        .del_server = ath12k_qmi_ops_del_server,
 };
 
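+/* Handle ATH12K_QMI_EVENT_HOST_CAP: send the deferred host capability
+ * request to firmware.
+ */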
+static int ath12k_qmi_event_host_cap(struct ath12k_qmi *qmi)
+{
+       struct ath12k_base *ab = qmi->ab;
+       int ret;
+
+       ret = ath12k_qmi_host_cap_send(ab);
+       if (ret < 0) {
+               ath12k_warn(ab, "failed to send qmi host cap for device id %d: %d\n",
+                           ab->device_id, ret);
+               return ret;
+       }
+
+       return ret;
+}
+
 static void ath12k_qmi_driver_event_work(struct work_struct *work)
 {
        struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi,
                                        &ab->dev_flags);
 
                        break;
+               case ATH12K_QMI_EVENT_HOST_CAP:
+                       ret = ath12k_qmi_event_host_cap(qmi);
+                       if (ret < 0)
+                               set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+                       break;
                default:
                        ath12k_warn(ab, "invalid event type: %d", event->type);
                        break;
 
        ATH12K_QMI_EVENT_FORCE_FW_ASSERT,
        ATH12K_QMI_EVENT_POWER_UP,
        ATH12K_QMI_EVENT_POWER_DOWN,
+       ATH12K_QMI_EVENT_HOST_CAP,
        ATH12K_QMI_EVENT_MAX,
 };
 
        u32 target_mem_mode;
        bool target_mem_delayed;
        u8 cal_done;
+
+       /* protected by struct ath12k_qmi::event_lock */
+       bool block_event;
+
        u8 num_radios;
        struct target_info target;
        struct m3_mem_region m3_mem;
        struct qmi_response_type_v01 resp;
 };
 
+static inline void ath12k_qmi_set_event_block(struct ath12k_qmi *qmi, bool block)
+{
+       lockdep_assert_held(&qmi->event_lock);
+
+       qmi->block_event = block;
+}
+
+static inline bool ath12k_qmi_get_event_block(struct ath12k_qmi *qmi)
+{
+       lockdep_assert_held(&qmi->event_lock);
+
+       return qmi->block_event;
+}
+
 int ath12k_qmi_firmware_start(struct ath12k_base *ab,
                              u32 mode);
 void ath12k_qmi_firmware_stop(struct ath12k_base *ab);
 void ath12k_qmi_deinit_service(struct ath12k_base *ab);
 int ath12k_qmi_init_service(struct ath12k_base *ab);
 void ath12k_qmi_free_resource(struct ath12k_base *ab);
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab);
 
 #endif