init_waitqueue_head(&ar->htt.empty_tx_wq);
        init_waitqueue_head(&ar->wmi.tx_credits_wq);
 
+       /* HL RX indications are queued here and drained from NAPI poll */
+       skb_queue_head_init(&ar->htt.rx_indication_head);
+
        init_completion(&ar->offchan_tx_completed);
        INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
        skb_queue_head_init(&ar->offchan_tx_queue);
 
        struct ath10k *ar;
        enum ath10k_htc_ep_id eid;
 
+       /* HTT RX_IND messages from HL devices, deferred to the NAPI poll */
+       struct sk_buff_head rx_indication_head;
+
        u8 target_version_major;
        u8 target_version_minor;
        struct completion target_version_received;
 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
                                             struct sk_buff *skb);
 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
+int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget);
 void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
 void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
 #endif
 
                memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
        }
 
-       ieee80211_rx_ni(ar->hw, skb);
+       /* deliver via NAPI when this bus has registered a NAPI context */
+       if (ar->napi.dev)
+               ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
+       else
+               ieee80211_rx_ni(ar->hw, skb);
 
        /* We have delivered the skb to the upper layers (mac80211) so we
         * must not free it.
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_IND:
-               if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
-                       return ath10k_htt_rx_proc_rx_ind_hl(htt,
-                                                           &resp->rx_ind_hl,
-                                                           skb,
-                                                           HTT_RX_PN_CHECK,
-                                                           HTT_RX_NON_TKIP_MIC);
-               else
+               if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
                        ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
+               } else {
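+                       /* defer the indication to the NAPI poll; returning
+                        * false tells the caller not to free the skb
+                        */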
+                       skb_queue_tail(&htt->rx_indication_head, skb);
+                       return false;
+               }
                break;
        case HTT_T2H_MSG_TYPE_PEER_MAP: {
                struct htt_peer_map_event ev = {
        return quota;
 }
 
+/* Drain up to budget RX indications queued by the HTT T2H handler */
+int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
+{
+       struct htt_resp *resp;
+       struct ath10k_htt *htt = &ar->htt;
+       struct sk_buff *skb;
+       bool release;
+       int quota;
+
+       for (quota = 0; quota < budget; quota++) {
+               skb = skb_dequeue(&htt->rx_indication_head);
+               if (!skb)
+                       break;
+
+               resp = (struct htt_resp *)skb->data;
+
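+               /* the handler delivers frames to mac80211 and reports
+                * whether the indication buffer can be freed
+                */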
+               release = ath10k_htt_rx_proc_rx_ind_hl(htt,
+                                                      &resp->rx_ind_hl,
+                                                      skb,
+                                                      HTT_RX_PN_CHECK,
+                                                      HTT_RX_NON_TKIP_MIC);
+
+               if (release)
+                       dev_kfree_skb_any(skb);
+
+               ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count: %d\n",
+                          skb_queue_len(&htt->rx_indication_head));
+       }
+       return quota;
+}
+EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
+
 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
 {
        struct ath10k_htt *htt = &ar->htt;
 
                ep = &ar->htc.endpoint[cb->eid];
                ep->ep_ops.ep_rx_complete(ar, skb);
        }
+
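+       /* kick the NAPI poll once the core is registered; queued
+        * indications are processed by ath10k_htt_rx_hl_indication()
+        */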
+       if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
+               napi_schedule(&ar->napi);
 }
 
 static void ath10k_sdio_write_async_work(struct work_struct *work)
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        int ret;
 
+       /* enable NAPI before HIF interrupts can queue RX indications */
+       napi_enable(&ar->napi);
+
        /* Sleep 20 ms before HIF interrupts are disabled.
         * This will give target plenty of time to process the BMI done
         * request before interrupts are disabled.
        }
 
        spin_unlock_bh(&ar_sdio->wr_async_lock);
+
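+       /* wait for any in-flight poll to finish before disabling */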
+       napi_synchronize(&ar->napi);
+       napi_disable(&ar->napi);
 }
 
 #ifdef CONFIG_PM
 
 #endif /* CONFIG_PM_SLEEP */
 
+/* NAPI poll handler: drains the RX indication queue for this device */
+static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
+{
+       struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+       int done;
+
+       done = ath10k_htt_rx_hl_indication(ar, budget);
+       ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget: %d\n", done, budget);
+
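+       /* under budget means the queue is drained; leave polling mode */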
+       if (done < budget)
+               napi_complete_done(ctx, done);
+
+       return done;
+}
+
 static int ath10k_sdio_probe(struct sdio_func *func,
                             const struct sdio_device_id *id)
 {
                return -ENOMEM;
        }
 
+       /* the poll handler processes RX indications queued by the HTT layer */
+       netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
+                      ATH10K_NAPI_BUDGET);
+
        ath10k_dbg(ar, ATH10K_DBG_BOOT,
                   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
                   func->num, func->vendor, func->device,
                   func->num, func->vendor, func->device);
 
        ath10k_core_unregister(ar);
+
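+       /* remove the NAPI instance added in ath10k_sdio_probe() */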
+       netif_napi_del(&ar->napi);
+
        ath10k_core_destroy(ar);
 
        flush_workqueue(ar_sdio->workqueue);