int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
                               struct dp_rxdma_ring *rx_ring,
                               int req_entries,
-                              enum hal_rx_buf_return_buf_manager mgr,
-                              gfp_t gfp)
+                              enum hal_rx_buf_return_buf_manager mgr)
 {
        struct hal_srng *srng;
        u32 *desc;
 
                spin_lock_bh(&rx_ring->idr_lock);
                buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
-                                  rx_ring->bufs_max * 3, gfp);
+                                  rx_ring->bufs_max * 3, GFP_ATOMIC);
                spin_unlock_bh(&rx_ring->idr_lock);
                if (buf_id < 0)
                        goto fail_dma_unmap;
 
        rx_ring->bufs_max = num_entries;
        ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
-                                  HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL);
+                                  HAL_RX_BUF_RBM_SW3_BM);
        return 0;
 }
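
With the gfp argument gone, every replenish path implicitly uses GFP_ATOMIC.
That is the only mode that was ever safe here anyway: the idr_alloc() call
sits between spin_lock_bh() and spin_unlock_bh(), and a GFP_KERNEL allocation
may sleep, which is a bug under a spinlock. Below is a minimal sketch of the
constraint; demo_ring and demo_track_skb are illustrative names, not the
driver's actual types:

#include <linux/idr.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_ring {
        struct idr bufs_idr;    /* maps buf_id -> skb, like bufs_idr above */
        spinlock_t idr_lock;
        int bufs_max;
};

static int demo_track_skb(struct demo_ring *ring, struct sk_buff *skb)
{
        int buf_id;

        spin_lock_bh(&ring->idr_lock);
        /* GFP_ATOMIC never sleeps, so it is the only legal choice while
         * the spinlock is held; GFP_KERNEL here would be a bug. */
        buf_id = idr_alloc(&ring->bufs_idr, skb, 0, ring->bufs_max,
                           GFP_ATOMIC);
        spin_unlock_bh(&ring->idr_lock);

        return buf_id;  /* negative error code on failure */
}

The IDR API does provide idr_preload()/idr_preload_end() for callers that
want a sleeping allocation outside the lock, but hardcoding GFP_ATOMIC keeps
the helper callable from any context with a single signature.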
 
                rx_ring = &ar->dp.rx_refill_buf_ring;
 
                ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
-                                          HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+                                          HAL_RX_BUF_RBM_SW3_BM);
        }
 
        ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
 
 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
                                                         struct dp_rxdma_ring *rx_ring,
-                                                        int *buf_id, gfp_t gfp)
+                                                        int *buf_id)
 {
        struct sk_buff *skb;
        dma_addr_t paddr;
 
        spin_lock_bh(&rx_ring->idr_lock);
        *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
-                           rx_ring->bufs_max, gfp);
+                           rx_ring->bufs_max, GFP_ATOMIC);
        spin_unlock_bh(&rx_ring->idr_lock);
        if (*buf_id < 0)
                goto fail_dma_unmap;
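
The fail_dma_unmap label implies the ordering in the surrounding (elided)
code: the skb is allocated and DMA-mapped before it is published in the IDR,
so a failed idr_alloc() must unwind the mapping. A hedged sketch of that
allocate-and-map step follows; demo_alloc_rx_buf and its dev/size parameters
are assumptions for illustration, not the driver's actual helper:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static struct sk_buff *demo_alloc_rx_buf(struct device *dev, int size,
                                         dma_addr_t *paddr)
{
        struct sk_buff *skb = dev_alloc_skb(size);

        if (!skb)
                return NULL;

        *paddr = dma_map_single(dev, skb->data, size, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *paddr)) {
                /* the _any() variant is safe from both process and
                 * softirq context, matching the atomic theme here */
                dev_kfree_skb_any(skb);
                return NULL;
        }

        return skb;
}
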
 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
                                           struct dp_rxdma_ring *rx_ring,
                                           int req_entries,
-                                          enum hal_rx_buf_return_buf_manager mgr,
-                                          gfp_t gfp)
+                                          enum hal_rx_buf_return_buf_manager mgr)
 {
        struct hal_srng *srng;
        u32 *desc;
 
        while (num_remain > 0) {
                skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
-                                                       &buf_id, gfp);
+                                                       &buf_id);
                if (!skb)
                        break;
                paddr = ATH11K_SKB_RXCB(skb)->paddr;
                }
 move_next:
                skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
-                                                       &buf_id, GFP_ATOMIC);
+                                                       &buf_id);
 
                if (!skb) {
                        ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
                rx_ring = &ar->dp.rx_refill_buf_ring;
 
                ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
-                                          HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+                                          HAL_RX_BUF_RBM_SW3_BM);
        }
 
        return tot_n_bufs_reaped;
                rx_ring = &ar->dp.rx_refill_buf_ring;
 
                ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
-                                          HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+                                          HAL_RX_BUF_RBM_SW3_BM);
        }
 
        rcu_read_lock();
 
        if (num_buf_freed)
                ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
-                                          HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+                                          HAL_RX_BUF_RBM_SW3_BM);
 
        return budget - quota;
 }
                ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
                                           &dp->rxdma_mon_buf_ring,
                                           rx_bufs_used,
-                                          HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+                                          HAL_RX_BUF_RBM_SW3_BM);
        }
 }
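
The call sites above fall into two classes. The error and NAPI processing
paths run in softirq context, where sleeping is forbidden; the ring setup
path, which previously passed GFP_KERNEL, runs in process context, where
GFP_ATOMIC is merely stricter than necessary. Pinning the stricter mode
inside the helper is the simplest signature that serves both, at the cost of
a somewhat higher allocation-failure risk during setup. A sketch of the
trade-off; demo_alloc_any_context is an illustrative name, nothing more:

#include <linux/slab.h>

/* A helper that must work from both process and softirq context without
 * a gfp_t parameter has to assume the stricter case. */
static void *demo_alloc_any_context(size_t len)
{
        /* GFP_ATOMIC never sleeps, so it is safe under spinlocks and in
         * softirq; the cost is no direct reclaim, hence a higher chance
         * of failure under memory pressure. */
        return kmalloc(len, GFP_ATOMIC);
}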
 
 
 int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
                               struct dp_rxdma_ring *rx_ring,
                               int req_entries,
-                              enum hal_rx_buf_return_buf_manager mgr,
-                              gfp_t gfp);
+                              enum hal_rx_buf_return_buf_manager mgr);
 int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
                           int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
                                       const void *ptr, void *data),
 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
                                           struct dp_rxdma_ring *rx_ring,
                                           int req_entries,
-                                          enum hal_rx_buf_return_buf_manager mgr,
-                                          gfp_t gfp);
+                                          enum hal_rx_buf_return_buf_manager mgr);
 int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar);
 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar);
 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id);
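
For any caller not shown in this excerpt, the conversion is mechanical: drop
the trailing gfp_t argument. A hypothetical call site, with arguments as in
the hunks above:

        /* before */
        ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_entries,
                                   HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
        /* after */
        ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_entries,
                                   HAL_RX_BUF_RBM_SW3_BM);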