kfree(trans_pcie->rxq);
 }
 
+/*
+ * iwl_pcie_rx_move_to_allocator - transfer used RBDs to the allocator
+ *
+ * Splices the whole rxq->rx_used list onto the tail of the allocator's
+ * rbd_empty list, holding rba->lock for the list manipulation.  Factored
+ * out so the several call sites below share one locked helper.
+ * NOTE(review): caller is presumably expected to hold rxq->lock (or
+ * otherwise own rx_used) — confirm against the full rx.c source.
+ */
+static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
+                                         struct iwl_rb_allocator *rba)
+{
+       spin_lock(&rba->lock);
+       list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+       spin_unlock(&rba->lock);
+}
+
 /*
  * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
  *
        if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
                /* Move the 2 RBDs to the allocator ownership.
                 Allocator has another 6 from pool for the request completion*/
-               spin_lock(&rba->lock);
-               list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-               spin_unlock(&rba->lock);
+               iwl_pcie_rx_move_to_allocator(rxq, rba);
 
                atomic_inc(&rba->req_pending);
                queue_work(rba->alloc_wq, &rba->rx_alloc);
                IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
 
        while (i != r) {
+               struct iwl_rb_allocator *rba = &trans_pcie->rba;
                struct iwl_rx_mem_buffer *rxb;
-
-               if (unlikely(rxq->used_count == rxq->queue_size / 2))
+               /* number of RBDs still waiting for page allocation */
+               u32 rb_pending_alloc =
+                       atomic_read(&trans_pcie->rba.req_pending) *
+                       RX_CLAIM_REQ_ALLOC;
+
+               if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
+                            !emergency)) {
+                       iwl_pcie_rx_move_to_allocator(rxq, rba);
                        emergency = true;
+               }
 
                rxb = iwl_pcie_get_rxb(trans, rxq, i);
                if (!rxb)
                        iwl_pcie_rx_allocator_get(trans, rxq);
 
                if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
-                       struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
                        /* Add the remaining empty RBDs for allocator use */
-                       spin_lock(&rba->lock);
-                       list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-                       spin_unlock(&rba->lock);
+                       iwl_pcie_rx_move_to_allocator(rxq, rba);
                } else if (emergency) {
                        count++;
                        if (count == 8) {
                                count = 0;
-                               if (rxq->used_count < rxq->queue_size / 3)
+                               if (rb_pending_alloc < rxq->queue_size / 3)
                                        emergency = false;
 
                                rxq->read = i;