return;
        }
 
+       /* If already active, just update the backlog */
+       if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
+               if (add_one)
+                       atomic_inc(&endpoint->replenish_backlog);
+               return;
+       }
+
        while (atomic_dec_not_zero(&endpoint->replenish_backlog))
                if (ipa_endpoint_replenish_one(endpoint))
                        goto try_again_later;
+
+       clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
+
        if (add_one)
                atomic_inc(&endpoint->replenish_backlog);
 
        return;
 
 try_again_later:
+       clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
+
        /* The last one didn't succeed, so fix the backlog */
        delta = add_one ? 2 : 1;
        backlog = atomic_add_return(delta, &endpoint->replenish_backlog);
@@ ... @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
                /* RX transactions require a single TRE, so the maximum
                 * backlog is the same as the maximum outstanding TREs.
                 */
                clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
+               clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
                atomic_set(&endpoint->replenish_saved,
                           gsi_channel_tre_max(gsi, endpoint->channel_id));
                atomic_set(&endpoint->replenish_backlog, 0);
 
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ ... @@
  * enum ipa_replenish_flag:    RX buffer replenish flags
  *
  * @IPA_REPLENISH_ENABLED:     Whether receive buffer replenishing is enabled
+ * @IPA_REPLENISH_ACTIVE:      Whether replenishing is underway
  * @IPA_REPLENISH_COUNT:       Number of defined replenish flags
  */
 enum ipa_replenish_flag {
        IPA_REPLENISH_ENABLED,
+       IPA_REPLENISH_ACTIVE,
        IPA_REPLENISH_COUNT,    /* Number of flags (must be last) */
 };
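
For readers less familiar with the kernel bitops involved, the userspace sketch below models the pattern the new IPA_REPLENISH_ACTIVE flag implements: the first caller to atomically set the flag consumes the backlog, while any concurrent caller only bumps the backlog and returns, so the replenish loop never runs on two CPUs at once. It uses C11 atomics in place of the kernel's test_and_set_bit()/clear_bit() and atomic_t helpers, and every name in it is invented for illustration; it is not the driver code.

/* Rough userspace model of the serialization pattern above.
 * All names are illustrative; C11 atomics stand in for kernel bitops.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool replenish_active;          /* plays the role of IPA_REPLENISH_ACTIVE */
static atomic_int  replenish_backlog = 5;     /* buffers still owed to the hardware */

/* Decrement the backlog unless it is already zero (atomic_dec_not_zero) */
static bool dec_not_zero(atomic_int *v)
{
	int val = atomic_load(v);

	while (val > 0)
		if (atomic_compare_exchange_weak(v, &val, val - 1))
			return true;

	return false;
}

static bool replenish_one(void)
{
	return true;	/* pretend buffer allocation always succeeds */
}

static void replenish(bool add_one)
{
	/* test_and_set_bit() equivalent: only the first caller runs the
	 * loop; concurrent callers just record their buffer and return.
	 */
	if (atomic_exchange(&replenish_active, true)) {
		if (add_one)
			atomic_fetch_add(&replenish_backlog, 1);
		return;
	}

	while (dec_not_zero(&replenish_backlog))
		if (!replenish_one())
			break;	/* the real code also repairs the backlog here */

	/* clear_bit() equivalent: allow the next caller to run the loop */
	atomic_store(&replenish_active, false);

	if (add_one)
		atomic_fetch_add(&replenish_backlog, 1);
}

int main(void)
{
	replenish(true);
	printf("backlog now %d\n", atomic_load(&replenish_backlog));

	return 0;
}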