 #define WL1271_FLAG_IDLE_REQUESTED    (11)
 #define WL1271_FLAG_PSPOLL_FAILURE    (12)
 #define WL1271_FLAG_STA_STATE_SENT    (13)
+#define WL1271_FLAG_FW_TX_BUSY        (14)
        unsigned long flags;
 
        struct wl1271_partition_set part;
 
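The new flag shares the wl->flags word with the existing ones and is
manipulated with the kernel's atomic bit operations. As a rough userspace
model of those semantics (my_set_bit() and friends are illustrative
stand-ins, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_FW_TX_BUSY 14   /* mirrors WL1271_FLAG_FW_TX_BUSY */

static _Atomic unsigned long flags;

static void my_set_bit(int nr, _Atomic unsigned long *addr)
{
        atomic_fetch_or(addr, 1UL << nr);
}

static void my_clear_bit(int nr, _Atomic unsigned long *addr)
{
        atomic_fetch_and(addr, ~(1UL << nr));
}

static bool my_test_bit(int nr, _Atomic unsigned long *addr)
{
        return (atomic_load(addr) >> nr) & 1;
}

int main(void)
{
        my_set_bit(FLAG_FW_TX_BUSY, &flags);
        printf("busy: %d\n", my_test_bit(FLAG_FW_TX_BUSY, &flags));
        my_clear_bit(FLAG_FW_TX_BUSY, &flags);
        printf("busy: %d\n", my_test_bit(FLAG_FW_TX_BUSY, &flags));
        return 0;
}
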
                total += cnt;
        }
 
-       /* if more blocks are available now, schedule some tx work */
-       if (total && !skb_queue_empty(&wl->tx_queue))
-               ieee80211_queue_work(wl->hw, &wl->tx_work);
+       /* if more blocks are available now, tx work can be scheduled */
+       if (total)
+               clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 
        /* update the host-chipset time offset */
        getnstimeofday(&ts);
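
This is one half of a simple handshake: the completion path clears
WL1271_FLAG_FW_TX_BUSY as soon as the firmware has returned blocks, and
every place that wants to schedule tx work first tests the flag, as the
later hunks do. A minimal sketch of the handshake, using C11 atomics
instead of the kernel bitops; fw_tx_busy, blocks_freed() and
try_schedule_tx() are made-up names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool fw_tx_busy;  /* models WL1271_FLAG_FW_TX_BUSY */

/* completion side: the firmware returned 'freed' blocks */
static void blocks_freed(int freed)
{
        if (freed > 0)
                atomic_store(&fw_tx_busy, false);  /* tx may run again */
}

/* submit side: only wake the tx work while the firmware has room */
static bool try_schedule_tx(void)
{
        if (atomic_load(&fw_tx_busy))
                return false;  /* wakeup would be futile, skip it */
        /* ieee80211_queue_work() would run here in the driver */
        return true;
}

int main(void)
{
        atomic_store(&fw_tx_busy, true);  /* firmware out of blocks */
        printf("scheduled: %d\n", try_schedule_tx());  /* 0 */
        blocks_freed(3);                  /* completion frees blocks */
        printf("scheduled: %d\n", try_schedule_tx());  /* 1 */
        return 0;
}
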
                            (wl->tx_results_count & 0xff))
                                wl1271_tx_complete(wl);
 
+                       /* Check if any tx blocks were freed */
+                       if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+                                       !skb_queue_empty(&wl->tx_queue)) {
+                               /*
+                                * In order to avoid starvation of the TX path,
+                                * call the work function directly.
+                                */
+                               wl1271_tx_work_locked(wl);
+                       }
+
                        wl1271_rx(wl, wl->fw_status);
                }
 
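Calling wl1271_tx_work_locked() straight from the irq work loop, which
already holds wl->mutex, is what prevents the starvation named in the
comment: a deferred work item would only run after the whole irq loop,
rx processing included, had finished. A toy model of that ordering, with
illustrative names:

#include <stdbool.h>
#include <stdio.h>

static bool fw_tx_busy;
static int  queued_frames = 3;

/* models wl1271_tx_work_locked(): caller already holds the mutex */
static void tx_work_locked(void)
{
        while (queued_frames > 0 && !fw_tx_busy) {
                queued_frames--;
                printf("sent frame, %d left\n", queued_frames);
        }
}

static void irq_loop_iteration(void)
{
        /* tx completions were just handled and freed blocks */
        fw_tx_busy = false;

        /*
         * Draining tx inline here, instead of queueing work, means
         * pending frames go out before the loop moves on to rx.
         */
        if (!fw_tx_busy && queued_frames > 0)
                tx_work_locked();

        /* rx handling would follow, as wl1271_rx() does above */
}

int main(void)
{
        irq_loop_iteration();
        return 0;
}
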
         * before that, the tx_work will not be initialized!
         */
 
-       ieee80211_queue_work(wl->hw, &wl->tx_work);
+       if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
+               ieee80211_queue_work(wl->hw, &wl->tx_work);
 
        /*
         * The workqueue is slow to process the tx_queue and we need to stop
 
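The gate on WL1271_FLAG_FW_TX_BUSY only drops wakeups that could not make
progress anyway. A frame queued while the flag is set is not lost, because
the completion path both clears the flag and drives tx directly. A small
liveness sketch under that assumption; op_tx() and tx_complete() are
simplified models, not the driver functions:

#include <stdbool.h>
#include <stdio.h>

static bool fw_tx_busy = true;  /* firmware is out of blocks */
static int  tx_queue;           /* pending frame count */

static void op_tx(void)         /* models wl1271_op_tx */
{
        tx_queue++;
        if (!fw_tx_busy)
                printf("tx work scheduled\n");
        else
                printf("wakeup suppressed, frame stays queued\n");
}

static void tx_complete(void)   /* models the completion path */
{
        fw_tx_busy = false;     /* blocks were freed */
        if (tx_queue > 0) {     /* mirrors the direct-call hunk */
                tx_queue--;
                printf("frame sent from completion path\n");
        }
}

int main(void)
{
        op_tx();        /* enqueue while busy: wakeup suppressed */
        tx_complete();  /* a later completion flushes the frame */
        return 0;
}
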
        return enabled_rates;
 }
 
-void wl1271_tx_work(struct work_struct *work)
+void wl1271_tx_work_locked(struct wl1271 *wl)
 {
-       struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
        struct sk_buff *skb;
        bool woken_up = false;
        u32 sta_rates = 0;
                spin_unlock_irqrestore(&wl->wl_lock, flags);
        }
 
-       mutex_lock(&wl->mutex);
-
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
                         * Queue back last skb, and stop aggregating.
                         */
                        skb_queue_head(&wl->tx_queue, skb);
+                       /* Firmware is out of tx blocks, avoid redundant tx work */
+                       set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
                        goto out_ack;
                } else if (ret < 0) {
                        dev_kfree_skb(skb);
 out:
        if (woken_up)
                wl1271_ps_elp_sleep(wl);
+}
 
+void wl1271_tx_work(struct work_struct *work)
+{
+       struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
+
+       mutex_lock(&wl->mutex);
+       wl1271_tx_work_locked(wl);
        mutex_unlock(&wl->mutex);
 }
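
The split above is the usual foo()/foo_locked() convention: the work
callback owns the mutex, and paths that already hold it, like the irq work
earlier in this patch, call the _locked variant directly. A generic sketch
of the pattern, with pthread_mutex_t standing in for wl->mutex and
do_tx()/do_tx_locked() as illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

/* core routine: the caller must hold dev_mutex */
static void do_tx_locked(void)
{
        printf("transmitting under the lock\n");
}

/* work entry point: takes the lock, then calls the core routine */
static void do_tx(void)
{
        pthread_mutex_lock(&dev_mutex);
        do_tx_locked();
        pthread_mutex_unlock(&dev_mutex);
}

int main(void)
{
        /* irq-path style: the mutex is already held */
        pthread_mutex_lock(&dev_mutex);
        do_tx_locked();
        pthread_mutex_unlock(&dev_mutex);

        /* deferred-work style: the wrapper takes the mutex */
        do_tx();
        return 0;
}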
 
 
 }
 
 void wl1271_tx_work(struct work_struct *work);
+void wl1271_tx_work_locked(struct wl1271 *wl);
 void wl1271_tx_complete(struct wl1271 *wl);
 void wl1271_tx_reset(struct wl1271 *wl);
 void wl1271_tx_flush(struct wl1271 *wl);