  * @bssidx: index of bss associated with this interface.
  * @mac_addr: assigned mac address.
  * @netif_stop: bitmap indicating the reasons why netif queues are stopped.
+ * @netif_stop_lock: spinlock for updating netif_stop from multiple sources.
  * @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
  * @pend_8021x_wait: used for signalling change in count.
  */
        s32 bssidx;
        u8 mac_addr[ETH_ALEN];
        u8 netif_stop;
+       spinlock_t netif_stop_lock;
        atomic_t pend_8021x_cnt;
        wait_queue_head_t pend_8021x_wait;
 };
 
 void brcmf_txflowblock_if(struct brcmf_if *ifp,
                          enum brcmf_netif_stop_reason reason, bool state)
 {
+       unsigned long flags;
+
        if (!ifp)
                return;
 
        brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
                  ifp->bssidx, ifp->netif_stop, reason, state);
+
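+       /* netif_stop is updated from more than one context; serialize changes */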
+       spin_lock_irqsave(&ifp->netif_stop_lock, flags);
        if (state) {
                if (!ifp->netif_stop)
                        netif_stop_queue(ifp->ndev);
                ifp->netif_stop |= reason;
        } else {
                ifp->netif_stop &= ~reason;
                if (!ifp->netif_stop)
                        netif_wake_queue(ifp->ndev);
        }
+       spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
 }
 
 void brcmf_txflowblock(struct device *dev, bool state)
 
        brcmf_dbg(TRACE, "Enter\n");
 
+       brcmf_fws_bus_blocked(drvr, state);
+
        for (i = 0; i < BRCMF_MAX_IFS; i++)
                brcmf_txflowblock_if(drvr->iflist[i],
                                     BRCMF_NETIF_STOP_REASON_BLOCK_BUS, state);
        ifp->bssidx = bssidx;
 
        init_waitqueue_head(&ifp->pend_8021x_wait);
+       spin_lock_init(&ifp->netif_stop_lock);
 
        if (mac_addr != NULL)
                memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);
 
        } else {
                ret = 0;
        }
-       spin_unlock_bh(&bus->txqlock);
 
        if (pktq_len(&bus->txq) >= TXHI) {
                bus->txoff = true;
                brcmf_txflowblock(bus->sdiodev->dev, true);
        }
+       spin_unlock_bh(&bus->txqlock);
 
 #ifdef DEBUG
        if (pktq_plen(&bus->txq, prec) > qcount[prec])
 
        u32 fifo_credit_map;
        u32 fifo_delay_map;
        unsigned long borrow_defer_timestamp;
+       bool bus_flow_blocked;
 };
 
 /*
 
        brcmf_fws_lock(drvr, flags);
        if (skcb->mac->suppressed ||
+           fws->bus_flow_blocked ||
            brcmf_fws_mac_desc_closed(fws, skcb->mac, fifo) ||
            brcmu_pktq_mlen(&skcb->mac->psq, 3 << (fifo * 2)) ||
            (!multicast &&
 
        brcmf_dbg(TRACE, "enter: fws=%p\n", fws);
        brcmf_fws_lock(fws->drvr, flags);
-       for (fifo = NL80211_NUM_ACS; fifo >= 0; fifo--) {
+       for (fifo = NL80211_NUM_ACS; fifo >= 0 && !fws->bus_flow_blocked;
+            fifo--) {
                brcmf_dbg(TRACE, "fifo %d credit %d\n", fifo,
                          fws->fifo_credit[fifo]);
                for (credit = 0; credit < fws->fifo_credit[fifo]; /* nop */) {
                        skb = brcmf_fws_deq(fws, fifo);
                        if (!skb || brcmf_fws_commit_skb(fws, fifo, skb))
                                break;
                        if (brcmf_skbcb(skb)->if_flags &
                            BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)
                                credit++;
+                       if (fws->bus_flow_blocked)
+                               break;
                }
                if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
-                   (credit == fws->fifo_credit[fifo])) {
+                   (credit == fws->fifo_credit[fifo]) &&
+                   (!fws->bus_flow_blocked)) {
                        fws->fifo_credit[fifo] -= credit;
                        while (brcmf_fws_borrow_credit(fws) == 0) {
                                skb = brcmf_fws_deq(fws, fifo);
                                if (!skb || brcmf_fws_commit_skb(fws, fifo, skb)) {
                                        brcmf_fws_return_credits(fws, fifo, 1);
                                        break;
                                }
+                               if (fws->bus_flow_blocked)
+                                       break;
                        }
                } else {
                        fws->fifo_credit[fifo] -= credit;
        }
        brcmf_fws_unlock(fws->drvr, flags);
 }
+
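+/* Record the bus flow-control state reported via brcmf_txflowblock() and
+ * kick the dequeue worker once the bus can accept frames again.
+ */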
+void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
+{
+       struct brcmf_fws_info *fws = drvr->fws;
+
+       fws->bus_flow_blocked = flow_blocked;
+       if (!flow_blocked)
+               brcmf_fws_schedule_deq(fws);
+}
 
 void brcmf_fws_add_interface(struct brcmf_if *ifp);
 void brcmf_fws_del_interface(struct brcmf_if *ifp);
 void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
+void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
 
 #endif /* FWSIGNAL_H_ */
 
        int tx_high_watermark;
        int tx_freecount;
        bool tx_flowblock;
+       spinlock_t tx_flowblock_lock;
 
        struct brcmf_usbreq *tx_reqs;
        struct brcmf_usbreq *rx_reqs;
 {
        struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
        struct brcmf_usbdev_info *devinfo = req->devinfo;
+       unsigned long flags;
 
        brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
                  req->skb);
        brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
        req->skb = NULL;
        brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
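+       /* unblock the tx flow once enough tx requests are free again */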
+       spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
        if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
                devinfo->tx_flowblock) {
                brcmf_txflowblock(devinfo->dev, false);
                devinfo->tx_flowblock = false;
        }
+       spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
 }
 
 static void brcmf_usb_rx_complete(struct urb *urb)
        struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
        struct brcmf_usbreq  *req;
        int ret;
+       unsigned long flags;
 
        brcmf_dbg(USB, "Enter, skb=%p\n", skb);
        if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
                goto fail;
        }
 
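+       /* block the tx flow when the free tx request pool runs low */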
+       spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
        if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
            !devinfo->tx_flowblock) {
                brcmf_txflowblock(dev, true);
                devinfo->tx_flowblock = true;
        }
+       spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
        return 0;
 
 fail:
 
        /* Initialize the spinlocks */
        spin_lock_init(&devinfo->qlock);
+       spin_lock_init(&devinfo->tx_flowblock_lock);
 
        INIT_LIST_HEAD(&devinfo->rx_freeq);
        INIT_LIST_HEAD(&devinfo->rx_postq);