struct net_device       *ndev = skb->dev;
        struct cpsw_priv        *priv = netdev_priv(ndev);
 
+       /* If the queue was stopped because the tx DMA ran out of free
+        * descriptors, restart it now that a descriptor has been freed.
+        */
        if (unlikely(netif_queue_stopped(ndev)))
                netif_start_queue(ndev);
        cpts_tx_timestamp(&priv->cpts, skb);
                goto fail;
        }
 
+       /* If no free tx descriptors remain, tell the stack to stop
+        * sending us frames until the completion handler frees one.
+        */
+       if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
+               netif_stop_queue(ndev);
+
        return NETDEV_TX_OK;
 fail:
        priv->stats.tx_dropped++;
 
 };
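
The two cpsw.c hunks above follow the standard net_device tx flow-control pattern: the xmit path stops the queue once the tx side of the descriptor pool is exhausted, and the tx completion handler wakes it again when a descriptor comes back. A minimal sketch of that pattern follows, for reference only; my_ndo_start_xmit(), my_tx_complete(), my_hw_queue_skb() and my_hw_ring_full() are illustrative placeholders, not symbols from this patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Placeholders for the driver's own DMA submit and "ring full" checks. */
static int my_hw_queue_skb(struct net_device *ndev, struct sk_buff *skb);
static bool my_hw_ring_full(struct net_device *ndev);

static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb,
                                     struct net_device *ndev)
{
        if (my_hw_queue_skb(ndev, skb)) {       /* hand the skb to the DMA engine */
                ndev->stats.tx_dropped++;
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* No room for another frame: tell the stack to stop sending. */
        if (my_hw_ring_full(ndev))
                netif_stop_queue(ndev);

        return NETDEV_TX_OK;
}

/* Runs after the DMA engine has completed (and freed) a tx descriptor. */
static void my_tx_complete(struct net_device *ndev)
{
        ndev->stats.tx_packets++;
        if (netif_queue_stopped(ndev))
                netif_start_queue(ndev);
}
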
 
 struct cpdma_chan {
+       struct cpdma_desc __iomem       *head, *tail;
+       void __iomem                    *hdp, *cp, *rxfree;
        enum cpdma_state                state;
        struct cpdma_ctlr               *ctlr;
        int                             chan_num;
        spinlock_t                      lock;
-       struct cpdma_desc __iomem       *head, *tail;
        int                             count;
-       void __iomem                    *hdp, *cp, *rxfree;
        u32                             mask;
        cpdma_handler_fn                handler;
        enum dma_data_direction         dir;
 };
 
 static struct cpdma_desc __iomem *
-cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
+cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
 {
        unsigned long flags;
        int index;
+       int desc_start;
+       int desc_end;
        struct cpdma_desc __iomem *desc = NULL;
 
        spin_lock_irqsave(&pool->lock, flags);
 
-       index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
-                                          num_desc, 0);
-       if (index < pool->num_desc) {
+       if (is_rx) {
+               desc_start = 0;
+               desc_end = pool->num_desc/2;
+       } else {
+               desc_start = pool->num_desc/2;
+               desc_end = pool->num_desc;
+       }
+
+       index = bitmap_find_next_zero_area(pool->bitmap,
+                               desc_end, desc_start, num_desc, 0);
+       if (index < desc_end) {
                bitmap_set(pool->bitmap, index, num_desc);
                desc = pool->iomap + pool->desc_size * index;
                pool->used_desc++;
                goto unlock_ret;
        }
 
-       desc = cpdma_desc_alloc(ctlr->pool, 1);
+       desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
        if (!desc) {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(cpdma_chan_submit);
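
The cpdma_desc_alloc() change above splits the shared descriptor bitmap down the middle: rx allocations search only the lower half and tx allocations only the upper half, so heavy tx traffic can no longer starve rx of descriptors (and vice versa). A small sketch of the resulting ranges, assuming a 256-entry pool; the helper below is an illustration, not part of the patch.

#include <linux/types.h>

/* Which bitmap range cpdma_desc_alloc() searches for each direction.
 * With num_desc == 256: rx uses indices [0, 128), tx uses [128, 256).
 */
static void desc_search_range(int num_desc, bool is_rx, int *start, int *end)
{
        if (is_rx) {
                *start = 0;              /* rx owns the lower half */
                *end = num_desc / 2;
        } else {
                *start = num_desc / 2;   /* tx owns the upper half */
                *end = num_desc;
        }
}
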
 
+bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
+{
+       unsigned long flags;
+       int index;
+       bool ret;
+       struct cpdma_ctlr       *ctlr = chan->ctlr;
+       struct cpdma_desc_pool  *pool = ctlr->pool;
+
+       spin_lock_irqsave(&pool->lock, flags);
+
+       index = bitmap_find_next_zero_area(pool->bitmap,
+                               pool->num_desc, pool->num_desc/2, 1, 0);
+
+       if (index < pool->num_desc)
+               ret = true;
+       else
+               ret = false;
+
+       spin_unlock_irqrestore(&pool->lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
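
cpdma_check_free_tx_desc() reports whether at least one descriptor in the tx half of the pool is still unallocated: used descriptors have their bitmap bit set, so finding a single zero bit at or above num_desc/2 means a tx descriptor is free, while a return value of pool->num_desc or more from bitmap_find_next_zero_area() means the tx half is exhausted. A miniature worked example (illustrative only) with an 8-descriptor pool, bits 0-3 being the rx half and bits 4-7 the tx half:

/* Illustration only, 8-descriptor pool (bit set == descriptor in use):
 *
 *   bits 0..7 = 1 1 0 0  1 1 1 0
 *     bitmap_find_next_zero_area(map, 8, 4, 1, 0) == 7   (< num_desc)
 *     -> a tx descriptor is free, the queue keeps running
 *
 *   bits 0..7 = 1 1 0 0  1 1 1 1
 *     bitmap_find_next_zero_area(map, 8, 4, 1, 0) >= 8   (>= num_desc)
 *     -> the tx half is exhausted, the caller stops the queue
 */
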
+
 static void __cpdma_chan_free(struct cpdma_chan *chan,
                              struct cpdma_desc __iomem *desc,
                              int outlen, int status)
 
 #define EMAC_DEF_TX_CH                 (0) /* Default 0th channel */
 #define EMAC_DEF_RX_CH                 (0) /* Default 0th channel */
 #define EMAC_DEF_RX_NUM_DESC           (128)
-#define EMAC_DEF_TX_NUM_DESC           (128)
 #define EMAC_DEF_MAX_TX_CH             (1) /* Max TX channels configured */
 #define EMAC_DEF_MAX_RX_CH             (1) /* Max RX channels configured */
 #define EMAC_POLL_WEIGHT               (64) /* Default NAPI poll weight */
        u32 mac_hash2;
        u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
        u32 rx_addr_type;
-       atomic_t cur_tx;
        const char *phy_id;
 #ifdef CONFIG_OF
        struct device_node *phy_node;
 {
        struct sk_buff          *skb = token;
        struct net_device       *ndev = skb->dev;
-       struct emac_priv        *priv = netdev_priv(ndev);
-
-       atomic_dec(&priv->cur_tx);
 
+       /* If the queue was stopped because the tx DMA ran out of free
+        * descriptors, restart it now that a descriptor has been freed.
+        */
        if (unlikely(netif_queue_stopped(ndev)))
                netif_start_queue(ndev);
        ndev->stats.tx_packets++;
                goto fail_tx;
        }
 
-       if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC)
+       /* If no free tx descriptors remain, tell the stack to stop
+        * sending us frames until the completion handler frees one.
+        */
+       if (unlikely(!cpdma_check_free_tx_desc(priv->txchan)))
                netif_stop_queue(ndev);
 
        return NETDEV_TX_OK;