}
 }
 
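+/* Allocate rx skbs and submit them to the rx channel until its share of the
+ * descriptor pool is filled. Returns the number of submitted descriptors on
+ * success or a negative error code.
+ */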
+static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
+{
+       struct cpsw_common *cpsw = priv->cpsw;
+       struct sk_buff *skb;
+       int ch_buf_num;
+       int i, ret;
+
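+       /* each channel owns a slice of the descriptor pool; fill the rx slice */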
+       ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxch);
+       for (i = 0; i < ch_buf_num; i++) {
+               skb = __netdev_alloc_skb_ip_align(priv->ndev,
+                                                 cpsw->rx_packet_max,
+                                                 GFP_KERNEL);
+               if (!skb) {
+                       cpsw_err(priv, ifup, "cannot allocate skb\n");
+                       return -ENOMEM;
+               }
+
+               ret = cpdma_chan_submit(cpsw->rxch, skb, skb->data,
+                                       skb_tailroom(skb), 0);
+               if (ret < 0) {
+                       cpsw_err(priv, ifup,
+                                "cannot submit skb to rx channel, error %d\n",
+                                ret);
+                       kfree_skb(skb);
+                       return ret;
+               }
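+               /* skb is now held only by the dma descriptor; silence kmemleak */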
+               kmemleak_not_leak(skb);
+       }
+
+       cpsw_info(priv, ifup, "submitted %d rx descriptors\n", ch_buf_num);
+
+       return ch_buf_num;
+}
+
 static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
 {
        u32 slave_port;
 {
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
-       int i, ret;
+       int ret;
        u32 reg;
 
        ret = pm_runtime_get_sync(cpsw->dev);
                                  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
 
        if (!cpsw_common_res_usage_state(cpsw)) {
-               int buf_num;
-
                /* setup tx dma to fixed prio and zero offset */
                cpdma_control_set(cpsw->dma, CPDMA_TX_PRIO_FIXED, 1);
                cpdma_control_set(cpsw->dma, CPDMA_RX_BUFFER_OFFSET, 0);
                        enable_irq(cpsw->irqs_table[0]);
                }
 
-               buf_num = cpdma_chan_get_rx_buf_num(cpsw->dma);
-               for (i = 0; i < buf_num; i++) {
-                       struct sk_buff *skb;
-
-                       ret = -ENOMEM;
-                       skb = __netdev_alloc_skb_ip_align(priv->ndev,
-                                       cpsw->rx_packet_max, GFP_KERNEL);
-                       if (!skb)
-                               goto err_cleanup;
-                       ret = cpdma_chan_submit(cpsw->rxch, skb, skb->data,
-                                               skb_tailroom(skb), 0);
-                       if (ret < 0) {
-                               kfree_skb(skb);
-                               goto err_cleanup;
-                       }
-                       kmemleak_not_leak(skb);
-               }
-               /* continue even if we didn't manage to submit all
-                * receive descs
-                */
-               cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
+               ret = cpsw_fill_rx_channels(priv);
+               if (ret < 0)
+                       goto err_cleanup;
 
                if (cpts_register(cpsw->dev, cpsw->cpts,
                                  cpsw->data.cpts_clock_mult,
 
        struct cpdma_desc_pool  *pool;
        spinlock_t              lock;
        struct cpdma_chan       *channels[2 * CPDMA_MAX_CHANNELS];
+       int chan_num;           /* number of allocated channels */
 };
 
 struct cpdma_chan {
        ctlr->state = CPDMA_STATE_IDLE;
        ctlr->params = *params;
        ctlr->dev = params->dev;
+       ctlr->chan_num = 0;
        spin_lock_init(&ctlr->lock);
 
        ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
 }
 EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
 
+/**
+ * cpdma_chan_split_pool - Splits ctlr pool between all channels.
+ * Has to be called under ctlr lock.
+ */
+static void cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+{
+       struct cpdma_desc_pool *pool = ctlr->pool;
+       struct cpdma_chan *chan;
+       int ch_desc_num;
+       int i;
+
+       if (!ctlr->chan_num)
+               return;
+
+       /* calculate average size of pool slice */
+       ch_desc_num = pool->num_desc / ctlr->chan_num;
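+       /* integer division: any remainder descriptors are left unassigned */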
+
+       /* split ctlr pool */
+       for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+               chan = ctlr->channels[i];
+               if (chan)
+                       chan->desc_num = ch_desc_num;
+       }
+}
+
 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
                                     cpdma_handler_fn handler)
 {
        spin_lock_init(&chan->lock);
 
        ctlr->channels[chan_num] = chan;
+       ctlr->chan_num++;
+
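+       /* redistribute the descriptor pool across the new set of channels */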
+       cpdma_chan_split_pool(ctlr);
+
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return chan;
 }
 EXPORT_SYMBOL_GPL(cpdma_chan_create);
 
-int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
+int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
 {
-       return ctlr->pool->num_desc / 2;
+       unsigned long flags;
+       int desc_num;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       desc_num = chan->desc_num;
+       spin_unlock_irqrestore(&chan->lock, flags);
+
+       return desc_num;
 }
 EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
 
        if (chan->state != CPDMA_STATE_IDLE)
                cpdma_chan_stop(chan);
        ctlr->channels[chan->chan_num] = NULL;
+       ctlr->chan_num--;
+
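+       /* return this channel's descriptors to the remaining channels */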
+       cpdma_chan_split_pool(ctlr);
+
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
 }