// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2022 NXP
  */
 #include <linux/init.h>
 #include <linux/module.h>
        if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
                return;
 
-       while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+       while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
                                               ch->recycled_bufs,
                                               ch->recycled_bufs_cnt)) == -EBUSY) {
                if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
  * to the specified buffer pool
  */
 static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
-                             struct dpaa2_eth_channel *ch, u16 bpid)
+                             struct dpaa2_eth_channel *ch)
 {
        struct device *dev = priv->net_dev->dev.parent;
        u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
                trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
                                         DPAA2_ETH_RX_BUF_RAW_SIZE,
                                         addr, priv->rx_buf_size,
-                                        bpid);
+                                        ch->bp->bpid);
        }
 
 release_bufs:
        /* In case the portal is busy, retry until successful */
-       while ((err = dpaa2_io_service_release(ch->dpio, bpid,
+       while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
                                               buf_array, i)) == -EBUSY) {
                if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
                        break;
        return 0;
 }
 
-static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
+                              struct dpaa2_eth_channel *ch)
 {
-       int i, j;
+       int i;
        int new_count;
 
-       for (j = 0; j < priv->num_channels; j++) {
-               for (i = 0; i < DPAA2_ETH_NUM_BUFS;
-                    i += DPAA2_ETH_BUFS_PER_CMD) {
-                       new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
-                       priv->channel[j]->buf_count += new_count;
+       for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) {
+               new_count = dpaa2_eth_add_bufs(priv, ch);
+               ch->buf_count += new_count;
 
-                       if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
-                               return -ENOMEM;
-                       }
-               }
+               if (new_count < DPAA2_ETH_BUFS_PER_CMD)
+                       return -ENOMEM;
        }
 
        return 0;
 }
 
+static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
+{
+       struct net_device *net_dev = priv->net_dev;
+       struct dpaa2_eth_channel *channel;
+       int i, err = 0;
+
+       for (i = 0; i < priv->num_channels; i++) {
+               channel = priv->channel[i];
+
+               err = dpaa2_eth_seed_pool(priv, channel);
+
+               /* Not much to do; the buffer pool, though not filled up,
+                * may still contain some buffers which would enable us
+                * to limp on.
+                */
+               if (err)
+                       netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
+                                  channel->bp->dev->obj_desc.id,
+                                  channel->bp->bpid);
+       }
+}
+
 /*
- * Drain the specified number of buffers from the DPNI's private buffer pool.
+ * Drain the specified number of buffers from one of the DPNI's private buffer
+ * pools.
  * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD
  */
-static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
+static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
+                                int count)
 {
        u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
        int retries = 0;
        int ret;
 
        do {
-               ret = dpaa2_io_service_acquire(NULL, priv->bpid,
-                                              buf_array, count);
+               ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count);
                if (ret < 0) {
                        if (ret == -EBUSY &&
                            retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
        } while (ret);
 }
 
-static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
 {
        int i;
 
-       dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
-       dpaa2_eth_drain_bufs(priv, 1);
+       /* Drain the buffer pool */
+       dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD);
+       dpaa2_eth_drain_bufs(priv, bpid, 1);
 
+       /* Setup to zero the buffer count of all channels which were
+        * using this buffer pool.
+        */
        for (i = 0; i < priv->num_channels; i++)
-               priv->channel[i]->buf_count = 0;
+               if (priv->channel[i]->bp->bpid == bpid)
+                       priv->channel[i]->buf_count = 0;
+}
+
+static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < priv->num_bps; i++)
+               dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid);
 }
 
 /* Function is called from softirq context only, so we don't need to guard
  * the access to percpu count
  */
 static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
-                                struct dpaa2_eth_channel *ch,
-                                u16 bpid)
+                                struct dpaa2_eth_channel *ch)
 {
        int new_count;
 
                return 0;
 
        do {
-               new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
+               new_count = dpaa2_eth_add_bufs(priv, ch);
                if (unlikely(!new_count)) {
                        /* Out of memory; abort for now, we'll try later on */
                        break;
                        break;
 
                /* Refill pool if appropriate */
-               dpaa2_eth_refill_pool(priv, ch, priv->bpid);
+               dpaa2_eth_refill_pool(priv, ch);
 
                store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
                if (store_cleaned <= 0)
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        int err;
 
-       err = dpaa2_eth_seed_pool(priv, priv->bpid);
-       if (err) {
-               /* Not much to do; the buffer pool, though not filled up,
-                * may still contain some buffers which would enable us
-                * to limp on.
-                */
-               netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
-                          priv->dpbp_dev->obj_desc.id, priv->bpid);
-       }
+       dpaa2_eth_seed_pools(priv);
 
        if (!dpaa2_eth_is_type_phy(priv)) {
                /* We'll only start the txqs when the link is actually ready;
 
 enable_err:
        dpaa2_eth_disable_ch_napi(priv);
-       dpaa2_eth_drain_pool(priv);
+       dpaa2_eth_drain_pools(priv);
        return err;
 }
 
        dpaa2_eth_disable_ch_napi(priv);
 
        /* Empty the buffer pool */
-       dpaa2_eth_drain_pool(priv);
+       dpaa2_eth_drain_pools(priv);
 
        /* Empty the Scatter-Gather Buffer cache */
        dpaa2_eth_sgt_cache_drain(priv);
        dpaa2_eth_set_fq_affinity(priv);
 }
 
-/* Allocate and configure one buffer pool for each interface */
-static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
+/* Allocate and configure a buffer pool */
+struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
 {
-       int err;
-       struct fsl_mc_device *dpbp_dev;
        struct device *dev = priv->net_dev->dev.parent;
+       struct fsl_mc_device *dpbp_dev;
        struct dpbp_attr dpbp_attrs;
+       struct dpaa2_eth_bp *bp;
+       int err;
 
        err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
                                     &dpbp_dev);
                        err = -EPROBE_DEFER;
                else
                        dev_err(dev, "DPBP device allocation failed\n");
-               return err;
+               return ERR_PTR(err);
        }
 
-       priv->dpbp_dev = dpbp_dev;
+       bp = kzalloc(sizeof(*bp), GFP_KERNEL);
+       if (!bp) {
+               err = -ENOMEM;
+               goto err_alloc;
+       }
 
-       err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
+       err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id,
                        &dpbp_dev->mc_handle);
        if (err) {
                dev_err(dev, "dpbp_open() failed\n");
                dev_err(dev, "dpbp_get_attributes() failed\n");
                goto err_get_attr;
        }
-       priv->bpid = dpbp_attrs.bpid;
 
-       return 0;
+       bp->dev = dpbp_dev;
+       bp->bpid = dpbp_attrs.bpid;
+
+       return bp;
 
 err_get_attr:
        dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
 err_reset:
        dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
 err_open:
+       kfree(bp);
+err_alloc:
        fsl_mc_object_free(dpbp_dev);
 
-       return err;
+       return ERR_PTR(err);
+}
+
+static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv)
+{
+       struct dpaa2_eth_bp *bp;
+       int i;
+
+       bp = dpaa2_eth_allocate_dpbp(priv);
+       if (IS_ERR(bp))
+               return PTR_ERR(bp);
+
+       priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp;
+       priv->num_bps++;
+
+       for (i = 0; i < priv->num_channels; i++)
+               priv->channel[i]->bp = bp;
+
+       return 0;
+}
+
+void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp)
+{
+       int idx_bp;
+
+       /* Find the index at which this BP is stored */
+       for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++)
+               if (priv->bp[idx_bp] == bp)
+                       break;
+
+       /* Drain the pool and disable the associated MC object */
+       dpaa2_eth_drain_pool(priv, bp->bpid);
+       dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle);
+       dpbp_close(priv->mc_io, 0, bp->dev->mc_handle);
+       fsl_mc_object_free(bp->dev);
+       kfree(bp);
+
+       /* Move the last in use DPBP over in this position */
+       priv->bp[idx_bp] = priv->bp[priv->num_bps - 1];
+       priv->num_bps--;
 }
 
-static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv)
 {
-       dpaa2_eth_drain_pool(priv);
-       dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
-       dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
-       fsl_mc_object_free(priv->dpbp_dev);
+       int i;
+
+       for (i = 0; i < priv->num_bps; i++)
+               dpaa2_eth_free_dpbp(priv, priv->bp[i]);
 }
 
 static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
  */
 static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
 {
+       struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
        struct net_device *net_dev = priv->net_dev;
        struct device *dev = net_dev->dev.parent;
        struct dpni_pools_cfg pools_params;
        int i;
 
        pools_params.num_dpbp = 1;
-       pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
+       pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
        pools_params.pools[0].backup_pool = 0;
        pools_params.pools[0].buffer_size = priv->rx_buf_size;
        err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
 
        dpaa2_eth_setup_fqs(priv);
 
-       err = dpaa2_eth_setup_dpbp(priv);
+       err = dpaa2_eth_setup_default_dpbp(priv);
        if (err)
                goto err_dpbp_setup;
 
 err_alloc_percpu_stats:
        dpaa2_eth_del_ch_napi(priv);
 err_bind:
-       dpaa2_eth_free_dpbp(priv);
+       dpaa2_eth_free_dpbps(priv);
 err_dpbp_setup:
        dpaa2_eth_free_dpio(priv);
 err_dpio_setup:
        free_percpu(priv->percpu_extras);
 
        dpaa2_eth_del_ch_napi(priv);
-       dpaa2_eth_free_dpbp(priv);
+       dpaa2_eth_free_dpbps(priv);
        dpaa2_eth_free_dpio(priv);
        dpaa2_eth_free_dpni(priv);
        if (priv->onestep_reg_base)