return 0;
 }
 
+/* Keep flooding enabled towards this port's CPU port as long as it serves at
+ * least one port in the tree that requires it.
+ */
+static void dsa_port_manage_cpu_flood(struct dsa_port *dp)
+{
+       struct switchdev_brport_flags flags = {
+               .mask = BR_FLOOD | BR_MCAST_FLOOD,
+       };
+       struct dsa_switch_tree *dst = dp->ds->dst;
+       struct dsa_port *cpu_dp = dp->cpu_dp;
+       struct dsa_port *other_dp;
+       int err;
+
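+       /* Aggregate what every user port served by this CPU port currently
+        * needs: IFF_ALLMULTI requires multicast flooding, IFF_PROMISC
+        * requires unknown unicast flooding.
+        */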
+       list_for_each_entry(other_dp, &dst->ports, list) {
+               if (!dsa_port_is_user(other_dp))
+                       continue;
+
+               if (other_dp->cpu_dp != cpu_dp)
+                       continue;
+
+               if (other_dp->slave->flags & IFF_ALLMULTI)
+                       flags.val |= BR_MCAST_FLOOD;
+               if (other_dp->slave->flags & IFF_PROMISC)
+                       flags.val |= BR_FLOOD;
+       }
+
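+       /* Check that the driver accepts the requested flag combination
+        * before applying it to the CPU port.
+        */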
+       err = dsa_port_pre_bridge_flags(dp, flags, NULL);
+       if (err)
+               return;
+
+       dsa_port_bridge_flags(cpu_dp, flags, NULL);
+}
+
 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
 {
        struct net_device *master = dsa_slave_to_master(dev);
+       struct dsa_port *dp = dsa_slave_to_port(dev);
+       struct dsa_switch *ds = dp->ds;
 
        if (change & IFF_ALLMULTI)
                dev_set_allmulti(master,
                                 dev->flags & IFF_ALLMULTI ? 1 : -1);
        if (change & IFF_PROMISC)
                dev_set_promiscuity(master,
                                    dev->flags & IFF_PROMISC ? 1 : -1);
+
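+       /* Flooding towards the CPU port is only managed dynamically on
+        * switches able to filter both unicast and multicast addresses
+        * in standalone mode.
+        */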
+       if (dsa_switch_supports_uc_filtering(ds) &&
+           dsa_switch_supports_mc_filtering(ds))
+               dsa_port_manage_cpu_flood(dp);
 }
 
 static void dsa_slave_set_rx_mode(struct net_device *dev)