        INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
                          mlxsw_sp->ptp_ops->shaper_work);
+       INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
+                         mlxsw_sp_span_speed_update_work);
 
        mlxsw_sp->ports[local_port] = mlxsw_sp_port;
        err = register_netdev(dev);
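
Note the ordering in this hunk: the new delayed work is initialized before register_netdev(), so by the time the netdev is live and a link event can try to schedule it, the work item is already valid. Teardown in the next hunk mirrors this with a synchronous cancel. A minimal, self-contained sketch of that lifecycle, as a hypothetical demo module rather than mlxsw code:

#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_port {
	struct delayed_work speed_update_dw;
};

static struct demo_port demo_port;

static void demo_speed_update_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct demo_port *port = container_of(dwork, struct demo_port,
					      speed_update_dw);

	/* React to the deferred event here. */
	(void)port;
}

static int __init demo_init(void)
{
	/* Initialize before anything can reach and schedule the work. */
	INIT_DELAYED_WORK(&demo_port.speed_update_dw, demo_speed_update_work);
	/* A delay of 0 queues the work for immediate execution. */
	schedule_delayed_work(&demo_port.speed_update_dw, 0);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Returns only once the handler is no longer running. */
	cancel_delayed_work_sync(&demo_port.speed_update_dw);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The next hunk is the matching teardown path in the port-removal function:
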
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
 
        cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
+       cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw);
        cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
        mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
        mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
                netdev_info(mlxsw_sp_port->dev, "link up\n");
                netif_carrier_on(mlxsw_sp_port->dev);
                mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
+               mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0);
        } else {
                netdev_info(mlxsw_sp_port->dev, "link down\n");
                netif_carrier_off(mlxsw_sp_port->dev);
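
Scheduling with a zero delay queues the work for immediate execution on link up; deferring to a workqueue, instead of updating the buffer inline in the event handler, keeps sleeping register transactions out of the event path. mlxsw_core_schedule_dw() presumably wraps queue_delayed_work() on a driver-owned workqueue; a sketch under that assumption (the workqueue name is hypothetical):

bool mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	/* Queue on the driver's dedicated workqueue; delay 0 means
	 * "run as soon as a worker picks it up".
	 */
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}

The remaining hunks below are in the SPAN code itself, where the new worker is defined:
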
 
        return 0;
 }
 
+void mlxsw_sp_span_speed_update_work(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct mlxsw_sp_port *mlxsw_sp_port;
+
+       mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
+                                    span.speed_update_dw);
+
+       /* If the port is egress mirrored, the shared buffer size should
+        * be updated according to the speed value.
+        */
+       if (mlxsw_sp_span_is_egress_mirror(mlxsw_sp_port))
+               mlxsw_sp_span_port_buffsize_update(mlxsw_sp_port,
+                                                  mlxsw_sp_port->dev->mtu);
+}
+
 static struct mlxsw_sp_span_inspected_port *
 mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
                                    enum mlxsw_sp_span_type type,
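
Because mlxsw_sp_span_speed_update_work() is defined here but scheduled and canceled from the port code above, the patch also implies two companion changes that are not visible in these hunks: a prototype in the shared header and the new delayed-work member in the per-port SPAN state. A sketch of both, with file placement and struct layout assumed:

/* in the shared spectrum header (placement assumed) */
void mlxsw_sp_span_speed_update_work(struct work_struct *work);

/* inside struct mlxsw_sp_port (layout assumed) */
struct {
	struct delayed_work speed_update_dw;
} span;

The worker itself stays cheap: it only re-derives the egress-mirror buffer size, via mlxsw_sp_span_port_buffsize_update(), when the port is actually being mirrored, so ports without a SPAN session pay nothing beyond the queued work.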