return err;
 }
 
+static u16
+ice_eswitch_br_get_lkups_cnt(u16 vid)
+{
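+       /* A valid VID needs an extra lookup element for the VLAN match,
+        * in addition to the MAC match.
+        */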
+       return ice_eswitch_br_is_vid_valid(vid) ? 2 : 1;
+}
+
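+/* Fill the second lookup element with an outer VLAN (ICE_VLAN_OFOS) match
+ * on the given VID when it is valid; callers size the lookup list with
+ * ice_eswitch_br_get_lkups_cnt().
+ */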
+static void
+ice_eswitch_br_add_vlan_lkup(struct ice_adv_lkup_elem *list, u16 vid)
+{
+       if (ice_eswitch_br_is_vid_valid(vid)) {
+               list[1].type = ICE_VLAN_OFOS;
+               list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
+               list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+       }
+}
+
 static struct ice_rule_query_data *
 ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
-                              const unsigned char *mac)
+                              const unsigned char *mac, u16 vid)
 {
        struct ice_adv_rule_info rule_info = { 0 };
        struct ice_rule_query_data *rule;
        struct ice_adv_lkup_elem *list;
-       u16 lkups_cnt = 1;
+       u16 lkups_cnt;
        int err;
 
+       lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);
+
        rule = kzalloc(sizeof(*rule), GFP_KERNEL);
        if (!rule)
                return ERR_PTR(-ENOMEM);
        ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac);
        eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr);
 
+       ice_eswitch_br_add_vlan_lkup(list, vid);
+
        rule_info.need_pass_l2 = true;
 
        rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
 
 static struct ice_rule_query_data *
 ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
-                                const unsigned char *mac)
+                                const unsigned char *mac, u16 vid)
 {
        struct ice_adv_rule_info rule_info = { 0 };
        struct ice_rule_query_data *rule;
        struct ice_adv_lkup_elem *list;
-       const u16 lkups_cnt = 1;
        int err = -ENOMEM;
+       u16 lkups_cnt;
+
+       lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);
 
        rule = kzalloc(sizeof(*rule), GFP_KERNEL);
        if (!rule)
        ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
        eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
 
+       ice_eswitch_br_add_vlan_lkup(list, vid);
+
        rule_info.allow_pass_l2 = true;
        rule_info.sw_act.vsi_handle = vsi_idx;
        rule_info.sw_act.fltr_act = ICE_NOP;
 
 static struct ice_esw_br_flow *
 ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
-                          int port_type, const unsigned char *mac)
+                          int port_type, const unsigned char *mac, u16 vid)
 {
        struct ice_rule_query_data *fwd_rule, *guard_rule;
        struct ice_esw_br_flow *flow;
        if (!flow)
                return ERR_PTR(-ENOMEM);
 
-       fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac);
+       fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac,
+                                                 vid);
        err = PTR_ERR_OR_ZERO(fwd_rule);
        if (err) {
                dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n",
                goto err_fwd_rule;
        }
 
-       guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac);
+       guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid);
        err = PTR_ERR_OR_ZERO(guard_rule);
        if (err) {
                dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n",
        kfree(flow);
 }
 
+static struct ice_esw_br_vlan *
+ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
+{
+       struct ice_pf *pf = bridge->br_offloads->pf;
+       struct device *dev = ice_pf_to_dev(pf);
+       struct ice_esw_br_port *port;
+       struct ice_esw_br_vlan *vlan;
+
+       port = xa_load(&bridge->ports, vsi_idx);
+       if (!port) {
+               dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx);
+               return ERR_PTR(-EINVAL);
+       }
+
+       vlan = xa_load(&port->vlans, vid);
+       if (!vlan) {
+               dev_info(dev, "Bridge port VLAN metadata lookup failed (vsi=%u, vid=%u)\n",
+                        vsi_idx, vid);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return vlan;
+}
+
 static void
 ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge,
                                struct ice_esw_br_fdb_entry *fdb_entry)
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_esw_br_fdb_entry *fdb_entry;
        struct ice_esw_br_flow *flow;
+       struct ice_esw_br_vlan *vlan;
        struct ice_hw *hw = &pf->hw;
        unsigned long event;
        int err;
 
+       /* untagged filtering is not yet supported */
+       if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid)
+               return;
+
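+       /* With VLAN filtering enabled, only offload entries whose VID is
+        * configured on the bridge port.
+        */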
+       if (bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) {
+               vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx,
+                                                  vid);
+               if (IS_ERR(vlan)) {
+                       dev_err(dev, "VLAN lookup failed, err: %ld\n",
+                               PTR_ERR(vlan));
+                       return;
+               }
+       }
+
        fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
        if (fdb_entry)
                ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
        }
 
        flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx,
-                                         br_port->type, mac);
+                                         br_port->type, mac, vid);
        if (IS_ERR(flow)) {
                err = PTR_ERR(flow);
                goto err_add_flow;
        return NOTIFY_DONE;
 }
 
+static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
+{
+       struct ice_esw_br_fdb_entry *entry, *tmp;
+
+       list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
+               ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
+}
+
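+/* Switching VLAN filtering on or off invalidates the offloaded FDB
+ * entries, so flush them before updating the bridge flag.
+ */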
+static void
+ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable)
+{
+       if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING))
+               return;
+
+       ice_eswitch_br_fdb_flush(bridge);
+       if (enable)
+               bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING;
+       else
+               bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING;
+}
+
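+/* Remove all offloaded FDB entries matching the VLAN's VID, then drop
+ * the VLAN from the port and free it.
+ */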
+static void
+ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port,
+                           struct ice_esw_br_vlan *vlan)
+{
+       struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
+       struct ice_esw_br *bridge = port->bridge;
+
+       list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
+               if (vlan->vid == fdb_entry->data.vid)
+                       ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
+       }
+
+       xa_erase(&port->vlans, vlan->vid);
+       kfree(vlan);
+}
+
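+/* Release every VLAN configured on the bridge port. */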
+static void ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port)
+{
+       struct ice_esw_br_vlan *vlan;
+       unsigned long index;
+
+       xa_for_each(&port->vlans, index, vlan)
+               ice_eswitch_br_vlan_cleanup(port, vlan);
+}
+
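+/* Allocate per-port VLAN metadata and store it in the port's VLAN
+ * xarray, keyed by VID.
+ */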
+static struct ice_esw_br_vlan *
+ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port)
+{
+       struct ice_esw_br_vlan *vlan;
+       int err;
+
+       vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+       if (!vlan)
+               return ERR_PTR(-ENOMEM);
+
+       vlan->vid = vid;
+       vlan->flags = flags;
+
+       err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL);
+       if (err) {
+               kfree(vlan);
+               return ERR_PTR(err);
+       }
+
+       return vlan;
+}
+
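+/* Add a VLAN to a bridge port. If the VID already exists with different
+ * flags, the old entry is cleaned up and replaced.
+ */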
+static int
+ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid,
+                            u16 flags, struct netlink_ext_ack *extack)
+{
+       struct ice_esw_br_port *port;
+       struct ice_esw_br_vlan *vlan;
+
+       port = xa_load(&bridge->ports, vsi_idx);
+       if (!port)
+               return -EINVAL;
+
+       vlan = xa_load(&port->vlans, vid);
+       if (vlan) {
+               if (vlan->flags == flags)
+                       return 0;
+
+               ice_eswitch_br_vlan_cleanup(port, vlan);
+       }
+
+       vlan = ice_eswitch_br_vlan_create(vid, flags, port);
+       if (IS_ERR(vlan)) {
+               NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u",
+                                      vid, vsi_idx);
+               return PTR_ERR(vlan);
+       }
+
+       return 0;
+}
+
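+/* Remove a VLAN from a bridge port; unknown ports or VIDs are ignored. */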
+static void
+ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
+{
+       struct ice_esw_br_port *port;
+       struct ice_esw_br_vlan *vlan;
+
+       port = xa_load(&bridge->ports, vsi_idx);
+       if (!port)
+               return;
+
+       vlan = xa_load(&port->vlans, vid);
+       if (!vlan)
+               return;
+
+       ice_eswitch_br_vlan_cleanup(port, vlan);
+}
+
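+/* Switchdev blocking handler for port object additions; only
+ * SWITCHDEV_OBJ_ID_PORT_VLAN is supported.
+ */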
+static int
+ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx,
+                           const struct switchdev_obj *obj,
+                           struct netlink_ext_ack *extack)
+{
+       struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+       struct switchdev_obj_port_vlan *vlan;
+       int err;
+
+       if (!br_port)
+               return -EINVAL;
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_ID_PORT_VLAN:
+               vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+               err = ice_eswitch_br_port_vlan_add(br_port->bridge,
+                                                  br_port->vsi_idx, vlan->vid,
+                                                  vlan->flags, extack);
+               return err;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int
+ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx,
+                           const struct switchdev_obj *obj)
+{
+       struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+       struct switchdev_obj_port_vlan *vlan;
+
+       if (!br_port)
+               return -EINVAL;
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_ID_PORT_VLAN:
+               vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+               ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx,
+                                            vlan->vid);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int
+ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx,
+                                const struct switchdev_attr *attr,
+                                struct netlink_ext_ack *extack)
+{
+       struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+
+       if (!br_port)
+               return -EINVAL;
+
+       switch (attr->id) {
+       case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+               ice_eswitch_br_vlan_filtering_set(br_port->bridge,
+                                                 attr->u.vlan_filtering);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
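+/* Blocking switchdev notifier: dispatch port object add/del and attribute
+ * set events to the bridge offload handlers.
+ */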
+static int
+ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event,
+                             void *ptr)
+{
+       struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+       int err;
+
+       switch (event) {
+       case SWITCHDEV_PORT_OBJ_ADD:
+               err = switchdev_handle_port_obj_add(dev, ptr,
+                                                   ice_eswitch_br_is_dev_valid,
+                                                   ice_eswitch_br_port_obj_add);
+               break;
+       case SWITCHDEV_PORT_OBJ_DEL:
+               err = switchdev_handle_port_obj_del(dev, ptr,
+                                                   ice_eswitch_br_is_dev_valid,
+                                                   ice_eswitch_br_port_obj_del);
+               break;
+       case SWITCHDEV_PORT_ATTR_SET:
+               err = switchdev_handle_port_attr_set(dev, ptr,
+                                                    ice_eswitch_br_is_dev_valid,
+                                                    ice_eswitch_br_port_obj_attr_set);
+               break;
+       default:
+               err = 0;
+       }
+
+       return notifier_from_errno(err);
+}
+
 static void
 ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
                           struct ice_esw_br_port *br_port)
                vsi->vf->repr->br_port = NULL;
 
        xa_erase(&bridge->ports, br_port->vsi_idx);
+       ice_eswitch_br_port_vlans_flush(br_port);
        kfree(br_port);
 }
 
        if (!br_port)
                return ERR_PTR(-ENOMEM);
 
+       xa_init(&br_port->vlans);
+
        br_port->bridge = bridge;
 
        return br_port;
                return;
 
        unregister_netdevice_notifier(&br_offloads->netdev_nb);
+       unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
        unregister_switchdev_notifier(&br_offloads->switchdev_nb);
        destroy_workqueue(br_offloads->wq);
        /* Although notifier block is unregistered just before,
                goto err_reg_switchdev_nb;
        }
 
+       br_offloads->switchdev_blk.notifier_call =
+               ice_eswitch_br_event_blocking;
+       err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
+       if (err) {
+               dev_err(dev,
+                       "Failed to register bridge blocking switchdev notifier\n");
+               goto err_reg_switchdev_blk;
+       }
+
        br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event;
        err = register_netdevice_notifier(&br_offloads->netdev_nb);
        if (err) {
        return 0;
 
 err_reg_netdev_nb:
+       unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
+err_reg_switchdev_blk:
        unregister_switchdev_notifier(&br_offloads->switchdev_nb);
 err_reg_switchdev_nb:
        destroy_workqueue(br_offloads->wq);