#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
 
+#define NFP_MIN_INT_PORT_ID    1
+#define NFP_MAX_INT_PORT_ID    256
+
 static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
 {
        return "FLOWER";
 }
 
 static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
 {
        return DEVLINK_ESWITCH_MODE_SWITCHDEV;
 }
 
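+/* Return the internal port ID already assigned to @netdev, or 0 if the
+ * netdev has no entry (valid IDs start at NFP_MIN_INT_PORT_ID).
+ */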
+static int
+nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
+                                  struct net_device *netdev)
+{
+       struct net_device *entry;
+       int i, id = 0;
+
+       rcu_read_lock();
+       idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
+               if (entry == netdev) {
+                       id = i;
+                       break;
+               }
+       rcu_read_unlock();
+
+       return id;
+}
+
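+/* Return the existing internal port ID for @netdev or allocate a new one.
+ * Callers may be in atomic context, hence the GFP_ATOMIC preload and the
+ * _bh spinlock around the IDR allocation.
+ */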
+static int
+nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
+{
+       struct nfp_flower_priv *priv = app->priv;
+       int id;
+
+       id = nfp_flower_lookup_internal_port_id(priv, netdev);
+       if (id > 0)
+               return id;
+
+       idr_preload(GFP_ATOMIC);
+       spin_lock_bh(&priv->internal_ports.lock);
+       id = idr_alloc(&priv->internal_ports.port_ids, netdev,
+                      NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
+       spin_unlock_bh(&priv->internal_ports.lock);
+       idr_preload_end();
+
+       return id;
+}
+
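+/* Resolve the port ID used in offloaded rules for @netdev: reprs report
+ * their own port ID, offloadable internal ports have their IDR entry
+ * translated into the internal port ID space, and all other netdevs
+ * yield 0.
+ */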
+u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
+                                      struct net_device *netdev)
+{
+       int ext_port;
+
+       if (nfp_netdev_is_nfp_repr(netdev)) {
+               return nfp_repr_get_port_id(netdev);
+       } else if (nfp_flower_internal_port_can_offload(app, netdev)) {
+               ext_port = nfp_flower_get_internal_port_id(app, netdev);
+               if (ext_port < 0)
+                       return 0;
+
+               return nfp_flower_internal_port_get_port_id(ext_port);
+       }
+
+       return 0;
+}
+
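+/* Release the internal port ID held by @netdev, if it has one. */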
+static void
+nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
+{
+       struct nfp_flower_priv *priv = app->priv;
+       int id;
+
+       id = nfp_flower_lookup_internal_port_id(priv, netdev);
+       if (!id)
+               return;
+
+       spin_lock_bh(&priv->internal_ports.lock);
+       idr_remove(&priv->internal_ports.port_ids, id);
+       spin_unlock_bh(&priv->internal_ports.lock);
+}
+
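+/* Netdev notifier: drop an internal port's ID once its netdev unregisters. */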
+static int
+nfp_flower_internal_port_event_handler(struct nfp_app *app,
+                                      struct net_device *netdev,
+                                      unsigned long event)
+{
+       if (event == NETDEV_UNREGISTER &&
+           nfp_flower_internal_port_can_offload(app, netdev))
+               nfp_flower_free_internal_port_id(app, netdev);
+
+       return NOTIFY_OK;
+}
+
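+/* The IDR is only initialised when the firmware advertises flow merge
+ * support; teardown is gated on the same feature flag.
+ */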
+static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
+{
+       spin_lock_init(&priv->internal_ports.lock);
+       idr_init(&priv->internal_ports.port_ids);
+}
+
+static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
+{
+       idr_destroy(&priv->internal_ports.port_ids);
+}
+
 static struct nfp_flower_non_repr_priv *
 nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
 {
                /* Tell the firmware that the driver supports flow merging. */
                err = nfp_rtsym_write_le(app->pf->rtbl,
                                         "_abi_flower_merge_hint_enable", 1);
-               if (!err)
+               if (!err) {
                        app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE;
-               else if (err == -ENOENT)
+                       nfp_flower_internal_port_init(app_priv);
+               } else if (err == -ENOENT) {
                        nfp_warn(app->cpp, "Flow merge not supported by FW.\n");
-               else
+               } else {
                        goto err_lag_clean;
+               }
        } else {
                nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
        }
        if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
                nfp_flower_lag_cleanup(&app_priv->nfp_lag);
 
+       if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)
+               nfp_flower_internal_port_cleanup(app_priv);
+
        nfp_flower_metadata_cleanup(app);
        vfree(app->priv);
        app->priv = NULL;
        if (ret & NOTIFY_STOP_MASK)
                return ret;
 
+       ret = nfp_flower_internal_port_event_handler(app, netdev, event);
+       if (ret & NOTIFY_STOP_MASK)
+               return ret;
+
        return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
 }
 
 
        struct sk_buff_head retrans_skbs;
 };
 
+/**
+ * struct nfp_fl_internal_ports - Flower APP priv data for additional ports
+ * @port_ids:  Assignment of ids to any additional ports
+ * @lock:      Lock for extra ports list
+ */
+struct nfp_fl_internal_ports {
+       struct idr port_ids;
+       spinlock_t lock;
+};
+
 /**
  * struct nfp_flower_priv - Flower APP per-vNIC priv data
  * @app:               Back pointer to app
  * @non_repr_priv:     List of offloaded non-repr ports and their priv data
  * @active_mem_unit:   Current active memory unit for flower rules
  * @total_mem_units:   Total number of available memory units for flower rules
+ * @internal_ports:    Internal port ids used in offloaded rules
  */
 struct nfp_flower_priv {
        struct nfp_app *app;
        struct list_head non_repr_priv;
        unsigned int active_mem_unit;
        unsigned int total_mem_units;
+       struct nfp_fl_internal_ports internal_ports;
 };
 
        __be64 stats_cookie;
 };
 
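+/* Matches on 'internal' ports can only be offloaded when the firmware
+ * supports flow merge and the netdev is an OvS internal port (rtnl link
+ * kind "openvswitch").
+ */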
+static inline bool
+nfp_flower_internal_port_can_offload(struct nfp_app *app,
+                                    struct net_device *netdev)
+{
+       struct nfp_flower_priv *app_priv = app->priv;
+
+       if (!(app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE))
+               return false;
+       if (!netdev->rtnl_link_ops)
+               return false;
+       if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+               return true;
+
+       return false;
+}
+
 int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
                             unsigned int host_ctx_split);
 void nfp_flower_metadata_cleanup(struct nfp_app *app);
 void
 __nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv);
 void
 nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
+u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
+                                      struct net_device *netdev);
 #endif
 
                                  struct nfp_fl_payload *nfp_flow,
                                  enum nfp_flower_tun_type tun_type)
 {
-       u32 cmsg_port = 0;
+       u32 port_id;
        int err;
        u8 *ext;
        u8 *msk;
 
-       if (nfp_netdev_is_nfp_repr(netdev))
-               cmsg_port = nfp_repr_get_port_id(netdev);
+       port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
 
        memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
        memset(nfp_flow->mask_data, 0, key_ls->key_size);
 
        /* Populate Exact Port data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
-                                     cmsg_port, false, tun_type);
+                                     port_id, false, tun_type);
        if (err)
                return err;
 
        /* Populate Mask Port Data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
-                                     cmsg_port, true, tun_type);
+                                     port_id, true, tun_type);
        if (err)
                return err;