#define NFP_FL_MAX_ROUTES               32
 
+#define NFP_TUN_PRE_TUN_RULE_LIMIT     32
+#define NFP_TUN_PRE_TUN_RULE_DEL       0x1
+
+/**
+ * struct nfp_tun_pre_tun_rule - rule matched before decap
+ * @flags:             options for the rule offload (e.g. NFP_TUN_PRE_TUN_RULE_DEL)
+ * @port_idx:          index of destination MAC address for the rule
+ * @vlan_tci:          VLAN info associated with MAC
+ * @host_ctx_id:       stats context of rule to update
+ */
+struct nfp_tun_pre_tun_rule {
+       __be32 flags;
+       __be16 port_idx;
+       __be16 vlan_tci;
+       __be32 host_ctx_id;
+};
+
 /**
  * struct nfp_tun_active_tuns - periodic message of active tunnels
  * @seq:               sequence number of the message
 int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
                                 struct nfp_fl_payload *flow)
 {
-       return -EOPNOTSUPP;
+       struct nfp_flower_priv *app_priv = app->priv;
+       struct nfp_tun_offloaded_mac *mac_entry;
+       struct nfp_tun_pre_tun_rule payload;
+       struct net_device *internal_dev;
+       int err;
+
+       if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
+               return -ENOSPC;
+
+       memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
+
+       internal_dev = flow->pre_tun_rule.dev;
+       payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
+       payload.host_ctx_id = flow->meta.host_ctx_id;
+
+       /* Lookup MAC index for the pre-tunnel rule egress device.
+        * Note that because the device is always an internal port, it will
+        * have a constant global index so does not need to be tracked.
+        */
+       mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
+                                                    internal_dev->dev_addr);
+       if (!mac_entry)
+               return -ENOENT;
+
+       payload.port_idx = cpu_to_be16(mac_entry->index);
+
+       /* Cache MAC index and VLAN on the flow - dev may not exist at delete time. */
+       flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
+       flow->pre_tun_rule.port_idx = payload.port_idx;
+
+       err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
+                                      sizeof(struct nfp_tun_pre_tun_rule),
+                                      (unsigned char *)&payload, GFP_KERNEL);
+       if (err)
+               return err;
+
+       app_priv->pre_tun_rule_cnt++;
+
+       return 0;
 }
 
 int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
                                     struct nfp_fl_payload *flow)
 {
-       return -EOPNOTSUPP;
+       struct nfp_flower_priv *app_priv = app->priv;
+       struct nfp_tun_pre_tun_rule payload;
+       u32 tmp_flags = 0;
+       int err;
+
+       memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
+
+       /* DEL flag instructs firmware to remove this pre-tunnel rule. */
+       tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
+       payload.flags = cpu_to_be32(tmp_flags);
+       payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
+       payload.port_idx = flow->pre_tun_rule.port_idx;
+
+       err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
+                                      sizeof(struct nfp_tun_pre_tun_rule),
+                                      (unsigned char *)&payload, GFP_KERNEL);
+       if (err)
+               return err;
+
+       app_priv->pre_tun_rule_cnt--;
+
+       return 0;
 }
 
 int nfp_tunnel_config_start(struct nfp_app *app)