* @cmsg_skbs:         List of skbs for control message processing
  * @nfp_mac_off_list:  List of MAC addresses to offload
  * @nfp_mac_index_list:        List of unique 8-bit indexes for non NFP netdevs
+ * @nfp_ipv4_off_list: List of IPv4 addresses to offload
  * @nfp_mac_off_lock:  Lock for the MAC address list
  * @nfp_mac_index_lock:        Lock for the MAC index list
+ * @nfp_ipv4_off_lock: Lock for the IPv4 address list
  * @nfp_mac_off_ids:   IDA to manage id assignment for offloaded macs
  * @nfp_mac_off_count: Number of MACs in address list
  * @nfp_tun_mac_nb:    Notifier to monitor link state
        struct sk_buff_head cmsg_skbs;
        struct list_head nfp_mac_off_list;
        struct list_head nfp_mac_index_list;
+       struct list_head nfp_ipv4_off_list;
        struct mutex nfp_mac_off_lock;
        struct mutex nfp_mac_index_lock;
+       struct mutex nfp_ipv4_off_lock;
        struct ida nfp_mac_off_ids;
        int nfp_mac_off_count;
        struct notifier_block nfp_tun_mac_nb;
        struct rcu_head rcu;
        spinlock_t lock; /* lock stats */
        struct nfp_fl_stats stats;
+       __be32 nfp_tun_ipv4_addr;
        char *unmasked_data;
        char *mask_data;
        char *action_data;
 int nfp_tunnel_config_start(struct nfp_app *app);
 void nfp_tunnel_config_stop(struct nfp_app *app);
 void nfp_tunnel_write_macs(struct nfp_app *app);
+void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
+void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
 
 #endif
 
 static void
 nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
                         struct tc_cls_flower_offload *flow,
-                        bool mask_version)
+                        bool mask_version, __be32 *tun_dst)
 {
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ipv4_addrs *vxlan_ips;
                                             target);
                frame->ip_src = vxlan_ips->src;
                frame->ip_dst = vxlan_ips->dst;
+               *tun_dst = vxlan_ips->dst;
        }
 }
 
                                  struct nfp_fl_payload *nfp_flow)
 {
        enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
+       __be32 tun_dst, tun_dst_mask = 0;
        struct nfp_repr *netdev_repr;
        int err;
        u8 *ext;
        if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
                /* Populate Exact VXLAN Data. */
                nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
-                                        flow, false);
+                                        flow, false, &tun_dst);
                /* Populate Mask VXLAN Data. */
                nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
-                                        flow, true);
+                                        flow, true, &tun_dst_mask);
                ext += sizeof(struct nfp_flower_vxlan);
                msk += sizeof(struct nfp_flower_vxlan);
 
                if (nfp_netdev_is_nfp_repr(netdev)) {
                        netdev_repr = netdev_priv(netdev);
                        nfp_tunnel_write_macs(netdev_repr->app);
+
+                       /* Store the tunnel destination in the rule data.
+                        * This must be present and be an exact match.
+                        */
+                       nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+                       nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
                }
        }
 
 
  */
 
 #include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
 #include <linux/idr.h>
 #include <net/dst_metadata.h>
 
 #include "../nfp_net_repr.h"
 #include "../nfp_net.h"
 
+/* Maximum number of tunnel destination IPs the firmware list can hold. */
+#define NFP_FL_IPV4_ADDRS_MAX        32
+
+/**
+ * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
+ * @count:     number of IPs populated in the array
+ * @ipv4_addr: array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
+ *
+ * Payload layout of the NFP_FLOWER_CMSG_TYPE_TUN_IPS control message
+ * built in nfp_tun_write_ipv4_list(). Unused trailing slots are sent
+ * zeroed; only the first @count entries are valid.
+ */
+struct nfp_tun_ipv4_addr {
+       __be32 count;
+       __be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
+};
+
+/**
+ * struct nfp_ipv4_addr_entry - cached IPv4 addresses
+ * @ipv4_addr: IP address
+ * @ref_count: number of rules currently using this IP
+ * @list:      list pointer
+ *
+ * Entries live on the per-app nfp_ipv4_off_list; both membership and
+ * @ref_count are protected by the nfp_ipv4_off_lock mutex.
+ */
+struct nfp_ipv4_addr_entry {
+       __be32 ipv4_addr;
+       int ref_count;
+       struct list_head list;
+};
+
 /**
  * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
  * @reserved:  reserved for future use
        return 0;
 }
 
+/**
+ * nfp_tun_write_ipv4_list() - push the cached tunnel IPs to the firmware
+ * @app:       app context whose priv holds the IPv4 offload list
+ *
+ * Snapshot nfp_ipv4_off_list into a struct nfp_tun_ipv4_addr payload under
+ * nfp_ipv4_off_lock, then send it as a NFP_FLOWER_CMSG_TYPE_TUN_IPS control
+ * message after the lock is dropped. If the list holds more than
+ * NFP_FL_IPV4_ADDRS_MAX entries, warn and send nothing, leaving the
+ * firmware with its previously written list.
+ */
+static void nfp_tun_write_ipv4_list(struct nfp_app *app)
+{
+       struct nfp_flower_priv *priv = app->priv;
+       struct nfp_ipv4_addr_entry *entry;
+       struct nfp_tun_ipv4_addr payload;
+       struct list_head *ptr, *storage;
+       int count;
+
+       memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
+       mutex_lock(&priv->nfp_ipv4_off_lock);
+       count = 0;
+       list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+               if (count >= NFP_FL_IPV4_ADDRS_MAX) {
+                       mutex_unlock(&priv->nfp_ipv4_off_lock);
+                       nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
+                       return;
+               }
+               entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+               payload.ipv4_addr[count++] = entry->ipv4_addr;
+       }
+       payload.count = cpu_to_be32(count);
+       mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+       /* Transmit outside the lock; payload is a private on-stack copy. */
+       nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
+                                sizeof(struct nfp_tun_ipv4_addr),
+                                &payload);
+}
+
+/**
+ * nfp_tunnel_add_ipv4_off() - take a reference on an offloaded tunnel IP
+ * @app:       app context whose priv holds the IPv4 offload list
+ * @ipv4:      tunnel destination IPv4 address (big endian)
+ *
+ * If @ipv4 is already cached, only its ref_count is incremented and the
+ * firmware list is left untouched. Otherwise a new entry with ref_count 1
+ * is appended and the full list is rewritten to the firmware. On allocation
+ * failure a warning is logged and the address is not offloaded.
+ */
+void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
+{
+       struct nfp_flower_priv *priv = app->priv;
+       struct nfp_ipv4_addr_entry *entry;
+       struct list_head *ptr, *storage;
+
+       mutex_lock(&priv->nfp_ipv4_off_lock);
+       list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+               entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+               if (entry->ipv4_addr == ipv4) {
+                       /* Already offloaded - just count the extra user. */
+                       entry->ref_count++;
+                       mutex_unlock(&priv->nfp_ipv4_off_lock);
+                       return;
+               }
+       }
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry) {
+               mutex_unlock(&priv->nfp_ipv4_off_lock);
+               nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
+               return;
+       }
+       entry->ipv4_addr = ipv4;
+       entry->ref_count = 1;
+       list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
+       mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+       /* List changed - sync the new contents to the firmware. */
+       nfp_tun_write_ipv4_list(app);
+}
+
+/**
+ * nfp_tunnel_del_ipv4_off() - drop a reference on an offloaded tunnel IP
+ * @app:       app context whose priv holds the IPv4 offload list
+ * @ipv4:      tunnel destination IPv4 address (big endian)
+ *
+ * Decrement the matching entry's ref_count, removing and freeing the entry
+ * when it reaches zero. The list is rewritten to the firmware
+ * unconditionally, even if no entry matched or the entry is still in use.
+ */
+void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
+{
+       struct nfp_flower_priv *priv = app->priv;
+       struct nfp_ipv4_addr_entry *entry;
+       struct list_head *ptr, *storage;
+
+       mutex_lock(&priv->nfp_ipv4_off_lock);
+       list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+               entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+               if (entry->ipv4_addr == ipv4) {
+                       entry->ref_count--;
+                       if (!entry->ref_count) {
+                               /* Last user gone - safe to delete while
+                                * iterating thanks to list_for_each_safe.
+                                */
+                               list_del(&entry->list);
+                               kfree(entry);
+                       }
+                       break;
+               }
+       }
+       mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+       nfp_tun_write_ipv4_list(app);
+}
+
 void nfp_tunnel_write_macs(struct nfp_app *app)
 {
        struct nfp_flower_priv *priv = app->priv;
        INIT_LIST_HEAD(&priv->nfp_mac_index_list);
        ida_init(&priv->nfp_mac_off_ids);
 
+       /* Initialise priv data for IPv4 offloading. */
+       mutex_init(&priv->nfp_ipv4_off_lock);
+       INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);
+
        err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
        if (err)
                goto err_free_mac_ida;
        struct nfp_tun_mac_offload_entry *mac_entry;
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_tun_mac_non_nfp_idx *mac_idx;
+       struct nfp_ipv4_addr_entry *ip_entry;
        struct list_head *ptr, *storage;
 
        unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
        mutex_unlock(&priv->nfp_mac_index_lock);
 
        ida_destroy(&priv->nfp_mac_off_ids);
+
+       /* Free any memory that may be occupied by ipv4 list. */
+       mutex_lock(&priv->nfp_ipv4_off_lock);
+       list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+               ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+               list_del(&ip_entry->list);
+               kfree(ip_entry);
+       }
+       mutex_unlock(&priv->nfp_ipv4_off_lock);
 }