return skb;
 }
 
+/* Size in bytes of a map-op request cmsg carrying @n key/value pairs.
+ * Element sizes are per-ABI (see cmsg_key_sz/cmsg_val_sz initialization).
+ */
+static unsigned int
+nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n)
+{
+       unsigned int size;
+
+       size = sizeof(struct cmsg_req_map_op);
+       size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;
+
+       return size;
+}
+
 static struct sk_buff *
 nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
+{
+       /* Allocate an skb sized for a map-op request with @n elements */
+       return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n));
+}
+
+/* Size in bytes of a map-op reply cmsg carrying @n key/value pairs */
+static unsigned int
+nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
 {
        unsigned int size;
 
-       size = sizeof(struct cmsg_req_map_op);
-       size += sizeof(struct cmsg_key_value_pair) * n;
+       size = sizeof(struct cmsg_reply_map_op);
+       size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;
 
-       return nfp_bpf_cmsg_alloc(bpf, size);
+       return size;
 }
 
 static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
        dev_consume_skb_any(skb);
 }
 
+/* Pointer to the @n-th key in a request's element array.  Elements are
+ * laid out as alternating key/value slots of cmsg_key_sz/cmsg_val_sz
+ * bytes, so n full (key, value) pairs precede key @n.
+ */
+static void *
+nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
+                    unsigned int n)
+{
+       return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
+}
+
+/* Pointer to the @n-th value in a request's element array: n + 1 keys
+ * and n values precede it (the value follows its own key).
+ */
+static void *
+nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
+                    unsigned int n)
+{
+       return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
+}
+
+/* Pointer to the @n-th key in a reply's element array; same layout as
+ * the request side.
+ */
+static void *
+nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
+                      unsigned int n)
+{
+       return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
+}
+
+/* Pointer to the @n-th value in a reply's element array; same layout as
+ * the request side.
+ */
+static void *
+nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
+                      unsigned int n)
+{
+       return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
+}
+
 static int
 nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
                      enum nfp_bpf_cmsg_type op,
 
        /* Copy inputs */
        if (key)
-               memcpy(&req->elem[0].key, key, map->key_size);
+               memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
        if (value)
-               memcpy(&req->elem[0].value, value, map->value_size);
+               memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
+                      map->value_size);
 
        skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
-                                      sizeof(*reply) + sizeof(*reply->elem));
+                                      nfp_bpf_cmsg_map_reply_size(bpf, 1));
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
 
        /* Copy outputs */
        if (out_key)
-               memcpy(out_key, &reply->elem[0].key, map->key_size);
+               memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
+                      map->key_size);
        if (out_value)
-               memcpy(out_value, &reply->elem[0].value, map->value_size);
+               memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
+                      map->value_size);
 
        dev_consume_skb_any(skb);
 
                                     key, NULL, 0, next_key, NULL);
 }
 
+/* Control channel MTU: large enough for the default MTU as well as a
+ * single-element map request/reply (used to set app->ctrl_mtu when the
+ * ABI supports variable element sizes).
+ */
+unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
+{
+       return max3((unsigned int)NFP_NET_DEFAULT_MTU,
+                   nfp_bpf_cmsg_map_req_size(bpf, 1),
+                   nfp_bpf_cmsg_map_reply_size(bpf, 1));
+}
+
 void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
 {
        struct nfp_app_bpf *bpf = app->priv;
 
        }
 
        bpf->abi_version = readl(value);
-       if (bpf->abi_version != 2) {
+       if (bpf->abi_version < 2 || bpf->abi_version > 3) {
                nfp_warn(bpf->app->cpp, "unsupported BPF ABI version: %d\n",
                         bpf->abi_version);
                bpf->abi_version = 0;
        if (err)
                goto err_free_neutral_maps;
 
+       if (bpf->abi_version < 3) {
+               bpf->cmsg_key_sz = CMSG_MAP_KEY_LW * 4;
+               bpf->cmsg_val_sz = CMSG_MAP_VALUE_LW * 4;
+       } else {
+               bpf->cmsg_key_sz = bpf->maps.max_key_sz;
+               bpf->cmsg_val_sz = bpf->maps.max_val_sz;
+               app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
+       }
+
        bpf->bpf_dev = bpf_offload_dev_create();
        err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
        if (err)
 
  * @cmsg_replies:      received cmsg replies waiting to be consumed
  * @cmsg_wq:           work queue for waiting for cmsg replies
  *
+ * @cmsg_key_sz:       size of key in cmsg element array
+ * @cmsg_val_sz:       size of value in cmsg element array
+ *
  * @map_list:          list of offloaded maps
  * @maps_in_use:       number of currently offloaded maps
  * @map_elems_in_use:  number of elements allocated to offloaded maps
        struct sk_buff_head cmsg_replies;
        struct wait_queue_head cmsg_wq;
 
+       unsigned int cmsg_key_sz;
+       unsigned int cmsg_val_sz;
+
        struct list_head map_list;
        unsigned int maps_in_use;
        unsigned int map_elems_in_use;
 
 void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
 
+unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
 long long int
 nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
 void