This change moves BPF offload program translation from the BPF_OFFLOAD_TRANSLATE ndo_bpf subcommand to a ->translate() callback in struct bpf_prog_offload_ops, invoked by the core under bpf_devs_lock. In drivers/net/ethernet/netronome/nfp/bpf/offload.c, the rtnl assertion drops out of nfp_map_ptr_record():

        struct nfp_bpf_neutral_map *record;
        int err;
 
-       /* Map record paths are entered via ndo, update side is protected. */
-       ASSERT_RTNL();
-
        /* Reuse path - other offloaded program is already tracking this map. */
        record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
                                        nfp_bpf_maps_neutral_params);
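
The teardown side loses its matching assertion in nfp_map_ptrs_forget():
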
        bool freed = false;
        int i;
 
-       ASSERT_RTNL();
-
        for (i = 0; i < nfp_prog->map_records_cnt; i++) {
                if (--nfp_prog->map_records[i]->count) {
                        nfp_prog->map_records[i] = NULL;
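
Both assertions guarded the map-record bookkeeping reached from the translate and destroy paths. Translation is no longer entered via ndo_bpf under rtnl: the core now calls the driver's ->translate() hook while holding bpf_devs_lock for read (see the kernel/bpf/offload.c hunk at the end), so the assertion on the record path would trip, and the forget-path one is dropped with it. Roughly, the load-side call chain after this change looks as follows (a sketch from the in-tree callers, not the complete chain):

        bpf_prog_load()                       /* program load syscall */
          bpf_prog_select_runtime()
            bpf_prog_offload_compile()
              bpf_prog_offload_translate()    /* takes bpf_devs_lock for read */
                offdev->ops->translate()      /* here: nfp_bpf_translate() */
                  nfp_map_ptrs_record()       /* hence no ASSERT_RTNL() */
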
        return ret;
 }
 
-static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
+static int nfp_bpf_translate(struct net_device *netdev, struct bpf_prog *prog)
 {
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
+       struct nfp_net *nn = netdev_priv(netdev);
        unsigned int max_instr;
        int err;
 
 int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
 {
        switch (bpf->command) {
-       case BPF_OFFLOAD_TRANSLATE:
-               return nfp_bpf_translate(nn, bpf->offload.prog);
        case BPF_OFFLOAD_DESTROY:
                return nfp_bpf_destroy(nn, bpf->offload.prog);
        case BPF_OFFLOAD_MAP_ALLOC:
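
...and the new hook is advertised in nfp's offload ops:
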
        .insn_hook      = nfp_verify_insn,
        .finalize       = nfp_bpf_finalize,
        .prepare        = nfp_bpf_verifier_prep,
+       .translate      = nfp_bpf_translate,
 };
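
netdevsim receives the same conversion in drivers/net/netdevsim/bpf.c: the body of its old TRANSLATE case becomes a callback of its own: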
 
        return nsim_bpf_create_prog(ns, env->prog);
 }
 
+static int nsim_bpf_translate(struct net_device *dev, struct bpf_prog *prog)
+{
+       struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv;
+
+       state->state = "xlated";
+       return 0;
+}
+
 static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
 {
        struct nsim_bpf_bound_prog *state;
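
...registered next to the driver's existing hooks:
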
        .insn_hook      = nsim_bpf_verify_insn,
        .finalize       = nsim_bpf_finalize,
        .prepare        = nsim_bpf_verifier_prep,
+       .translate      = nsim_bpf_translate,
 };
 
 static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
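
With the subcommand gone, its case is removed from the dispatch switch (the hunk below is inside nsim_bpf(), not nsim_setup_prog_checks()):
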
        ASSERT_RTNL();
 
        switch (bpf->command) {
-       case BPF_OFFLOAD_TRANSLATE:
-               state = bpf->offload.prog->aux->offload->dev_priv;
-
-               state->state = "xlated";
-               return 0;
        case BPF_OFFLOAD_DESTROY:
                nsim_bpf_destroy_prog(bpf->offload.prog);
                return 0;
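
On the core side, the hook is added to struct bpf_prog_offload_ops in include/linux/bpf.h: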
 
                         int insn_idx, int prev_insn_idx);
        int (*finalize)(struct bpf_verifier_env *env);
        int (*prepare)(struct net_device *netdev, struct bpf_verifier_env *env);
+       int (*translate)(struct net_device *netdev, struct bpf_prog *prog);
 };
 
 struct bpf_prog_offload {
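
With both drivers converted, the subcommand itself can be retired from enum bpf_netdev_command in include/linux/netdevice.h: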
 
        XDP_QUERY_PROG,
        XDP_QUERY_PROG_HW,
        /* BPF program for offload callbacks, invoked at program load time. */
-       BPF_OFFLOAD_TRANSLATE,
        BPF_OFFLOAD_DESTROY,
        BPF_OFFLOAD_MAP_ALLOC,
        BPF_OFFLOAD_MAP_FREE,
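
...and the comment on the offload member of struct netdev_bpf shrinks to the one remaining user:
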
                        /* flags with which program was installed */
                        u32 prog_flags;
                };
-               /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
+               /* BPF_OFFLOAD_DESTROY */
                struct {
                        struct bpf_prog *prog;
                } offload;
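
Finally, in kernel/bpf/offload.c, the translate helper stops bouncing through rtnl and the ndo: it calls the driver directly under bpf_devs_lock, falling back to -ENODEV if the program's device has meanwhile been unregistered (offload == NULL):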
 
 
 static int bpf_prog_offload_translate(struct bpf_prog *prog)
 {
-       struct netdev_bpf data = {};
-       int ret;
-
-       data.offload.prog = prog;
+       struct bpf_prog_offload *offload;
+       int ret = -ENODEV;
 
-       rtnl_lock();
-       ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
-       rtnl_unlock();
+       down_read(&bpf_devs_lock);
+       offload = prog->aux->offload;
+       if (offload)
+               ret = offload->offdev->ops->translate(offload->netdev, prog);
+       up_read(&bpf_devs_lock);
 
        return ret;
 }
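
For context, the caller of this helper is untouched: bpf_prog_offload_compile() in the same file remains a thin wrapper (shown here as it exists in the tree around this change):

        int bpf_prog_offload_compile(struct bpf_prog *prog)
        {
                return bpf_prog_offload_translate(prog);
        }

With that, the whole translation step, driver callback included, runs under bpf_devs_lock with no rtnl involvement.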