int nfp_bpf_jit(struct nfp_prog *prog);
 
-extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
+extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
 
 struct netdev_bpf;
 struct nfp_app;
 
        return 0;
 }
 
-const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+/* Per-instruction verifier callbacks the NFP driver exposes to the
+ * BPF offload core (invoked via ->insn_hook during verification).
+ */
+const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
        .insn_hook = nfp_verify_insn,
 };
 
        return 0;
 }
 
-static const struct bpf_ext_analyzer_ops nsim_bpf_analyzer_ops = {
+/* netdevsim's verifier callbacks for offloaded programs. */
+static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
        .insn_hook = nsim_bpf_verify_insn,
 };
 
 
 #include <linux/numa.h>
 #include <linux/wait.h>
 
+struct bpf_verifier_env;
 struct perf_event;
 struct bpf_prog;
 struct bpf_map;
                                  struct bpf_prog *prog, u32 *target_size);
 };
 
+/* Callbacks a device driver supplies so the core verifier can consult
+ * it while checking an offloaded program.
+ *
+ * @insn_hook: called for each instruction as the verifier walks the
+ *             program; a non-zero return aborts verification.
+ */
+struct bpf_prog_offload_ops {
+       int (*insn_hook)(struct bpf_verifier_env *env,
+                        int insn_idx, int prev_insn_idx);
+};
+
 struct bpf_dev_offload {
        struct bpf_prog         *prog;
        struct net_device       *netdev;
        void                    *dev_priv;
        struct list_head        offloads;
        bool                    dev_state;
-       bool                    verifier_running;
-       wait_queue_head_t       verifier_done;
+       /* Driver's verifier callbacks; set from the device's
+        * BPF_OFFLOAD_VERIFIER_PREP response.
+        */
+       const struct bpf_prog_offload_ops *dev_ops;
 };
 
 struct bpf_prog_aux {
 
        return log->len_used >= log->len_total - 1;
 }
 
-struct bpf_verifier_env;
-struct bpf_ext_analyzer_ops {
-       int (*insn_hook)(struct bpf_verifier_env *env,
-                        int insn_idx, int prev_insn_idx);
-};
-
 #define BPF_MAX_SUBPROGS 256
 
 /* single container for all structs
        bool strict_alignment;          /* perform strict pointer alignment checks */
        struct bpf_verifier_state *cur_state; /* current verifier state */
        struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
-       const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */
        struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
        u32 used_map_cnt;               /* number of used maps */
        u32 id_gen;                     /* used to generate unique reg IDs */
        return cur->frame[cur->curframe]->regs;
 }
 
-#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
-#else
-static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
-{
-       return -EOPNOTSUPP;
-}
-#endif
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+                                int insn_idx, int prev_insn_idx);
 
 #endif /* _LINUX_BPF_VERIFIER_H */
 
        BPF_OFFLOAD_DESTROY,
 };
 
-struct bpf_ext_analyzer_ops;
+struct bpf_prog_offload_ops;
 struct netlink_ext_ack;
 
 struct netdev_bpf {
                /* BPF_OFFLOAD_VERIFIER_PREP */
                struct {
                        struct bpf_prog *prog;
-                       const struct bpf_ext_analyzer_ops *ops; /* callee set */
+                       const struct bpf_prog_offload_ops *ops; /* callee set */
                } verifier;
                /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
                struct {
 
                return -ENOMEM;
 
        offload->prog = prog;
-       init_waitqueue_head(&offload->verifier_done);
 
        offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
                                           attr->prog_ifindex);
        if (err)
                goto exit_unlock;
 
-       env->dev_ops = data.verifier.ops;
-
+       env->prog->aux->offload->dev_ops = data.verifier.ops;
        env->prog->aux->offload->dev_state = true;
-       env->prog->aux->offload->verifier_running = true;
 exit_unlock:
        rtnl_unlock();
        return err;
 }
 
+/* Invoke the offload device's per-instruction verifier hook.
+ *
+ * The program's offload state is read under bpf_devs_lock (read side)
+ * so the netdev pointer can be tested safely; if the device has gone
+ * away (offload->netdev cleared) -ENODEV is returned, otherwise the
+ * return value of the driver's ->insn_hook() is propagated.
+ */
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+                                int insn_idx, int prev_insn_idx)
+{
+       struct bpf_dev_offload *offload;
+       int ret = -ENODEV;
+
+       down_read(&bpf_devs_lock);
+       offload = env->prog->aux->offload;
+       /* Only call into the driver while the device is still present. */
+       if (offload->netdev)
+               ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+       up_read(&bpf_devs_lock);
+
+       return ret;
+}
+
 static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
        struct bpf_dev_offload *offload = prog->aux->offload;
 
        data.offload.prog = prog;
 
-       if (offload->verifier_running)
-               wait_event(offload->verifier_done, !offload->verifier_running);
-
        if (offload->dev_state)
                WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));
 
 {
        struct bpf_dev_offload *offload = prog->aux->offload;
 
-       offload->verifier_running = false;
-       wake_up(&offload->verifier_done);
-
        rtnl_lock();
        down_write(&bpf_devs_lock);
        __bpf_prog_offload_destroy(prog);
 
 static int bpf_prog_offload_translate(struct bpf_prog *prog)
 {
-       struct bpf_dev_offload *offload = prog->aux->offload;
        struct netdev_bpf data = {};
        int ret;
 
        data.offload.prog = prog;
 
-       offload->verifier_running = false;
-       wake_up(&offload->verifier_done);
-
        rtnl_lock();
        ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
        rtnl_unlock();
 
        return 0;
 }
 
-static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
-                                 int insn_idx, int prev_insn_idx)
-{
-       if (env->dev_ops && env->dev_ops->insn_hook)
-               return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
-
-       return 0;
-}
-
 static int do_check(struct bpf_verifier_env *env)
 {
        struct bpf_verifier_state *state;
                        print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks);
                }
 
-               err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
-               if (err)
-                       return err;
+               if (bpf_prog_is_dev_bound(env->prog->aux)) {
+                       err = bpf_prog_offload_verify_insn(env, insn_idx,
+                                                          prev_insn_idx);
+                       if (err)
+                               return err;
+               }
 
                regs = cur_regs(env);
                env->insn_aux_data[insn_idx].seen = true;
        if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
                env->strict_alignment = true;
 
-       if (env->prog->aux->offload) {
+       if (bpf_prog_is_dev_bound(env->prog->aux)) {
                ret = bpf_prog_offload_verifier_prep(env);
                if (ret)
                        goto err_unlock;