enum bpf_prog_type saved_dst_prog_type;
        enum bpf_attach_type saved_dst_attach_type;
        bool verifier_zext; /* Zero extensions has been inserted by verifier. */
-       bool offload_requested;
+       bool dev_bound; /* Program is bound to the netdev. */
+       bool offload_requested; /* Program is bound and offloaded to the netdev. */
        bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
        bool func_proto_unreliable;
        bool sleepable;
 bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
 
 int bpf_prog_offload_compile(struct bpf_prog *prog);
-void bpf_prog_offload_destroy(struct bpf_prog *prog);
+void bpf_prog_dev_bound_destroy(struct bpf_prog *prog);
 int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
                               struct bpf_prog *prog);
 
 void unpriv_ebpf_notify(int new_state);
 
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
-int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
+int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
+void bpf_dev_bound_netdev_unregister(struct net_device *dev);
+
+/* Program was loaded with a target netdev (dev-bound, possibly also
+ * offloaded; see aux->offload_requested for the offloaded case).
+ */
+static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
+{
+       return aux->dev_bound;
+}
 
 static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
 {
 void sock_map_destroy(struct sock *sk);
 void sock_map_close(struct sock *sk, long timeout);
 #else
+/* Stub for !CONFIG_NET || !CONFIG_BPF_SYSCALL: device binding unsupported. */
-static inline int bpf_prog_offload_init(struct bpf_prog *prog,
+static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
                                        union bpf_attr *attr)
 {
        return -EOPNOTSUPP;
 }
 
+/* Stub: nothing to detach when dev-bound support is compiled out. */
+static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev)
+{
+}
+
+/* Stub: no program can be dev-bound without CONFIG_NET && CONFIG_BPF_SYSCALL. */
+static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
+{
+       return false;
+}
+
 static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux)
 {
        return false;
 
 struct bpf_offload_netdev {
        struct rhash_head l;
        struct net_device *netdev;
-       struct bpf_offload_dev *offdev;
+       struct bpf_offload_dev *offdev; /* NULL when bound-only */
        struct list_head progs;
        struct list_head maps;
        struct list_head offdev_netdevs;
        INIT_LIST_HEAD(&ondev->progs);
        INIT_LIST_HEAD(&ondev->maps);
 
-       down_write(&bpf_devs_lock);
        err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
        if (err) {
                netdev_warn(netdev, "failed to register for BPF offload\n");
-               goto err_unlock_free;
+               goto err_free;
        }
 
-       list_add(&ondev->offdev_netdevs, &offdev->netdevs);
-       up_write(&bpf_devs_lock);
+       if (offdev)
+               list_add(&ondev->offdev_netdevs, &offdev->netdevs);
        return 0;
 
-err_unlock_free:
-       up_write(&bpf_devs_lock);
+err_free:
        kfree(ondev);
        return err;
 }
 static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
                                                struct net_device *netdev)
 {
-       struct bpf_offload_netdev *ondev, *altdev;
+       struct bpf_offload_netdev *ondev, *altdev = NULL;
        struct bpf_offloaded_map *offmap, *mtmp;
        struct bpf_prog_offload *offload, *ptmp;
 
        ASSERT_RTNL();
 
-       down_write(&bpf_devs_lock);
        ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
        if (WARN_ON(!ondev))
-               goto unlock;
+               return;
 
        WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
-       list_del(&ondev->offdev_netdevs);
 
        /* Try to move the objects to another netdev of the device */
-       altdev = list_first_entry_or_null(&offdev->netdevs,
-                                         struct bpf_offload_netdev,
-                                         offdev_netdevs);
+       if (offdev) {
+               list_del(&ondev->offdev_netdevs);
+               altdev = list_first_entry_or_null(&offdev->netdevs,
+                                                 struct bpf_offload_netdev,
+                                                 offdev_netdevs);
+       }
+
        if (altdev) {
                list_for_each_entry(offload, &ondev->progs, offloads)
                        offload->netdev = altdev->netdev;
        WARN_ON(!list_empty(&ondev->progs));
        WARN_ON(!list_empty(&ondev->maps));
        kfree(ondev);
-unlock:
-       up_write(&bpf_devs_lock);
 }
 
-int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
+int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
 {
        struct bpf_offload_netdev *ondev;
        struct bpf_prog_offload *offload;
            attr->prog_type != BPF_PROG_TYPE_XDP)
                return -EINVAL;
 
-       if (attr->prog_flags)
+       if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY)
+               return -EINVAL;
+
+       if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
+           attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)
                return -EINVAL;
 
        offload = kzalloc(sizeof(*offload), GFP_USER);
        if (err)
                goto err_maybe_put;
 
+       prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);
+
        down_write(&bpf_devs_lock);
        ondev = bpf_offload_find_netdev(offload->netdev);
        if (!ondev) {
-               err = -EINVAL;
-               goto err_unlock;
+               if (bpf_prog_is_offloaded(prog->aux)) {
+                       err = -EINVAL;
+                       goto err_unlock;
+               }
+
+               /* When only binding to the device, explicitly
+                * create an entry in the hashtable.
+                */
+               err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
+               if (err)
+                       goto err_unlock;
+               ondev = bpf_offload_find_netdev(offload->netdev);
        }
        offload->offdev = ondev->offdev;
        prog->aux->offload = offload;
        up_read(&bpf_devs_lock);
 }
 
+/* Detach a dev-bound program from its netdev.  If this was the last
+ * program on a bound-only entry (ondev->offdev == NULL), also drop the
+ * hashtable entry that bpf_prog_dev_bound_init() implicitly created.
+ */
-void bpf_prog_offload_destroy(struct bpf_prog *prog)
+void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
 {
+       struct bpf_offload_netdev *ondev;
+       struct net_device *netdev;
+
+       /* rtnl is required: __bpf_offload_dev_netdev_unregister() has
+        * ASSERT_RTNL().  Taken before bpf_devs_lock, matching the
+        * register/unregister paths.
+        */
+       rtnl_lock();
        down_write(&bpf_devs_lock);
-       if (prog->aux->offload)
+       if (prog->aux->offload) {
+               list_del_init(&prog->aux->offload->offloads);
+
+               /* Save the netdev: __bpf_prog_offload_destroy() frees
+                * prog->aux->offload, so it cannot be read afterwards.
+                */
+               netdev = prog->aux->offload->netdev;
                __bpf_prog_offload_destroy(prog);
+
+               /* Reap the bound-only table entry once it is empty. */
+               ondev = bpf_offload_find_netdev(netdev);
+               if (!ondev->offdev && list_empty(&ondev->progs))
+                       __bpf_offload_dev_netdev_unregister(NULL, netdev);
+       }
        up_write(&bpf_devs_lock);
+       rtnl_unlock();
 }
 
 static int bpf_prog_offload_translate(struct bpf_prog *prog)
        struct bpf_offload_netdev *ondev1, *ondev2;
        struct bpf_prog_offload *offload;
 
-       if (!bpf_prog_is_offloaded(prog->aux))
+       if (!bpf_prog_is_dev_bound(prog->aux))
                return false;
 
        offload = prog->aux->offload;
 int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
                                    struct net_device *netdev)
 {
-       return __bpf_offload_dev_netdev_register(offdev, netdev);
+       int err;
+
+       /* bpf_devs_lock is now taken here rather than inside the __ helper,
+        * so internal callers can invoke the helper with the lock held.
+        */
+       down_write(&bpf_devs_lock);
+       err = __bpf_offload_dev_netdev_register(offdev, netdev);
+       up_write(&bpf_devs_lock);
+       return err;
 }
 EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);
 
 void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
                                        struct net_device *netdev)
 {
+       /* Locking moved out of the __ helper; see the register side. */
+       down_write(&bpf_devs_lock);
        __bpf_offload_dev_netdev_unregister(offdev, netdev);
+       up_write(&bpf_devs_lock);
 }
 EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
 
 }
 EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);
 
+/* Drop the bound-only table entry (ondev->offdev == NULL) for @dev, if
+ * one exists.  Entries owned by an offload device are torn down via
+ * bpf_offload_dev_netdev_unregister() instead.  Caller holds rtnl
+ * (presumably the netdev-unregister notifier path — confirm at caller).
+ */
+void bpf_dev_bound_netdev_unregister(struct net_device *dev)
+{
+       struct bpf_offload_netdev *ondev;
+
+       ASSERT_RTNL();
+
+       down_write(&bpf_devs_lock);
+       ondev = bpf_offload_find_netdev(dev);
+       if (ondev && !ondev->offdev)
+               __bpf_offload_dev_netdev_unregister(NULL, ondev->netdev);
+       up_write(&bpf_devs_lock);
+}
+
 static int __init bpf_offload_init(void)
 {
        return rhashtable_init(&offdevs, &offdevs_params);