Add LINK_DETACH command to force-detach bpf_link without destroying it. It has
the same behavior as auto-detaching of bpf_link due to cgroup dying for
bpf_cgroup_link or net_device being destroyed for bpf_xdp_link. In such a case,
bpf_link is still a valid kernel object, but is defunct and doesn't hold the BPF
program attached to the corresponding BPF hook. This functionality allows users
with enough access rights to manually force-detach attached bpf_link without
killing respective owner process.
This patch implements LINK_DETACH for cgroup, xdp, and netns links, mostly
re-using existing link release handling code.
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20200731182830.286260-2-andriin@fb.com
 struct bpf_link_ops {
        void (*release)(struct bpf_link *link);
        void (*dealloc)(struct bpf_link *link);
+       int (*detach)(struct bpf_link *link);
        int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
                           struct bpf_prog *old_prog);
        void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
 
        BPF_LINK_GET_NEXT_ID,
        BPF_ENABLE_STATS,
        BPF_ITER_CREATE,
+       BPF_LINK_DETACH,
 };
 
 enum bpf_map_type {
                __u32           old_prog_fd;
        } link_update;
 
+       struct {
+               __u32           link_fd;
+       } link_detach;
+
        struct { /* struct used by BPF_ENABLE_STATS command */
                __u32           type;
        } enable_stats;
 
 {
        struct bpf_cgroup_link *cg_link =
                container_of(link, struct bpf_cgroup_link, link);
+       struct cgroup *cg;
 
        /* link might have been auto-detached by dying cgroup already,
         * in that case our work is done here
        WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
                                    cg_link->type));
 
+       cg = cg_link->cgroup;
+       cg_link->cgroup = NULL;
+
        mutex_unlock(&cgroup_mutex);
-       cgroup_put(cg_link->cgroup);
+
+       cgroup_put(cg);
 }
 
 static void bpf_cgroup_link_dealloc(struct bpf_link *link)
        kfree(cg_link);
 }
 
+/* LINK_DETACH handler for cgroup links: force-detach by reusing the
+ * release path, which already tolerates a link that was auto-detached
+ * by a dying cgroup. Always succeeds.
+ */
+static int bpf_cgroup_link_detach(struct bpf_link *link)
+{
+       bpf_cgroup_link_release(link);
+
+       return 0;
+}
+
 static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
                                        struct seq_file *seq)
 {
 static const struct bpf_link_ops bpf_cgroup_link_lops = {
        .release = bpf_cgroup_link_release,
        .dealloc = bpf_cgroup_link_dealloc,
+       .detach = bpf_cgroup_link_detach,
        .update_prog = cgroup_bpf_replace,
        .show_fdinfo = bpf_cgroup_link_show_fdinfo,
        .fill_link_info = bpf_cgroup_link_fill_link_info,
 
        bpf_prog_array_free(old_array);
 
 out_unlock:
+       net_link->net = NULL;
        mutex_unlock(&netns_bpf_mutex);
 }
 
+/* LINK_DETACH handler for netns links: force-detach via the existing
+ * release path. Always succeeds.
+ */
+static int bpf_netns_link_detach(struct bpf_link *link)
+{
+       bpf_netns_link_release(link);
+       return 0;
+}
+
 static void bpf_netns_link_dealloc(struct bpf_link *link)
 {
        struct bpf_netns_link *net_link =
 static const struct bpf_link_ops bpf_netns_link_ops = {
        .release = bpf_netns_link_release,
        .dealloc = bpf_netns_link_dealloc,
+       .detach = bpf_netns_link_detach,
        .update_prog = bpf_netns_link_update_prog,
        .fill_link_info = bpf_netns_link_fill_info,
        .show_fdinfo = bpf_netns_link_show_fdinfo,
 
        return ret;
 }
 
+#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
+
+/* BPF_LINK_DETACH command: force-detach the bpf_link identified by
+ * attr->link_detach.link_fd from its BPF hook without destroying the
+ * link object itself. Returns 0 on success, -EINVAL on malformed attr,
+ * a PTR_ERR from fd lookup, or -EOPNOTSUPP if the link type does not
+ * implement a detach operation.
+ */
+static int link_detach(union bpf_attr *attr)
+{
+       struct bpf_link *link;
+       int ret;
+
+       /* reject attrs with bits set past the last known field */
+       if (CHECK_ATTR(BPF_LINK_DETACH))
+               return -EINVAL;
+
+       link = bpf_link_get_from_fd(attr->link_detach.link_fd);
+       if (IS_ERR(link))
+               return PTR_ERR(link);
+
+       /* detach is optional in bpf_link_ops; only link types that
+        * support force-detach provide it
+        */
+       if (link->ops->detach)
+               ret = link->ops->detach(link);
+       else
+               ret = -EOPNOTSUPP;
+
+       /* drop the reference taken by bpf_link_get_from_fd() */
+       bpf_link_put(link);
+       return ret;
+}
+
 static int bpf_link_inc_not_zero(struct bpf_link *link)
 {
        return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? 0 : -ENOENT;
        case BPF_ITER_CREATE:
                err = bpf_iter_create(&attr);
                break;
+       case BPF_LINK_DETACH:
+               err = link_detach(&attr);
+               break;
        default:
                err = -EINVAL;
                break;
 
        /* if racing with net_device's tear down, xdp_link->dev might be
         * already NULL, in which case link was already auto-detached
         */
-       if (xdp_link->dev)
+       if (xdp_link->dev) {
                WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
+               xdp_link->dev = NULL;
+       }
 
        rtnl_unlock();
 }
 
+/* LINK_DETACH handler for XDP links: force-detach via the existing
+ * release path, which handles a dev that was already torn down.
+ * Always succeeds.
+ */
+static int bpf_xdp_link_detach(struct bpf_link *link)
+{
+       bpf_xdp_link_release(link);
+       return 0;
+}
+
 static void bpf_xdp_link_dealloc(struct bpf_link *link)
 {
        struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
 static const struct bpf_link_ops bpf_xdp_link_lops = {
        .release = bpf_xdp_link_release,
        .dealloc = bpf_xdp_link_dealloc,
+       .detach = bpf_xdp_link_detach,
        .show_fdinfo = bpf_xdp_link_show_fdinfo,
        .fill_link_info = bpf_xdp_link_fill_link_info,
        .update_prog = bpf_xdp_link_update,