void unpriv_ebpf_notify(int new_state);
 
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
+int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+                             struct bpf_prog_aux *prog_aux);
+void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id);
 int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
 void bpf_dev_bound_netdev_unregister(struct net_device *dev);
 
 void sock_map_destroy(struct sock *sk);
 void sock_map_close(struct sock *sk, long timeout);
 #else
+static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+                                           struct bpf_prog_aux *prog_aux)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog,
+                                               u32 func_id)
+{
+       return NULL;
+}
+
 static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
-                                       union bpf_attr *attr)
+                                         union bpf_attr *attr)
 {
        return -EOPNOTSUPP;
 }
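
For context, userspace makes a program dev-bound by supplying a target ifindex at load time; bpf_prog_dev_bound_init() consumes that attribute. A minimal, hypothetical libbpf sketch (assuming the BPF_F_XDP_DEV_BOUND_ONLY load flag introduced alongside this series; names and error handling are illustrative):

#include <net/if.h>
#include <bpf/libbpf.h>

/* Open and load every program in an object as dev-bound to one netdev. */
static struct bpf_object *open_dev_bound(const char *file, const char *ifname)
{
	struct bpf_program *prog;
	struct bpf_object *obj;
	int ifindex;

	ifindex = if_nametoindex(ifname);
	obj = bpf_object__open(file);
	if (!obj || !ifindex)
		return NULL;

	bpf_object__for_each_program(prog, obj) {
		/* prog_ifindex plus BPF_F_XDP_DEV_BOUND_ONLY makes the
		 * program dev-bound (not offloaded): it can only attach
		 * to this netdev, and the verifier may resolve metadata
		 * kfuncs to the driver's xdp_metadata_ops.
		 */
		bpf_program__set_ifindex(prog, ifindex);
		bpf_program__set_flags(prog, BPF_F_XDP_DEV_BOUND_ONLY);
	}

	if (bpf_object__load(obj)) {
		bpf_object__close(obj);
		return NULL;
	}

	return obj;
}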
 
 struct udp_tunnel_nic;
 struct bpf_prog;
 struct xdp_buff;
+struct xdp_md;
 
 void synchronize_net(void);
 void netdev_set_default_ethtool_ops(struct net_device *dev,
                                                  bool cycles);
 };
 
+struct xdp_metadata_ops {
+       int     (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
+       int     (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash);
+};
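
A driver opts in by implementing these callbacks against its own RX descriptor layout and publishing them on the netdev at probe time. A hedged driver-side sketch; the mydrv_* names and the private buffer layout are illustrative, not part of this patch:

#include <linux/netdevice.h>
#include <net/xdp.h>

/* The driver's private RX context wraps xdp_buff, so the xdp_md pointer
 * the kfunc receives can be cast back to it (xdp must be the first member).
 */
struct mydrv_xdp_buff {
	struct xdp_buff xdp;
	u64 rx_timestamp;
	u32 rx_hash;
};

static int mydrv_xmo_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	const struct mydrv_xdp_buff *pkt = (const void *)ctx;

	if (!pkt->rx_timestamp)
		return -ENODATA;	/* descriptor carried no timestamp */

	*timestamp = pkt->rx_timestamp;
	return 0;
}

static int mydrv_xmo_rx_hash(const struct xdp_md *ctx, u32 *hash)
{
	const struct mydrv_xdp_buff *pkt = (const void *)ctx;

	*hash = pkt->rx_hash;
	return 0;
}

static const struct xdp_metadata_ops mydrv_xdp_metadata_ops = {
	.xmo_rx_timestamp	= mydrv_xmo_rx_timestamp,
	.xmo_rx_hash		= mydrv_xmo_rx_hash,
};

/* at probe time: netdev->xdp_metadata_ops = &mydrv_xdp_metadata_ops; */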
+
 /**
  * enum netdev_priv_flags - &struct net_device priv_flags
  *
  *
  *     @netdev_ops:    Includes several pointers to callbacks,
  *                     if one wants to override the ndo_*() functions
+ *     @xdp_metadata_ops:      Includes pointers to XDP metadata callbacks.
  *     @ethtool_ops:   Management operations
  *     @l3mdev_ops:    Layer 3 master device operations
  *     @ndisc_ops:     Includes callbacks for different IPv6 neighbour
        unsigned int            flags;
        unsigned long long      priv_flags;
        const struct net_device_ops *netdev_ops;
+       const struct xdp_metadata_ops *xdp_metadata_ops;
        int                     ifindex;
        unsigned short          gflags;
        unsigned short          hard_header_len;
 
 
 #define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE
 
+#define XDP_METADATA_KFUNC_xxx \
+       XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_TIMESTAMP, \
+                          bpf_xdp_metadata_rx_timestamp) \
+       XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_HASH, \
+                          bpf_xdp_metadata_rx_hash) \
+
+enum {
+#define XDP_METADATA_KFUNC(name, _) name,
+XDP_METADATA_KFUNC_xxx
+#undef XDP_METADATA_KFUNC
+MAX_XDP_METADATA_KFUNC,
+};
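
For reference, the X-macro above expands the enum to:

enum {
	XDP_METADATA_KFUNC_RX_TIMESTAMP,	/* bpf_xdp_metadata_rx_timestamp */
	XDP_METADATA_KFUNC_RX_HASH,		/* bpf_xdp_metadata_rx_hash */
	MAX_XDP_METADATA_KFUNC,
};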
+
+#ifdef CONFIG_NET
+u32 bpf_xdp_metadata_kfunc_id(int id);
+bool bpf_dev_bound_kfunc_id(u32 btf_id);
+#else
+static inline u32 bpf_xdp_metadata_kfunc_id(int id) { return 0; }
+static inline bool bpf_dev_bound_kfunc_id(u32 btf_id) { return false; }
+#endif
+
 #endif /* __LINUX_NET_XDP_H__ */
 
        if (fp->kprobe_override)
                return false;
 
+       /* XDP programs inserted into maps are not guaranteed to run on
+        * a particular netdev (and can run outside driver context entirely
+        * in the case of devmap and cpumap). Until device checks
+        * are implemented, prohibit adding dev-bound programs to program maps.
+        */
+       if (bpf_prog_is_dev_bound(fp->aux))
+               return false;
+
        spin_lock(&map->owner.lock);
        if (!map->owner.type) {
                /* There's no owner yet where we could check for
 
        up_write(&bpf_devs_lock);
 }
 
+int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+                             struct bpf_prog_aux *prog_aux)
+{
+       if (!bpf_prog_is_dev_bound(prog_aux)) {
+               bpf_log(log, "metadata kfuncs require device-bound program\n");
+               return -EINVAL;
+       }
+
+       if (bpf_prog_is_offloaded(prog_aux)) {
+               bpf_log(log, "metadata kfuncs can't be offloaded\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id)
+{
+       const struct xdp_metadata_ops *ops;
+       void *p = NULL;
+
+       /* We don't hold bpf_devs_lock across resolving several
+        * kfuncs, so we can race with unregister_netdevice().
+        * We rely on the bpf_dev_bound_match() check at attach time
+        * to render such a program unusable.
+        */
+       down_read(&bpf_devs_lock);
+       if (!prog->aux->offload)
+               goto out;
+
+       ops = prog->aux->offload->netdev->xdp_metadata_ops;
+       if (!ops)
+               goto out;
+
+       if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_TIMESTAMP))
+               p = ops->xmo_rx_timestamp;
+       else if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_HASH))
+               p = ops->xmo_rx_hash;
+out:
+       up_read(&bpf_devs_lock);
+
+       return p;
+}
+
 static int __init bpf_offload_init(void)
 {
        return rhashtable_init(&offdevs, &offdevs_params);
 
                return -EINVAL;
        }
 
+       if (bpf_dev_bound_kfunc_id(func_id)) {
+               err = bpf_dev_bound_kfunc_check(&env->log, prog_aux);
+               if (err)
+                       return err;
+       }
+
        desc = &tab->descs[tab->nr_descs++];
        desc->func_id = func_id;
        desc->imm = call_imm;
                            struct bpf_insn *insn_buf, int insn_idx, int *cnt)
 {
        const struct bpf_kfunc_desc *desc;
+       void *xdp_kfunc;
 
        if (!insn->imm) {
                verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
                return -EINVAL;
        }
 
+       *cnt = 0;
+
+       if (bpf_dev_bound_kfunc_id(insn->imm)) {
+               xdp_kfunc = bpf_dev_bound_resolve_kfunc(env->prog, insn->imm);
+               if (xdp_kfunc) {
+                       insn->imm = BPF_CALL_IMM(xdp_kfunc);
+                       return 0;
+               }
+
+               /* fall back to the default kfunc when not supported by netdev */
+       }
+
        /* insn->imm has the btf func_id. Replace it with
         * an address (relative to __bpf_call_base).
         */
                return -EFAULT;
        }
 
-       *cnt = 0;
        insn->imm = desc->imm;
        if (insn->off)
                return 0;
        if (tgt_prog) {
                struct bpf_prog_aux *aux = tgt_prog->aux;
 
+               if (bpf_prog_is_dev_bound(tgt_prog->aux)) {
+                       bpf_log(log, "Replacing device-bound programs not supported\n");
+                       return -EINVAL;
+               }
+
                for (i = 0; i < aux->func_info_cnt; i++)
                        if (aux->func_info[i].type_id == btf_id) {
                                subprog = i;
 
        if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
                return -EINVAL;
 
+       if (bpf_prog_is_dev_bound(prog->aux))
+               return -EINVAL;
+
        if (do_live) {
                if (!batch_size)
                        batch_size = NAPI_POLL_WEIGHT;
 
  * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
  */
 #include <linux/bpf.h>
+#include <linux/btf_ids.h>
 #include <linux/filter.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 
        return nxdpf;
 }
+
+__diag_push();
+__diag_ignore_all("-Wmissing-prototypes",
+                 "Global functions as their definitions will be in vmlinux BTF");
+
+/**
+ * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp.
+ * @ctx: XDP context pointer.
+ * @timestamp: Return value pointer.
+ *
+ * Returns 0 on success or ``-errno`` on error.
+ */
+int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+       return -EOPNOTSUPP;
+}
+
+/**
+ * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash.
+ * @ctx: XDP context pointer.
+ * @hash: Return value pointer.
+ *
+ * Returns 0 on success or ``-errno`` on error.
+ */
+int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash)
+{
+       return -EOPNOTSUPP;
+}
+
+__diag_pop();
+
+BTF_SET8_START(xdp_metadata_kfunc_ids)
+#define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, 0)
+XDP_METADATA_KFUNC_xxx
+#undef XDP_METADATA_KFUNC
+BTF_SET8_END(xdp_metadata_kfunc_ids)
+
+static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
+       .owner = THIS_MODULE,
+       .set   = &xdp_metadata_kfunc_ids,
+};
+
+BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted)
+#define XDP_METADATA_KFUNC(name, str) BTF_ID(func, str)
+XDP_METADATA_KFUNC_xxx
+#undef XDP_METADATA_KFUNC
+
+u32 bpf_xdp_metadata_kfunc_id(int id)
+{
+       /* xdp_metadata_kfunc_ids is sorted by BTF ID at build time, so it
+        * can't be indexed by the XDP_METADATA_KFUNC_* enum; use the
+        * unsorted list instead.
+        */
+       return xdp_metadata_kfunc_ids_unsorted[id];
+}
+
+bool bpf_dev_bound_kfunc_id(u32 btf_id)
+{
+       return btf_id_set8_contains(&xdp_metadata_kfunc_ids, btf_id);
+}
+
+static int __init xdp_metadata_init(void)
+{
+       return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &xdp_metadata_kfunc_set);
+}
+late_initcall(xdp_metadata_init);
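
Taken together: a BPF program declares the kfuncs as __ksym externs; when the program is dev-bound to a netdev that provides xdp_metadata_ops, fixup_kfunc_call() patches the calls to the driver's xmo_* implementations, and the -EOPNOTSUPP defaults above run otherwise. A hedged BPF-side sketch (section and program names are illustrative):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
				    __u32 *hash) __ksym;

SEC("xdp")
int rx_meta(struct xdp_md *ctx)
{
	__u64 ts;
	__u32 hash;

	/* Each call returns 0 only when the driver exposed the field. */
	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("rx timestamp: %llu", ts);
	if (!bpf_xdp_metadata_rx_hash(ctx, &hash))
		bpf_printk("rx hash: 0x%x", hash);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";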