struct work_struct work;
 };
 
+struct bpf_array {
+       struct bpf_map map;
+       u32 elem_size;
+       /* 'ownership' of prog_array is claimed by the first program that
+        * is going to use this map or by the first program whose FD is stored
+        * in the map to make sure that all callers and callees have the same
+        * prog_type and JITed flag
+        */
+       enum bpf_prog_type owner_prog_type;
+       bool owner_jited;
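+       /* the actual array storage: regular arrays keep values inline,
+        * prog arrays keep bpf_prog pointers
+        */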
+       union {
+               char value[0] __aligned(8);
+               struct bpf_prog *prog[0] __aligned(8);
+       };
+};
+#define MAX_TAIL_CALL_CNT 32
+
+u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
+void bpf_prog_array_map_clear(struct bpf_map *map);
+bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+
 #ifdef CONFIG_BPF_SYSCALL
 void bpf_register_prog_type(struct bpf_prog_type_list *tl);
 void bpf_register_map_type(struct bpf_map_type_list *tl);
 
 extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
+extern const struct bpf_func_proto bpf_tail_call_proto;
 
 #endif /* _LINUX_BPF_H */
 
 
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-void bpf_prog_select_runtime(struct bpf_prog *fp);
+int bpf_prog_select_runtime(struct bpf_prog *fp);
 void bpf_prog_free(struct bpf_prog *fp);
 
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
 
        BPF_MAP_TYPE_UNSPEC,
        BPF_MAP_TYPE_HASH,
        BPF_MAP_TYPE_ARRAY,
+       BPF_MAP_TYPE_PROG_ARRAY,
 };
 
 enum bpf_prog_type {
         * Return: 0 on success
         */
        BPF_FUNC_l4_csum_replace,
+
+       /**
+        * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program
+        * @ctx: context pointer passed to next program
+        * @prog_array_map: pointer to a map of type BPF_MAP_TYPE_PROG_ARRAY
+        * @index: index inside the array that selects which program to run
+        * Return: 0 on success (a usage sketch follows this enum)
+        */
+       BPF_FUNC_tail_call,
        __BPF_FUNC_MAX_ID,
 };
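
For illustration, a minimal BPF-C sketch of how a program would use the new helper; the SEC()/bpf_map_def macros, the u32 type and the bpf_tail_call() wrapper are assumed to come from samples/bpf-style headers and are not part of this patch:

struct bpf_map_def SEC("maps") jmp_table = {
	.type = BPF_MAP_TYPE_PROG_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u32),	/* values are bpf_prog fds, filled from user space */
	.max_entries = 8,
};

SEC("socket")
int dispatcher(struct __sk_buff *skb)
{
	/* jump into the program stored at index 2; on success this never returns here */
	bpf_tail_call(skb, &jmp_table, 2);

	/* reached only if slot 2 is empty or MAX_TAIL_CALL_CNT was exceeded */
	return 0;
}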
 
 
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
-
-struct bpf_array {
-       struct bpf_map map;
-       u32 elem_size;
-       char value[0] __aligned(8);
-};
+#include <linux/filter.h>
 
 /* Called from syscall */
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
        return 0;
 }
 late_initcall(register_array_map);
+
+static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
+{
+       /* only bpf_prog file descriptors can be stored in a prog_array map */
+       if (attr->value_size != sizeof(u32))
+               return ERR_PTR(-EINVAL);
+       return array_map_alloc(attr);
+}
+
+static void prog_array_map_free(struct bpf_map *map)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       int i;
+
+       synchronize_rcu();
+
+       /* make sure it's empty: all programs were already released via
+        * bpf_prog_array_map_clear() when the map fd was closed
+        */
+       for (i = 0; i < array->map.max_entries; i++)
+               BUG_ON(array->prog[i] != NULL);
+       kvfree(array);
+}
+
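+/* lookups never succeed: prog fds are not readable back through the map,
+ * and the verifier only allows a prog_array to be passed to bpf_tail_call()
+ */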
+static void *prog_array_map_lookup_elem(struct bpf_map *map, void *key)
+{
+       return NULL;
+}
+
+/* only called from syscall */
+static int prog_array_map_update_elem(struct bpf_map *map, void *key,
+                                     void *value, u64 map_flags)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       struct bpf_prog *prog, *old_prog;
+       u32 index = *(u32 *)key, ufd;
+
+       if (map_flags != BPF_ANY)
+               return -EINVAL;
+
+       if (index >= array->map.max_entries)
+               return -E2BIG;
+
+       ufd = *(u32 *)value;
+       prog = bpf_prog_get(ufd);
+       if (IS_ERR(prog))
+               return PTR_ERR(prog);
+
+       if (!bpf_prog_array_compatible(array, prog)) {
+               bpf_prog_put(prog);
+               return -EINVAL;
+       }
+
+       old_prog = xchg(array->prog + index, prog);
+       if (old_prog)
+               bpf_prog_put(old_prog);
+
+       return 0;
+}
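
Since a prog_array can only be populated through the syscall, here is a hedged user-space sketch of storing a program fd at a given index via bpf(2); the prog_array_set() helper is hypothetical and __NR_bpf is assumed to be defined for the target architecture:

#include <string.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <sys/syscall.h>

/* write prog_fd into slot 'index' of a BPF_MAP_TYPE_PROG_ARRAY map;
 * the kernel converts the fd into a bpf_prog pointer in
 * prog_array_map_update_elem()
 */
static int prog_array_set(int map_fd, __u32 index, int prog_fd)
{
	union bpf_attr attr;
	__u32 ufd = prog_fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (__u64)(unsigned long)&index;
	attr.value = (__u64)(unsigned long)&ufd;
	attr.flags = BPF_ANY;

	return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}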
+
+static int prog_array_map_delete_elem(struct bpf_map *map, void *key)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       struct bpf_prog *old_prog;
+       u32 index = *(u32 *)key;
+
+       if (index >= array->map.max_entries)
+               return -E2BIG;
+
+       old_prog = xchg(array->prog + index, NULL);
+       if (old_prog) {
+               bpf_prog_put(old_prog);
+               return 0;
+       } else {
+               return -ENOENT;
+       }
+}
+
+/* decrement refcnt of all bpf_progs that are stored in this map */
+void bpf_prog_array_map_clear(struct bpf_map *map)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       int i;
+
+       for (i = 0; i < array->map.max_entries; i++)
+               prog_array_map_delete_elem(map, &i);
+}
+
+static const struct bpf_map_ops prog_array_ops = {
+       .map_alloc = prog_array_map_alloc,
+       .map_free = prog_array_map_free,
+       .map_get_next_key = array_map_get_next_key,
+       .map_lookup_elem = prog_array_map_lookup_elem,
+       .map_update_elem = prog_array_map_update_elem,
+       .map_delete_elem = prog_array_map_delete_elem,
+};
+
+static struct bpf_map_type_list prog_array_type __read_mostly = {
+       .ops = &prog_array_ops,
+       .type = BPF_MAP_TYPE_PROG_ARRAY,
+};
+
+static int __init register_prog_array_map(void)
+{
+       bpf_register_map_type(&prog_array_type);
+       return 0;
+}
+late_initcall(register_prog_array_map);
 
        return 0;
 }
 
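+/* .func stays NULL: the verifier uses this proto only to type-check the
+ * arguments, and fixup_bpf_calls() rewrites the call into the
+ * JMP_TAIL_CALL instruction handled directly by the interpreter (or JIT)
+ */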
+const struct bpf_func_proto bpf_tail_call_proto = {
+       .func = NULL,
+       .gpl_only = false,
+       .ret_type = RET_VOID,
+       .arg1_type = ARG_PTR_TO_CTX,
+       .arg2_type = ARG_CONST_MAP_PTR,
+       .arg3_type = ARG_ANYTHING,
+};
+
 /**
  *     __bpf_prog_run - run eBPF program on a given context
  *     @ctx: is the data we are operating on
                [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
                /* Call instruction */
                [BPF_JMP | BPF_CALL] = &&JMP_CALL,
+               [BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
                /* Jumps */
                [BPF_JMP | BPF_JA] = &&JMP_JA,
                [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
                [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
                [BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
        };
+       u32 tail_call_cnt = 0;
        void *ptr;
        int off;
 
                                                       BPF_R4, BPF_R5);
                CONT;
 
+       JMP_TAIL_CALL: {
+               struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
+               struct bpf_array *array = container_of(map, struct bpf_array, map);
+               struct bpf_prog *prog;
+               u64 index = BPF_R3;
+
+               if (unlikely(index >= array->map.max_entries))
+                       goto out;
+
+               if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
+                       goto out;
+
+               tail_call_cnt++;
+
+               prog = READ_ONCE(array->prog[index]);
+               if (unlikely(!prog))
+                       goto out;
+
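+               /* tail call: pass the original ctx (R1) through unchanged and
+                * restart the interpreter at the callee's first instruction;
+                * control never returns to the caller
+                */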
+               ARG1 = BPF_R1;
+               insn = prog->insnsi;
+               goto select_insn;
+out:
+               CONT;
+       }
        /* JMP */
        JMP_JA:
                insn += insn->off;
 {
 }
 
+bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp)
+{
+       if (array->owner_prog_type) {
+               if (array->owner_prog_type != fp->type)
+                       return false;
+               if (array->owner_jited != fp->jited)
+                       return false;
+       } else {
+               array->owner_prog_type = fp->type;
+               array->owner_jited = fp->jited;
+       }
+       return true;
+}
+
+static int check_tail_call(const struct bpf_prog *fp)
+{
+       struct bpf_prog_aux *aux = fp->aux;
+       int i;
+
+       for (i = 0; i < aux->used_map_cnt; i++) {
+               struct bpf_array *array;
+               struct bpf_map *map;
+
+               map = aux->used_maps[i];
+               if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+                       continue;
+               array = container_of(map, struct bpf_array, map);
+               if (!bpf_prog_array_compatible(array, fp))
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
 /**
  *     bpf_prog_select_runtime - select execution runtime for BPF program
  *     @fp: bpf_prog populated with internal BPF program
  * try to JIT internal BPF program, if JIT is not available select interpreter
  * BPF program will be executed via BPF_PROG_RUN() macro
  */
-void bpf_prog_select_runtime(struct bpf_prog *fp)
+int bpf_prog_select_runtime(struct bpf_prog *fp)
 {
        fp->bpf_func = (void *) __bpf_prog_run;
 
        bpf_int_jit_compile(fp);
        /* Lock whole bpf_prog as read-only */
        bpf_prog_lock_ro(fp);
+
+       return check_tail_call(fp);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
 
 
 {
        struct bpf_map *map = filp->private_data;
 
+       if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
+               /* prog_array stores refcnt-ed bpf_prog pointers;
+                * release them all when user space closes the prog_array fd
+                */
+               bpf_prog_array_map_clear(map);
+
        bpf_map_put(map);
        return 0;
 }
                         */
                        BUG_ON(!prog->aux->ops->get_func_proto);
 
+                       if (insn->imm == BPF_FUNC_tail_call) {
+                               /* mark bpf_tail_call as a different opcode
+                                * to avoid a conditional branch in the
+                                * interpreter for every normal call,
+                                * and to prevent accidental JITing by a
+                                * JIT compiler that doesn't support
+                                * bpf_tail_call yet
+                                */
+                               insn->imm = 0;
+                               insn->code |= BPF_X;
+                               continue;
+                       }
+
                        fn = prog->aux->ops->get_func_proto(insn->imm);
                        /* all functions that have prototype and verifier allowed
                         * programs to call them, must be real in-kernel functions
        fixup_bpf_calls(prog);
 
        /* eBPF program is ready to be JITed */
-       bpf_prog_select_runtime(prog);
+       err = bpf_prog_select_runtime(prog);
+       if (err < 0)
+               goto free_used_maps;
 
        err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC);
        if (err < 0)
 
                        fn->ret_type, func_id);
                return -EINVAL;
        }
+
+       if (map && map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
+           func_id != BPF_FUNC_tail_call)
+               /* prog_array map type needs extra care:
+                * only allow passing it into bpf_tail_call() for now.
+                * bpf_map_delete_elem() can be allowed in the future,
+                * while bpf_map_update_elem() must only be done via syscall
+                */
+               return -EINVAL;
+
+       if (func_id == BPF_FUNC_tail_call &&
+           map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+               /* don't allow any other map type to be passed into
+                * bpf_tail_call()
+                */
+               return -EINVAL;
+
        return 0;
 }
 
 
                return &bpf_probe_read_proto;
        case BPF_FUNC_ktime_get_ns:
                return &bpf_ktime_get_ns_proto;
+       case BPF_FUNC_tail_call:
+               return &bpf_tail_call_proto;
 
        case BPF_FUNC_trace_printk:
                /*
 
                return &bpf_get_prandom_u32_proto;
        case BPF_FUNC_get_smp_processor_id:
                return &bpf_get_smp_processor_id_proto;
+       case BPF_FUNC_tail_call:
+               return &bpf_tail_call_proto;
        default:
                return NULL;
        }