static int callchain_trace(unsigned int addr, void *data)
 {
        struct arc_callchain_trace *ctrl = data;
-       struct perf_callchain_entry *entry = ctrl->perf_stuff;
+       struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
        perf_callchain_store(entry, addr);
 
        if (ctrl->depth++ < 3)
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        struct arc_callchain_trace ctrl = {
                .depth = 0,
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        /*
         * User stack can't be unwound trivially with kernel dwarf unwinder
 
  */
 static struct frame_tail __user *
 user_backtrace(struct frame_tail __user *tail,
-              struct perf_callchain_entry *entry)
+              struct perf_callchain_entry_ctx *entry)
 {
        struct frame_tail buftail;
        unsigned long err;
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        struct frame_tail __user *tail;
 
 
        tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
-       while ((entry->nr < sysctl_perf_event_max_stack) &&
+       while ((entry->entry->nr < entry->max_stack) &&
               tail && !((unsigned long)tail & 0x3))
                tail = user_backtrace(tail, entry);
 }
 callchain_trace(struct stackframe *fr,
                void *data)
 {
-       struct perf_callchain_entry *entry = data;
+       struct perf_callchain_entry_ctx *entry = data;
        perf_callchain_store(entry, fr->pc);
        return 0;
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        struct stackframe fr;
 
 
  */
 static struct frame_tail __user *
 user_backtrace(struct frame_tail __user *tail,
-              struct perf_callchain_entry *entry)
+              struct perf_callchain_entry_ctx *entry)
 {
        struct frame_tail buftail;
        unsigned long err;
 
 static struct compat_frame_tail __user *
 compat_user_backtrace(struct compat_frame_tail __user *tail,
-                     struct perf_callchain_entry *entry)
+                     struct perf_callchain_entry_ctx *entry)
 {
        struct compat_frame_tail buftail;
        unsigned long err;
 }
 #endif /* CONFIG_COMPAT */
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                         struct pt_regs *regs)
 {
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
 
                tail = (struct frame_tail __user *)regs->regs[29];
 
-               while (entry->nr < sysctl_perf_event_max_stack &&
+               while (entry->entry->nr < entry->max_stack &&
                       tail && !((unsigned long)tail & 0xf))
                        tail = user_backtrace(tail, entry);
        } else {
 
                tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
 
-               while ((entry->nr < sysctl_perf_event_max_stack) &&
+               while ((entry->entry->nr < entry->max_stack) &&
                        tail && !((unsigned long)tail & 0x3))
                        tail = compat_user_backtrace(tail, entry);
 #endif
  */
 static int callchain_trace(struct stackframe *frame, void *data)
 {
-       struct perf_callchain_entry *entry = data;
+       struct perf_callchain_entry_ctx *entry = data;
        perf_callchain_store(entry, frame->pc);
        return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                           struct pt_regs *regs)
 {
        struct stackframe frame;
 
 
 static struct metag_frame __user *
 user_backtrace(struct metag_frame __user *user_frame,
-              struct perf_callchain_entry *entry)
+              struct perf_callchain_entry_ctx *entry)
 {
        struct metag_frame frame;
        unsigned long calladdr;
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        unsigned long sp = regs->ctx.AX[0].U0;
        struct metag_frame __user *frame;
 
        --frame;
 
-       while ((entry->nr < sysctl_perf_event_max_stack) && frame)
+       while ((entry->entry->nr < entry->max_stack) && frame)
                frame = user_backtrace(frame, entry);
 }
 
 callchain_trace(struct stackframe *fr,
                void *data)
 {
-       struct perf_callchain_entry *entry = data;
+       struct perf_callchain_entry_ctx *entry = data;
        perf_callchain_store(entry, fr->pc);
        return 0;
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        struct stackframe fr;
 
 
  * the user stack callchains, we will add it here.
  */
 
-static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
-       unsigned long reg29)
+static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
+                                   unsigned long reg29)
 {
        unsigned long *sp = (unsigned long *)reg29;
        unsigned long addr;
                addr = *sp++;
                if (__kernel_text_address(addr)) {
                        perf_callchain_store(entry, addr);
-                       if (entry->nr >= sysctl_perf_event_max_stack)
+                       if (entry->entry->nr >= entry->max_stack)
                                break;
                }
        }
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
-                     struct pt_regs *regs)
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+                          struct pt_regs *regs)
 {
        unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
        }
        do {
                perf_callchain_store(entry, pc);
-               if (entry->nr >= sysctl_perf_event_max_stack)
+               if (entry->entry->nr >= entry->max_stack)
                        break;
                pc = unwind_stack(current, &sp, pc, &ra);
        } while (pc);
 
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        unsigned long sp, next_sp;
        unsigned long next_ip;
                puc == (unsigned long) &sf->uc;
 }
 
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
                                   struct pt_regs *regs)
 {
        unsigned long sp, next_sp;
        sp = regs->gpr[1];
        perf_callchain_store(entry, next_ip);
 
-       while (entry->nr < sysctl_perf_event_max_stack) {
+       while (entry->entry->nr < entry->max_stack) {
                fp = (unsigned long __user *) sp;
                if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
                        return;
        return rc;
 }
 
-static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
                                          struct pt_regs *regs)
 {
 }
        return mctx->mc_gregs;
 }
 
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
                                   struct pt_regs *regs)
 {
        unsigned int sp, next_sp;
        sp = regs->gpr[1];
        perf_callchain_store(entry, next_ip);
 
-       while (entry->nr < sysctl_perf_event_max_stack) {
+       while (entry->entry->nr < entry->max_stack) {
                fp = (unsigned int __user *) (unsigned long) sp;
                if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
                        return;
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        if (current_is_64bit())
                perf_callchain_user_64(entry, regs);
 
 
 static int __perf_callchain_kernel(void *data, unsigned long address)
 {
-       struct perf_callchain_entry *entry = data;
+       struct perf_callchain_entry_ctx *entry = data;
 
        perf_callchain_store(entry, address);
        return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                           struct pt_regs *regs)
 {
        if (user_mode(regs))
 
 
 static void callchain_address(void *data, unsigned long addr, int reliable)
 {
-       struct perf_callchain_entry *entry = data;
+       struct perf_callchain_entry_ctx *entry = data;
 
        if (reliable)
                perf_callchain_store(entry, addr);
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        perf_callchain_store(entry, regs->pc);
 
 
 }
 pure_initcall(init_hw_perf_events);
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                           struct pt_regs *regs)
 {
        unsigned long ksp, fp;
                        }
                }
 #endif
-       } while (entry->nr < sysctl_perf_event_max_stack);
+       } while (entry->entry->nr < entry->max_stack);
 }
 
 static inline int
        return (__range_not_ok(fp, size, TASK_SIZE) == 0);
 }
 
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
                                   struct pt_regs *regs)
 {
        unsigned long ufp;
                pc = sf.callers_pc;
                ufp = (unsigned long)sf.fp + STACK_BIAS;
                perf_callchain_store(entry, pc);
-       } while (entry->nr < sysctl_perf_event_max_stack);
+       } while (entry->entry->nr < entry->max_stack);
 }
 
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
                                   struct pt_regs *regs)
 {
        unsigned long ufp;
                        ufp = (unsigned long)sf.fp;
                }
                perf_callchain_store(entry, pc);
-       } while (entry->nr < sysctl_perf_event_max_stack);
+       } while (entry->entry->nr < entry->max_stack);
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        u64 saved_fault_address = current_thread_info()->fault_address;
        u8 saved_fault_code = get_thread_fault_code();
 
 /*
  * Tile specific backtracing code for perf_events.
  */
-static inline void perf_callchain(struct perf_callchain_entry *entry,
+static inline void perf_callchain(struct perf_callchain_entry_ctx *entry,
                    struct pt_regs *regs)
 {
        struct KBacktraceIterator kbt;
        }
 }
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                    struct pt_regs *regs)
 {
        perf_callchain(entry, regs);
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                      struct pt_regs *regs)
 {
        perf_callchain(entry, regs);
 
 
 static int backtrace_address(void *data, unsigned long addr, int reliable)
 {
-       struct perf_callchain_entry *entry = data;
+       struct perf_callchain_entry_ctx *entry = data;
 
        return perf_callchain_store(entry, addr);
 }
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* TODO: We don't support guest os callchain now */
 #include <asm/compat.h>
 
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
        /* 32-bit process in 64-bit kernel. */
        unsigned long ss_base, cs_base;
 
        fp = compat_ptr(ss_base + regs->bp);
        pagefault_disable();
-       while (entry->nr < sysctl_perf_event_max_stack) {
+       while (entry->entry->nr < entry->max_stack) {
                unsigned long bytes;
                frame.next_frame     = 0;
                frame.return_address = 0;
 }
 #else
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
     return 0;
 }
 #endif
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        struct stack_frame frame;
        const void __user *fp;
                return;
 
        pagefault_disable();
-       while (entry->nr < sysctl_perf_event_max_stack) {
+       while (entry->entry->nr < entry->max_stack) {
                unsigned long bytes;
                frame.next_frame             = NULL;
                frame.return_address = 0;
 
 
 static int callchain_trace(struct stackframe *frame, void *data)
 {
-       struct perf_callchain_entry *entry = data;
+       struct perf_callchain_entry_ctx *entry = data;
 
        perf_callchain_store(entry, frame->pc);
        return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                           struct pt_regs *regs)
 {
-       xtensa_backtrace_kernel(regs, sysctl_perf_event_max_stack,
+       xtensa_backtrace_kernel(regs, entry->max_stack,
                                callchain_trace, NULL, entry);
 }
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                         struct pt_regs *regs)
 {
-       xtensa_backtrace_user(regs, sysctl_perf_event_max_stack,
+       xtensa_backtrace_user(regs, entry->max_stack,
                              callchain_trace, entry);
 }
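Every architecture hunk above applies the same mechanical conversion: the callback now takes a struct perf_callchain_entry_ctx and reads its depth cap from the context instead of the global sysctl_perf_event_max_stack. A minimal sketch of what a converted kernel-side walker looks like, modelled on the MIPS loop above; unwind_next() is a hypothetical stand-in for an architecture's frame-walking helper, not something this patch adds:

void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                           struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        do {
                perf_callchain_store(entry, pc);
                /* stop once the per-call cap carried in the ctx is hit */
                if (entry->entry->nr >= entry->max_stack)
                        break;
                pc = unwind_next(pc);   /* hypothetical arch unwind helper */
        } while (pc);
}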
 
 
        __u64                           ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
 };
 
+struct perf_callchain_entry_ctx {
+       struct perf_callchain_entry *entry;
+       u32                         max_stack;
+};
+
 struct perf_raw_record {
        u32                             size;
        void                            *data;
 /* Callchains */
 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 
-extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
-extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-                  bool crosstask, bool add_mark);
+                  u32 max_stack, bool crosstask, bool add_mark);
 extern int get_callchain_buffers(void);
 extern void put_callchain_buffers(void);
 
 extern int sysctl_perf_event_max_stack;
 
-static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
 {
-       if (entry->nr < sysctl_perf_event_max_stack) {
+       struct perf_callchain_entry *entry = ctx->entry;
+
+       if (entry->nr < ctx->max_stack) {
                entry->ip[entry->nr++] = ip;
                return 0;
        } else {
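The hunk above moves the capacity check into perf_callchain_store() itself: the ip is appended only while entry->nr is below the per-call ctx->max_stack, and -1 is returned once the entry is full. A walker can therefore rely on the return value alone to know when to stop, roughly (illustrative, mirroring the x86 backtrace_address() hunk above):

        /* a non-zero return from the store means the entry is full: stop */
        if (perf_callchain_store(entry, addr))
                return -1;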
 
                               BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
                return -EINVAL;
 
-       trace = get_perf_callchain(regs, init_nr, kernel, user, false, false);
+       trace = get_perf_callchain(regs, init_nr, kernel, user,
+                                  sysctl_perf_event_max_stack, false, false);
 
        if (unlikely(!trace))
                /* couldn't fetch the stack trace */
 
 static struct callchain_cpus_entries *callchain_cpus_entries;
 
 
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                                  struct pt_regs *regs)
 {
 }
 
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                                struct pt_regs *regs)
 {
 }
        if (!kernel && !user)
                return NULL;
 
-       return get_perf_callchain(regs, 0, kernel, user, crosstask, true);
+       return get_perf_callchain(regs, 0, kernel, user,
+                                 sysctl_perf_event_max_stack, crosstask, true);
 }
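Both call sites above preserve the old behaviour by passing sysctl_perf_event_max_stack explicitly; the new max_stack parameter is what allows a caller to request a shallower walk on a per-call basis. A hypothetical caller wanting at most 16 kernel frames, as a sketch against the new prototype (not part of this patch):

        struct perf_callchain_entry *trace;

        /* kernel-only walk, capped at 16 frames, with the context marker */
        trace = get_perf_callchain(regs, 0, true, false, 16, false, true);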
 
 struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-                  bool crosstask, bool add_mark)
+                  u32 max_stack, bool crosstask, bool add_mark)
 {
        struct perf_callchain_entry *entry;
+       struct perf_callchain_entry_ctx ctx;
        int rctx;
 
        entry = get_callchain_entry(&rctx);
        if (!entry)
                goto exit_put;
 
+       ctx.entry     = entry;
+       ctx.max_stack = max_stack;
+
        entry->nr = init_nr;
 
        if (kernel && !user_mode(regs)) {
                if (add_mark)
-                       perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
-               perf_callchain_kernel(entry, regs);
+                       perf_callchain_store(&ctx, PERF_CONTEXT_KERNEL);
+               perf_callchain_kernel(&ctx, regs);
        }
 
        if (user) {
                                goto exit_put;
 
                        if (add_mark)
-                               perf_callchain_store(entry, PERF_CONTEXT_USER);
-                       perf_callchain_user(entry, regs);
+                               perf_callchain_store(&ctx, PERF_CONTEXT_USER);
+                       perf_callchain_user(&ctx, regs);
                }
        }