                ts->kernel_start = 1ULL << 63;
        ts->crp = crp;
 
+       thread->ts = ts;
+
        return ts;
 }
 
+static inline struct thread_stack *thread__stack(struct thread *thread)
+{
+       return thread ? thread->ts : NULL;
+}
+
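
The new thread__stack() accessor deliberately tolerates a NULL thread. That is what lets the functions below fetch a local ts pointer before validating thread itself (see thread_stack__event()). A minimal sketch of the intended calling pattern follows; the helper name is hypothetical and not part of this patch:

	/* Hypothetical caller, mirrors thread_stack__depth() below */
	static size_t example_depth(struct thread *thread)
	{
		struct thread_stack *ts = thread__stack(thread); /* NULL-safe */

		if (!ts)
			return 0; /* no thread, or no stack allocated yet */
		return ts->cnt;
	}
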
 static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
                              bool trace_end)
 {
 
 int thread_stack__flush(struct thread *thread)
 {
-       if (thread->ts)
-               return __thread_stack__flush(thread, thread->ts);
+       struct thread_stack *ts = thread->ts;
+
+       if (ts)
+               return __thread_stack__flush(thread, ts);
 
        return 0;
 }
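
Note that thread_stack__flush() still reads thread->ts directly instead of going through the NULL-tolerant accessor: its callers pass a known-valid thread, so the change here only replaces the repeated member dereference with a local variable.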
 int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
                        u64 to_ip, u16 insn_len, u64 trace_nr)
 {
+       struct thread_stack *ts = thread__stack(thread);
+
        if (!thread)
                return -EINVAL;
 
-       if (!thread->ts) {
-               thread->ts = thread_stack__new(thread, NULL);
-               if (!thread->ts) {
+       if (!ts) {
+               ts = thread_stack__new(thread, NULL);
+               if (!ts) {
                        pr_warning("Out of memory: no thread stack\n");
                        return -ENOMEM;
                }
-               thread->ts->trace_nr = trace_nr;
+               ts->trace_nr = trace_nr;
        }
 
        /*
         * When the trace is discontinuous, the trace_nr changes.  In this case
         * the stack might be completely invalid.  Better to report nothing than
         * to report something misleading, so flush the stack.
         */
-       if (trace_nr != thread->ts->trace_nr) {
-               if (thread->ts->trace_nr)
-                       __thread_stack__flush(thread, thread->ts);
-               thread->ts->trace_nr = trace_nr;
+       if (trace_nr != ts->trace_nr) {
+               if (ts->trace_nr)
+                       __thread_stack__flush(thread, ts);
+               ts->trace_nr = trace_nr;
        }
 
        /* Stop here if thread_stack__process() is in use */
-       if (thread->ts->crp)
+       if (ts->crp)
                return 0;
 
        if (flags & PERF_IP_FLAG_CALL) {
                u64 ret_addr;

                ret_addr = from_ip + insn_len;
                if (ret_addr == to_ip)
                        return 0; /* Zero-length calls are excluded */
-               return thread_stack__push(thread->ts, ret_addr,
+               return thread_stack__push(ts, ret_addr,
                                          flags & PERF_IP_FLAG_TRACE_END);
        } else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
                /*
                 * If the caller did not change the trace number (which would
                 * have flushed the stack) then try to make sense of the stack.
                 * Possibly, tracing began after returning to the address, so
                 * try to pop that. Also, do not expect a call made when the
                 * trace ended, to return, so pop that.
                 */
-               thread_stack__pop(thread->ts, to_ip);
-               thread_stack__pop_trace_end(thread->ts);
+               thread_stack__pop(ts, to_ip);
+               thread_stack__pop_trace_end(ts);
        } else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
-               thread_stack__pop(thread->ts, to_ip);
+               thread_stack__pop(ts, to_ip);
        }
 
        return 0;
 }
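
In thread_stack__event(), ts is initialized through the NULL-safe thread__stack() before the !thread check, so the ordering above is harmless. The trace-number handling (flush the stack when trace_nr changes, since a discontinuous trace may have left it invalid) is the same logic thread_stack__set_trace_nr() applies below.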
 
 void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
 {
-       if (!thread || !thread->ts)
+       struct thread_stack *ts = thread__stack(thread);
+
+       if (!ts)
                return;
 
-       if (trace_nr != thread->ts->trace_nr) {
-               if (thread->ts->trace_nr)
-                       __thread_stack__flush(thread, thread->ts);
-               thread->ts->trace_nr = trace_nr;
+       if (trace_nr != ts->trace_nr) {
+               if (ts->trace_nr)
+                       __thread_stack__flush(thread, ts);
+               ts->trace_nr = trace_nr;
        }
 }
 
 void thread_stack__free(struct thread *thread)
 {
-       if (thread->ts) {
-               __thread_stack__flush(thread, thread->ts);
-               zfree(&thread->ts->stack);
+       struct thread_stack *ts = thread->ts;
+
+       if (ts) {
+               __thread_stack__flush(thread, ts);
+               zfree(&ts->stack);
                zfree(&thread->ts);
        }
 }
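
thread_stack__free() keeps one direct thread->ts access because the final zfree() needs the address of the member itself in order to clear it. For reference, zfree() frees the pointee and NULLs the pointer; roughly (a sketch, not the exact tools/perf definition):

	#define zfree(pp) do { free(*(pp)); *(pp) = NULL; } while (0)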
 void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
                          size_t sz, u64 ip, u64 kernel_start)
 {
+       struct thread_stack *ts = thread__stack(thread);
        u64 context = callchain_context(ip, kernel_start);
        u64 last_context;
        size_t i, j;

        chain->ips[0] = context;
        chain->ips[1] = ip;
 
-       if (!thread || !thread->ts) {
+       if (!ts) {
                chain->nr = 2;
                return;
        }
 
        last_context = context;
 
-       for (i = 2, j = 1; i < sz && j <= thread->ts->cnt; i++, j++) {
-               ip = thread->ts->stack[thread->ts->cnt - j].ret_addr;
+       for (i = 2, j = 1; i < sz && j <= ts->cnt; i++, j++) {
+               ip = ts->stack[ts->cnt - j].ret_addr;
                context = callchain_context(ip, kernel_start);
                if (context != last_context) {
                        if (i >= sz - 1)
 int thread_stack__process(struct thread *thread, struct comm *comm,
                           struct perf_sample *sample,
                           struct addr_location *from_al,
                           struct addr_location *to_al, u64 ref,
                           struct call_return_processor *crp)
 {
-       struct thread_stack *ts = thread->ts;
+       struct thread_stack *ts = thread__stack(thread);
        int err = 0;
 
        if (ts && !ts->crp) {
        }
 
        if (!ts) {
-               thread->ts = thread_stack__new(thread, crp);
-               if (!thread->ts)
+               ts = thread_stack__new(thread, crp);
+               if (!ts)
                        return -ENOMEM;
-               ts = thread->ts;
                ts->comm = comm;
        }
 
 
 size_t thread_stack__depth(struct thread *thread)
 {
-       if (!thread->ts)
+       struct thread_stack *ts = thread__stack(thread);
+
+       if (!ts)
                return 0;
-       return thread->ts->cnt;
+       return ts->cnt;
 }
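
Funnelling every external entry point through the single inline accessor is a small but useful design choice: if the way the stack is stored behind thread->ts ever changes, only thread__stack() and the allocation/free paths would need updating, not every caller.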