enum cs_etm_sample_type sample_type)
 {
        u32 et = 0;
-       struct int_node *inode = NULL;
+       int cpu;
 
        if (decoder->packet_count >= MAX_BUFFER - 1)
                return OCSD_RESP_FATAL_SYS_ERR;
 
-       /* Search the RB tree for the cpu associated with this traceID */
-       inode = intlist__find(traceid_list, trace_chan_id);
-       if (!inode)
+       if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
                return OCSD_RESP_FATAL_SYS_ERR;
 
        et = decoder->tail;
        decoder->packet_buffer[et].sample_type = sample_type;
        decoder->packet_buffer[et].exc = false;
        decoder->packet_buffer[et].exc_ret = false;
-       decoder->packet_buffer[et].cpu = *((int *)inode->priv);
+       decoder->packet_buffer[et].cpu = cpu;
        decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
        decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
 
 
 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
                                           pid_t tid, u64 time_);
 
+/*
+ * cs_etm__get_cpu() - map a trace channel ID to the CPU that produced it.
+ * @trace_chan_id: traceID extracted from the decoded packet stream.
+ * @cpu: output; set to the CPU number recorded in the metadata on success.
+ *
+ * Looks up @trace_chan_id in the traceid_list RB tree, whose nodes carry a
+ * pointer to the per-CPU metadata array (see where inode->priv is assigned
+ * during auxtrace_info processing).
+ *
+ * Return: 0 on success, -EINVAL if no node exists for @trace_chan_id.
+ */
+int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
+{
+       struct int_node *inode;
+       u64 *metadata;
+
+       inode = intlist__find(traceid_list, trace_chan_id);
+       if (!inode)
+               return -EINVAL;
+
+       metadata = inode->priv;
+       *cpu = (int)metadata[CS_ETM_CPU];
+       return 0;
+}
+
 static void cs_etm__packet_dump(const char *pkt_string)
 {
        const char *color = PERF_COLOR_BLUE;
        cs_etm__free_events(session);
        session->auxtrace = NULL;
 
-       /* First remove all traceID/CPU# nodes for the RB tree */
+       /* First remove all traceID/metadata nodes from the RB tree */
        intlist__for_each_entry_safe(inode, tmp, traceid_list)
                intlist__remove(traceid_list, inode);
        /* Then the RB tree itself */
                                    0xffffffff);
 
        /*
-        * Create an RB tree for traceID-CPU# tuple. Since the conversion has
-        * to be made for each packet that gets decoded, optimizing access in
-        * anything other than a sequential array is worth doing.
+        * Create an RB tree for traceID-metadata tuple.  Since the conversion
+        * has to be made for each packet that gets decoded, optimizing access
+        * in anything other than a sequential array is worth doing.
         */
        traceid_list = intlist__new(NULL);
        if (!traceid_list) {
                        err = -EINVAL;
                        goto err_free_metadata;
                }
-               /* All good, associate the traceID with the CPU# */
-               inode->priv = &metadata[j][CS_ETM_CPU];
+               /* All good, associate the traceID with the metadata pointer */
+               inode->priv = metadata[j];
        }
 
        /*
 
        CS_ETMV4_PRIV_MAX,
 };
 
-/* RB tree for quick conversion between traceID and CPUs */
+/* RB tree for quick conversion between traceID and metadata pointers */
 struct intlist *traceid_list;
 
 #define KiB(x) ((x) * 1024)
 #ifdef HAVE_CSTRACE_SUPPORT
 int cs_etm__process_auxtrace_info(union perf_event *event,
                                  struct perf_session *session);
+int cs_etm__get_cpu(u8 trace_chan_id, int *cpu);
 #else
 static inline int
 cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
 {
        return -1;
 }
+
+/* Without CoreSight trace support the traceID lookup always fails. */
+static inline int cs_etm__get_cpu(u8 trace_chan_id __maybe_unused,
+                                 int *cpu __maybe_unused)
+{
+       return -1;
+}
 #endif
 
 #endif