        struct sched_atom       **atoms;
 
        pthread_t               thread;
-       sem_t                   sleep_sem;
 
        sem_t                   ready_for_work;
        sem_t                   work_done_sem;
        SCHED_EVENT_RUN,
        SCHED_EVENT_SLEEP,
        SCHED_EVENT_WAKEUP,
-       SCHED_EVENT_MIGRATION,
 };
 
 struct sched_atom {
        enum sched_event_type   type;
-       int                     specific_wait;
        u64                     timestamp;
        u64                     duration;
        unsigned long           nr;
 
        wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
        sem_init(wakee_event->wait_sem, 0, 0);
-       wakee_event->specific_wait = 1;
        event->wait_sem = wakee_event->wait_sem;
 
        sched->nr_wakeup_events++;
 }
 
 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
-                                 u64 timestamp, const char task_state __maybe_unused)
+                                 u64 timestamp)
 {
        struct sched_atom *event = get_new_event(task, timestamp);
 
         * every task starts in sleeping state - this gets ignored
         * if there's no wakeup pointing to this sleep state:
         */
-       add_sched_event_sleep(sched, task, 0, 0);
+       add_sched_event_sleep(sched, task, 0);
 
        sched->pid_to_task[pid] = task;
        sched->nr_tasks++;
                                ret = sem_post(atom->wait_sem);
                        BUG_ON(ret);
                        break;
-               case SCHED_EVENT_MIGRATION:
-                       break;
                default:
                        BUG_ON(1);
        }
                parms->task = task = sched->tasks[i];
                parms->sched = sched;
                parms->fd = self_open_counters(sched, i);
-               sem_init(&task->sleep_sem, 0, 0);
                sem_init(&task->ready_for_work, 0, 0);
                sem_init(&task->work_done_sem, 0, 0);
                task->curr_event = 0;
                task = sched->tasks[i];
                err = pthread_join(task->thread, NULL);
                BUG_ON(err);
-               sem_destroy(&task->sleep_sem);
                sem_destroy(&task->ready_for_work);
                sem_destroy(&task->work_done_sem);
        }
 
        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
-               sem_init(&task->sleep_sem, 0, 0);
                task->curr_event = 0;
        }
 }
                   *next_comm  = evsel__strval(evsel, sample, "next_comm");
        const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
                  next_pid = evsel__intval(evsel, sample, "next_pid");
-       const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
        struct task_desc *prev, __maybe_unused *next;
        u64 timestamp0, timestamp = sample->time;
        int cpu = sample->cpu;
        sched->cpu_last_switched[cpu] = timestamp;
 
        add_sched_event_run(sched, prev, timestamp, delta);
-       add_sched_event_sleep(sched, prev, timestamp, prev_state);
+       add_sched_event_sleep(sched, prev, timestamp);
 
        return 0;
 }