        res = pos - iocb->ki_pos;
        iocb->ki_pos = pos;
 
-       atomic_dec(&aux_dev->usecount);
-       wake_up_atomic_t(&aux_dev->usecount);
+       if (atomic_dec_and_test(&aux_dev->usecount))
+               wake_up_var(&aux_dev->usecount);
+
        return res;
 }
 
        res = pos - iocb->ki_pos;
        iocb->ki_pos = pos;
 
-       atomic_dec(&aux_dev->usecount);
-       wake_up_atomic_t(&aux_dev->usecount);
+       if (atomic_dec_and_test(&aux_dev->usecount))
+               wake_up_var(&aux_dev->usecount);
+
        return res;
 }
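
The pattern on both the read and write paths is the important part of the
conversion: atomic_dec_and_test() pairs with wake_up_var(), so the waker only
issues a wakeup once the count actually reaches zero, which is exactly the
condition the waiter re-checks. Below is a minimal sketch of the idiom with
hypothetical names (struct foo, foo_put(), foo_drain()), not the drm code
itself; wait_var_event() and wake_up_var() are declared in <linux/wait_bit.h>
and hash the variable's address into a shared wait-queue table, so no
dedicated wait_queue_head_t is needed.

#include <linux/atomic.h>
#include <linux/wait_bit.h>

struct foo {
	atomic_t usecount;
};

static void foo_put(struct foo *f)
{
	/* Wake only on the final decrement ... */
	if (atomic_dec_and_test(&f->usecount))
		wake_up_var(&f->usecount);
}

/* Blocks until all concurrent users of @f have dropped their reference. */
static void foo_drain(struct foo *f)
{
	/* ... because !atomic_read() is the condition re-checked here. */
	wait_var_event(&f->usecount, !atomic_read(&f->usecount));
}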
 
        mutex_unlock(&aux_idr_mutex);
 
        atomic_dec(&aux_dev->usecount);
-       wait_on_atomic_t(&aux_dev->usecount, atomic_t_wait,
-                        TASK_UNINTERRUPTIBLE);
+       wait_var_event(&aux_dev->usecount, !atomic_read(&aux_dev->usecount));
 
        minor = aux_dev->index;
        if (aux_dev->dev)
 
        u32 seqno;
 };
 
-static int wait_atomic_timeout(atomic_t *p, unsigned int mode)
-{
-       return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT;
-}
-
 static bool wait_for_ready(struct igt_wakeup *w)
 {
        DEFINE_WAIT(ready);
 
        set_bit(IDLE, &w->flags);
        if (atomic_dec_and_test(w->done))
-               wake_up_atomic_t(w->done);
+               wake_up_var(w->done);
 
        if (test_bit(STOP, &w->flags))
                goto out;
 out:
        clear_bit(IDLE, &w->flags);
        if (atomic_dec_and_test(w->set))
-               wake_up_atomic_t(w->set);
+               wake_up_var(w->set);
 
        return !test_bit(STOP, &w->flags);
 }

        atomic_set(ready, 0);
        wake_up_all(wq);
 
-       wait_on_atomic_t(set, atomic_t_wait, TASK_UNINTERRUPTIBLE);
+       wait_var_event(set, !atomic_read(set));
        atomic_set(ready, count);
        atomic_set(done, count);
 }

 static int igt_wakeup(void *arg)
 {
        I915_RND_STATE(prng);
-       const int state = TASK_UNINTERRUPTIBLE;
        struct intel_engine_cs *engine = arg;
        struct igt_wakeup *waiters;
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);

                 * that they are ready for the next test. We wait until all
                 * threads are complete and waiting for us (i.e. not a seqno).
                 */
-               err = wait_on_atomic_t(&done, wait_atomic_timeout, state);
-               if (err) {
+               if (!wait_var_event_timeout(&done,
+                                           !atomic_read(&done), 10 * HZ)) {
                        pr_err("Timed out waiting for %d remaining waiters\n",
                               atomic_read(&done));
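
One subtlety in the final hunk: wait_on_atomic_t() returned 0 on success or
the action's error code (here -ETIMEDOUT, per the removed wait_atomic_timeout()
above), while wait_var_event_timeout() follows the wait_event_timeout()
convention and returns the remaining jiffies (at least 1) when the condition
became true, or 0 on timeout. The converted code therefore has to test for a
zero return rather than a non-zero err. A minimal sketch of the distinction,
using a hypothetical drain_with_timeout() helper:

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/wait_bit.h>

static int drain_with_timeout(atomic_t *done)
{
	/*
	 * A non-zero return means the condition was met with that many
	 * jiffies to spare; 0 means the 10 second timeout expired first.
	 * Unlike the removed wait_atomic_timeout() callback, no
	 * -ETIMEDOUT comes back from the API itself.
	 */
	if (!wait_var_event_timeout(done, !atomic_read(done), 10 * HZ))
		return -ETIMEDOUT;

	return 0;
}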