        wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
                            percpu_ref_switch_lock);
 
-       if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+       if (data->force_atomic || percpu_ref_is_dying(ref))
                __percpu_ref_switch_to_atomic(ref, confirm_switch);
        else
                __percpu_ref_switch_to_percpu(ref);
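
For context, this substitution is behavior-preserving: percpu_ref_is_dying() is a one-line inline wrapper in include/linux/percpu-refcount.h around the same __PERCPU_REF_DEAD flag test that the open-coded condition performed:

        static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
        {
                return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
        }

so the switch-mode decision above is unchanged.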
 
        spin_lock_irqsave(&percpu_ref_switch_lock, flags);
 
-       WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
+       WARN_ONCE(percpu_ref_is_dying(ref),
                  "%s called more than once on %ps!", __func__,
                  ref->data->release);
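
The WARN_ONCE above catches double kills: calling percpu_ref_kill_and_confirm() (or its percpu_ref_kill() wrapper) a second time without an intervening percpu_ref_resurrect(). A minimal sketch of the misuse it flags, assuming a ref that has already been initialized:

        percpu_ref_kill(&ref);  /* first kill: marks the ref dying */
        percpu_ref_kill(&ref);  /* second kill: percpu_ref_is_dying() already true, WARN_ONCE fires */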
 
        spin_lock_irqsave(&percpu_ref_switch_lock, flags);
 
-       WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
+       WARN_ON_ONCE(!percpu_ref_is_dying(ref));
        WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
 
        ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
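
percpu_ref_resurrect() is the one path that clears __PERCPU_REF_DEAD again, and it is only legal while the ref is dying but its release callback has not yet run. A minimal lifecycle sketch under that assumption; my_release and lifecycle_sketch are hypothetical names for illustration, not part of this patch:

        #include <linux/gfp.h>
        #include <linux/percpu-refcount.h>

        /* Hypothetical release callback; runs once the last reference is dropped. */
        static void my_release(struct percpu_ref *ref)
        {
        }

        static int lifecycle_sketch(void)
        {
                /* static: the ref must outlive the RCU callback queued by kill */
                static struct percpu_ref ref;
                int ret;

                ret = percpu_ref_init(&ref, my_release, 0, GFP_KERNEL);
                if (ret)
                        return ret;

                percpu_ref_get(&ref);       /* extra ref keeps my_release from firing across the kill */
                percpu_ref_kill(&ref);      /* sets __PERCPU_REF_DEAD: percpu_ref_is_dying() is now true */
                percpu_ref_resurrect(&ref); /* clears __PERCPU_REF_DEAD and retakes the initial reference */
                percpu_ref_put(&ref);       /* drop the extra ref; the ref stays live */

                return 0;
        }

percpu_ref_resurrect() internally retakes the initial reference that percpu_ref_kill() dropped, which is why the final put above leaves the ref live rather than triggering release.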