@@ ... @@
 #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
 
+static bool kernfs_lockdep(struct kernfs_node *kn)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       return kn->flags & KERNFS_LOCKDEP;
+#else
+       return false;
+#endif
+}
+
 /**
  *     kernfs_name_hash
  *     @name: Null terminated string to hash
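The new kernfs_lockdep() helper centralizes the CONFIG_DEBUG_LOCK_ALLOC test: call sites can write "if (kernfs_lockdep(kn))" with no #ifdef of their own, and when lockdep is compiled out the test collapses to a compile-time false, letting the compiler discard the annotation branches entirely. A minimal sketch of the same pattern outside kernfs (DEBUG_TRACKING, struct node and node_is_tracked() are made-up names for illustration, not kernfs API):

    #include <stdbool.h>

    #define FLAG_TRACKED 0x1

    struct node {
            unsigned int flags;
    };

    /*
     * Constant-folds to "false" when the debug option is off, so any
     * "if (node_is_tracked(n)) ..." body becomes dead code.
     */
    static bool node_is_tracked(const struct node *n)
    {
    #ifdef DEBUG_TRACKING
            return n->flags & FLAG_TRACKED;
    #else
            (void)n;        /* unused without DEBUG_TRACKING */
            return false;
    #endif
    }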
@@ ... @@ kernfs_get_active
        if (!atomic_inc_unless_negative(&kn->active))
                return NULL;
 
-       if (kn->flags & KERNFS_LOCKDEP)
+       if (kernfs_lockdep(kn))
                rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
        return kn;
 }
@@ ... @@ kernfs_put_active
        if (unlikely(!kn))
                return;
 
-       if (kn->flags & KERNFS_LOCKDEP)
+       if (kernfs_lockdep(kn))
                rwsem_release(&kn->dep_map, 1, _RET_IP_);
        v = atomic_dec_return(&kn->active);
        if (likely(v != KN_DEACTIVATED_BIAS))
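Taken together, the two hunks above implement a biased active-reference count: kernfs_get_active() succeeds only while the counter is non-negative, kernfs_put_active() drops the reference (its elided tail wakes the waiter on root->deactivate_waitq), and kernfs_deactivate(), in the hunks that follow, adds KN_DEACTIVATED_BIAS so that atomic_inc_unless_negative() refuses all new references while in-flight users drain down to the bare bias. A userspace sketch of the same counter, assuming C11 atomics (DEACTIVATED_BIAS and the obj_* names are illustrative, not kernfs API):

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    #define DEACTIVATED_BIAS INT_MIN    /* stand-in for KN_DEACTIVATED_BIAS */

    static atomic_int active;

    /* inc-unless-negative: refuse new references once deactivation begins */
    static bool obj_get(void)
    {
            int v = atomic_load(&active);

            while (v >= 0)
                    if (atomic_compare_exchange_weak(&active, &v, v + 1))
                            return true;
            return false;
    }

    static void obj_put(void)
    {
            int v = atomic_fetch_sub(&active, 1) - 1;    /* new value */

            if (v == DEACTIVATED_BIAS) {
                    /* last in-flight reference is gone: wake the deactivator */
            }
    }

    /* serialized externally, like kernfs_deactivate() under kernfs_mutex */
    static void obj_deactivate(void)
    {
            if (atomic_load(&active) >= 0)
                    atomic_fetch_add(&active, DEACTIVATED_BIAS);
            /* then sleep until active == DEACTIVATED_BIAS, as wait_event()
             * does in the final hunk below */
    }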
@@ ... @@ kernfs_deactivate
        lockdep_assert_held(&kernfs_mutex);
        BUG_ON(!(kn->flags & KERNFS_REMOVED));
 
-       if (!(kernfs_type(kn) & KERNFS_ACTIVE_REF))
-               return;
-
        /* only the first invocation on @kn should deactivate it */
        if (atomic_read(&kn->active) >= 0)
                atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
 
        mutex_unlock(&kernfs_mutex);
 
-       if (kn->flags & KERNFS_LOCKDEP) {
+       if (kernfs_lockdep(kn)) {
                rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
                if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
                        lock_contended(&kn->dep_map, _RET_IP_);
@@ ... @@ kernfs_deactivate
        wait_event(root->deactivate_waitq,
                   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
 
-       if (kn->flags & KERNFS_LOCKDEP) {
+       if (kernfs_lockdep(kn)) {
                lock_acquired(&kn->dep_map, _RET_IP_);
                rwsem_release(&kn->dep_map, 1, _RET_IP_);
        }
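The lockdep annotations make this scheme checkable: holding an active reference is modeled as holding kn->dep_map as a reader (the rwsem_acquire_read() in kernfs_get_active()), and deactivation is modeled as taking it as a writer, so a path that tries to remove a node while holding an active reference to that same node shows up as a read-vs-write self-deadlock in lockdep's dependency graph. Condensed, the deactivation side tells lockdep this story (a restatement of the two final hunks above, not new code):

    rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);    /* writer wants the lock */
    if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
            lock_contended(&kn->dep_map, _RET_IP_); /* and has to wait */

    wait_event(root->deactivate_waitq,
               atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);

    lock_acquired(&kn->dep_map, _RET_IP_);          /* all "readers" drained */
    rwsem_release(&kn->dep_map, 1, _RET_IP_);       /* and the writer unlocks */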