spinlock_t              busylock ____cacheline_aligned_in_smp;
        spinlock_t              seqlock;
 
-       /* for NOLOCK qdisc, true if there are no enqueued skbs */
-       bool                    empty;
        struct rcu_head         rcu;
 
        /* private data */
 static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
 {
        if (qdisc_is_percpu_stats(qdisc))
-               return READ_ONCE(qdisc->empty);
+               return nolock_qdisc_is_empty(qdisc);
        return !READ_ONCE(qdisc->q.qlen);
 }
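
nolock_qdisc_is_empty() itself is not visible in this excerpt; a minimal sketch of
such a helper, assuming emptiness is derived from the qdisc state bits (here
__QDISC_STATE_MISSED) rather than from the removed ->empty field, could look like:

	static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
	{
		/* Sketch/assumption: a lockless qdisc counts as empty unless a
		 * concurrent xmit path has flagged pending work in ->state.
		 */
		return !test_bit(__QDISC_STATE_MISSED, &qdisc->state);
	}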
 
 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
        if (qdisc->flags & TCQ_F_NOLOCK) {
                if (spin_trylock(&qdisc->seqlock))
-                       goto nolock_empty;
+                       return true;
 
                /* If the MISSED flag is set, it means another thread has
                 * set the MISSED flag before the second spin_trylock(), so
                /* Retry again in case the other CPU may not see the new flag
                 * after it releases the lock at the end of qdisc_run_end().
                 */
-               if (!spin_trylock(&qdisc->seqlock))
-                       return false;
-
-nolock_empty:
-               WRITE_ONCE(qdisc->empty, false);
-               return true;
+               return spin_trylock(&qdisc->seqlock);
        } else if (qdisc_is_running(qdisc)) {
                return false;
        }
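
The retry above pairs with the unlock side: after the lock holder drops
qdisc->seqlock in qdisc_run_end(), it checks the MISSED bit and reschedules the
qdisc, so even a failed second spin_trylock() cannot strand packets. A rough
sketch of that counterpart (an approximation of the NOLOCK branch as it looks
around this change, not part of this patch):

	static inline void qdisc_run_end(struct Qdisc *qdisc)
	{
		if (qdisc->flags & TCQ_F_NOLOCK) {
			spin_unlock(&qdisc->seqlock);

			/* A concurrent qdisc_run_begin() may have set MISSED
			 * after its first spin_trylock() failed; reschedule so
			 * its pending packets are still dequeued.
			 */
			if (unlikely(test_bit(__QDISC_STATE_MISSED,
					      &qdisc->state)))
				__netif_schedule(qdisc);
		} else {
			write_seqcount_end(&qdisc->running);
		}
	}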
 
                need_retry = false;
 
                goto retry;
-       } else {
-               WRITE_ONCE(qdisc->empty, true);
        }
 
        return skb;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
-       sch->empty = true;
        dev_hold(dev);
        refcount_set(&sch->refcnt, 1);