struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc);
 void qdisc_reset(struct Qdisc *qdisc);
-void qdisc_destroy(struct Qdisc *qdisc);
+void qdisc_put(struct Qdisc *qdisc);
 void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
                               unsigned int len);
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 
                qdisc_notify(net, skb, n, clid, old, new);
 
        if (old)
-               qdisc_destroy(old);
+               qdisc_put(old);
 }
 
 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
                                qdisc_refcount_inc(new);
 
                        if (!ingress)
-                               qdisc_destroy(old);
+                               qdisc_put(old);
                }
 
 skip:
        err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
        if (err) {
                if (q)
-                       qdisc_destroy(q);
+                       qdisc_put(q);
                return err;
        }
 
 
        pr_debug("atm_tc_put: destroying\n");
        list_del_init(&flow->list);
        pr_debug("atm_tc_put: qdisc %p\n", flow->q);
-       qdisc_destroy(flow->q);
+       qdisc_put(flow->q);
        tcf_block_put(flow->block);
        if (flow->sock) {
                pr_debug("atm_tc_put: f_count %ld\n",
 
        WARN_ON(cl->filters);
 
        tcf_block_put(cl->block);
-       qdisc_destroy(cl->q);
+       qdisc_put(cl->q);
        qdisc_put_rtab(cl->R_tab);
        gen_kill_estimator(&cl->rate_est);
        if (cl != &q->link)
 
        cbs_disable_offload(dev, q);
 
        if (q->qdisc)
-               qdisc_destroy(q->qdisc);
+               qdisc_put(q->qdisc);
 }
 
 static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)
 
                                            tca[TCA_RATE]);
                if (err) {
                        NL_SET_ERR_MSG(extack, "Failed to replace estimator");
-                       qdisc_destroy(cl->qdisc);
+                       qdisc_put(cl->qdisc);
                        kfree(cl);
                        return err;
                }
 static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
 {
        gen_kill_estimator(&cl->rate_est);
-       qdisc_destroy(cl->qdisc);
+       qdisc_put(cl->qdisc);
        kfree(cl);
 }
 
 
        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
 
        tcf_block_put(p->block);
-       qdisc_destroy(p->q);
+       qdisc_put(p->q);
        if (p->mv != p->embedded)
                kfree(p->mv);
 }
 
        if (q) {
                err = fifo_set_limit(q, limit);
                if (err < 0) {
-                       qdisc_destroy(q);
+                       qdisc_put(q);
                        q = NULL;
                }
        }
 
        if (!ops->init || ops->init(sch, NULL, extack) == 0)
                return sch;
 
-       qdisc_destroy(sch);
+       qdisc_put(sch);
        return NULL;
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
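The error path in qdisc_create_dflt() above now releases its half-initialised qdisc with qdisc_put(); the refcount is still 1 at that point, so the put destroys it immediately. Callers that allocate a default child pair up the same way. A minimal sketch of that allocate/release pairing, with made-up example_* names and a hypothetical private-data struct (not part of this patch):

#include <linux/pkt_sched.h>    /* TC_H_MAKE() */
#include <net/sch_generic.h>    /* qdisc_create_dflt(), qdisc_put(), pfifo_qdisc_ops */

/* Hypothetical private data for the sketch below; a real qdisc keeps this
 * in its qdisc_priv() area.
 */
struct example_sched_data {
        struct Qdisc *child;
};

/* Sketch of the pairing seen in the converted call sites: the child
 * returned by qdisc_create_dflt() holds one reference, and its owner now
 * drops that reference with qdisc_put() instead of calling
 * qdisc_destroy() directly.
 */
static int example_init(struct Qdisc *sch, struct example_sched_data *q,
                        struct netlink_ext_ack *extack)
{
        q->child = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                     TC_H_MAKE(sch->handle, 1), extack);
        return q->child ? 0 : -ENOMEM;
}

static void example_destroy(struct example_sched_data *q)
{
        if (q->child)
                qdisc_put(q->child);    /* last reference: frees the child */
}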
        kfree((char *) qdisc - qdisc->padded);
 }
 
-void qdisc_destroy(struct Qdisc *qdisc)
+static void qdisc_destroy(struct Qdisc *qdisc)
 {
        const struct Qdisc_ops  *ops = qdisc->ops;
        struct sk_buff *skb, *tmp;
 
-       if (qdisc->flags & TCQ_F_BUILTIN ||
-           !refcount_dec_and_test(&qdisc->refcnt))
-               return;
-
 #ifdef CONFIG_NET_SCHED
        qdisc_hash_del(qdisc);
 
 
        qdisc_free(qdisc);
 }
-EXPORT_SYMBOL(qdisc_destroy);
+
+void qdisc_put(struct Qdisc *qdisc)
+{
+       if (qdisc->flags & TCQ_F_BUILTIN ||
+           !refcount_dec_and_test(&qdisc->refcnt))
+               return;
+
+       qdisc_destroy(qdisc);
+}
+EXPORT_SYMBOL(qdisc_put);
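The hunk above is the core of the rename: qdisc_destroy() becomes a static helper that unconditionally tears the qdisc down, while the exported entry point is now qdisc_put(), which takes over the TCQ_F_BUILTIN and refcount checks that previously sat at the top of qdisc_destroy(). For callers the semantics are unchanged: the qdisc is destroyed only when the last reference is dropped, and built-in qdiscs such as noop_qdisc are never destroyed. A stripped-down caller-side sketch, with a made-up helper name (not from the patch):

#include <net/sch_generic.h>    /* qdisc_put(), noop_qdisc */

/* Hypothetical sketch (not from the patch): detach a child qdisc and drop
 * our reference.  qdisc_put() is a no-op for built-in qdiscs such as
 * noop_qdisc (TCQ_F_BUILTIN) and calls the now-static qdisc_destroy()
 * only when the last reference is gone.
 */
static void example_release_child(struct Qdisc **slot)
{
        struct Qdisc *old = *slot;

        *slot = &noop_qdisc;    /* park the slot on a built-in qdisc */
        qdisc_put(old);         /* frees 'old' on the final reference drop */
}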
 
 /* Attach toplevel qdisc to device queue. */
 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
                dev_queue->qdisc_sleeping = qdisc_default;
 
-               qdisc_destroy(qdisc);
+               qdisc_put(qdisc);
        }
 }
 
        netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
-       qdisc_destroy(dev->qdisc);
+       qdisc_put(dev->qdisc);
        dev->qdisc = &noop_qdisc;
 
        WARN_ON(timer_pending(&dev->watchdog_timer));
 
        struct hfsc_sched *q = qdisc_priv(sch);
 
        tcf_block_put(cl->block);
-       qdisc_destroy(cl->qdisc);
+       qdisc_put(cl->qdisc);
        gen_kill_estimator(&cl->rate_est);
        if (cl != &q->root)
                kfree(cl);
 
 {
        if (!cl->level) {
                WARN_ON(!cl->leaf.q);
-               qdisc_destroy(cl->leaf.q);
+               qdisc_put(cl->leaf.q);
        }
        gen_kill_estimator(&cl->rate_est);
        tcf_block_put(cl->block);
                        /* turn parent into inner node */
                        qdisc_reset(parent->leaf.q);
                        qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
-                       qdisc_destroy(parent->leaf.q);
+                       qdisc_put(parent->leaf.q);
                        if (parent->prio_activity)
                                htb_deactivate(q, parent);
 
 
        if (!priv->qdiscs)
                return;
        for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
-               qdisc_destroy(priv->qdiscs[ntx]);
+               qdisc_put(priv->qdiscs[ntx]);
        kfree(priv->qdiscs);
 }
 
                qdisc = priv->qdiscs[ntx];
                old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
                if (old)
-                       qdisc_destroy(old);
+                       qdisc_put(old);
 #ifdef CONFIG_NET_SCHED
                if (ntx < dev->real_num_tx_queues)
                        qdisc_hash_add(qdisc, false);
 
                for (ntx = 0;
                     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
                     ntx++)
-                       qdisc_destroy(priv->qdiscs[ntx]);
+                       qdisc_put(priv->qdiscs[ntx]);
                kfree(priv->qdiscs);
        }
 
                qdisc = priv->qdiscs[ntx];
                old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
                if (old)
-                       qdisc_destroy(old);
+                       qdisc_put(old);
                if (ntx < dev->real_num_tx_queues)
                        qdisc_hash_add(qdisc, false);
        }
 
 
        tcf_block_put(q->block);
        for (band = 0; band < q->bands; band++)
-               qdisc_destroy(q->queues[band]);
+               qdisc_put(q->queues[band]);
 
        kfree(q->queues);
 }
                        q->queues[i] = &noop_qdisc;
                        qdisc_tree_reduce_backlog(child, child->q.qlen,
                                                  child->qstats.backlog);
-                       qdisc_destroy(child);
+                       qdisc_put(child);
                }
        }
 
                                        qdisc_tree_reduce_backlog(old,
                                                                  old->q.qlen,
                                                                  old->qstats.backlog);
-                                       qdisc_destroy(old);
+                                       qdisc_put(old);
                                }
                                sch_tree_unlock(sch);
                        }
 
 
        qdisc_watchdog_cancel(&q->watchdog);
        if (q->qdisc)
-               qdisc_destroy(q->qdisc);
+               qdisc_put(q->qdisc);
        dist_free(q->delay_dist);
        dist_free(q->slot_dist);
 }
 
        tcf_block_put(q->block);
        prio_offload(sch, NULL);
        for (prio = 0; prio < q->bands; prio++)
-               qdisc_destroy(q->queues[prio]);
+               qdisc_put(q->queues[prio]);
 }
 
 static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
                                              extack);
                if (!queues[i]) {
                        while (i > oldbands)
-                               qdisc_destroy(queues[--i]);
+                               qdisc_put(queues[--i]);
                        return -ENOMEM;
                }
        }
 
                qdisc_tree_reduce_backlog(child, child->q.qlen,
                                          child->qstats.backlog);
-               qdisc_destroy(child);
+               qdisc_put(child);
        }
 
        for (i = oldbands; i < q->bands; i++) {
 
        return 0;
 
 destroy_class:
-       qdisc_destroy(cl->qdisc);
+       qdisc_put(cl->qdisc);
        kfree(cl);
        return err;
 }
 
        qfq_rm_from_agg(q, cl);
        gen_kill_estimator(&cl->rate_est);
-       qdisc_destroy(cl->qdisc);
+       qdisc_put(cl->qdisc);
        kfree(cl);
 }
 
 
 
        del_timer_sync(&q->adapt_timer);
        red_offload(sch, false);
-       qdisc_destroy(q->qdisc);
+       qdisc_put(q->qdisc);
 }
 
 static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
        if (child) {
                qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
                                          q->qdisc->qstats.backlog);
-               qdisc_destroy(q->qdisc);
+               qdisc_put(q->qdisc);
                q->qdisc = child;
        }
 
 
        struct sfb_sched_data *q = qdisc_priv(sch);
 
        tcf_block_put(q->block);
-       qdisc_destroy(q->qdisc);
+       qdisc_put(q->qdisc);
 }
 
 static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
 
        qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
                                  q->qdisc->qstats.backlog);
-       qdisc_destroy(q->qdisc);
+       qdisc_put(q->qdisc);
        q->qdisc = child;
 
        q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
 
        if (child) {
                qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
                                          q->qdisc->qstats.backlog);
-               qdisc_destroy(q->qdisc);
+               qdisc_put(q->qdisc);
                q->qdisc = child;
        }
        q->limit = qopt->limit;
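The *_tune()/*_change() hunks above share one shape: when an old child qdisc is taken out of service, its queued packets are first accounted out of the parent hierarchy with qdisc_tree_reduce_backlog(), and only then is the reference dropped with qdisc_put() before the new child is installed under the tree lock. A condensed sketch of that shape, with a hypothetical function name (not from the patch):

#include <net/sch_generic.h>    /* sch_tree_lock(), qdisc_tree_reduce_backlog(), qdisc_put() */

/* Hypothetical sketch (not from the patch) of the child-replacement shape
 * converted above: account the old child's queue out of the hierarchy,
 * then drop the reference with qdisc_put() before installing the new one.
 */
static void example_replace_child(struct Qdisc *sch, struct Qdisc **slot,
                                  struct Qdisc *new_child)
{
        sch_tree_lock(sch);
        if (*slot) {
                struct Qdisc *old = *slot;

                qdisc_tree_reduce_backlog(old, old->q.qlen,
                                          old->qstats.backlog);
                qdisc_put(old);
        }
        *slot = new_child;
        sch_tree_unlock(sch);
}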
        struct tbf_sched_data *q = qdisc_priv(sch);
 
        qdisc_watchdog_cancel(&q->watchdog);
-       qdisc_destroy(q->qdisc);
+       qdisc_put(q->qdisc);
 }
 
 static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)