--- /dev/null
+++ b/include/net/codel.h
+#ifndef __NET_SCHED_CODEL_H
+#define __NET_SCHED_CODEL_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ *  Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ktime.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source: Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on Linux by Dave Taht and Eric Dumazet
+ */
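+
+/* In short: codel_set_enqueue_time() timestamps each packet at enqueue;
+ * codel_dequeue() compares the resulting sojourn time against the target.
+ * Once the sojourn time has stayed above target for at least one interval,
+ * CoDel enters a dropping state and drops (or ECN-marks) packets at
+ * intervals shrinking as interval/sqrt(count) until the delay falls back
+ * below target.
+ */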
+
+/* CoDel uses a 1024 nsec clock, encoded in u32
+ * This gives a range of 2199 seconds, because of signed compares
+ */
+typedef u32 codel_time_t;
+typedef s32 codel_tdiff_t;
+#define CODEL_SHIFT 10
+#define MS2TIME(a) (((a) * NSEC_PER_MSEC) >> CODEL_SHIFT)
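+
+/* For example, MS2TIME(5) = (5 * NSEC_PER_MSEC) >> CODEL_SHIFT
+ * = 5000000 >> 10 = 4882 ticks, i.e. roughly 5 ms in 1024 ns units.
+ */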
+
+static inline codel_time_t codel_get_time(void)
+{
+       u64 ns = ktime_to_ns(ktime_get());
+
+       return ns >> CODEL_SHIFT;
+}
+
+#define codel_time_after(a, b)         ((s32)(a) - (s32)(b) > 0)
+#define codel_time_after_eq(a, b)      ((s32)(a) - (s32)(b) >= 0)
+#define codel_time_before(a, b)        ((s32)(a) - (s32)(b) < 0)
+#define codel_time_before_eq(a, b)     ((s32)(a) - (s32)(b) <= 0)
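+
+/* These comparisons work on the signed 32-bit difference, like time_after()
+ * does for jiffies, so they stay correct across u32 wrap-around as long as
+ * the two timestamps are less than 2^31 ticks (~2199 s) apart.
+ */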
+
+/* Qdiscs using the codel plugin must use codel_skb_cb in their own cb[] */
+struct codel_skb_cb {
+       codel_time_t enqueue_time;
+};
+
+static inline struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
+{
+       qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
+       return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static inline codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
+{
+       return get_codel_cb(skb)->enqueue_time;
+}
+
+static inline void codel_set_enqueue_time(struct sk_buff *skb)
+{
+       get_codel_cb(skb)->enqueue_time = codel_get_time();
+}
+
+static inline u32 codel_time_to_us(codel_time_t val)
+{
+       u64 valns = ((u64)val << CODEL_SHIFT);
+
+       do_div(valns, NSEC_PER_USEC);
+       return (u32)valns;
+}
+
+/**
+ * struct codel_params - contains codel parameters
+ * @target:    target sojourn time (in time units)
+ * @interval:  width of moving time window
+ * @ecn:       is Explicit Congestion Notification enabled
+ */
+struct codel_params {
+       codel_time_t    target;
+       codel_time_t    interval;
+       bool            ecn;
+};
+
+/**
+ * struct codel_vars - contains codel variables
+ * @count:             how many drops we've done since the last time we
+ *                     entered dropping state
+ * @lastcount:         count at entry to dropping state
+ * @dropping:          set to true if in dropping state
+ * @first_above_time:  when we went (or will go) continuously above target
+ *                     for interval
+ * @drop_next:         time to drop next packet, or when we dropped last
+ * @ldelay:            sojourn time of last dequeued packet
+ */
+struct codel_vars {
+       u32             count;
+       u32             lastcount;
+       bool            dropping;
+       codel_time_t    first_above_time;
+       codel_time_t    drop_next;
+       codel_time_t    ldelay;
+};
+
+/**
+ * struct codel_stats - contains codel shared variables and stats
+ * @maxpacket: largest packet we've seen so far
+ * @drop_count:        temp count of dropped packets in dequeue()
+ * @ecn_mark:  number of packets we ECN marked instead of dropping
+ */
+struct codel_stats {
+       u32             maxpacket;
+       u32             drop_count;
+       u32             ecn_mark;
+};
+
+static inline void codel_params_init(struct codel_params *params)
+{
+       params->interval = MS2TIME(100);
+       params->target = MS2TIME(5);
+       params->ecn = false;
+}
+
+static inline void codel_vars_init(struct codel_vars *vars)
+{
+       vars->drop_next = 0;
+       vars->first_above_time = 0;
+       vars->dropping = false; /* exit dropping state */
+       vars->count = 0;
+       vars->lastcount = 0;
+}
+
+static inline void codel_stats_init(struct codel_stats *stats)
+{
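+       /* Seed with a conservative packet size: codel_should_drop() never
+        * drops while backlog <= maxpacket, so this keeps a nearly empty
+        * queue out of the dropping state before real packet sizes are seen.
+        */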
+       stats->maxpacket = 256;
+}
+
+/* Return interval/sqrt(x) with good precision; relies on the kernel's
+ * int_sqrt(unsigned long x) implementation.
+ */
+static inline u32 codel_inv_sqrt(u32 _interval, u32 _x)
+{
+       u64 interval = _interval;
+       unsigned long x = _x;
+
+       /* Scale operands for max precision */
+
+#if BITS_PER_LONG == 64
+       x <<= 32; /* On 64bit arches, we can prescale x by 32bits */
+       interval <<= 16;
+#endif
+
+       while (x < (1UL << (BITS_PER_LONG - 2))) {
+               x <<= 2;
+               interval <<= 1;
+       }
+       do_div(interval, int_sqrt(x));
+       return (u32)interval;
+}
+
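+/* The CoDel control law: schedule the next drop at t + interval/sqrt(count),
+ * so the gap between drops shrinks as 1/sqrt(count) while the queue stays
+ * above target. With the default 100 ms interval, for example:
+ *   count = 1   -> next drop in 100 ms
+ *   count = 4   -> next drop in  50 ms
+ *   count = 100 -> next drop in  10 ms
+ */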
+static inline codel_time_t codel_control_law(codel_time_t t,
+                                            codel_time_t interval,
+                                            u32 count)
+{
+       return t + codel_inv_sqrt(interval, count);
+}
+
+static inline bool codel_should_drop(struct sk_buff *skb,
+                                    unsigned int *backlog,
+                                    struct codel_vars *vars,
+                                    struct codel_params *params,
+                                    struct codel_stats *stats,
+                                    codel_time_t now)
+{
+       bool ok_to_drop;
+
+       if (!skb) {
+               vars->first_above_time = 0;
+               return false;
+       }
+
+       vars->ldelay = now - codel_get_enqueue_time(skb);
+       *backlog -= qdisc_pkt_len(skb);
+
+       if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
+               stats->maxpacket = qdisc_pkt_len(skb);
+
+       if (codel_time_before(vars->ldelay, params->target) ||
+           *backlog <= stats->maxpacket) {
+               /* went below - stay below for at least interval */
+               vars->first_above_time = 0;
+               return false;
+       }
+       ok_to_drop = false;
+       if (vars->first_above_time == 0) {
+               /* just went above from below. If we stay above
+                * for at least interval we'll say it's ok to drop
+                */
+               vars->first_above_time = now + params->interval;
+       } else if (codel_time_after(now, vars->first_above_time)) {
+               ok_to_drop = true;
+       }
+       return ok_to_drop;
+}
+
+typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
+                                               struct Qdisc *sch);
+
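+/* Generic dequeue path shared by codel-based qdiscs: pull a packet via
+ * @dequeue_func and drop or ECN-mark it according to the CoDel state
+ * machine. See codel_qdisc_dequeue() in sch_codel.c for a minimal caller.
+ */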
+static inline struct sk_buff *codel_dequeue(struct Qdisc *sch,
+                                           struct codel_params *params,
+                                           struct codel_vars *vars,
+                                           struct codel_stats *stats,
+                                           codel_skb_dequeue_t dequeue_func,
+                                           u32 *backlog)
+{
+       struct sk_buff *skb = dequeue_func(vars, sch);
+       codel_time_t now;
+       bool drop;
+
+       if (!skb) {
+               vars->dropping = false;
+               return skb;
+       }
+       now = codel_get_time();
+       drop = codel_should_drop(skb, backlog, vars, params, stats, now);
+       if (vars->dropping) {
+               if (!drop) {
+                       /* sojourn time below target - leave dropping state */
+                       vars->dropping = false;
+               } else if (codel_time_after_eq(now, vars->drop_next)) {
+                       /* It's time for the next drop. Drop the current
+                        * packet and dequeue the next. The dequeue might
+                        * take us out of dropping state.
+                        * If not, schedule the next drop.
+                        * A large backlog might result in drop rates so high
+                        * that the next drop should happen now,
+                        * hence the while loop.
+                        */
+                       while (vars->dropping &&
+                              codel_time_after_eq(now, vars->drop_next)) {
+                               if (++vars->count == 0) /* avoid zero divides */
+                                       vars->count = ~0U;
+                               if (params->ecn && INET_ECN_set_ce(skb)) {
+                                       stats->ecn_mark++;
+                                       vars->drop_next =
+                                               codel_control_law(vars->drop_next,
+                                                                 params->interval,
+                                                                 vars->count);
+                                       goto end;
+                               }
+                               qdisc_drop(skb, sch);
+                               stats->drop_count++;
+                               skb = dequeue_func(vars, sch);
+                               if (!codel_should_drop(skb, backlog,
+                                                      vars, params, stats, now)) {
+                                       /* leave dropping state */
+                                       vars->dropping = false;
+                               } else {
+                                       /* and schedule the next drop */
+                                       vars->drop_next =
+                                               codel_control_law(vars->drop_next,
+                                                                 params->interval,
+                                                                 vars->count);
+                               }
+                       }
+               }
+       } else if (drop) {
+               if (params->ecn && INET_ECN_set_ce(skb)) {
+                       stats->ecn_mark++;
+               } else {
+                       qdisc_drop(skb, sch);
+                       stats->drop_count++;
+
+                       skb = dequeue_func(vars, sch);
+                       drop = codel_should_drop(skb, backlog, vars, params,
+                                                stats, now);
+               }
+               vars->dropping = true;
+               /* if min went above target close to when we last went below it
+                * assume that the drop rate that controlled the queue on the
+                * last cycle is a good starting point to control it now.
+                */
+               if (codel_time_before(now - vars->drop_next,
+                                     16 * params->interval)) {
+                       vars->count = (vars->count - vars->lastcount) | 1;
+               } else {
+                       vars->count = 1;
+               }
+               vars->lastcount = vars->count;
+               vars->drop_next = codel_control_law(now, params->interval,
+                                                   vars->count);
+       }
+end:
+       return skb;
+}
+#endif /* __NET_SCHED_CODEL_H */
 
--- /dev/null
+++ b/net/sched/sch_codel.c
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ *
+ *  Implemented on Linux by:
+ *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ *  Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/codel.h>
+
+#define DEFAULT_CODEL_LIMIT 1000
+
+struct codel_sched_data {
+       struct codel_params     params;
+       struct codel_vars       vars;
+       struct codel_stats      stats;
+       u32                     drop_overlimit;
+};
+
+/* This is the specific function called from codel_dequeue()
+ * to dequeue a packet from the queue. Note: backlog is handled in
+ * codel_should_drop(); we don't need to reduce it here.
+ */
+static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+{
+       struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+       if (skb)
+               prefetch(&skb->end); /* we'll need skb_shinfo() */
+       return skb;
+}
+
+static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
+{
+       struct codel_sched_data *q = qdisc_priv(sch);
+       struct sk_buff *skb;
+
+       skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
+                           dequeue, &sch->qstats.backlog);
+       /* We can't call qdisc_tree_decrease_qlen() while our qlen is 0
+        * (HTB would crash); defer it to the next round.
+        */
+       if (q->stats.drop_count && sch->q.qlen) {
+               qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
+               q->stats.drop_count = 0;
+       }
+       if (skb)
+               qdisc_bstats_update(sch, skb);
+       return skb;
+}
+
+static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+       struct codel_sched_data *q;
+
+       if (likely(qdisc_qlen(sch) < sch->limit)) {
+               codel_set_enqueue_time(skb);
+               return qdisc_enqueue_tail(skb, sch);
+       }
+       q = qdisc_priv(sch);
+       q->drop_overlimit++;
+       return qdisc_drop(skb, sch);
+}
+
+static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
+       [TCA_CODEL_TARGET]      = { .type = NLA_U32 },
+       [TCA_CODEL_LIMIT]       = { .type = NLA_U32 },
+       [TCA_CODEL_INTERVAL]    = { .type = NLA_U32 },
+       [TCA_CODEL_ECN]         = { .type = NLA_U32 },
+};
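+
+/* TCA_CODEL_TARGET and TCA_CODEL_INTERVAL arrive from userspace in
+ * microseconds; codel_change() converts them to codel_time_t ticks, e.g.
+ * a 5000 us target becomes (5000 * NSEC_PER_USEC) >> CODEL_SHIFT
+ * = 5000000 >> 10 = 4882 ticks.
+ */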
+
+static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+       struct codel_sched_data *q = qdisc_priv(sch);
+       struct nlattr *tb[TCA_CODEL_MAX + 1];
+       unsigned int qlen;
+       int err;
+
+       if (!opt)
+               return -EINVAL;
+
+       err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
+       if (err < 0)
+               return err;
+
+       sch_tree_lock(sch);
+
+       if (tb[TCA_CODEL_TARGET]) {
+               u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
+
+               q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
+       }
+
+       if (tb[TCA_CODEL_INTERVAL]) {
+               u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
+
+               q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+       }
+
+       if (tb[TCA_CODEL_LIMIT])
+               sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
+
+       if (tb[TCA_CODEL_ECN])
+               q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
+
+       qlen = sch->q.qlen;
+       while (sch->q.qlen > sch->limit) {
+               struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+               sch->qstats.backlog -= qdisc_pkt_len(skb);
+               qdisc_drop(skb, sch);
+       }
+       qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+
+       sch_tree_unlock(sch);
+       return 0;
+}
+
+static int codel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+       struct codel_sched_data *q = qdisc_priv(sch);
+
+       sch->limit = DEFAULT_CODEL_LIMIT;
+
+       codel_params_init(&q->params);
+       codel_vars_init(&q->vars);
+       codel_stats_init(&q->stats);
+
+       if (opt) {
+               int err = codel_change(sch, opt);
+
+               if (err)
+                       return err;
+       }
+
+       if (sch->limit >= 1)
+               sch->flags |= TCQ_F_CAN_BYPASS;
+       else
+               sch->flags &= ~TCQ_F_CAN_BYPASS;
+
+       return 0;
+}
+
+static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+       struct codel_sched_data *q = qdisc_priv(sch);
+       struct nlattr *opts;
+
+       opts = nla_nest_start(skb, TCA_OPTIONS);
+       if (opts == NULL)
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CODEL_TARGET,
+                       codel_time_to_us(q->params.target)) ||
+           nla_put_u32(skb, TCA_CODEL_LIMIT,
+                       sch->limit) ||
+           nla_put_u32(skb, TCA_CODEL_INTERVAL,
+                       codel_time_to_us(q->params.interval)) ||
+           nla_put_u32(skb, TCA_CODEL_ECN,
+                       q->params.ecn))
+               goto nla_put_failure;
+
+       return nla_nest_end(skb, opts);
+
+nla_put_failure:
+       nla_nest_cancel(skb, opts);
+       return -1;
+}
+
+static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+       const struct codel_sched_data *q = qdisc_priv(sch);
+       struct tc_codel_xstats st = {
+               .maxpacket      = q->stats.maxpacket,
+               .count          = q->vars.count,
+               .lastcount      = q->vars.lastcount,
+               .drop_overlimit = q->drop_overlimit,
+               .ldelay         = codel_time_to_us(q->vars.ldelay),
+               .dropping       = q->vars.dropping,
+               .ecn_mark       = q->stats.ecn_mark,
+       };
+
+       if (q->vars.dropping) {
+               codel_tdiff_t delta = q->vars.drop_next - codel_get_time();
+
+               if (delta >= 0)
+                       st.drop_next = codel_time_to_us(delta);
+               else
+                       st.drop_next = -codel_time_to_us(-delta);
+       }
+
+       return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void codel_reset(struct Qdisc *sch)
+{
+       struct codel_sched_data *q = qdisc_priv(sch);
+
+       qdisc_reset_queue(sch);
+       codel_vars_init(&q->vars);
+}
+
+static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
+       .id             =       "codel",
+       .priv_size      =       sizeof(struct codel_sched_data),
+
+       .enqueue        =       codel_qdisc_enqueue,
+       .dequeue        =       codel_qdisc_dequeue,
+       .peek           =       qdisc_peek_dequeued,
+       .init           =       codel_init,
+       .reset          =       codel_reset,
+       .change         =       codel_change,
+       .dump           =       codel_dump,
+       .dump_stats     =       codel_dump_stats,
+       .owner          =       THIS_MODULE,
+};
+
+static int __init codel_module_init(void)
+{
+       return register_qdisc(&codel_qdisc_ops);
+}
+
+static void __exit codel_module_exit(void)
+{
+       unregister_qdisc(&codel_qdisc_ops);
+}
+
+module_init(codel_module_init)
+module_exit(codel_module_exit)
+
+MODULE_DESCRIPTION("Controlled Delay queue discipline");
+MODULE_AUTHOR("Dave Taht");
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("Dual BSD/GPL");
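+
+/*
+ * Example usage, assuming an iproute2 tc with matching codel support
+ * (illustrative; parameter syntax may differ):
+ *
+ *   tc qdisc add dev eth0 root codel limit 1000 target 5ms interval 100ms ecn
+ *   tc -s qdisc show dev eth0
+ */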