        return tmp;
 }
 
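+/* Inverse of dualpi2_scale_alpha_beta(): convert the fixed-point alpha/beta
+ * values used internally back to the units of the TCA_DUALPI2_ALPHA/BETA
+ * netlink attributes, so they can be reported by dualpi2_dump().
+ */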
+static u32 dualpi2_unscale_alpha_beta(u32 param)
+{
+       u64 tmp = ((u64)param * NSEC_PER_SEC << ALPHA_BETA_SCALING);
+
+       do_div(tmp, MAX_PROB);
+       return tmp;
+}
+
 static ktime_t next_pi2_timeout(struct dualpi2_sched_data *q)
 {
        return ktime_add_ns(ktime_get_ns(), q->pi2_tupdate);
 }
 
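+/* Convert nanoseconds to microseconds, saturating at U32_MAX when the result
+ * does not fit in 32 bits.
+ */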
+static u32 convert_ns_to_usec(u64 ns)
+{
+       do_div(ns, NSEC_PER_USEC);
+       if (upper_32_bits(ns))
+               return U32_MAX;
+
+       return lower_32_bits(ns);
+}
+
 static enum hrtimer_restart dualpi2_timer(struct hrtimer *timer)
 {
        struct dualpi2_sched_data *q = timer_container_of(q, timer, pi2_timer);
        if (tb[TCA_DUALPI2_LIMIT]) {
                u32 limit = nla_get_u32(tb[TCA_DUALPI2_LIMIT]);
 
-               sch->limit = limit;
-               q->memory_limit = get_memory_limit(sch, limit);
+               WRITE_ONCE(sch->limit, limit);
+               WRITE_ONCE(q->memory_limit, get_memory_limit(sch, limit));
        }
 
        if (tb[TCA_DUALPI2_MEMORY_LIMIT])
-               q->memory_limit = nla_get_u32(tb[TCA_DUALPI2_MEMORY_LIMIT]);
+               WRITE_ONCE(q->memory_limit,
+                          nla_get_u32(tb[TCA_DUALPI2_MEMORY_LIMIT]));
 
        if (tb[TCA_DUALPI2_TARGET]) {
                u64 target = nla_get_u32(tb[TCA_DUALPI2_TARGET]);
 
-               q->pi2_target = target * NSEC_PER_USEC;
+               WRITE_ONCE(q->pi2_target, target * NSEC_PER_USEC);
        }
 
        if (tb[TCA_DUALPI2_TUPDATE]) {
                u64 tupdate = nla_get_u32(tb[TCA_DUALPI2_TUPDATE]);
 
-               q->pi2_tupdate = convert_us_to_nsec(tupdate);
+               WRITE_ONCE(q->pi2_tupdate, convert_us_to_nsec(tupdate));
        }
 
        if (tb[TCA_DUALPI2_ALPHA]) {
                u32 alpha = nla_get_u32(tb[TCA_DUALPI2_ALPHA]);
 
-               q->pi2_alpha = dualpi2_scale_alpha_beta(alpha);
+               WRITE_ONCE(q->pi2_alpha, dualpi2_scale_alpha_beta(alpha));
        }
 
        if (tb[TCA_DUALPI2_BETA]) {
                u32 beta = nla_get_u32(tb[TCA_DUALPI2_BETA]);
 
-               q->pi2_beta = dualpi2_scale_alpha_beta(beta);
+               WRITE_ONCE(q->pi2_beta, dualpi2_scale_alpha_beta(beta));
        }
 
        if (tb[TCA_DUALPI2_STEP_THRESH_PKTS]) {
                u32 step_th = nla_get_u32(tb[TCA_DUALPI2_STEP_THRESH_PKTS]);
 
-               q->step_in_packets = true;
-               q->step_thresh = step_th;
+               WRITE_ONCE(q->step_in_packets, true);
+               WRITE_ONCE(q->step_thresh, step_th);
        } else if (tb[TCA_DUALPI2_STEP_THRESH_US]) {
                u32 step_th = nla_get_u32(tb[TCA_DUALPI2_STEP_THRESH_US]);
 
-               q->step_in_packets = false;
-               q->step_thresh = convert_us_to_nsec(step_th);
+               WRITE_ONCE(q->step_in_packets, false);
+               WRITE_ONCE(q->step_thresh, convert_us_to_nsec(step_th));
        }
 
        if (tb[TCA_DUALPI2_MIN_QLEN_STEP])
-               q->min_qlen_step = nla_get_u32(tb[TCA_DUALPI2_MIN_QLEN_STEP]);
+               WRITE_ONCE(q->min_qlen_step,
+                          nla_get_u32(tb[TCA_DUALPI2_MIN_QLEN_STEP]));
 
        if (tb[TCA_DUALPI2_COUPLING]) {
                u8 coupling = nla_get_u8(tb[TCA_DUALPI2_COUPLING]);
 
-               q->coupling_factor = coupling;
+               WRITE_ONCE(q->coupling_factor, coupling);
        }
 
        if (tb[TCA_DUALPI2_DROP_OVERLOAD]) {
                u8 drop_overload = nla_get_u8(tb[TCA_DUALPI2_DROP_OVERLOAD]);
 
-               q->drop_overload = (bool)drop_overload;
+               WRITE_ONCE(q->drop_overload, (bool)drop_overload);
        }
 
        if (tb[TCA_DUALPI2_DROP_EARLY]) {
                u8 drop_early = nla_get_u8(tb[TCA_DUALPI2_DROP_EARLY]);
 
-               q->drop_early = (bool)drop_early;
+               WRITE_ONCE(q->drop_early, (bool)drop_early);
        }
 
        if (tb[TCA_DUALPI2_C_PROTECTION]) {
        if (tb[TCA_DUALPI2_ECN_MASK]) {
                u8 ecn_mask = nla_get_u8(tb[TCA_DUALPI2_ECN_MASK]);
 
-               q->ecn_mask = ecn_mask;
+               WRITE_ONCE(q->ecn_mask, ecn_mask);
        }
 
        if (tb[TCA_DUALPI2_SPLIT_GSO]) {
                u8 split_gso = nla_get_u8(tb[TCA_DUALPI2_SPLIT_GSO]);
 
-               q->split_gso = (bool)split_gso;
+               WRITE_ONCE(q->split_gso, (bool)split_gso);
        }
 
        old_qlen = qdisc_qlen(sch);
        return 0;
 }
 
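+/* Report the current configuration to user space. Fields that can be updated
+ * concurrently by dualpi2_change() are read with READ_ONCE() to pair with the
+ * WRITE_ONCE() annotations above.
+ */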
+static int dualpi2_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+       struct dualpi2_sched_data *q = qdisc_priv(sch);
+       struct nlattr *opts;
+       bool step_in_pkts;
+       u32 step_th;
+
+       step_in_pkts = READ_ONCE(q->step_in_packets);
+       step_th = READ_ONCE(q->step_thresh);
+
+       opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
+       if (!opts)
+               goto nla_put_failure;
+
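+       /* The step threshold is dumped either as TCA_DUALPI2_STEP_THRESH_PKTS
+        * or as TCA_DUALPI2_STEP_THRESH_US, depending on how it was configured;
+        * the remaining attributes are the same in both branches.
+        */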
+       if (step_in_pkts &&
+           (nla_put_u32(skb, TCA_DUALPI2_LIMIT, READ_ONCE(sch->limit)) ||
+           nla_put_u32(skb, TCA_DUALPI2_MEMORY_LIMIT,
+                       READ_ONCE(q->memory_limit)) ||
+           nla_put_u32(skb, TCA_DUALPI2_TARGET,
+                       convert_ns_to_usec(READ_ONCE(q->pi2_target))) ||
+           nla_put_u32(skb, TCA_DUALPI2_TUPDATE,
+                       convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) ||
+           nla_put_u32(skb, TCA_DUALPI2_ALPHA,
+                       dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) ||
+           nla_put_u32(skb, TCA_DUALPI2_BETA,
+                       dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) ||
+           nla_put_u32(skb, TCA_DUALPI2_STEP_THRESH_PKTS, step_th) ||
+           nla_put_u32(skb, TCA_DUALPI2_MIN_QLEN_STEP,
+                       READ_ONCE(q->min_qlen_step)) ||
+           nla_put_u8(skb, TCA_DUALPI2_COUPLING,
+                      READ_ONCE(q->coupling_factor)) ||
+           nla_put_u8(skb, TCA_DUALPI2_DROP_OVERLOAD,
+                      READ_ONCE(q->drop_overload)) ||
+           nla_put_u8(skb, TCA_DUALPI2_DROP_EARLY,
+                      READ_ONCE(q->drop_early)) ||
+           nla_put_u8(skb, TCA_DUALPI2_C_PROTECTION,
+                      READ_ONCE(q->c_protection_wc)) ||
+           nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) ||
+           nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso))))
+               goto nla_put_failure;
+
+       if (!step_in_pkts &&
+           (nla_put_u32(skb, TCA_DUALPI2_LIMIT, READ_ONCE(sch->limit)) ||
+           nla_put_u32(skb, TCA_DUALPI2_MEMORY_LIMIT,
+                       READ_ONCE(q->memory_limit)) ||
+           nla_put_u32(skb, TCA_DUALPI2_TARGET,
+                       convert_ns_to_usec(READ_ONCE(q->pi2_target))) ||
+           nla_put_u32(skb, TCA_DUALPI2_TUPDATE,
+                       convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) ||
+           nla_put_u32(skb, TCA_DUALPI2_ALPHA,
+                       dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) ||
+           nla_put_u32(skb, TCA_DUALPI2_BETA,
+                       dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) ||
+           nla_put_u32(skb, TCA_DUALPI2_STEP_THRESH_US,
+                       convert_ns_to_usec(step_th)) ||
+           nla_put_u32(skb, TCA_DUALPI2_MIN_QLEN_STEP,
+                       READ_ONCE(q->min_qlen_step)) ||
+           nla_put_u8(skb, TCA_DUALPI2_COUPLING,
+                      READ_ONCE(q->coupling_factor)) ||
+           nla_put_u8(skb, TCA_DUALPI2_DROP_OVERLOAD,
+                      READ_ONCE(q->drop_overload)) ||
+           nla_put_u8(skb, TCA_DUALPI2_DROP_EARLY,
+                      READ_ONCE(q->drop_early)) ||
+           nla_put_u8(skb, TCA_DUALPI2_C_PROTECTION,
+                      READ_ONCE(q->c_protection_wc)) ||
+           nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) ||
+           nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso))))
+               goto nla_put_failure;
+
+       return nla_nest_end(skb, opts);
+
+nla_put_failure:
+       nla_nest_cancel(skb, opts);
+       return -1;
+}
+
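+/* Export internal state (PI probability, per-queue packet counts, queue
+ * delays, memory usage, ...) as tc extended statistics via
+ * struct tc_dualpi2_xstats.
+ */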
+static int dualpi2_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+       struct dualpi2_sched_data *q = qdisc_priv(sch);
+       struct tc_dualpi2_xstats st = {
+               .prob                   = q->pi2_prob,
+               .packets_in_c           = q->packets_in_c,
+               .packets_in_l           = q->packets_in_l,
+               .maxq                   = q->maxq,
+               .ecn_mark               = q->ecn_mark,
+               .credit                 = q->c_protection_credit,
+               .step_marks             = q->step_marks,
+               .memory_used            = q->memory_used,
+               .max_memory_used        = q->max_memory_used,
+               .memory_limit           = q->memory_limit,
+       };
+       u64 qc, ql;
+
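+       /* Queue delays are tracked in nanoseconds and reported in usec. */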
+       get_queue_delays(q, &qc, &ql);
+       st.delay_l = convert_ns_to_usec(ql);
+       st.delay_c = convert_ns_to_usec(qc);
+       return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
 /* Reset both L-queue and C-queue, internal packet counters, PI probability,
  * C-queue protection credit, and timestamps, while preserving current
  * configuration of DUALPI2.
        .destroy        = dualpi2_destroy,
        .reset          = dualpi2_reset,
        .change         = dualpi2_change,
+       .dump           = dualpi2_dump,
+       .dump_stats     = dualpi2_dump_stats,
        .owner          = THIS_MODULE,
 };