ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
                pcpu_rt = *ppcpu_rt;
                if (pcpu_rt) {
+                       dst_release(&pcpu_rt->dst);
                        rt6_rcu_free(pcpu_rt);
                        *ppcpu_rt = NULL;
                }
 {
        if (atomic_dec_and_test(&rt->rt6i_ref)) {
                rt6_free_pcpu(rt);
+               dst_release(&rt->dst);
                rt6_rcu_free(rt);
        }
 }
                        atomic_inc(&pn->leaf->rt6i_ref);
                }
 #endif
+               /* Always release dst as dst->__refcnt is guaranteed
+                * to be taken before entering this function
+                */
+               dst_release(&rt->dst);
                if (!(rt->dst.flags & DST_NOCACHE))
                        dst_free(&rt->dst);
        }
 st_failure:
        if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
                fib6_repair_tree(info->nl_net, fn);
+       /* Always release dst as dst->__refcnt is guaranteed
+        * to be taken before entering this function
+        */
+       dst_release(&rt->dst);
        if (!(rt->dst.flags & DST_NOCACHE))
                dst_free(&rt->dst);
        return err;
                }
                gc_args->more++;
        } else if (rt->rt6i_flags & RTF_CACHE) {
-               if (atomic_read(&rt->dst.__refcnt) == 0 &&
+               if (atomic_read(&rt->dst.__refcnt) == 1 &&
                    time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
                        RT6_TRACE("aging clone %p\n", rt);
                        return -1;
 
                                        int flags)
 {
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
-                                       0, DST_OBSOLETE_FORCE_CHK, flags);
+                                       1, DST_OBSOLETE_FORCE_CHK, flags);
 
        if (rt)
                rt6_info_init(rt);
                                *p =  NULL;
                        }
                } else {
-                       dst_destroy((struct dst_entry *)rt);
+                       dst_release(&rt->dst);
+                       if (!(flags & DST_NOCACHE))
+                               dst_destroy((struct dst_entry *)rt);
                        return NULL;
                }
        }
 EXPORT_SYMBOL(rt6_lookup);
 
 /* ip6_ins_rt is called with FREE table->tb6_lock.
-   It takes new route entry, the addition fails by any reason the
-   route is freed. In any case, if caller does not hold it, it may
-   be destroyed.
+ * It takes a new route entry; if the addition fails for any reason,
+ * the route is released.
+ * Caller must hold dst before calling it.
  */
 
 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
        struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
        struct mx6_config mxc = { .mx = NULL, };
 
+       /* Hold dst to account for the reference from the fib6 tree */
+       dst_hold(&rt->dst);
        return __ip6_ins_rt(rt, &info, &mxc, NULL);
 }
 
                prev = cmpxchg(p, NULL, pcpu_rt);
                if (prev) {
                        /* If someone did it before us, return prev instead */
+                       dst_release(&pcpu_rt->dst);
                        dst_destroy(&pcpu_rt->dst);
                        pcpu_rt = prev;
                }
                 * since rt is going away anyway.  The next
                 * dst_check() will trigger a re-lookup.
                 */
+               dst_release(&pcpu_rt->dst);
                dst_destroy(&pcpu_rt->dst);
                pcpu_rt = rt;
        }
                uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
                dst_release(&rt->dst);
 
-               if (uncached_rt)
+               if (uncached_rt) {
+                       /* uncached_rt's refcnt was already taken during
+                        * ip6_rt_cache_alloc(); no need for another dst_hold()
+                        */
                        rt6_uncached_list_add(uncached_rt);
-               else
+               } else {
                        uncached_rt = net->ipv6.ip6_null_entry;
-
-               dst_hold(&uncached_rt->dst);
+                       dst_hold(&uncached_rt->dst);
+               }
 
                trace_fib6_table_lookup(net, uncached_rt, table->tb6_id, fl6);
                return uncached_rt;
                         * invalidate the sk->sk_dst_cache.
                         */
                        ip6_ins_rt(nrt6);
+                       /* Release the reference taken in
+                        * ip6_rt_cache_alloc()
+                        */
+                       dst_release(&nrt6->dst);
                }
        }
 }
 
        rt->dst.flags |= DST_HOST;
        rt->dst.output  = ip6_output;
-       atomic_set(&rt->dst.__refcnt, 1);
        rt->rt6i_gateway  = fl6->daddr;
        rt->rt6i_dst.addr = fl6->daddr;
        rt->rt6i_dst.plen = 128;
                dev_put(dev);
        if (idev)
                in6_dev_put(idev);
-       if (rt)
+       if (rt) {
+               dst_release(&rt->dst);
                dst_free(&rt->dst);
+       }
 
        return ERR_PTR(err);
 }
 
        return err;
 out:
-       if (rt)
+       if (rt) {
+               dst_release(&rt->dst);
                dst_free(&rt->dst);
+       }
 
        return err;
 }
        nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
 
        if (ip6_ins_rt(nrt))
-               goto out;
+               goto out_release;
 
        netevent.old = &rt->dst;
        netevent.new = &nrt->dst;
                ip6_del_rt(rt);
        }
 
+out_release:
+       /* Release the reference taken in
+        * ip6_rt_cache_alloc()
+        */
+       dst_release(&nrt->dst);
+
 out:
        neigh_release(neigh);
 }
        rt->rt6i_table = fib6_get_table(net, tb_id);
        rt->dst.flags |= DST_NOCACHE;
 
-       atomic_set(&rt->dst.__refcnt, 1);
-
        return rt;
 }
 
 
                err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
                if (err) {
+                       dst_release(&rt->dst);
                        dst_free(&rt->dst);
                        goto cleanup;
                }
 
 cleanup:
        list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
-               if (nh->rt6_info)
+               if (nh->rt6_info) {
+                       dst_release(&nh->rt6_info->dst);
                        dst_free(&nh->rt6_info->dst);
+               }
                kfree(nh->mxc.mx);
                list_del(&nh->next);
                kfree(nh);