#define DST_XFRM_TUNNEL                0x0080
 #define DST_XFRM_QUEUE         0x0100
 #define DST_METADATA           0x0200
+#define DST_NOGC               0x0400
 
        short                   error;
 
 
 void dst_release(struct dst_entry *dst);
 
+void dst_release_immediate(struct dst_entry *dst);
+
 static inline void refdst_drop(unsigned long refdst)
 {
        if (!(refdst & SKB_DST_NOREF))
  */
 static inline bool dst_hold_safe(struct dst_entry *dst)
 {
-       if (dst->flags & DST_NOCACHE)
+       if (dst->flags & (DST_NOCACHE | DST_NOGC))
                return atomic_inc_not_zero(&dst->__refcnt);
        dst_hold(dst);
        return true;
 
 {
        if (dst) {
                int newrefcnt;
-               unsigned short nocache = dst->flags & DST_NOCACHE;
+               unsigned short destroy_after_rcu = dst->flags &
+                                                  (DST_NOCACHE | DST_NOGC);
 
                newrefcnt = atomic_dec_return(&dst->__refcnt);
                if (unlikely(newrefcnt < 0))
                        net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
                                             __func__, dst, newrefcnt);
-               if (!newrefcnt && unlikely(nocache))
+               if (!newrefcnt && unlikely(destroy_after_rcu))
                        call_rcu(&dst->rcu_head, dst_destroy_rcu);
        }
 }
 EXPORT_SYMBOL(dst_release);
 
+/**
+ * dst_release_immediate - drop a refcount and destroy the dst synchronously
+ * @dst: dst entry to release; may be NULL (no-op)
+ *
+ * Unlike dst_release(), which defers destruction of DST_NOCACHE/DST_NOGC
+ * entries through call_rcu(), this variant calls dst_destroy() directly
+ * when the refcount reaches zero — no RCU grace period is waited for.
+ *
+ * NOTE(review): caller presumably must guarantee that no RCU reader can
+ * still reach @dst (e.g. it was never published to a lookup structure)
+ * before the immediate destroy is safe — confirm against call sites.
+ */
+void dst_release_immediate(struct dst_entry *dst)
+{
+	if (dst) {
+		int newrefcnt;
+
+		newrefcnt = atomic_dec_return(&dst->__refcnt);
+		if (unlikely(newrefcnt < 0))
+			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
+					     __func__, dst, newrefcnt);
+		if (!newrefcnt)
+			dst_destroy(dst);
+	}
+}
+EXPORT_SYMBOL(dst_release_immediate);
+
+
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
        struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);