 struct conntrack_gc_work {
        struct delayed_work     dwork;
        u32                     next_bucket;
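+       /* adaptive schedule state: avg_timeout is a running average of the
+        * remaining lifetimes seen by the last scan (jiffies); start_time
+        * records (as nfct_time_stamp) when the current table cycle began.
+        */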
+       u32                     avg_timeout;
+       u32                     start_time;
        bool                    exiting;
        bool                    early_drop;
 };
 /* serialize hash resizes and nf_ct_iterate_cleanup */
 static DEFINE_MUTEX(nf_conntrack_mutex);
 
-#define GC_SCAN_INTERVAL       (120u * HZ)
+#define GC_SCAN_INTERVAL_MAX   (60ul * HZ)
+#define GC_SCAN_INTERVAL_MIN   (1ul * HZ)
+
+/* clamp timeouts to this value (TCP unacked) */
+#define GC_SCAN_INTERVAL_CLAMP (300ul * HZ)
+
+/* large initial bias so that we don't scan often just because we have
+ * three entries with a 1s timeout.
+ */
+#define GC_SCAN_INTERVAL_INIT  INT_MAX
+
 #define GC_SCAN_MAX_DURATION   msecs_to_jiffies(10)
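+
+/* bound the number of evictions per run; together with the roughly
+ * one-run-per-jiffy early-exit path this caps gc eviction at about
+ * 64000 entries per second, independent of HZ.
+ */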
+#define GC_SCAN_EXPIRED_MAX    (64000u / HZ)
 
 #define MIN_CHAINLEN   8u
 #define MAX_CHAINLEN   (32u - MIN_CHAINLEN)
 
 static void gc_worker(struct work_struct *work)
 {
-       unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
        unsigned int i, hashsz, nf_conntrack_max95 = 0;
-       unsigned long next_run = GC_SCAN_INTERVAL;
+       u32 end_time, start_time = nfct_time_stamp;
        struct conntrack_gc_work *gc_work;
+       unsigned int expired_count = 0;
+       unsigned long next_run;
+       s32 delta_time;
+
        gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
 
        i = gc_work->next_bucket;
        if (gc_work->early_drop)
                nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
 
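+       /* bucket 0 means a new cycle starts: re-apply the large initial bias
+        * and record when this full table walk began.
+        */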
+       if (i == 0) {
+               gc_work->avg_timeout = GC_SCAN_INTERVAL_INIT;
+               gc_work->start_time = start_time;
+       }
+
+       next_run = gc_work->avg_timeout;
+
+       end_time = start_time + GC_SCAN_MAX_DURATION;
+
        do {
                struct nf_conntrack_tuple_hash *h;
                struct hlist_nulls_head *ct_hash;
 
                hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
                        struct nf_conntrack_net *cnet;
+                       unsigned long expires;
                        struct net *net;
 
                        tmp = nf_ct_tuplehash_to_ctrack(h);
 
                        if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
                                nf_ct_offload_timeout(tmp);
                                continue;
                        }
 
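+                       /* this run's eviction budget is spent: remember the
+                        * position and the partial average, then back off.
+                        */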
+                       if (expired_count > GC_SCAN_EXPIRED_MAX) {
+                               rcu_read_unlock();
+
+                               gc_work->next_bucket = i;
+                               gc_work->avg_timeout = next_run;
+
+                               delta_time = nfct_time_stamp - gc_work->start_time;
+
+                               /* re-sched immediately if total cycle time is exceeded */
+                               next_run = delta_time < (s32)GC_SCAN_INTERVAL_MAX;
+                               goto early_exit;
+                       }
+
                        if (nf_ct_is_expired(tmp)) {
                                nf_ct_gc_expired(tmp);
+                               expired_count++;
                                continue;
                        }
 
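+                       /* running average: next_run = (next_run + expires) / 2,
+                        * i.e. each unexpired entry halves the distance to its
+                        * own clamped timeout, so a few dozen entries suffice
+                        * to decay the INT_MAX initial bias.
+                        */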
+                       expires = clamp(nf_ct_expires(tmp), GC_SCAN_INTERVAL_MIN, GC_SCAN_INTERVAL_CLAMP);
+                       next_run += expires;
+                       next_run /= 2u;
+
                        if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
                                continue;
 
                        /* need to take reference to avoid possible races */
                        if (!refcount_inc_not_zero(&tmp->ct_general.use))
                                continue;
 
                        /* gc_worker_skip_ct() call above did not acquire a reference.
                         * A new skip check must be done after acquiring it.
                         */
                        if (gc_worker_skip_ct(tmp)) {
                                nf_ct_put(tmp);
                                continue;
                        }
 
-                       if (gc_worker_can_early_drop(tmp))
+                       if (gc_worker_can_early_drop(tmp)) {
                                nf_ct_kill(tmp);
+                               expired_count++;
+                       }
 
                        nf_ct_put(tmp);
                }
 
                /* could check get_nulls_value() here and restart if ct
                 * was moved to another chain.  But given gc is best-effort
                 * we will just continue with next hash slot.
                 */
                rcu_read_unlock();
                cond_resched();
                i++;
 
-               if (time_after(jiffies, end_time) && i < hashsz) {
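+               /* signed delta of u32 time stamps is wraparound-safe, the
+                * equivalent of the old time_after(jiffies, end_time) test.
+                */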
+               delta_time = nfct_time_stamp - end_time;
+               if (delta_time > 0 && i < hashsz) {
+                       gc_work->avg_timeout = next_run;
                        gc_work->next_bucket = i;
                        next_run = 0;
-                       break;
+                       goto early_exit;
                }
        } while (i < hashsz);
 
+       gc_work->next_bucket = 0;
+
+       next_run = clamp(next_run, GC_SCAN_INTERVAL_MIN, GC_SCAN_INTERVAL_MAX);
+
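+       /* subtract the time the scan itself consumed, so that the
+        * start-to-start spacing of cycles tracks the averaged timeout.
+        */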
+       delta_time = max_t(s32, nfct_time_stamp - gc_work->start_time, 1);
+       if (next_run > (unsigned long)delta_time)
+               next_run -= delta_time;
+       else
+               next_run = 1;
+
+early_exit:
        if (gc_work->exiting)
                return;
 
-       /*
-        * Eviction will normally happen from the packet path, and not
-        * from this gc worker.
-        *
-        * This worker is only here to reap expired entries when system went
-        * idle after a busy period.
-        */
-       if (next_run) {
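+       /* a nonzero delay means no immediate continuation run is pending;
+        * early-drop mode is only kept across back-to-back runs.
+        */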
+       if (next_run)
                gc_work->early_drop = false;
-               gc_work->next_bucket = 0;
-       }
+
        queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
 }
 
 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
 {
-       INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
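+       /* plain delayed work rather than deferrable: the adaptive schedule
+        * relies on the timer firing on time even on an otherwise idle CPU.
+        */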
+       INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
        gc_work->exiting = false;
 }