return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
 }
 
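+/* Bounds-checked helpers for the per-host bulk flow counters used by the
+ * dual/triple host isolation modes. A decrement is skipped when the
+ * counter is already zero and an increment is skipped once it reaches
+ * CAKE_QUEUES, so a counter that has drifted out of sync can neither
+ * underflow nor grow past the range expected by cake_get_flow_quantum().
+ */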
+static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
+                                            struct cake_flow *flow,
+                                            int flow_mode)
+{
+       if (likely(cake_dsrc(flow_mode) &&
+                  q->hosts[flow->srchost].srchost_bulk_flow_count))
+               q->hosts[flow->srchost].srchost_bulk_flow_count--;
+}
+
+static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
+                                            struct cake_flow *flow,
+                                            int flow_mode)
+{
+       if (likely(cake_dsrc(flow_mode) &&
+                  q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
+               q->hosts[flow->srchost].srchost_bulk_flow_count++;
+}
+
+static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
+                                            struct cake_flow *flow,
+                                            int flow_mode)
+{
+       if (likely(cake_ddst(flow_mode) &&
+                  q->hosts[flow->dsthost].dsthost_bulk_flow_count))
+               q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
+}
+
+static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
+                                            struct cake_flow *flow,
+                                            int flow_mode)
+{
+       if (likely(cake_ddst(flow_mode) &&
+                  q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
+               q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
+}
+
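+/* Compute a flow's quantum: the tin's base quantum scaled down by the
+ * largest number of bulk flows sharing this flow's source or destination
+ * host (quantum_div[] holds 16-bit fixed-point reciprocals), with random
+ * dithering applied before the truncating shift.
+ */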
+static u16 cake_get_flow_quantum(struct cake_tin_data *q,
+                                struct cake_flow *flow,
+                                int flow_mode)
+{
+       u16 host_load = 1;
+
+       if (cake_dsrc(flow_mode))
+               host_load = max(host_load,
+                               q->hosts[flow->srchost].srchost_bulk_flow_count);
+
+       if (cake_ddst(flow_mode))
+               host_load = max(host_load,
+                               q->hosts[flow->dsthost].dsthost_bulk_flow_count);
+
+       /* Adding get_random_u16() applies dithering so the truncating
+        * shift below does not accumulate round-off errors.
+        */
+       return (q->flow_quantum * quantum_div[host_load] +
+               get_random_u16()) >> 16;
+}
+
 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
                     int flow_mode, u16 flow_override, u16 host_override)
 {
                allocate_dst = cake_ddst(flow_mode);
 
                if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
-                       if (allocate_src)
-                               q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
-                       if (allocate_dst)
-                               q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+                       cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+                       cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
                }
 found:
                /* reserve queue for future packets in same flow */
                        q->hosts[outer_hash + k].srchost_tag = srchost_hash;
 found_src:
                        srchost_idx = outer_hash + k;
-                       if (q->flows[reduced_hash].set == CAKE_SET_BULK)
-                               q->hosts[srchost_idx].srchost_bulk_flow_count++;
                        q->flows[reduced_hash].srchost = srchost_idx;
+
+                       if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+                               cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
                }
 
                if (allocate_dst) {
                        q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
 found_dst:
                        dsthost_idx = outer_hash + k;
-                       if (q->flows[reduced_hash].set == CAKE_SET_BULK)
-                               q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
                        q->flows[reduced_hash].dsthost = dsthost_idx;
+
+                       if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+                               cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
                }
        }
 
 
        /* flowchain */
        if (!flow->set || flow->set == CAKE_SET_DECAYING) {
-               struct cake_host *srchost = &b->hosts[flow->srchost];
-               struct cake_host *dsthost = &b->hosts[flow->dsthost];
-               u16 host_load = 1;
-
                if (!flow->set) {
                        list_add_tail(&flow->flowchain, &b->new_flows);
                } else {
                flow->set = CAKE_SET_SPARSE;
                b->sparse_flow_count++;
 
-               if (cake_dsrc(q->flow_mode))
-                       host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
-               if (cake_ddst(q->flow_mode))
-                       host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
-               flow->deficit = (b->flow_quantum *
-                                quantum_div[host_load]) >> 16;
+               flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
        } else if (flow->set == CAKE_SET_SPARSE_WAIT) {
-               struct cake_host *srchost = &b->hosts[flow->srchost];
-               struct cake_host *dsthost = &b->hosts[flow->dsthost];
-
                /* this flow was empty, accounted as a sparse flow, but actually
                 * in the bulk rotation.
                 */
                b->sparse_flow_count--;
                b->bulk_flow_count++;
 
-               if (cake_dsrc(q->flow_mode))
-                       srchost->srchost_bulk_flow_count++;
-
-               if (cake_ddst(q->flow_mode))
-                       dsthost->dsthost_bulk_flow_count++;
-
+               cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+               cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
        }
 
        if (q->buffer_used > q->buffer_max_used)
 {
        struct cake_sched_data *q = qdisc_priv(sch);
        struct cake_tin_data *b = &q->tins[q->cur_tin];
-       struct cake_host *srchost, *dsthost;
        ktime_t now = ktime_get();
        struct cake_flow *flow;
        struct list_head *head;
        bool first_flow = true;
        struct sk_buff *skb;
-       u16 host_load;
        u64 delay;
        u32 len;
 
        q->cur_flow = flow - b->flows;
        first_flow = false;
 
-       /* triple isolation (modified DRR++) */
-       srchost = &b->hosts[flow->srchost];
-       dsthost = &b->hosts[flow->dsthost];
-       host_load = 1;
-
        /* flow isolation (DRR++) */
        if (flow->deficit <= 0) {
                /* Keep all flows with deficits out of the sparse and decaying
                                b->sparse_flow_count--;
                                b->bulk_flow_count++;
 
-                               if (cake_dsrc(q->flow_mode))
-                                       srchost->srchost_bulk_flow_count++;
-
-                               if (cake_ddst(q->flow_mode))
-                                       dsthost->dsthost_bulk_flow_count++;
+                               cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+                               cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 
                                flow->set = CAKE_SET_BULK;
                        } else {
                        }
                }
 
-               if (cake_dsrc(q->flow_mode))
-                       host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
-               if (cake_ddst(q->flow_mode))
-                       host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
-               WARN_ON(host_load > CAKE_QUEUES);
-
-               /* The get_random_u16() is a way to apply dithering to avoid
-                * accumulating roundoff errors
-                */
-               flow->deficit += (b->flow_quantum * quantum_div[host_load] +
-                                 get_random_u16()) >> 16;
+               flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
                list_move_tail(&flow->flowchain, &b->old_flows);
 
                goto retry;
                                if (flow->set == CAKE_SET_BULK) {
                                        b->bulk_flow_count--;
 
-                                       if (cake_dsrc(q->flow_mode))
-                                               srchost->srchost_bulk_flow_count--;
-
-                                       if (cake_ddst(q->flow_mode))
-                                               dsthost->dsthost_bulk_flow_count--;
+                                       cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+                                       cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 
                                        b->decaying_flow_count++;
                                } else if (flow->set == CAKE_SET_SPARSE ||
                                else if (flow->set == CAKE_SET_BULK) {
                                        b->bulk_flow_count--;
 
-                                       if (cake_dsrc(q->flow_mode))
-                                               srchost->srchost_bulk_flow_count--;
-
-                                       if (cake_ddst(q->flow_mode))
-                                               dsthost->dsthost_bulk_flow_count--;
-
+                                       cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+                                       cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
                                } else
                                        b->decaying_flow_count--;