--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ ... @@
 static inline void rps_lock_irqsave(struct softnet_data *sd,
                                    unsigned long *flags)
 {
-       if (IS_ENABLED(CONFIG_RPS))
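+       /* With backlog NAPI threads the queue is also accessed from thread
+        * context, so IRQ-off alone is not enough; take the queue lock even
+        * when RPS is not enabled.
+        */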
+       if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
        else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                local_irq_save(*flags);
 
 static inline void rps_lock_irq_disable(struct softnet_data *sd)
 {
-       if (IS_ENABLED(CONFIG_RPS))
+       if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                spin_lock_irq(&sd->input_pkt_queue.lock);
        else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                local_irq_disable();
 static inline void rps_unlock_irq_restore(struct softnet_data *sd,
                                          unsigned long *flags)
 {
-       if (IS_ENABLED(CONFIG_RPS))
+       if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
        else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                local_irq_restore(*flags);
 
 static inline void rps_unlock_irq_enable(struct softnet_data *sd)
 {
-       if (IS_ENABLED(CONFIG_RPS))
+       if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                spin_unlock_irq(&sd->input_pkt_queue.lock);
        else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                local_irq_enable();
@@ ... @@ static void napi_schedule_rps(struct softnet_data *sd)
        __napi_schedule_irqoff(&mysd->backlog);
 }
 
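+/* Flush the deferred skb free list on @cpu: with backlog NAPI threads,
+ * schedule that CPU's backlog NAPI under the queue lock; otherwise send
+ * the IPI that raises NET_RX_SOFTIRQ on the remote CPU. Called from
+ * skb_attempt_defer_free() when the remote defer list needs purging.
+ */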
+void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
+{
+       unsigned long flags;
+
+       if (use_backlog_threads()) {
+               rps_lock_irqsave(sd, &flags);
+
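+               /* Only schedule the backlog NAPI if it is not already pending. */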
+               if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
+                       __napi_schedule_irqoff(&sd->backlog);
+
+               rps_unlock_irq_restore(sd, &flags);
+
+       } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
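+               /* No backlog threads: let the defer_csd IPI raise NET_RX_SOFTIRQ. */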
+               smp_call_function_single_async(cpu, &sd->defer_csd);
+       }
+}
+
 #ifdef CONFIG_NET_FLOW_LIMIT
 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
 #endif
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ ... @@ void skb_attempt_defer_free(struct sk_buff *skb)
        /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
         * if we are unlucky enough (this seems very unlikely).
         */
-       if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
-               smp_call_function_single_async(cpu, &sd->defer_csd);
+       if (unlikely(kick))
+               kick_defer_list_purge(sd, cpu);
 }
 
 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,