        return NULL;
 }
 
+static void skb_defer_free_flush(struct softnet_data *sd)
+{
+       struct sk_buff *skb, *next;
+
+       /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
+       if (!READ_ONCE(sd->defer_list))
+               return;
+
+       spin_lock(&sd->defer_lock);
+       skb = sd->defer_list;
+       sd->defer_list = NULL;
+       sd->defer_count = 0;
+       spin_unlock(&sd->defer_lock);
+
+       while (skb != NULL) {
+               next = skb->next;
+               napi_consume_skb(skb, 1);
+               skb = next;
+       }
+}
+
 #if defined(CONFIG_NET_RX_BUSY_POLL)
 
 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
@@ ... @@
                if (work > 0)
                        __NET_ADD_STATS(dev_net(napi->dev),
                                        LINUX_MIB_BUSYPOLLRXPACKETS, work);
+               skb_defer_free_flush(this_cpu_ptr(&softnet_data));
                local_bh_enable();
 
                if (!loop_end || loop_end(loop_end_arg, start_time))
@@ ... @@
        return -1;
 }
 
-static void skb_defer_free_flush(struct softnet_data *sd)
-{
-       struct sk_buff *skb, *next;
-
-       /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
-       if (!READ_ONCE(sd->defer_list))
-               return;
-
-       spin_lock(&sd->defer_lock);
-       skb = sd->defer_list;
-       sd->defer_list = NULL;
-       sd->defer_count = 0;
-       spin_unlock(&sd->defer_lock);
-
-       while (skb != NULL) {
-               next = skb->next;
-               napi_consume_skb(skb, 1);
-               skb = next;
-       }
-}
-
 static int napi_threaded_poll(void *data)
 {
        struct napi_struct *napi = data;
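
For readers who want to experiment with the locking pattern outside the kernel, the sketch below reproduces the shape of skb_defer_free_flush() in plain C11: a lockless emptiness check, detaching the whole deferred list under a spinlock, and freeing the nodes only after the lock is dropped. This is an illustrative stand-in, not kernel code: pthread spinlocks and C11 atomics replace spinlock_t and the READ_ONCE()/WRITE_ONCE() pair (with slightly stronger acquire/release ordering), and every name in it (defer_queue, defer_free, defer_free_flush) is made up for the example.

/* Sketch of the detach-then-free pattern used by skb_defer_free_flush().
 * build: cc -std=c11 -pthread defer.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct node {
	struct node *next;
	/* payload would live here */
};

struct defer_queue {
	struct node *_Atomic head;	/* stand-in for sd->defer_list  */
	unsigned int count;		/* stand-in for sd->defer_count */
	pthread_spinlock_t lock;	/* stand-in for sd->defer_lock  */
};

/* Producer side: queue a node for deferred freeing. */
static void defer_free(struct defer_queue *q, struct node *n)
{
	pthread_spin_lock(&q->lock);
	n->next = atomic_load_explicit(&q->head, memory_order_relaxed);
	q->count++;
	/* Release store pairs with the consumer's lockless load. */
	atomic_store_explicit(&q->head, n, memory_order_release);
	pthread_spin_unlock(&q->lock);
}

/* Consumer side: mirrors the shape of skb_defer_free_flush(). */
static void defer_free_flush(struct defer_queue *q)
{
	struct node *n, *next;

	/* Lockless fast path: nothing queued, nothing to do. */
	if (!atomic_load_explicit(&q->head, memory_order_acquire))
		return;

	/* Detach the whole list under the lock... */
	pthread_spin_lock(&q->lock);
	n = atomic_exchange_explicit(&q->head, NULL, memory_order_relaxed);
	q->count = 0;
	pthread_spin_unlock(&q->lock);

	/* ...and free the nodes with the lock already dropped. */
	while (n) {
		next = n->next;
		free(n);
		n = next;
	}
}

int main(void)
{
	struct defer_queue q = { .head = NULL, .count = 0 };

	pthread_spin_init(&q.lock, PTHREAD_PROCESS_PRIVATE);
	defer_free(&q, malloc(sizeof(struct node)));
	defer_free(&q, malloc(sizeof(struct node)));
	defer_free_flush(&q);	/* frees both nodes */
	pthread_spin_destroy(&q.lock);
	return 0;
}

Keeping the free loop outside the lock is the point of the detach step: the per-node teardown (napi_consume_skb() in the kernel, free() here) can be arbitrarily expensive, and holding the lock across it would stall every other CPU trying to queue more entries.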