tristate "Virtual eXtensible Local Area Network (VXLAN)"
        depends on INET
        select NET_UDP_TUNNEL
+       select GRO_CELLS
        ---help---
          This allows one to create vxlan virtual interfaces that provide
          Layer 2 Networks over Layer 3 Networks. VXLAN is often used
        tristate "Generic Network Virtualization Encapsulation"
        depends on INET && NET_UDP_TUNNEL
        select NET_IP_TUNNEL
+       select GRO_CELLS
        ---help---
          This allows one to create geneve virtual interfaces that provide
          Layer 2 Networks over Layer 3 Networks. GENEVE is often used
        select CRYPTO
        select CRYPTO_AES
        select CRYPTO_GCM
+       select GRO_CELLS
        ---help---
           MACsec is an encryption standard for Ethernet.
 
 
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 
-struct gro_cell {
-       struct sk_buff_head     napi_skbs;
-       struct napi_struct      napi;
-};
+struct gro_cell;
 
 struct gro_cells {
        struct gro_cell __percpu        *cells;
 };
 
-static inline int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
-{
-       struct gro_cell *cell;
-       struct net_device *dev = skb->dev;
-
-       if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO))
-               return netif_rx(skb);
-
-       cell = this_cpu_ptr(gcells->cells);
-
-       if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
-               atomic_long_inc(&dev->rx_dropped);
-               kfree_skb(skb);
-               return NET_RX_DROP;
-       }
-
-       __skb_queue_tail(&cell->napi_skbs, skb);
-       if (skb_queue_len(&cell->napi_skbs) == 1)
-               napi_schedule(&cell->napi);
-       return NET_RX_SUCCESS;
-}
-
-/* called under BH context */
-static inline int gro_cell_poll(struct napi_struct *napi, int budget)
-{
-       struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
-       struct sk_buff *skb;
-       int work_done = 0;
-
-       while (work_done < budget) {
-               skb = __skb_dequeue(&cell->napi_skbs);
-               if (!skb)
-                       break;
-               napi_gro_receive(napi, skb);
-               work_done++;
-       }
-
-       if (work_done < budget)
-               napi_complete_done(napi, work_done);
-       return work_done;
-}
-
-static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
-{
-       int i;
-
-       gcells->cells = alloc_percpu(struct gro_cell);
-       if (!gcells->cells)
-               return -ENOMEM;
-
-       for_each_possible_cpu(i) {
-               struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
-
-               __skb_queue_head_init(&cell->napi_skbs);
-
-               set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
-
-               netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
-               napi_enable(&cell->napi);
-       }
-       return 0;
-}
-
-static inline void gro_cells_destroy(struct gro_cells *gcells)
-{
-       int i;
-
-       if (!gcells->cells)
-               return;
-       for_each_possible_cpu(i) {
-               struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
-
-               netif_napi_del(&cell->napi);
-               __skb_queue_purge(&cell->napi_skbs);
-       }
-       free_percpu(gcells->cells);
-       gcells->cells = NULL;
-}
+int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb);
+int gro_cells_init(struct gro_cells *gcells, struct net_device *dev);
+void gro_cells_destroy(struct gro_cells *gcells);
 
 #endif
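
With the inline bodies removed, users of <net/gro_cells.h> only see the three declarations above. For illustration, here is a minimal sketch of how a tunnel driver would consume this API; the foo_tunnel structure and callbacks are hypothetical and not part of this patch. The pattern is: embed a struct gro_cells in the device private data, initialise it when the netdevice is set up, feed decapsulated skbs through gro_cells_receive() on the receive path, and tear everything down on uninit.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/gro_cells.h>

struct foo_tunnel {
        struct gro_cells        gro_cells;      /* per-CPU GRO queues/NAPI for this device */
        /* ... other per-device state ... */
};

/* ndo_init: allocate one gro_cell (skb queue + NAPI context) per possible CPU. */
static int foo_tunnel_init(struct net_device *dev)
{
        struct foo_tunnel *t = netdev_priv(dev);

        return gro_cells_init(&t->gro_cells, dev);
}

/* Receive path: hand a freshly decapsulated skb to the per-CPU GRO engine.
 * gro_cells_receive() falls back to netif_rx() when GRO is off for the device.
 */
static int foo_tunnel_rx(struct net_device *dev, struct sk_buff *skb)
{
        struct foo_tunnel *t = netdev_priv(dev);

        skb->dev = dev;
        return gro_cells_receive(&t->gro_cells, skb);
}

/* ndo_uninit: remove the NAPI contexts and free any skbs still queued. */
static void foo_tunnel_uninit(struct net_device *dev)
{
        struct foo_tunnel *t = netdev_priv(dev);

        gro_cells_destroy(&t->gro_cells);
}

This mirrors how the VXLAN and GENEVE receive paths use gro_cells; the one contract to respect is that skb->dev must be set before calling gro_cells_receive(), since the fallback path and the drop accounting read it.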
 
        bool
        default n
 
+config GRO_CELLS
+       bool
+       default n
+
 config NET_DEVLINK
        tristate "Network physical/parent device Netlink interface"
        help
 
 obj-$(CONFIG_DST_CACHE) += dst_cache.o
 obj-$(CONFIG_HWBM) += hwbm.o
 obj-$(CONFIG_NET_DEVLINK) += devlink.o
+obj-$(CONFIG_GRO_CELLS) += gro_cells.o
 
--- /dev/null
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <net/gro_cells.h>
+
+struct gro_cell {
+       struct sk_buff_head     napi_skbs;
+       struct napi_struct      napi;
+};
+
+int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       struct gro_cell *cell;
+
+       /* Fall back to the regular netif_rx() path when the per-CPU cells
+        * are not allocated, the skb is cloned, or GRO is disabled on the
+        * device.
+        */
+       if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO))
+               return netif_rx(skb);
+
+       cell = this_cpu_ptr(gcells->cells);
+
+       if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+               atomic_long_inc(&dev->rx_dropped);
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+
+       __skb_queue_tail(&cell->napi_skbs, skb);
+       /* Kick the NAPI poll only when the queue goes from empty to
+        * non-empty; a poll that is already scheduled will drain any
+        * packets queued after it.
+        */
+       if (skb_queue_len(&cell->napi_skbs) == 1)
+               napi_schedule(&cell->napi);
+       return NET_RX_SUCCESS;
+}
+EXPORT_SYMBOL(gro_cells_receive);
+
+/* called under BH context */
+static int gro_cell_poll(struct napi_struct *napi, int budget)
+{
+       struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
+       struct sk_buff *skb;
+       int work_done = 0;
+
+       while (work_done < budget) {
+               skb = __skb_dequeue(&cell->napi_skbs);
+               if (!skb)
+                       break;
+               napi_gro_receive(napi, skb);
+               work_done++;
+       }
+
+       if (work_done < budget)
+               napi_complete_done(napi, work_done);
+       return work_done;
+}
+
+int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
+{
+       int i;
+
+       gcells->cells = alloc_percpu(struct gro_cell);
+       if (!gcells->cells)
+               return -ENOMEM;
+
+       for_each_possible_cpu(i) {
+               struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
+
+               __skb_queue_head_init(&cell->napi_skbs);
+
+               /* These NAPI contexts are not backed by a device queue,
+                * so keep them out of busy polling.
+                */
+               set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
+
+               netif_napi_add(dev, &cell->napi, gro_cell_poll,
+                              NAPI_POLL_WEIGHT);
+               napi_enable(&cell->napi);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(gro_cells_init);
+
+void gro_cells_destroy(struct gro_cells *gcells)
+{
+       int i;
+
+       if (!gcells->cells)
+               return;
+       for_each_possible_cpu(i) {
+               struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
+
+               netif_napi_del(&cell->napi);
+               __skb_queue_purge(&cell->napi_skbs);
+       }
+       free_percpu(gcells->cells);
+       gcells->cells = NULL;
+}
+EXPORT_SYMBOL(gro_cells_destroy);
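
Worth noting about the move itself: the previous header-only version gave every driver that included <net/gro_cells.h> its own inline copy of the queueing and NAPI code. The file above leaves a single shared, exported implementation, and the hidden GRO_CELLS Kconfig symbol, selected by the VXLAN, GENEVE and MACSEC entries earlier and by the NET_IP_TUNNEL, IPv6 tunnel and XFRM entries below, ensures gro_cells.o is built only when at least one of those users is enabled.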
 
 config NET_IP_TUNNEL
        tristate
        select DST_CACHE
+       select GRO_CELLS
        default n
 
 config NET_IPGRE
 
        tristate "IPv6: IP-in-IPv6 tunnel (RFC2473)"
        select INET6_TUNNEL
        select DST_CACHE
+       select GRO_CELLS
        ---help---
          Support for IPv6-in-IPv6 and IPv4-in-IPv6 tunnels described in
          RFC 2473.
 
 config XFRM
        bool
        depends on NET
+       select GRO_CELLS
 
 config XFRM_ALGO
        tristate