--- /dev/null
+++ b/include/net/proto_memory.h
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PROTO_MEMORY_H
+#define _PROTO_MEMORY_H
+
+#include <net/sock.h>
+
+/* 1 MB per cpu, in page units */
+#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
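+/* Per-cpu forward-alloc cache limit, in pages. Runtime tunable;
+ * defaults to SK_MEMORY_PCPU_RESERVE.
+ */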
+extern int sysctl_mem_pcpu_rsv;
+
+static inline bool sk_has_memory_pressure(const struct sock *sk)
+{
+       return sk->sk_prot->memory_pressure != NULL;
+}
+
+static inline bool
+proto_memory_pressure(const struct proto *prot)
+{
+       if (!prot->memory_pressure)
+               return false;
+       return !!READ_ONCE(*prot->memory_pressure);
+}
+
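+/* Protocol-wide pressure only; does not consider memcg socket pressure. */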
+static inline bool sk_under_global_memory_pressure(const struct sock *sk)
+{
+       return proto_memory_pressure(sk->sk_prot);
+}
+
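+/* True under either protocol-wide pressure or memcg socket pressure. */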
+static inline bool sk_under_memory_pressure(const struct sock *sk)
+{
+       if (!sk->sk_prot->memory_pressure)
+               return false;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
+           mem_cgroup_under_socket_pressure(sk->sk_memcg))
+               return true;
+
+       return !!READ_ONCE(*sk->sk_prot->memory_pressure);
+}
+
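+/* Pages currently charged to the protocol. Clamped at zero because
+ * the per-cpu caches below can leave the global counter transiently
+ * negative.
+ */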
+static inline long
+proto_memory_allocated(const struct proto *prot)
+{
+       return max(0L, atomic_long_read(prot->memory_allocated));
+}
+
+static inline long
+sk_memory_allocated(const struct sock *sk)
+{
+       return proto_memory_allocated(sk->sk_prot);
+}
+
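+/* Flush this cpu's cached delta into the shared memory_allocated counter. */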
+static inline void proto_memory_pcpu_drain(struct proto *proto)
+{
+       int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
+
+       if (val)
+               atomic_long_add(val, proto->memory_allocated);
+}
+
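+/* Charge @val pages to the per-cpu cache; spill to the shared atomic
+ * only once the cache reaches sysctl_mem_pcpu_rsv, so most charges
+ * avoid touching the global counter.
+ */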
+static inline void
+sk_memory_allocated_add(const struct sock *sk, int val)
+{
+       struct proto *proto = sk->sk_prot;
+
+       val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
+
+       if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
+               proto_memory_pcpu_drain(proto);
+}
+
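+/* Uncharge @val pages; drain once the cached deficit reaches
+ * sysctl_mem_pcpu_rsv.
+ */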
+static inline void
+sk_memory_allocated_sub(const struct sock *sk, int val)
+{
+       struct proto *proto = sk->sk_prot;
+
+       val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
+
+       if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
+               proto_memory_pcpu_drain(proto);
+}
+
+#endif /* _PROTO_MEMORY_H */
 
 #endif
 }
 
-static inline bool sk_has_memory_pressure(const struct sock *sk)
-{
-       return sk->sk_prot->memory_pressure != NULL;
-}
-
-static inline bool sk_under_global_memory_pressure(const struct sock *sk)
-{
-       return sk->sk_prot->memory_pressure &&
-               !!READ_ONCE(*sk->sk_prot->memory_pressure);
-}
-
-static inline bool sk_under_memory_pressure(const struct sock *sk)
-{
-       if (!sk->sk_prot->memory_pressure)
-               return false;
-
-       if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
-           mem_cgroup_under_socket_pressure(sk->sk_memcg))
-               return true;
-
-       return !!READ_ONCE(*sk->sk_prot->memory_pressure);
-}
-
-static inline long
-proto_memory_allocated(const struct proto *prot)
-{
-       return max(0L, atomic_long_read(prot->memory_allocated));
-}
-
-static inline long
-sk_memory_allocated(const struct sock *sk)
-{
-       return proto_memory_allocated(sk->sk_prot);
-}
-
-/* 1 MB per cpu, in page units */
-#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
-extern int sysctl_mem_pcpu_rsv;
-
-static inline void proto_memory_pcpu_drain(struct proto *proto)
-{
-       int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
-
-       if (val)
-               atomic_long_add(val, proto->memory_allocated);
-}
-
-static inline void
-sk_memory_allocated_add(const struct sock *sk, int val)
-{
-       struct proto *proto = sk->sk_prot;
-
-       val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
-
-       if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
-               proto_memory_pcpu_drain(proto);
-}
-
-static inline void
-sk_memory_allocated_sub(const struct sock *sk, int val)
-{
-       struct proto *proto = sk->sk_prot;
-
-       val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
-
-       if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
-               proto_memory_pcpu_drain(proto);
-}
-
 #define SK_ALLOC_PERCPU_COUNTER_BATCH 16
 
 static inline void sk_sockets_allocated_dec(struct sock *sk)
        return percpu_counter_sum_positive(prot->sockets_allocated);
 }
 
-static inline bool
-proto_memory_pressure(struct proto *prot)
-{
-       if (!prot->memory_pressure)
-               return false;
-       return !!READ_ONCE(*prot->memory_pressure);
-}
-
-
 #ifdef CONFIG_PROC_FS
 #define PROTO_INUSE_NR 64      /* should be enough for the first time */
 struct prot_inuse {
 
 #include <net/net_namespace.h>
 #include <net/request_sock.h>
 #include <net/sock.h>
+#include <net/proto_memory.h>
 #include <linux/net_tstamp.h>
 #include <net/xfrm.h>
 #include <linux/ipsec.h>
 
 #include <net/busy_poll.h>
 #include <net/pkt_sched.h>
 #include <net/hotdata.h>
+#include <net/proto_memory.h>
 #include <net/rps.h>
 
 #include "dev.h"
 
 #include <net/protocol.h>
 #include <net/tcp.h>
 #include <net/mptcp.h>
+#include <net/proto_memory.h>
 #include <net/udp.h>
 #include <net/udplite.h>
 #include <linux/bottom_half.h>
 
 #include <net/inet_common.h>
 #include <net/tcp.h>
 #include <net/mptcp.h>
+#include <net/proto_memory.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
 #include <net/sock.h>
 
 #include <linux/prefetch.h>
 #include <net/dst.h>
 #include <net/tcp.h>
+#include <net/proto_memory.h>
 #include <net/inet_common.h>
 #include <linux/ipsec.h>
 #include <asm/unaligned.h>
 
 
 #include <net/tcp.h>
 #include <net/mptcp.h>
+#include <net/proto_memory.h>
 
 #include <linux/compiler.h>
 #include <linux/gfp.h>
 
 #include <linux/inet.h>
 #include <linux/slab.h>
 #include <net/sock.h>
+#include <net/proto_memory.h>
 #include <net/inet_ecn.h>
 #include <linux/skbuff.h>
 #include <net/sctp/sctp.h>