--- a/include/net/sock.h
+++ b/include/net/sock.h
 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 
+INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb));
+
 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
        if (sk_memalloc_socks() && skb_pfmemalloc(skb))
                return __sk_backlog_rcv(sk, skb);
 
-       return sk->sk_backlog_rcv(sk, skb);
+       return INDIRECT_CALL_INET(sk->sk_backlog_rcv,
+                                 tcp_v6_do_rcv,
+                                 tcp_v4_do_rcv,
+                                 sk, skb);
 }
 
 static inline void sk_incoming_cpu_update(struct sock *sk)
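
For context: INDIRECT_CALL_INET() comes from include/linux/indirect_call_wrapper.h.
With retpolines enabled it turns the indirect call through sk->sk_backlog_rcv into
pointer comparisons plus direct calls to the expected targets, falling back to the
indirect call for anything else. Roughly, paraphrasing the header (treat the exact
spelling as approximate):

#ifdef CONFIG_RETPOLINE
/* Compare the pointer against each likely target; direct-call on a match. */
#define INDIRECT_CALL_1(f, f1, ...)					\
	(likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__))
#define INDIRECT_CALL_2(f, f2, f1, ...)					\
	(likely(f == f2) ? f2(__VA_ARGS__) :				\
			   INDIRECT_CALL_1(f, f1, __VA_ARGS__))
#else
/* No retpolines: the plain indirect call is cheap, keep it. */
#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__)
#endif

/* Try both candidates only if IPv6 is built in; otherwise IPv4 only. */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#elif IS_ENABLED(CONFIG_INET)
#define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
#endif

So the hot TCP backlog path becomes "compare against tcp_v6_do_rcv, then
tcp_v4_do_rcv, else take the retpoline", and with retpolines disabled the whole
thing collapses back to the original sk->sk_backlog_rcv(sk, skb).
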
--- a/net/core/sock.c
+++ b/net/core/sock.c
        BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
 
        noreclaim_flag = memalloc_noreclaim_save();
-       ret = sk->sk_backlog_rcv(sk, skb);
+       ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv,
+                                tcp_v6_do_rcv,
+                                tcp_v4_do_rcv,
+                                sk, skb);
        memalloc_noreclaim_restore(noreclaim_flag);
 
        return ret;
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
 static void    tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                                      struct request_sock *req);
 
-static int     tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
+INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
 
 static const struct inet_connection_sock_af_ops ipv6_mapped;
 const struct inet_connection_sock_af_ops ipv6_specific;
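
The scope changes are what make those comparisons possible: tcp_v6_do_rcv() (and
tcp_v4_do_rcv() on the IPv4 side) used to be static, so sock.h could not name them.
INDIRECT_CALLABLE_DECLARE()/INDIRECT_CALLABLE_SCOPE widen the scope only when
retpolines make it worthwhile; roughly, again paraphrasing
include/linux/indirect_call_wrapper.h:

#ifdef CONFIG_RETPOLINE
#define INDIRECT_CALLABLE_DECLARE(f)	f	/* keep the declaration */
#define INDIRECT_CALLABLE_SCOPE			/* drop "static": global symbol */
#else
#define INDIRECT_CALLABLE_DECLARE(f)		/* declaration compiles away */
#define INDIRECT_CALLABLE_SCOPE		static	/* stay file-local */
#endif

So with retpolines off, the declarations added to sock.h disappear and
tcp_v6_do_rcv() remains static; the definition in the hunk below sheds its explicit
"static" through the same macro.
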
  * This is because we cannot sleep with the original spinlock
  * held.
  */
-static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+INDIRECT_CALLABLE_SCOPE
+int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
        struct ipv6_pinfo *np = tcp_inet6_sk(sk);
        struct sk_buff *opt_skb = NULL;