At the time of commit fff326990789 ("tcp: reflect SYN queue_mapping
into SYNACK packets") we had few ways to cope with SYN floods.
We no longer need to reflect incoming skb queue mappings, and instead
can pick a TX queue based on the cpu cooking the SYNACK, with normal
XPS affinities (sketched below).
Note that all SYNACK retransmits were picking TX queue 0; this is no
longer a win, given that SYNACK rtx are now distributed on all cpus.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
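
For illustration only (not part of the patch), here is a minimal userspace
sketch of the idea the changelog describes: with no queue_mapping forced on
the SYNACK skb, the TX queue ends up being a function of the CPU building
the packet, following the device's XPS CPU-to-queue affinities. NR_TXQ and
pick_tx_queue() are hypothetical stand-ins; in the kernel the real selection
happens on the xmit path once skb_set_queue_mapping() is no longer called.

/* Sketch: map the current CPU to a TX queue, XPS-style (userspace, not kernel code). */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define NR_TXQ 4                        /* pretend the NIC has 4 TX queues */

static int pick_tx_queue(void)
{
        int cpu = sched_getcpu();       /* CPU "cooking" the packet */

        return cpu % NR_TXQ;            /* simple CPU -> queue affinity */
}

int main(void)
{
        printf("cpu %d -> tx queue %d\n", sched_getcpu(), pick_tx_queue());
        return 0;
}
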
        __u32 (*init_seq)(const struct sk_buff *skb);
        int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
                           struct flowi *fl, struct request_sock *req,
-                          u16 queue_mapping, struct tcp_fastopen_cookie *foc,
+                          struct tcp_fastopen_cookie *foc,
                           bool attach_req);
 };
 
 
                          arg->csumoffset) = csum_fold(csum_add(nskb->csum,
                                                                arg->csum));
                nskb->ip_summed = CHECKSUM_NONE;
-               skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
                ip_push_pending_frames(sk, &fl4);
        }
 out:
 
        }
        if (fastopen_sk) {
                af_ops->send_synack(fastopen_sk, dst, &fl, req,
-                                   skb_get_queue_mapping(skb), &foc, false);
+                                   &foc, false);
                /* Add the child socket directly into the accept queue */
                inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
                sk->sk_data_ready(sk);
                if (!want_cookie)
                        inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
                af_ops->send_synack(sk, dst, &fl, req,
-                                   skb_get_queue_mapping(skb), &foc, !want_cookie);
+                                   &foc, !want_cookie);
                if (want_cookie)
                        goto drop_and_free;
        }
 
 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
-                             u16 queue_mapping,
                              struct tcp_fastopen_cookie *foc,
                                  bool attach_req)
 {
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
-               skb_set_queue_mapping(skb, queue_mapping);
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
                                            ireq->opt);
 
        int res;
 
        tcp_rsk(req)->txhash = net_tx_rndhash();
-       res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL, true);
+       res = af_ops->send_synack(sk, NULL, &fl, req, NULL, true);
        if (!res) {
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
 
 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
-                             u16 queue_mapping,
                              struct tcp_fastopen_cookie *foc,
                              bool attach_req)
 {
                if (np->repflow && ireq->pktopts)
                        fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
-               skb_set_queue_mapping(skb, queue_mapping);
                err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
                err = net_xmit_eval(err);
        }