We must try harder to get unique (addr, port) pairs when
doing port autoselection for sockets with SO_REUSEADDR
option set.
We achieve this by adding a relaxation parameter to
inet_csk_bind_conflict. When 'relax' parameter is off
we return a conflict whenever the current searched
pair (addr, port) is not unique.
This tries to address the problems reported in patch:
	8d238b25b1ec22a73b1c2206f111df2faaff8285
	Revert "tcp: bind() fix when many ports are bound"
Tests were run creating and binding(0) many sockets
on 100 IPs. The results are, on average:
	* 60000 sockets, 600 ports / IP:
		* 0.210 s, 620 (IP, port) duplicates without patch
		* 0.219 s, no duplicates with patch
	* 100000 sockets, 1000 ports / IP:
		* 0.371 s, 1720 duplicates without patch
		* 0.373 s, no duplicates with patch
	* 200000 sockets, 2000 ports / IP:
		* 0.766 s, 6900 duplicates without patch
		* 0.768 s, no duplicates with patch
	* 500000 sockets, 5000 ports / IP:
		* 2.227 s, 41500 duplicates without patch
		* 2.284 s, no duplicates with patch
Signed-off-by: Alex Copot <alex.mihai.c@gmail.com>
Signed-off-by: Daniel Baluta <dbaluta@ixiacom.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 struct sockaddr;
 
 extern int inet6_csk_bind_conflict(const struct sock *sk,
-                                  const struct inet_bind_bucket *tb);
+                                  const struct inet_bind_bucket *tb, bool relax);
 
 extern struct dst_entry* inet6_csk_route_req(struct sock *sk,
                                             const struct request_sock *req);
 
 #endif
        void        (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
        int         (*bind_conflict)(const struct sock *sk,
-                                    const struct inet_bind_bucket *tb);
+                                    const struct inet_bind_bucket *tb, bool relax);
 };
 
 /** inet_connection_sock - INET connection oriented sock
                                                const __be32 raddr,
                                                const __be32 laddr);
 extern int inet_csk_bind_conflict(const struct sock *sk,
-                                 const struct inet_bind_bucket *tb);
+                                 const struct inet_bind_bucket *tb, bool relax);
 extern int inet_csk_get_port(struct sock *sk, unsigned short snum);
 
 extern struct dst_entry* inet_csk_route_req(struct sock *sk,
 
 EXPORT_SYMBOL(inet_get_local_port_range);
 
 int inet_csk_bind_conflict(const struct sock *sk,
-                          const struct inet_bind_bucket *tb)
+                          const struct inet_bind_bucket *tb, bool relax)
 {
        struct sock *sk2;
        struct hlist_node *node;
                                    sk2_rcv_saddr == sk_rcv_saddr(sk))
                                        break;
                        }
+                       if (!relax && reuse && sk2->sk_reuse &&
+                           sk2->sk_state != TCP_LISTEN) {
+                               const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
+
+                               if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
+                                   sk2_rcv_saddr == sk_rcv_saddr(sk))
+                                       break;
+                       }
                }
        }
        return node != NULL;
                                            (tb->num_owners < smallest_size || smallest_size == -1)) {
                                                smallest_size = tb->num_owners;
                                                smallest_rover = rover;
-                                               if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+                                               if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+                                                   !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
                                                        snum = smallest_rover;
                                                        goto tb_found;
                                                }
                                        }
-                                       if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+                                       if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
                                                snum = rover;
                                                goto tb_found;
                                        }
                        goto success;
                } else {
                        ret = 1;
-                       if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+                       if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
                                if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
                                    smallest_size != -1 && --attempts >= 0) {
                                        spin_unlock(&head->lock);
                                        goto again;
                                }
+
                                goto fail_unlock;
                        }
                }
 
 #include <net/inet6_connection_sock.h>
 
 int inet6_csk_bind_conflict(const struct sock *sk,
-                           const struct inet_bind_bucket *tb)
+                           const struct inet_bind_bucket *tb, bool relax)
 {
        const struct sock *sk2;
        const struct hlist_node *node;