or to specify the output device using cmsg and IP_PKTINFO.
 
+By default the scope of the port bindings for unbound sockets is
+limited to the default VRF. That is, an unbound socket will not be
+matched by packets arriving on interfaces enslaved to an l3mdev, and
+processes may bind to the same port if they bind to an l3mdev.
+
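+A minimal userspace sketch of these semantics (the VRF device name
+"vrf-blue" and port 7777 are arbitrary; the VRF must already exist
+and the process needs CAP_NET_RAW for SO_BINDTODEVICE): both binds
+below are expected to succeed without SO_REUSEADDR, since the unbound
+socket is scoped to the default VRF while the second is scoped to the
+l3mdev.
+
+    #include <stdio.h>
+    #include <string.h>
+    #include <unistd.h>
+    #include <netinet/in.h>
+    #include <arpa/inet.h>
+    #include <sys/socket.h>
+
+    static int bind_udp(const char *dev, unsigned short port)
+    {
+        struct sockaddr_in sin = {
+            .sin_family = AF_INET,
+            .sin_port = htons(port),
+            .sin_addr.s_addr = htonl(INADDR_ANY),
+        };
+        int sd = socket(AF_INET, SOCK_DGRAM, 0);
+
+        if (sd < 0)
+            return -1;
+        /* scope the socket to the VRF device, if one was given */
+        if (dev && setsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE,
+                              dev, strlen(dev) + 1) < 0) {
+            close(sd);
+            return -1;
+        }
+        if (bind(sd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
+            close(sd);
+            return -1;
+        }
+        return sd;
+    }
+
+    int main(void)
+    {
+        int unbound = bind_udp(NULL, 7777);       /* default VRF */
+        int in_vrf  = bind_udp("vrf-blue", 7777); /* l3mdev scope */
+
+        printf("unbound bind: %s, vrf bind: %s\n",
+               unbound >= 0 ? "ok" : "failed",
+               in_vrf >= 0 ? "ok" : "failed");
+        return 0;
+    }
+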
 TCP & UDP services running in the default VRF context (i.e., not bound
 to any VRF device) can work across all VRF domains by enabling the
 tcp_l3mdev_accept and udp_l3mdev_accept sysctl options:
 
     sysctl -w net.ipv4.tcp_l3mdev_accept=1
     sysctl -w net.ipv4.udp_l3mdev_accept=1
 
 netfilter rules on the VRF device can be used to limit access to services
 running in the default VRF context as well.
 
-The default VRF does not have limited scope with respect to port bindings.
-That is, if a process does a wildcard bind to a port in the default VRF it
-owns the port across all VRF domains within the network namespace.
-
 ################################################################################
 
 Using iproute2 for VRFs
 
         ((__sk)->sk_family == AF_INET6)                        &&      \
         ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr))               &&      \
         ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr))   &&      \
-        (!(__sk)->sk_bound_dev_if      ||                              \
-          ((__sk)->sk_bound_dev_if == (__dif)) ||                      \
-          ((__sk)->sk_bound_dev_if == (__sdif)))               &&      \
+        (((__sk)->sk_bound_dev_if == (__dif))  ||                      \
+         ((__sk)->sk_bound_dev_if == (__sdif)))                &&      \
         net_eq(sock_net(__sk), (__net)))
 
 #endif /* _INET6_HASHTABLES_H */
 
 
 struct inet_bind_bucket {
        possible_net_t          ib_net;
+       int                     l3mdev;
        unsigned short          port;
        signed char             fastreuse;
        signed char             fastreuseport;
 struct inet_bind_bucket *
 inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
                        struct inet_bind_hashbucket *head,
-                       const unsigned short snum);
+                       const unsigned short snum, int l3mdev);
 void inet_bind_bucket_destroy(struct kmem_cache *cachep,
                              struct inet_bind_bucket *tb);
 
 #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
        (((__sk)->sk_portpair == (__ports))                     &&      \
         ((__sk)->sk_addrpair == (__cookie))                    &&      \
-        (!(__sk)->sk_bound_dev_if      ||                              \
-          ((__sk)->sk_bound_dev_if == (__dif))                 ||      \
-          ((__sk)->sk_bound_dev_if == (__sdif)))               &&      \
+        (((__sk)->sk_bound_dev_if == (__dif))                  ||      \
+         ((__sk)->sk_bound_dev_if == (__sdif)))                &&      \
         net_eq(sock_net(__sk), (__net)))
 #else /* 32-bit arch */
 #define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        (((__sk)->sk_portpair == (__ports))             &&              \
         ((__sk)->sk_daddr      == (__saddr))           &&              \
         ((__sk)->sk_rcv_saddr  == (__daddr))           &&              \
-        (!(__sk)->sk_bound_dev_if      ||                              \
-          ((__sk)->sk_bound_dev_if == (__dif))         ||              \
-          ((__sk)->sk_bound_dev_if == (__sdif)))       &&              \
+        (((__sk)->sk_bound_dev_if == (__dif))          ||              \
+         ((__sk)->sk_bound_dev_if == (__sdif)))        &&              \
         net_eq(sock_net(__sk), (__net)))
 #endif /* 64-bit arch */
 
 
        return sk->sk_bound_dev_if;
 }
 
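+/* Scope key for inet bind buckets. When the tcp_l3mdev_accept sysctl
+ * is enabled, sockets are not scoped to an l3mdev and the key is 0.
+ * Otherwise a socket bound to a VRF device, or to an interface
+ * enslaved to one, is keyed by the master device ifindex; unbound
+ * sockets (and non-VRF binds) map to 0, the default VRF.
+ */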
+static inline int inet_sk_bound_l3mdev(const struct sock *sk)
+{
+#ifdef CONFIG_NET_L3_MASTER_DEV
+       struct net *net = sock_net(sk);
+
+       if (!net->ipv4.sysctl_tcp_l3mdev_accept)
+               return l3mdev_master_ifindex_by_index(net,
+                                                     sk->sk_bound_dev_if);
+#endif
+
+       return 0;
+}
+
 struct inet_cork {
        unsigned int            flags;
        __be32                  addr;
 
        int i, low, high, attempt_half;
        struct inet_bind_bucket *tb;
        u32 remaining, offset;
+       int l3mdev;
 
+       l3mdev = inet_sk_bound_l3mdev(sk);
        attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
 other_half_scan:
        inet_get_local_port_range(net, &low, &high);
                                                  hinfo->bhash_size)];
                spin_lock_bh(&head->lock);
                inet_bind_bucket_for_each(tb, &head->chain)
-                       if (net_eq(ib_net(tb), net) && tb->port == port) {
+                       if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+                           tb->port == port) {
                                if (!inet_csk_bind_conflict(sk, tb, false, false))
                                        goto success;
                                goto next_port;
        struct net *net = sock_net(sk);
        struct inet_bind_bucket *tb = NULL;
        kuid_t uid = sock_i_uid(sk);
+       int l3mdev;
+
+       l3mdev = inet_sk_bound_l3mdev(sk);
 
        if (!port) {
                head = inet_csk_find_open_port(sk, &tb, &port);
                                          hinfo->bhash_size)];
        spin_lock_bh(&head->lock);
        inet_bind_bucket_for_each(tb, &head->chain)
-               if (net_eq(ib_net(tb), net) && tb->port == port)
+               if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+                   tb->port == port)
                        goto tb_found;
 tb_not_found:
        tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
-                                    net, head, port);
+                                    net, head, port, l3mdev);
        if (!tb)
                goto fail_unlock;
 tb_found:
 
 struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
                                                 struct net *net,
                                                 struct inet_bind_hashbucket *head,
-                                                const unsigned short snum)
+                                                const unsigned short snum,
+                                                int l3mdev)
 {
        struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 
        if (tb) {
                write_pnet(&tb->ib_net, net);
+               tb->l3mdev    = l3mdev;
                tb->port      = snum;
                tb->fastreuse = 0;
                tb->fastreuseport = 0;
                        table->bhash_size);
        struct inet_bind_hashbucket *head = &table->bhash[bhash];
        struct inet_bind_bucket *tb;
+       int l3mdev;
 
        spin_lock(&head->lock);
        tb = inet_csk(sk)->icsk_bind_hash;
                return -ENOENT;
        }
        if (tb->port != port) {
+               l3mdev = inet_sk_bound_l3mdev(sk);
+
                /* NOTE: using tproxy and redirecting skbs to a proxy
                 * on a different listener port breaks the assumption
                 * that the listener socket's icsk_bind_hash is the same
                 * as that of the child socket. We have to look up or
                 * create a new bind bucket for the child here. */
                inet_bind_bucket_for_each(tb, &head->chain) {
                        if (net_eq(ib_net(tb), sock_net(sk)) &&
-                           tb->port == port)
+                           tb->l3mdev == l3mdev && tb->port == port)
                                break;
                }
                if (!tb) {
                        tb = inet_bind_bucket_create(table->bind_bucket_cachep,
-                                                    sock_net(sk), head, port);
+                                                    sock_net(sk), head, port,
+                                                    l3mdev);
                        if (!tb) {
                                spin_unlock(&head->lock);
                                return -ENOMEM;
        u32 remaining, offset;
        int ret, i, low, high;
        static u32 hint;
+       int l3mdev;
 
        if (port) {
                head = &hinfo->bhash[inet_bhashfn(net, port,
                return ret;
        }
 
+       l3mdev = inet_sk_bound_l3mdev(sk);
+
        inet_get_local_port_range(net, &low, &high);
        high++; /* [32768, 60999] -> [32768, 61000[ */
        remaining = high - low;
                 * the established check is already unique enough.
                 */
                inet_bind_bucket_for_each(tb, &head->chain) {
-                       if (net_eq(ib_net(tb), net) && tb->port == port) {
+                       if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+                           tb->port == port) {
                                if (tb->fastreuse >= 0 ||
                                    tb->fastreuseport >= 0)
                                        goto next_port;
                }
 
                tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
-                                            net, head, port);
+                                            net, head, port, l3mdev);
                if (!tb) {
                        spin_unlock_bh(&head->lock);
                        return -ENOMEM;