        struct inet_listen_hashbucket  listening_hash[INET_LHTABLE_SIZE]
                                        ____cacheline_aligned_in_smp;
 };
 
+#define inet_lhash2_for_each_icsk_continue(__icsk) \
+       hlist_for_each_entry_continue(__icsk, icsk_listen_portaddr_node)
+
+#define inet_lhash2_for_each_icsk(__icsk, list) \
+       hlist_for_each_entry(__icsk, list, icsk_listen_portaddr_node)
+
 #define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
        hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)
 
 
 static void *listening_get_first(struct seq_file *seq)
 {
        struct tcp_iter_state *st = seq->private;
 
        st->offset = 0;
-       for (; st->bucket < INET_LHTABLE_SIZE; st->bucket++) {
-               struct inet_listen_hashbucket *ilb;
-               struct hlist_nulls_node *node;
+       for (; st->bucket <= tcp_hashinfo.lhash2_mask; st->bucket++) {
+               struct inet_listen_hashbucket *ilb2;
+               struct inet_connection_sock *icsk;
                struct sock *sk;
 
-               ilb = &tcp_hashinfo.listening_hash[st->bucket];
-               if (hlist_nulls_empty(&ilb->nulls_head))
+               ilb2 = &tcp_hashinfo.lhash2[st->bucket];
+               if (hlist_empty(&ilb2->head))
                        continue;
 
-               spin_lock(&ilb->lock);
-               sk_nulls_for_each(sk, node, &ilb->nulls_head) {
+               spin_lock(&ilb2->lock);
+               inet_lhash2_for_each_icsk(icsk, &ilb2->head) {
+                       sk = (struct sock *)icsk;
                        if (seq_sk_match(seq, sk))
                                return sk;
                }
-               spin_unlock(&ilb->lock);
+               spin_unlock(&ilb2->lock);
        }
 
        return NULL;
 }

 static void *listening_get_next(struct seq_file *seq, void *cur)
 {
        struct tcp_iter_state *st = seq->private;
-       struct inet_listen_hashbucket *ilb;
-       struct hlist_nulls_node *node;
+       struct inet_listen_hashbucket *ilb2;
+       struct inet_connection_sock *icsk;
        struct sock *sk = cur;
 
        ++st->num;
        ++st->offset;
 
-       sk = sk_nulls_next(sk);
-
-       sk_nulls_for_each_from(sk, node) {
+       icsk = inet_csk(sk);
+       inet_lhash2_for_each_icsk_continue(icsk) {
+               sk = (struct sock *)icsk;
                if (seq_sk_match(seq, sk))
                        return sk;
        }
 
-       ilb = &tcp_hashinfo.listening_hash[st->bucket];
-       spin_unlock(&ilb->lock);
+       ilb2 = &tcp_hashinfo.lhash2[st->bucket];
+       spin_unlock(&ilb2->lock);
        ++st->bucket;
        return listening_get_first(seq);
 }
 
        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
-               if (st->bucket >= INET_LHTABLE_SIZE)
+               if (st->bucket > tcp_hashinfo.lhash2_mask)
                        break;
                st->state = TCP_SEQ_STATE_LISTENING;
                rc = listening_get_first(seq);

 void tcp_seq_stop(struct seq_file *seq, void *v)
 {
        struct tcp_iter_state *st = seq->private;

        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
-                       spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
+                       spin_unlock(&tcp_hashinfo.lhash2[st->bucket].lock);
                break;
        case TCP_SEQ_STATE_ESTABLISHED:
                if (v)