unsigned int ivlen = crypto_aead_ivsize(
                                crypto_aead_reqtfm(&ctx->aead_req));
 
-       WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
+       WARN_ON(refcount_read(&sk->sk_refcnt) != 0);
        aead_put_sgl(sk);
        sock_kzfree_s(sk, ctx->iv, ivlen);
        sock_kfree_s(sk, ctx, ctx->len);
 
 #include <net/tcp_states.h>
 #include <net/netns/hash.h>
 
-#include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <asm/byteorder.h>
 
 /* This is for all connections with a full identity, no wildcards.
        sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
                           dport, dif, &refcounted);
 
-       if (sk && !refcounted && !atomic_inc_not_zero(&sk->sk_refcnt))
+       if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
                sk = NULL;
        return sk;
 }
 
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/bug.h>
+#include <linux/refcount.h>
 
 #include <net/sock.h>
 
                return NULL;
        req->rsk_listener = NULL;
        if (attach_listener) {
-               if (unlikely(!atomic_inc_not_zero(&sk_listener->sk_refcnt))) {
+               if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
                        kmem_cache_free(ops->slab, req);
                        return NULL;
                }
        sk_node_init(&req_to_sk(req)->sk_node);
        sk_tx_queue_clear(req_to_sk(req));
        req->saved_syn = NULL;
-       atomic_set(&req->rsk_refcnt, 0);
+       refcount_set(&req->rsk_refcnt, 0);
 
        return req;
 }
 static inline void reqsk_free(struct request_sock *req)
 {
        /* temporary debugging */
-       WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0);
+       WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
 
        req->rsk_ops->destructor(req);
        if (req->rsk_listener)
 
 static inline void reqsk_put(struct request_sock *req)
 {
-       if (atomic_dec_and_test(&req->rsk_refcnt))
+       if (refcount_dec_and_test(&req->rsk_refcnt))
                reqsk_free(req);
 }
 
 
 #include <linux/poll.h>
 
 #include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <net/dst.h>
 #include <net/checksum.h>
 #include <net/tcp_states.h>
                u32             skc_tw_rcv_nxt; /* struct tcp_timewait_sock  */
        };
 
-       atomic_t                skc_refcnt;
+       refcount_t              skc_refcnt;
        /* private: */
        int                     skc_dontcopy_end[0];
        union {
 
 static __always_inline void sock_hold(struct sock *sk)
 {
-       atomic_inc(&sk->sk_refcnt);
+       refcount_inc(&sk->sk_refcnt);
 }
 
 /* Ungrab socket in the context, which assumes that socket refcnt
  */
 static __always_inline void __sock_put(struct sock *sk)
 {
-       atomic_dec(&sk->sk_refcnt);
+       refcount_dec(&sk->sk_refcnt);
 }
 
 static inline bool sk_del_node_init(struct sock *sk)
 
        if (rc) {
                /* paranoid for a while -acme */
-               WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+               WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }
        return rc;
 
        if (rc) {
                /* paranoid for a while -acme */
-               WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+               WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }
        return rc;
 
 static inline void sk_refcnt_debug_release(const struct sock *sk)
 {
-       if (atomic_read(&sk->sk_refcnt) != 1)
+       if (refcount_read(&sk->sk_refcnt) != 1)
                printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
-                      sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
+                      sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
 }
 #else /* SOCK_REFCNT_DEBUG */
 #define sk_refcnt_debug_inc(sk) do { } while (0)
 /* Ungrab socket and destroy it, if it was the last reference. */
 static inline void sock_put(struct sock *sk)
 {
-       if (atomic_dec_and_test(&sk->sk_refcnt))
+       if (refcount_dec_and_test(&sk->sk_refcnt))
                sk_free(sk);
 }
 /* Generic version of sock_put(), dealing with all sockets
 
                   vcc->flags, sk->sk_err,
                   sk_wmem_alloc_get(sk), sk->sk_sndbuf,
                   sk_rmem_alloc_get(sk), sk->sk_rcvbuf,
-                  atomic_read(&sk->sk_refcnt));
+                  refcount_read(&sk->sk_refcnt));
 }
 
 static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
 
                seq_printf(seq,
                           "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
                           sk,
-                          atomic_read(&sk->sk_refcnt),
+                          refcount_read(&sk->sk_refcnt),
                           sk_rmem_alloc_get(sk),
                           sk_wmem_alloc_get(sk),
                           from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
 
        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
                return;
 
-       BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
+       BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
 
        /* Kill poor orphan */
        bt_sock_unlink(&rfcomm_sk_list, sk);
 
        struct sock *sk = skb->sk;
        struct sk_buff *clone;
 
-       if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
+       if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
                return NULL;
 
        clone = skb_clone(skb, GFP_ATOMIC);
        /* Take a reference to prevent skb_orphan() from freeing the socket,
         * but only if the socket refcount is not zero.
         */
-       if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+       if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
                *skb_hwtstamps(skb) = *hwtstamps;
                __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
                sock_put(sk);
        /* Take a reference to prevent skb_orphan() from freeing the socket,
         * but only if the socket refcount is not zero.
         */
-       if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+       if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
                err = sock_queue_err_skb(sk, skb);
                sock_put(sk);
        }
 
                 * (Documentation/RCU/rculist_nulls.txt for details)
                 */
                smp_wmb();
-               atomic_set(&newsk->sk_refcnt, 2);
+               refcount_set(&newsk->sk_refcnt, 2);
 
                /*
                 * Increment the counter in the same struct proto as the master
                ) {
                struct sock *sk = skb->sk;
 
-               if (atomic_inc_not_zero(&sk->sk_refcnt)) {
+               if (refcount_inc_not_zero(&sk->sk_refcnt)) {
                        WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
                        skb->destructor = sock_efree;
                }
         * (Documentation/RCU/rculist_nulls.txt for details)
         */
        smp_wmb();
-       atomic_set(&sk->sk_refcnt, 1);
+       refcount_set(&sk->sk_refcnt, 1);
        atomic_set(&sk->sk_drops, 0);
 }
 EXPORT_SYMBOL(sock_init_data);
 
         * are committed to memory and refcnt initialized.
         */
        smp_wmb();
-       atomic_set(&req->rsk_refcnt, 2 + 1);
+       refcount_set(&req->rsk_refcnt, 2 + 1);
 }
 
 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 
 /* All sockets share common refcount, but have different destructors */
 void sock_gen_put(struct sock *sk)
 {
-       if (!atomic_dec_and_test(&sk->sk_refcnt))
+       if (!refcount_dec_and_test(&sk->sk_refcnt))
                return;
 
        if (sk->sk_state == TCP_TIME_WAIT)
                        continue;
                if (likely(INET_MATCH(sk, net, acookie,
                                      saddr, daddr, ports, dif))) {
-                       if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
+                       if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
                                goto out;
                        if (unlikely(!INET_MATCH(sk, net, acookie,
                                                 saddr, daddr, ports, dif))) {
 
 
 void inet_twsk_put(struct inet_timewait_sock *tw)
 {
-       if (atomic_dec_and_test(&tw->tw_refcnt))
+       if (refcount_dec_and_test(&tw->tw_refcnt))
                inet_twsk_free(tw);
 }
 EXPORT_SYMBOL_GPL(inet_twsk_put);
-        * We can use atomic_set() because prior spin_lock()/spin_unlock()
+        * We can use refcount_set() because prior spin_lock()/spin_unlock()
          * committed into memory all tw fields.
          */
-       atomic_set(&tw->tw_refcnt, 4);
+       refcount_set(&tw->tw_refcnt, 4);
        inet_twsk_add_node_rcu(tw, &ehead->chain);
 
        /* Step 3: Remove SK from hash chain */
                 * to a non null value before everything is setup for this
                 * timewait socket.
                 */
-               atomic_set(&tw->tw_refcnt, 0);
+               refcount_set(&tw->tw_refcnt, 0);
 
                __module_get(tw->tw_prot->owner);
        }
                                atomic_read(&twsk_net(tw)->count))
                                continue;
 
-                       if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
+                       if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
                                continue;
 
                        if (unlikely((tw->tw_family != family) ||
 
 {
        pr_debug("ping_close(sk=%p,sk->num=%u)\n",
                 inet_sk(sk), inet_sk(sk)->inet_num);
-       pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter);
+       pr_debug("isk->refcnt = %d\n", refcount_read(&sk->sk_refcnt));
 
        sk_common_release(sk);
 }
                0, 0L, 0,
                from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
                0, sock_i_ino(sp),
-               atomic_read(&sp->sk_refcnt), sp,
+               refcount_read(&sp->sk_refcnt), sp,
                atomic_read(&sp->sk_drops));
 }
 
 
                0, 0L, 0,
                from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
                0, sock_i_ino(sp),
-               atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
+               refcount_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
 }
 
 static int raw_seq_show(struct seq_file *seq, void *v)
 
        child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
                                                 NULL, &own_req);
        if (child) {
-               atomic_set(&req->rsk_refcnt, 1);
+               refcount_set(&req->rsk_refcnt, 1);
                tcp_sk(child)->tsoffset = tsoff;
                sock_rps_save_rxhash(child, skb);
                inet_csk_reqsk_queue_add(sk, req, child);
 
        inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
                                  TCP_TIMEOUT_INIT, TCP_RTO_MAX);
 
-       atomic_set(&req->rsk_refcnt, 2);
+       refcount_set(&req->rsk_refcnt, 2);
 
        /* Now finish processing the fastopen child socket. */
        inet_csk(child)->icsk_af_ops->rebuild_header(child);
 
                from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
                icsk->icsk_probes_out,
                sock_i_ino(sk),
-               atomic_read(&sk->sk_refcnt), sk,
+               refcount_read(&sk->sk_refcnt), sk,
                jiffies_to_clock_t(icsk->icsk_rto),
                jiffies_to_clock_t(icsk->icsk_ack.ato),
                (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
                i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
                3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
-               atomic_read(&tw->tw_refcnt), tw);
+               refcount_read(&tw->tw_refcnt), tw);
 }
 
 #define TMPSZ 150
 
 
        sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
                               dif, &udp_table, NULL);
-       if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+       if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
                sk = NULL;
        return sk;
 }
                                             uh->source, iph->saddr, dif);
        }
 
-       if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
+       if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
                return;
 
        skb->sk = sk;
                0, 0L, 0,
                from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
                0, sock_i_ino(sp),
-               atomic_read(&sp->sk_refcnt), sp,
+               refcount_read(&sp->sk_refcnt), sp,
                atomic_read(&sp->sk_drops));
 }
 
 
                                req->id.idiag_dport,
                                req->id.idiag_if, tbl, NULL);
 #endif
-       if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+       if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
                sk = NULL;
        rcu_read_unlock();
        err = -ENOENT;
                return -EINVAL;
        }
 
-       if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+       if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
                sk = NULL;
 
        rcu_read_unlock();
 
                   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
                   0,
                   sock_i_ino(sp),
-                  atomic_read(&sp->sk_refcnt), sp,
+                  refcount_read(&sp->sk_refcnt), sp,
                   atomic_read(&sp->sk_drops));
 }
 
                        continue;
                if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif))
                        continue;
-               if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
+               if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
                        goto out;
 
                if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif))) {
 
        sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
                            ntohs(dport), dif, &refcounted);
-       if (sk && !refcounted && !atomic_inc_not_zero(&sk->sk_refcnt))
+       if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
                sk = NULL;
        return sk;
 }
 
                   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
                   icsk->icsk_probes_out,
                   sock_i_ino(sp),
-                  atomic_read(&sp->sk_refcnt), sp,
+                  refcount_read(&sp->sk_refcnt), sp,
                   jiffies_to_clock_t(icsk->icsk_rto),
                   jiffies_to_clock_t(icsk->icsk_ack.ato),
                   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   tw->tw_substate, 0, 0,
                   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
-                  atomic_read(&tw->tw_refcnt), tw);
+                  refcount_read(&tw->tw_refcnt), tw);
 }
 
 static int tcp6_seq_show(struct seq_file *seq, void *v)
 
 
        sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
                                dif, &udp_table, NULL);
-       if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+       if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
                sk = NULL;
        return sk;
 }
        else
                return;
 
-       if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
+       if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
                return;
 
        skb->sk = sk;
 
        else
                seq_printf(f, "%pK %-6d %-6u %-6u %-6u %-6lu\n",
                               s,
-                              atomic_read(&s->sk_refcnt),
+                              refcount_read(&s->sk_refcnt),
                               sk_rmem_alloc_get(s),
                               sk_wmem_alloc_get(s),
                               from_kuid_munged(seq_user_ns(f), sock_i_uid(s)),
 
                   tunnel->encap == L2TP_ENCAPTYPE_IP ? "IP" :
                   "");
        seq_printf(m, " %d sessions, refcnt %d/%d\n", session_count,
-                  tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0,
+                  tunnel->sock ? refcount_read(&tunnel->sock->sk_refcnt) : 0,
                   atomic_read(&tunnel->ref_count));
-
        seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n",
                   tunnel->debug,
                   atomic_long_read(&tunnel->stats.tx_packets),
 
        sk_nulls_for_each_rcu(rc, node, laddr_hb) {
                if (llc_estab_match(sap, daddr, laddr, rc)) {
                        /* Extra checks required by SLAB_TYPESAFE_BY_RCU */
-                       if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
+                       if (unlikely(!refcount_inc_not_zero(&rc->sk_refcnt)))
                                goto again;
                        if (unlikely(llc_sk(rc)->sap != sap ||
                                     !llc_estab_match(sap, daddr, laddr, rc))) {
        sk_nulls_for_each_rcu(rc, node, laddr_hb) {
                if (llc_listener_match(sap, laddr, rc)) {
                        /* Extra checks required by SLAB_TYPESAFE_BY_RCU */
-                       if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
+                       if (unlikely(!refcount_inc_not_zero(&rc->sk_refcnt)))
                                goto again;
                        if (unlikely(llc_sk(rc)->sap != sap ||
                                     !llc_listener_match(sap, laddr, rc))) {
        skb_queue_purge(&sk->sk_write_queue);
        skb_queue_purge(&llc->pdu_unack_q);
 #ifdef LLC_REFCNT_DEBUG
-       if (atomic_read(&sk->sk_refcnt) != 1) {
+       if (refcount_read(&sk->sk_refcnt) != 1) {
                printk(KERN_DEBUG "Destruction of LLC sock %p delayed in %s, cnt=%d\n",
-                       sk, __func__, atomic_read(&sk->sk_refcnt));
+                       sk, __func__, refcount_read(&sk->sk_refcnt));
                printk(KERN_DEBUG "%d LLC sockets are still alive\n",
                        atomic_read(&llc_sock_nr));
        } else {
 
        sk_nulls_for_each_rcu(rc, node, laddr_hb) {
                if (llc_dgram_match(sap, laddr, rc)) {
                        /* Extra checks required by SLAB_TYPESAFE_BY_RCU */
-                       if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
+                       if (unlikely(!refcount_inc_not_zero(&rc->sk_refcnt)))
                                goto again;
                        if (unlikely(llc_sk(rc)->sap != sap ||
                                     !llc_dgram_match(sap, laddr, rc))) {
 
                                                    daddr, dport,
                                                    in->ifindex);
 
-                       if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+                       if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
                                sk = NULL;
                        /* NOTE: we return listeners even if bound to
                         * 0.0.0.0, those are filtered out in
                                                   daddr, ntohs(dport),
                                                   in->ifindex);
 
-                       if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+                       if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
                                sk = NULL;
                        /* NOTE: we return listeners even if bound to
                         * 0.0.0.0, those are filtered out in
 
        table = &nl_table[sk->sk_protocol];
        if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
                                    netlink_rhashtable_params)) {
-               WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+               WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }
 
        struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
        struct sock *sk = &nlk->sk;
 
-       if (!atomic_dec_and_test(&sk->sk_refcnt))
+       if (!refcount_dec_and_test(&sk->sk_refcnt))
                return;
 
        if (nlk->cb_running && nlk->cb.done) {
                           sk_rmem_alloc_get(s),
                           sk_wmem_alloc_get(s),
                           nlk->cb_running,
-                          atomic_read(&s->sk_refcnt),
+                          refcount_read(&s->sk_refcnt),
                           atomic_read(&s->sk_drops),
                           sock_i_ino(s)
                        );
 
                seq_printf(seq,
                           "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
                           s,
-                          atomic_read(&s->sk_refcnt),
+                          refcount_read(&s->sk_refcnt),
                           s->sk_type,
                           ntohs(po->num),
                           po->ifindex,
 
                        sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
                        from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
                        sock_i_ino(sk),
-                       atomic_read(&sk->sk_refcnt), sk,
+                       refcount_read(&sk->sk_refcnt), sk,
                        atomic_read(&sk->sk_drops));
        }
        seq_pad(seq, '\n');
 
 {
        struct rxrpc_sock *rx = rxrpc_sk(sk);
 
-       _enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
+       _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
 
        /* declare the socket closed for business */
        sock_orphan(sk);
 
                *err = -1;
                return;
        }
-       dst->value = atomic_read(&skb->sk->sk_refcnt);
+       dst->value = refcount_read(&skb->sk->sk_refcnt);
 }
 
 META_COLLECTOR(int_sk_rcvbuf)
 
        struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
 
        if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
-               WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+               WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }
 }
 
 
                seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
                        s,
-                       atomic_read(&s->sk_refcnt),
+                       refcount_read(&s->sk_refcnt),
                        0,
                        s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
                        s->sk_type,