else {
                    dev_kfree_skb_any(entry->skb);
                }
-#if 1
-               /* race fixed by the above incarnation mechanism, but... */
-               if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
-                   atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
-               }
-#endif
+
                /* check error condition */
                if (*entry->status & STATUS_ERROR)
                    atomic_inc(&vcc->stats->tx_err);
        return -ENOMEM;
     }
 
-    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
-
     vcc->push(vcc, skb);
     atomic_inc(&vcc->stats->rx);
 
-    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
-
     return 0;
 }
 
     unsigned long           flags;
 
     ASSERT(vcc);
-    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
     ASSERT(fore200e);
     ASSERT(fore200e_vcc);
 
 
                 * TBRQ, the host issues the close command to the adapter.
                 */
 
-               while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
+               while (((tx_inuse = refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
                       (retry < MAX_RETRY)) {
                        msleep(sleep);
                        if (sleep < 250)
 
                struct sock *sk = sk_atm(vcc);
 
                vc->estimator->cells += (skb->len + 47) / 48;
-               if (atomic_read(&sk->sk_wmem_alloc) >
+               if (refcount_read(&sk->sk_wmem_alloc) >
                    (sk->sk_sndbuf >> 1)) {
                        u32 cps = vc->estimator->maxcps;
 
                atomic_inc(&vcc->stats->tx_err);
                return -ENOMEM;
        }
-       atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
 
        skb_put_data(skb, cell, 52);
 
 
 
 static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size)
 {
-       return (size + atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) <
+       return (size + refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) <
               sk_atm(vcc)->sk_sndbuf;
 }
 
 
 
        /* ===== cache line for TX ===== */
        int                     sk_wmem_queued;
-       atomic_t                sk_wmem_alloc;
+       refcount_t              sk_wmem_alloc;
        unsigned long           sk_tsq_flags;
        struct sk_buff          *sk_send_head;
        struct sk_buff_head     sk_write_queue;
  */
 static inline int sk_wmem_alloc_get(const struct sock *sk)
 {
-       return atomic_read(&sk->sk_wmem_alloc) - 1;
+       return refcount_read(&sk->sk_wmem_alloc) - 1;
 }
 
 /**
        int amt = 0;
 
        if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
-               amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+               amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
                if (amt < 0)
                        amt = 0;
        }
  */
 static inline bool sock_writeable(const struct sock *sk)
 {
-       return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
+       return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
 }
 
 static inline gfp_t gfp_any(void)
 
 
        ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
-       atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
+       refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
        ATM_SKB(skb)->atm_options = atmvcc->atm_options;
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
 
                memcpy(here, llc_oui, sizeof(llc_oui));
                ((__be16 *) here)[3] = skb->protocol;
        }
-       atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
        ATM_SKB(skb)->atm_options = vcc->atm_options;
        entry->vccs->last_use = jiffies;
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
 
                printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
                       __func__, atomic_read(&sk->sk_rmem_alloc));
 
-       if (atomic_read(&sk->sk_wmem_alloc))
+       if (refcount_read(&sk->sk_wmem_alloc))
                printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
-                      __func__, atomic_read(&sk->sk_wmem_alloc));
+                      __func__, refcount_read(&sk->sk_wmem_alloc));
 }
 
 static void vcc_def_wakeup(struct sock *sk)
        struct atm_vcc *vcc = atm_sk(sk);
 
        return (vcc->qos.txtp.max_sdu +
-               atomic_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
+               refcount_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
 }
 
 static void vcc_write_space(struct sock *sk)
        memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
        memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
        vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
-       atomic_set(&sk->sk_wmem_alloc, 1);
+       refcount_set(&sk->sk_wmem_alloc, 1);
        atomic_set(&sk->sk_rmem_alloc, 0);
        vcc->push = NULL;
        vcc->pop = NULL;
                goto out;
        }
        pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
-       atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+       refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 
        skb->dev = NULL; /* for paths shared with net_device interfaces */
        ATM_SKB(skb)->atm_options = vcc->atm_options;
 
        ATM_SKB(skb)->vcc = vcc;
        ATM_SKB(skb)->atm_options = vcc->atm_options;
 
-       atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
        if (vcc->send(vcc, skb) < 0) {
                dev->stats.tx_dropped++;
                return;
        int i;
        char *tmp;              /* FIXME */
 
-       atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+       WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
        mesg = (struct atmlec_msg *)skb->data;
        tmp = skb->data;
        tmp += sizeof(struct atmlec_msg);
 
                                        sizeof(struct llc_snap_hdr));
        }
 
-       atomic_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
+       refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
        ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
        entry->shortcut->send(entry->shortcut, skb);
        entry->packets_fwded++;
 
        struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
        struct k_message *mesg = (struct k_message *)skb->data;
-       atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+       WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
 
        if (mpc == NULL) {
                pr_info("no mpc found\n");
 
                return 1;
        }
 
-       atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
+       refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
        ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
                 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
 
 
        pr_debug("(%d) %d -= %d\n",
                 vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
-       atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+       WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
        dev_kfree_skb_any(skb);
        sk->sk_write_space(sk);
 }
 
        struct sock *sk;
 
        msg = (struct atmsvc_msg *) skb->data;
-       atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+       WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
        vcc = *(struct atm_vcc **) &msg->vcc;
        pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
        sk = sk_atm(vcc);
 
 static void caif_sock_destructor(struct sock *sk)
 {
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-       caif_assert(!atomic_read(&sk->sk_wmem_alloc));
+       caif_assert(!refcount_read(&sk->sk_wmem_alloc));
        caif_assert(sk_unhashed(sk));
        caif_assert(!sk->sk_socket);
        if (!sock_flag(sk, SOCK_DEAD)) {
 
                skb->data_len += copied;
                skb->len += copied;
                skb->truesize += truesize;
-               atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+               refcount_add(truesize, &skb->sk->sk_wmem_alloc);
                while (copied) {
                        int size = min_t(int, copied, PAGE_SIZE - start);
                        skb_fill_page_desc(skb, frag++, pages[n], start, size);
 
                get_page(pfrag->page);
 
                skb->truesize += copy;
-               atomic_add(copy, &sk->sk_wmem_alloc);
+               refcount_add(copy, &sk->sk_wmem_alloc);
                skb->len += copy;
                skb->data_len += copy;
                offset += copy;
 
                if (likely(sk->sk_net_refcnt))
                        get_net(net);
                sock_net_set(sk, net);
-               atomic_set(&sk->sk_wmem_alloc, 1);
+               refcount_set(&sk->sk_wmem_alloc, 1);
 
                mem_cgroup_sk_alloc(sk);
                cgroup_sk_alloc(&sk->sk_cgrp_data);
                sk->sk_destruct(sk);
 
        filter = rcu_dereference_check(sk->sk_filter,
-                                      atomic_read(&sk->sk_wmem_alloc) == 0);
+                                      refcount_read(&sk->sk_wmem_alloc) == 0);
        if (filter) {
                sk_filter_uncharge(sk, filter);
                RCU_INIT_POINTER(sk->sk_filter, NULL);
         * some packets are still in some tx queue.
         * If not null, sock_wfree() will call __sk_free(sk) later
         */
-       if (atomic_dec_and_test(&sk->sk_wmem_alloc))
+       if (refcount_dec_and_test(&sk->sk_wmem_alloc))
                __sk_free(sk);
 }
 EXPORT_SYMBOL(sk_free);
                /*
                 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
                 */
-               atomic_set(&newsk->sk_wmem_alloc, 1);
+               refcount_set(&newsk->sk_wmem_alloc, 1);
                atomic_set(&newsk->sk_omem_alloc, 0);
                sk_init_common(newsk);
 
                 * Keep a reference on sk_wmem_alloc, this will be released
                 * after sk_write_space() call
                 */
-               atomic_sub(len - 1, &sk->sk_wmem_alloc);
+               WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
                sk->sk_write_space(sk);
                len = 1;
        }
         * if sk_wmem_alloc reaches 0, we must finish what sk_free()
         * could not do because of in-flight packets
         */
-       if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
+       if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
                __sk_free(sk);
 }
 EXPORT_SYMBOL(sock_wfree);
 {
        struct sock *sk = skb->sk;
 
-       if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
+       if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
                __sk_free(sk);
 }
 
         * is enough to guarantee sk_free() wont free this sock until
         * all in-flight packets are completed
         */
-       atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+       refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 }
 EXPORT_SYMBOL(skb_set_owner_w);
 
                struct sock *sk = skb->sk;
 
                if (atomic_inc_not_zero(&sk->sk_refcnt)) {
-                       atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+                       WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
                        skb->destructor = sock_efree;
                }
        } else {
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
                             gfp_t priority)
 {
-       if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+       if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
                struct sk_buff *skb = alloc_skb(size, priority);
                if (skb) {
                        skb_set_owner_w(skb, sk);
                        break;
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-               if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
+               if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
                        break;
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        break;
                if (sk->sk_type == SOCK_STREAM) {
                        if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
                                return 1;
-               } else if (atomic_read(&sk->sk_wmem_alloc) <
+               } else if (refcount_read(&sk->sk_wmem_alloc) <
                           prot->sysctl_wmem[0])
                                return 1;
        }
        /* Do not wake up a writer until he can make "significant"
         * progress.  --DaveM
         */
-       if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+       if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
 
        }
 
        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-       WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+       WARN_ON(refcount_read(&sk->sk_wmem_alloc));
        WARN_ON(sk->sk_wmem_queued);
        WARN_ON(sk->sk_forward_alloc);
 
 
                        skb->data_len += tailen;
                        skb->truesize += tailen;
                        if (sk)
-                               atomic_add(tailen, &sk->sk_wmem_alloc);
+                               refcount_add(tailen, &sk->sk_wmem_alloc);
 
                        goto out;
                }
 
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
-                               if (atomic_read(&sk->sk_wmem_alloc) <=
+                               if (refcount_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len + 15, 1,
                        skb->len += copy;
                        skb->data_len += copy;
                        skb->truesize += copy;
-                       atomic_add(copy, &sk->sk_wmem_alloc);
+                       refcount_add(copy, &sk->sk_wmem_alloc);
                }
                offset += copy;
                length -= copy;
                skb->len += len;
                skb->data_len += len;
                skb->truesize += len;
-               atomic_add(len, &sk->sk_wmem_alloc);
+               refcount_add(len, &sk->sk_wmem_alloc);
                offset += len;
                size -= len;
        }
 
        return skb->len < size_goal &&
               sysctl_tcp_autocorking &&
               skb != tcp_write_queue_head(sk) &&
-              atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
+              refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
 }
 
 static void tcp_push(struct sock *sk, int flags, int mss_now,
                /* It is possible TX completion already happened
                 * before we set TSQ_THROTTLED.
                 */
-               if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
+               if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
                        return;
        }
 
 
                swap(gso_skb->sk, skb->sk);
                swap(gso_skb->destructor, skb->destructor);
                sum_truesize += skb->truesize;
-               atomic_add(sum_truesize - gso_skb->truesize,
-                          &skb->sk->sk_wmem_alloc);
+               refcount_add(sum_truesize - gso_skb->truesize,
+                            &skb->sk->sk_wmem_alloc);
        }
 
 
        struct sock *sk = skb->sk;
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned long flags, nval, oval;
-       int wmem;
 
        /* Keep one reference on sk_wmem_alloc.
         * Will be released by sk_free() from here or tcp_tasklet_func()
         */
-       wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc);
+       WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
 
        /* If this softirq is serviced by ksoftirqd, we are likely under stress.
         * Wait until our queues (qdisc + devices) are drained.
         * - chance for incoming ACK (processed by another cpu maybe)
         *   to migrate this flow (skb->ooo_okay will be eventually set)
         */
-       if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
+       if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
                goto out;
 
        for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
                if (nval != oval)
                        continue;
 
-               if (!atomic_inc_not_zero(&sk->sk_wmem_alloc))
+               if (!refcount_inc_not_zero(&sk->sk_wmem_alloc))
                        break;
                /* queue this socket to tasklet queue */
                tsq = this_cpu_ptr(&tsq_tasklet);
        skb->sk = sk;
        skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
        skb_set_hash_from_sk(skb, sk);
-       atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+       refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 
        skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
 
        limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
        limit <<= factor;
 
-       if (atomic_read(&sk->sk_wmem_alloc) > limit) {
+       if (refcount_read(&sk->sk_wmem_alloc) > limit) {
                /* Always send the 1st or 2nd skb in write queue.
                 * No need to wait for TX completion to call us back,
                 * after softirq/tasklet schedule.
                 * test again the condition.
                 */
                smp_mb__after_atomic();
-               if (atomic_read(&sk->sk_wmem_alloc) > limit)
+               if (refcount_read(&sk->sk_wmem_alloc) > limit)
                        return true;
        }
        return false;
        /* Do not sent more than we queued. 1/4 is reserved for possible
         * copying overhead: fragmentation, tunneling, mangling etc.
         */
-       if (atomic_read(&sk->sk_wmem_alloc) >
+       if (refcount_read(&sk->sk_wmem_alloc) >
            min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
                  sk->sk_sndbuf))
                return -EAGAIN;
 
                        skb->data_len += tailen;
                        skb->truesize += tailen;
                        if (sk)
-                               atomic_add(tailen, &sk->sk_wmem_alloc);
+                               refcount_add(tailen, &sk->sk_wmem_alloc);
 
                        goto out;
                }
 
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
-                               if (atomic_read(&sk->sk_wmem_alloc) <=
+                               if (refcount_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len, 1,
                        skb->len += copy;
                        skb->data_len += copy;
                        skb->truesize += copy;
-                       atomic_add(copy, &sk->sk_wmem_alloc);
+                       refcount_add(copy, &sk->sk_wmem_alloc);
                }
                offset += copy;
                length -= copy;
 
                   psock->sk->sk_receive_queue.qlen,
                   atomic_read(&psock->sk->sk_rmem_alloc),
                   psock->sk->sk_write_queue.qlen,
-                  atomic_read(&psock->sk->sk_wmem_alloc));
+                  refcount_read(&psock->sk->sk_wmem_alloc));
 
        if (psock->done)
                seq_puts(seq, "Done ");
 
        }
 
        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-       WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+       WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 
        atomic_dec(&net_pfkey->socks_nr);
 }
 
        }
 
        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-       WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+       WARN_ON(refcount_read(&sk->sk_wmem_alloc));
        WARN_ON(nlk_sk(sk)->groups);
 }
 
 
        skb_queue_purge(&sk->sk_error_queue);
 
        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-       WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+       WARN_ON(refcount_read(&sk->sk_wmem_alloc));
 
        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_err("Attempt to release alive packet socket: %p\n", sk);
        skb->data_len = to_write;
        skb->len += to_write;
        skb->truesize += to_write;
-       atomic_add(to_write, &po->sk.sk_wmem_alloc);
+       refcount_add(to_write, &po->sk.sk_wmem_alloc);
 
        while (likely(to_write)) {
                nr_frags = skb_shinfo(skb)->nr_frags;
 
                return POLLHUP;
 
        if (sk->sk_state == TCP_ESTABLISHED &&
-               atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
+               refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
                atomic_read(&pn->tx_credits))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 
 
        tc->t_last_seen_una = rds_tcp_snd_una(tc);
        rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
-       if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+       if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
                queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
 
 out:
 
  */
 static inline int rxrpc_writable(struct sock *sk)
 {
-       return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
+       return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
 }
 
 /*
 
        rxrpc_purge_queue(&sk->sk_receive_queue);
 
-       WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+       WARN_ON(refcount_read(&sk->sk_wmem_alloc));
        WARN_ON(!sk_unhashed(sk));
        WARN_ON(sk->sk_socket);
 
 
                        ATM_SKB(skb)->vcc = flow->vcc;
                        memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
                               flow->hdr_len);
-                       atomic_add(skb->truesize,
-                                  &sk_atm(flow->vcc)->sk_wmem_alloc);
+                       refcount_add(skb->truesize,
+                                    &sk_atm(flow->vcc)->sk_wmem_alloc);
                        /* atm.atm_options are already set by atm_tc_enqueue */
                        flow->vcc->send(flow->vcc, skb);
 
         * therefore only reserve a single byte to keep socket around until
         * the packet has been transmitted.
         */
-       atomic_inc(&sk->sk_wmem_alloc);
+       refcount_inc(&sk->sk_wmem_alloc);
 }
 
 static int sctp_packet_pack(struct sctp_packet *packet,
 
                assoc->stream.outcnt, assoc->max_retrans,
                assoc->init_retries, assoc->shutdown_retries,
                assoc->rtx_data_chunks,
-               atomic_read(&sk->sk_wmem_alloc),
+               refcount_read(&sk->sk_wmem_alloc),
                sk->sk_wmem_queued,
                sk->sk_sndbuf,
                sk->sk_rcvbuf);
 
                                sizeof(struct sk_buff) +
                                sizeof(struct sctp_chunk);
 
-       atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+       refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
        sk->sk_wmem_queued += chunk->skb->truesize;
        sk_mem_charge(sk, chunk->skb->truesize);
 }
                                sizeof(struct sk_buff) +
                                sizeof(struct sctp_chunk);
 
-       atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+       WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc));
 
        /*
         * This undoes what is done via sctp_set_owner_w and sk_mem_charge
 
 static int unix_writable(const struct sock *sk)
 {
        return sk->sk_state != TCP_LISTEN &&
-              (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+              (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
 }
 
 static void unix_write_space(struct sock *sk)
 
        skb_queue_purge(&sk->sk_receive_queue);
 
-       WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+       WARN_ON(refcount_read(&sk->sk_wmem_alloc));
        WARN_ON(!sk_unhashed(sk));
        WARN_ON(sk->sk_socket);
        if (!sock_flag(sk, SOCK_DEAD)) {
        skb->len += size;
        skb->data_len += size;
        skb->truesize += size;
-       atomic_add(size, &sk->sk_wmem_alloc);
+       refcount_add(size, &sk->sk_wmem_alloc);
 
        if (newskb) {
                err = unix_scm_to_skb(&scm, skb, false);