skb->destructor = sctp_sock_rfree;
        atomic_add(event->rmem_len, &sk->sk_rmem_alloc);
        /*
-        * This mimics the behavior of
-        * sk_stream_set_owner_r
+        * This mimics the behavior of skb_set_owner_r
         */
        sk->sk_forward_alloc -= event->rmem_len;
 }
 
        return sk->sk_wmem_queued < sk->sk_sndbuf;
 }
 
-extern void sk_stream_rfree(struct sk_buff *skb);
-
-static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
-{
-       skb->sk = sk;
-       skb->destructor = sk_stream_rfree;
-       atomic_add(skb->truesize, &sk->sk_rmem_alloc);
-       sk->sk_forward_alloc -= skb->truesize;
-}
-
-static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
-{
-       skb_truesize_check(skb);
-       sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
-       sk->sk_wmem_queued   -= skb->truesize;
-       sk->sk_forward_alloc += skb->truesize;
-       __kfree_skb(skb);
-}
-
 /* The per-socket spinlock must be held here. */
 static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
        /*
         * Pressure flag: try to collapse.
         * Technical note: it is used by multiple contexts non atomically.
-        * All the sk_stream_mem_schedule() is of this nature: accounting
+        * All the __sk_mem_schedule() is of this nature: accounting
         * is strict, actions are advisory and have some latency.
         */
        int                     *memory_pressure;
        return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
 }
 
-extern void __sk_stream_mem_reclaim(struct sock *sk);
-extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);
+/*
+ * Functions for memory accounting
+ */
+extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
+extern void __sk_mem_reclaim(struct sock *sk);
 
-#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)
-#define SK_STREAM_MEM_QUANTUM_SHIFT ilog2(SK_STREAM_MEM_QUANTUM)
+#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
+#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
+#define SK_MEM_SEND    0
+#define SK_MEM_RECV    1
 
-static inline int sk_stream_pages(int amt)
+static inline int sk_mem_pages(int amt)
 {
-       return (amt + SK_STREAM_MEM_QUANTUM - 1) >> SK_STREAM_MEM_QUANTUM_SHIFT;
+       return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
 }
 
-static inline void sk_stream_mem_reclaim(struct sock *sk)
+static inline int sk_has_account(struct sock *sk)
 {
-       if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
-               __sk_stream_mem_reclaim(sk);
+       /* return true if protocol supports memory accounting */
+       return !!sk->sk_prot->memory_allocated;
 }
 
-static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
+static inline int sk_wmem_schedule(struct sock *sk, int size)
 {
-       return (int)skb->truesize <= sk->sk_forward_alloc ||
-               sk_stream_mem_schedule(sk, skb->truesize, 1);
+       if (!sk_has_account(sk))
+               return 1;
+       return size <= sk->sk_forward_alloc ||
+               __sk_mem_schedule(sk, size, SK_MEM_SEND);
 }
 
-static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
+static inline int sk_rmem_schedule(struct sock *sk, int size)
 {
+       if (!sk_has_account(sk))
+               return 1;
        return size <= sk->sk_forward_alloc ||
-              sk_stream_mem_schedule(sk, size, 0);
+               __sk_mem_schedule(sk, size, SK_MEM_RECV);
+}
+
+static inline void sk_mem_reclaim(struct sock *sk)
+{
+       if (!sk_has_account(sk))
+               return;
+       if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
+               __sk_mem_reclaim(sk);
+}
+
+static inline void sk_mem_charge(struct sock *sk, int size)
+{
+       if (!sk_has_account(sk))
+               return;
+       sk->sk_forward_alloc -= size;
+}
+
+static inline void sk_mem_uncharge(struct sock *sk, int size)
+{
+       if (!sk_has_account(sk))
+               return;
+       sk->sk_forward_alloc += size;
+}
+
+static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
+{
+       skb_truesize_check(skb);
+       sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+       sk->sk_wmem_queued -= skb->truesize;
+       sk_mem_uncharge(sk, skb->truesize);
+       __kfree_skb(skb);
 }
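
Taken together, the new helpers split accounting into a schedule step, which reserves sk_forward_alloc in whole SK_MEM_QUANTUM units via sk_mem_pages() (with 4 KiB pages a 100-byte and a 4096-byte request both cost one quantum), and a charge/uncharge step that moves those reserved bytes to and from the socket's buffers. As a minimal sketch of the send-side pairing, not part of the patch and with a hypothetical function name:

#include <linux/skbuff.h>
#include <net/sock.h>

/* Illustration only; the in-tree callers are skb_entail() and
 * tcp_connect() in the TCP hunks below.  Socket assumed locked.
 */
static int example_queue_for_send(struct sock *sk, struct sk_buff *skb)
{
        /* Reserve send memory; under pressure __sk_mem_schedule() may refuse. */
        if (!sk_wmem_schedule(sk, skb->truesize))
                return -ENOBUFS;

        __skb_queue_tail(&sk->sk_write_queue, skb);
        sk->sk_wmem_queued += skb->truesize;    /* bytes queued for transmit */
        sk_mem_charge(sk, skb->truesize);       /* consume sk_forward_alloc */
        return 0;
}

sk_wmem_free_skb() then reverses both the sk_wmem_queued and the sk_mem_charge() side when an skb is dropped from the write queue, which is exactly the pairing the TCP hunks below switch to.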
 
 /* Used by processes to "lock" a socket state, so that
 
 extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
 
-static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
-{
-       sk->sk_wmem_queued   += skb->truesize;
-       sk->sk_forward_alloc -= skb->truesize;
-}
-
 static inline int skb_copy_to_page(struct sock *sk, char __user *from,
                                   struct sk_buff *skb, struct page *page,
                                   int off, int copy)
        skb->data_len        += copy;
        skb->truesize        += copy;
        sk->sk_wmem_queued   += copy;
-       sk->sk_forward_alloc -= copy;
+       sk_mem_charge(sk, copy);
        return 0;
 }
 
        skb->sk = sk;
        skb->destructor = sock_rfree;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+       sk_mem_charge(sk, skb->truesize);
 }
 
 extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
 
        struct sk_buff *skb;
 
        while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
-               sk_stream_free_skb(sk, skb);
-       sk_stream_mem_reclaim(sk);
+               sk_wmem_free_skb(sk, skb);
+       sk_mem_reclaim(sk);
 }
 
 static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
 
 void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
 {
        kfree_skb(skb);
+       sk_mem_reclaim(sk);
 }
 
 /**
        }
 
        kfree_skb(skb);
+       sk_mem_reclaim(sk);
        return err;
 }
 
 
        if (err)
                goto out;
 
+       if (!sk_rmem_schedule(sk, skb->truesize)) {
+               err = -ENOBUFS;
+               goto out;
+       }
+
        skb->dev = NULL;
        skb_set_owner_r(skb, sk);
 
 {
        struct sock *sk = skb->sk;
 
+       skb_truesize_check(skb);
        atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+       sk_mem_uncharge(skb->sk, skb->truesize);
 }
 
 
 
 EXPORT_SYMBOL(sk_wait_data);
 
+/**
+ *     __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
+ *     @sk: socket
+ *     @size: memory size to allocate
+ *     @kind: allocation type
+ *
+ *     If @kind is SK_MEM_SEND, the allocation is for wmem; otherwise it
+ *     is for rmem. This function assumes that protocols which have
+ *     memory_pressure use sk_wmem_queued for write buffer accounting.
+ */
+int __sk_mem_schedule(struct sock *sk, int size, int kind)
+{
+       struct proto *prot = sk->sk_prot;
+       int amt = sk_mem_pages(size);
+       int allocated;
+
+       sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
+       allocated = atomic_add_return(amt, prot->memory_allocated);
+
+       /* Under limit. */
+       if (allocated <= prot->sysctl_mem[0]) {
+               if (prot->memory_pressure && *prot->memory_pressure)
+                       *prot->memory_pressure = 0;
+               return 1;
+       }
+
+       /* Under pressure. */
+       if (allocated > prot->sysctl_mem[1])
+               if (prot->enter_memory_pressure)
+                       prot->enter_memory_pressure();
+
+       /* Over hard limit. */
+       if (allocated > prot->sysctl_mem[2])
+               goto suppress_allocation;
+
+       /* guarantee minimum buffer size under pressure */
+       if (kind == SK_MEM_RECV) {
+               if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
+                       return 1;
+       } else { /* SK_MEM_SEND */
+               if (sk->sk_type == SOCK_STREAM) {
+                       if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
+                               return 1;
+               } else if (atomic_read(&sk->sk_wmem_alloc) <
+                          prot->sysctl_wmem[0])
+                       return 1;
+       }
+
+       if (prot->memory_pressure) {
+               if (!*prot->memory_pressure ||
+                   prot->sysctl_mem[2] > atomic_read(prot->sockets_allocated) *
+                   sk_mem_pages(sk->sk_wmem_queued +
+                                atomic_read(&sk->sk_rmem_alloc) +
+                                sk->sk_forward_alloc))
+                       return 1;
+       }
+
+suppress_allocation:
+
+       if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
+               sk_stream_moderate_sndbuf(sk);
+
+               /* Fail only if socket is _under_ its sndbuf.
+                * In this case we cannot block, so we have to fail.
+                */
+               if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
+                       return 1;
+       }
+
+       /* Alas. Undo changes. */
+       sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
+       atomic_sub(amt, prot->memory_allocated);
+       return 0;
+}
+
+EXPORT_SYMBOL(__sk_mem_schedule);
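
The SK_MEM_RECV direction pairs the same way: a caller schedules before taking ownership of the skb, and skb_set_owner_r() / sock_rfree() now do the matching sk_mem_charge() / sk_mem_uncharge(). A sketch under the same caveats (hypothetical function name; compare the sock_queue_rcv_skb() hunk above):

#include <linux/skbuff.h>
#include <net/sock.h>

/* Illustration only: receive-path accounting pattern. */
static int example_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
            (unsigned)sk->sk_rcvbuf)
                return -ENOMEM;

        /* Reserve receive memory (kind == SK_MEM_RECV). */
        if (!sk_rmem_schedule(sk, skb->truesize))
                return -ENOBUFS;

        skb_set_owner_r(skb, sk);       /* charges sk_forward_alloc */
        skb_queue_tail(&sk->sk_receive_queue, skb);
        return 0;
}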
+
+/**
+ *     __sk_mem_reclaim - reclaim memory_allocated
+ *     @sk: socket
+ */
+void __sk_mem_reclaim(struct sock *sk)
+{
+       struct proto *prot = sk->sk_prot;
+
+       atomic_sub(sk->sk_forward_alloc / SK_MEM_QUANTUM,
+                  prot->memory_allocated);
+       sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
+
+       if (prot->memory_pressure && *prot->memory_pressure &&
+           (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
+               *prot->memory_pressure = 0;
+}
+
+EXPORT_SYMBOL(__sk_mem_reclaim);
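
As a worked example of the reclaim arithmetic (assuming 4 KiB pages, so SK_MEM_QUANTUM == 4096): with sk_forward_alloc == 9000, __sk_mem_reclaim() hands 9000 / 4096 = 2 quanta back to ->memory_allocated and the mask leaves sk_forward_alloc == 9000 & 4095 == 808. Only whole quanta are ever returned; the sub-quantum remainder stays with the socket, and the inline sk_mem_reclaim() wrapper added earlier in this patch skips the call entirely while sk_forward_alloc is below one quantum.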
+
+
 /*
  * Set of default routines for initialising struct proto_ops when
  * the protocol does not support a particular function. In certain
 
 
 EXPORT_SYMBOL(sk_stream_wait_memory);
 
-void sk_stream_rfree(struct sk_buff *skb)
-{
-       struct sock *sk = skb->sk;
-
-       skb_truesize_check(skb);
-       atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
-       sk->sk_forward_alloc += skb->truesize;
-}
-
-EXPORT_SYMBOL(sk_stream_rfree);
-
 int sk_stream_error(struct sock *sk, int flags, int err)
 {
        if (err == -EPIPE)
 
 EXPORT_SYMBOL(sk_stream_error);
 
-void __sk_stream_mem_reclaim(struct sock *sk)
-{
-       atomic_sub(sk->sk_forward_alloc >> SK_STREAM_MEM_QUANTUM_SHIFT,
-                  sk->sk_prot->memory_allocated);
-       sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1;
-       if (*sk->sk_prot->memory_pressure &&
-           (atomic_read(sk->sk_prot->memory_allocated) <
-            sk->sk_prot->sysctl_mem[0]))
-               *sk->sk_prot->memory_pressure = 0;
-}
-
-EXPORT_SYMBOL(__sk_stream_mem_reclaim);
-
-int sk_stream_mem_schedule(struct sock *sk, int size, int kind)
-{
-       int amt = sk_stream_pages(size);
-       struct proto *prot = sk->sk_prot;
-
-       sk->sk_forward_alloc += amt * SK_STREAM_MEM_QUANTUM;
-       atomic_add(amt, prot->memory_allocated);
-
-       /* Under limit. */
-       if (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]) {
-               if (*prot->memory_pressure)
-                       *prot->memory_pressure = 0;
-               return 1;
-       }
-
-       /* Over hard limit. */
-       if (atomic_read(prot->memory_allocated) > prot->sysctl_mem[2]) {
-               prot->enter_memory_pressure();
-               goto suppress_allocation;
-       }
-
-       /* Under pressure. */
-       if (atomic_read(prot->memory_allocated) > prot->sysctl_mem[1])
-               prot->enter_memory_pressure();
-
-       if (kind) {
-               if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
-                       return 1;
-       } else if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
-               return 1;
-
-       if (!*prot->memory_pressure ||
-           prot->sysctl_mem[2] > atomic_read(prot->sockets_allocated) *
-                               sk_stream_pages(sk->sk_wmem_queued +
-                                               atomic_read(&sk->sk_rmem_alloc) +
-                                               sk->sk_forward_alloc))
-               return 1;
-
-suppress_allocation:
-
-       if (!kind) {
-               sk_stream_moderate_sndbuf(sk);
-
-               /* Fail only if socket is _under_ its sndbuf.
-                * In this case we cannot block, so that we have to fail.
-                */
-               if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
-                       return 1;
-       }
-
-       /* Alas. Undo changes. */
-       sk->sk_forward_alloc -= amt * SK_STREAM_MEM_QUANTUM;
-       atomic_sub(amt, prot->memory_allocated);
-       return 0;
-}
-
-EXPORT_SYMBOL(sk_stream_mem_schedule);
-
 void sk_stream_kill_queues(struct sock *sk)
 {
        /* First the read buffer. */
        BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
 
        /* Account for returned memory. */
-       sk_stream_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
 
        BUG_TRAP(!sk->sk_wmem_queued);
        BUG_TRAP(!sk->sk_forward_alloc);
 
 /*
  * Pressure flag: try to collapse.
  * Technical note: it is used by multiple contexts non atomically.
- * All the sk_stream_mem_schedule() is of this nature: accounting
+ * All the __sk_mem_schedule() is of this nature: accounting
  * is strict, actions are advisory and have some latency.
  */
 int tcp_memory_pressure __read_mostly;
        tcb->sacked  = 0;
        skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
-       sk_charge_skb(sk, skb);
+       sk->sk_wmem_queued += skb->truesize;
+       sk_mem_charge(sk, skb->truesize);
        if (tp->nonagle & TCP_NAGLE_PUSH)
                tp->nonagle &= ~TCP_NAGLE_PUSH;
 }
 
        skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
        if (skb) {
-               if (sk_stream_wmem_schedule(sk, skb->truesize)) {
+               if (sk_wmem_schedule(sk, skb->truesize)) {
                        /*
                         * Make sure that we have exactly size bytes
                         * available to the caller, no more, no less.
                        tcp_mark_push(tp, skb);
                        goto new_segment;
                }
-               if (!sk_stream_wmem_schedule(sk, copy))
+               if (!sk_wmem_schedule(sk, copy))
                        goto wait_for_memory;
 
                if (can_coalesce) {
                skb->data_len += copy;
                skb->truesize += copy;
                sk->sk_wmem_queued += copy;
-               sk->sk_forward_alloc -= copy;
+               sk_mem_charge(sk, copy);
                skb->ip_summed = CHECKSUM_PARTIAL;
                tp->write_seq += copy;
                TCP_SKB_CB(skb)->end_seq += copy;
                                if (copy > PAGE_SIZE - off)
                                        copy = PAGE_SIZE - off;
 
-                               if (!sk_stream_wmem_schedule(sk, copy))
+                               if (!sk_wmem_schedule(sk, copy))
                                        goto wait_for_memory;
 
                                if (!page) {
                 * reset, where we can be unlinking the send_head.
                 */
                tcp_check_send_head(sk, skb);
-               sk_stream_free_skb(sk, skb);
+               sk_wmem_free_skb(sk, skb);
        }
 
 do_error:
                __kfree_skb(skb);
        }
 
-       sk_stream_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
 
        /* As outlined in RFC 2525, section 2.17, we send a RST here because
         * data was lost. To witness the awful effects of the old behavior of
                }
        }
        if (sk->sk_state != TCP_CLOSE) {
-               sk_stream_mem_reclaim(sk);
+               sk_mem_reclaim(sk);
                if (tcp_too_many_orphans(sk,
                                atomic_read(sk->sk_prot->orphan_count))) {
                        if (net_ratelimit())
        limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
        max_share = min(4UL*1024*1024, limit);
 
-       sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
+       sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
        sysctl_tcp_wmem[1] = 16*1024;
        sysctl_tcp_wmem[2] = max(64*1024, max_share);
 
-       sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
+       sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
        sysctl_tcp_rmem[1] = 87380;
        sysctl_tcp_rmem[2] = max(87380, max_share);
 
 
                         * restart window, so that we send ACKs quickly.
                         */
                        tcp_incr_quickack(sk);
-                       sk_stream_mem_reclaim(sk);
+                       sk_mem_reclaim(sk);
                }
        }
        icsk->icsk_ack.lrcvtime = now;
                        break;
 
                tcp_unlink_write_queue(skb, sk);
-               sk_stream_free_skb(sk, skb);
+               sk_wmem_free_skb(sk, skb);
                tcp_clear_all_retrans_hints(tp);
        }
 
        __skb_queue_purge(&tp->out_of_order_queue);
        if (tcp_is_sack(tp))
                tcp_sack_reset(&tp->rx_opt);
-       sk_stream_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
 
        if (!sock_flag(sk, SOCK_DEAD)) {
                sk->sk_state_change(sk);
 queue_and_out:
                        if (eaten < 0 &&
                            (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-                            !sk_stream_rmem_schedule(sk, skb))) {
+                            !sk_rmem_schedule(sk, skb->truesize))) {
                                if (tcp_prune_queue(sk) < 0 ||
-                                   !sk_stream_rmem_schedule(sk, skb))
+                                   !sk_rmem_schedule(sk, skb->truesize))
                                        goto drop;
                        }
-                       sk_stream_set_owner_r(skb, sk);
+                       skb_set_owner_r(skb, sk);
                        __skb_queue_tail(&sk->sk_receive_queue, skb);
                }
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        TCP_ECN_check_ce(tp, skb);
 
        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-           !sk_stream_rmem_schedule(sk, skb)) {
+           !sk_rmem_schedule(sk, skb->truesize)) {
                if (tcp_prune_queue(sk) < 0 ||
-                   !sk_stream_rmem_schedule(sk, skb))
+                   !sk_rmem_schedule(sk, skb->truesize))
                        goto drop;
        }
 
        SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
                   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
-       sk_stream_set_owner_r(skb, sk);
+       skb_set_owner_r(skb, sk);
 
        if (!skb_peek(&tp->out_of_order_queue)) {
                /* Initial out of order segment, build 1 SACK. */
                memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
                TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
                __skb_insert(nskb, skb->prev, skb, list);
-               sk_stream_set_owner_r(nskb, sk);
+               skb_set_owner_r(nskb, sk);
 
                /* Copy data, releasing collapsed skbs. */
                while (copy > 0) {
                     sk->sk_receive_queue.next,
                     (struct sk_buff*)&sk->sk_receive_queue,
                     tp->copied_seq, tp->rcv_nxt);
-       sk_stream_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
 
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
                return 0;
                 */
                if (tcp_is_sack(tp))
                        tcp_sack_reset(&tp->rx_opt);
-               sk_stream_mem_reclaim(sk);
+               sk_mem_reclaim(sk);
        }
 
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
                                /* Bulk data transfer: receiver */
                                __skb_pull(skb,tcp_header_len);
                                __skb_queue_tail(&sk->sk_receive_queue, skb);
-                               sk_stream_set_owner_r(skb, sk);
+                               skb_set_owner_r(skb, sk);
                                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                        }
 
 
        tp->write_seq = TCP_SKB_CB(skb)->end_seq;
        skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
-       sk_charge_skb(sk, skb);
+       sk->sk_wmem_queued += skb->truesize;
+       sk_mem_charge(sk, skb->truesize);
 }
 
 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
        if (buff == NULL)
                return -ENOMEM; /* We'll just try again later. */
 
-       sk_charge_skb(sk, buff);
+       sk->sk_wmem_queued += buff->truesize;
+       sk_mem_charge(sk, buff->truesize);
        nlen = skb->len - len - nsize;
        buff->truesize += nlen;
        skb->truesize -= nlen;
 
        skb->truesize        -= len;
        sk->sk_wmem_queued   -= len;
-       sk->sk_forward_alloc += len;
+       sk_mem_uncharge(sk, len);
        sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
 
        /* Any change of skb->len requires recalculation of tso
        if (unlikely(buff == NULL))
                return -ENOMEM;
 
-       sk_charge_skb(sk, buff);
+       sk->sk_wmem_queued += buff->truesize;
+       sk_mem_charge(sk, buff->truesize);
        buff->truesize += nlen;
        skb->truesize -= nlen;
 
        /* We're allowed to probe.  Build it now. */
        if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
                return -1;
-       sk_charge_skb(sk, nskb);
+       sk->sk_wmem_queued += nskb->truesize;
+       sk_mem_charge(sk, nskb->truesize);
 
        skb = tcp_send_head(sk);
 
                         * Throw it away. */
                        TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
                        tcp_unlink_write_queue(skb, sk);
-                       sk_stream_free_skb(sk, skb);
+                       sk_wmem_free_skb(sk, skb);
                } else {
                        TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
                                                   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
                /* changed transmit queue under us so clear hints */
                tcp_clear_retrans_hints_partial(tp);
 
-               sk_stream_free_skb(sk, next_skb);
+               sk_wmem_free_skb(sk, next_skb);
        }
 }
 
                        tcp_unlink_write_queue(skb, sk);
                        skb_header_release(nskb);
                        __tcp_add_write_queue_head(sk, nskb);
-                       sk_stream_free_skb(sk, skb);
-                       sk_charge_skb(sk, nskb);
+                       sk_wmem_free_skb(sk, skb);
+                       sk->sk_wmem_queued += nskb->truesize;
+                       sk_mem_charge(sk, nskb->truesize);
                        skb = nskb;
                }
 
        tp->retrans_stamp = TCP_SKB_CB(buff)->when;
        skb_header_release(buff);
        __tcp_add_write_queue_tail(sk, buff);
-       sk_charge_skb(sk, buff);
+       sk->sk_wmem_queued += buff->truesize;
+       sk_mem_charge(sk, buff->truesize);
        tp->packets_out += tcp_skb_pcount(buff);
        tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
 
 
                goto out_unlock;
        }
 
-       sk_stream_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
 
        if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                goto out;
 
 out:
        if (tcp_memory_pressure)
-               sk_stream_mem_reclaim(sk);
+               sk_mem_reclaim(sk);
 out_unlock:
        bh_unlock_sock(sk);
        sock_put(sk);
        TCP_CHECK_TIMER(sk);
 
 out:
-       sk_stream_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
 out_unlock:
        bh_unlock_sock(sk);
        sock_put(sk);
        }
 
        TCP_CHECK_TIMER(sk);
-       sk_stream_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
 
 resched:
        inet_csk_reset_keepalive_timer (sk, elapsed);
 
        sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1));
        sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);
 
-       sysctl_sctp_wmem[0] = SK_STREAM_MEM_QUANTUM;
+       sysctl_sctp_wmem[0] = SK_MEM_QUANTUM;
        sysctl_sctp_wmem[1] = 16*1024;
        sysctl_sctp_wmem[2] = max(64*1024, max_share);
 
 
        /*
         * Also try to renege to limit our memory usage in the event that
         * we are under memory pressure
-        * If we can't renege, don't worry about it, the sk_stream_rmem_schedule
+        * If we can't renege, don't worry about it; the sk_rmem_schedule
         * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
         * memory usage too much
         */
 
                                sizeof(struct sctp_chunk);
 
        atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
-       sk_charge_skb(sk, chunk->skb);
+       sk->sk_wmem_queued += chunk->skb->truesize;
+       sk_mem_charge(sk, chunk->skb->truesize);
 }
 
 /* Verify that this is a valid address. */
        atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
 
        /*
-        * This undoes what is done via sk_charge_skb
+        * This undoes what is done via sctp_set_owner_w and sk_mem_charge
         */
        sk->sk_wmem_queued   -= skb->truesize;
-       sk->sk_forward_alloc += skb->truesize;
+       sk_mem_uncharge(sk, skb->truesize);
 
        sock_wfree(skb);
        __sctp_write_space(asoc);
        atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
 
        /*
-        * Mimic the behavior of sk_stream_rfree
+        * Mimic the behavior of sock_rfree
         */
-       sk->sk_forward_alloc += event->rmem_len;
+       sk_mem_uncharge(sk, event->rmem_len);
 }
 
 
 
        if (rx_count >= asoc->base.sk->sk_rcvbuf) {
 
                if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
-                  (!sk_stream_rmem_schedule(asoc->base.sk, chunk->skb)))
+                   (!sk_rmem_schedule(asoc->base.sk, chunk->skb->truesize)))
                        goto fail;
        }
 
 
                sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
        }
 
-       sk_stream_mem_reclaim(asoc->base.sk);
+       sk_mem_reclaim(asoc->base.sk);
        return;
 }