www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
Modifies SDP to support the updated 2.6.26-rc2 kernel APIs.
author Amir Vadai <amirv@mellanox.co.il>
Thu, 22 May 2008 11:35:03 +0000 (14:35 +0300)
committer Mukesh Kacker <mukesh.kacker@oracle.com>
Tue, 6 Oct 2015 12:04:15 +0000 (05:04 -0700)
Signed-off-by: Amir Vadai <amirv@mellanox.co.il>
drivers/infiniband/ulp/sdp/sdp.h
drivers/infiniband/ulp/sdp/sdp_bcopy.c
drivers/infiniband/ulp/sdp/sdp_main.c

index e4d3effb53a54d65d404f9fc67f3ddbf22a9a3c2..c434b60c619093472af0c21b17e40c50fa9921b7 100644 (file)
@@ -265,4 +265,30 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
 void sdp_start_keepalive_timer(struct sock *sk);
 void sdp_bzcopy_write_space(struct sdp_sock *ssk);
 
+static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+{
+       struct sk_buff *skb;
+
+       /* The TCP header must be at least 32-bit aligned.  */
+       size = ALIGN(size, 4);
+
+       skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+       if (skb) {
+               if (sk_wmem_schedule(sk, skb->truesize)) {
+                       /*
+                        * Make sure that we have exactly size bytes
+                        * available to the caller, no more, no less.
+                        */
+                       skb_reserve(skb, skb_tailroom(skb) - size);
+                       return skb;
+               }
+               __kfree_skb(skb);
+       } else {
+               sk->sk_prot->enter_memory_pressure();
+               sk_stream_moderate_sndbuf(sk);
+       }
+       return NULL;
+}
+
+
 #endif
index ad788f7efe0a564c2699154a4a1d03e865ec9f51..36cbbadf82fe63b94ee185146dc1260b8857aac1 100644 (file)
@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
        sock_set_flag(sk, SOCK_DONE);
 
 
-       sk_stream_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
 
        if (!sock_flag(sk, SOCK_DEAD)) {
                sk->sk_state_change(sk);
@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
        struct ib_send_wr *bad_wr;
 
        h->mid = mid;
-       if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_URG))
+       if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG))
                h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
        else
                h->flags = 0;
@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
        ssk->tx_wr.num_sge = frags + 1;
        ssk->tx_wr.opcode = IB_WR_SEND;
        ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
-       if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_URG))
+       if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG))
                ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
        rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
        ++ssk->tx_head;
@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
        /* TODO: allocate from cache */
 
        if (unlikely(ssk->isk.sk.sk_allocation)) {
-               skb = sk_stream_alloc_skb(&ssk->isk.sk, SDP_HEAD_SIZE,
+               skb = sdp_stream_alloc_skb(&ssk->isk.sk, SDP_HEAD_SIZE,
                                          ssk->isk.sk.sk_allocation);
                gfp_page = ssk->isk.sk.sk_allocation | __GFP_HIGHMEM;
        } else {
-               skb = sk_stream_alloc_skb(&ssk->isk.sk, SDP_HEAD_SIZE,
+               skb = sdp_stream_alloc_skb(&ssk->isk.sk, SDP_HEAD_SIZE,
                                          GFP_KERNEL);
                gfp_page = GFP_HIGHUSER;
        }
@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
        if (likely(ssk->bufs > 1) &&
            likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
                struct sk_buff *skb;
-               skb = sk_stream_alloc_skb(&ssk->isk.sk,
+               skb = sdp_stream_alloc_skb(&ssk->isk.sk,
                                          sizeof(struct sdp_bsdh),
                                          GFP_KERNEL);
                if (!skb)
@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
            ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
                struct sdp_chrecvbuf *resp_size;
                ssk->recv_request = 0;
-               skb = sk_stream_alloc_skb(&ssk->isk.sk,
+               skb = sdp_stream_alloc_skb(&ssk->isk.sk,
                                          sizeof(struct sdp_bsdh) +
                                          sizeof(*resp_size),
                                          gfp_page);
@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
            ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
            ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
                struct sdp_chrecvbuf *req_size;
-               skb = sk_stream_alloc_skb(&ssk->isk.sk,
+               skb = sdp_stream_alloc_skb(&ssk->isk.sk,
                                          sizeof(struct sdp_bsdh) +
                                          sizeof(*req_size),
                                          gfp_page);
@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
        if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
            likely(ssk->bufs > 1) &&
            likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
-               skb = sk_stream_alloc_skb(&ssk->isk.sk,
+               skb = sdp_stream_alloc_skb(&ssk->isk.sk,
                                          sizeof(struct sdp_bsdh),
                                          GFP_KERNEL);
                /* FIXME */
@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
                        (TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
                !ssk->isk.sk.sk_send_head &&
                ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
-               skb = sk_stream_alloc_skb(&ssk->isk.sk,
+               skb = sdp_stream_alloc_skb(&ssk->isk.sk,
                                          sizeof(struct sdp_bsdh),
                                          gfp_page);
                /* FIXME */
@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
                skb = sdp_send_completion(ssk, wc->wr_id);
                if (unlikely(!skb))
                        return;
-               sk_stream_free_skb(&ssk->isk.sk, skb);
+               sk_wmem_free_skb(&ssk->isk.sk, skb);
                if (unlikely(wc->status)) {
                        if (wc->status != IB_WC_WR_FLUSH_ERR) {
                                sdp_dbg(&ssk->isk.sk,
@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
                goto out;
        sdp_poll_cq(ssk, cq);
        release_sock(sk);
-       sk_stream_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
        lock_sock(sk);
        cq = ssk->cq;
        if (unlikely(!cq))
index dcc60e3aa20d0ac25c8bbf0804039a872fbf6eb5..d35c803a43fd1f94ee6cc86cec98d3a10be3d899 100644 (file)
@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
                __kfree_skb(skb);
        }
 
-       sk_stream_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
 
        /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
         * 3.10, we send a RST here because data was lost.  To
@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
 {
        if (unlikely(flags & MSG_OOB)) {
                struct sk_buff *skb = sk->sk_write_queue.prev;
-               TCP_SKB_CB(skb)->flags |= TCPCB_URG;
+               TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_URG;
        }
 }
 
@@ -1202,7 +1202,8 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
 {
         skb_header_release(skb);
         __skb_queue_tail(&sk->sk_write_queue, skb);
-        sk_charge_skb(sk, skb);
+       sk->sk_wmem_queued += skb->truesize;
+        sk_mem_charge(sk, skb->truesize);
         if (!sk->sk_send_head)
                 sk->sk_send_head = skb;
         if (ssk->nonagle & TCP_NAGLE_PUSH)
@@ -1366,7 +1367,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
                if (copy > PAGE_SIZE - off)
                        copy = PAGE_SIZE - off;
 
-               if (!sk_stream_wmem_schedule(sk, copy))
+               if (!sk_wmem_schedule(sk, copy))
                        return SDP_DO_WAIT_MEM;
 
                if (!page) {
@@ -1438,7 +1439,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
                if (left <= this_page)
                        this_page = left;
 
-               if (!sk_stream_wmem_schedule(sk, copy))
+               if (!sk_wmem_schedule(sk, copy))
                        return SDP_DO_WAIT_MEM;
 
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
@@ -1646,8 +1647,8 @@ new_segment:
                                                goto wait_for_sndbuf;
                                }
 
-                               skb = sk_stream_alloc_pskb(sk, select_size(sk, ssk),
-                                                          0, sk->sk_allocation);
+                               skb = sdp_stream_alloc_skb(sk, select_size(sk, ssk),
+                                                          sk->sk_allocation);
                                if (!skb)
                                        goto wait_for_memory;
 
@@ -1671,7 +1672,7 @@ new_segment:
 
                        /* OOB data byte should be the last byte of
                           the data payload */
-                       if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_URG) &&
+                       if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG) &&
                            !(flags & MSG_OOB)) {
                                sdp_mark_push(ssk, skb);
                                goto new_segment;
@@ -1747,7 +1748,7 @@ do_fault:
                if (sk->sk_send_head == skb)
                        sk->sk_send_head = NULL;
                __skb_unlink(skb, &sk->sk_write_queue);
-               sk_stream_free_skb(sk, skb);
+               sk_wmem_free_skb(sk, skb);
        }
 
 do_error:
@@ -2348,10 +2349,6 @@ static int __init sdp_proc_init(void)
        sdp_seq_afinfo.seq_fops->llseek        = seq_lseek;
        sdp_seq_afinfo.seq_fops->release       = seq_release_private;
 
-       p = proc_net_fops_create(&init_net, sdp_seq_afinfo.name, S_IRUGO,
-                                sdp_seq_afinfo.seq_fops);
-       if (p)
-               p->data = &sdp_seq_afinfo;
        p = proc_net_fops_create(&init_net, sdp_seq_afinfo.name, S_IRUGO,
                                 sdp_seq_afinfo.seq_fops);
        if (p)