void sdp_start_keepalive_timer(struct sock *sk);
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+{
+ struct sk_buff *skb;
+
+ /* The TCP header must be at least 32-bit aligned. */
+ size = ALIGN(size, 4);
+
+ skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+ if (skb) {
+ if (sk_wmem_schedule(sk, skb->truesize)) {
+ /*
+ * Make sure that we have exactly size bytes
+ * available to the caller, no more, no less.
+ */
+ skb_reserve(skb, skb_tailroom(skb) - size);
+ return skb;
+ }
+ __kfree_skb(skb);
+ } else {
+ sk->sk_prot->enter_memory_pressure();
+ sk_stream_moderate_sndbuf(sk);
+ }
+ return NULL;
+}
+
#endif
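Editor's note: the helper added above is essentially a private copy of the kernel's sk_stream_alloc_skb(), which absorbed sk_stream_alloc_pskb() in the memory-accounting rework and now lives in TCP code rather than the generic stream layer, so SDP appears to need its own copy. Below is a minimal usage sketch; sdp_alloc_bsdh_skb_example() is a hypothetical caller, not part of the patch:

/* Illustrative only: allocate an skb sized for an SDP BSDH header. The helper
 * guarantees exactly 'size' bytes of tailroom on success, and returns NULL
 * when the allocation fails or the accounting layer refuses the charge.
 */
static struct sk_buff *sdp_alloc_bsdh_skb_example(struct sock *sk, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = sdp_stream_alloc_skb(sk, sizeof(struct sdp_bsdh), gfp);
	if (!skb)
		return NULL;		/* caller waits for memory and retries */

	skb_put(skb, sizeof(struct sdp_bsdh));	/* room for the header itself */
	return skb;
}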
sock_set_flag(sk, SOCK_DONE);
- sk_stream_mem_reclaim(sk);
+ sk_mem_reclaim(sk);
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
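Editor's note: the accounting hunks in this patch track the kernel's move (around 2.6.24/2.6.25) from the sk_stream_* memory helpers to the generic per-socket accounting API: sk_stream_mem_reclaim() becomes sk_mem_reclaim(), sk_stream_wmem_schedule() becomes sk_wmem_schedule(), and sk_charge_skb()/sk_stream_free_skb() become sk_mem_charge()/sk_wmem_free_skb(). A hedged sketch of how the renamed calls fit together on a teardown path; sdp_purge_tx_example() is a hypothetical name:

/* Illustrative only: drop everything still queued for transmit and hand the
 * forward-allocated quota back to the protocol's memory pools.
 */
static void sdp_purge_tx_example(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);	/* uncharges wmem_queued and sk_mem */

	sk_mem_reclaim(sk);			/* return unused whole-page quota */
}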
struct ib_send_wr *bad_wr;
h->mid = mid;
- if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_URG))
+ if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG))
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
- if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_URG))
+ if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG))
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
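Editor's note: TCPCB_URG was dropped from the sacked bits upstream, so the patch now tests TCPCB_FLAG_URG in TCP_SKB_CB(skb)->flags instead. Summarizing the OOB flow from the hunk above, for reference only:

/* Informational summary, not part of the patch:
 *
 *   MSG_OOB send  ->  TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_URG
 *   TX post       ->  h->flags = SDP_OOB_PRES | SDP_OOB_PEND      (BSDH)
 *                 ->  ssk->tx_wr.send_flags |= IB_SEND_SOLICITED  (work request)
 */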
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
- skb = sk_stream_alloc_skb(&ssk->isk.sk, SDP_HEAD_SIZE,
+ skb = sdp_stream_alloc_skb(&ssk->isk.sk, SDP_HEAD_SIZE,
ssk->isk.sk.sk_allocation);
gfp_page = ssk->isk.sk.sk_allocation | __GFP_HIGHMEM;
} else {
- skb = sk_stream_alloc_skb(&ssk->isk.sk, SDP_HEAD_SIZE,
+ skb = sdp_stream_alloc_skb(&ssk->isk.sk, SDP_HEAD_SIZE,
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
if (likely(ssk->bufs > 1) &&
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
struct sk_buff *skb;
- skb = sk_stream_alloc_skb(&ssk->isk.sk,
+ skb = sdp_stream_alloc_skb(&ssk->isk.sk,
sizeof(struct sdp_bsdh),
GFP_KERNEL);
if (!skb)
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
- skb = sk_stream_alloc_skb(&ssk->isk.sk,
+ skb = sdp_stream_alloc_skb(&ssk->isk.sk,
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
- skb = sk_stream_alloc_skb(&ssk->isk.sk,
+ skb = sdp_stream_alloc_skb(&ssk->isk.sk,
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
likely(ssk->bufs > 1) &&
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- skb = sk_stream_alloc_skb(&ssk->isk.sk,
+ skb = sdp_stream_alloc_skb(&ssk->isk.sk,
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
- skb = sk_stream_alloc_skb(&ssk->isk.sk,
+ skb = sdp_stream_alloc_skb(&ssk->isk.sk,
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
skb = sdp_send_completion(ssk, wc->wr_id);
if (unlikely(!skb))
return;
- sk_stream_free_skb(&ssk->isk.sk, skb);
+ sk_wmem_free_skb(&ssk->isk.sk, skb);
if (unlikely(wc->status)) {
if (wc->status != IB_WC_WR_FLUSH_ERR) {
sdp_dbg(&ssk->isk.sk,
goto out;
sdp_poll_cq(ssk, cq);
release_sock(sk);
- sk_stream_mem_reclaim(sk);
+ sk_mem_reclaim(sk);
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
__kfree_skb(skb);
}
- sk_stream_mem_reclaim(sk);
+ sk_mem_reclaim(sk);
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
- TCP_SKB_CB(skb)->flags |= TCPCB_URG;
+ TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_URG;
}
}
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
- sk_charge_skb(sk, skb);
+ sk->sk_wmem_queued += skb->truesize;
+ sk_mem_charge(sk, skb->truesize);
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
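Editor's note: sk_charge_skb() no longer exists; its body is open-coded in the hunk above. If preferred, the two lines could be wrapped in a local helper; sdp_charge_skb() below is a hypothetical name, not part of the patch:

/* Illustrative only: one-for-one stand-in for the removed sk_charge_skb(). */
static inline void sdp_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_wmem_queued += skb->truesize;	/* bytes sitting in the write queue */
	sk_mem_charge(sk, skb->truesize);	/* consume forward-allocated quota */
}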
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
- if (!sk_stream_wmem_schedule(sk, copy))
+ if (!sk_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
if (!page) {
if (left <= this_page)
this_page = left;
- if (!sk_stream_wmem_schedule(sk, copy))
+ if (!sk_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
goto wait_for_sndbuf;
}
- skb = sk_stream_alloc_pskb(sk, select_size(sk, ssk),
- 0, sk->sk_allocation);
+ skb = sdp_stream_alloc_skb(sk, select_size(sk, ssk),
+ sk->sk_allocation);
if (!skb)
goto wait_for_memory;
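/* Editor's note on the hunk above: sk_stream_alloc_pskb(sk, size, mem, gfp)
 * took a fourth 'mem' argument (the 0 that is removed here), while
 * sdp_stream_alloc_skb() only takes (sk, size, gfp), so the argument is
 * dropped rather than carried over.
 */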
/* OOB data byte should be the last byte of
the data payload */
- if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_URG) &&
+ if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG) &&
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
- sk_stream_free_skb(sk, skb);
+ sk_wmem_free_skb(sk, skb);
}
do_error:
sdp_seq_afinfo.seq_fops->llseek = seq_lseek;
sdp_seq_afinfo.seq_fops->release = seq_release_private;
- p = proc_net_fops_create(sdp_seq_afinfo.name, S_IRUGO,
- sdp_seq_afinfo.seq_fops);
+ p = proc_net_fops_create(&init_net, sdp_seq_afinfo.name, S_IRUGO,
+ sdp_seq_afinfo.seq_fops);
if (p)
p->data = &sdp_seq_afinfo;