return NULL;
}
-static inline struct sk_buff *sdp_alloc_skb(struct sock *sk, u8 mid, int size)
+static inline struct sk_buff *sdp_alloc_skb(struct sock *sk, u8 mid, int size,
+ gfp_t gfp)
{
struct sdp_bsdh *h;
struct sk_buff *skb;
- gfp_t gfp;
- if (unlikely(sk->sk_allocation))
- gfp = sk->sk_allocation;
- else
- gfp = GFP_KERNEL;
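+ /* A gfp of 0 means: fall back to the socket's allocation mode, or GFP_KERNEL */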
+ if (!gfp) {
+ if (unlikely(sk->sk_allocation))
+ gfp = sk->sk_allocation;
+ else
+ gfp = GFP_KERNEL;
+ }
skb = sdp_stream_alloc_skb(sk, sizeof(struct sdp_bsdh) + size, gfp);
BUG_ON(!skb);
return skb;
}
-static inline struct sk_buff *sdp_alloc_skb_data(struct sock *sk)
+static inline struct sk_buff *sdp_alloc_skb_data(struct sock *sk, gfp_t gfp)
{
- return sdp_alloc_skb(sk, SDP_MID_DATA, 0);
+ return sdp_alloc_skb(sk, SDP_MID_DATA, 0, gfp);
}
-static inline struct sk_buff *sdp_alloc_skb_disconnect(struct sock *sk)
+static inline struct sk_buff *sdp_alloc_skb_disconnect(struct sock *sk,
+ gfp_t gfp)
{
- return sdp_alloc_skb(sk, SDP_MID_DISCONN, 0);
+ return sdp_alloc_skb(sk, SDP_MID_DISCONN, 0, gfp);
}
static inline struct sk_buff *sdp_alloc_skb_chrcvbuf_ack(struct sock *sk,
- int size)
+ int size, gfp_t gfp)
{
struct sk_buff *skb;
struct sdp_chrecvbuf *resp_size;
- skb = sdp_alloc_skb(sk, SDP_MID_CHRCVBUF_ACK, sizeof(*resp_size));
+ skb = sdp_alloc_skb(sk, SDP_MID_CHRCVBUF_ACK, sizeof(*resp_size), gfp);
resp_size = (struct sdp_chrecvbuf *)skb_put(skb, sizeof *resp_size);
resp_size->size = htonl(size);
return skb;
}
static inline struct sk_buff *sdp_alloc_skb_srcavail(struct sock *sk,
- u32 len, u32 rkey, u64 vaddr)
+ u32 len, u32 rkey, u64 vaddr, gfp_t gfp)
{
struct sk_buff *skb;
struct sdp_srcah *srcah;
- skb = sdp_alloc_skb(sk, SDP_MID_SRCAVAIL, sizeof(*srcah));
+ skb = sdp_alloc_skb(sk, SDP_MID_SRCAVAIL, sizeof(*srcah), gfp);
srcah = (struct sdp_srcah *)skb_put(skb, sizeof(*srcah));
srcah->len = htonl(len);
srcah->rkey = htonl(rkey);
srcah->vaddr = cpu_to_be64(vaddr);
return skb;
}
-static inline struct sk_buff *sdp_alloc_skb_srcavail_cancel(struct sock *sk)
+static inline struct sk_buff *sdp_alloc_skb_srcavail_cancel(struct sock *sk,
+ gfp_t gfp)
{
- return sdp_alloc_skb(sk, SDP_MID_SRCAVAIL_CANCEL, 0);
+ return sdp_alloc_skb(sk, SDP_MID_SRCAVAIL_CANCEL, 0, gfp);
}
static inline struct sk_buff *sdp_alloc_skb_rdmardcompl(struct sock *sk,
- u32 len)
+ u32 len, gfp_t gfp)
{
struct sk_buff *skb;
struct sdp_rrch *rrch;
- skb = sdp_alloc_skb(sk, SDP_MID_RDMARDCOMPL, sizeof(*rrch));
+ skb = sdp_alloc_skb(sk, SDP_MID_RDMARDCOMPL, sizeof(*rrch), gfp);
rrch = (struct sdp_rrch *)skb_put(skb, sizeof(*rrch));
rrch->len = htonl(len);
return skb;
}
-static inline struct sk_buff *sdp_alloc_skb_sendsm(struct sock *sk)
+static inline struct sk_buff *sdp_alloc_skb_sendsm(struct sock *sk, gfp_t gfp)
{
- return sdp_alloc_skb(sk, SDP_MID_SENDSM, 0);
+ return sdp_alloc_skb(sk, SDP_MID_SENDSM, 0, gfp);
}
static inline int sdp_tx_ring_slots_left(struct sdp_sock *ssk)
{
void sdp_tx_ring_destroy(struct sdp_sock *ssk);
int sdp_xmit_poll(struct sdp_sock *ssk, int force);
void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb);
-void sdp_post_sends(struct sdp_sock *ssk, int nonagle);
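+/* gfp selects the allocation mode for any skbs posted; 0 means use the socket default */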
+void sdp_post_sends(struct sdp_sock *ssk, gfp_t gfp);
void sdp_nagle_timeout(unsigned long data);
void sdp_post_keepalive(struct sdp_sock *ssk);
}
ssk->nagle_last_unacked = 0;
- sdp_post_sends(ssk, 0);
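+ /* Timer callbacks run in atomic (softirq) context, so sleeping allocations are not allowed */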
+ sdp_post_sends(ssk, GFP_ATOMIC);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
sk_stream_write_space(&ssk->isk.sk);
mod_timer(&ssk->nagle_timer, jiffies + SDP_NAGLE_TIMEOUT);
}
-void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+void sdp_post_sends(struct sdp_sock *ssk, gfp_t gfp)
{
/* TODO: nonagle? */
struct sk_buff *skb;
int c;
- gfp_t gfp_page;
int post_count = 0;
struct sock *sk = &ssk->isk.sk;
return;
}
- if (unlikely(ssk->isk.sk.sk_allocation))
- gfp_page = ssk->isk.sk.sk_allocation;
- else
- gfp_page = GFP_KERNEL;
-
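+ /* The gfp fallback is now handled inside sdp_alloc_skb() */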
if (sdp_tx_ring_slots_left(ssk) < SDP_TX_SIZE / 2) {
int wc_processed = sdp_xmit_poll(ssk, 1);
sdp_dbg_data(&ssk->isk.sk, "freed %d\n", wc_processed);
ssk->recv_request = 0;
skb = sdp_alloc_skb_chrcvbuf_ack(sk,
- ssk->recv_frags * PAGE_SIZE);
+ ssk->recv_frags * PAGE_SIZE, gfp);
sdp_post_send(ssk, skb);
post_count++;
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
- skb = sdp_alloc_skb_data(&ssk->isk.sk);
+ skb = sdp_alloc_skb_data(&ssk->isk.sk, gfp);
sdp_post_send(ssk, skb);
SDPSTATS_COUNTER_INC(post_send_credits);
tx_credits(ssk) > 1) {
ssk->sdp_disconnect = 0;
- skb = sdp_alloc_skb_disconnect(sk);
+ skb = sdp_alloc_skb_disconnect(sk, gfp);
sdp_post_send(ssk, skb);
post_count++;
tx_sa->bytes_sent = tx_sa->bytes_acked = 0;
- skb = sdp_alloc_skb_srcavail(sk, len, tx_sa->fmr->fmr->lkey, off);
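+ /* gfp 0: defer to the socket's allocation mode (see sdp_alloc_skb) */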
+ skb = sdp_alloc_skb_srcavail(sk, len, tx_sa->fmr->fmr->lkey, off, 0);
if (!skb) {
return -ENOMEM;
}
sdp_warn(&ssk->isk.sk, "Posting srcavail cancel\n");
- skb = sdp_alloc_skb_srcavail_cancel(sk);
+ skb = sdp_alloc_skb_srcavail_cancel(sk, 0);
skb_entail(sk, ssk, skb);
- sdp_post_sends(ssk, 1);
+ sdp_post_sends(ssk, 0);
schedule_delayed_work(&ssk->srcavail_cancel_work,
SDP_SRCAVAIL_CANCEL_TIMEOUT);
if (rx_sa->used <= rx_sa->reported)
return 0;
- skb = sdp_alloc_skb_rdmardcompl(&ssk->isk.sk, copied);
+ skb = sdp_alloc_skb_rdmardcompl(&ssk->isk.sk, copied, 0);
rx_sa->reported += copied;
int sdp_post_sendsm(struct sock *sk)
{
- struct sk_buff *skb = sdp_alloc_skb_sendsm(sk);
+ struct sk_buff *skb = sdp_alloc_skb_sendsm(sk, 0);
sdp_post_send(sdp_sk(sk), skb);