#define SDP_MAX_RDMA_READ_LEN (PAGE_SIZE * (SDP_FMR_SIZE - 2))
-#define SDP_MAX_RECV_SKB_FRAGS (PAGE_SIZE > 0x8000 ? 1 : 0x8000 / PAGE_SIZE)
-#define SDP_MAX_SEND_SKB_FRAGS (SDP_MAX_RECV_SKB_FRAGS + 1)
+/* Fixed per-WR SGE budget: 1 SGE is reserved for the sdp_bsdh header,
+ * leaving up to 16 page-sized data fragments (see sdp_get_recv_sges /
+ * sdp_get_max_send_frags, which subtract 1 from these caps).
+ */
+#define SDP_MAX_RECV_SGES 17
+#define SDP_MAX_SEND_SGES 17
/* skb inlined data len - rest will be rx'ed into frags */
#define SDP_SKB_HEAD_SIZE (0x500 + sizeof(struct sdp_bsdh))
struct sdp_buf {
struct sk_buff *skb;
- u64 mapping[SDP_MAX_SEND_SKB_FRAGS + 1];
+ /* one DMA address per SGE posted for this buffer (header + data frags) */
+ u64 mapping[SDP_MAX_SEND_SGES];
} __attribute__((__packed__));
struct sdp_chrecvbuf {
struct tx_srcavail_state *tx_sa;
struct rx_srcavail_state *rx_sa;
spinlock_t tx_sa_lock;
- int max_send_sge;
struct delayed_work srcavail_cancel_work;
int srcavail_cancel_mseq;
.event_handler = sdp_qp_event_handler,
.cap.max_send_wr = SDP_TX_SIZE,
.cap.max_recv_wr = SDP_RX_SIZE,
- .cap.max_recv_sge = SDP_MAX_RECV_SKB_FRAGS + 1,
.sq_sig_type = IB_SIGNAL_REQ_WR,
.qp_type = IB_QPT_RC,
};
sdp_dbg(sk, "%s\n", __func__);
sdp_sk(sk)->max_sge = sdp_get_max_send_sge(device);
- if (sdp_sk(sk)->max_sge < (SDP_MAX_RECV_SKB_FRAGS + 1)) {
- sdp_warn(sk, "recv sge's. capability: %d needed: %ld\n",
- sdp_sk(sk)->max_sge, SDP_MAX_RECV_SKB_FRAGS + 1);
- rc = -ENOMEM;
- goto err_rx;
- }
+ sdp_dbg(sk, "Max sges: %d\n", sdp_sk(sk)->max_sge);
- qp_init_attr.cap.max_send_sge = sdp_sk(sk)->max_sge;
- sdp_dbg(sk, "Setting max send sge to: %d\n", sdp_sk(sk)->max_sge);
+ qp_init_attr.cap.max_send_sge = MIN(sdp_sk(sk)->max_sge, SDP_MAX_SEND_SGES);
+ sdp_dbg(sk, "Setting max send sge to: %d\n", qp_init_attr.cap.max_send_sge);
+
+ qp_init_attr.cap.max_recv_sge = MIN(sdp_sk(sk)->max_sge, SDP_MAX_RECV_SGES);
+ sdp_dbg(sk, "Setting max recv sge to: %d\n", qp_init_attr.cap.max_recv_sge);
sdp_sk(sk)->sdp_dev = ib_get_client_data(device, &sdp_client);
if (!sdp_sk(sk)->sdp_dev) {
return rc;
}
+/*
+ * Number of page-sized fragments needed to send a buffer of buf_size
+ * bytes, capped so that one SGE always remains free for the SDP header
+ * (hence SDP_MAX_SEND_SGES - 1).
+ */
+static int sdp_get_max_send_frags(u32 buf_size)
+{
+	return MIN(
+		/* +1 to compensate for non page-aligned buffers */
+		(PAGE_ALIGN(buf_size) >> PAGE_SHIFT) + 1,
+		SDP_MAX_SEND_SGES - 1);
+}
+
static int sdp_connect_handler(struct sock *sk, struct rdma_cm_id *id,
struct rdma_cm_event *event)
{
sdp_sk(child)->min_bufs = tx_credits(sdp_sk(child)) / 4;
sdp_sk(child)->xmit_size_goal = ntohl(h->localrcvsz) -
sizeof(struct sdp_bsdh);
- sdp_sk(child)->send_frags = PAGE_ALIGN(sdp_sk(child)->xmit_size_goal) /
- PAGE_SIZE + 1; /* +1 to conpensate on not aligned buffers */
+
+ sdp_sk(child)->send_frags = sdp_get_max_send_frags(sdp_sk(child)->xmit_size_goal);
sdp_init_buffers(sdp_sk(child), rcvbuf_initial_size);
id->context = child;
sdp_sk(sk)->min_bufs = tx_credits(sdp_sk(sk)) / 4;
sdp_sk(sk)->xmit_size_goal =
ntohl(h->actrcvsz) - sizeof(struct sdp_bsdh);
- sdp_sk(sk)->send_frags = MIN(PAGE_ALIGN(sdp_sk(sk)->xmit_size_goal) /
- PAGE_SIZE, MAX_SKB_FRAGS) + 1; /* +1 to conpensate on not */
- /* aligned buffers */
+ sdp_sk(sk)->send_frags = sdp_get_max_send_frags(sdp_sk(sk)->xmit_size_goal);
sdp_sk(sk)->xmit_size_goal = MIN(sdp_sk(sk)->xmit_size_goal,
sdp_sk(sk)->send_frags * PAGE_SIZE);
u64 addr;
struct ib_device *dev;
struct ib_recv_wr rx_wr = { NULL };
- struct ib_sge ibsge[SDP_MAX_RECV_SKB_FRAGS + 1];
+ struct ib_sge ibsge[SDP_MAX_RECV_SGES];
struct ib_sge *sge = ibsge;
struct ib_recv_wr *bad_wr;
struct sk_buff *skb;
return skb;
}
+/*
+ * Number of data SGEs to post for a receive buffer of new_size bytes:
+ * bounded by the device capability (minus the SGE dedicated to the
+ * sdp_bsdh header), the pages needed for new_size, and the compile-time
+ * cap SDP_MAX_RECV_SGES - 1.
+ *
+ * NOTE(review): unlike the code this replaces, new_size is not reduced
+ * by SDP_SKB_HEAD_SIZE before the page conversion -- confirm intended
+ * (over-estimates by at most one page, which is conservative).
+ */
+static int sdp_get_recv_sges(struct sdp_sock *ssk, u32 new_size)
+{
+ int recv_sges = ssk->max_sge - 1; /* 1 sge is dedicated to sdp header */
+
+ recv_sges = MIN(recv_sges, PAGE_ALIGN(new_size) >> PAGE_SHIFT);
+ recv_sges = MIN(recv_sges, SDP_MAX_RECV_SGES - 1);
+
+ return recv_sges;
+}
+
int sdp_init_buffers(struct sdp_sock *ssk, u32 new_size)
{
- ssk->recv_frags = PAGE_ALIGN(new_size - SDP_SKB_HEAD_SIZE) / PAGE_SIZE;
- if (ssk->recv_frags > SDP_MAX_RECV_SKB_FRAGS)
- ssk->recv_frags = SDP_MAX_RECV_SKB_FRAGS;
+ ssk->recv_frags = sdp_get_recv_sges(ssk, new_size);
ssk->rcvbuf_scale = rcvbuf_scale;
sdp_post_recvs(ssk);
int sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size)
{
- u32 curr_size = SDP_SKB_HEAD_SIZE + ssk->recv_frags * PAGE_SIZE;
- u32 max_size = SDP_SKB_HEAD_SIZE + SDP_MAX_RECV_SKB_FRAGS * PAGE_SIZE;
+ /* NOTE(review): curr_size/max_size no longer include the
+  * SDP_SKB_HEAD_SIZE term the removed code added, while new_size
+  * presumably still counts the whole requested rcvbuf -- confirm the
+  * resize threshold below is intentionally slightly stricter now.
+  */
+ u32 curr_size = ssk->recv_frags << PAGE_SHIFT;
+ u32 max_size = (ssk->max_sge - 1) << PAGE_SHIFT;
if (new_size > curr_size && new_size <= max_size &&
sdp_get_large_socket(ssk)) {
ssk->rcvbuf_scale = rcvbuf_scale;
- ssk->recv_frags = PAGE_ALIGN(new_size - SDP_SKB_HEAD_SIZE) /
- PAGE_SIZE;
- if (ssk->recv_frags > SDP_MAX_RECV_SKB_FRAGS)
- ssk->recv_frags = SDP_MAX_RECV_SKB_FRAGS;
+ ssk->recv_frags = sdp_get_recv_sges(ssk, new_size);
return 0;
} else
return -1;