/* Cap the receive s/g list at 32KB worth of frag pages; collapses to a
 * single frag when PAGE_SIZE itself exceeds 32KB. */
#define SDP_MAX_RECV_SKB_FRAGS (PAGE_SIZE > 0x8000 ? 1 : 0x8000 / PAGE_SIZE)
/* One frag more than the recv side -- NOTE(review): presumably to cover a
 * header/head fragment on transmit; confirm against the tx path. */
#define SDP_MAX_SEND_SKB_FRAGS (SDP_MAX_RECV_SKB_FRAGS + 1)
/* Removed: the hard-coded send-SGE limit is superseded by the per-device
 * capability queried via sdp_get_max_send_sge(). */
-#define SDP_MAX_SEND_SGES 32
/* payload len - rest will be rx'ed into frags */
#define SDP_HEAD_SIZE (PAGE_SIZE / 2 + sizeof(struct sdp_bsdh))
struct delayed_work srcavail_cancel_work;
int srcavail_cancel_mseq;
+ int max_sge;
+
struct work_struct rx_comp_work;
wait_queue_head_t wq;
{
}
+/*
+ * sdp_get_max_send_sge() - max s/g entries per send WR supported by the HCA.
+ *
+ * Queries the device once and caches the answer in a static; subsequent
+ * calls return the cached value without touching the device.
+ * NOTE(review): the cache is global, so on hosts with heterogeneous HCAs
+ * every device inherits the first device's limit -- confirm whether a
+ * per-device value is required.
+ *
+ * Returns the device's max_sge, or -1 if no successful query has been
+ * made yet (the caller's "capability < needed" check then rejects the
+ * device instead of using garbage).
+ */
+static int sdp_get_max_send_sge(struct ib_device *dev)
+{
+	struct ib_device_attr attr;
+	static int max_sges = -1;
+
+	if (max_sges > 0)
+		goto out;
+
+	/* Only trust attr.max_sge if the query succeeded: on failure attr
+	 * is uninitialized, and the old code cached that garbage forever. */
+	if (!ib_query_device(dev, &attr))
+		max_sges = attr.max_sge;
+
+out:
+	return max_sges;
+}
+
static int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
{
struct ib_qp_init_attr qp_init_attr = {
.event_handler = sdp_qp_event_handler,
.cap.max_send_wr = SDP_TX_SIZE,
- .cap.max_send_sge = SDP_MAX_SEND_SGES,
.cap.max_recv_wr = SDP_RX_SIZE,
.cap.max_recv_sge = SDP_MAX_RECV_SKB_FRAGS + 1,
.sq_sig_type = IB_SIGNAL_REQ_WR,
sdp_dbg(sk, "%s\n", __func__);
+ sdp_sk(sk)->max_sge = sdp_get_max_send_sge(device);
+ if (sdp_sk(sk)->max_sge < (SDP_MAX_RECV_SKB_FRAGS + 1)) {
+ sdp_warn(sk, "recv sge's. capability: %d needed: %ld\n",
+ sdp_sk(sk)->max_sge, SDP_MAX_RECV_SKB_FRAGS + 1);
+ rc = -ENOMEM;
+ goto err_tx;
+ }
+
+ qp_init_attr.cap.max_send_sge = sdp_sk(sk)->max_sge;
+ sdp_dbg(sk, "Setting max send sge to: %d\n", sdp_sk(sk)->max_sge);
+
sdp_sk(sk)->sdp_dev = ib_get_client_data(device, &sdp_client);
rc = sdp_rx_ring_create(sdp_sk(sk), device);
sge_left = rx_sa->page_cnt;
do {
/* Len error when using sge_cnt > 30 ?? */
- int sge_cnt = min(sge_left, SDP_MAX_SEND_SGES - 2);
+ int sge_cnt = min(sge_left, ssk->max_sge - 2);
wr.wr.rdma.remote_addr = rx_sa->vaddr + copied + rx_sa->used;
wr.num_sge = sge_cnt;