len += snprintf(buf + len, 255-len, " | payload: 0x%zx, "
"len: 0x%zx, rkey: 0x%x, vaddr: 0x%llx |",
- ntohl(h->len) - sizeof(struct sdp_bsdh) -
+ ntohl(h->len) - sizeof(struct sdp_bsdh) -
sizeof(struct sdp_srcah),
ntohl(srcah->len), ntohl(srcah->rkey),
be64_to_cpu(srcah->vaddr));
sdp_tx_ring_slots_left(ssk)) {
ssk->recv_request = 0;
- skb = sdp_alloc_skb_chrcvbuf_ack(sk,
+ skb = sdp_alloc_skb_chrcvbuf_ack(sk,
ssk->recv_frags * PAGE_SIZE, gfp);
sdp_post_send(ssk, skb);
qp_init_attr.cap.max_send_sge = MIN(sdp_sk(sk)->max_sge, SDP_MAX_SEND_SGES);
sdp_dbg(sk, "Setting max send sge to: %d\n", qp_init_attr.cap.max_send_sge);
-
+
qp_init_attr.cap.max_recv_sge = MIN(sdp_sk(sk)->max_sge, SDP_MAX_RECV_SGES);
sdp_dbg(sk, "Setting max recv sge to: %d\n", qp_init_attr.cap.max_recv_sge);
-
+
sdp_sk(sk)->sdp_dev = ib_get_client_data(device, &sdp_client);
if (!sdp_sk(sk)->sdp_dev) {
sdp_warn(sk, "SDP not available on device %s\n", device->name);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
if (sdp_link_layer_ib_only &&
- rdma_node_get_transport(id->device->node_type) ==
+ rdma_node_get_transport(id->device->node_type) ==
RDMA_TRANSPORT_IB &&
rdma_port_link_layer(id->device, id->port_num) !=
IB_LINK_LAYER_INFINIBAND) {
struct sdp_sock *ssk =
container_of(work, struct sdp_sock, cma_timewait_work.work);
struct sock *sk = &ssk->isk.sk;
-
+
lock_sock(sk);
if (!ssk->cma_timewait_timeout) {
release_sock(sk);
sock_hold(sk, SOCK_REF_CMA);
sdp_start_cma_timewait_timeout(sdp_sk(sk), SDP_CMA_TIMEWAIT_TIMEOUT);
-
+
/* We need to flush the recv. buffs. We do this only on the
* descriptor close, not protocol-sourced closes, because the
* reader process may not have drained the data yet!
struct sdp_sock *ssk =
container_of(work, struct sdp_sock, dreq_wait_work.work);
struct sock *sk = &ssk->isk.sk;
-
+
if (!ssk->dreq_wait_timeout)
goto out;
if (credits_needed) {
sk_wait_event(sk, &current_timeo,
- !sk->sk_err &&
+ !sk->sk_err &&
!(sk->sk_shutdown & SEND_SHUTDOWN) &&
!ssk->tx_compl_pending &&
tx_slots_free(ssk) >= *credits_needed &&
vm_wait);
} else {
sk_wait_event(sk, &current_timeo,
- !sk->sk_err &&
+ !sk->sk_err &&
!(sk->sk_shutdown & SEND_SHUTDOWN) &&
!ssk->tx_compl_pending &&
sk_stream_memory_free(sk) &&
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto do_error;
- for (i = 0; i < msg->msg_iovlen; i++) {
+ for (i = 0; i < msg->msg_iovlen; i++) {
struct iovec *iov = &msg->msg_iov[i];
int seglen = iov->iov_len;
char __user *from = iov->iov_base;
err = zcopied;
goto out_err;
}
-
+
copied += zcopied;
seglen = iov->iov_len;
from = iov->iov_base;
break;
case SDP_MID_DATA:
-sdp_mid_data:
+sdp_mid_data:
rx_sa = NULL;
avail_bytes_count = skb->len;
break;
kfree(rx_sa);
rx_sa = NULL;
-
+
}
force_skb_cleanup:
sdp_dbg_data(sk, "unlinking skb %p\n", skb);
#define _kzalloc(size,flags) kzalloc(size,flags)
#undef kzalloc
s = kzalloc(sizeof(*s), GFP_KERNEL);
-#define kzalloc(s,f) _kzalloc(s,f)
+#define kzalloc(s,f) _kzalloc(s,f)
if (!s)
return -ENOMEM;
s->family = afinfo->family;
if (first == -1 && h[i])
first = i;
-
+
if (h[i])
last = i;
}
for_each_possible_cpu(__i) \
__val += per_cpu(sdpstats, __i).var; \
__val; \
-})
+})
#define SDPSTATS_HIST_GET(hist, hist_len, sum) ({ \
unsigned int __i; \
#endif
proc_net_remove(&init_net, sdp_seq_afinfo.name);
-no_mem:
+no_mem:
return -ENOMEM;
}
if (h->mid == SDP_MID_SRCAVAIL) {
struct sdp_srcah *srcah = (struct sdp_srcah *)(h+1);
struct rx_srcavail_state *rx_sa;
-
+
ssk->srcavail_cancel_mseq = 0;
ssk->rx_sa = rx_sa = RX_SRCAVAIL_STATE(skb) = kzalloc(
if (ssk->rx_sa) {
ssk->srcavail_cancel_mseq = ntohl(h->mseq);
ssk->rx_sa->flags |= RX_SA_ABORTED;
- ssk->rx_sa = NULL; /* TODO: change it into SDP_MID_DATA and get
+ ssk->rx_sa = NULL; /* TODO: change it into SDP_MID_DATA and get
the dirty logic from recvmsg */
} else {
sdp_dbg(sk, "Got SrcAvailCancel - "
if (posts_handler(ssk) || (sk->sk_socket &&
test_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags))) {
- sdp_prf(&ssk->isk.sk, NULL,
+ sdp_prf(&ssk->isk.sk, NULL,
"Somebody is doing the post work for me. %d",
posts_handler(ssk));
return;
err:
- __kfree_skb(skb);
+ __kfree_skb(skb);
}
static struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq)
if (wc_processed) {
struct sock *sk = &ssk->isk.sk;
sdp_post_sends(ssk, GFP_ATOMIC);
- sdp_prf1(sk, NULL, "Waking sendmsg. inflight=%d",
+ sdp_prf1(sk, NULL, "Waking sendmsg. inflight=%d",
(u32) tx_ring_posted(ssk));
sk_stream_write_space(&ssk->isk.sk);
if (sk->sk_write_pending &&
* - a write is pending - wake it up and let it do the poll + post
* - post handler is taken - taker will do the poll + post
* else return 1 and let the caller do it
- */
+ */
static int sdp_tx_handler_select(struct sdp_sock *ssk)
{
struct sock *sk = &ssk->isk.sk;
/* Somebody else available to check for completion */
sdp_prf1(sk, NULL, "Somebody else will call do_posts");
return 0;
- }
+ }
return 1;
}
struct sock *sk = &ssk->isk.sk;
u32 inflight, wc_processed;
- sdp_prf1(&ssk->isk.sk, NULL, "TX timeout: inflight=%d, head=%d tail=%d",
+ sdp_prf1(&ssk->isk.sk, NULL, "TX timeout: inflight=%d, head=%d tail=%d",
(u32) tx_ring_posted(ssk),
ring_head(ssk->tx_ring), ring_tail(ssk->tx_ring));
#include <rdma/rdma_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>
-#include <rdma/ib_umem.h>
+#include <rdma/ib_umem.h>
#include <net/tcp.h> /* for memcpy_toiovec */
#include <asm/io.h>
#include <asm/uaccess.h>
return -ENOMEM;
}
sdp_dbg_data(sk, "sending SrcAvail\n");
-
- TX_SRCAVAIL_STATE(skb) = tx_sa; /* tx_sa is hanged on the skb
+
+ TX_SRCAVAIL_STATE(skb) = tx_sa; /* tx_sa is hanged on the skb
* but continue to live after skb is freed */
ssk->tx_sa = tx_sa;
skb->truesize += payload_len;
sdp_skb_entail(sk, skb);
-
+
ssk->write_seq += payload_len;
SDP_SKB_CB(skb)->end_seq += payload_len;
sk_wait_event(sk, &current_timeo,
tx_sa->abort_flags &&
ssk->rx_sa &&
- (tx_sa->bytes_acked < tx_sa->bytes_sent) &&
+ (tx_sa->bytes_acked < tx_sa->bytes_sent) &&
vm_wait);
sdp_prf(&ssk->isk.sk, NULL, "woke up sleepers");
posts_handler_put(ssk);
sdp_prf1(sk, NULL, "Going to sleep");
- sk_wait_event(sk, &timeo,
+ sk_wait_event(sk, &timeo,
!ssk->tx_ring.rdma_inflight->busy);
sdp_prf1(sk, NULL, "Woke up");
sdp_dbg_data(&ssk->isk.sk, "woke up sleepers\n");
return 0;
-err_fmr_alloc:
+err_fmr_alloc:
free_page((unsigned long) pages);
err_pages_alloc:
sdp_free_fmr(sk, &tx_sa->fmr, &tx_sa->umem);
err_alloc_fmr:
- return rc;
+ return rc;
}
int sdp_sendmsg_zcopy(struct kiocb *iocb, struct sock *sk, struct iovec *iov)