void sdp_handle_disconn(struct sock *sk);
/* sdp_zcopy.c */
-int sdp_sendmsg_zcopy(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t size);
+int sdp_sendmsg_zcopy(struct kiocb *iocb, struct sock *sk, struct iovec *iov);
int sdp_handle_srcavail(struct sdp_sock *ssk, struct sdp_srcah *srcah);
void sdp_handle_sendsm(struct sdp_sock *ssk, u32 mseq_ack);
void sdp_handle_rdma_read_compl(struct sdp_sock *ssk, u32 mseq_ack,
static int sdp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t size)
{
- struct iovec *iov;
+ int i;
struct sdp_sock *ssk = sdp_sk(sk);
struct sk_buff *skb;
int iovlen, flags;
int size_goal;
int err, copied;
- int zcopied = 0;
long timeo;
struct bzcopy_state *bz = NULL;
SDPSTATS_COUNTER_INC(sendmsg);
- if (sdp_zcopy_thresh && size > sdp_zcopy_thresh && size > SDP_MIN_ZCOPY_THRESH) {
- zcopied = sdp_sendmsg_zcopy(iocb, sk, msg, size);
-
- if (zcopied < 0) {
- sdp_dbg_data(sk, "ZCopy send err: %d\n", zcopied);
- return zcopied;
- } else if (size == zcopied) {
- return size;
- }
-
- sdp_dbg_data(sk, "Got SendSM/Timedout - fallback to regular send\n");
-
- sdp_dbg_data(sk, "ZCopied: 0x%x\n", zcopied);
- sdp_dbg_data(sk, "Continue to BCopy 0x%lx bytes left\n", size - zcopied);
- }
-
lock_sock(sk);
- sdp_dbg_data(sk, "%s\n", __func__);
+ sdp_dbg_data(sk, "%s size = 0x%lx\n", __func__, size);
posts_handler_get(ssk);
/* Ok commence sending. */
iovlen = msg->msg_iovlen;
- iov = msg->msg_iov;
copied = 0;
err = -EPIPE;
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto do_error;
- while (--iovlen >= 0) {
+ for (i = 0; i < msg->msg_iovlen; i++) {
+ struct iovec *iov = &msg->msg_iov[i];
int seglen = iov->iov_len;
char __user *from = iov->iov_base;
- iov++;
+ sdp_dbg_data(sk, "Sending iov: %d/%ld %p\n", i, msg->msg_iovlen, from);
+
+ SDPSTATS_HIST(sendmsg_seglen, seglen);
- /* Limmiting the size_goal is reqired when using 64K pages*/
+ if (sdp_zcopy_thresh && seglen > sdp_zcopy_thresh &&
+ seglen > SDP_MIN_ZCOPY_THRESH) {
+ int zcopied = 0;
+
+ zcopied = sdp_sendmsg_zcopy(iocb, sk, iov);
+
+ if (zcopied < 0) {
+ sdp_dbg_data(sk, "ZCopy send err: %d\n", zcopied);
+ err = zcopied;
+ goto out_err;
+ }
+
+ copied += zcopied;
+ seglen = iov->iov_len;
+ from = iov->iov_base;
+
+ sdp_dbg_data(sk, "ZCopied: 0x%x/0x%x\n", zcopied, seglen);
+ }
+
+ /* Limiting the size_goal is required when using 64K pages */
if (size_goal > SDP_MAX_PAYLOAD)
size_goal = SDP_MAX_PAYLOAD;
- SDPSTATS_HIST(sendmsg_seglen, seglen);
-
if (bz)
sdp_bz_cleanup(bz);
bz = sdp_bz_setup(ssk, from, seglen, size_goal);
from += copy;
copied += copy;
- if ((seglen -= copy) == 0 && iovlen == 0)
+ seglen -= copy;
+ if (seglen == 0 && iovlen == 0)
goto out;
- if (skb->len < PAGE_SIZE || (flags & MSG_OOB))
- continue;
continue;
wait_for_sndbuf:
release_sock(sk);
- sdp_dbg_data(sk, "BCopied: 0x%x\n", copied);
- return copied + zcopied;
+ sdp_dbg_data(sk, "copied: 0x%x\n", copied);
+ return copied;
do_fault:
sdp_prf(sk, skb, "prepare fault");
return rc;
}
-int sdp_sendmsg_zcopy(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t size)
+int sdp_sendmsg_zcopy(struct kiocb *iocb, struct sock *sk, struct iovec *iov)
{
struct sdp_sock *ssk = sdp_sk(sk);
- int iovlen, flags;
- struct iovec *iov = NULL;
int rc = 0;
long timeo;
struct tx_srcavail_state *tx_sa;
size_t bytes_to_copy = 0;
int copied = 0;
- sdp_dbg_data(sk, "%s\n", __func__);
+ sdp_dbg_data(sk, "Sending iov: %p, iov_len: 0x%lx\n",
+ iov->iov_base, iov->iov_len);
sdp_prf1(sk, NULL, "sdp_sendmsg_zcopy start");
if (ssk->rx_sa) {
sdp_dbg_data(sk, "Deadlock prevent: crossing SrcAvail\n");
return 0;
}
- lock_sock(sk);
sock_hold(&ssk->isk.sk, SOCK_REF_ZCOPY);
SDPSTATS_COUNTER_INC(sendmsg_zcopy_segment);
- posts_handler_get(ssk);
-
- flags = msg->msg_flags;
-
- iovlen = msg->msg_iovlen;
- iov = msg->msg_iov;
-
timeo = SDP_SRCAVAIL_ADV_TIMEOUT ;
- /* Wait for a connection to finish. */
- if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
- if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
- goto err;
-
- /* This should be in poll */
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-
/* Ok commence sending. */
offset = (unsigned long)iov->iov_base & (PAGE_SIZE - 1);
- sdp_dbg_data(sk, "Sending iov: %p, iovlen: 0x%lx, size: 0x%lx\n",
- iov->iov_base, iov->iov_len, size);
-
- SDPSTATS_HIST(sendmsg_seglen, iov->iov_len);
-
- if (iovlen > 1) {
- sdp_warn(sk, "iovlen > 1 not supported\n");
- rc = -ENOTSUPP;
- goto err;
- }
-
- if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
- rc = -EPIPE;
- goto err;
- }
tx_sa = kmalloc(sizeof(struct tx_srcavail_state), GFP_KERNEL);
if (!tx_sa) {
rc = do_sdp_sendmsg_zcopy(sk, tx_sa, iov, &timeo);
- if (iov->iov_len < sdp_zcopy_thresh) {
+ if (iov->iov_len && iov->iov_len < sdp_zcopy_thresh) {
sdp_dbg_data(sk, "0x%lx bytes left, switching to bcopy\n",
iov->iov_len);
break;
kfree(tx_sa);
err_alloc_tx_sa:
-err:
copied = bytes_to_copy - iov->iov_len;
sdp_prf1(sk, NULL, "sdp_sendmsg_zcopy end rc: %d copied: %d", rc, copied);
- posts_handler_put(ssk);
-
- release_sock(sk);
sock_put(&ssk->isk.sk, SOCK_REF_ZCOPY);