void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
-int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
- size_t size);
+int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
struct ib_wc *wc,
struct rds_ib_ack_state *state);
unsigned long *flag;
preempt_disable();
- flag = &__get_cpu_var(clean_list_grace);
+ flag = this_cpu_ptr(&clean_list_grace);
set_bit(CLEAN_LIST_BUSY_BIT, flag);
ret = xlist_del_head(&pool->clean_list);
if (ret)
return head;
}
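The &__get_cpu_var() accessor was removed from the kernel; this_cpu_ptr(&var) yields the same address of the current CPU's instance of a per-CPU variable, taken directly on a pointer. A minimal sketch of the replacement pattern, using a hypothetical per-CPU flag word (not part of this patch):

#include <linux/percpu.h>
#include <linux/bitops.h>

static DEFINE_PER_CPU(unsigned long, example_grace);	/* hypothetical per-CPU flag word */

static void example_enter_grace(void)
{
	unsigned long *flag;

	preempt_disable();			/* stay on this CPU while the pointer is live */
	flag = this_cpu_ptr(&example_grace);	/* replaces &__get_cpu_var(example_grace) */
	set_bit(0, flag);
	/* ... critical section ... */
	clear_bit(0, flag);
	preempt_enable();
}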
-int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
- size_t size)
+int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
struct rds_ib_incoming *ibinc;
struct rds_page_frag *frag;
- struct iovec *iov = first_iov;
unsigned long to_copy;
unsigned long frag_off = 0;
- unsigned long iov_off = 0;
int copied = 0;
int ret;
u32 len;
frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
len = be32_to_cpu(inc->i_hdr.h_len);
- while (copied < size && copied < len) {
+ while (iov_iter_count(to) && copied < len) {
if (frag_off == RDS_FRAG_SIZE) {
frag = list_entry(frag->f_item.next,
struct rds_page_frag, f_item);
frag_off = 0;
}
- while (iov_off == iov->iov_len) {
- iov_off = 0;
- iov++;
- }
-
- to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
- to_copy = min_t(size_t, to_copy, size - copied);
+ to_copy = min_t(unsigned long, iov_iter_count(to),
+ RDS_FRAG_SIZE - frag_off);
to_copy = min_t(unsigned long, to_copy, len - copied);
- rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
- "[%p, %u] + %lu\n",
- to_copy, iov->iov_base, iov->iov_len, iov_off,
- sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);
-
/* XXX needs + offset for multiple recvs per page */
- ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
- frag->f_sg.offset + frag_off,
- iov->iov_base + iov_off,
- to_copy);
- if (ret) {
- copied = ret;
- break;
- }
+ rds_stats_add(s_copy_to_user, to_copy);
+ ret = copy_page_to_iter(sg_page(&frag->f_sg),
+ frag->f_sg.offset + frag_off,
+ to_copy,
+ to);
+ if (ret != to_copy)
+ return -EFAULT;
- iov_off += to_copy;
frag_off += to_copy;
copied += to_copy;
}
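With an iov_iter the per-iovec bookkeeping (iov, iov_off, the explicit size argument) disappears: copy_page_to_iter() pulls data out of a page, advances the iterator itself, and returns the number of bytes it actually copied, so a short return means the user buffer faulted. A minimal sketch of that consumer-side pattern, with a hypothetical helper name:

#include <linux/uio.h>
#include <linux/mm.h>

/* Copy one fragment's worth of data into @to; illustration only, not part of
 * the patch.  copy_page_to_iter() advances @to on its own. */
static int example_frag_to_iter(struct page *page, unsigned long frag_off,
				unsigned long bytes, struct iov_iter *to)
{
	size_t n = copy_page_to_iter(page, frag_off, bytes, to);

	return n == bytes ? 0 : -EFAULT;	/* short copy == fault */
}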
int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
gfp_t page_gfp, int prefill);
void rds_iw_inc_free(struct rds_incoming *inc);
-int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
- size_t size);
+int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_recv_tasklet_fn(unsigned long data);
void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
BUG_ON(atomic_read(&rds_iw_allocation) < 0);
}
-int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
- size_t size)
+int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
struct rds_iw_incoming *iwinc;
struct rds_page_frag *frag;
- struct iovec *iov = first_iov;
unsigned long to_copy;
unsigned long frag_off = 0;
- unsigned long iov_off = 0;
int copied = 0;
int ret;
u32 len;
frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
len = be32_to_cpu(inc->i_hdr.h_len);
- while (copied < size && copied < len) {
+ while (iov_iter_count(to) && copied < len) {
if (frag_off == RDS_FRAG_SIZE) {
frag = list_entry(frag->f_item.next,
struct rds_page_frag, f_item);
frag_off = 0;
}
- while (iov_off == iov->iov_len) {
- iov_off = 0;
- iov++;
- }
-
- to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
- to_copy = min_t(size_t, to_copy, size - copied);
+ to_copy = min_t(unsigned long, iov_iter_count(to),
+ RDS_FRAG_SIZE - frag_off);
to_copy = min_t(unsigned long, to_copy, len - copied);
- rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
- "[%p, %lu] + %lu\n",
- to_copy, iov->iov_base, iov->iov_len, iov_off,
- frag->f_page, frag->f_offset, frag_off);
-
/* XXX needs + offset for multiple recvs per page */
- ret = rds_page_copy_to_user(frag->f_page,
- frag->f_offset + frag_off,
- iov->iov_base + iov_off,
- to_copy);
- if (ret) {
- copied = ret;
- break;
- }
+ rds_stats_add(s_copy_to_user, to_copy);
+ ret = copy_page_to_iter(frag->f_page,
+ frag->f_offset + frag_off,
+ to_copy,
+ to);
+ if (ret != to_copy)
+ return -EFAULT;
- iov_off += to_copy;
frag_off += to_copy;
copied += to_copy;
}
return rm;
}
-int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
- size_t total_len)
+int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from)
{
- unsigned long to_copy;
- unsigned long iov_off;
+ unsigned long to_copy, nbytes;
unsigned long sg_off;
- struct iovec *iov;
struct scatterlist *sg;
int ret = 0;
- rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
+ rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));
/*
* now allocate and copy in the data payload.
*/
sg = rm->data.op_sg;
- iov = first_iov;
- iov_off = 0;
sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */
- while (total_len) {
+ while (iov_iter_count(from)) {
if (!sg_page(sg)) {
- ret = rds_page_remainder_alloc(sg, total_len,
+ ret = rds_page_remainder_alloc(sg, iov_iter_count(from),
GFP_HIGHUSER);
if (ret)
- goto out;
+ return ret;
rm->data.op_nents++;
sg_off = 0;
}
- while (iov_off == iov->iov_len) {
- iov_off = 0;
- iov++;
- }
-
- to_copy = min(iov->iov_len - iov_off, sg->length - sg_off);
- to_copy = min_t(size_t, to_copy, total_len);
-
- rdsdebug("copying %lu bytes from user iov [%p, %zu] + %lu to "
- "sg [%p, %u, %u] + %lu\n",
- to_copy, iov->iov_base, iov->iov_len, iov_off,
- (void *)sg_page(sg), sg->offset, sg->length, sg_off);
+ to_copy = min_t(unsigned long, iov_iter_count(from),
+ sg->length - sg_off);
- ret = rds_page_copy_from_user(sg_page(sg), sg->offset + sg_off,
- iov->iov_base + iov_off,
- to_copy);
- if (ret)
- goto out;
+ rds_stats_add(s_copy_from_user, to_copy);
+ nbytes = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
+ to_copy, from);
+ if (nbytes != to_copy)
+ return -EFAULT;
- iov_off += to_copy;
- total_len -= to_copy;
sg_off += to_copy;
if (sg_off == sg->length)
sg++;
}
-out:
return ret;
}
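On the send side the payload length is no longer passed in separately; it is read once with iov_iter_count(from) before any copying, because every copy_page_from_iter() call shrinks the remaining count as it advances the iterator. A rough sketch of that ordering, with hypothetical names and a simplified scatterlist walk:

#include <linux/uio.h>
#include <linux/scatterlist.h>

/* Illustration only: sample the total length first, then let the iterator's
 * own count drive the copy loop. */
static int example_copy_payload(struct scatterlist *sg, struct iov_iter *from,
				__be32 *h_len)
{
	*h_len = cpu_to_be32(iov_iter_count(from));	/* full payload length */

	while (iov_iter_count(from)) {
		size_t n = min_t(size_t, iov_iter_count(from), sg->length);

		if (copy_page_from_iter(sg_page(sg), sg->offset, n, from) != n)
			return -EFAULT;
		sg = sg_next(sg);
	}
	return 0;
}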
-int rds_message_inc_copy_to_user(struct rds_incoming *inc,
- struct iovec *first_iov, size_t size)
+int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
struct rds_message *rm;
- struct iovec *iov;
struct scatterlist *sg;
unsigned long to_copy;
- unsigned long iov_off;
unsigned long vec_off;
int copied;
int ret;
rm = container_of(inc, struct rds_message, m_inc);
len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
- iov = first_iov;
- iov_off = 0;
sg = rm->data.op_sg;
vec_off = 0;
copied = 0;
- while (copied < size && copied < len) {
- while (iov_off == iov->iov_len) {
- iov_off = 0;
- iov++;
- }
-
- to_copy = min(iov->iov_len - iov_off, sg->length - vec_off);
- to_copy = min_t(size_t, to_copy, size - copied);
+ while (iov_iter_count(to) && copied < len) {
+ to_copy = min_t(unsigned long, iov_iter_count(to),
+ sg->length - vec_off);
to_copy = min_t(unsigned long, to_copy, len - copied);
- rdsdebug("copying %lu bytes to user iov [%p, %zu] + %lu to "
- "sg [%p, %u, %u] + %lu\n",
- to_copy, iov->iov_base, iov->iov_len, iov_off,
- sg_page(sg), sg->offset, sg->length, vec_off);
-
- ret = rds_page_copy_to_user(sg_page(sg), sg->offset + vec_off,
- iov->iov_base + iov_off,
- to_copy);
- if (ret) {
- copied = ret;
- break;
- }
+ rds_stats_add(s_copy_to_user, to_copy);
+ ret = copy_page_to_iter(sg_page(sg), sg->offset + vec_off,
+ to_copy, to);
+ if (ret != to_copy)
+ return -EFAULT;
- iov_off += to_copy;
vec_off += to_copy;
copied += to_copy;
int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
int (*recv)(struct rds_connection *conn);
- int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov,
- size_t size);
+ int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
void (*inc_free)(struct rds_incoming *inc);
int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
-int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
- size_t total_len);
+int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
__be16 dport, u64 seq);
int rds_message_add_version_extension(struct rds_header *hdr, unsigned int version);
int rds_message_get_version_extension(struct rds_header *hdr, unsigned int *version);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
-int rds_message_inc_copy_to_user(struct rds_incoming *inc,
- struct iovec *first_iov, size_t size);
+int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
struct rds_incoming *inc, gfp_t gfp);
-int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size, int msg_flags);
+int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
__be32 saddr, __be32 daddr, int flip);
/* send.c */
-int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t payload_len);
+int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_reset(struct rds_connection *conn);
int rds_send_xmit(struct rds_connection *conn);
struct sockaddr_in;
return 0;
}
-int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size, int msg_flags)
+int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int msg_flags)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
goto out;
while (1) {
+ struct iov_iter save;
/* If there are pending notifications, do those - and nothing else */
if (!list_empty(&rs->rs_notify_queue)) {
ret = rds_notify_queue_get(rs, msg);
rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
&inc->i_conn->c_faddr,
ntohs(inc->i_hdr.h_sport));
- ret = inc->i_conn->c_trans->inc_copy_to_user(inc, msg->msg_iov,
- size);
+ save = msg->msg_iter;
+ ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
if (ret < 0)
break;
rds_inc_put(inc);
inc = NULL;
rds_stats_inc(s_recv_deliver_raced);
+ msg->msg_iter = save;
continue;
}
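Because the transport's inc_copy_to_user() now consumes msg->msg_iter in place, the raced-delivery path has to rewind the iterator before retrying, hence the save/restore around the copy. struct iov_iter holds only cursor state over an iovec array it does not own, so a plain struct assignment is enough. A hedged sketch of the rewind pattern, where copy_to_iter() stands in for the transport copy and the race flag is hypothetical:

#include <linux/uio.h>

static ssize_t example_copy_with_rewind(const void *buf, size_t len,
					struct iov_iter *to, bool raced)
{
	struct iov_iter save = *to;		/* snapshot the cursor */
	size_t n = copy_to_iter(buf, len, to);	/* advances *to */

	if (raced) {
		*to = save;			/* next attempt reuses the same
						 * user-buffer position */
		return -EAGAIN;
	}
	return n;
}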
int cmsg_groups = 0;
int retval;
- for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
struct cmsghdr *cmsg;
int ret = 0;
- for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
u8 op;
};
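for_each_cmsghdr() is a wrapper around the CMSG_FIRSTHDR()/CMSG_NXTHDR() walk the old loops open-coded, so the loop bodies are untouched; roughly, the helper in linux/socket.h expands to:

#define for_each_cmsghdr(cmsg, msg) \
	for (cmsg = CMSG_FIRSTHDR(msg); \
	     cmsg; \
	     cmsg = CMSG_NXTHDR(msg, cmsg))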
-int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t payload_len)
+int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
/* Attach data to the rm */
if (payload_len) {
rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
- ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
+ ret = rds_message_copy_from_user(rm, &msg->msg_iter);
if (ret)
goto out;
}
void rds_tcp_data_ready(struct sock *sk);
int rds_tcp_recv(struct rds_connection *conn);
void rds_tcp_inc_free(struct rds_incoming *inc);
-int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
- size_t size);
+int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
/* tcp_send.c */
void rds_tcp_xmit_prepare(struct rds_connection *conn);
/*
* this is pretty lame, but, whatever.
*/
-int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
- size_t size)
+int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
struct rds_tcp_incoming *tinc;
- struct iovec *iov, tmp;
struct sk_buff *skb;
- unsigned long to_copy, skb_off;
int ret = 0;
- if (size == 0)
+ if (!iov_iter_count(to))
goto out;
tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
- iov = first_iov;
- tmp = *iov;
skb_queue_walk(&tinc->ti_skb_list, skb) {
- skb_off = 0;
- while (skb_off < skb->len) {
- while (tmp.iov_len == 0) {
- iov++;
- tmp = *iov;
- }
-
- to_copy = min(tmp.iov_len, size);
+ unsigned long to_copy, skb_off;
+ for (skb_off = 0; skb_off < skb->len; skb_off += to_copy) {
+ to_copy = iov_iter_count(to);
to_copy = min(to_copy, skb->len - skb_off);
- rdsdebug("ret %d size %zu skb %p skb_off %lu "
- "skblen %d iov_base %p iov_len %zu cpy %lu\n",
- ret, size, skb, skb_off, skb->len,
- tmp.iov_base, tmp.iov_len, to_copy);
-
- /* modifies tmp as it copies */
- if (skb_copy_datagram_iovec(skb, skb_off, &tmp,
- to_copy)) {
- ret = -EFAULT;
- goto out;
- }
+ if (skb_copy_datagram_iter(skb, skb_off, to, to_copy))
+ return -EFAULT;
rds_stats_add(s_copy_to_user, to_copy);
- size -= to_copy;
ret += to_copy;
- skb_off += to_copy;
- if (size == 0)
+
+ if (!iov_iter_count(to))
goto out;
}
}
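skb_copy_datagram_iter() is the iov_iter counterpart of skb_copy_datagram_iovec(): it advances the iterator itself and returns 0 on success, so the temporary iovec (tmp) and the explicit size accounting go away and only the offset into the skb remains. A minimal sketch of that receive loop under the same assumptions, with a hypothetical wrapper name:

#include <linux/skbuff.h>
#include <linux/uio.h>

static int example_skb_to_iter(struct sk_buff *skb, struct iov_iter *to)
{
	unsigned long off = 0;

	while (off < skb->len && iov_iter_count(to)) {
		unsigned long n = min_t(unsigned long, iov_iter_count(to),
					skb->len - off);

		if (skb_copy_datagram_iter(skb, off, to, n))
			return -EFAULT;	/* non-zero return means fault */
		off += n;
	}
	return 0;
}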