Revert "RDS: avoid large pages for sg allocation for TCP transport"
author Wei Lin Guay <wei.lin.guay@oracle.com>
Thu, 31 Aug 2017 19:26:13 +0000 (21:26 +0200)
committer Dhaval Giani <dhaval.giani@oracle.com>
Wed, 15 Nov 2017 06:18:12 +0000 (01:18 -0500)
This reverts commit 2d80dcbe382c ("RDS: avoid large pages for sg allocation
for TCP transport") because RDS now supports large fragment sizes by using
N SGEs, each of PAGE_SIZE, so the TCP-specific restriction is no longer needed.
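
For context, a minimal illustrative sketch (not part of this commit) of the
N-SGE approach the message refers to: a receive fragment is backed by several
scatterlist entries, each filled with a single order-0 page via
rds_page_remainder_alloc(), as the ib_recv.c hunks below do with
ic->i_frag_pages entries. The helper name fill_frag_sges() and the simplified
error handling here are illustrative only.

	/*
	 * Illustrative sketch: back one receive fragment with nr_pages
	 * scatterlist entries, one PAGE_SIZE allocation per entry, instead
	 * of a single high-order allocation.  Mirrors the pattern in
	 * rds_ib_refill_one_frag() in the diff below; unwinding of pages
	 * already attached is left to the caller.
	 */
	static int fill_frag_sges(struct scatterlist *sgl, int nr_pages, gfp_t gfp)
	{
		struct scatterlist *sg;
		int i, ret;

		sg_init_table(sgl, nr_pages);
		for_each_sg(sgl, sg, nr_pages, i) {
			/* one order-0 page per SGE */
			ret = rds_page_remainder_alloc(sg, PAGE_SIZE, gfp);
			if (ret)
				return ret;
		}
		return 0;
	}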

Orabug: 26770234

Signed-off-by: Wei Lin Guay <wei.lin.guay@oracle.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Tested-by: Shih-Yu Huang <shih-yu.huang@oracle.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: Dhaval Giani <dhaval.giani@oracle.com>
net/rds/af_rds.c
net/rds/bind.c
net/rds/ib_recv.c
net/rds/message.c
net/rds/page.c
net/rds/rds.h
net/rds/send.c

index 4e84ca544794898d0cfbbf3c334d3176efbda9dd..c435c3eb020adfb2053acf69abd270a7ea839d3c 100644 (file)
@@ -659,7 +659,6 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
        rs->rs_conn = 0;
        rs->rs_netfilter_enabled = 0;
        rs->rs_rx_traces = 0;
-       rs->rs_large_page = true;
 
        if (rs->rs_bound_addr)
                printk(KERN_CRIT "bound addr %x at create\n", rs->rs_bound_addr);
index 8ebbcb9bae243ad4267192138b4fae78619f2071..da29cdf6644b231ccb1c3b271f865a5e401a7524 100644 (file)
@@ -220,9 +220,6 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        }
 
        rs->rs_transport = trans;
-       if (rs->rs_transport->t_type == RDS_TRANS_TCP)
-               rs->rs_large_page = false;
-
        ret = 0;
 
 out:
index 4d061cabdd729240be56937880f2b0a7052ba86d..ded40962c7766502c01d5a1f4cf26877bf207778 100644 (file)
@@ -386,7 +386,7 @@ static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic
                sg_init_table(frag->f_sg, ic->i_frag_pages);
                for_each_sg(frag->f_sg, sg, ic->i_frag_pages, i) {
                        ret = rds_page_remainder_alloc(sg,
-                                                      PAGE_SIZE, page_mask, false);
+                                                      PAGE_SIZE, page_mask);
                        if (ret) {
                                for_each_sg(frag->f_sg, s, ic->i_frag_pages, j)
                                        __free_pages(sg_page(s), get_order(s->length));
@@ -571,7 +571,7 @@ static int rds_ib_srq_prefill_one(struct rds_ib_device *rds_ibdev,
        sg_init_table(recv->r_frag->f_sg, num_sge);
        for_each_sg(recv->r_frag->f_sg, sg, num_sge, i) {
                ret = rds_page_remainder_alloc(sg,
-                                              PAGE_SIZE, page_mask, false);
+                                              PAGE_SIZE, page_mask);
                if (ret) {
                        for_each_sg(recv->r_frag->f_sg, s, num_sge, j)
                                __free_pages(sg_page(s), get_order(s->length));
index 991e8709860236b2b260515805f7f897d59ca400..6ae9267732aaea4f09b99b76f7290163818f7d67 100644 (file)
@@ -293,7 +293,7 @@ struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
 }
 
 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
-                              gfp_t gfp, bool large_page)
+                              gfp_t gfp)
 {
        unsigned long to_copy, nbytes;
        unsigned long sg_off;
@@ -312,8 +312,7 @@ int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
                if (!sg_page(sg)) {
                        ret = rds_page_remainder_alloc(sg, iov_iter_count(from),
                                                       GFP_ATOMIC == gfp ?
-                                                      gfp : GFP_HIGHUSER,
-                                                      large_page);
+                                                      gfp : GFP_HIGHUSER);
 
                        if (ret)
                                return ret;
index 59cd71cbb991f5a22ad2bb12dbbc83e99f1edb49..1dae848832918a5de712ecea6083e681194a5db4 100644 (file)
@@ -116,31 +116,22 @@ EXPORT_SYMBOL_GPL(rds_page_copy_user);
  * reference until they are done with the region.
  */
 int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
-                            gfp_t gfp, bool large_page)
+                            gfp_t gfp)
 {
        struct rds_page_remainder *rem;
        unsigned long flags;
        struct page *page;
        int ret;
-       unsigned int order, size;
 
        gfp |= __GFP_HIGHMEM;
 
        /* jump straight to allocation if we're trying for a huge page */
        if (bytes >= PAGE_SIZE) {
-               if (large_page) {
-                       order =  get_order(bytes);
-                       size = bytes;
-               } else {
-                       order =  0;
-                       size = PAGE_SIZE;
-               }
-
-               page = alloc_pages(gfp, order);
+               page = alloc_pages(gfp, get_order(bytes));
                if (!page) {
                        ret = -ENOMEM;
                } else {
-                       sg_set_page(scat, page, size, 0);
+                       sg_set_page(scat, page, bytes, 0);
                        ret = 0;
                }
                goto out;
index 65056ea6ae99fdd6ec453c67ab68c87083d0ddb7..26c017c61c6c10e0bf314c64b773a3f1102e8b80 100644 (file)
@@ -774,8 +774,6 @@ struct rds_sock {
        u8                      rs_rx_traces;
        u8                      rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
 
-       bool                    rs_large_page;
-
        u32                     rs_hash_initval;
 };
 
@@ -977,7 +975,7 @@ rds_conn_self_loopback_passive(struct rds_connection *conn)
 struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
 struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
-                              gfp_t gfp, bool n);
+                              gfp_t gfp);
 void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
                                 __be16 dport, u64 seq);
 int rds_message_add_extension(struct rds_header *hdr,
@@ -1009,7 +1007,7 @@ static inline int rds_message_verify_checksum(const struct rds_header *hdr)
 
 /* page.c */
 int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
-                            gfp_t gfp, bool n);
+                            gfp_t gfp);
 int rds_page_copy_user(struct page *page, unsigned long offset,
                       void __user *ptr, unsigned long bytes,
                       int to_user);
index 96e44a4b0faf282158661ef45bbe669ee0661e4d..167934a666e45d60f315b6e114a1c068dbc23589 100644 (file)
@@ -1218,7 +1218,6 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
        long timeo = sock_sndtimeo(sk, nonblock);
        size_t total_payload_len = payload_len, rdma_payload_len = 0;
        struct rds_conn_path *cpath;
-       bool large_page;
 
        /* Mirror Linux UDP mirror of BSD error message compatibility */
        /* XXX: Perhaps MSG_MORE someday */
@@ -1245,7 +1244,6 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
        }
 
        lock_sock(sk);
-       large_page = rs->rs_large_page;
        if (daddr == 0 || rs->rs_bound_addr == 0) {
                release_sock(sk);
                ret = -ENOTCONN; /* XXX not a great errno */
@@ -1276,8 +1274,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
        /* Attach data to the rm */
        if (payload_len) {
                rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
-               ret = rds_message_copy_from_user(rm, &msg->msg_iter, GFP_KERNEL,
-                                                false);
+               ret = rds_message_copy_from_user(rm, &msg->msg_iter, GFP_KERNEL);
                if (ret)
                        goto out;
        }