RDS: use c_wq for all activities on a connection
Author:     Santosh Shilimkar <santosh.shilimkar@oracle.com>
AuthorDate: Sat, 27 Aug 2016 02:32:57 +0000 (19:32 -0700)
Commit:     Santosh Shilimkar <santosh.shilimkar@oracle.com>
CommitDate: Wed, 12 Oct 2016 17:20:25 +0000 (10:20 -0700)
RDS connection work, send work, recv work etc. have all been
serialised through a single-threaded work queue. For loopback
connections we created a separate work queue, but only the
connection work was moved onto it. This under-utilises that
work queue and leaves unnecessary contention between the
send/recv work and the connection work of loopback connections.

Move the remaining loopback work onto rds_local_wq as well,
which guarantees the same serialisation while decoupling
loopback work from non-loopback work.
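Every queue_delayed_work(rds_wq, ...) call in the hunks below now
queues onto the per-connection conn->c_wq instead. As a minimal
sketch of how c_wq could be picked at connection-setup time (the
helper name rds_conn_select_wq() is hypothetical, and the
allocation-path change is not part of this diff):

	/*
	 * Hypothetical sketch, not part of this diff: select the work
	 * queue that all of this connection's work items will run on.
	 * Loopback connections use the dedicated rds_local_wq so their
	 * send/recv/connect work no longer contends with non-loopback
	 * traffic, while each work queue stays single-threaded so the
	 * existing serialisation guarantees are preserved.
	 */
	static void rds_conn_select_wq(struct rds_connection *conn)
	{
		if (conn->c_loopback)
			conn->c_wq = rds_local_wq;
		else
			conn->c_wq = rds_wq;
	}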

Orabug: 22347191

Tested-by: Michael Nowak <michael.nowak@oracle.com>
Tested-by: Rafael Alejandro Peralez <rafael.peralez@oracle.com>
Tested-by: Liwen Huang <liwen.huang@oracle.com>
Tested-by: Hong Liu <hong.x.liu@oracle.com>
Reviewed-by: Mukesh Kacker <mukesh.kacker@oracle.com>
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
net/rds/cong.c
net/rds/ib_cm.c
net/rds/ib_recv.c
net/rds/ib_send.c
net/rds/send.c
net/rds/tcp_recv.c
net/rds/tcp_send.c
net/rds/threads.c

diff --git a/net/rds/cong.c b/net/rds/cong.c
index 2cd81c3502b76685620a1c42efb8eaf74ef78542..7ba13755c83206ffa4a584e5952847a24860636d 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -242,7 +242,7 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
        list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
                if (!test_and_set_bit(0, &conn->c_map_queued)) {
                        rds_stats_inc(s_cong_update_queued);
-                       queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+                       queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
                }
        }
 
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index f6926c71c217907f0bf9e2f5103f3123ae44ee31..47e2a006ce64671735419e481a1ab2e44240a15d 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -521,7 +521,7 @@ static void rds_ib_rx(struct rds_ib_connection *ic)
                if ((atomic_read(&rds_ibdev->srq->s_num_posted) <
                                        rds_ib_srq_hwm_refill) &&
                        !test_and_set_bit(0, &rds_ibdev->srq->s_refill_gate))
-                               queue_delayed_work(rds_wq,
+                               queue_delayed_work(conn->c_wq,
                                        &rds_ibdev->srq->s_refill_w, 0);
 
        if (ic->i_rx_poll_cq >= RDS_IB_RX_LIMIT) {
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 15063fadcbfa803952f999eae024e18fd0754383..2468d609fd73bcdb1c17036875993511659166e6 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -703,7 +703,7 @@ release_out:
        if (rds_conn_up(conn) &&
           (must_wake || (can_wait && ring_low)
                        || rds_ib_ring_empty(&ic->i_recv_ring))) {
-               queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
+               queue_delayed_work(conn->c_wq, &conn->c_recv_w, 1);
        }
        if (can_wait)
                cond_resched();
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index df8b0f7a4365bdea0821d03aa17bf51d17a4ccc1..216efb284451246697e11cd0b8787cacc9eea36e 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -464,7 +464,7 @@ void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
 
        atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
        if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
-               queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+               queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
 
        WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);
 
diff --git a/net/rds/send.c b/net/rds/send.c
index 462889d2b4683c41c91f0883d9a69585357a15e4..3741ebdbb10930e6e457bdf2f05819a46e894b4e 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -495,7 +495,7 @@ over_batch:
                        rds_stats_inc(s_send_lock_queue_raced);
                        if (batch_count < send_batch_count)
                                goto restart;
-                       queue_delayed_work(rds_wq, &conn->c_send_w, 1);
+                       queue_delayed_work(conn->c_wq, &conn->c_send_w, 1);
                }
        }
 out:
@@ -1401,7 +1401,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 
        ret = rds_send_xmit(conn);
        if (ret == -ENOMEM || ret == -EAGAIN)
-               queue_delayed_work(rds_wq, &conn->c_send_w, 1);
+               queue_delayed_work(conn->c_wq, &conn->c_send_w, 1);
 
 
        rds_message_put(rm);
@@ -1521,7 +1521,7 @@ int rds_send_internal(struct rds_connection *conn, struct rds_sock *rs,
                rds_stats_inc(s_send_queue_full);
 
                /* force a requeue of the work for later */
-               queue_delayed_work(rds_wq, &conn->c_send_w, 1);
+               queue_delayed_work(conn->c_wq, &conn->c_send_w, 1);
 
                ret = -EAGAIN;
                goto out;
@@ -1534,7 +1534,7 @@ int rds_send_internal(struct rds_connection *conn, struct rds_sock *rs,
        rds_stats_inc(s_send_queued);
 
        /* always hand the send off to the worker thread */
-       queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+       queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
 
        rdsdebug("message sent for rs %p, conn %p, len %d, %u.%u.%u.%u : %u -> %u.%u.%u.%u : %u\n",
                 rs, conn, skb->len, NIPQUAD(dst->saddr), dst->sport, NIPQUAD(dst->daddr), dst->dport);
@@ -1597,7 +1597,7 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
        rds_stats_inc(s_send_pong);
 
        if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
-               queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+               queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
 
        rds_message_put(rm);
        return 0;
@@ -1646,7 +1646,7 @@ rds_send_hb(struct rds_connection *conn, int response)
 
        ret = rds_send_xmit(conn);
        if (ret == -ENOMEM || ret == -EAGAIN)
-               queue_delayed_work(rds_wq, &conn->c_send_w, 1);
+               queue_delayed_work(conn->c_wq, &conn->c_send_w, 1);
 
        rds_message_put(rm);
        return 0;
@@ -1671,5 +1671,5 @@ void rds_route_to_base(struct rds_connection *conn)
        }
        spin_unlock_irqrestore(&base_conn->c_lock, flags);
        conn->c_route_to_base = 1;
-       queue_delayed_work(rds_wq, &base_conn->c_send_w, 0);
+       queue_delayed_work(conn->c_wq, &base_conn->c_send_w, 0);
 }
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 4ee2145ca935d72127d07a5d69fdc0364f2de0d9..abf651a1dd8235bf22fe8c1ae903d117c9933d61 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -318,7 +318,7 @@ void rds_tcp_data_ready(struct sock *sk)
        rds_tcp_stats_inc(s_tcp_data_ready_calls);
 
        if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
-               queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+               queue_delayed_work(conn->c_wq, &conn->c_recv_w, 0);
 out:
        read_unlock(&sk->sk_callback_lock);
        ready(sk);
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index edecc4606af69cbf4dd0d8b84fd2fe3980fbb530..aa3a82a918e2c6499ecbc9c4d5966cc9f2d1cf79 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -197,7 +197,7 @@ void rds_tcp_write_space(struct sock *sk)
        rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
         if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
-               queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+               queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
 
 out:
        read_unlock(&sk->sk_callback_lock);
diff --git a/net/rds/threads.c b/net/rds/threads.c
index b729f36220b1e4584422c68dc709e369879a166d..c59be62e98044e348bac69cf5921265bcfacf195 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -94,9 +94,9 @@ void rds_connect_path_complete(struct rds_connection *conn, int curr)
 
        conn->c_reconnect_jiffies = 0;
        set_bit(0, &conn->c_map_queued);
-       queue_delayed_work(rds_wq, &conn->c_send_w, 0);
-       queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
-       queue_delayed_work(rds_wq, &conn->c_hb_w, 0);
+       queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
+       queue_delayed_work(conn->c_wq, &conn->c_recv_w, 0);
+       queue_delayed_work(conn->c_wq, &conn->c_hb_w, 0);
        conn->c_hb_start = 0;
 
        conn->c_connection_start = get_seconds();
@@ -211,11 +211,11 @@ void rds_send_worker(struct work_struct *work)
                switch (ret) {
                case -EAGAIN:
                        rds_stats_inc(s_send_immediate_retry);
-                       queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+                       queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
                        break;
                case -ENOMEM:
                        rds_stats_inc(s_send_delayed_retry);
-                       queue_delayed_work(rds_wq, &conn->c_send_w, 2);
+                       queue_delayed_work(conn->c_wq, &conn->c_send_w, 2);
                default:
                        break;
                }
@@ -233,11 +233,11 @@ void rds_recv_worker(struct work_struct *work)
                switch (ret) {
                case -EAGAIN:
                        rds_stats_inc(s_recv_immediate_retry);
-                       queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+                       queue_delayed_work(conn->c_wq, &conn->c_recv_w, 0);
                        break;
                case -ENOMEM:
                        rds_stats_inc(s_recv_delayed_retry);
-                       queue_delayed_work(rds_wq, &conn->c_recv_w, 2);
+                       queue_delayed_work(conn->c_wq, &conn->c_recv_w, 2);
                default:
                        break;
                }
@@ -281,7 +281,7 @@ void rds_hb_worker(struct work_struct *work)
                        rds_conn_drop(conn, DR_HB_TIMEOUT);
                        return;
                }
-               queue_delayed_work(rds_wq, &conn->c_hb_w, HZ);
+               queue_delayed_work(conn->c_wq, &conn->c_hb_w, HZ);
        }
 }