From dde26895edf2ff27804e05a88780775d0159603e Mon Sep 17 00:00:00 2001
From: Santosh Shilimkar
Date: Fri, 26 Aug 2016 19:32:57 -0700
Subject: [PATCH] RDS: use c_wq for all activities on a connection

RDS connection work, send work, recv work etc. have been serialised
by use of a single-threaded work queue. For loopback connections, we
created a separate workqueue thread, but only the connection work was
moved onto it. This under-utilises that thread and creates unnecessary
contention between send/recv work and connection work for loopback
connections.

Move the remaining loopback work onto rds_local_wq as well, which
guarantees serialisation and also delinks the loopback work from the
non-loopback work.

Orabug: 22347191

Tested-by: Michael Nowak
Tested-by: Rafael Alejandro Peralez
Tested-by: Liwen Huang
Tested-by: Hong Liu
Reviewed-by: Mukesh Kacker
Signed-off-by: Santosh Shilimkar
---
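Note: conn->c_wq itself is assigned at connection-creation time and is
not touched by this patch. Below is a minimal sketch of the expected
selection, assuming rds_local_wq is the dedicated loopback workqueue
named in the changelog, rds_wq is the existing shared one, and the
connection carries a c_loopback flag; the actual setup code may differ:

	/* Hypothetical placement, e.g. at rds_conn_create() time:
	 * loopback connections use the dedicated rds_local_wq, while
	 * everything else stays on the shared single-threaded rds_wq.
	 * Queueing all of a connection's work on conn->c_wq then keeps
	 * that connection serialised on exactly one workqueue.
	 */
	if (conn->c_loopback)	/* assumed flag name */
		conn->c_wq = rds_local_wq;
	else
		conn->c_wq = rds_wq;

With this in place, every queue_delayed_work() call in the hunks below
can pass conn->c_wq without checking whether the connection is loopback.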
 net/rds/cong.c     |  2 +-
 net/rds/ib_cm.c    |  2 +-
 net/rds/ib_recv.c  |  2 +-
 net/rds/ib_send.c  |  2 +-
 net/rds/send.c     | 14 +++++++-------
 net/rds/tcp_recv.c |  2 +-
 net/rds/tcp_send.c |  2 +-
 net/rds/threads.c  | 16 ++++++++--------
 8 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/net/rds/cong.c b/net/rds/cong.c
index 2cd81c3502b7..7ba13755c832 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -242,7 +242,7 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
 	list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
 		if (!test_and_set_bit(0, &conn->c_map_queued)) {
 			rds_stats_inc(s_cong_update_queued);
-			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+			queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
 		}
 	}
 
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index f6926c71c217..47e2a006ce64 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -521,7 +521,7 @@ static void rds_ib_rx(struct rds_ib_connection *ic)
 	if ((atomic_read(&rds_ibdev->srq->s_num_posted) <
 	     rds_ib_srq_hwm_refill) &&
 	    !test_and_set_bit(0, &rds_ibdev->srq->s_refill_gate))
-		queue_delayed_work(rds_wq,
+		queue_delayed_work(conn->c_wq,
 				   &rds_ibdev->srq->s_refill_w, 0);
 
 	if (ic->i_rx_poll_cq >= RDS_IB_RX_LIMIT) {
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 15063fadcbfa..2468d609fd73 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -703,7 +703,7 @@ release_out:
 	if (rds_conn_up(conn) &&
 	    (must_wake || (can_wait && ring_low) ||
 	    rds_ib_ring_empty(&ic->i_recv_ring))) {
-		queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
+		queue_delayed_work(conn->c_wq, &conn->c_recv_w, 1);
 	}
 	if (can_wait)
 		cond_resched();
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index df8b0f7a4365..216efb284451 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -464,7 +464,7 @@ void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
 
 	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
 	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
-		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+		queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
 
 	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);
 
diff --git a/net/rds/send.c b/net/rds/send.c
index 462889d2b468..3741ebdbb109 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -495,7 +495,7 @@ over_batch:
 			rds_stats_inc(s_send_lock_queue_raced);
 			if (batch_count < send_batch_count)
 				goto restart;
-			queue_delayed_work(rds_wq, &conn->c_send_w, 1);
+			queue_delayed_work(conn->c_wq, &conn->c_send_w, 1);
 		}
 	}
 out:
@@ -1401,7 +1401,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 
 	ret = rds_send_xmit(conn);
 	if (ret == -ENOMEM || ret == -EAGAIN)
-		queue_delayed_work(rds_wq, &conn->c_send_w, 1);
+		queue_delayed_work(conn->c_wq, &conn->c_send_w, 1);
 
 	rds_message_put(rm);
 
@@ -1521,7 +1521,7 @@ int rds_send_internal(struct rds_connection *conn, struct rds_sock *rs,
 
 		rds_stats_inc(s_send_queue_full);
 		/* force a requeue of the work for later */
-		queue_delayed_work(rds_wq, &conn->c_send_w, 1);
+		queue_delayed_work(conn->c_wq, &conn->c_send_w, 1);
 
 		ret = -EAGAIN;
 		goto out;
@@ -1534,7 +1534,7 @@ int rds_send_internal(struct rds_connection *conn, struct rds_sock *rs,
 	rds_stats_inc(s_send_queued);
 	/* always hand the send off to the worker thread */
-	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+	queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
 
 	rdsdebug("message sent for rs %p, conn %p, len %d, %u.%u.%u.%u : %u -> %u.%u.%u.%u : %u\n",
 		 rs, conn, skb->len, NIPQUAD(dst->saddr), dst->sport,
 		 NIPQUAD(dst->daddr), dst->dport);
@@ -1597,7 +1597,7 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
 	rds_stats_inc(s_send_pong);
 
 	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
-		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+		queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
 
 	rds_message_put(rm);
 	return 0;
@@ -1646,7 +1646,7 @@ rds_send_hb(struct rds_connection *conn, int response)
 
 	ret = rds_send_xmit(conn);
 	if (ret == -ENOMEM || ret == -EAGAIN)
-		queue_delayed_work(rds_wq, &conn->c_send_w, 1);
+		queue_delayed_work(conn->c_wq, &conn->c_send_w, 1);
 
 	rds_message_put(rm);
 	return 0;
@@ -1671,5 +1671,5 @@ void rds_route_to_base(struct rds_connection *conn)
 	}
 	spin_unlock_irqrestore(&base_conn->c_lock, flags);
 	conn->c_route_to_base = 1;
-	queue_delayed_work(rds_wq, &base_conn->c_send_w, 0);
+	queue_delayed_work(conn->c_wq, &base_conn->c_send_w, 0);
 }
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 4ee2145ca935..abf651a1dd82 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -318,7 +318,7 @@ void rds_tcp_data_ready(struct sock *sk)
 	rds_tcp_stats_inc(s_tcp_data_ready_calls);
 
 	if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
-		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+		queue_delayed_work(conn->c_wq, &conn->c_recv_w, 0);
 out:
 	read_unlock(&sk->sk_callback_lock);
 	ready(sk);
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index edecc4606af6..aa3a82a918e2 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -197,7 +197,7 @@ void rds_tcp_write_space(struct sock *sk)
 	rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
-		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+		queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
 
 out:
 	read_unlock(&sk->sk_callback_lock);
diff --git a/net/rds/threads.c b/net/rds/threads.c
index b729f36220b1..c59be62e9804 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -94,9 +94,9 @@ void rds_connect_path_complete(struct rds_connection *conn, int curr)
 
 	conn->c_reconnect_jiffies = 0;
 	set_bit(0, &conn->c_map_queued);
-	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
-	queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
-	queue_delayed_work(rds_wq, &conn->c_hb_w, 0);
+	queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
+	queue_delayed_work(conn->c_wq, &conn->c_recv_w, 0);
+	queue_delayed_work(conn->c_wq, &conn->c_hb_w, 0);
 
 	conn->c_hb_start = 0;
 	conn->c_connection_start = get_seconds();
@@ -211,11 +211,11 @@ void rds_send_worker(struct work_struct *work)
 		switch (ret) {
 		case -EAGAIN:
 			rds_stats_inc(s_send_immediate_retry);
-			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+			queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
 			break;
 		case -ENOMEM:
 			rds_stats_inc(s_send_delayed_retry);
-			queue_delayed_work(rds_wq, &conn->c_send_w, 2);
+			queue_delayed_work(conn->c_wq, &conn->c_send_w, 2);
 		default:
 			break;
 		}
@@ -233,11 +233,11 @@ void rds_recv_worker(struct work_struct *work)
 		switch (ret) {
 		case -EAGAIN:
 			rds_stats_inc(s_recv_immediate_retry);
-			queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+			queue_delayed_work(conn->c_wq, &conn->c_recv_w, 0);
 			break;
 		case -ENOMEM:
 			rds_stats_inc(s_recv_delayed_retry);
-			queue_delayed_work(rds_wq, &conn->c_recv_w, 2);
+			queue_delayed_work(conn->c_wq, &conn->c_recv_w, 2);
 		default:
 			break;
 		}
@@ -281,7 +281,7 @@ void rds_hb_worker(struct work_struct *work)
 			rds_conn_drop(conn, DR_HB_TIMEOUT);
 			return;
 		}
-		queue_delayed_work(rds_wq, &conn->c_hb_w, HZ);
+		queue_delayed_work(conn->c_wq, &conn->c_hb_w, HZ);
 	}
 }
-- 
2.50.1