From ab2ca254b3d3982df2e8790a1ef59530b086c547 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 3 Feb 2012 11:07:41 -0500 Subject: [PATCH] RDS: make sure not to loop forever inside rds_send_xmit If a determined set of concurrent senders keep the send queue full, we can loop forever inside rds_send_xmit. This fix has two parts. First we are dropping out of the while(1) loop after we've processed a large batch of messages. Second we add a generation number that gets bumped each time the xmit bit lock is acquired. If someone else has jumped in and made progress in the queue, we skip our goto restart. Signed-off-by: Chris Mason Signed-off-by: Chris Mason Signed-off-by: Bang Nguyen --- net/rds/connection.c | 1 + net/rds/rds.h | 1 + net/rds/send.c | 48 ++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 46 insertions(+), 4 deletions(-) diff --git a/net/rds/connection.c b/net/rds/connection.c index 0f7b4043c5a8c..fa4c3d3edbd78 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -191,6 +191,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr, } atomic_set(&conn->c_state, RDS_CONN_DOWN); + conn->c_send_gen = 0; conn->c_reconnect_jiffies = 0; INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker); INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker); diff --git a/net/rds/rds.h b/net/rds/rds.h index 240d856c8e022..3a19aa9948a6d 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -111,6 +111,7 @@ struct rds_connection { void *c_transport_data; atomic_t c_state; + unsigned long c_send_gen; unsigned long c_flags; unsigned long c_reconnect_jiffies; struct delayed_work c_send_w; diff --git a/net/rds/send.c b/net/rds/send.c index e1f7f0585b27f..2d758fea79516 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -135,9 +135,13 @@ int rds_send_xmit(struct rds_connection *conn) int ret = 0; LIST_HEAD(to_be_dropped); int same_rm = 0; + int batch_count; + unsigned long send_gen = 0; restart: + batch_count = 0; + /* * sendmsg calls here after 
having queued its message on the send * queue. We only have one task feeding the connection at a time. If @@ -151,6 +155,17 @@ restart: goto out; } + /* + * we record the send generation after doing the xmit acquire. + * if someone else manages to jump in and do some work, we'll use + * this to avoid a goto restart farther down. + * + * we don't need a lock because the counter is only incremented + * while we have the in_xmit bit held. + */ + conn->c_send_gen++; + send_gen = conn->c_send_gen; + /* * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT, * we do the opposite to avoid races. @@ -209,6 +224,16 @@ restart: if (!rm) { unsigned int len; + batch_count++; + + /* we want to process as big a batch as we can, but + * we also want to avoid softlockups. If we've been + * through a lot of messages, lets back off and see + * if anyone else jumps in + */ + if (batch_count >= 1024) + goto over_batch; + spin_lock_irqsave(&conn->c_lock, flags); if (!list_empty(&conn->c_send_queue)) { @@ -364,9 +389,9 @@ restart: } } +over_batch: if (conn->c_trans->xmit_complete) conn->c_trans->xmit_complete(conn); - release_in_xmit(conn); /* Nuke any messages we decided not to retransmit. */ @@ -387,12 +412,27 @@ restart: * If the transport cannot continue (i.e ret != 0), then it must * call us when more room is available, such as from the tx * completion handler. 
+ * + * We have an extra generation check here so that if someone manages + * to jump in after our release_in_xmit, we'll see that they have done + * some work and we will skip our goto */ if (ret == 0) { smp_mb(); - if (!list_empty(&conn->c_send_queue)) { - rds_stats_inc(s_send_lock_queue_raced); - goto restart; + if (!list_empty(&conn->c_send_queue) && + send_gen == conn->c_send_gen) { + cond_resched(); + /* + * repeat our check after the resched in case + * someone else was kind enough to empty or process + * the queue + */ + smp_mb(); + if (!list_empty(&conn->c_send_queue) && + send_gen == conn->c_send_gen) { + rds_stats_inc(s_send_lock_queue_raced); + goto restart; + } } } out: -- 2.50.1