/*
 * Patch fragment (concatenated diff hunks): RDS work items that used to be
 * queued on the single global workqueue `rds_wq` are moved to the
 * per-connection workqueue `conn->c_wq`.  Each `-`/`+` pair below swaps only
 * the workqueue argument of queue_delayed_work(); the work item and delay are
 * unchanged in every hunk.
 */
/* Congestion-map update: queue a send for every connection in the map. */
list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
if (!test_and_set_bit(0, &conn->c_map_queued)) {
rds_stats_inc(s_cong_update_queued);
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
}
}
/*
 * NOTE(review): if this SRQ-refill check follows the list_for_each_entry
 * loop above, `conn` here is the past-the-end iterator value, not a valid
 * connection — queuing on `conn->c_wq` would dereference an invalid pointer.
 * The SRQ belongs to the device (`rds_ibdev`), not to any one connection, so
 * a device-level or global workqueue may be the correct target.  TODO:
 * confirm `conn`'s scope in the surrounding function before applying.
 */
if ((atomic_read(&rds_ibdev->srq->s_num_posted) <
rds_ib_srq_hwm_refill) &&
!test_and_set_bit(0, &rds_ibdev->srq->s_refill_gate))
- queue_delayed_work(rds_wq,
+ queue_delayed_work(conn->c_wq,
&rds_ibdev->srq->s_refill_w, 0);
/* RX poll budget exhausted: defer further receive work (1-jiffy delay). */
if (ic->i_rx_poll_cq >= RDS_IB_RX_LIMIT) {
if (rds_conn_up(conn) &&
(must_wake || (can_wait && ring_low)
|| rds_ib_ring_empty(&ic->i_recv_ring))) {
- queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
+ queue_delayed_work(conn->c_wq, &conn->c_recv_w, 1);
}
if (can_wait)
cond_resched();
/* Credits returned by the peer: resume sending if we were blocked on them. */
atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);
/* Lost the send-lock race: retry inline up to the batch limit, then defer. */
rds_stats_inc(s_send_lock_queue_raced);
if (batch_count < send_batch_count)
goto restart;
- queue_delayed_work(rds_wq, &conn->c_send_w, 1);
+ queue_delayed_work(conn->c_wq, &conn->c_send_w, 1);
}
}
out:
/* Transient xmit failure: requeue the send worker with a 1-jiffy backoff. */
ret = rds_send_xmit(conn);
if (ret == -ENOMEM || ret == -EAGAIN)
- queue_delayed_work(rds_wq, &conn->c_send_w, 1);
+ queue_delayed_work(conn->c_wq, &conn->c_send_w, 1);
rds_message_put(rm);
rds_stats_inc(s_send_queue_full);
/* force a requeue of the work for later */
- queue_delayed_work(rds_wq, &conn->c_send_w, 1);
+ queue_delayed_work(conn->c_wq, &conn->c_send_w, 1);
ret = -EAGAIN;
goto out;
rds_stats_inc(s_send_queued);
/* always hand the send off to the worker thread */
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
rdsdebug("message sent for rs %p, conn %p, len %d, %u.%u.%u.%u : %u -> %u.%u.%u.%u : %u\n",
rs, conn, skb->len, NIPQUAD(dst->saddr), dst->sport, NIPQUAD(dst->daddr), dst->dport);
rds_stats_inc(s_send_pong);
if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
rds_message_put(rm);
return 0;
ret = rds_send_xmit(conn);
if (ret == -ENOMEM || ret == -EAGAIN)
- queue_delayed_work(rds_wq, &conn->c_send_w, 1);
+ queue_delayed_work(conn->c_wq, &conn->c_send_w, 1);
rds_message_put(rm);
return 0;
}
spin_unlock_irqrestore(&base_conn->c_lock, flags);
conn->c_route_to_base = 1;
/*
 * NOTE(review): this hunk queues base_conn's work item on conn's workqueue —
 * the work item and the workqueue belong to different connections.  Every
 * other hunk in this patch pairs a connection's work with its own c_wq;
 * `base_conn->c_wq` looks like the intended target here.  TODO: confirm.
 */
- queue_delayed_work(rds_wq, &base_conn->c_send_w, 0);
+ queue_delayed_work(conn->c_wq, &base_conn->c_send_w, 0);
}
/* TCP data_ready callback: on -ENOMEM, retry the read from the worker. */
rds_tcp_stats_inc(s_tcp_data_ready_calls);
if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
- queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+ queue_delayed_work(conn->c_wq, &conn->c_recv_w, 0);
out:
read_unlock(&sk->sk_callback_lock);
ready(sk);
/* TCP write_space callback: kick the send worker when the socket drains. */
rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
out:
read_unlock(&sk->sk_callback_lock);
/* Connection established: start send, recv, and heartbeat workers. */
conn->c_reconnect_jiffies = 0;
set_bit(0, &conn->c_map_queued);
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
- queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
- queue_delayed_work(rds_wq, &conn->c_hb_w, 0);
+ queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
+ queue_delayed_work(conn->c_wq, &conn->c_recv_w, 0);
+ queue_delayed_work(conn->c_wq, &conn->c_hb_w, 0);
conn->c_hb_start = 0;
conn->c_connection_start = get_seconds();
/* Send worker: -EAGAIN retries immediately, -ENOMEM backs off 2 jiffies. */
switch (ret) {
case -EAGAIN:
rds_stats_inc(s_send_immediate_retry);
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ queue_delayed_work(conn->c_wq, &conn->c_send_w, 0);
break;
case -ENOMEM:
rds_stats_inc(s_send_delayed_retry);
- queue_delayed_work(rds_wq, &conn->c_send_w, 2);
+ queue_delayed_work(conn->c_wq, &conn->c_send_w, 2);
default:
break;
}
/* Recv worker: same retry policy as the send worker above. */
switch (ret) {
case -EAGAIN:
rds_stats_inc(s_recv_immediate_retry);
- queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+ queue_delayed_work(conn->c_wq, &conn->c_recv_w, 0);
break;
case -ENOMEM:
rds_stats_inc(s_recv_delayed_retry);
- queue_delayed_work(rds_wq, &conn->c_recv_w, 2);
+ queue_delayed_work(conn->c_wq, &conn->c_recv_w, 2);
default:
break;
}
/* Heartbeat worker: drop on timeout, otherwise re-arm one second out. */
rds_conn_drop(conn, DR_HB_TIMEOUT);
return;
}
- queue_delayed_work(rds_wq, &conn->c_hb_w, HZ);
+ queue_delayed_work(conn->c_wq, &conn->c_hb_w, HZ);
}
}