#include <rdma/ib_cm.h>
#include "sdp_dbg.h"
+#define sk_ssk(ssk) ((struct sock *)(ssk))
+
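The sk_ssk() conversion is only valid because the struct sock sits at offset zero of struct sdp_sock: the old accessor &ssk->isk.sk reaches it through the embedded inet_sock (isk), whose struct sock is its first member. A minimal sketch of a compile-time guard for that assumption follows; the helper name is illustrative and not part of this patch.

/* Sketch only: documents the layout assumption behind the sk_ssk() cast.
 * Needs the full struct sdp_sock definition (sdp.h) and BUILD_BUG_ON
 * from <linux/kernel.h>. */
static inline struct sock *sdp_ssk_to_sock(struct sdp_sock *ssk)
{
	/* The cast and &ssk->isk.sk must name the same address. */
	BUILD_BUG_ON(offsetof(struct sdp_sock, isk.sk) != 0);
	return (struct sock *)ssk;
}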
/* Interval between successive polls in the Tx routine when polling is used
instead of interrupts (in per-core Tx rings) - should be a power of 2 */
#define SDP_TX_POLL_MODER 16
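The power-of-2 requirement on SDP_TX_POLL_MODER lets the Tx path decide "poll the CQ on this post?" with a mask test instead of a modulo. A minimal sketch of that pattern, where poll_cnt is a hypothetical per-ring counter and not a field introduced by this patch:

/* Sketch only - assumes the caller bumps poll_cnt once per Tx post. */
static inline int sdp_tx_poll_due(unsigned int poll_cnt)
{
	/* Equivalent to (poll_cnt % SDP_TX_POLL_MODER) == 0, but cheaper,
	 * which is why the interval must be a power of 2. */
	return (poll_cnt & (SDP_TX_POLL_MODER - 1)) == 0;
}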
else {
/* There is no point in setting up a timer for an immediate
* cq-arming; better to arm it now. */
- sdp_arm_rx_cq(&ssk->isk.sk);
+ sdp_arm_rx_cq(sk_ssk(ssk));
}
}
unlikely(h->mid != SDP_MID_DATA) ||
(ssk->nonagle & TCP_NAGLE_OFF) ||
!ssk->nagle_last_unacked ||
- skb->next != (struct sk_buff *)&ssk->isk.sk.sk_write_queue ||
+ skb->next != (struct sk_buff *)&sk_ssk(ssk)->sk_write_queue ||
skb->len + sizeof(struct sdp_bsdh) >= ssk->xmit_size_goal ||
(SDP_SKB_CB(skb)->flags & TCPCB_FLAG_PSH) ||
(SDP_SKB_CB(skb)->flags & TCPCB_FLAG_URG);
if (!timer_pending(&ssk->nagle_timer) && ssk->qp_active) {
mod_timer(&ssk->nagle_timer,
jiffies + SDP_NAGLE_TIMEOUT);
- sdp_dbg_data(&ssk->isk.sk, "Starting nagle timer\n");
+ sdp_dbg_data(sk_ssk(ssk), "Starting nagle timer\n");
}
}
- sdp_dbg_data(&ssk->isk.sk, "send_now = %d last_unacked = %u\n",
+ sdp_dbg_data(sk_ssk(ssk), "send_now = %d last_unacked = %u\n",
send_now, ssk->nagle_last_unacked);
return send_now;
void sdp_nagle_timeout(unsigned long data)
{
struct sdp_sock *ssk = (struct sdp_sock *)data;
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
SDPSTATS_COUNTER_INC(nagle_timer);
sdp_dbg_data(sk, "last_unacked = %u\n", ssk->nagle_last_unacked);
/* TODO: nonagle? */
struct sk_buff *skb;
int post_count = 0;
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
if (unlikely(!ssk->id)) {
if (sk->sk_send_head) {
static void sdp_destroy_qp(struct sdp_sock *ssk)
{
- sdp_dbg(&ssk->isk.sk, "destroying qp\n");
- sdp_prf(&ssk->isk.sk, NULL, "destroying qp");
+ sdp_dbg(sk_ssk(ssk), "destroying qp\n");
+ sdp_prf(sk_ssk(ssk), NULL, "destroying qp");
- sdp_add_to_history(&ssk->isk.sk, __func__);
+ sdp_add_to_history(sk_ssk(ssk), __func__);
ssk->qp_active = 0;
if (ssk->qp) {
void sdp_set_default_moderation(struct sdp_sock *ssk)
{
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
struct sdp_moderation *mod = &ssk->auto_mod;
int rx_buf_size;
moder_time = mod->moder_time;
}
- sdp_dbg_data(&ssk->isk.sk, "tx rate:%lu rx_rate:%lu\n",
+ sdp_dbg_data(sk_ssk(ssk), "tx rate:%lu rx_rate:%lu\n",
tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
- sdp_dbg_data(&ssk->isk.sk, "Rx moder_time changed from:%d to %d "
+ sdp_dbg_data(sk_ssk(ssk), "Rx moder_time changed from:%d to %d "
"period:%lu [jiff] packets:%lu avg_pkt_size:%lu "
"rate:%lu [p/s])\n",
mod->last_moder_time, moder_time, period, packets,
mod->last_moder_time = moder_time;
err = ib_modify_cq(ssk->rx_ring.cq, mod->moder_cnt, moder_time);
if (unlikely(err)) {
- sdp_dbg_data(&ssk->isk.sk,
+ sdp_dbg_data(sk_ssk(ssk),
"Failed modifying moderation for cq");
}
SDPSTATS_COUNTER_INC(rx_cq_modified);
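For reference, the auto-moderation path above boils down to: derive a packet rate over the last sampling period, map it to a new interrupt-coalescing time, and push it to the CQ with ib_modify_cq(). A minimal sketch of that shape, using made-up rate thresholds and interval values rather than the policy this driver actually computes:

/* Sketch only - illustrative thresholds, not the driver's real policy. */
static void sdp_moderation_sketch(struct sdp_sock *ssk, unsigned long rx_rate)
{
	/* High packet rate: coalesce harder; low rate: favor latency. */
	u16 moder_time = rx_rate > 10000 ? 64 : 16;	/* usec, hypothetical */
	int err = ib_modify_cq(ssk->rx_ring.cq, ssk->auto_mod.moder_cnt,
			       moder_time);

	if (unlikely(err))
		sdp_dbg_data(sk_ssk(ssk),
			     "Failed modifying moderation for cq");
}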
static inline void sdp_kill_id_and_release(struct sdp_sock *ssk)
{
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
struct rdma_cm_id *id;
lock_sock(sk);
static inline void sdp_start_dreq_wait_timeout(struct sdp_sock *ssk, int timeo)
{
- sdp_dbg(&ssk->isk.sk, "Starting dreq wait timeout\n");
+ sdp_dbg(sk_ssk(ssk), "Starting dreq wait timeout\n");
queue_delayed_work(sdp_wq, &ssk->dreq_wait_work, timeo);
ssk->dreq_wait_timeout = 1;
{
struct sdp_sock *ssk =
container_of(work, struct sdp_sock, cma_timewait_work.work);
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
lock_sock(sk);
if (!ssk->cma_timewait_timeout) {
list_del_init(&newssk->accept_queue);
newssk->parent = NULL;
sk_acceptq_removed(sk);
- newsk = &newssk->isk.sk;
+ newsk = sk_ssk(newssk);
out:
release_sock(sk);
if (newsk) {
lock_sock(newsk);
if (newssk->rx_ring.cq) {
newssk->poll_cq = 1;
- sdp_arm_rx_cq(&newssk->isk.sk);
+ sdp_arm_rx_cq(sk_ssk(newssk));
}
release_sock(newsk);
}
if (!ssk->dreq_wait_timeout)
return;
- sdp_dbg(&ssk->isk.sk, "cancelling dreq wait timeout\n");
+ sdp_dbg(sk_ssk(ssk), "cancelling dreq wait timeout\n");
ssk->dreq_wait_timeout = 0;
if (cancel_delayed_work_sync(&ssk->dreq_wait_work)) {
/* The timeout hasn't expired yet - need to drop the ref count */
- sock_put(&ssk->isk.sk, SOCK_REF_DREQ_TO);
+ sock_put(sk_ssk(ssk), SOCK_REF_DREQ_TO);
}
}
{
struct sdp_sock *ssk = container_of(work, struct sdp_sock,
destroy_work);
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
sdp_dbg(sk, "%s: refcnt %d\n", __func__, atomic_read(&sk->sk_refcnt));
lock_sock(sk);
{
struct sdp_sock *ssk =
container_of(work, struct sdp_sock, dreq_wait_work.work);
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
if (!ssk->dreq_wait_timeout)
goto out;
/* Wait for in-flight sends; should be quick */
if (bz->busy) {
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
unsigned long timeout = jiffies + SDP_BZCOPY_POLL_TIMEOUT;
while (jiffies < timeout) {
return ERR_PTR(-ENOMEM);
}
- rc = sdp_get_pages(&ssk->isk.sk, bz->pages, bz->page_cnt,
+ rc = sdp_get_pages(sk_ssk(ssk), bz->pages, bz->page_cnt,
(unsigned long)base);
if (unlikely(rc))
*/
int sdp_tx_wait_memory(struct sdp_sock *ssk, long *timeo_p, int *credits_needed)
{
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
int err = 0;
long vm_wait = 0;
long current_timeo = *timeo_p;
void sdp_urg(struct sdp_sock *ssk, struct sk_buff *skb)
{
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
u8 tmp;
u32 ptr = skb->len - 1;
list_for_each_entry(ssk, &sock_list, sock_list) {
if (ssk->ib_device == device && !ssk->id_destroyed_already) {
spin_unlock_irq(&sock_list_lock);
- sk = &ssk->isk.sk;
+ sk = sk_ssk(ssk);
sdp_add_to_history(sk, __func__);
lock_sock(sk);
/* ssk->id must be lock-protected,
list_for_each_entry(ssk, &sock_list, sock_list) {
if (ssk->ib_device == device) {
spin_unlock_irq(&sock_list_lock);
- sk = &ssk->isk.sk;
+ sk = sk_ssk(ssk);
lock_sock(sk);
sdp_abort_srcavail(sk);
/* Now, allocate and repost recv */
/* TODO: allocate from cache */
- if (unlikely(ssk->isk.sk.sk_allocation)) {
- skb = sdp_stream_alloc_skb(&ssk->isk.sk, SDP_SKB_HEAD_SIZE,
- ssk->isk.sk.sk_allocation);
- gfp_page = ssk->isk.sk.sk_allocation | __GFP_HIGHMEM;
+ if (unlikely(sk_ssk(ssk)->sk_allocation)) {
+ skb = sdp_stream_alloc_skb(sk_ssk(ssk), SDP_SKB_HEAD_SIZE,
+ sk_ssk(ssk)->sk_allocation);
+ gfp_page = sk_ssk(ssk)->sk_allocation | __GFP_HIGHMEM;
} else {
- skb = sdp_stream_alloc_skb(&ssk->isk.sk, SDP_SKB_HEAD_SIZE,
+ skb = sdp_stream_alloc_skb(sk_ssk(ssk), SDP_SKB_HEAD_SIZE,
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
if (unlikely(!skb))
return -1;
- sdp_prf(&ssk->isk.sk, skb, "Posting skb");
+ sdp_prf(sk_ssk(ssk), skb, "Posting skb");
h = (struct sdp_bsdh *)skb->head;
rx_req = ssk->rx_ring.buffer + (id & (SDP_RX_SIZE - 1));
rx_wr.num_sge = frags + 1;
rc = ib_post_recv(ssk->qp, &rx_wr, &bad_wr);
if (unlikely(rc)) {
- sdp_warn(&ssk->isk.sk, "ib_post_recv failed. status %d\n", rc);
+ sdp_warn(sk_ssk(ssk), "ib_post_recv failed. status %d\n", rc);
goto err;
}
atomic_add(pages_alloced, &sdp_current_mem_usage);
sdp_cleanup_sdp_buf(ssk, rx_req, SDP_SKB_HEAD_SIZE, DMA_FROM_DEVICE);
sdp_free_skb(skb);
- sdp_reset(&ssk->isk.sk);
+ sdp_reset(sk_ssk(ssk));
return -1;
}
static inline int sdp_post_recvs_needed(struct sdp_sock *ssk)
{
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
int buffer_size = SDP_SKB_HEAD_SIZE + ssk->recv_frags * PAGE_SIZE;
unsigned long max_bytes = ssk->rcvbuf_scale;
unsigned long bytes_in_process;
goto out;
}
- sk_mem_reclaim(&ssk->isk.sk);
+ sk_mem_reclaim(sk_ssk(ssk));
if (sdp_post_recvs_needed(ssk))
goto again;
out:
- sk_mem_reclaim(&ssk->isk.sk);
+ sk_mem_reclaim(sk_ssk(ssk));
}
static inline struct sk_buff *sdp_sock_queue_rcv_skb(struct sock *sk,
rx_sa->skb = skb;
if (ssk->tx_sa) {
- sdp_dbg_data(&ssk->isk.sk, "got RX SrcAvail while waiting "
+ sdp_dbg_data(sk_ssk(ssk), "got RX SrcAvail while waiting "
"for TX SrcAvail. waking up TX SrcAvail"
"to be aborted\n");
wake_up(sk->sk_sleep);
struct sk_buff *skb;
if (unlikely(id != ring_tail(ssk->rx_ring))) {
- sdp_warn(&ssk->isk.sk, "Bogus recv completion id %d tail %d\n",
+ sdp_warn(sk_ssk(ssk), "Bogus recv completion id %d tail %d\n",
id, ring_tail(ssk->rx_ring));
return NULL;
}
static int sdp_process_rx_ctl_skb(struct sdp_sock *ssk, struct sk_buff *skb)
{
struct sdp_bsdh *h = (struct sdp_bsdh *)skb_transport_header(skb);
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
sdp_dbg_data(sk, "Handling %s\n", mid2str(h->mid));
sdp_prf(sk, skb, "Handling %s", mid2str(h->mid));
static int sdp_process_rx_skb(struct sdp_sock *ssk, struct sk_buff *skb)
{
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
int frags;
struct sdp_bsdh *h;
int pagesz, i;
if (!before(mseq_ack, ssk->nagle_last_unacked))
ssk->nagle_last_unacked = 0;
- sdp_prf1(&ssk->isk.sk, skb, "RX: %s +%d c:%d->%d mseq:%d ack:%d",
+ sdp_prf1(sk_ssk(ssk), skb, "RX: %s +%d c:%d->%d mseq:%d ack:%d",
mid2str(h->mid), ntohs(h->bufs), credits_before,
tx_credits(ssk), ntohl(h->mseq), ntohl(h->mseq_ack));
{
struct sk_buff *skb;
struct sdp_bsdh *h;
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
int mseq;
skb = sdp_recv_completion(ssk, wc->wr_id, wc->byte_len);
#else
skb->tail = skb->head + skb_headlen(skb);
#endif
- SDP_DUMP_PACKET(&ssk->isk.sk, "RX", skb, h);
+ SDP_DUMP_PACKET(sk_ssk(ssk), "RX", skb, h);
skb_reset_transport_header(skb);
ssk->rx_packets++;
/* like sk_stream_write_space - except it measures remote credits */
static void sdp_bzcopy_write_space(struct sdp_sock *ssk)
{
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
struct socket *sock = sk->sk_socket;
if (tx_credits(ssk) < ssk->min_bufs || !sock)
} while (n == SDP_NUM_WC);
if (wc_processed) {
- sdp_prf(&ssk->isk.sk, NULL, "processed %d", wc_processed);
+ sdp_prf(sk_ssk(ssk), NULL, "processed %d", wc_processed);
sdp_bzcopy_write_space(ssk);
}
{
struct sdp_sock *ssk = container_of(work, struct sdp_sock,
rx_comp_work);
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
SDPSTATS_COUNTER_INC(rx_wq);
void sdp_do_posts(struct sdp_sock *ssk)
{
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
int xmit_poll_force;
struct sk_buff *skb;
struct sdp_sock *ssk = (struct sdp_sock *)data;
SDPSTATS_COUNTER_INC(rx_cq_arm_timer);
- sdp_arm_rx_cq(&ssk->isk.sk);
+ sdp_arm_rx_cq(sk_ssk(ssk));
}
int sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device)
ssk->rx_ring.buffer = kzalloc(
sizeof *ssk->rx_ring.buffer * SDP_RX_SIZE, GFP_KERNEL);
if (!ssk->rx_ring.buffer) {
- sdp_warn(&ssk->isk.sk,
+ sdp_warn(sk_ssk(ssk),
"Unable to allocate RX Ring size %zd.\n",
sizeof(*ssk->rx_ring.buffer) * SDP_RX_SIZE);
}
rx_cq = ib_create_cq(device, sdp_rx_irq, sdp_rx_cq_event_handler,
- &ssk->isk.sk, SDP_RX_SIZE, IB_CQ_VECTOR_LEAST_ATTACHED);
+ sk_ssk(ssk), SDP_RX_SIZE, IB_CQ_VECTOR_LEAST_ATTACHED);
if (IS_ERR(rx_cq)) {
rc = PTR_ERR(rx_cq);
- sdp_warn(&ssk->isk.sk, "Unable to allocate RX CQ: %d.\n", rc);
+ sdp_warn(sk_ssk(ssk), "Unable to allocate RX CQ: %d.\n", rc);
goto err_cq;
}
INIT_WORK(&ssk->rx_comp_work, sdp_rx_comp_work);
setup_timer(&ssk->rx_ring.cq_arm_timer, sdp_arm_cq_timer,
(unsigned long)ssk);
- sdp_arm_rx_cq(&ssk->isk.sk);
+ sdp_arm_rx_cq(sk_ssk(ssk));
return 0;
if (ssk->rx_ring.cq) {
if (ib_destroy_cq(ssk->rx_ring.cq)) {
- sdp_warn(&ssk->isk.sk, "destroy cq(%p) failed\n",
+ sdp_warn(sk_ssk(ssk), "destroy cq(%p) failed\n",
ssk->rx_ring.cq);
} else {
ssk->rx_ring.cq = NULL;
{
int wc_processed = 0;
- sdp_prf(&ssk->isk.sk, NULL, "%s", __func__);
+ sdp_prf(sk_ssk(ssk), NULL, "%s", __func__);
/* If we don't have a pending timer, set one up to catch our recent
post in case the interface becomes idle */
- if (likely(ssk->qp_active && ssk->isk.sk.sk_state != TCP_CLOSE) &&
+ if (likely(ssk->qp_active && sk_ssk(ssk)->sk_state != TCP_CLOSE) &&
!timer_pending(&ssk->tx_ring.timer)) {
mod_timer(&ssk->tx_ring.timer, jiffies + SDP_TX_POLL_TIMEOUT);
}
if (unlikely(h->mid == SDP_MID_SRCAVAIL)) {
struct tx_srcavail_state *tx_sa = TX_SRCAVAIL_STATE(skb);
if (ssk->tx_sa != tx_sa) {
- sdp_dbg_data(&ssk->isk.sk, "SrcAvail cancelled "
+ sdp_dbg_data(sk_ssk(ssk), "SrcAvail cancelled "
"before being sent!\n");
SDP_WARN_ON(1);
sdp_free_skb(skb);
h->mseq = htonl(mseq);
h->mseq_ack = htonl(mseq_ack(ssk));
- sdp_prf(&ssk->isk.sk, skb, "TX: %s bufs: %d mseq:%ld ack:%d c: %d",
+ sdp_prf(sk_ssk(ssk), skb, "TX: %s bufs: %d mseq:%ld ack:%d c: %d",
mid2str(h->mid), rx_ring_posted(ssk), mseq,
ntohl(h->mseq_ack), tx_credits(ssk));
- SDP_DUMP_PACKET(&ssk->isk.sk, "TX", skb, h);
+ SDP_DUMP_PACKET(sk_ssk(ssk), "TX", skb, h);
tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)];
tx_req->skb = skb;
rc = ib_post_send(ssk->qp, &tx_wr, &bad_wr);
if (unlikely(rc)) {
- sdp_dbg(&ssk->isk.sk,
+ sdp_dbg(sk_ssk(ssk),
"ib_post_send failed with status %d.\n", rc);
sdp_cleanup_sdp_buf(ssk, tx_req, skb->len - skb->data_len, DMA_TO_DEVICE);
- sdp_set_error(&ssk->isk.sk, -ECONNRESET);
+ sdp_set_error(sk_ssk(ssk), -ECONNRESET);
goto err;
}
static inline void sdp_process_tx_wc(struct sdp_sock *ssk, struct ib_wc *wc)
{
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
if (likely(wc->wr_id & SDP_OP_SEND)) {
struct sk_buff *skb;
int wc_processed = 0;
if (!ssk->tx_ring.cq) {
- sdp_dbg(&ssk->isk.sk, "tx irq on destroyed tx_cq\n");
+ sdp_dbg(sk_ssk(ssk), "tx irq on destroyed tx_cq\n");
return 0;
}
} while (n == SDP_NUM_WC);
if (wc_processed) {
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
sdp_prf1(sk, NULL, "Waking sendmsg. inflight=%d",
(u32) tx_ring_posted(ssk));
- sk_stream_write_space(&ssk->isk.sk);
+ sk_stream_write_space(sk_ssk(ssk));
if (sk->sk_write_pending &&
test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
tx_ring_posted(ssk)) {
/* a write is pending and still no room in tx queue,
* arm tx cq
*/
- sdp_prf(&ssk->isk.sk, NULL, "pending tx - rearming");
+ sdp_prf(sk_ssk(ssk), NULL, "pending tx - rearming");
sdp_arm_tx_cq(sk);
}
*/
static int sdp_tx_handler_select(struct sdp_sock *ssk)
{
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
if (sk->sk_write_pending) {
/* Do the TX posts from sender context */
static void sdp_poll_tx_timeout(unsigned long data)
{
struct sdp_sock *ssk = (struct sdp_sock *)data;
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
u32 inflight, wc_processed;
- sdp_prf1(&ssk->isk.sk, NULL, "TX timeout: inflight=%d, head=%d tail=%d",
+ sdp_prf1(sk_ssk(ssk), NULL, "TX timeout: inflight=%d, head=%d tail=%d",
(u32) tx_ring_posted(ssk),
ring_head(ssk->tx_ring), ring_tail(ssk->tx_ring));
/* Only process if the socket is not in use */
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
- sdp_prf(&ssk->isk.sk, NULL, "TX comp: socket is busy");
+ sdp_prf(sk_ssk(ssk), NULL, "TX comp: socket is busy");
if (sdp_tx_handler_select(ssk) && sk->sk_state != TCP_CLOSE &&
likely(ssk->qp_active)) {
}
inflight = (u32) tx_ring_posted(ssk);
- sdp_prf1(&ssk->isk.sk, NULL, "finished tx proccessing. inflight = %d",
+ sdp_prf1(sk_ssk(ssk), NULL, "finished tx processing. inflight = %d",
tx_ring_posted(ssk));
/* If there are still packets in flight and the timer has not already
int rc;
struct ib_send_wr wr, *bad_wr;
- sdp_dbg(&ssk->isk.sk, "%s\n", __func__);
+ sdp_dbg(sk_ssk(ssk), "%s\n", __func__);
memset(&wr, 0, sizeof(wr));
rc = ib_post_send(ssk->qp, &wr, &bad_wr);
if (rc) {
- sdp_dbg(&ssk->isk.sk,
+ sdp_dbg(sk_ssk(ssk),
"ib_post_keepalive failed with status %d.\n", rc);
- sdp_set_error(&ssk->isk.sk, -ECONNRESET);
+ sdp_set_error(sk_ssk(ssk), -ECONNRESET);
}
sdp_cnt(sdp_keepalive_probes_sent);
sizeof *ssk->tx_ring.buffer * SDP_TX_SIZE, GFP_KERNEL);
if (!ssk->tx_ring.buffer) {
rc = -ENOMEM;
- sdp_warn(&ssk->isk.sk, "Can't allocate TX Ring size %zd.\n",
+ sdp_warn(sk_ssk(ssk), "Can't allocate TX Ring size %zd.\n",
sizeof(*ssk->tx_ring.buffer) * SDP_TX_SIZE);
goto out;
}
tx_cq = ib_create_cq(device, sdp_tx_irq, sdp_tx_cq_event_handler,
- &ssk->isk.sk, SDP_TX_SIZE, IB_CQ_VECTOR_LEAST_ATTACHED);
+ sk_ssk(ssk), SDP_TX_SIZE, IB_CQ_VECTOR_LEAST_ATTACHED);
if (IS_ERR(tx_cq)) {
rc = PTR_ERR(tx_cq);
- sdp_warn(&ssk->isk.sk, "Unable to allocate TX CQ: %d.\n", rc);
+ sdp_warn(sk_ssk(ssk), "Unable to allocate TX CQ: %d.\n", rc);
goto err_cq;
}
if (ssk->tx_ring.cq) {
if (ib_destroy_cq(ssk->tx_ring.cq)) {
- sdp_warn(&ssk->isk.sk, "destroy cq(%p) failed\n",
+ sdp_warn(sk_ssk(ssk), "destroy cq(%p) failed\n",
ssk->tx_ring.cq);
} else {
ssk->tx_ring.cq = NULL;
struct sdp_sock *ssk = sdp_sk(sk);
struct sk_buff *skb;
- sdp_dbg_data(&ssk->isk.sk, "Posting srcavail cancel\n");
+ sdp_dbg_data(sk_ssk(ssk), "Posting srcavail cancel\n");
skb = sdp_alloc_skb_srcavail_cancel(sk, 0);
if (unlikely(!skb))
static int sdp_wait_rdmardcompl(struct sdp_sock *ssk, long *timeo_p,
int ignore_signals)
{
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
int err = 0;
long current_timeo = *timeo_p;
struct tx_srcavail_state *tx_sa = ssk->tx_sa;
tx_sa->abort_flags &&
ssk->rx_sa &&
(tx_sa->bytes_acked < tx_sa->bytes_sent));
- sdp_prf(&ssk->isk.sk, NULL, "woke up sleepers");
+ sdp_prf(sk_ssk(ssk), NULL, "woke up sleepers");
posts_handler_get(ssk);
static int sdp_wait_rdma_wr_finished(struct sdp_sock *ssk)
{
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
long timeo = SDP_RDMA_READ_TIMEOUT;
int rc = 0;
DEFINE_WAIT(wait);
!ssk->tx_ring.rdma_inflight->busy ||
!ssk->qp_active);
sdp_prf1(sk, NULL, "Woke up");
- sdp_dbg_data(&ssk->isk.sk, "woke up sleepers\n");
+ sdp_dbg_data(sk_ssk(ssk), "woke up sleepers\n");
posts_handler_get(ssk);
}
}
void sdp_handle_sendsm(struct sdp_sock *ssk, u32 mseq_ack)
{
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
unsigned long flags;
spin_lock_irqsave(&ssk->tx_sa_lock, flags);
void sdp_handle_rdma_read_compl(struct sdp_sock *ssk, u32 mseq_ack,
u32 bytes_completed)
{
- struct sock *sk = &ssk->isk.sk;
+ struct sock *sk = sk_ssk(ssk);
unsigned long flags;
sdp_prf1(sk, NULL, "RdmaRdCompl ssk=%p tx_sa=%p", ssk, ssk->tx_sa);
while (!iov->iov_len)
++iov;
- sdp_dbg_data(&ssk->isk.sk, "preparing RDMA read."
+ sdp_dbg_data(sk_ssk(ssk), "preparing RDMA read."
" len: 0x%x. buffer len: 0x%zx\n", len, iov->iov_len);
sock_hold(sk, SOCK_REF_RDMA_RD);
rc = sdp_post_rdma_read(sk, rx_sa, offset);
if (unlikely(rc)) {
sdp_warn(sk, "ib_post_send failed with status %d.\n", rc);
- sdp_set_error(&ssk->isk.sk, -ECONNRESET);
+ sdp_set_error(sk_ssk(ssk), -ECONNRESET);
goto err_post_send;
}
return 0;
}
- sock_hold(&ssk->isk.sk, SOCK_REF_ZCOPY);
+ sock_hold(sk_ssk(ssk), SOCK_REF_ZCOPY);
SDPSTATS_COUNTER_INC(sendmsg_zcopy_segment);
/* Ok commence sending. */
sdp_prf1(sk, NULL, "sdp_sendmsg_zcopy end rc: %d copied: %d", rc, copied);
- sock_put(&ssk->isk.sk, SOCK_REF_ZCOPY);
+ sock_put(sk_ssk(ssk), SOCK_REF_ZCOPY);
if (rc < 0 && rc != -EAGAIN && rc != -ETIME)
return rc;