From: Amir Vadai
Date: Tue, 2 Nov 2010 10:43:21 +0000 (+0200)
Subject: sdp: use a macro to convert ssk into sk
X-Git-Tag: v4.1.12-92~264^2~5^2~66
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=c999ad1f54af8f137528d5cbcbacc404b066ac95;p=users%2Fjedix%2Flinux-maple.git

sdp: use a macro to convert ssk into sk

Instead of accessing &ssk->isk.sk directly, convert the SDP socket (ssk)
into a struct sock by casting it, through the new macro sk_ssk(ssk).
(See the note after the diff for the layout assumption that makes the
cast equivalent.)

Signed-off-by: Amir Vadai
---
diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
index 41cbebd6b0cf..f83607ac00f9 100644
--- a/drivers/infiniband/ulp/sdp/sdp.h
+++ b/drivers/infiniband/ulp/sdp/sdp.h
@@ -12,6 +12,8 @@ #include #include "sdp_dbg.h" +#define sk_ssk(ssk) ((struct sock *)ssk) + /* Interval between sucessive polls in the Tx routine when polling is used instead of interrupts (in per-core Tx rings) - should be power of 2 */ #define SDP_TX_POLL_MODER 16
@@ -1028,7 +1030,7 @@ static inline void sdp_schedule_arm_rx_cq(struct sdp_sock *ssk, else { /* There is no point of setting up a timer for an immediate * cq-arming, better arm it now. */ - sdp_arm_rx_cq(&ssk->isk.sk); + sdp_arm_rx_cq(sk_ssk(ssk)); } }
diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
index 4de8c1acdbbe..4fb195350a31 100644
--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
@@ -122,7 +122,7 @@ static inline int sdp_nagle_off(struct sdp_sock *ssk, struct sk_buff *skb) unlikely(h->mid != SDP_MID_DATA) || (ssk->nonagle & TCP_NAGLE_OFF) || !ssk->nagle_last_unacked || - skb->next != (struct sk_buff *)&ssk->isk.sk.sk_write_queue || + skb->next != (struct sk_buff *)&sk_ssk(ssk)->sk_write_queue || skb->len + sizeof(struct sdp_bsdh) >= ssk->xmit_size_goal || (SDP_SKB_CB(skb)->flags & TCPCB_FLAG_PSH) || (SDP_SKB_CB(skb)->flags & TCPCB_FLAG_URG);
@@ -134,10 +134,10 @@ static inline int sdp_nagle_off(struct sdp_sock *ssk, struct sk_buff *skb) if (!timer_pending(&ssk->nagle_timer) && ssk->qp_active) { mod_timer(&ssk->nagle_timer, jiffies + SDP_NAGLE_TIMEOUT); - sdp_dbg_data(&ssk->isk.sk, "Starting nagle timer\n"); + sdp_dbg_data(sk_ssk(ssk), "Starting nagle timer\n"); } } - sdp_dbg_data(&ssk->isk.sk, "send_now = %d last_unacked = %u\n", + sdp_dbg_data(sk_ssk(ssk), "send_now = %d last_unacked = %u\n", send_now, ssk->nagle_last_unacked); return send_now;
@@ -146,7 +146,7 @@ static inline int sdp_nagle_off(struct sdp_sock *ssk, struct sk_buff *skb) void sdp_nagle_timeout(unsigned long data) { struct sdp_sock *ssk = (struct sdp_sock *)data; - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); SDPSTATS_COUNTER_INC(nagle_timer); sdp_dbg_data(sk, "last_unacked = %u\n", ssk->nagle_last_unacked);
@@ -191,7 +191,7 @@ void sdp_post_sends(struct sdp_sock *ssk, gfp_t gfp) /* TODO: nonagle?
*/ struct sk_buff *skb; int post_count = 0; - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); if (unlikely(!ssk->id)) { if (sk->sk_send_head) { diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c index 62a5a46c6807..378ac73e9b0e 100644 --- a/drivers/infiniband/ulp/sdp/sdp_main.c +++ b/drivers/infiniband/ulp/sdp/sdp_main.c @@ -185,10 +185,10 @@ static int sdp_get_port(struct sock *sk, unsigned short snum) static void sdp_destroy_qp(struct sdp_sock *ssk) { - sdp_dbg(&ssk->isk.sk, "destroying qp\n"); - sdp_prf(&ssk->isk.sk, NULL, "destroying qp"); + sdp_dbg(sk_ssk(ssk), "destroying qp\n"); + sdp_prf(sk_ssk(ssk), NULL, "destroying qp"); - sdp_add_to_history(&ssk->isk.sk, __func__); + sdp_add_to_history(sk_ssk(ssk), __func__); ssk->qp_active = 0; if (ssk->qp) { @@ -276,7 +276,7 @@ void sdp_start_keepalive_timer(struct sock *sk) void sdp_set_default_moderation(struct sdp_sock *ssk) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); struct sdp_moderation *mod = &ssk->auto_mod; int rx_buf_size; @@ -397,10 +397,10 @@ static void sdp_auto_moderation(struct sdp_sock *ssk) moder_time = mod->moder_time; } - sdp_dbg_data(&ssk->isk.sk, "tx rate:%lu rx_rate:%lu\n", + sdp_dbg_data(sk_ssk(ssk), "tx rate:%lu rx_rate:%lu\n", tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period); - sdp_dbg_data(&ssk->isk.sk, "Rx moder_time changed from:%d to %d " + sdp_dbg_data(sk_ssk(ssk), "Rx moder_time changed from:%d to %d " "period:%lu [jiff] packets:%lu avg_pkt_size:%lu " "rate:%lu [p/s])\n", mod->last_moder_time, moder_time, period, packets, @@ -410,7 +410,7 @@ static void sdp_auto_moderation(struct sdp_sock *ssk) mod->last_moder_time = moder_time; err = ib_modify_cq(ssk->rx_ring.cq, mod->moder_cnt, moder_time); if (unlikely(err)) { - sdp_dbg_data(&ssk->isk.sk, + sdp_dbg_data(sk_ssk(ssk), "Failed modifying moderation for cq"); } SDPSTATS_COUNTER_INC(rx_cq_modified); @@ -511,7 +511,7 @@ static void sdp_destroy_resources(struct sock *sk) static inline void sdp_kill_id_and_release(struct sdp_sock *ssk) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); struct rdma_cm_id *id; lock_sock(sk); @@ -564,7 +564,7 @@ done: static inline void sdp_start_dreq_wait_timeout(struct sdp_sock *ssk, int timeo) { - sdp_dbg(&ssk->isk.sk, "Starting dreq wait timeout\n"); + sdp_dbg(sk_ssk(ssk), "Starting dreq wait timeout\n"); queue_delayed_work(sdp_wq, &ssk->dreq_wait_work, timeo); ssk->dreq_wait_timeout = 1; @@ -621,7 +621,7 @@ static void sdp_cma_timewait_timeout_work(struct work_struct *work) { struct sdp_sock *ssk = container_of(work, struct sdp_sock, cma_timewait_work.work); - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); lock_sock(sk); if (!ssk->cma_timewait_timeout) { @@ -932,14 +932,14 @@ static struct sock *sdp_accept(struct sock *sk, int flags, int *err) list_del_init(&newssk->accept_queue); newssk->parent = NULL; sk_acceptq_removed(sk); - newsk = &newssk->isk.sk; + newsk = sk_ssk(newssk); out: release_sock(sk); if (newsk) { lock_sock(newsk); if (newssk->rx_ring.cq) { newssk->poll_cq = 1; - sdp_arm_rx_cq(&newssk->isk.sk); + sdp_arm_rx_cq(sk_ssk(newssk)); } release_sock(newsk); } @@ -1012,12 +1012,12 @@ void sdp_cancel_dreq_wait_timeout(struct sdp_sock *ssk) if (!ssk->dreq_wait_timeout) return; - sdp_dbg(&ssk->isk.sk, "cancelling dreq wait timeout\n"); + sdp_dbg(sk_ssk(ssk), "cancelling dreq wait timeout\n"); ssk->dreq_wait_timeout = 0; if (cancel_delayed_work_sync(&ssk->dreq_wait_work)) { /* The timeout hasn't reached - 
need to clean ref count */ - sock_put(&ssk->isk.sk, SOCK_REF_DREQ_TO); + sock_put(sk_ssk(ssk), SOCK_REF_DREQ_TO); } } @@ -1025,7 +1025,7 @@ static void sdp_destroy_work(struct work_struct *work) { struct sdp_sock *ssk = container_of(work, struct sdp_sock, destroy_work); - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); sdp_dbg(sk, "%s: refcnt %d\n", __func__, atomic_read(&sk->sk_refcnt)); lock_sock(sk); @@ -1062,7 +1062,7 @@ static void sdp_dreq_wait_timeout_work(struct work_struct *work) { struct sdp_sock *ssk = container_of(work, struct sdp_sock, dreq_wait_work.work); - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); if (!ssk->dreq_wait_timeout) goto out; @@ -1499,7 +1499,7 @@ static inline struct bzcopy_state *sdp_bz_cleanup(struct bzcopy_state *bz) /* Wait for in-flight sends; should be quick */ if (bz->busy) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); unsigned long timeout = jiffies + SDP_BZCOPY_POLL_TIMEOUT; while (jiffies < timeout) { @@ -1645,7 +1645,7 @@ static struct bzcopy_state *sdp_bz_setup(struct sdp_sock *ssk, return ERR_PTR(-ENOMEM); } - rc = sdp_get_pages(&ssk->isk.sk, bz->pages, bz->page_cnt, + rc = sdp_get_pages(sk_ssk(ssk), bz->pages, bz->page_cnt, (unsigned long)base); if (unlikely(rc)) @@ -1820,7 +1820,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb, */ int sdp_tx_wait_memory(struct sdp_sock *ssk, long *timeo_p, int *credits_needed) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); int err = 0; long vm_wait = 0; long current_timeo = *timeo_p; @@ -2679,7 +2679,7 @@ static void sdp_enter_memory_pressure(struct sock *sk) void sdp_urg(struct sdp_sock *ssk, struct sk_buff *skb) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); u8 tmp; u32 ptr = skb->len - 1; @@ -2861,7 +2861,7 @@ do_next: list_for_each_entry(ssk, &sock_list, sock_list) { if (ssk->ib_device == device && !ssk->id_destroyed_already) { spin_unlock_irq(&sock_list_lock); - sk = &ssk->isk.sk; + sk = sk_ssk(ssk); sdp_add_to_history(sk, __func__); lock_sock(sk); /* ssk->id must be lock-protected, @@ -2885,7 +2885,7 @@ kill_socks: list_for_each_entry(ssk, &sock_list, sock_list) { if (ssk->ib_device == device) { spin_unlock_irq(&sock_list_lock); - sk = &ssk->isk.sk; + sk = sk_ssk(ssk); lock_sock(sk); sdp_abort_srcavail(sk); diff --git a/drivers/infiniband/ulp/sdp/sdp_rx.c b/drivers/infiniband/ulp/sdp/sdp_rx.c index f90b88304f24..1bba5db535ec 100644 --- a/drivers/infiniband/ulp/sdp/sdp_rx.c +++ b/drivers/infiniband/ulp/sdp/sdp_rx.c @@ -163,12 +163,12 @@ static int sdp_post_recv(struct sdp_sock *ssk) /* Now, allocate and repost recv */ /* TODO: allocate from cache */ - if (unlikely(ssk->isk.sk.sk_allocation)) { - skb = sdp_stream_alloc_skb(&ssk->isk.sk, SDP_SKB_HEAD_SIZE, - ssk->isk.sk.sk_allocation); - gfp_page = ssk->isk.sk.sk_allocation | __GFP_HIGHMEM; + if (unlikely(sk_ssk(ssk)->sk_allocation)) { + skb = sdp_stream_alloc_skb(sk_ssk(ssk), SDP_SKB_HEAD_SIZE, + sk_ssk(ssk)->sk_allocation); + gfp_page = sk_ssk(ssk)->sk_allocation | __GFP_HIGHMEM; } else { - skb = sdp_stream_alloc_skb(&ssk->isk.sk, SDP_SKB_HEAD_SIZE, + skb = sdp_stream_alloc_skb(sk_ssk(ssk), SDP_SKB_HEAD_SIZE, GFP_KERNEL); gfp_page = GFP_HIGHUSER; } @@ -176,7 +176,7 @@ static int sdp_post_recv(struct sdp_sock *ssk) if (unlikely(!skb)) return -1; - sdp_prf(&ssk->isk.sk, skb, "Posting skb"); + sdp_prf(sk_ssk(ssk), skb, "Posting skb"); h = (struct sdp_bsdh *)skb->head; rx_req = ssk->rx_ring.buffer + (id & (SDP_RX_SIZE - 1)); @@ 
-235,7 +235,7 @@ static int sdp_post_recv(struct sdp_sock *ssk) rx_wr.num_sge = frags + 1; rc = ib_post_recv(ssk->qp, &rx_wr, &bad_wr); if (unlikely(rc)) { - sdp_warn(&ssk->isk.sk, "ib_post_recv failed. status %d\n", rc); + sdp_warn(sk_ssk(ssk), "ib_post_recv failed. status %d\n", rc); goto err; } @@ -249,13 +249,13 @@ err: atomic_add(pages_alloced, &sdp_current_mem_usage); sdp_cleanup_sdp_buf(ssk, rx_req, SDP_SKB_HEAD_SIZE, DMA_FROM_DEVICE); sdp_free_skb(skb); - sdp_reset(&ssk->isk.sk); + sdp_reset(sk_ssk(ssk)); return -1; } static inline int sdp_post_recvs_needed(struct sdp_sock *ssk) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); int buffer_size = SDP_SKB_HEAD_SIZE + ssk->recv_frags * PAGE_SIZE; unsigned long max_bytes = ssk->rcvbuf_scale; unsigned long bytes_in_process; @@ -291,12 +291,12 @@ again: goto out; } - sk_mem_reclaim(&ssk->isk.sk); + sk_mem_reclaim(sk_ssk(ssk)); if (sdp_post_recvs_needed(ssk)) goto again; out: - sk_mem_reclaim(&ssk->isk.sk); + sk_mem_reclaim(sk_ssk(ssk)); } static inline struct sk_buff *sdp_sock_queue_rcv_skb(struct sock *sk, @@ -335,7 +335,7 @@ static inline struct sk_buff *sdp_sock_queue_rcv_skb(struct sock *sk, rx_sa->skb = skb; if (ssk->tx_sa) { - sdp_dbg_data(&ssk->isk.sk, "got RX SrcAvail while waiting " + sdp_dbg_data(sk_ssk(ssk), "got RX SrcAvail while waiting " "for TX SrcAvail. waking up TX SrcAvail" "to be aborted\n"); wake_up(sk->sk_sleep); @@ -453,7 +453,7 @@ static struct sk_buff *sdp_recv_completion(struct sdp_sock *ssk, int id, int len struct sk_buff *skb; if (unlikely(id != ring_tail(ssk->rx_ring))) { - sdp_warn(&ssk->isk.sk, "Bogus recv completion id %d tail %d\n", + sdp_warn(sk_ssk(ssk), "Bogus recv completion id %d tail %d\n", id, ring_tail(ssk->rx_ring)); return NULL; } @@ -472,7 +472,7 @@ static struct sk_buff *sdp_recv_completion(struct sdp_sock *ssk, int id, int len static int sdp_process_rx_ctl_skb(struct sdp_sock *ssk, struct sk_buff *skb) { struct sdp_bsdh *h = (struct sdp_bsdh *)skb_transport_header(skb); - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); sdp_dbg_data(sk, "Handling %s\n", mid2str(h->mid)); sdp_prf(sk, skb, "Handling %s", mid2str(h->mid)); @@ -534,7 +534,7 @@ static int sdp_process_rx_ctl_skb(struct sdp_sock *ssk, struct sk_buff *skb) static int sdp_process_rx_skb(struct sdp_sock *ssk, struct sk_buff *skb) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); int frags; struct sdp_bsdh *h; int pagesz, i; @@ -552,7 +552,7 @@ static int sdp_process_rx_skb(struct sdp_sock *ssk, struct sk_buff *skb) if (!before(mseq_ack, ssk->nagle_last_unacked)) ssk->nagle_last_unacked = 0; - sdp_prf1(&ssk->isk.sk, skb, "RX: %s +%d c:%d->%d mseq:%d ack:%d", + sdp_prf1(sk_ssk(ssk), skb, "RX: %s +%d c:%d->%d mseq:%d ack:%d", mid2str(h->mid), ntohs(h->bufs), credits_before, tx_credits(ssk), ntohl(h->mseq), ntohl(h->mseq_ack)); @@ -623,7 +623,7 @@ static struct sk_buff *sdp_process_rx_wc(struct sdp_sock *ssk, { struct sk_buff *skb; struct sdp_bsdh *h; - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); int mseq; skb = sdp_recv_completion(ssk, wc->wr_id, wc->byte_len); @@ -665,7 +665,7 @@ static struct sk_buff *sdp_process_rx_wc(struct sdp_sock *ssk, #else skb->tail = skb->head + skb_headlen(skb); #endif - SDP_DUMP_PACKET(&ssk->isk.sk, "RX", skb, h); + SDP_DUMP_PACKET(sk_ssk(ssk), "RX", skb, h); skb_reset_transport_header(skb); ssk->rx_packets++; @@ -683,7 +683,7 @@ static struct sk_buff *sdp_process_rx_wc(struct sdp_sock *ssk, /* like sk_stream_write_space - execpt 
measures remote credits */ static void sdp_bzcopy_write_space(struct sdp_sock *ssk) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); struct socket *sock = sk->sk_socket; if (tx_credits(ssk) < ssk->min_bufs || !sock) @@ -722,7 +722,7 @@ int sdp_poll_rx_cq(struct sdp_sock *ssk) } while (n == SDP_NUM_WC); if (wc_processed) { - sdp_prf(&ssk->isk.sk, NULL, "processed %d", wc_processed); + sdp_prf(sk_ssk(ssk), NULL, "processed %d", wc_processed); sdp_bzcopy_write_space(ssk); } @@ -733,7 +733,7 @@ static void sdp_rx_comp_work(struct work_struct *work) { struct sdp_sock *ssk = container_of(work, struct sdp_sock, rx_comp_work); - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); SDPSTATS_COUNTER_INC(rx_wq); @@ -765,7 +765,7 @@ static void sdp_rx_comp_work(struct work_struct *work) void sdp_do_posts(struct sdp_sock *ssk) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); int xmit_poll_force; struct sk_buff *skb; @@ -878,7 +878,7 @@ static void sdp_arm_cq_timer(unsigned long data) struct sdp_sock *ssk = (struct sdp_sock *)data; SDPSTATS_COUNTER_INC(rx_cq_arm_timer); - sdp_arm_rx_cq(&ssk->isk.sk); + sdp_arm_rx_cq(sk_ssk(ssk)); } int sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device) @@ -892,7 +892,7 @@ int sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device) ssk->rx_ring.buffer = kzalloc( sizeof *ssk->rx_ring.buffer * SDP_RX_SIZE, GFP_KERNEL); if (!ssk->rx_ring.buffer) { - sdp_warn(&ssk->isk.sk, + sdp_warn(sk_ssk(ssk), "Unable to allocate RX Ring size %zd.\n", sizeof(*ssk->rx_ring.buffer) * SDP_RX_SIZE); @@ -900,11 +900,11 @@ int sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device) } rx_cq = ib_create_cq(device, sdp_rx_irq, sdp_rx_cq_event_handler, - &ssk->isk.sk, SDP_RX_SIZE, IB_CQ_VECTOR_LEAST_ATTACHED); + sk_ssk(ssk), SDP_RX_SIZE, IB_CQ_VECTOR_LEAST_ATTACHED); if (IS_ERR(rx_cq)) { rc = PTR_ERR(rx_cq); - sdp_warn(&ssk->isk.sk, "Unable to allocate RX CQ: %d.\n", rc); + sdp_warn(sk_ssk(ssk), "Unable to allocate RX CQ: %d.\n", rc); goto err_cq; } @@ -913,7 +913,7 @@ int sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device) INIT_WORK(&ssk->rx_comp_work, sdp_rx_comp_work); setup_timer(&ssk->rx_ring.cq_arm_timer, sdp_arm_cq_timer, (unsigned long)ssk); - sdp_arm_rx_cq(&ssk->isk.sk); + sdp_arm_rx_cq(sk_ssk(ssk)); return 0; @@ -936,7 +936,7 @@ void sdp_rx_ring_destroy(struct sdp_sock *ssk) if (ssk->rx_ring.cq) { if (ib_destroy_cq(ssk->rx_ring.cq)) { - sdp_warn(&ssk->isk.sk, "destroy cq(%p) failed\n", + sdp_warn(sk_ssk(ssk), "destroy cq(%p) failed\n", ssk->rx_ring.cq); } else { ssk->rx_ring.cq = NULL; diff --git a/drivers/infiniband/ulp/sdp/sdp_tx.c b/drivers/infiniband/ulp/sdp/sdp_tx.c index ae0791fefa5a..92b22269c19a 100644 --- a/drivers/infiniband/ulp/sdp/sdp_tx.c +++ b/drivers/infiniband/ulp/sdp/sdp_tx.c @@ -46,11 +46,11 @@ int sdp_xmit_poll(struct sdp_sock *ssk, int force) { int wc_processed = 0; - sdp_prf(&ssk->isk.sk, NULL, "%s", __func__); + sdp_prf(sk_ssk(ssk), NULL, "%s", __func__); /* If we don't have a pending timer, set one up to catch our recent post in case the interface becomes idle */ - if (likely(ssk->qp_active && ssk->isk.sk.sk_state != TCP_CLOSE) && + if (likely(ssk->qp_active && sk_ssk(ssk)->sk_state != TCP_CLOSE) && !timer_pending(&ssk->tx_ring.timer)) { mod_timer(&ssk->tx_ring.timer, jiffies + SDP_TX_POLL_TIMEOUT); } @@ -89,7 +89,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb) if (unlikely(h->mid == SDP_MID_SRCAVAIL)) { struct tx_srcavail_state *tx_sa = 
TX_SRCAVAIL_STATE(skb); if (ssk->tx_sa != tx_sa) { - sdp_dbg_data(&ssk->isk.sk, "SrcAvail cancelled " + sdp_dbg_data(sk_ssk(ssk), "SrcAvail cancelled " "before being sent!\n"); SDP_WARN_ON(1); sdp_free_skb(skb); @@ -108,11 +108,11 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb) h->mseq = htonl(mseq); h->mseq_ack = htonl(mseq_ack(ssk)); - sdp_prf(&ssk->isk.sk, skb, "TX: %s bufs: %d mseq:%ld ack:%d c: %d", + sdp_prf(sk_ssk(ssk), skb, "TX: %s bufs: %d mseq:%ld ack:%d c: %d", mid2str(h->mid), rx_ring_posted(ssk), mseq, ntohl(h->mseq_ack), tx_credits(ssk)); - SDP_DUMP_PACKET(&ssk->isk.sk, "TX", skb, h); + SDP_DUMP_PACKET(sk_ssk(ssk), "TX", skb, h); tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)]; tx_req->skb = skb; @@ -152,12 +152,12 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb) rc = ib_post_send(ssk->qp, &tx_wr, &bad_wr); if (unlikely(rc)) { - sdp_dbg(&ssk->isk.sk, + sdp_dbg(sk_ssk(ssk), "ib_post_send failed with status %d.\n", rc); sdp_cleanup_sdp_buf(ssk, tx_req, skb->len - skb->data_len, DMA_TO_DEVICE); - sdp_set_error(&ssk->isk.sk, -ECONNRESET); + sdp_set_error(sk_ssk(ssk), -ECONNRESET); goto err; } @@ -207,7 +207,7 @@ out: static inline void sdp_process_tx_wc(struct sdp_sock *ssk, struct ib_wc *wc) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); if (likely(wc->wr_id & SDP_OP_SEND)) { struct sk_buff *skb; @@ -249,7 +249,7 @@ static int sdp_process_tx_cq(struct sdp_sock *ssk) int wc_processed = 0; if (!ssk->tx_ring.cq) { - sdp_dbg(&ssk->isk.sk, "tx irq on destroyed tx_cq\n"); + sdp_dbg(sk_ssk(ssk), "tx irq on destroyed tx_cq\n"); return 0; } @@ -262,17 +262,17 @@ static int sdp_process_tx_cq(struct sdp_sock *ssk) } while (n == SDP_NUM_WC); if (wc_processed) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); sdp_prf1(sk, NULL, "Waking sendmsg. inflight=%d", (u32) tx_ring_posted(ssk)); - sk_stream_write_space(&ssk->isk.sk); + sk_stream_write_space(sk_ssk(ssk)); if (sk->sk_write_pending && test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) && tx_ring_posted(ssk)) { /* a write is pending and still no room in tx queue, * arm tx cq */ - sdp_prf(&ssk->isk.sk, NULL, "pending tx - rearming"); + sdp_prf(sk_ssk(ssk), NULL, "pending tx - rearming"); sdp_arm_tx_cq(sk); } @@ -288,7 +288,7 @@ static int sdp_process_tx_cq(struct sdp_sock *ssk) */ static int sdp_tx_handler_select(struct sdp_sock *ssk) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); if (sk->sk_write_pending) { /* Do the TX posts from sender context */ @@ -314,17 +314,17 @@ static int sdp_tx_handler_select(struct sdp_sock *ssk) static void sdp_poll_tx_timeout(unsigned long data) { struct sdp_sock *ssk = (struct sdp_sock *)data; - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); u32 inflight, wc_processed; - sdp_prf1(&ssk->isk.sk, NULL, "TX timeout: inflight=%d, head=%d tail=%d", + sdp_prf1(sk_ssk(ssk), NULL, "TX timeout: inflight=%d, head=%d tail=%d", (u32) tx_ring_posted(ssk), ring_head(ssk->tx_ring), ring_tail(ssk->tx_ring)); /* Only process if the socket is not in use */ bh_lock_sock(sk); if (sock_owned_by_user(sk)) { - sdp_prf(&ssk->isk.sk, NULL, "TX comp: socket is busy"); + sdp_prf(sk_ssk(ssk), NULL, "TX comp: socket is busy"); if (sdp_tx_handler_select(ssk) && sk->sk_state != TCP_CLOSE && likely(ssk->qp_active)) { @@ -350,7 +350,7 @@ static void sdp_poll_tx_timeout(unsigned long data) } inflight = (u32) tx_ring_posted(ssk); - sdp_prf1(&ssk->isk.sk, NULL, "finished tx proccessing. 
inflight = %d", + sdp_prf1(sk_ssk(ssk), NULL, "finished tx proccessing. inflight = %d", tx_ring_posted(ssk)); /* If there are still packets in flight and the timer has not already @@ -404,7 +404,7 @@ void sdp_post_keepalive(struct sdp_sock *ssk) int rc; struct ib_send_wr wr, *bad_wr; - sdp_dbg(&ssk->isk.sk, "%s\n", __func__); + sdp_dbg(sk_ssk(ssk), "%s\n", __func__); memset(&wr, 0, sizeof(wr)); @@ -416,9 +416,9 @@ void sdp_post_keepalive(struct sdp_sock *ssk) rc = ib_post_send(ssk->qp, &wr, &bad_wr); if (rc) { - sdp_dbg(&ssk->isk.sk, + sdp_dbg(sk_ssk(ssk), "ib_post_keepalive failed with status %d.\n", rc); - sdp_set_error(&ssk->isk.sk, -ECONNRESET); + sdp_set_error(sk_ssk(ssk), -ECONNRESET); } sdp_cnt(sdp_keepalive_probes_sent); @@ -440,18 +440,18 @@ int sdp_tx_ring_create(struct sdp_sock *ssk, struct ib_device *device) sizeof *ssk->tx_ring.buffer * SDP_TX_SIZE, GFP_KERNEL); if (!ssk->tx_ring.buffer) { rc = -ENOMEM; - sdp_warn(&ssk->isk.sk, "Can't allocate TX Ring size %zd.\n", + sdp_warn(sk_ssk(ssk), "Can't allocate TX Ring size %zd.\n", sizeof(*ssk->tx_ring.buffer) * SDP_TX_SIZE); goto out; } tx_cq = ib_create_cq(device, sdp_tx_irq, sdp_tx_cq_event_handler, - &ssk->isk.sk, SDP_TX_SIZE, IB_CQ_VECTOR_LEAST_ATTACHED); + sk_ssk(ssk), SDP_TX_SIZE, IB_CQ_VECTOR_LEAST_ATTACHED); if (IS_ERR(tx_cq)) { rc = PTR_ERR(tx_cq); - sdp_warn(&ssk->isk.sk, "Unable to allocate TX CQ: %d.\n", rc); + sdp_warn(sk_ssk(ssk), "Unable to allocate TX CQ: %d.\n", rc); goto err_cq; } @@ -491,7 +491,7 @@ void sdp_tx_ring_destroy(struct sdp_sock *ssk) if (ssk->tx_ring.cq) { if (ib_destroy_cq(ssk->tx_ring.cq)) { - sdp_warn(&ssk->isk.sk, "destroy cq(%p) failed\n", + sdp_warn(sk_ssk(ssk), "destroy cq(%p) failed\n", ssk->tx_ring.cq); } else { ssk->tx_ring.cq = NULL; diff --git a/drivers/infiniband/ulp/sdp/sdp_zcopy.c b/drivers/infiniband/ulp/sdp/sdp_zcopy.c index dd8c9c0bb224..bf2adef4646f 100644 --- a/drivers/infiniband/ulp/sdp/sdp_zcopy.c +++ b/drivers/infiniband/ulp/sdp/sdp_zcopy.c @@ -131,7 +131,7 @@ static int sdp_post_srcavail_cancel(struct sock *sk) struct sdp_sock *ssk = sdp_sk(sk); struct sk_buff *skb; - sdp_dbg_data(&ssk->isk.sk, "Posting srcavail cancel\n"); + sdp_dbg_data(sk_ssk(ssk), "Posting srcavail cancel\n"); skb = sdp_alloc_skb_srcavail_cancel(sk, 0); if (unlikely(!skb)) @@ -147,7 +147,7 @@ static int sdp_post_srcavail_cancel(struct sock *sk) static int sdp_wait_rdmardcompl(struct sdp_sock *ssk, long *timeo_p, int ignore_signals) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); int err = 0; long current_timeo = *timeo_p; struct tx_srcavail_state *tx_sa = ssk->tx_sa; @@ -203,7 +203,7 @@ static int sdp_wait_rdmardcompl(struct sdp_sock *ssk, long *timeo_p, tx_sa->abort_flags && ssk->rx_sa && (tx_sa->bytes_acked < tx_sa->bytes_sent)); - sdp_prf(&ssk->isk.sk, NULL, "woke up sleepers"); + sdp_prf(sk_ssk(ssk), NULL, "woke up sleepers"); posts_handler_get(ssk); @@ -227,7 +227,7 @@ static int sdp_wait_rdmardcompl(struct sdp_sock *ssk, long *timeo_p, static int sdp_wait_rdma_wr_finished(struct sdp_sock *ssk) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); long timeo = SDP_RDMA_READ_TIMEOUT; int rc = 0; DEFINE_WAIT(wait); @@ -261,7 +261,7 @@ static int sdp_wait_rdma_wr_finished(struct sdp_sock *ssk) !ssk->tx_ring.rdma_inflight->busy || !ssk->qp_active); sdp_prf1(sk, NULL, "Woke up"); - sdp_dbg_data(&ssk->isk.sk, "woke up sleepers\n"); + sdp_dbg_data(sk_ssk(ssk), "woke up sleepers\n"); posts_handler_get(ssk); } @@ -338,7 +338,7 @@ static inline int sge_bytes(struct ib_sge 
*sge, int sge_cnt) } void sdp_handle_sendsm(struct sdp_sock *ssk, u32 mseq_ack) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); unsigned long flags; spin_lock_irqsave(&ssk->tx_sa_lock, flags); @@ -368,7 +368,7 @@ out: void sdp_handle_rdma_read_compl(struct sdp_sock *ssk, u32 mseq_ack, u32 bytes_completed) { - struct sock *sk = &ssk->isk.sk; + struct sock *sk = sk_ssk(ssk); unsigned long flags; sdp_prf1(sk, NULL, "RdmaRdCompl ssk=%p tx_sa=%p", ssk, ssk->tx_sa); @@ -574,7 +574,7 @@ int sdp_rdma_to_iovec(struct sock *sk, struct iovec *iov, struct sk_buff *skb, while (!iov->iov_len) ++iov; - sdp_dbg_data(&ssk->isk.sk, "preparing RDMA read." + sdp_dbg_data(sk_ssk(ssk), "preparing RDMA read." " len: 0x%x. buffer len: 0x%zx\n", len, iov->iov_len); sock_hold(sk, SOCK_REF_RDMA_RD); @@ -595,7 +595,7 @@ int sdp_rdma_to_iovec(struct sock *sk, struct iovec *iov, struct sk_buff *skb, rc = sdp_post_rdma_read(sk, rx_sa, offset); if (unlikely(rc)) { sdp_warn(sk, "ib_post_send failed with status %d.\n", rc); - sdp_set_error(&ssk->isk.sk, -ECONNRESET); + sdp_set_error(sk_ssk(ssk), -ECONNRESET); goto err_post_send; } @@ -738,7 +738,7 @@ int sdp_sendmsg_zcopy(struct kiocb *iocb, struct sock *sk, struct iovec *iov) return 0; } - sock_hold(&ssk->isk.sk, SOCK_REF_ZCOPY); + sock_hold(sk_ssk(ssk), SOCK_REF_ZCOPY); SDPSTATS_COUNTER_INC(sendmsg_zcopy_segment); /* Ok commence sending. */ @@ -768,7 +768,7 @@ err_alloc_tx_sa: sdp_prf1(sk, NULL, "sdp_sendmsg_zcopy end rc: %d copied: %d", rc, copied); - sock_put(&ssk->isk.sk, SOCK_REF_ZCOPY); + sock_put(sk_ssk(ssk), SOCK_REF_ZCOPY); if (rc < 0 && rc != -EAGAIN && rc != -ETIME) return rc;
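
A note on why the conversion is a plain cast: (struct sock *)ssk and &ssk->isk.sk name the same address only because struct sdp_sock begins with its struct inet_sock member 'isk', and struct inet_sock begins with its struct sock member 'sk', so all three structures start at the same offset. The patch relies on this layout but does not assert it. The sketch below is illustrative only, assuming the sdp.h definitions; the guard function and the type-checked helper are hypothetical additions, not part of the patch.

#include <linux/kernel.h>	/* BUILD_BUG_ON() */
#include <linux/stddef.h>	/* offsetof() */
#include <net/inet_sock.h>	/* struct inet_sock */
#include "sdp.h"		/* struct sdp_sock, sk_ssk() */

/* Compile-time guard for the layout the cast in sk_ssk() depends on. */
static inline void sdp_assert_sk_ssk_layout(void)
{
	BUILD_BUG_ON(offsetof(struct sdp_sock, isk) != 0);
	BUILD_BUG_ON(offsetof(struct inet_sock, sk) != 0);
}

/* Type-checked alternative to the cast-based macro. */
static inline struct sock *sk_ssk_checked(struct sdp_sock *ssk)
{
	return &ssk->isk.sk;	/* same address as (struct sock *)ssk */
}

Compared with the macro, a static inline such as sk_ssk_checked() would reject a wrong pointer type at compile time, whereas the cast-based macro silently accepts any pointer; the macro, on the other hand, only needs a forward declaration of struct sock rather than the full structure definitions.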