sdp: rename reference-count debug tag from CM_TW to CMA
author    Eldad Zinger <eldadz@mellanox.co.il>
          Tue, 4 May 2010 09:05:56 +0000 (12:05 +0300)
committer Mukesh Kacker <mukesh.kacker@oracle.com>
          Tue, 6 Oct 2015 12:04:56 +0000 (05:04 -0700)
This debug tag marks a reference that is held while sdp_cma_handler() is still expected to be invoked on the socket.

Signed-off-by: Eldad Zinger <eldadz@mellanox.co.il>
drivers/infiniband/ulp/sdp/sdp_cma.c
drivers/infiniband/ulp/sdp/sdp_dbg.h
drivers/infiniband/ulp/sdp/sdp_main.c
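
For context: as the hunks below show, this SDP debug code passes a tag string
to sock_hold()/sock_put(), so every reference can be attributed to its taker
in the socket's debug trace; the patch renames only that bookkeeping label,
and the refcount behaviour is unchanged. A minimal sketch of the pattern,
using hypothetical sdp_sock_hold()/sdp_sock_put() names rather than the
driver's exact macros:

	/*
	 * Illustrative sketch only (assumed names): log a tag with every
	 * hold/put so a leaked reference shows up in the debug trace
	 * together with its owner.
	 */
	#define sdp_sock_hold(sk, tag) do {				\
		sdp_dbg(sk, "HOLD %s: refcnt -> %d\n", (tag),		\
			atomic_read(&(sk)->sk_refcnt) + 1);		\
		sock_hold(sk);						\
	} while (0)

	#define sdp_sock_put(sk, tag) do {				\
		sdp_dbg(sk, "PUT  %s: refcnt <- %d\n", (tag),		\
			atomic_read(&(sk)->sk_refcnt));			\
		sock_put(sk);						\
	} while (0)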

diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
index fdd98a720ec11fdee3a5e2bf83b71aa9ceed2095..e2816544f771a14f83feb4c8f3814e86321bf4a7 100644
--- a/drivers/infiniband/ulp/sdp/sdp_cma.c
+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
@@ -474,7 +474,7 @@ int sdp_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
                                sdp_dbg(sk, "IB teardown while in "
                                        "TCP_CLOSE_WAIT taking reference to "
                                        "let close() finish the work\n");
-                               sock_hold(sk, SOCK_REF_CM_TW);
+                               sock_hold(sk, SOCK_REF_CMA);
                        }
                        sdp_set_error(sk, EPIPE);
                        rc = sdp_disconnected_handler(sk);
diff --git a/drivers/infiniband/ulp/sdp/sdp_dbg.h b/drivers/infiniband/ulp/sdp/sdp_dbg.h
index f25e86dc5e5cb4178d47e30e32053cc5b34fe9b2..2c994a44defb2e4d5cccb5217c1b4729de9c9d9c 100644
--- a/drivers/infiniband/ulp/sdp/sdp_dbg.h
+++ b/drivers/infiniband/ulp/sdp/sdp_dbg.h
@@ -145,7 +145,7 @@ extern int sdp_data_debug_level;
 #define SOCK_REF_RESET "RESET"
 #define SOCK_REF_ALIVE "ALIVE" /* sock_alloc -> destruct_sock */
 #define SOCK_REF_CLONE "CLONE"
-#define SOCK_REF_CM_TW "CM_TW" /* TIMEWAIT_ENTER -> TIMEWAIT_EXIT */
+#define SOCK_REF_CMA "CMA" /* sdp_cma_handler() is expected to be invoked */
 #define SOCK_REF_SEQ "SEQ" /* during proc read */
 #define SOCK_REF_DREQ_TO "DREQ_TO" /* dreq timeout is pending */
 #define SOCK_REF_ZCOPY "ZCOPY" /* zcopy send in process */
diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
index bf1752a9ee5e961648e494590bb2cc5699f0a008..a4d88c66118459634a5d2dd02f24a2ab4f1d8ce6 100644
--- a/drivers/infiniband/ulp/sdp/sdp_main.c
+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
@@ -625,7 +625,7 @@ static void sdp_close(struct sock *sk, long timeout)
                goto adjudge_to_death;
        }
 
-       sock_hold(sk, SOCK_REF_CM_TW);
+       sock_hold(sk, SOCK_REF_CMA);
 
        /*  We need to flush the recv. buffs.  We do this only on the
         *  descriptor close, not protocol-sourced closes, because the
@@ -1002,7 +1002,7 @@ static void sdp_destroy_work(struct work_struct *work)
        cancel_delayed_work(&ssk->srcavail_cancel_work);
 
        if (sk->sk_state == TCP_TIME_WAIT)
-               sock_put(sk, SOCK_REF_CM_TW);
+               sock_put(sk, SOCK_REF_CMA);
 
        /* In normal close current state is TCP_TIME_WAIT or TCP_CLOSE
           but if a CM connection is dropped below our legs state could
@@ -1045,7 +1045,7 @@ static void sdp_dreq_wait_timeout_work(struct work_struct *work)
                sdp_sk(sk)->qp_active = 0;
                rdma_disconnect(sdp_sk(sk)->id);
        } else
-               sock_put(sk, SOCK_REF_CM_TW);
+               sock_put(sk, SOCK_REF_CMA);
 
 out:
        sock_put(sk, SOCK_REF_DREQ_TO);
@@ -2745,7 +2745,7 @@ kill_socks:
                        if ((1 << sk->sk_state) &
                                (TCPF_FIN_WAIT1 | TCPF_CLOSE_WAIT |
                                 TCPF_LAST_ACK | TCPF_TIME_WAIT)) {
-                               sock_put(sk, SOCK_REF_CM_TW);
+                               sock_put(sk, SOCK_REF_CMA);
                        }
 
                        schedule();
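
On the last hunk: TCPF_FIN_WAIT1 and friends are bit flags defined in
include/net/tcp_states.h as (1 << TCP_<state>), so the guard collapses four
state comparisons into a single mask test before dropping the CMA reference.
A standalone sketch of the idiom (state values as in the mainline header;
everything else illustrative):

	#include <stdio.h>

	/* TCP state numbers as in include/net/tcp_states.h */
	enum {
		TCP_FIN_WAIT1  = 4,
		TCP_TIME_WAIT  = 6,
		TCP_CLOSE_WAIT = 8,
		TCP_LAST_ACK   = 9,
	};

	#define TCPF(state)	(1 << (state))

	int main(void)
	{
		const int mask = TCPF(TCP_FIN_WAIT1) | TCPF(TCP_CLOSE_WAIT) |
				 TCPF(TCP_LAST_ACK) | TCPF(TCP_TIME_WAIT);
		int sk_state = TCP_TIME_WAIT;

		/* One AND replaces four equality checks. */
		if (TCPF(sk_state) & mask)
			printf("state %d would drop the CMA reference\n",
			       sk_state);
		return 0;
	}

Sockets still in one of those four states are the ones that took the
SOCK_REF_CMA hold on the close path, so the unload sweep releases it here to
let them be freed.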