}
 EXPORT_SYMBOL_GPL(smc_unhash_sk);
 
+/* This will be called before the user really releases sock_lock. So do
+ * the work here that we could not do in BH context while the user held
+ * sock_lock.
+ */
+static void smc_release_cb(struct sock *sk)
+{
+       struct smc_sock *smc = smc_sk(sk);
+
+       if (smc->conn.tx_in_release_sock) {
+               smc_tx_pending(&smc->conn);
+               smc->conn.tx_in_release_sock = false;
+       }
+}
+
 struct proto smc_proto = {
        .name           = "SMC",
        .owner          = THIS_MODULE,
        .keepalive      = smc_set_keepalive,
        .hash           = smc_hash_sk,
        .unhash         = smc_unhash_sk,
+       .release_cb     = smc_release_cb,
        .obj_size       = sizeof(struct smc_sock),
        .h.smc_hash     = &smc_v4_hashinfo,
        .slab_flags     = SLAB_TYPESAFE_BY_RCU,
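
The smc_release_cb() added above is not called directly by SMC code; the
core socket layer invokes it when the lock owner calls release_sock(). A
sketch of that path, abridged from net/core/sock.c (details vary across
kernel versions):

	void release_sock(struct sock *sk)
	{
		spin_lock_bh(&sk->sk_lock.slock);
		if (sk->sk_backlog.tail)
			__release_sock(sk);	/* drain packets queued while owned */

		/* The callback runs here, still under the socket spinlock and
		 * before ownership is dropped, so smc_release_cb() cannot race
		 * with a new lock owner.
		 */
		if (sk->sk_prot->release_cb)
			sk->sk_prot->release_cb(sk);

		sock_release_ownership(sk);
		if (waitqueue_active(&sk->sk_lock.wq))
			wake_up(&sk->sk_lock.wq);
		spin_unlock_bh(&sk->sk_lock.slock);
	}
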
@@ ... @@ struct proto smc_proto6 = {
        .keepalive      = smc_set_keepalive,
        .hash           = smc_hash_sk,
        .unhash         = smc_unhash_sk,
+       .release_cb     = smc_release_cb,
        .obj_size       = sizeof(struct smc_sock),
        .h.smc_hash     = &smc_v6_hashinfo,
        .slab_flags     = SLAB_TYPESAFE_BY_RCU,

diff --git a/net/smc/smc.h b/net/smc/smc.h
@@ ... @@ struct smc_connection {
                                                 * data still pending
                                                 */
        char                    urg_rx_byte;    /* urgent byte */
+       bool                    tx_in_release_sock;
+                                               /* flush pending tx data in
+                                                * sock release_cb()
+                                                */
        atomic_t                bytes_to_rcv;   /* arrived data,
                                                 * not yet received
                                                 */
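
The new flag pairs a BH-context producer with the smc_release_cb()
consumer shown earlier. A condensed sketch of the pattern, using a
hypothetical helper name (the real call sites are in smc_cdc.c below):

	static void smc_bh_side(struct smc_sock *smc)
	{
		struct smc_connection *conn = &smc->conn;

		bh_lock_sock(&smc->sk);
		/* sock_owned_by_user() is only stable while we hold the
		 * socket spinlock.
		 */
		if (sock_owned_by_user(&smc->sk))
			conn->tx_in_release_sock = true;  /* flushed in smc_release_cb() */
		else
			smc_tx_pending(conn);             /* lock is free: send right away */
		bh_unlock_sock(&smc->sk);
	}
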

diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
@@ ... @@ static void smc_cdc_tx_handler(struct ib_wc *wc, void *pnd_snd)
        }
 
        if (atomic_dec_and_test(&conn->cdc_pend_tx_wr)) {
-               /* If this is the last pending WR complete, we must push to
-                * prevent hang when autocork enabled.
+               /* If the user owns sock_lock, mark that the connection
+                * needs sending. The user context will then send the
+                * pending data when it releases sock_lock in
+                * smc_release_cb().
                 */
-               smc_tx_sndbuf_nonempty(conn);
+               if (sock_owned_by_user(&smc->sk))
+                       conn->tx_in_release_sock = true;
+               else
+                       smc_tx_pending(conn);
+
                if (unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq)))
                        wake_up(&conn->cdc_pend_tx_wq);
        }
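
Two things are worth noting about this hunk. First, the non-deferred
branch now calls smc_tx_pending() instead of smc_tx_sndbuf_nonempty()
directly, so the completion handler and smc_release_cb() flush the
connection through the same helper. Second, sock_owned_by_user() is
meaningful here because smc_cdc_tx_handler() runs under bh_lock_sock();
the flag write is therefore serialized against release_sock(), which
consumes it under the same spinlock, and no extra synchronization is
needed.
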
@@ ... @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
        /* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
        if ((diff_cons && smc_tx_prepared_sends(conn)) ||
            conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
-           conn->local_rx_ctrl.prod_flags.urg_data_pending)
-               smc_tx_sndbuf_nonempty(conn);
+           conn->local_rx_ctrl.prod_flags.urg_data_pending) {
+               if (!sock_owned_by_user(&smc->sk))
+                       smc_tx_pending(conn);
+               else
+                       conn->tx_in_release_sock = true;
+       }
 
        if (diff_cons && conn->urg_tx_pend &&
            atomic_read(&conn->peer_rmbe_space) == conn->peer_rmbe_size) {