ksmbd_work could be freed after connection release.
Increment r_count of ksmbd_conn to indicate that requests
are not finished yet and to not release the connection.
Cc: stable@vger.kernel.org
Reported-by: Norbert Szetei <norbert@doyensec.com>
Tested-by: Norbert Szetei <norbert@doyensec.com>
Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
        default_conn_ops.terminate_fn = ops->terminate_fn;
 }
 
+/*
+ * Take a request reference on @conn so the connection is not
+ * released while a queued request is still in flight.
+ */
+void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn)
+{
+       atomic_inc(&conn->r_count);
+}
+
+/*
+ * Drop a request reference on @conn, waking any waiter draining
+ * pending requests on disconnection. A temporary refcnt is held
+ * across the wake_up() so @conn cannot be freed underneath us;
+ * if that was the last reference, @conn is freed here.
+ */
+void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn)
+{
+       /*
+        * Check the waitqueue to drop pending requests on
+        * disconnection. waitqueue_active() is safe because the
+        * wait condition uses an atomic operation.
+        */
+       atomic_inc(&conn->refcnt);
+       if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+               wake_up(&conn->r_count_q);
+
+       if (atomic_dec_and_test(&conn->refcnt))
+               kfree(conn);
+}
+
 int ksmbd_conn_transport_init(void)
 {
        int ret;
 
 void ksmbd_conn_transport_destroy(void);
 void ksmbd_conn_lock(struct ksmbd_conn *conn);
 void ksmbd_conn_unlock(struct ksmbd_conn *conn);
+void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn);
+void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn);
 
 /*
  * WARNING
 
 {
        struct smb2_oplock_break *rsp = NULL;
        struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
+       struct ksmbd_conn *conn = work->conn;
        struct oplock_break_info *br_info = work->request_buf;
        struct smb2_hdr *rsp_hdr;
        struct ksmbd_file *fp;
 
 out:
        ksmbd_free_work_struct(work);
+       ksmbd_conn_r_count_dec(conn);
 }
 
 /**
        work->sess = opinfo->sess;
 
        if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+               ksmbd_conn_r_count_inc(conn);
                INIT_WORK(&work->work, __smb2_oplock_break_noti);
                ksmbd_queue_work(work);
 
 {
        struct smb2_lease_break *rsp = NULL;
        struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
+       struct ksmbd_conn *conn = work->conn;
        struct lease_break_info *br_info = work->request_buf;
        struct smb2_hdr *rsp_hdr;
 
 
 out:
        ksmbd_free_work_struct(work);
+       ksmbd_conn_r_count_dec(conn);
 }
 
 /**
        work->sess = opinfo->sess;
 
        if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+               ksmbd_conn_r_count_inc(conn);
                INIT_WORK(&work->work, __smb2_lease_break_noti);
                ksmbd_queue_work(work);
                wait_for_break_ack(opinfo);
 
 
        ksmbd_conn_try_dequeue_request(work);
        ksmbd_free_work_struct(work);
-       /*
-        * Checking waitqueue to dropping pending requests on
-        * disconnection. waitqueue_active is safe because it
-        * uses atomic operation for condition.
-        */
-       atomic_inc(&conn->refcnt);
-       if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
-               wake_up(&conn->r_count_q);
-
-       if (atomic_dec_and_test(&conn->refcnt))
-               kfree(conn);
+       ksmbd_conn_r_count_dec(conn);
 }
 
 /**
        conn->request_buf = NULL;
 
        ksmbd_conn_enqueue_request(work);
-       atomic_inc(&conn->r_count);
+       ksmbd_conn_r_count_inc(conn);
        /* update activity on connection */
        conn->last_active = jiffies;
        INIT_WORK(&work->work, handle_ksmbd_work);