        struct mptcp_subflow_context *subflow, *tmp;
        struct mptcp_sock *msk = mptcp_sk(sk);
 
+       inet_sk_state_store(sk, TCP_CLOSE);
        mptcp_for_each_subflow_safe(msk, subflow, tmp)
                __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
                                  subflow, MPTCP_CF_FASTCLOSE);
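
The hunk above appears to be the body of mptcp_do_fastclose() itself: with the added inet_sk_state_store() call, the helper both moves the msk to TCP_CLOSE and tears down every subflow, which is what lets the call sites below drop their own state store. A sketch of how the helper reads once this is applied (the static void signature and the comment are assumptions, not part of the quoted hunk):

static void mptcp_do_fastclose(struct sock *sk)
{
        struct mptcp_subflow_context *subflow, *tmp;
        struct mptcp_sock *msk = mptcp_sk(sk);

        /* transition to TCP_CLOSE here, so callers no longer have to */
        inet_sk_state_store(sk, TCP_CLOSE);
        mptcp_for_each_subflow_safe(msk, subflow, tmp)
                __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
                                  subflow, MPTCP_CF_FASTCLOSE);
}
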
         * even if it is orphaned and in FIN_WAIT2 state
         */
        if (sock_flag(sk, SOCK_DEAD)) {
-               if (mptcp_should_close(sk)) {
-                       inet_sk_state_store(sk, TCP_CLOSE);
+               if (mptcp_should_close(sk))
                        mptcp_do_fastclose(sk);
-               }
+
                if (sk->sk_state == TCP_CLOSE) {
                        __mptcp_destroy_sock(sk);
                        goto unlock;
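
Dropping the explicit inet_sk_state_store() in this branch is safe only because the previous hunk makes the fastclose helper perform that transition: after mptcp_do_fastclose() returns, sk->sk_state is TCP_CLOSE, so the existing check still reaches __mptcp_destroy_sock(). Roughly, the resulting branch (a reconstruction from the hunk; the closing braces and comments are added for readability):

        if (sock_flag(sk, SOCK_DEAD)) {
                if (mptcp_should_close(sk))
                        mptcp_do_fastclose(sk);  /* leaves sk in TCP_CLOSE */

                /* still taken on the fastclose path above */
                if (sk->sk_state == TCP_CLOSE) {
                        __mptcp_destroy_sock(sk);
                        goto unlock;
                }
        }
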
 void __mptcp_unaccepted_force_close(struct sock *sk)
 {
        sock_set_flag(sk, SOCK_DEAD);
-       inet_sk_state_store(sk, TCP_CLOSE);
        mptcp_do_fastclose(sk);
        __mptcp_destroy_sock(sk);
 }
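
The same simplification shows up at the other call sites: a caller now only marks the socket dead, fastcloses, and destroys it. For example, __mptcp_unaccepted_force_close() ends up reading as below (reconstructed from the hunk; the trailing comment is not in the source):

void __mptcp_unaccepted_force_close(struct sock *sk)
{
        sock_set_flag(sk, SOCK_DEAD);
        mptcp_do_fastclose(sk);         /* sets TCP_CLOSE internally */
        __mptcp_destroy_sock(sk);
}
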
                /* If the msk has read data, or the caller explicitly ask it,
                 * do the MPTCP equivalent of TCP reset, aka MPTCP fastclose
                 */
-               inet_sk_state_store(sk, TCP_CLOSE);
                mptcp_do_fastclose(sk);
                timeout = 0;
        } else if (mptcp_close_state(sk)) {