if (skb)
                TCP_SKB_CB(skb)->eor = 1;
 
-       /* We support starting offload on multiple sockets
-        * concurrently, so we only need a read lock here.
-        * This lock must precede get_netdev_for_sock to prevent races between
-        * NETDEV_DOWN and setsockopt.
-        */
-       down_read(&device_offload_lock);
        netdev = get_netdev_for_sock(sk);
        if (!netdev) {
                pr_err_ratelimited("%s: netdev not found\n", __func__);
                rc = -EINVAL;
-               goto release_lock;
+               goto disable_cad;
        }
 
        if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
        /* Avoid offloading if the device is down
         * We don't want to offload new flows after
         * the NETDEV_DOWN event
+        *
+        * device_offload_lock is taken in tls_device's NETDEV_DOWN
+        * handler thus protecting from the device going down before
+        * ctx was added to tls_device_list.
         */
+       down_read(&device_offload_lock);
        if (!(netdev->flags & IFF_UP)) {
                rc = -EINVAL;
-               goto release_netdev;
+               goto release_lock;
        }
 
        ctx->priv_ctx_tx = offload_ctx;
                                             &ctx->crypto_send.info,
                                             tcp_sk(sk)->write_seq);
        if (rc)
-               goto release_netdev;
+               goto release_lock;
 
        tls_device_attach(ctx, sk, netdev);
+       up_read(&device_offload_lock);
 
        /* following this assignment tls_is_sk_tx_device_offloaded
         * will return true and the context might be accessed
         */
        smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
        dev_put(netdev);
-       up_read(&device_offload_lock);
 
        return 0;
 
-release_netdev:
-       dev_put(netdev);
 release_lock:
        up_read(&device_offload_lock);
+release_netdev:
+       dev_put(netdev);
+disable_cad:
        clean_acked_data_disable(inet_csk(sk));
        crypto_free_aead(offload_ctx->aead_send);
 free_rec_seq:
        if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
                return -EOPNOTSUPP;
 
-       /* We support starting offload on multiple sockets
-        * concurrently, so we only need a read lock here.
-        * This lock must precede get_netdev_for_sock to prevent races between
-        * NETDEV_DOWN and setsockopt.
-        */
-       down_read(&device_offload_lock);
        netdev = get_netdev_for_sock(sk);
        if (!netdev) {
                pr_err_ratelimited("%s: netdev not found\n", __func__);
-               rc = -EINVAL;
-               goto release_lock;
+               return -EINVAL;
        }
 
        if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
        /* Avoid offloading if the device is down
         * We don't want to offload new flows after
         * the NETDEV_DOWN event
+        *
+        * device_offload_lock is taken in tls_device's NETDEV_DOWN
+        * handler thus protecting from the device going down before
+        * ctx was added to tls_device_list.
         */
+       down_read(&device_offload_lock);
        if (!(netdev->flags & IFF_UP)) {
                rc = -EINVAL;
-               goto release_netdev;
+               goto release_lock;
        }
 
        context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
        if (!context) {
                rc = -ENOMEM;
-               goto release_netdev;
+               goto release_lock;
        }
        context->resync_nh_reset = 1;
 
        down_read(&device_offload_lock);
 release_ctx:
        ctx->priv_ctx_rx = NULL;
-release_netdev:
-       dev_put(netdev);
 release_lock:
        up_read(&device_offload_lock);
+release_netdev:
+       dev_put(netdev);
        return rc;
 }