        sk_tx_queue_clear(sk);
        WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
-       old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+       old_dst = unrcu_pointer(xchg(&sk->sk_dst_cache, RCU_INITIALIZER(dst)));
        dst_release(old_dst);
 }
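
For context: every hunk in this patch applies the same shape of conversion.
A minimal before/after sketch (struct foo and field are placeholder names,
not taken from this patch):

        struct foo __rcu *field;
        struct foo *old, *newp;

        /* Before: sparse requires open-coding the __rcu address-space cast. */
        old = xchg((__force struct foo **)&field, newp);

        /* After: RCU_INITIALIZER() applies the __rcu cast on the way in,
         * unrcu_pointer() strips it on the way out. Semantics are unchanged;
         * the helpers merely encapsulate the sparse annotations.
         */
        old = unrcu_pointer(xchg(&field, RCU_INITIALIZER(newp)));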
 
 
 {
        struct net_rate_estimator *est;
 
-       est = xchg((__force struct net_rate_estimator **)rate_est, NULL);
+       est = unrcu_pointer(xchg(rate_est, NULL));
        if (est) {
                timer_shutdown_sync(&est->timer);
                kfree_rcu(est, rcu);
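
The teardown hunks (this one and several below) all swap in NULL. A
self-contained sketch of that pattern, using a hypothetical struct foo
(illustrative names only, not from this patch):

        #include <linux/atomic.h>
        #include <linux/rcupdate.h>
        #include <linux/slab.h>

        struct foo {
                struct rcu_head rcu;
        };

        static struct foo __rcu *global_foo;

        static void foo_destroy(void)
        {
                /* NULL carries no address space, so the new value needs no
                 * RCU_INITIALIZER(); only the pointer returned by xchg()
                 * needs unrcu_pointer().
                 */
                struct foo *old = unrcu_pointer(xchg(&global_foo, NULL));

                if (old)
                        kfree_rcu(old, rcu);
        }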
 
 
 static const struct sock_diag_handler __rcu *sock_diag_handlers[AF_MAX];
 
-static struct sock_diag_inet_compat __rcu *inet_rcv_compat;
+static const struct sock_diag_inet_compat __rcu *inet_rcv_compat;
 
 static struct workqueue_struct *broadcast_wq;
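
A note on the const-ification above (my reading; the patch itself does not
spell this out): once the open-coded cast in sock_diag_register_inet_compat()
below is gone, the operand types of xchg() must match exactly, and the value
being installed is a const pointer:

        const struct sock_diag_inet_compat *ptr;

        /* RCU_INITIALIZER(ptr) has type
         *      const struct sock_diag_inet_compat __rcu *
         * so the xchg() target, inet_rcv_compat, must be const-qualified
         * as well.
         */
        xchg(&inet_rcv_compat, RCU_INITIALIZER(ptr));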
 
 
 void sock_diag_register_inet_compat(const struct sock_diag_inet_compat *ptr)
 {
-       xchg((__force const struct sock_diag_inet_compat **)&inet_rcv_compat,
-            ptr);
+       xchg(&inet_rcv_compat, RCU_INITIALIZER(ptr));
 }
 EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);
 
 {
        const struct sock_diag_inet_compat *old;
 
-       old = xchg((__force const struct sock_diag_inet_compat **)&inet_rcv_compat,
-                  NULL);
+       old = unrcu_pointer(xchg(&inet_rcv_compat, NULL));
        WARN_ON_ONCE(old != ptr);
 }
 EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
 
        buf = NULL;
 
        req_inet = inet_rsk(req);
-       opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
+       opt = unrcu_pointer(xchg(&req_inet->ireq_opt, RCU_INITIALIZER(opt)));
        if (opt)
                kfree_rcu(opt, rcu);
 
 
        icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
        memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
        __sk_dst_reset(sk);
-       dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL));
+       dst_release(unrcu_pointer(xchg(&sk->sk_rx_dst, NULL)));
        tcp_saved_syn_free(tp);
        tp->compressed_ack = 0;
        tp->segs_in = 0;
 
 {
        struct tcp_fastopen_context *ctxt;
 
-       ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL);
+       ctxt = unrcu_pointer(xchg(&net->ipv4.tcp_fastopen_ctx, NULL));
 
        if (ctxt)
                call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
 
        if (sk) {
                q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
-               octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx);
+               octx = unrcu_pointer(xchg(&q->ctx, RCU_INITIALIZER(ctx)));
        } else {
-               octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx);
+               octx = unrcu_pointer(xchg(&net->ipv4.tcp_fastopen_ctx,
+                                         RCU_INITIALIZER(ctx)));
        }
 
        if (octx)
 
        struct dst_entry *old;
 
        if (dst_hold_safe(dst)) {
-               old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst);
+               old = unrcu_pointer(xchg(&sk->sk_rx_dst, RCU_INITIALIZER(dst)));
                dst_release(old);
                return old != dst;
        }
 
 
        /* Free tx options */
 
-       opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
+       opt = unrcu_pointer(xchg(&np->opt, NULL));
        if (opt) {
                atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
                txopt_put(opt);
 
                if (pcpu_rt && rcu_access_pointer(pcpu_rt->from) == match) {
                        struct fib6_info *from;
 
-                       from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
+                       from = unrcu_pointer(xchg(&pcpu_rt->from, NULL));
                        fib6_info_release(from);
                }
        }
 
                        icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
                }
        }
-       opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
-                  opt);
+       opt = unrcu_pointer(xchg(&inet6_sk(sk)->opt, RCU_INITIALIZER(opt)));
        sk_dst_reset(sk);
 
        return opt;
 
                in6_dev_put(idev);
        }
 
-       from = xchg((__force struct fib6_info **)&rt->from, NULL);
+       from = unrcu_pointer(xchg(&rt->from, NULL));
        fib6_info_release(from);
 }
 
        if (res->f6i->fib6_destroying) {
                struct fib6_info *from;
 
-               from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
+               from = unrcu_pointer(xchg(&pcpu_rt->from, NULL));
                fib6_info_release(from);
        }
 
        /* purge completely the exception to allow releasing the held resources:
         * some [sk] cache may keep the dst around for unlimited time
         */
-       from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
+       from = unrcu_pointer(xchg(&rt6_ex->rt6i->from, NULL));
        fib6_info_release(from);
        dst_dev_put(&rt6_ex->rt6i->dst);
 
 
 {
        struct tc_cookie *old;
 
-       old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
+       old = unrcu_pointer(xchg(old_cookie, RCU_INITIALIZER(new_cookie)));
        if (old)
                call_rcu(&old->rcu, tcf_free_cookie_rcu);
 }
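
All of the hunks above touch only updaters; readers are unaffected and
continue to use rcu_dereference() as before. For completeness, a sketch of
the matching reader side (global_foo and do_something() are hypothetical,
as in the earlier sketches):

        struct foo *p;

        rcu_read_lock();
        p = rcu_dereference(global_foo);
        if (p)
                do_something(p);
        rcu_read_unlock();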