#define tipc_aead_rcu_replace(rcu_ptr, ptr, lock)                      \
 do {                                                                   \
-       typeof(rcu_ptr) __tmp = rcu_dereference_protected((rcu_ptr),    \
+       struct tipc_aead *__tmp = rcu_dereference_protected((rcu_ptr),  \
                                                lockdep_is_held(lock)); \
        rcu_assign_pointer((rcu_ptr), (ptr));                           \
        tipc_aead_put(__tmp);                                           \
        ehdr = (struct tipc_ehdr *)skb->data;
        salt = aead->salt;
        if (aead->mode == CLUSTER_KEY)
-               salt ^= ehdr->addr; /* __be32 */
+               salt ^= __be32_to_cpu(ehdr->addr);
        else if (__dnode)
                salt ^= tipc_node_get_addr(__dnode);
        memcpy(iv, &salt, 4);
        ehdr = (struct tipc_ehdr *)skb->data;
        salt = aead->salt;
        if (aead->mode == CLUSTER_KEY)
-               salt ^= ehdr->addr; /* __be32 */
+               salt ^= __be32_to_cpu(ehdr->addr);
        else if (ehdr->destined)
                salt ^= tipc_own_addr(net);
        memcpy(iv, &salt, 4);
                        goto rcv;
                }
                tipc_aead_put(aead);
-               aead = tipc_aead_get(tmp);
+               aead = tipc_aead_get((struct tipc_aead __force __rcu *)tmp);
        }
 
        if (unlikely(err)) {
-               tipc_aead_users_dec(aead, INT_MIN);
+               tipc_aead_users_dec((struct tipc_aead __force __rcu *)aead, INT_MIN);
                goto free_skb;
        }
 
        /* Set the RX key's user */
-       tipc_aead_users_set(aead, 1);
+       tipc_aead_users_set((struct tipc_aead __force __rcu *)aead, 1);
 
        /* Mark this point, RX works */
        rx->timer1 = jiffies;
 
 
 const int tipc_max_domain_size = sizeof(struct tipc_mon_domain);
 
+/* Byte-order conversion helpers for the cached monitor domain record
+ * (mon->cache) as it is copied to/from the wire.
+ *
+ * These exist to centralize the (__force) casts between plain u16/u32/u64
+ * fields and the endian-annotated types, so sparse stops warning at every
+ * call site while the on-wire format stays exactly what the old open-coded
+ * htons()/htonl()/cpu_to_be64() calls produced.
+ *
+ * NOTE(review): despite the "le" (little-endian) names, every helper here
+ * wraps htons()/htonl()/cpu_to_be64(), i.e. converts to/from BIG-endian
+ * (network) byte order on little-endian hosts. Confirm this naming is
+ * intentional (historical wire-format preservation) before relying on the
+ * names to describe the actual encoding.
+ */
+static inline u16 mon_cpu_to_le16(u16 val)
+{
+       return (__force __u16)htons(val);
+}
+
+static inline u32 mon_cpu_to_le32(u32 val)
+{
+       return (__force __u32)htonl(val);
+}
+
+static inline u64 mon_cpu_to_le64(u64 val)
+{
+       return (__force __u64)cpu_to_be64(val);
+}
+
+/* Inverse helpers: decode a received domain record's fields into CPU order.
+ * Same caveat as above -- these actually decode big-endian values.
+ */
+static inline u16 mon_le16_to_cpu(u16 val)
+{
+       return ntohs((__force __be16)val);
+}
+
+static inline u32 mon_le32_to_cpu(u32 val)
+{
+       return ntohl((__force __be32)val);
+}
+
+static inline u64 mon_le64_to_cpu(u64 val)
+{
+       return be64_to_cpu((__force __be64)val);
+}
+
+
 /* dom_rec_len(): actual length of domain record for transport
  */
 static int dom_rec_len(struct tipc_mon_domain *dom, u16 mcnt)
                diff |= dom->members[i] != peer->addr;
                dom->members[i] = peer->addr;
                map_set(&dom->up_map, i, peer->is_up);
-               cache->members[i] = htonl(peer->addr);
+               cache->members[i] = mon_cpu_to_le32(peer->addr);
        }
        diff |= dom->up_map != prev_up_map;
        if (!diff)
                return;
        dom->gen = ++mon->dom_gen;
-       cache->len = htons(dom->len);
-       cache->gen = htons(dom->gen);
-       cache->member_cnt = htons(member_cnt);
-       cache->up_map = cpu_to_be64(dom->up_map);
+       cache->len = mon_cpu_to_le16(dom->len);
+       cache->gen = mon_cpu_to_le16(dom->gen);
+       cache->member_cnt = mon_cpu_to_le16(member_cnt);
+       cache->up_map = mon_cpu_to_le64(dom->up_map);
        mon_apply_domain(mon, self);
 }
 
        struct tipc_mon_domain dom_bef;
        struct tipc_mon_domain *dom;
        struct tipc_peer *peer;
-       u16 new_member_cnt = ntohs(arrv_dom->member_cnt);
+       u16 new_member_cnt = mon_le16_to_cpu(arrv_dom->member_cnt);
        int new_dlen = dom_rec_len(arrv_dom, new_member_cnt);
-       u16 new_gen = ntohs(arrv_dom->gen);
-       u16 acked_gen = ntohs(arrv_dom->ack_gen);
+       u16 new_gen = mon_le16_to_cpu(arrv_dom->gen);
+       u16 acked_gen = mon_le16_to_cpu(arrv_dom->ack_gen);
+       u16 arrv_dlen = mon_le16_to_cpu(arrv_dom->len);
        bool probing = state->probing;
        int i, applied_bef;
 
                return;
        if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
                return;
-       if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen)
+       if (dlen < new_dlen || arrv_dlen != new_dlen)
                return;
 
        /* Synch generation numbers with peer if link just came up */
        dom->len = new_dlen;
        dom->gen = new_gen;
        dom->member_cnt = new_member_cnt;
-       dom->up_map = be64_to_cpu(arrv_dom->up_map);
+       dom->up_map = mon_le64_to_cpu(arrv_dom->up_map);
        for (i = 0; i < new_member_cnt; i++)
-               dom->members[i] = ntohl(arrv_dom->members[i]);
+               dom->members[i] = mon_le32_to_cpu(arrv_dom->members[i]);
 
        /* Update peers affected by this domain record */
        applied_bef = peer->applied;
        if (likely(state->acked_gen == gen)) {
                len = dom_rec_len(dom, 0);
                *dlen = len;
-               dom->len = htons(len);
-               dom->gen = htons(gen);
-               dom->ack_gen = htons(state->peer_gen);
+               dom->len = mon_cpu_to_le16(len);
+               dom->gen = mon_cpu_to_le16(gen);
+               dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
                dom->member_cnt = 0;
                return;
        }
        /* Send the full record */
        read_lock_bh(&mon->lock);
-       len = ntohs(mon->cache.len);
+       len = mon_le16_to_cpu(mon->cache.len);
        *dlen = len;
        memcpy(data, &mon->cache, len);
        read_unlock_bh(&mon->lock);
-       dom->ack_gen = htons(state->peer_gen);
+       dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
 }
 
 void tipc_mon_get_state(struct net *net, u32 addr,