if (udpfrag) {
                        iph->id = htons(id);
                        iph->frag_off = htons(offset >> 3);
-                       if (skb->next != NULL)
+                       if (skb->next)
                                iph->frag_off |= htons(IP_MF);
                        offset += skb->len - nhoff - ihl;
                } else {
 
                break;
 #endif
        default:
-               if (target_hw != NULL)
+               if (target_hw)
                        memcpy(arp_ptr, target_hw, dev->addr_len);
                else
                        memset(arp_ptr, 0, dev->addr_len);
 
        atomic_set(&doi_def->refcount, 1);
 
        spin_lock(&cipso_v4_doi_list_lock);
-       if (cipso_v4_doi_search(doi_def->doi) != NULL) {
+       if (cipso_v4_doi_search(doi_def->doi)) {
                spin_unlock(&cipso_v4_doi_list_lock);
                ret_val = -EEXIST;
                goto doi_add_return;
 
 doi_add_return:
        audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
-       if (audit_buf != NULL) {
+       if (audit_buf) {
                const char *type_str;
                switch (doi_type) {
                case CIPSO_V4_MAP_TRANS:
 
 doi_remove_return:
        audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info);
-       if (audit_buf != NULL) {
+       if (audit_buf) {
                audit_log_format(audit_buf,
                                 " cipso_doi=%u res=%u",
                                 doi, ret_val == 0 ? 1 : 0);
 
        __be32 addr = 0;
        struct net_device *dev;
 
-       if (in_dev != NULL)
+       if (in_dev)
                return confirm_addr_indev(in_dev, dst, local, scope);
 
        rcu_read_lock();
 
        BUG_ON(i >= child_length(tn));
 
        /* update emptyChildren, overflow into fullChildren */
-       if (!n && chi != NULL)
+       if (!n && chi)
                empty_child_inc(tn);
-       if (n != NULL && !chi)
+       if (n && !chi)
                empty_child_dec(tn);
 
        /* update fullChildren */
 
 
        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
-       if (ptype != NULL)
+       if (ptype)
                err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
 
        rcu_read_unlock();
 
 
        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
-       if (ptype != NULL)
+       if (ptype)
                err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
 
        rcu_read_unlock();
 
                inet->mc_list = iml->next_rcu;
                in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
                (void) ip_mc_leave_src(sk, iml, in_dev);
-               if (in_dev != NULL)
+               if (in_dev)
                        ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
                /* decrease mem now to avoid the memleak warning */
                atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
                if (unlikely(!idev))
                        continue;
                im = rcu_dereference(idev->mc_list);
-               if (likely(im != NULL)) {
+               if (likely(im)) {
                        spin_lock_bh(&im->lock);
                        psf = im->sources;
-                       if (likely(psf != NULL)) {
+                       if (likely(psf)) {
                                state->im = im;
                                state->idev = idev;
                                break;
        __releases(rcu)
 {
        struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
-       if (likely(state->im != NULL)) {
+       if (likely(state->im)) {
                spin_unlock_bh(&state->im->lock);
                state->im = NULL;
        }
 
 {
        struct sock *newsk = sk_clone_lock(sk, priority);
 
-       if (newsk != NULL) {
+       if (newsk) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);
 
                newsk->sk_state = TCP_SYN_RECV;
                sk_acceptq_removed(sk);
                reqsk_put(req);
        }
-       if (queue->fastopenq != NULL) {
+       if (queue->fastopenq) {
                /* Free all the reqs queued in rskq_rst_head. */
                spin_lock_bh(&queue->fastopenq->lock);
                acc_req = queue->fastopenq->rskq_rst_head;
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
-       if (icsk->icsk_af_ops->compat_getsockopt != NULL)
+       if (icsk->icsk_af_ops->compat_getsockopt)
                return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->getsockopt(sk, level, optname,
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
-       if (icsk->icsk_af_ops->compat_setsockopt != NULL)
+       if (icsk->icsk_af_ops->compat_setsockopt)
                return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->setsockopt(sk, level, optname,
 
 {
        struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 
-       if (tb != NULL) {
+       if (tb) {
                write_pnet(&tb->ib_net, net);
                tb->port      = snum;
                tb->fastreuse = 0;
 
        struct inet_timewait_sock *tw =
                kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
                                 GFP_ATOMIC);
-       if (tw != NULL) {
+       if (tw) {
                const struct inet_sock *inet = inet_sk(sk);
 
                kmemcheck_annotate_bitfield(tw, flags);
 
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
        /* Lookup (or create) queue header */
-       if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
+       qp = ip_find(net, ip_hdr(skb), user);
+       if (qp) {
                int ret;
 
                spin_lock(&qp->q.lock);
 
                raw = raw_local_deliver(skb, protocol);
 
                ipprot = rcu_dereference(inet_protos[protocol]);
-               if (ipprot != NULL) {
+               if (ipprot) {
                        int ret;
 
                        if (!ipprot->no_policy) {
 
        unsigned char *iph;
        int optlen, l;
 
-       if (skb != NULL) {
+       if (skb) {
                rt = skb_rtable(skb);
                optptr = (unsigned char *)&(ip_hdr(skb)[1]);
        } else
 
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
-       if (skb_dst(skb)->xfrm != NULL) {
+       if (skb_dst(skb)->xfrm) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
        inet_opt = rcu_dereference(inet->inet_opt);
        fl4 = &fl->u.ip4;
        rt = skb_rtable(skb);
-       if (rt != NULL)
+       if (rt)
                goto packet_routed;
 
        /* Make sure we can route this packet. */
                                        ip_options_fragment(frag);
                                offset += skb->len - hlen;
                                iph->frag_off = htons(offset>>3);
-                               if (frag->next != NULL)
+                               if (frag->next)
                                        iph->frag_off |= htons(IP_MF);
                                /* Ready, complete checksum */
                                ip_send_check(iph);
 
                                   skb_network_header(skb);
        serr->port = port;
 
-       if (skb_pull(skb, payload - skb->data) != NULL) {
+       if (skb_pull(skb, payload - skb->data)) {
                skb_reset_transport_header(skb);
                if (sock_queue_err_skb(sk, skb) == 0)
                        return;
 
                        break;
                }
                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
-                       if (t != NULL) {
+                       if (t) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
 
 
        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
                                  iph->saddr, iph->daddr, 0);
-       if (tunnel != NULL) {
+       if (tunnel) {
                if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                        goto drop;
 
 
        unsigned int i;
 
        mrt = ipmr_get_table(net, id);
-       if (mrt != NULL)
+       if (mrt)
                return mrt;
 
        mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
 
        skb_push(skb, skb->data - (u8 *)icmph);
 
        sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
-       if (sk != NULL) {
+       if (sk) {
                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
                pr_debug("rcv on socket %p\n", sk);
 
 
        read_lock(&raw_v4_hashinfo.lock);
        raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
-       if (raw_sk != NULL) {
+       if (raw_sk) {
                iph = (const struct iphdr *)skb->data;
                net = dev_net(skb->dev);
 
 
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
-               if (skb != NULL)
+               if (skb)
                        amount = skb->len;
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                return put_user(amount, (int __user *)arg);
 
 
        fnhe = find_exception(&FIB_RES_NH(*res), daddr);
        if (do_cache) {
-               if (fnhe != NULL)
+               if (fnhe)
                        rth = rcu_dereference(fnhe->fnhe_rth_input);
                else
                        rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
 
 
        /* Connected or passive Fast Open socket? */
        if (sk->sk_state != TCP_SYN_SENT &&
-           (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
+           (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) {
                int target = sock_rcvlowat(sk, 0, INT_MAX);
 
                if (tp->urg_seq == tp->copied_seq &&
 
 void tcp_free_fastopen_req(struct tcp_sock *tp)
 {
-       if (tp->fastopen_req != NULL) {
+       if (tp->fastopen_req) {
                kfree(tp->fastopen_req);
                tp->fastopen_req = NULL;
        }
 
        if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
                return -EOPNOTSUPP;
-       if (tp->fastopen_req != NULL)
+       if (tp->fastopen_req)
                return -EALREADY; /* Another Fast Open is in progress */
 
        tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
                 * aborted (e.g., closed with unread data) before 3WHS
                 * finishes.
                 */
-               if (req != NULL)
+               if (req)
                        reqsk_fastopen_remove(sk, req, false);
                inet_csk_destroy_sock(sk);
        }
                break;
 
        case TCP_FASTOPEN:
-               if (icsk->icsk_accept_queue.fastopenq != NULL)
+               if (icsk->icsk_accept_queue.fastopenq)
                        val = icsk->icsk_accept_queue.fastopenq->max_qlen;
                else
                        val = 0;
 
        tcp_set_state(sk, TCP_CLOSE);
        tcp_clear_xmit_timers(sk);
-       if (req != NULL)
+       if (req)
                reqsk_fastopen_remove(sk, req, false);
 
        sk->sk_shutdown = SHUTDOWN_MASK;
 
                r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
                r->idiag_wqueue = tp->write_seq - tp->snd_una;
        }
-       if (info != NULL)
+       if (info)
                tcp_get_info(sk, info);
 }
 
 
                fack_count += pcount;
 
                /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
-               if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
+               if (!tcp_is_fack(tp) && tp->lost_skb_hint &&
                    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
                        tp->lost_cnt_hint += pcount;
 
                if (!before(TCP_SKB_CB(skb)->seq, end_seq))
                        break;
 
-               if ((next_dup != NULL) &&
+               if (next_dup &&
                    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
                        in_sack = tcp_match_skb_to_sack(sk, skb,
                                                        next_dup->start_seq,
                if (in_sack <= 0) {
                        tmp = tcp_shift_skb_data(sk, skb, state,
                                                 start_seq, end_seq, dup_sack);
-                       if (tmp != NULL) {
+                       if (tmp) {
                                if (tmp != skb) {
                                        skb = tmp;
                                        continue;
 
        tcp_set_state(sk, TCP_ESTABLISHED);
 
-       if (skb != NULL) {
+       if (skb) {
                icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
                security_inet_conn_established(sk, skb);
        }
        }
 
        req = tp->fastopen_rsk;
-       if (req != NULL) {
+       if (req) {
                WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
                    sk->sk_state != TCP_FIN_WAIT1);
 
                 * ACK we have received, this would have acknowledged
                 * our SYNACK so stop the SYNACK timer.
                 */
-               if (req != NULL) {
+               if (req) {
                        /* Return RST if ack_seq is invalid.
                         * Note that RFC793 only says to generate a
                         * DUPACK for it but for TCP Fast Open it seems
 
        /* Copy over the MD5 key from the original socket */
        key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
                                AF_INET);
-       if (key != NULL) {
+       if (key) {
                /*
                 * We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
        if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(sk);
 
-       BUG_ON(tp->fastopen_rsk != NULL);
+       BUG_ON(tp->fastopen_rsk);
 
        /* If socket is aborted during connect operation */
        tcp_free_fastopen_req(tp);
 
        if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
                tw = inet_twsk_alloc(sk, state);
 
-       if (tw != NULL) {
+       if (tw) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
                struct inet_sock *inet = inet_sk(sk);
                        struct tcp_md5sig_key *key;
                        tcptw->tw_md5_key = NULL;
                        key = tp->af_specific->md5_lookup(sk, sk);
-                       if (key != NULL) {
+                       if (key) {
                                tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
                                if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
                                        BUG();
 {
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 
-       if (newsk != NULL) {
+       if (newsk) {
                const struct inet_request_sock *ireq = inet_rsk(req);
                struct tcp_request_sock *treq = tcp_rsk(req);
                struct inet_connection_sock *newicsk = inet_csk(newsk);
 
                if (unlikely(!ireq->tstamp_ok))
                        remaining -= TCPOLEN_SACKPERM_ALIGNED;
        }
-       if (foc != NULL && foc->len >= 0) {
+       if (foc && foc->len >= 0) {
                u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
                need = (need + 3) & ~3U;  /* Align to 32 bits */
                if (remaining >= need) {
        int mss = tcp_current_mss(sk);
        int err = -1;
 
-       if (tcp_send_head(sk) != NULL) {
+       if (tcp_send_head(sk)) {
                err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
                goto rearm_timer;
        }
                        if (!tcp_can_forward_retransmit(sk))
                                break;
                        /* Backtrack if necessary to non-L'ed skb */
-                       if (hole != NULL) {
+                       if (hole) {
                                skb = hole;
                                hole = NULL;
                        }
         */
        mss_now = tcp_current_mss(sk);
 
-       if (tcp_send_head(sk) != NULL) {
+       if (tcp_send_head(sk)) {
                TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
                TCP_SKB_CB(skb)->end_seq++;
                tp->write_seq++;
                (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
 
 #ifdef CONFIG_TCP_MD5SIG
-       if (tp->af_specific->md5_lookup(sk, sk) != NULL)
+       if (tp->af_specific->md5_lookup(sk, sk))
                tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif
 
        if (sk->sk_state == TCP_CLOSE)
                return -1;
 
-       if ((skb = tcp_send_head(sk)) != NULL &&
-           before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
+       skb = tcp_send_head(sk);
+       if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
                int err;
                unsigned int mss = tcp_current_mss(sk);
                unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
 
 
                /* if we're overly short, let UDP handle it */
                encap_rcv = ACCESS_ONCE(up->encap_rcv);
-               if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
+               if (skb->len > sizeof(struct udphdr) && encap_rcv) {
                        int ret;
 
                        /* Verify checksum before giving to encap */
                                                saddr, daddr, udptable, proto);
 
        sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
-       if (sk != NULL) {
+       if (sk) {
                int ret;
 
                if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
 
        pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
 unlock:
        spin_unlock(&udp_offload_lock);
-       if (uo_priv != NULL)
+       if (uo_priv)
                call_rcu(&uo_priv->rcu, udp_offload_free_routine);
 }
 EXPORT_SYMBOL(udp_del_offload);
                        break;
        }
 
-       if (uo_priv != NULL) {
+       if (uo_priv) {
                NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
                err = uo_priv->offload->callbacks.gro_complete(skb,
                                nhoff + sizeof(struct udphdr),
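
The pattern applied throughout is the kernel coding-style preference (flagged by checkpatch.pl's COMPARISON_TO_NULL check) of testing a pointer directly rather than comparing it against NULL; behaviour is unchanged, since a pointer is false exactly when it is NULL. A minimal sketch of the idiom, with lookup(), use() and handle_missing() as hypothetical placeholders:

	struct foo *p = lookup(key);	/* may return NULL */

	if (p)				/* rather than: if (p != NULL) */
		use(p);
	else				/* rather than: if (p == NULL) */
		handle_missing();

Where the old code buried the assignment inside the condition, as in the ip_find() and tcp_send_head() hunks above, the assignment is hoisted onto its own line so the test remains a bare pointer check.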