*/
        struct inet_bind_hashbucket     *bhash;
 
-       int                             bhash_size;
+       unsigned int                    bhash_size;
        unsigned int                    ehash_size;
 
        /* All sockets in TCP_LISTEN state will be in here.  This is the only
 
        skb->next = NULL;
 }
 
-#define sk_wait_event(__sk, __timeo, __condition)              \
-({     int rc;                                                 \
-       release_sock(__sk);                                     \
-       rc = __condition;                                       \
-       if (!rc) {                                              \
-               *(__timeo) = schedule_timeout(*(__timeo));      \
-       }                                                       \
-       lock_sock(__sk);                                        \
-       rc = __condition;                                       \
-       rc;                                                     \
-})
+#define sk_wait_event(__sk, __timeo, __condition)                      \
+       ({      int __rc;                                               \
+               release_sock(__sk);                                     \
+               __rc = __condition;                                     \
+               if (!__rc) {                                            \
+                       *(__timeo) = schedule_timeout(*(__timeo));      \
+               }                                                       \
+               lock_sock(__sk);                                        \
+               __rc = __condition;                                     \
+               __rc;                                                   \
+       })
 
 extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
 extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
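For reference, sk_wait_event() is intended to be called from a prepare_to_wait()/finish_wait() loop such as the sk_stream_wait_*() helpers declared above; a minimal sketch of that calling pattern (the wake-up condition and error handling here are placeholders, not taken from this patch):

	DEFINE_WAIT(wait);
	int done;

	do {
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		/* drops the socket lock, sleeps if the condition is false,
		 * then re-takes the lock and re-evaluates the condition
		 */
		done = sk_wait_event(sk, timeo_p, condition);
		finish_wait(sk->sk_sleep, &wait);
	} while (!done);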
 
                                 * address. So as a precaution flush any
                                 * entries we have for this address.
                                 */
-                               struct aarp_entry *a;
-
                                a = __aarp_find_entry(resolved[sa.s_node %
                                                          (AARP_HASH_SIZE - 1)],
                                                      skb->dev, &sa);
 
                struct hlist_head *head = &vcc_hash[i];
 
                sk_for_each(s, node, head) {
-                       struct atm_vcc *vcc = atm_sk(s);
+                       vcc = atm_sk(s);
 
                        purge_vcc(vcc);
                }
 
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
-       long *inuse;
+       unsigned long *inuse;
        struct net_device *d;
 
        p = strnchr(name, IFNAMSIZ-1, '%');
                        return -EINVAL;
 
                /* Use one page as a bit array of possible slots */
-               inuse = (long *) get_zeroed_page(GFP_ATOMIC);
+               inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;
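The remainder of this function (outside the hunk) uses that page as a bitmap of name indices already taken; roughly, and assuming the function's usual IFNAMSIZ-sized local buffer buf:

		for_each_netdev(d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not an exact inverse of printf */
			snprintf(buf, sizeof(buf), name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);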
 
 
        }
 
        if (!strcmp(name, "mpls")) {
-               unsigned n, offset;
+               unsigned n, cnt;
+
                len = get_labels(&user_buffer[i], pkt_dev);
-               if (len < 0) { return len; }
+               if (len < 0)
+                       return len;
                i += len;
-               offset = sprintf(pg_result, "OK: mpls=");
+               cnt = sprintf(pg_result, "OK: mpls=");
                for (n = 0; n < pkt_dev->nr_labels; n++)
-                       offset += sprintf(pg_result + offset,
-                                         "%08x%s", ntohl(pkt_dev->labels[n]),
-                                         n == pkt_dev->nr_labels-1 ? "" : ",");
+                       cnt += sprintf(pg_result + cnt,
+                                      "%08x%s", ntohl(pkt_dev->labels[n]),
+                                      n == pkt_dev->nr_labels-1 ? "" : ",");
 
                if (pkt_dev->nr_labels && pkt_dev->vlan_id != 0xffff) {
                        pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
        unsigned int prefixlen = 0;
        unsigned int suffixlen = 0;
        __be32 tmp;
+       char *pos;
 
        for (i = 0; i < 16; i++)
                ip[i] = 0;
                        }
                        s++;
                }
-               {
-                       char *tmp;
-                       u = simple_strtoul(s, &tmp, 16);
-                       i = tmp - s;
-               }
 
+               u = simple_strtoul(s, &pos, 16);
+               i = pos - s;
                if (!i)
                        return 0;
                if (prefixlen == 12 && s[i] == '.') {
                        len++;
                } else if (suffixlen != 0)
                        break;
-               {
-                       char *tmp;
-                       u = simple_strtol(s, &tmp, 16);
-                       i = tmp - s;
-               }
+
+               u = simple_strtol(s, &pos, 16);
+               i = pos - s;
                if (!i) {
                        if (*s)
                                len--;
 
 
 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
 {
-       struct cmsghdr __user *cm = (struct cmsghdr __user *)msg->msg_control;
+       struct cmsghdr __user *cm
+               = (__force struct cmsghdr __user *)msg->msg_control;
        struct cmsghdr cmhdr;
        int cmlen = CMSG_LEN(len);
        int err;
 
 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
 {
-       struct cmsghdr __user *cm = (struct cmsghdr __user*)msg->msg_control;
+       struct cmsghdr __user *cm
+               = (__force struct cmsghdr __user*)msg->msg_control;
 
        int fdmax = 0;
        int fdnum = scm->fp->count;
        if (fdnum < fdmax)
                fdmax = fdnum;
 
-       for (i=0, cmfptr=(int __user *)CMSG_DATA(cm); i<fdmax; i++, cmfptr++)
+       for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
+            i++, cmfptr++)
        {
                int new_fd;
                err = security_file_receive(fp[i]);
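The __force annotations above only affect sparse: msg_control is stored as a plain kernel pointer, but put_cmsg() and scm_detach_fds() deliberately treat it as user-space memory, so the casts change address space on purpose. A contrived illustration (not from this patch) of what sparse enforces on an annotated build:

	void *kptr = msg->msg_control;
	void __user *uptr;

	uptr = kptr;				/* sparse: incorrect type in assignment
						 * (different address spaces)
						 */
	uptr = (__force void __user *)kptr;	/* intentional re-annotation, no warning */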
 
                        (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);
        } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
 #ifdef CONFIG_IP_MULTICAST
-               struct in_device *in_dev = pmc->interface;
                struct ip_sf_list *psf;
+               in_dev = pmc->interface;
 #endif
 
                /* filter mode change */
 {
        int err;
 
-       if (iml->sflist == 0) {
+       if (iml->sflist == NULL) {
                /* any-source empty exclude case */
                return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
                        iml->sfmode, 0, NULL, 0);
                return -EFAULT;
        }
        for (i=0; i<copycount; i++) {
-               struct sockaddr_in *psin;
                struct sockaddr_storage ss;
 
                psin = (struct sockaddr_in *)&ss;
 
 
                lro_init_desc(lro_desc, skb, iph, tcph, 0, NULL);
                LRO_INC_STATS(lro_mgr, aggregated);
-               return 0;
+               return NULL;
        }
 
        if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
 
                        break;
                }
                msf = kmalloc(optlen, GFP_KERNEL);
-               if (msf == 0) {
+               if (!msf) {
                        err = -ENOBUFS;
                        break;
                }
                        break;
                }
                gsf = kmalloc(optlen,GFP_KERNEL);
-               if (gsf == 0) {
+               if (!gsf) {
                        err = -ENOBUFS;
                        break;
                }
                }
                msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
                msf = kmalloc(msize,GFP_KERNEL);
-               if (msf == 0) {
+               if (!msf) {
                        err = -ENOBUFS;
                        goto mc_msf_out;
                }
 
        { "TimestampReps", ICMP_TIMESTAMPREPLY },
        { "AddrMasks", ICMP_ADDRESS },
        { "AddrMaskReps", ICMP_ADDRESSREPLY },
-       { 0, 0 }
+       { NULL, 0 }
 };
 
 
 
 
 static struct rt_hash_bucket   *rt_hash_table;
 static unsigned                        rt_hash_mask;
-static int                     rt_hash_log;
+static unsigned int            rt_hash_log;
 static unsigned int            rt_hash_rnd;
 
 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
                i = (i + 1) & rt_hash_mask;
                rthp = &rt_hash_table[i].chain;
 
-               if (*rthp == 0)
+               if (*rthp == NULL)
                        continue;
                spin_lock_bh(rt_hash_lock_addr(i));
                while ((rth = *rthp) != NULL) {
 
        BUG_TRAP((int)tp->lost_out >= 0);
        BUG_TRAP((int)tp->retrans_out >= 0);
        if (!tp->packets_out && tcp_is_sack(tp)) {
-               const struct inet_connection_sock *icsk = inet_csk(sk);
+               icsk = inet_csk(sk);
                if (tp->lost_out) {
                        printk(KERN_DEBUG "Leak l=%u %d\n",
                               tp->lost_out, icsk->icsk_ca_state);
 
                                break;
                read_unlock_bh(&idev->lock);
                in6_dev_put(idev);
-               return aca != 0;
+               return aca != NULL;
        }
        return 0;
 }
 
                        break;
                }
                gsf = kmalloc(optlen,GFP_KERNEL);
-               if (gsf == 0) {
+               if (!gsf) {
                        retv = -ENOBUFS;
                        break;
                }
 
        /* we assume size > sizeof(ra) here */
        skb = sock_alloc_send_skb(sk, size + LL_RESERVED_SPACE(dev), 1, &err);
 
-       if (skb == 0)
+       if (!skb)
                return NULL;
 
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        /* callers have the socket lock and a write lock on ipv6_sk_mc_lock,
         * so no other readers or writers of iml or its sflist
         */
-       if (iml->sflist == 0) {
+       if (!iml->sflist) {
                /* any-source empty exclude case */
                return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
        }
 
                        break;
                case ND_OPT_PREFIX_INFO:
                        ndopts->nd_opts_pi_end = nd_opt;
-                       if (ndopts->nd_opt_array[nd_opt->nd_opt_type] == 0)
+                       if (!ndopts->nd_opt_array[nd_opt->nd_opt_type])
                                ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt;
                        break;
 #ifdef CONFIG_IPV6_ROUTE_INFO
 
        } else
                handle = gen_new_kid(ht, htid);
 
-       if (tb[TCA_U32_SEL-1] == 0 ||
+       if (tb[TCA_U32_SEL-1] == NULL ||
            RTA_PAYLOAD(tb[TCA_U32_SEL-1]) < sizeof(struct tc_u32_sel))
                return -EINVAL;
 
 
         *      kernel msghdr to use the kernel address space)
         */
 
-       uaddr = (void __user *)msg_sys.msg_name;
+       uaddr = (__force void __user *)msg_sys.msg_name;
        uaddr_len = COMPAT_NAMELEN(msg);
        if (MSG_CMSG_COMPAT & flags) {
                err = verify_compat_iovec(&msg_sys, iov, addr, VERIFY_WRITE);