 /* Allocate a hashtable of hlist_head (if nulls == 0),
  * or hlist_nulls_head (if nulls == 1)
  */
-extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls);
+extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
 
-extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size);
+extern void nf_ct_free_hashtable(void *hash, unsigned int size);
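
With the vmalloced flag gone, callers pass only the size pointer and the
nulls mode, and teardown needs only the pointer and size. A minimal
caller-side sketch of the new pairing (hypothetical caller, not part of
this patch):

	unsigned int size = 1024;	/* rounded up to fill whole pages */
	struct hlist_head *h;

	/* nulls == 0: table of plain hlist_head slots. */
	h = nf_ct_alloc_hashtable(&size, 0);
	if (!h)
		return -ENOMEM;
	/* ... use the table; "size" now holds the real slot count ... */
	nf_ct_free_hashtable(h, size);	/* allocation kind inferred from address */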
 
 extern struct nf_conntrack_tuple_hash *
 __nf_conntrack_find(struct net *net, u16 zone,
 
        struct ctl_table_header *acct_sysctl_header;
        struct ctl_table_header *event_sysctl_header;
 #endif
-       int                     hash_vmalloc;
-       int                     expect_vmalloc;
        char                    *slabname;
 };
 #endif
 
        struct xt_table         *nat_table;
        struct hlist_head       *nat_bysource;
        unsigned int            nat_htable_size;
-       int                     nat_vmalloced;
 #endif
 
        int sysctl_icmp_echo_ignore_all;
 
 {
        /* Leave them the same for the moment. */
        net->ipv4.nat_htable_size = net->ct.htable_size;
-       net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
-                                                      &net->ipv4.nat_vmalloced, 0);
+       net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0);
        if (!net->ipv4.nat_bysource)
                return -ENOMEM;
        return 0;
 {
        nf_ct_iterate_cleanup(net, &clean_nat, NULL);
        synchronize_rcu();
-       nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
-                            net->ipv4.nat_htable_size);
+       nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size);
 }
 
 static struct pernet_operations nf_nat_net_ops = {
 
        return 1;
 }
 
-void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
+void nf_ct_free_hashtable(void *hash, unsigned int size)
 {
-       if (vmalloced)
+       if (is_vmalloc_addr(hash))
                vfree(hash);
        else
                free_pages((unsigned long)hash,
                goto i_see_dead_people;
        }
 
-       nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-                            net->ct.htable_size);
+       nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
        nf_conntrack_ecache_fini(net);
        nf_conntrack_acct_fini(net);
        nf_conntrack_expect_fini(net);
        }
 }
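
The enabler here is is_vmalloc_addr() (declared in linux/mm.h), which
tests whether a pointer lies in the vmalloc address range, so the
allocation kind can be recovered from the address itself instead of
being threaded through every caller and per-netns struct. The pattern in
isolation (illustrative helper, not from this patch):

	#include <linux/gfp.h>		/* __get_free_pages(), free_pages() */
	#include <linux/mm.h>		/* is_vmalloc_addr(), get_order() */
	#include <linux/vmalloc.h>	/* __vmalloc(), vfree() */

	/* Free a buffer that may have come from either the page allocator
	 * or vmalloc(), without carrying a separate "vmalloced" flag. */
	static void free_either(void *buf, size_t sz)
	{
		if (is_vmalloc_addr(buf))
			vfree(buf);
		else
			free_pages((unsigned long)buf, get_order(sz));
	}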
 
-void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
 {
        struct hlist_nulls_head *hash;
        unsigned int nr_slots, i;
        size_t sz;
 
-       *vmalloced = 0;
-
        BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
        nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
        sz = nr_slots * sizeof(struct hlist_nulls_head);
        hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                                        get_order(sz));
        if (!hash) {
-               *vmalloced = 1;
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
                hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                                 PAGE_KERNEL);
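
To make the rounding concrete: nr_slots is rounded up to a multiple of
PAGE_SIZE / sizeof(struct hlist_nulls_head), so the table always fills
whole pages and *sizep reports the real slot count back to the caller.
Worked example, assuming 4 KiB pages and 8-byte list heads (a typical
64-bit build):

	/* PAGE_SIZE / sizeof(struct hlist_nulls_head) == 4096 / 8 == 512, so: */
	/* roundup(1,   512) ==  512 slots -> sz = 4096, get_order(sz) == 0 */
	/* roundup(700, 512) == 1024 slots -> sz = 8192, get_order(sz) == 1 */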
 
 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 {
-       int i, bucket, vmalloced, old_vmalloced;
+       int i, bucket;
        unsigned int hashsize, old_size;
        struct hlist_nulls_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;
        if (!hashsize)
                return -EINVAL;
 
-       hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
+       hash = nf_ct_alloc_hashtable(&hashsize, 1);
        if (!hash)
                return -ENOMEM;
 
                }
        }
        old_size = init_net.ct.htable_size;
-       old_vmalloced = init_net.ct.hash_vmalloc;
        old_hash = init_net.ct.hash;
 
        init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
-       init_net.ct.hash_vmalloc = vmalloced;
        init_net.ct.hash = hash;
        spin_unlock_bh(&nf_conntrack_lock);
 
-       nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
+       nf_ct_free_hashtable(old_hash, old_size);
        return 0;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
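
nf_conntrack_set_hashsize() backs the module's hashsize parameter, so
the table can be resized on a live system (e.g. by writing to
/sys/module/nf_conntrack/parameters/hashsize); the swap above now
carries only the old pointer and size, with no old_vmalloced alongside.
The parameter is wired up roughly like this (sketch of the existing
module_param_call() hook, unchanged by this patch):

	module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
			  &nf_conntrack_htable_size, 0600);
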
        }
 
        net->ct.htable_size = nf_conntrack_htable_size;
-       net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
-                                            &net->ct.hash_vmalloc, 1);
+       net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
        if (!net->ct.hash) {
                ret = -ENOMEM;
                printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
 err_acct:
        nf_conntrack_expect_fini(net);
 err_expect:
-       nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-                            net->ct.htable_size);
+       nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
 err_hash:
        kmem_cache_destroy(net->ct.nf_conntrack_cachep);
 err_cache:
 
        }
 
        net->ct.expect_count = 0;
-       net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
-                                                 &net->ct.expect_vmalloc, 0);
+       net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (net->ct.expect_hash == NULL)
                goto err1;
 
        if (net_eq(net, &init_net))
                kmem_cache_destroy(nf_ct_expect_cachep);
 err2:
-       nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
-                            nf_ct_expect_hsize);
+       nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
 err1:
        return err;
 }
                rcu_barrier(); /* Wait for call_rcu() before destroy */
                kmem_cache_destroy(nf_ct_expect_cachep);
        }
-       nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
-                            nf_ct_expect_hsize);
+       nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
 }
 
 static struct hlist_head *nf_ct_helper_hash __read_mostly;
 static unsigned int nf_ct_helper_hsize __read_mostly;
 static unsigned int nf_ct_helper_count __read_mostly;
-static int nf_ct_helper_vmalloc;

 /* Stupid hash, but collision free for the default registrations of the
        int err;
 
        nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
-       nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize,
-                                                 &nf_ct_helper_vmalloc, 0);
+       nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
        if (!nf_ct_helper_hash)
                return -ENOMEM;
 
        return 0;
 
 err1:
-       nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
-                            nf_ct_helper_hsize);
+       nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
        return err;
 }
 
 void nf_conntrack_helper_fini(void)
 {
        nf_ct_extend_unregister(&helper_extend);
-       nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
-                            nf_ct_helper_hsize);
+       nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
 }