unsigned int stacksize;
        unsigned int __percpu *stackptr;
        void ***jumpstack;
-       /* ipt_entry tables: one per CPU */
+
        /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
-       void *entries[1];
+       void *entries;
 };
 
 #define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
 
         * pointer.
         */
        smp_read_barrier_depends();
-       table_base = private->entries[smp_processor_id()];
+       table_base = private->entries;
 
        e = get_entry(table_base, private->hook_entry[hook]);
        back = get_entry(table_base, private->underflow[hook]);
                return ret;
        }
 
-       /* And one copy for every other CPU */
-       for_each_possible_cpu(i) {
-               if (newinfo->entries[i] && newinfo->entries[i] != entry0)
-                       memcpy(newinfo->entries[i], entry0, newinfo->size);
-       }
-
        return ret;
 }
 
                seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
                i = 0;
-               xt_entry_foreach(iter, t->entries[cpu], t->size) {
+               xt_entry_foreach(iter, t->entries, t->size) {
                        struct xt_counters *tmp;
                        u64 bcnt, pcnt;
                        unsigned int start;
        if (IS_ERR(counters))
                return PTR_ERR(counters);
 
-       loc_cpu_entry = private->entries[raw_smp_processor_id()];
+       loc_cpu_entry = private->entries;
        /* ... then copy entire thing ... */
        if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
                ret = -EFAULT;
        if (!newinfo || !info)
                return -EINVAL;
 
-       /* we dont care about newinfo->entries[] */
+       /* we don't care about newinfo->entries */
        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
        newinfo->initial_entries = 0;
-       loc_cpu_entry = info->entries[raw_smp_processor_id()];
+       loc_cpu_entry = info->entries;
        xt_compat_init_offsets(NFPROTO_ARP, info->number);
        xt_entry_foreach(iter, loc_cpu_entry, info->size) {
                ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
        get_counters(oldinfo, counters);
 
        /* Decrease module usage counts and free resource */
-       loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
+       loc_cpu_old_entry = oldinfo->entries;
        xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
                cleanup_entry(iter);
 
        if (!newinfo)
                return -ENOMEM;
 
-       /* choose the copy that is on our node/cpu */
-       loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+       loc_cpu_entry = newinfo->entries;
        if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
                           tmp.size) != 0) {
                ret = -EFAULT;
 static int do_add_counters(struct net *net, const void __user *user,
                           unsigned int len, int compat)
 {
-       unsigned int i, curcpu;
+       unsigned int i;
        struct xt_counters_info tmp;
        struct xt_counters *paddc;
        unsigned int num_counters;
        struct xt_table *t;
        const struct xt_table_info *private;
        int ret = 0;
-       void *loc_cpu_entry;
        struct arpt_entry *iter;
        unsigned int addend;
 #ifdef CONFIG_COMPAT
        }
 
        i = 0;
-       /* Choose the copy that is on our node */
-       curcpu = smp_processor_id();
-       loc_cpu_entry = private->entries[curcpu];
+
        addend = xt_write_recseq_begin();
-       xt_entry_foreach(iter, loc_cpu_entry, private->size) {
+       xt_entry_foreach(iter, private->entries, private->size) {
                struct xt_counters *tmp;
 
                tmp = xt_get_this_cpu_counter(&iter->counters);
                newinfo->hook_entry[i] = info->hook_entry[i];
                newinfo->underflow[i] = info->underflow[i];
        }
-       entry1 = newinfo->entries[raw_smp_processor_id()];
+       entry1 = newinfo->entries;
        pos = entry1;
        size = total_size;
        xt_entry_foreach(iter0, entry0, total_size) {
                return ret;
        }
 
-       /* And one copy for every other CPU */
-       for_each_possible_cpu(i)
-               if (newinfo->entries[i] && newinfo->entries[i] != entry1)
-                       memcpy(newinfo->entries[i], entry1, newinfo->size);
-
        *pinfo = newinfo;
        *pentry0 = entry1;
        xt_free_table_info(info);
        if (!newinfo)
                return -ENOMEM;
 
-       /* choose the copy that is on our node/cpu */
-       loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+       loc_cpu_entry = newinfo->entries;
        if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) {
                ret = -EFAULT;
                goto free_newinfo;
        void __user *pos;
        unsigned int size;
        int ret = 0;
-       void *loc_cpu_entry;
        unsigned int i = 0;
        struct arpt_entry *iter;
 
        if (IS_ERR(counters))
                return PTR_ERR(counters);
 
-       /* choose the copy on our node/cpu */
-       loc_cpu_entry = private->entries[raw_smp_processor_id()];
        pos = userptr;
        size = total_size;
-       xt_entry_foreach(iter, loc_cpu_entry, total_size) {
+       xt_entry_foreach(iter, private->entries, total_size) {
                ret = compat_copy_entry_to_user(iter, &pos,
                                                &size, counters, i++);
                if (ret != 0)
                goto out;
        }
 
-       /* choose the copy on our node/cpu */
-       loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+       loc_cpu_entry = newinfo->entries;
        memcpy(loc_cpu_entry, repl->entries, repl->size);
 
        ret = translate_table(newinfo, loc_cpu_entry, repl);
        private = xt_unregister_table(table);
 
        /* Decrease module usage counts and free resources */
-       loc_cpu_entry = private->entries[raw_smp_processor_id()];
+       loc_cpu_entry = private->entries;
        xt_entry_foreach(iter, loc_cpu_entry, private->size)
                cleanup_entry(iter);
        if (private->number > private->initial_entries)
 
                         const struct xt_table_info *private,
                         const struct ipt_entry *e)
 {
-       const void *table_base;
        const struct ipt_entry *root;
        const char *hookname, *chainname, *comment;
        const struct ipt_entry *iter;
        unsigned int rulenum = 0;
        struct net *net = dev_net(in ? in : out);
 
-       table_base = private->entries[smp_processor_id()];
-       root = get_entry(table_base, private->hook_entry[hook]);
+       root = get_entry(private->entries, private->hook_entry[hook]);
 
        hookname = chainname = hooknames[hook];
        comment = comments[NF_IP_TRACE_COMMENT_RULE];
         * pointer.
         */
        smp_read_barrier_depends();
-       table_base = private->entries[cpu];
+       table_base = private->entries;
        jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
        stackptr   = per_cpu_ptr(private->stackptr, cpu);
        origptr    = *stackptr;
                return ret;
        }
 
-       /* And one copy for every other CPU */
-       for_each_possible_cpu(i) {
-               if (newinfo->entries[i] && newinfo->entries[i] != entry0)
-                       memcpy(newinfo->entries[i], entry0, newinfo->size);
-       }
-
        return ret;
 }
 
                seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
                i = 0;
-               xt_entry_foreach(iter, t->entries[cpu], t->size) {
+               xt_entry_foreach(iter, t->entries, t->size) {
                        struct xt_counters *tmp;
                        u64 bcnt, pcnt;
                        unsigned int start;
        struct xt_counters *counters;
        const struct xt_table_info *private = table->private;
        int ret = 0;
-       const void *loc_cpu_entry;
+       void *loc_cpu_entry;
 
        counters = alloc_counters(table);
        if (IS_ERR(counters))
                return PTR_ERR(counters);
 
-       /* choose the copy that is on our node/cpu, ...
-        * This choice is lazy (because current thread is
-        * allowed to migrate to another cpu)
-        */
-       loc_cpu_entry = private->entries[raw_smp_processor_id()];
+       loc_cpu_entry = private->entries;
        if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
                ret = -EFAULT;
                goto free_counters;
        if (!newinfo || !info)
                return -EINVAL;
 
-       /* we dont care about newinfo->entries[] */
+       /* we don't care about newinfo->entries */
        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
        newinfo->initial_entries = 0;
-       loc_cpu_entry = info->entries[raw_smp_processor_id()];
+       loc_cpu_entry = info->entries;
        xt_compat_init_offsets(AF_INET, info->number);
        xt_entry_foreach(iter, loc_cpu_entry, info->size) {
                ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
        struct xt_table *t;
        struct xt_table_info *oldinfo;
        struct xt_counters *counters;
-       void *loc_cpu_old_entry;
        struct ipt_entry *iter;
 
        ret = 0;
        get_counters(oldinfo, counters);
 
        /* Decrease module usage counts and free resource */
-       loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
-       xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
+       xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
                cleanup_entry(iter, net);
 
        xt_free_table_info(oldinfo);
        if (!newinfo)
                return -ENOMEM;
 
-       /* choose the copy that is on our node/cpu */
-       loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+       loc_cpu_entry = newinfo->entries;
        if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
                           tmp.size) != 0) {
                ret = -EFAULT;
 do_add_counters(struct net *net, const void __user *user,
                 unsigned int len, int compat)
 {
-       unsigned int i, curcpu;
+       unsigned int i;
        struct xt_counters_info tmp;
        struct xt_counters *paddc;
        unsigned int num_counters;
        struct xt_table *t;
        const struct xt_table_info *private;
        int ret = 0;
-       void *loc_cpu_entry;
        struct ipt_entry *iter;
        unsigned int addend;
 #ifdef CONFIG_COMPAT
        }
 
        i = 0;
-       /* Choose the copy that is on our node */
-       curcpu = smp_processor_id();
-       loc_cpu_entry = private->entries[curcpu];
        addend = xt_write_recseq_begin();
-       xt_entry_foreach(iter, loc_cpu_entry, private->size) {
+       xt_entry_foreach(iter, private->entries, private->size) {
                struct xt_counters *tmp;
 
                tmp = xt_get_this_cpu_counter(&iter->counters);
                newinfo->hook_entry[i] = info->hook_entry[i];
                newinfo->underflow[i] = info->underflow[i];
        }
-       entry1 = newinfo->entries[raw_smp_processor_id()];
+       entry1 = newinfo->entries;
        pos = entry1;
        size = total_size;
        xt_entry_foreach(iter0, entry0, total_size) {
                return ret;
        }
 
-       /* And one copy for every other CPU */
-       for_each_possible_cpu(i)
-               if (newinfo->entries[i] && newinfo->entries[i] != entry1)
-                       memcpy(newinfo->entries[i], entry1, newinfo->size);
-
        *pinfo = newinfo;
        *pentry0 = entry1;
        xt_free_table_info(info);
        if (!newinfo)
                return -ENOMEM;
 
-       /* choose the copy that is on our node/cpu */
-       loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+       loc_cpu_entry = newinfo->entries;
        if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
                           tmp.size) != 0) {
                ret = -EFAULT;
        void __user *pos;
        unsigned int size;
        int ret = 0;
-       const void *loc_cpu_entry;
        unsigned int i = 0;
        struct ipt_entry *iter;
 
        if (IS_ERR(counters))
                return PTR_ERR(counters);
 
-       /* choose the copy that is on our node/cpu, ...
-        * This choice is lazy (because current thread is
-        * allowed to migrate to another cpu)
-        */
-       loc_cpu_entry = private->entries[raw_smp_processor_id()];
        pos = userptr;
        size = total_size;
-       xt_entry_foreach(iter, loc_cpu_entry, total_size) {
+       xt_entry_foreach(iter, private->entries, total_size) {
                ret = compat_copy_entry_to_user(iter, &pos,
                                                &size, counters, i++);
                if (ret != 0)
                goto out;
        }
 
-       /* choose the copy on our node/cpu, but dont care about preemption */
-       loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+       loc_cpu_entry = newinfo->entries;
        memcpy(loc_cpu_entry, repl->entries, repl->size);
 
        ret = translate_table(net, newinfo, loc_cpu_entry, repl);
        private = xt_unregister_table(table);
 
        /* Decrease module usage counts and free resources */
-       loc_cpu_entry = private->entries[raw_smp_processor_id()];
+       loc_cpu_entry = private->entries;
        xt_entry_foreach(iter, loc_cpu_entry, private->size)
                cleanup_entry(iter, net);
        if (private->number > private->initial_entries)
 
                         const struct xt_table_info *private,
                         const struct ip6t_entry *e)
 {
-       const void *table_base;
        const struct ip6t_entry *root;
        const char *hookname, *chainname, *comment;
        const struct ip6t_entry *iter;
        unsigned int rulenum = 0;
        struct net *net = dev_net(in ? in : out);
 
-       table_base = private->entries[smp_processor_id()];
-       root = get_entry(table_base, private->hook_entry[hook]);
+       root = get_entry(private->entries, private->hook_entry[hook]);
 
        hookname = chainname = hooknames[hook];
        comment = comments[NF_IP6_TRACE_COMMENT_RULE];
         */
        smp_read_barrier_depends();
        cpu        = smp_processor_id();
-       table_base = private->entries[cpu];
+       table_base = private->entries;
        jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
        stackptr   = per_cpu_ptr(private->stackptr, cpu);
        origptr    = *stackptr;
                return ret;
        }
 
-       /* And one copy for every other CPU */
-       for_each_possible_cpu(i) {
-               if (newinfo->entries[i] && newinfo->entries[i] != entry0)
-                       memcpy(newinfo->entries[i], entry0, newinfo->size);
-       }
-
        return ret;
 }
 
                seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
                i = 0;
-               xt_entry_foreach(iter, t->entries[cpu], t->size) {
+               xt_entry_foreach(iter, t->entries, t->size) {
                        struct xt_counters *tmp;
                        u64 bcnt, pcnt;
                        unsigned int start;
        struct xt_counters *counters;
        const struct xt_table_info *private = table->private;
        int ret = 0;
-       const void *loc_cpu_entry;
+       void *loc_cpu_entry;
 
        counters = alloc_counters(table);
        if (IS_ERR(counters))
                return PTR_ERR(counters);
 
-       /* choose the copy that is on our node/cpu, ...
-        * This choice is lazy (because current thread is
-        * allowed to migrate to another cpu)
-        */
-       loc_cpu_entry = private->entries[raw_smp_processor_id()];
+       loc_cpu_entry = private->entries;
        if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
                ret = -EFAULT;
                goto free_counters;
        if (!newinfo || !info)
                return -EINVAL;
 
-       /* we dont care about newinfo->entries[] */
+       /* we don't care about newinfo->entries */
        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
        newinfo->initial_entries = 0;
-       loc_cpu_entry = info->entries[raw_smp_processor_id()];
+       loc_cpu_entry = info->entries;
        xt_compat_init_offsets(AF_INET6, info->number);
        xt_entry_foreach(iter, loc_cpu_entry, info->size) {
                ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
        struct xt_table *t;
        struct xt_table_info *oldinfo;
        struct xt_counters *counters;
-       const void *loc_cpu_old_entry;
        struct ip6t_entry *iter;
 
        ret = 0;
        get_counters(oldinfo, counters);
 
        /* Decrease module usage counts and free resource */
-       loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
-       xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
+       xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
                cleanup_entry(iter, net);
 
        xt_free_table_info(oldinfo);
        if (!newinfo)
                return -ENOMEM;
 
-       /* choose the copy that is on our node/cpu */
-       loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+       loc_cpu_entry = newinfo->entries;
        if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
                           tmp.size) != 0) {
                ret = -EFAULT;
 do_add_counters(struct net *net, const void __user *user, unsigned int len,
                int compat)
 {
-       unsigned int i, curcpu;
+       unsigned int i;
        struct xt_counters_info tmp;
        struct xt_counters *paddc;
        unsigned int num_counters;
        struct xt_table *t;
        const struct xt_table_info *private;
        int ret = 0;
-       const void *loc_cpu_entry;
        struct ip6t_entry *iter;
        unsigned int addend;
 #ifdef CONFIG_COMPAT
        }
 
        i = 0;
-       /* Choose the copy that is on our node */
-       curcpu = smp_processor_id();
        addend = xt_write_recseq_begin();
-       loc_cpu_entry = private->entries[curcpu];
-       xt_entry_foreach(iter, loc_cpu_entry, private->size) {
+       xt_entry_foreach(iter, private->entries, private->size) {
                struct xt_counters *tmp;
 
                tmp = xt_get_this_cpu_counter(&iter->counters);
                ++i;
        }
        xt_write_recseq_end(addend);
-
  unlock_up_free:
        local_bh_enable();
        xt_table_unlock(t);
                newinfo->hook_entry[i] = info->hook_entry[i];
                newinfo->underflow[i] = info->underflow[i];
        }
-       entry1 = newinfo->entries[raw_smp_processor_id()];
+       entry1 = newinfo->entries;
        pos = entry1;
        size = total_size;
        xt_entry_foreach(iter0, entry0, total_size) {
                return ret;
        }
 
-       /* And one copy for every other CPU */
-       for_each_possible_cpu(i)
-               if (newinfo->entries[i] && newinfo->entries[i] != entry1)
-                       memcpy(newinfo->entries[i], entry1, newinfo->size);
-
        *pinfo = newinfo;
        *pentry0 = entry1;
        xt_free_table_info(info);
        if (!newinfo)
                return -ENOMEM;
 
-       /* choose the copy that is on our node/cpu */
-       loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+       loc_cpu_entry = newinfo->entries;
        if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
                           tmp.size) != 0) {
                ret = -EFAULT;
        void __user *pos;
        unsigned int size;
        int ret = 0;
-       const void *loc_cpu_entry;
        unsigned int i = 0;
        struct ip6t_entry *iter;
 
        if (IS_ERR(counters))
                return PTR_ERR(counters);
 
-       /* choose the copy that is on our node/cpu, ...
-        * This choice is lazy (because current thread is
-        * allowed to migrate to another cpu)
-        */
-       loc_cpu_entry = private->entries[raw_smp_processor_id()];
        pos = userptr;
        size = total_size;
-       xt_entry_foreach(iter, loc_cpu_entry, total_size) {
+       xt_entry_foreach(iter, private->entries, total_size) {
                ret = compat_copy_entry_to_user(iter, &pos,
                                                &size, counters, i++);
                if (ret != 0)
                goto out;
        }
 
-       /* choose the copy on our node/cpu, but dont care about preemption */
-       loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+       loc_cpu_entry = newinfo->entries;
        memcpy(loc_cpu_entry, repl->entries, repl->size);
 
        ret = translate_table(net, newinfo, loc_cpu_entry, repl);
        private = xt_unregister_table(table);
 
        /* Decrease module usage counts and free resources */
-       loc_cpu_entry = private->entries[raw_smp_processor_id()];
+       loc_cpu_entry = private->entries;
        xt_entry_foreach(iter, loc_cpu_entry, private->size)
                cleanup_entry(iter, net);
        if (private->number > private->initial_entries)
 
 struct xt_table_info *xt_alloc_table_info(unsigned int size)
 {
        struct xt_table_info *newinfo;
-       int cpu;
 
        /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
        if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
 
        newinfo->size = size;
 
-       for_each_possible_cpu(cpu) {
-               if (size <= PAGE_SIZE)
-                       newinfo->entries[cpu] = kmalloc_node(size,
-                                                       GFP_KERNEL,
-                                                       cpu_to_node(cpu));
-               else
-                       newinfo->entries[cpu] = vmalloc_node(size,
-                                                       cpu_to_node(cpu));
+       if (size <= PAGE_SIZE)
+               newinfo->entries = kmalloc(size, GFP_KERNEL);
+       else
+               newinfo->entries = vmalloc(size);
 
-               if (newinfo->entries[cpu] == NULL) {
-                       xt_free_table_info(newinfo);
-                       return NULL;
-               }
+       if (newinfo->entries == NULL) {
+               xt_free_table_info(newinfo);
+               return NULL;
        }
 
        return newinfo;
 {
        int cpu;
 
-       for_each_possible_cpu(cpu)
-               kvfree(info->entries[cpu]);
+       kvfree(info->entries);
 
        if (info->jumpstack != NULL) {
                for_each_possible_cpu(cpu)