        select HAVE_RSEQ
        select HAVE_SAMPLE_FTRACE_DIRECT
        select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
+       select HAVE_SETUP_PER_CPU_AREA
        select HAVE_SOFTIRQ_ON_OWN_STACK
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_VIRT_CPU_ACCOUNTING
        select MMU_GATHER_MERGE_VMAS
        select MODULES_USE_ELF_RELA
        select NEED_DMA_MAP_STATE       if PCI
+       select NEED_PER_CPU_EMBED_FIRST_CHUNK
        select NEED_SG_DMA_LENGTH       if PCI
        select OLD_SIGACTION
        select OLD_SIGSUSPEND3
 
 #include <linux/cma.h>
 #include <linux/gfp.h>
 #include <linux/dma-direct.h>
+#include <linux/percpu.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/pgalloc.h>
        return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
 }
 
+/* Per-CPU offset of each CPU's area relative to the static percpu section. */
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
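+/*
+ * Treat all CPUs as equidistant; the embed first chunk allocator will
+ * then place all percpu units in a single group.
+ */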
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+       return LOCAL_DISTANCE;
+}
+
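+/* Let the percpu allocator place every CPU's unit on node 0. */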
+static int __init pcpu_cpu_to_node(int cpu)
+{
+       return 0;
+}
+
+void __init setup_per_cpu_areas(void)
+{
+       unsigned long delta;
+       unsigned int cpu;
+       int rc;
+
+       /*
+        * Always reserve area for module percpu variables.  That's
+        * what the legacy allocator did.
+        */
+       rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+                                   PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+                                   pcpu_cpu_distance,
+                                   pcpu_cpu_to_node);
+       if (rc < 0)
+               panic("Failed to initialize percpu areas.");
+
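+       /*
+        * Record each CPU's offset: it translates a static percpu address,
+        * linked at __per_cpu_start, into that CPU's unit in the first chunk.
+        */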
+       delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+       for_each_possible_cpu(cpu)
+               __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+
 #ifdef CONFIG_MEMORY_HOTPLUG
 
 #ifdef CONFIG_CMA