} else {
                /* Return stub is in 32bit vsyscall page */
                if (current->mm->context.vdso)
-                       restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-                                                sigreturn);
+                       restorer = current->mm->context.vdso +
+                               selected_vdso32->sym___kernel_sigreturn;
                else
                        restorer = &frame->retcode;
        }
                if (ksig->ka.sa.sa_flags & SA_RESTORER)
                        restorer = ksig->ka.sa.sa_restorer;
                else
-                       restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-                                                rt_sigreturn);
+                       restorer = current->mm->context.vdso +
+                               selected_vdso32->sym___kernel_rt_sigreturn;
                put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
 
                /*
 
 do {                                                                   \
        if (vdso64_enabled)                                             \
                NEW_AUX_ENT(AT_SYSINFO_EHDR,                            \
-                           (unsigned long)current->mm->context.vdso);  \
+                           (unsigned long __force)current->mm->context.vdso); \
 } while (0)
 
 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
 do {                                                                   \
        if (vdso64_enabled)                                             \
                NEW_AUX_ENT(AT_SYSINFO_EHDR,                            \
-                           (unsigned long)current->mm->context.vdso);  \
+                           (unsigned long __force)current->mm->context.vdso); \
 } while (0)
 
 #define AT_SYSINFO             32
 #define VDSO_CURRENT_BASE      ((unsigned long)current->mm->context.vdso)
 
 #define VDSO_ENTRY                                                     \
-       ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+       ((unsigned long)current->mm->context.vdso +                     \
+        selected_vdso32->sym___kernel_vsyscall)
 
 struct linux_binprm;
 
 
 #endif
 
        struct mutex lock;
-       void *vdso;
+       void __user *vdso;
 } mm_context_t;
 
 #ifdef CONFIG_SMP
 
 
 #include <asm/page_types.h>
 #include <linux/linkage.h>
+#include <linux/init.h>
 
-#ifdef __ASSEMBLER__
+#ifndef __ASSEMBLER__
 
-#define DEFINE_VDSO_IMAGE(symname, filename)                           \
-__PAGE_ALIGNED_DATA ;                                                  \
-       .globl symname##_start, symname##_end ;                         \
-       .align PAGE_SIZE ;                                              \
-       symname##_start: ;                                              \
-       .incbin filename ;                                              \
-       symname##_end: ;                                                \
-       .align PAGE_SIZE /* extra data here leaks to userspace. */ ;    \
-                                                                       \
-.previous ;                                                            \
-                                                                       \
-       .globl symname##_pages ;                                        \
-       .bss ;                                                          \
-       .align 8 ;                                                      \
-       .type symname##_pages, @object ;                                \
-       symname##_pages: ;                                              \
-       .zero (symname##_end - symname##_start + PAGE_SIZE - 1) / PAGE_SIZE * (BITS_PER_LONG / 8) ; \
-       .size symname##_pages, .-symname##_pages
+struct vdso_image {
+       void *data;
+       unsigned long size;   /* Always a multiple of PAGE_SIZE */
+       struct page **pages;  /* Big enough for data/size page pointers */
 
-#else
+       unsigned long alt, alt_len;
 
-#define DECLARE_VDSO_IMAGE(symname)                            \
-       extern char symname##_start[], symname##_end[];         \
-       extern struct page *symname##_pages[]
+       unsigned long sym_VDSO32_NOTE_MASK;
+       unsigned long sym___kernel_sigreturn;
+       unsigned long sym___kernel_rt_sigreturn;
+       unsigned long sym___kernel_vsyscall;
+       unsigned long sym_VDSO32_SYSENTER_RETURN;
+};
 
-#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
+#ifdef CONFIG_X86_64
+extern const struct vdso_image vdso_image_64;
+#endif
 
-#include <asm/vdso32.h>
+#ifdef CONFIG_X86_X32
+extern const struct vdso_image vdso_image_x32;
+#endif
 
-DECLARE_VDSO_IMAGE(vdso32_int80);
+#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
+extern const struct vdso_image vdso_image_32_int80;
 #ifdef CONFIG_COMPAT
-DECLARE_VDSO_IMAGE(vdso32_syscall);
+extern const struct vdso_image vdso_image_32_syscall;
 #endif
-DECLARE_VDSO_IMAGE(vdso32_sysenter);
+extern const struct vdso_image vdso_image_32_sysenter;
 
-/*
- * Given a pointer to the vDSO image, find the pointer to VDSO32_name
- * as that symbol is defined in the vDSO sources or linker script.
- */
-#define VDSO32_SYMBOL(base, name)                                      \
-({                                                                     \
-       extern const char VDSO32_##name[];                              \
-       (void __user *)(VDSO32_##name + (unsigned long)(base));         \
-})
+extern const struct vdso_image *selected_vdso32;
 #endif
 
-/*
- * These symbols are defined with the addresses in the vsyscall page.
- * See vsyscall-sigreturn.S.
- */
-extern void __user __kernel_sigreturn;
-extern void __user __kernel_rt_sigreturn;
-
-void __init patch_vdso32(void *vdso, size_t len);
+extern void __init init_vdso_image(const struct vdso_image *image);
 
 #endif /* __ASSEMBLER__ */
 
 
        }
 
        if (current->mm->context.vdso)
-               restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
+               restorer = current->mm->context.vdso +
+                       selected_vdso32->sym___kernel_sigreturn;
        else
                restorer = &frame->retcode;
        if (ksig->ka.sa.sa_flags & SA_RESTORER)
                save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 
                /* Set up to return from userspace.  */
-                       restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-                                                rt_sigreturn);
+                       /* rt frames must return via __kernel_rt_sigreturn,
+                        * matching the compat path above — not the non-RT
+                        * __kernel_sigreturn stub. */
+                       restorer = current->mm->context.vdso +
+                               selected_vdso32->sym___kernel_rt_sigreturn;
                if (ksig->ka.sa.sa_flags & SA_RESTORER)
                        restorer = ksig->ka.sa.sa_restorer;
                put_user_ex(restorer, &frame->pretcode);
 
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-       if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+       if (vma->vm_mm && vma->vm_start ==
+           (long __force)vma->vm_mm->context.vdso)
                return "[vdso]";
        if (vma == &gate_vma)
                return "[vsyscall]";
 
 vdso.lds
-vdso-syms.lds
 vdsox32.lds
-vdsox32-syms.lds
-vdso32-syms.lds
 vdso32-syscall-syms.lds
 vdso32-sysenter-syms.lds
 vdso32-int80-syms.lds
+vdso-image-*.c
+vdso2c
 
 
 # files to link into kernel
 obj-y                          += vma.o
-obj-$(VDSO64-y)                        += vdso.o
-obj-$(VDSOX32-y)               += vdsox32.o
-obj-$(VDSO32-y)                        += vdso32.o vdso32-setup.o
+
+# vDSO images to build
+vdso_img-$(VDSO64-y)           += 64
+vdso_img-$(VDSOX32-y)          += x32
+vdso_img-$(VDSO32-y)           += 32-int80
+vdso_img-$(CONFIG_COMPAT)      += 32-syscall
+vdso_img-$(VDSO32-y)           += 32-sysenter
+
+obj-$(VDSO32-y)                        += vdso32-setup.o
 
 vobjs := $(foreach F,$(vobj64s),$(obj)/$F)
 
 $(obj)/vdso.o: $(obj)/vdso.so
 
-targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
+targets += vdso.lds $(vobjs-y)
+
+# Build the vDSO image C files and link them in.
+vdso_img_objs := $(vdso_img-y:%=vdso-image-%.o)
+vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c)
+vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg)
+obj-y += $(vdso_img_objs)
+targets += $(vdso_img_cfiles)
+targets += $(vdso_img_sodbg)
+.SECONDARY: $(vdso_img-y:%=$(obj)/vdso-image-%.c)
 
 export CPPFLAGS_vdso.lds += -P -C
 
                        -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
                        $(DISABLE_LTO)
 
-$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
-
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
        $(call if_changed,vdso)
 
-$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg FORCE
-       $(call if_changed,objcopy)
+hostprogs-y                    += vdso2c
+
+quiet_cmd_vdso2c = VDSO2C  $@
+define cmd_vdso2c
+       $(obj)/vdso2c $< $@
+endef
+
+$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso2c FORCE
+       $(call if_changed,vdso2c)
 
 #
 # Don't omit frame pointers for ease of userspace debugging, but do
 CFLAGS_REMOVE_vgetcpu.o = -pg
 CFLAGS_REMOVE_vvar.o = -pg
 
-targets += vdso-syms.lds
-obj-$(VDSO64-y)                        += vdso-syms.lds
-
-#
-# Match symbols in the DSO that look like VDSO*; produce a file of constants.
-#
-sed-vdsosym := -e 's/^00*/0/' \
-       -e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p'
-quiet_cmd_vdsosym = VDSOSYM $@
-define cmd_vdsosym
-       $(NM) $< | LC_ALL=C sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
-endef
-
-$(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
-       $(call if_changed,vdsosym)
-
 #
 # X32 processes use x32 vDSO to access 64bit kernel data.
 #
 # so that it can reach 64bit address space with 64bit pointers.
 #
 
-targets += vdsox32-syms.lds
-obj-$(VDSOX32-y)               += vdsox32-syms.lds
-
 CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
 VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
                           -Wl,-soname=linux-vdso.so.1 \
 $(obj)/%-x32.o: $(obj)/%.o FORCE
        $(call if_changed,x32)
 
-targets += vdsox32.so vdsox32.so.dbg vdsox32.lds $(vobjx32s-y)
-
-$(obj)/vdsox32.o: $(src)/vdsox32.S $(obj)/vdsox32.so
+targets += vdsox32.lds $(vobjx32s-y)
 
 $(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
        $(call if_changed,vdso)
 #
 # Build multiple 32-bit vDSO images to choose from at boot time.
 #
-obj-$(VDSO32-y)                        += vdso32-syms.lds
 vdso32.so-$(VDSO32-y)          += int80
 vdso32.so-$(CONFIG_COMPAT)     += syscall
 vdso32.so-$(VDSO32-y)          += sysenter
 override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
 
 targets += vdso32/vdso32.lds
-targets += $(vdso32-images) $(vdso32-images:=.dbg)
 targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o)
-
-extra-y        += $(vdso32-images)
+targets += vdso32/vclock_gettime.o
 
 $(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%)
 
                                 $(obj)/vdso32/%.o
        $(call if_changed,vdso)
 
-# Make vdso32-*-syms.lds from each image, and then make sure they match.
-# The only difference should be that some do not define VDSO32_SYSENTER_RETURN.
-
-targets += vdso32-syms.lds $(vdso32.so-y:%=vdso32-%-syms.lds)
-
-quiet_cmd_vdso32sym = VDSOSYM $@
-define cmd_vdso32sym
-       if LC_ALL=C sort -u $(filter-out FORCE,$^) > $(@D)/.tmp_$(@F) && \
-          $(foreach H,$(filter-out FORCE,$^),\
-                    if grep -q VDSO32_SYSENTER_RETURN $H; \
-                    then diff -u $(@D)/.tmp_$(@F) $H; \
-                    else sed /VDSO32_SYSENTER_RETURN/d $(@D)/.tmp_$(@F) | \
-                         diff -u - $H; fi &&) : ;\
-       then mv -f $(@D)/.tmp_$(@F) $@; \
-       else rm -f $(@D)/.tmp_$(@F); exit 1; \
-       fi
-endef
-
-$(obj)/vdso32-syms.lds: $(vdso32.so-y:%=$(obj)/vdso32-%-syms.lds) FORCE
-       $(call if_changed,vdso32sym)
-
 #
 # The DSO images are built using a special linker script.
 #
                 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
 VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
-               $(LTO_CFLAGS)
+       -Wl,-Bsymbolic $(LTO_CFLAGS)
 GCOV_PROFILE := n
 
 #
 
        asm(
                "mov %%ebx, %%edx \n"
                "mov %2, %%ebx \n"
-               "call VDSO32_vsyscall \n"
+               "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
                : "=a" (ret)
                : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
        asm(
                "mov %%ebx, %%edx \n"
                "mov %2, %%ebx \n"
-               "call VDSO32_vsyscall \n"
+               "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
                : "=a" (ret)
                : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
 
+++ /dev/null
-#include <asm/vdso.h>
-
-DEFINE_VDSO_IMAGE(vdso, "arch/x86/vdso/vdso.so")
 
--- /dev/null
+#include <inttypes.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <err.h>
+
+#include <sys/mman.h>
+#include <sys/types.h>
+
+#include <linux/elf.h>
+#include <linux/types.h>
+
+/* Symbols that we need in vdso2c. */
+char const * const required_syms[] = {
+       "VDSO32_NOTE_MASK",
+       "VDSO32_SYSENTER_RETURN",
+       "__kernel_vsyscall",
+       "__kernel_sigreturn",
+       "__kernel_rt_sigreturn",
+};
+
+__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
+static void fail(const char *format, ...)
+{
+       va_list ap;
+       va_start(ap, format);
+       fprintf(stderr, "Error: ");
+       vfprintf(stderr, format, ap);
+       /* Clean up the va_list before the noreturn exit; placing
+        * va_end() after exit() would make it unreachable dead code. */
+       va_end(ap);
+       exit(1);
+}
+
+#define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))
+
+#define BITS 64
+#define GOFUNC go64
+#define Elf_Ehdr Elf64_Ehdr
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Phdr Elf64_Phdr
+#define Elf_Sym Elf64_Sym
+#define Elf_Dyn Elf64_Dyn
+#include "vdso2c.h"
+#undef BITS
+#undef GOFUNC
+#undef Elf_Ehdr
+#undef Elf_Shdr
+#undef Elf_Phdr
+#undef Elf_Sym
+#undef Elf_Dyn
+
+#define BITS 32
+#define GOFUNC go32
+#define Elf_Ehdr Elf32_Ehdr
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Phdr Elf32_Phdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Dyn Elf32_Dyn
+#include "vdso2c.h"
+#undef BITS
+#undef GOFUNC
+#undef Elf_Ehdr
+#undef Elf_Shdr
+#undef Elf_Phdr
+#undef Elf_Sym
+#undef Elf_Dyn
+
+static int go(void *addr, size_t len, FILE *outfile, const char *name)
+{
+       Elf64_Ehdr *hdr = (Elf64_Ehdr *)addr;
+
+       if (hdr->e_ident[EI_CLASS] == ELFCLASS64) {
+               return go64(addr, len, outfile, name);
+       } else if (hdr->e_ident[EI_CLASS] == ELFCLASS32) {
+               return go32(addr, len, outfile, name);
+       } else {
+               fprintf(stderr, "Error: unknown ELF class\n");
+               return 1;
+       }
+}
+
+int main(int argc, char **argv)
+{
+       int fd;
+       off_t len;
+       void *addr;
+       FILE *outfile;
+       int ret;
+       char *name, *tmp;
+       int namelen;
+
+       if (argc != 3) {
+               printf("Usage: vdso2c INPUT OUTPUT\n");
+               return 1;
+       }
+
+       /*
+        * Figure out the struct name.  If we're writing to a .so file,
+        * generate raw output instead.
+        */
+       name = strdup(argv[2]);
+       namelen = strlen(name);
+       if (namelen >= 3 && !strcmp(name + namelen - 3, ".so")) {
+               name = NULL;
+       } else {
+               tmp = strrchr(name, '/');
+               if (tmp)
+                       name = tmp + 1;
+               tmp = strchr(name, '.');
+               if (tmp)
+                       *tmp = '\0';
+               for (tmp = name; *tmp; tmp++)
+                       if (*tmp == '-')
+                               *tmp = '_';
+       }
+
+       fd = open(argv[1], O_RDONLY);
+       if (fd == -1)
+               err(1, "%s", argv[1]);
+
+       len = lseek(fd, 0, SEEK_END);
+       if (len == (off_t)-1)
+               err(1, "lseek");
+
+       addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+       if (addr == MAP_FAILED)
+               err(1, "mmap");
+
+       outfile = fopen(argv[2], "w");
+       if (!outfile)
+               err(1, "%s", argv[2]);
+
+       ret = go(addr, (size_t)len, outfile, name);
+
+       munmap(addr, len);
+       fclose(outfile);
+
+       return ret;
+}
 
--- /dev/null
+/*
+ * This file is included twice from vdso2c.c.  It generates code for 32-bit
+ * and 64-bit vDSOs.  We need both for 64-bit builds, since 32-bit vDSOs
+ * are built for 32-bit userspace.
+ */
+
+static int GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
+{
+       int found_load = 0;
+       unsigned long load_size = -1;  /* Work around bogus warning */
+       unsigned long data_size;
+       Elf_Ehdr *hdr = (Elf_Ehdr *)addr;
+       int i;
+       unsigned long j;
+       Elf_Shdr *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
+               *alt_sec = NULL;
+       Elf_Dyn *dyn = 0, *dyn_end = 0;
+       const char *secstrings;
+       uint64_t syms[NSYMS] = {};
+
+       Elf_Phdr *pt = (Elf_Phdr *)(addr + hdr->e_phoff);
+
+       /* Walk the segment table. */
+       for (i = 0; i < hdr->e_phnum; i++) {
+               if (pt[i].p_type == PT_LOAD) {
+                       if (found_load)
+                               fail("multiple PT_LOAD segs\n");
+
+                       if (pt[i].p_offset != 0 || pt[i].p_vaddr != 0)
+                               fail("PT_LOAD in wrong place\n");
+
+                       if (pt[i].p_memsz != pt[i].p_filesz)
+                               fail("cannot handle memsz != filesz\n");
+
+                       load_size = pt[i].p_memsz;
+                       found_load = 1;
+               } else if (pt[i].p_type == PT_DYNAMIC) {
+                       dyn = addr + pt[i].p_offset;
+                       dyn_end = addr + pt[i].p_offset + pt[i].p_memsz;
+               }
+       }
+       if (!found_load)
+               fail("no PT_LOAD seg\n");
+       data_size = (load_size + 4095) / 4096 * 4096;
+
+       /* Walk the dynamic table */
+       for (i = 0; dyn + i < dyn_end && dyn[i].d_tag != DT_NULL; i++) {
+               if (dyn[i].d_tag == DT_REL || dyn[i].d_tag == DT_RELSZ ||
+                   dyn[i].d_tag == DT_RELENT || dyn[i].d_tag == DT_TEXTREL)
+                       fail("vdso image contains dynamic relocations\n");
+       }
+
+       /* Walk the section table */
+       secstrings_hdr = addr + hdr->e_shoff + hdr->e_shentsize*hdr->e_shstrndx;
+       secstrings = addr + secstrings_hdr->sh_offset;
+       for (i = 0; i < hdr->e_shnum; i++) {
+               Elf_Shdr *sh = addr + hdr->e_shoff + hdr->e_shentsize * i;
+               if (sh->sh_type == SHT_SYMTAB)
+                       symtab_hdr = sh;
+
+               if (!strcmp(secstrings + sh->sh_name, ".altinstructions"))
+                       alt_sec = sh;
+       }
+
+       /* fail() is noreturn, so no fallback return is needed here. */
+       if (!symtab_hdr)
+               fail("no symbol table\n");
+
+       strtab_hdr = addr + hdr->e_shoff +
+               hdr->e_shentsize * symtab_hdr->sh_link;
+
+       /* Walk the symbol table, recording the value of each required
+        * symbol.  Use sym_name locally so we don't shadow the function
+        * parameter 'name', which is still needed below. */
+       for (i = 0; i < symtab_hdr->sh_size / symtab_hdr->sh_entsize; i++) {
+               int k;
+               Elf_Sym *sym = addr + symtab_hdr->sh_offset +
+                       symtab_hdr->sh_entsize * i;
+               const char *sym_name = addr + strtab_hdr->sh_offset +
+                       sym->st_name;
+               for (k = 0; k < NSYMS; k++) {
+                       if (!strcmp(sym_name, required_syms[k])) {
+                               if (syms[k]) {
+                                       fail("duplicate symbol %s\n",
+                                            required_syms[k]);
+                               }
+                               syms[k] = sym->st_value;
+                       }
+               }
+       }
+
+       /* Remove sections. */
+       hdr->e_shoff = 0;
+       hdr->e_shentsize = 0;
+       hdr->e_shnum = 0;
+       hdr->e_shstrndx = SHN_UNDEF;
+
+       if (!name) {
+               fwrite(addr, load_size, 1, outfile);
+               return 0;
+       }
+
+       fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
+       fprintf(outfile, "#include <linux/linkage.h>\n");
+       fprintf(outfile, "#include <asm/page_types.h>\n");
+       fprintf(outfile, "#include <asm/vdso.h>\n");
+       fprintf(outfile, "\n");
+       fprintf(outfile,
+               "static unsigned char raw_data[%lu] __page_aligned_data = {",
+               data_size);
+       for (j = 0; j < load_size; j++) {
+               if (j % 10 == 0)
+                       fprintf(outfile, "\n\t");
+               fprintf(outfile, "0x%02X, ", (int)((unsigned char *)addr)[j]);
+       }
+       fprintf(outfile, "\n};\n\n");
+
+       fprintf(outfile, "static struct page *pages[%lu];\n\n",
+               data_size / 4096);
+
+       fprintf(outfile, "const struct vdso_image %s = {\n", name);
+       fprintf(outfile, "\t.data = raw_data,\n");
+       fprintf(outfile, "\t.size = %lu,\n", data_size);
+       fprintf(outfile, "\t.pages = pages,\n");
+       if (alt_sec) {
+               fprintf(outfile, "\t.alt = %lu,\n",
+                       (unsigned long)alt_sec->sh_offset);
+               fprintf(outfile, "\t.alt_len = %lu,\n",
+                       (unsigned long)alt_sec->sh_size);
+       }
+       for (i = 0; i < NSYMS; i++) {
+               if (syms[i])
+                       fprintf(outfile, "\t.sym_%s = 0x%" PRIx64 ",\n",
+                               required_syms[i], syms[i]);
+       }
+       fprintf(outfile, "};\n");
+
+       return 0;
+}
 
 #include <asm/fixmap.h>
 #include <asm/hpet.h>
 #include <asm/vvar.h>
+#include <asm/vdso32.h>
 
 #ifdef CONFIG_COMPAT_VDSO
 #define VDSO_DEFAULT   0
 __setup_param("vdso=", vdso_setup, vdso32_setup, 0);
 #endif
 
-static struct page **vdso32_pages;
-static unsigned vdso32_size;
-
 #ifdef CONFIG_X86_64
 
 #define        vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SYSENTER32))
 
 #endif /* CONFIG_X86_64 */
 
+#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
+const struct vdso_image *selected_vdso32;
+#endif
+
 int __init sysenter_setup(void)
 {
-       char *vdso32_start, *vdso32_end;
-       int npages, i;
-
 #ifdef CONFIG_COMPAT
-       if (vdso32_syscall()) {
-               vdso32_start = vdso32_syscall_start;
-               vdso32_end = vdso32_syscall_end;
-               vdso32_pages = vdso32_syscall_pages;
-       } else
+       if (vdso32_syscall())
+               selected_vdso32 = &vdso_image_32_syscall;
+       else
 #endif
-       if (vdso32_sysenter()) {
-               vdso32_start = vdso32_sysenter_start;
-               vdso32_end = vdso32_sysenter_end;
-               vdso32_pages = vdso32_sysenter_pages;
-       } else {
-               vdso32_start = vdso32_int80_start;
-               vdso32_end = vdso32_int80_end;
-               vdso32_pages = vdso32_int80_pages;
-       }
-
-       npages = ((vdso32_end - vdso32_start) + PAGE_SIZE - 1) / PAGE_SIZE;
-       vdso32_size = npages << PAGE_SHIFT;
-       for (i = 0; i < npages; i++)
-               vdso32_pages[i] = virt_to_page(vdso32_start + i*PAGE_SIZE);
+       if (vdso32_sysenter())
+               selected_vdso32 = &vdso_image_32_sysenter;
+       else
+               selected_vdso32 = &vdso_image_32_int80;
 
-       patch_vdso32(vdso32_start, vdso32_size);
+       init_vdso_image(selected_vdso32);
 
        return 0;
 }
        unsigned long addr;
        int ret = 0;
        struct vm_area_struct *vma;
+       unsigned long vdso32_size = selected_vdso32->size;
 
 #ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32))
 
        addr += VDSO_OFFSET(VDSO_PREV_PAGES);
 
-       current->mm->context.vdso = (void *)addr;
+       current->mm->context.vdso = (void __user *)addr;
 
        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
                        vdso32_size,
                        VM_READ|VM_EXEC|
                        VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-                       vdso32_pages);
+                       selected_vdso32->pages);
 
        if (ret)
                goto up_fail;
        }
 #endif
 
-       current_thread_info()->sysenter_return =
-               VDSO32_SYMBOL(addr, SYSENTER_RETURN);
+       if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
+               current_thread_info()->sysenter_return =
+                       current->mm->context.vdso +
+                       selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
 
   up_fail:
        if (ret)
 
+++ /dev/null
-#include <asm/vdso.h>
-
-DEFINE_VDSO_IMAGE(vdso32_int80, "arch/x86/vdso/vdso32-int80.so")
-
-#ifdef CONFIG_COMPAT
-DEFINE_VDSO_IMAGE(vdso32_syscall, "arch/x86/vdso/vdso32-syscall.so")
-#endif
-
-DEFINE_VDSO_IMAGE(vdso32_sysenter, "arch/x86/vdso/vdso32-sysenter.so")
 
        local: *;
        };
 }
-
-/*
- * Symbols we define here called VDSO* get their values into vdso32-syms.h.
- */
-VDSO32_vsyscall                = __kernel_vsyscall;
-VDSO32_sigreturn       = __kernel_sigreturn;
-VDSO32_rt_sigreturn    = __kernel_rt_sigreturn;
-VDSO32_clock_gettime   = clock_gettime;
-VDSO32_gettimeofday    = gettimeofday;
-VDSO32_time            = time;
 
+++ /dev/null
-#include <asm/vdso.h>
-
-DEFINE_VDSO_IMAGE(vdsox32, "arch/x86/vdso/vdsox32.so")
 
 #if defined(CONFIG_X86_64)
 unsigned int __read_mostly vdso64_enabled = 1;
 
-DECLARE_VDSO_IMAGE(vdso);
 extern unsigned short vdso_sync_cpuid;
-static unsigned vdso_size;
-
-#ifdef CONFIG_X86_X32_ABI
-DECLARE_VDSO_IMAGE(vdsox32);
-static unsigned vdsox32_size;
-#endif
 #endif
 
-#if defined(CONFIG_X86_32) || defined(CONFIG_X86_X32_ABI) || \
-       defined(CONFIG_COMPAT)
-void __init patch_vdso32(void *vdso, size_t len)
+void __init init_vdso_image(const struct vdso_image *image)
 {
-       Elf32_Ehdr *hdr = vdso;
-       Elf32_Shdr *sechdrs, *alt_sec = 0;
-       char *secstrings;
-       void *alt_data;
        int i;
+       int npages = (image->size) / PAGE_SIZE;
 
-       BUG_ON(len < sizeof(Elf32_Ehdr));
-       BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
-
-       sechdrs = (void *)hdr + hdr->e_shoff;
-       secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-
-       for (i = 1; i < hdr->e_shnum; i++) {
-               Elf32_Shdr *shdr = &sechdrs[i];
-               if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
-                       alt_sec = shdr;
-                       goto found;
-               }
-       }
-
-       /* If we get here, it's probably a bug. */
-       pr_warning("patch_vdso32: .altinstructions not found\n");
-       return;  /* nothing to patch */
+       BUG_ON(image->size % PAGE_SIZE != 0);
+       for (i = 0; i < npages; i++)
+               image->pages[i] = virt_to_page(image->data + i*PAGE_SIZE);
 
-found:
-       alt_data = (void *)hdr + alt_sec->sh_offset;
-       apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
+       apply_alternatives((struct alt_instr *)(image->data + image->alt),
+                          (struct alt_instr *)(image->data + image->alt +
+                                               image->alt_len));
 }
-#endif
-
-#if defined(CONFIG_X86_64)
-static void __init patch_vdso64(void *vdso, size_t len)
-{
-       Elf64_Ehdr *hdr = vdso;
-       Elf64_Shdr *sechdrs, *alt_sec = 0;
-       char *secstrings;
-       void *alt_data;
-       int i;
 
-       BUG_ON(len < sizeof(Elf64_Ehdr));
-       BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
-
-       sechdrs = (void *)hdr + hdr->e_shoff;
-       secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-
-       for (i = 1; i < hdr->e_shnum; i++) {
-               Elf64_Shdr *shdr = &sechdrs[i];
-               if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
-                       alt_sec = shdr;
-                       goto found;
-               }
-       }
-
-       /* If we get here, it's probably a bug. */
-       pr_warning("patch_vdso64: .altinstructions not found\n");
-       return;  /* nothing to patch */
-
-found:
-       alt_data = (void *)hdr + alt_sec->sh_offset;
-       apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
-}
 
+#if defined(CONFIG_X86_64)
 static int __init init_vdso(void)
 {
-       int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
-       int i;
-
-       patch_vdso64(vdso_start, vdso_end - vdso_start);
-
-       vdso_size = npages << PAGE_SHIFT;
-       for (i = 0; i < npages; i++)
-               vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
+       init_vdso_image(&vdso_image_64);
 
 #ifdef CONFIG_X86_X32_ABI
-       patch_vdso32(vdsox32_start, vdsox32_end - vdsox32_start);
-       npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
-       vdsox32_size = npages << PAGE_SHIFT;
-       for (i = 0; i < npages; i++)
-               vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
+       init_vdso_image(&vdso_image_x32);
 #endif
 
        return 0;
                goto up_fail;
        }
 
-       current->mm->context.vdso = (void *)addr;
+       current->mm->context.vdso = (void __user *)addr;
 
        ret = install_special_mapping(mm, addr, size,
                                      VM_READ|VM_EXEC|
 
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
-       return setup_additional_pages(bprm, uses_interp, vdso_pages,
-                                     vdso_size);
+       return setup_additional_pages(bprm, uses_interp, vdso_image_64.pages,
+                                     vdso_image_64.size);
 }
 
 #ifdef CONFIG_X86_X32_ABI
 int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
-       return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
-                                     vdsox32_size);
+       return setup_additional_pages(bprm, uses_interp, vdso_image_x32.pages,
+                                     vdso_image_x32.size);
 }
 #endif
 
 
 static void __init fiddle_vdso(void)
 {
 #ifdef CONFIG_X86_32
+       /*
+        * This could be called before selected_vdso32 is initialized, so
+        * just fiddle with both possible images.  vdso_image_32_syscall
+        * can't be selected, since it only exists on 64-bit systems.
+        */
        u32 *mask;
-       mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
+       mask = vdso_image_32_int80.data +
+               vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
-       mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
+       mask = vdso_image_32_sysenter.data +
+               vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
 #endif
 }