#define DUMP_WRITE(addr, nr)   \
        if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
                goto end_coredump;
-#define DUMP_SEEK(off) \
-       if (!dump_seek(file, (off))) \
-               goto end_coredump;
 
 static void fill_elf_header(struct elfhdr *elf, int segs,
                            u16 machine, u32 flags, u8 osabi)
                goto end_coredump;
 
        /* Align to page */
-       DUMP_SEEK(dataoff - foffset);
+       if (!dump_seek(file, dataoff - foffset))
+               goto end_coredump;
 
        for (vma = first_vma(current, gate_vma); vma != NULL;
                        vma = next_vma(vma, gate_vma)) {
 
                for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
                        struct page *page;
-                       struct vm_area_struct *tmp_vma;
-
-                       if (get_user_pages(current, current->mm, addr, 1, 0, 1,
-                                               &page, &tmp_vma) <= 0) {
-                               DUMP_SEEK(PAGE_SIZE);
-                       } else {
-                               if (page == ZERO_PAGE(0)) {
-                                       if (!dump_seek(file, PAGE_SIZE)) {
-                                               page_cache_release(page);
-                                               goto end_coredump;
-                                       }
-                               } else {
-                                       void *kaddr;
-                                       flush_cache_page(tmp_vma, addr,
-                                                        page_to_pfn(page));
-                                       kaddr = kmap(page);
-                                       if ((size += PAGE_SIZE) > limit ||
-                                           !dump_write(file, kaddr,
-                                           PAGE_SIZE)) {
-                                               kunmap(page);
-                                               page_cache_release(page);
-                                               goto end_coredump;
-                                       }
-                                       kunmap(page);
-                               }
+                       int stop;
+
+                       page = get_dump_page(addr);
+                       if (page) {
+                               void *kaddr = kmap(page);
+                               stop = ((size += PAGE_SIZE) > limit) ||
+                                       !dump_write(file, kaddr, PAGE_SIZE);
+                               kunmap(page);
                                page_cache_release(page);
-                       }
+                       } else
+                               stop = !dump_seek(file, PAGE_SIZE);
+                       if (stop)
+                               goto end_coredump;
                }
        }
 
 
 #define DUMP_WRITE(addr, nr)   \
        if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
                goto end_coredump;
-#define DUMP_SEEK(off) \
-       if (!dump_seek(file, (off))) \
-               goto end_coredump;
 
 static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs)
 {
                           unsigned long *limit, unsigned long mm_flags)
 {
        struct vm_area_struct *vma;
+       int err = 0;
 
        for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
                unsigned long addr;
                if (!maydump(vma, mm_flags))
                        continue;
 
-               for (addr = vma->vm_start;
-                    addr < vma->vm_end;
-                    addr += PAGE_SIZE
-                    ) {
-                       struct vm_area_struct *vma;
-                       struct page *page;
-
-                       if (get_user_pages(current, current->mm, addr, 1, 0, 1,
-                                          &page, &vma) <= 0) {
-                               DUMP_SEEK(file->f_pos + PAGE_SIZE);
-                       }
-                       else if (page == ZERO_PAGE(0)) {
-                               page_cache_release(page);
-                               DUMP_SEEK(file->f_pos + PAGE_SIZE);
-                       }
-                       else {
-                               void *kaddr;
-
-                               flush_cache_page(vma, addr, page_to_pfn(page));
-                               kaddr = kmap(page);
-                               if ((*size += PAGE_SIZE) > *limit ||
-                                   !dump_write(file, kaddr, PAGE_SIZE)
-                                   ) {
-                                       kunmap(page);
-                                       page_cache_release(page);
-                                       return -EIO;
-                               }
+               for (addr = vma->vm_start; addr < vma->vm_end;
+                                                       addr += PAGE_SIZE) {
+                       struct page *page = get_dump_page(addr);
+                       if (page) {
+                               void *kaddr = kmap(page);
+                               *size += PAGE_SIZE;
+                               if (*size > *limit)
+                                       err = -EFBIG;
+                               else if (!dump_write(file, kaddr, PAGE_SIZE))
+                                       err = -EIO;
                                kunmap(page);
                                page_cache_release(page);
-                       }
+                       } else if (!dump_seek(file, file->f_pos + PAGE_SIZE))
+                               err = -EFBIG;
+                       if (err)
+                               goto out;
                }
        }
-
-       return 0;
-
-end_coredump:
-       return -EFBIG;
+out:
+       return err;
 }
 #endif
 
                                goto end_coredump;
        }
 
-       DUMP_SEEK(dataoff);
+       if (!dump_seek(file, dataoff))
+               goto end_coredump;
 
        if (elf_fdpic_dump_segments(file, &size, &limit, mm_flags) < 0)
                goto end_coredump;
 
                        struct page **pages, struct vm_area_struct **vmas);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages);
+struct page *get_dump_page(unsigned long addr);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
 
 
        return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
-
 EXPORT_SYMBOL(get_user_pages);
 
+/**
+ * get_dump_page() - pin user page in memory while writing it to core dump
+ * @addr: user address
+ *
+ * Returns struct page pointer of user page pinned for dump,
+ * to be freed afterwards by page_cache_release() or put_page().
+ *
+ * Returns NULL on any kind of failure - a hole must then be inserted into
+ * the corefile, to preserve alignment with its headers; and also returns
+ * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
+ * allowing a hole to be left in the corefile to save diskspace.
+ *
+ * Called without mmap_sem, but after all other threads have been killed.
+ */
+#ifdef CONFIG_ELF_CORE
+struct page *get_dump_page(unsigned long addr)
+{
+       struct vm_area_struct *vma;
+       struct page *page;
+
+       if (__get_user_pages(current, current->mm, addr, 1,
+                               GUP_FLAGS_FORCE, &page, &vma) < 1)
+               return NULL;
+       if (page == ZERO_PAGE(0)) {
+               page_cache_release(page);
+               return NULL;
+       }
+       flush_cache_page(vma, addr, page_to_pfn(page));
+       return page;
+}
+#endif /* CONFIG_ELF_CORE */
+
 pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
                        spinlock_t **ptl)
 {
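
For reference, a minimal sketch (not part of the patch) of the per-page pattern both ELF dump loops above are converted to: pin the page with get_dump_page(), write it out if one was returned, otherwise leave a PAGE_SIZE hole so the core file stays aligned with its headers. The helper name dump_one_page() is hypothetical and the error handling is simplified; dump_write() and dump_seek() are the dumpers' local helpers exactly as used in the hunks above.

/* Illustrative sketch only - dump_one_page() is not part of this patch */
static int dump_one_page(struct file *file, unsigned long addr,
			 size_t *size, unsigned long limit)
{
	struct page *page = get_dump_page(addr);
	int ok;

	if (page) {
		/* real user page pinned: copy its contents into the dump */
		void *kaddr = kmap(page);
		*size += PAGE_SIZE;
		ok = (*size <= limit) && dump_write(file, kaddr, PAGE_SIZE);
		kunmap(page);
		page_cache_release(page);	/* i.e. put_page() */
	} else {
		/* failure, ZERO_PAGE or pte_none: leave a hole instead */
		ok = dump_seek(file, PAGE_SIZE);
	}
	return ok ? 0 : -EFBIG;
}

Centralizing the ZERO_PAGE check and the flush_cache_page() call inside get_dump_page() is what lets both dumpers shrink to this shape while keeping the old behaviour of leaving holes for untouched anonymous pages.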