                if (start < offset + dump->size) {
                        tsz = min(offset + (u64)dump->size - start, (u64)size);
                        buf = dump->buf + start - offset;
-                       if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
+                       if (remap_vmalloc_range_partial(vma, dst, buf, 0,
+                                                       tsz)) {
                                ret = -EFAULT;
                                goto out_unlock;
                        }
                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
                kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
                if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
-                                               kaddr, tsz))
+                                               kaddr, 0, tsz))
                        goto fail;
 
                size -= tsz;
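
Both fs/proc/vmcore.c call sites above already fold the byte offset into the
kernel pointer themselves (buf = dump->buf + start - offset, and similarly
for kaddr), so they pass 0 for the new pgoff argument and are functionally
unchanged. A hedged sketch of the relationship, assuming "base" is the start
of a vmalloc area and "off" a page-aligned byte offset into it (base and off
are illustrative names; dst and tsz follow the hunks above):

    /* Both calls map the same pages when the range is valid, but only
     * the second form lets the callee range-check the offset itself: */
    remap_vmalloc_range_partial(vma, dst, base + off, 0, tsz);
    remap_vmalloc_range_partial(vma, dst, base, off >> PAGE_SHIFT, tsz);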
 
 
 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
                                       unsigned long uaddr, void *kaddr,
-                                      unsigned long size);
+                                      unsigned long pgoff, unsigned long size);
 
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                                                        unsigned long pgoff);
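
The extra parameter slots in before size, so any out-of-tree callers of
remap_vmalloc_range_partial() need the same one-line update as the vmcore
hunks above; remap_vmalloc_range() keeps its signature and absorbs the
change internally. A hedged sketch of the usual way drivers reach this path,
assuming a buffer allocated with vmalloc_user() (which sets VM_USERMAP) and
the hypothetical names my_buf/my_mmap:

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void *my_buf;    /* from vmalloc_user(), so VM_USERMAP is set */

    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
            /* vma->vm_pgoff comes straight from the mmap(2) offset; with
             * this patch it is validated against the vm area inside
             * remap_vmalloc_range_partial() rather than being folded
             * into the kernel pointer up front. */
            return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
    }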
 
 #include <linux/llist.h>
 #include <linux/bitops.h>
 #include <linux/rbtree_augmented.h>
+#include <linux/overflow.h>
 
 #include <linux/uaccess.h>
 #include <asm/tlbflush.h>
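
The new include is in mm/vmalloc.c: <linux/overflow.h> provides
check_shl_overflow() and check_add_overflow(), each of which stores the
computed (possibly wrapped) result through its last argument and returns
true if the operation overflowed. An illustrative snippet (the constant is
arbitrary; PAGE_SHIFT is 12 on most architectures):

    unsigned long off;

    /* (1UL << 54) << 12 is 2^66, which cannot fit in a 64-bit unsigned
     * long; the macro returns true instead of silently storing 0: */
    if (check_shl_overflow(1UL << 54, PAGE_SHIFT, &off))
            return -EINVAL;
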
  * @vma:               vma to cover
  * @uaddr:             target user address to start at
  * @kaddr:             virtual address of vmalloc kernel memory
+ * @pgoff:             offset from @kaddr to start at
  * @size:              size of map area
  *
  * Returns:    0 for success, -Exxx on failure
  * Similar to remap_pfn_range() (see mm/memory.c)
  */
 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
-                               void *kaddr, unsigned long size)
+                               void *kaddr, unsigned long pgoff,
+                               unsigned long size)
 {
        struct vm_struct *area;
+       unsigned long off;
+       unsigned long end_index;
+
+       if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
+               return -EINVAL;
 
        size = PAGE_ALIGN(size);
 
        if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
                return -EINVAL;
 
-       if (kaddr + size > area->addr + get_vm_area_size(area))
+       if (check_add_overflow(size, off, &end_index) ||
+           end_index > get_vm_area_size(area))
                return -EINVAL;
+       kaddr += off;
 
        do {
                struct page *page = vmalloc_to_page(kaddr);
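
The rewritten bounds check works in offset space rather than pointer space:
end_index = off + size is computed with explicit wrap detection and compared
against get_vm_area_size(area), and off is added to kaddr only once the
whole window is known to fit. The old form, kaddr + size > area->addr + ...,
could itself wrap for large values and compare as in-bounds. A hedged,
userspace-compilable sketch of the same logic, using the GCC/Clang builtins
that <linux/overflow.h> wraps (multiplying by 1UL << PAGE_SHIFT stands in
for the kernel's check_shl_overflow()):

    #include <stdbool.h>

    #define PAGE_SHIFT 12   /* illustrative; arch-dependent in the kernel */

    /* True iff [pgoff << PAGE_SHIFT, pgoff << PAGE_SHIFT + size) lies
     * inside an area of area_size bytes, with no intermediate wrap. */
    static bool range_ok(unsigned long pgoff, unsigned long size,
                         unsigned long area_size)
    {
            unsigned long off, end_index;

            if (__builtin_mul_overflow(pgoff, 1UL << PAGE_SHIFT, &off))
                    return false;   /* pgoff << PAGE_SHIFT wrapped */
            if (__builtin_add_overflow(size, off, &end_index))
                    return false;   /* off + size wrapped */
            return end_index <= area_size;  /* whole window fits */
    }
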
                                                unsigned long pgoff)
 {
        return remap_vmalloc_range_partial(vma, vma->vm_start,
-                                          addr + (pgoff << PAGE_SHIFT),
+                                          addr, pgoff,
                                           vma->vm_end - vma->vm_start);
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
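
The wrapper change above is the heart of the fix: remap_vmalloc_range()
previously applied the user-controlled pgoff itself, as
addr + (pgoff << PAGE_SHIFT), with nothing catching a shift that wrapped,
whereas it now forwards addr and pgoff separately so the callee validates
them against the vm area containing addr before any offset arithmetic. A
hedged numeric sketch on 64-bit (all values illustrative):

    #define PAGE_SHIFT 12   /* illustrative; arch-dependent in the kernel */

    unsigned long addr  = 0xffffc90000000000UL; /* vmalloc'ed buffer */
    unsigned long pgoff = 1UL << 52;            /* from the mmap(2) offset */

    /* Old scheme: (1UL << 52) << 12 is 2^64, which wraps to 0 in a
     * 64-bit unsigned long, so kaddr silently equals addr again and
     * the later in-area size check can still pass: */
    unsigned long kaddr = addr + (pgoff << PAGE_SHIFT); /* == addr */

    /* New scheme: check_shl_overflow(pgoff, PAGE_SHIFT, &off) returns
     * true for this pgoff, so remap_vmalloc_range_partial() bails out
     * with -EINVAL before mapping anything. */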