* have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
-                              unsigned long prot_val)
+static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+               unsigned long size, unsigned long prot_val, void *caller)
 {
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        /*
         * Ok, go for it..
         */
-       area = get_vm_area(size, VM_IOREMAP);
+       area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
  */
 void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
-       return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
+       /* Record our caller's return address so the mapping can be
+        * attributed to the code that requested it. */
+       return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC,
+                               __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
 void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
 {
+       /* NOTE(review): on the !pat_wc_enabled fallback path,
+        * ioremap_nocache()'s own __builtin_return_address(0) records
+        * ioremap_wc - not our caller - as the mapping's creator. */
        if (pat_wc_enabled)
-               return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
+               /* Tag the mapping with our caller's return address. */
+               return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+                                       __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
 }
 
 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
-       return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
+       /* Record our caller's return address so the mapping can be
+        * attributed to the code that requested it. */
+       return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
+                               __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
 
 
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 #include <linux/vmalloc.h>
+#include <linux/kallsyms.h>
 
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 struct vm_struct *vmlist;
 
 static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-                           int node);
+                           int node, void *caller);
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
 }
 EXPORT_SYMBOL(vmalloc_to_pfn);
 
-static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
-                                           unsigned long start, unsigned long end,
-                                           int node, gfp_t gfp_mask)
+static struct vm_struct *
+__get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
+               unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
        struct vm_struct **p, *tmp, *area;
        unsigned long align = 1;
        area->pages = NULL;
        area->nr_pages = 0;
        area->phys_addr = 0;
+       area->caller = caller;
        write_unlock(&vmlist_lock);
 
        return area;
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                unsigned long start, unsigned long end)
 {
-       return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
+       /* Attribute the area to whoever called this exported helper. */
+       return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+                                               __builtin_return_address(0));
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-       return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
+       /* Call __get_vm_area_node() directly (instead of __get_vm_area)
+        * so the recorded caller is our caller, not get_vm_area itself. */
+       return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+                               -1, GFP_KERNEL, __builtin_return_address(0));
+}
+
+/*
+ * get_vm_area_caller - like get_vm_area() but with an explicit caller
+ * address, for wrappers (e.g. ioremap) that want the area attributed
+ * to their own caller rather than to the wrapper.
+ */
+struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
+                               void *caller)
+{
+       return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+                                               -1, GFP_KERNEL, caller);
+}
 
 struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
                                   int node, gfp_t gfp_mask)
 {
+       /* NUMA-aware variant; record our caller as the area's creator. */
        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
-                                 gfp_mask);
+                                 gfp_mask, __builtin_return_address(0));
 }
 
 /* Caller must hold vmlist_lock */
        if (count > num_physpages)
                return NULL;
 
-       area = get_vm_area((count << PAGE_SHIFT), flags);
+       area = get_vm_area_caller((count << PAGE_SHIFT), flags,
+                                       __builtin_return_address(0));
        if (!area)
                return NULL;
+
        if (map_vm_area(area, prot, &pages)) {
                vunmap(area->addr);
                return NULL;
 EXPORT_SYMBOL(vmap);
 
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
-                                pgprot_t prot, int node)
+                                pgprot_t prot, int node, void *caller)
 {
        struct page **pages;
        unsigned int nr_pages, array_size, i;
        /* Please note that the recursion is strictly bounded. */
        if (array_size > PAGE_SIZE) {
                pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
-                                       PAGE_KERNEL, node);
+                               PAGE_KERNEL, node, caller);
                area->flags |= VM_VPAGES;
        } else {
                pages = kmalloc_node(array_size,
                                node);
        }
        area->pages = pages;
+       area->caller = caller;
        if (!area->pages) {
                remove_vm_area(area->addr);
                kfree(area);
 
 void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 {
-       return __vmalloc_area_node(area, gfp_mask, prot, -1);
+       /* Attribute the allocation to whoever called this helper. */
+       return __vmalloc_area_node(area, gfp_mask, prot, -1,
+                                       __builtin_return_address(0));
 }
 
 /**
  *     kernel virtual space, using a pagetable protection of @prot.
  */
 static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-                           int node)
+                                               int node, void *caller)
 {
        struct vm_struct *area;
 
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;
 
-       area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
+       /* Use __get_vm_area_node() directly so the supplied caller, not
+        * __vmalloc_node, is recorded in the vm_struct. */
+       area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
+                                               node, gfp_mask, caller);
+
        if (!area)
                return NULL;
 
+       /* Propagate caller so the page-array allocation is tagged too. */
-       return __vmalloc_area_node(area, gfp_mask, prot, node);
+       return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
 }
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
-       return __vmalloc_node(size, gfp_mask, prot, -1);
+       /* Record our caller as the allocation's creator. */
+       return __vmalloc_node(size, gfp_mask, prot, -1,
+                               __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
 
  */
 void *vmalloc(unsigned long size)
 {
-       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+       /* Call __vmalloc_node() directly (instead of __vmalloc) so
+        * vmalloc's caller, not vmalloc itself, is recorded. */
+       return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+                                       -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc);
 
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-       return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+       /* NUMA-aware variant; record our caller as the creator. */
+       return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+                                       node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_node);
 
 {
        struct vm_struct *area;
 
-       area = get_vm_area(size, VM_IOREMAP);
+       area = get_vm_area_caller(size, VM_IOREMAP,
+                               __builtin_return_address(0));
        if (area == NULL)
                return NULL;
 
        seq_printf(m, "0x%p-0x%p %7ld",
                v->addr, v->addr + v->size, v->size);
 
+       if (v->caller) {
+               char buff[2 * KSYM_NAME_LEN];
+
+               seq_putc(m, ' ');
+               sprint_symbol(buff, (unsigned long)v->caller);
+               seq_puts(m, buff);
+       }
+
        if (v->nr_pages)
                seq_printf(m, " pages=%d", v->nr_pages);