return copied;
 }
 
+static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags)
+{
+       char *start;
+       struct vmap_block *vb;
+       unsigned long offset;
+       unsigned int rs, re, n;
+
+       /*
+        * If the area was created directly by the vm_map_ram() interface,
+        * without being further subdivided and delegated to vmap_block,
+        * handle it here.
+        */
+       if (!(flags & VMAP_BLOCK)) {
+               aligned_vread(buf, addr, count);
+               return;
+       }
+
+       /*
+        * The area is split into regions and tracked with vmap_block; read
+        * out each region and zero-fill the holes between them.
+        */
+       vb = xa_load(&vmap_blocks, addr_to_vb_idx((unsigned long)addr));
+       if (!vb)
+               goto finished;
+
+       spin_lock(&vb->lock);
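+       /*
+        * If no region of the block is currently in use, treat the whole
+        * range as a hole; the remaining count is zero-filled at 'finished'.
+        */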
+       if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
+               spin_unlock(&vb->lock);
+               goto finished;
+       }
+       for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
+               if (!count)
+                       break;
+               start = vmap_block_vaddr(vb->va->va_start, rs);
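+               /* Zero-fill the hole before this used region, if any. */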
+               while (addr < start) {
+                       if (count == 0)
+                               goto unlock;
+                       *buf = '\0';
+                       buf++;
+                       addr++;
+                       count--;
+               }
+               /* The read may start from the middle of a used region */
+               offset = offset_in_page(addr);
+               n = ((re - rs + 1) << PAGE_SHIFT) - offset;
+               if (n > count)
+                       n = count;
+               aligned_vread(buf, start + offset, n);
+
+               buf += n;
+               addr += n;
+               count -= n;
+       }
+unlock:
+       spin_unlock(&vb->lock);
+
+finished:
+       /* zero-fill the remaining dirty or free regions */
+       if (count)
+               memset(buf, 0, count);
+}
+
 /**
  * vread() - read vmalloc area in a safe way.
  * @buf:     buffer for reading data
        struct vm_struct *vm;
        char *vaddr, *buf_start = buf;
        unsigned long buflen = count;
-       unsigned long n;
+       unsigned long n, size, flags;
 
        addr = kasan_reset_tag(addr);
 
                if (!count)
                        break;
 
-               if (!va->vm)
+               vm = va->vm;
+               flags = va->flags & VMAP_FLAGS_MASK;
+               /*
+                * VMAP_BLOCK indicates a sub-type of vm_map_ram area; it must
+                * be set together with VMAP_RAM.
+                */
+               WARN_ON(flags == VMAP_BLOCK);
+
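+               /*
+                * vm_map_ram areas have no vm_struct attached; skip only if
+                * the vmap_area has neither a vm_struct nor vm_map_ram flags.
+                */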
+               if (!vm && !flags)
                        continue;
 
-               vm = va->vm;
-               vaddr = (char *) vm->addr;
-               if (addr >= vaddr + get_vm_area_size(vm))
+               vaddr = (char *) va->va_start;
+               size = vm ? get_vm_area_size(vm) : va_size(va);
+
+               if (addr >= vaddr + size)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
-               n = vaddr + get_vm_area_size(vm) - addr;
+               n = vaddr + size - addr;
                if (n > count)
                        n = count;
-               if (!(vm->flags & VM_IOREMAP))
+
+               if (flags & VMAP_RAM)
+                       vmap_ram_vread(buf, addr, n, flags);
+               else if (!(vm->flags & VM_IOREMAP))
                        aligned_vread(buf, addr, n);
                else /* IOREMAP area is treated as memory hole */
                        memset(buf, 0, n);