return addr;
 }
 
-static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
+static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
+{
+       unsigned long addr;
+
+       addr = va_start + (pages_off << PAGE_SHIFT);
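+       /* The offset must stay within the same vmap block as va_start */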
+       BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
+       return (void *)addr;
+}
+
+/**
+ * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in it.
+ *                  The number of pages must not exceed VMAP_BBMAP_BITS.
+ * @order:    allocation order; 2^order pages are occupied in the new block
+ * @gfp_mask: flags for the page level allocator
+ *
+ * Returns: virtual address within a newly allocated block, or ERR_PTR(-errno)
+ */
+static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 {
        struct vmap_block_queue *vbq;
        struct vmap_block *vb;
        struct vmap_area *va;
        unsigned long vb_idx;
        int node, err;
+       void *vaddr;
 
        node = numa_node_id();
 
                return ERR_PTR(err);
        }
 
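+       /* The new block serves this request itself, starting at offset 0 */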
+       vaddr = vmap_block_vaddr(va->va_start, 0);
        spin_lock_init(&vb->lock);
        vb->va = va;
-       vb->free = VMAP_BBMAP_BITS;
+       /* At least one page must remain free after this initial allocation */
+       BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
+       vb->free = VMAP_BBMAP_BITS - (1UL << order);
        vb->dirty = 0;
        bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
        INIT_LIST_HEAD(&vb->free_list);
        spin_unlock(&vbq->lock);
        put_cpu_var(vmap_block_queue);
 
-       return vb;
+       return vaddr;
 }
 
 static void free_vmap_block(struct vmap_block *vb)
 {
 
 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 {
        struct vmap_block_queue *vbq;
        struct vmap_block *vb;
-       unsigned long addr = 0;
+       void *vaddr = NULL;
        unsigned int order;
 
        BUG_ON(size & ~PAGE_MASK);
        }
        order = get_order(size);
 
-again:
        rcu_read_lock();
        vbq = &get_cpu_var(vmap_block_queue);
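+       /* Walk the per-CPU free list under RCU protection */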
        list_for_each_entry_rcu(vb, &vbq->free, free_list) {
-               int i;
+               unsigned long pages_off;
 
                spin_lock(&vb->lock);
-               if (vb->free < 1UL << order)
-                       goto next;
+               if (vb->free < (1UL << order)) {
+                       spin_unlock(&vb->lock);
+                       continue;
+               }
 
-               i = VMAP_BBMAP_BITS - vb->free;
-               addr = vb->va->va_start + (i << PAGE_SHIFT);
-               BUG_ON(addr_to_vb_idx(addr) !=
-                               addr_to_vb_idx(vb->va->va_start));
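+               /* Allocations are served from the block's never-used tail */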
+               pages_off = VMAP_BBMAP_BITS - vb->free;
+               vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
                vb->free -= 1UL << order;
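+               /* A fully used block is dropped from the free list */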
                if (vb->free == 0) {
                        spin_lock(&vbq->lock);
                        list_del_rcu(&vb->free_list);
                        spin_unlock(&vbq->lock);
                }
+
                spin_unlock(&vb->lock);
                break;
-next:
-               spin_unlock(&vb->lock);
        }
 
        put_cpu_var(vmap_block_queue);
        rcu_read_unlock();
 
-       if (!addr) {
-               vb = new_vmap_block(gfp_mask);
-               if (IS_ERR(vb))
-                       return vb;
-               goto again;
-       }
+       /* No usable block found: a new one serves this request directly */
+       if (!vaddr)
+               vaddr = new_vmap_block(order, gfp_mask);
 
-       return (void *)addr;
+       return vaddr;
 }
 
 static void vb_free(const void *addr, unsigned long size)