#define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type */
#define VMAP_FLAGS_MASK 0x3
+/*
+ * We should probably have a fallback mechanism to allocate virtual memory
+ * out of partially filled vmap blocks. However vmap block sizing should be
+ * fairly reasonable according to the vmalloc size, so it shouldn't be a
+ * big problem.
+ */
struct vmap_block_queue {
spinlock_t lock;
struct list_head free;
+
+ /*
+ * An xarray requires extra memory to be allocated
+ * dynamically. If that becomes an issue, we can switch
+ * to an rb-tree instead.
+ */
+ struct xarray vmap_blocks;
};
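
Because xarray entries are allocated dynamically, inserting a block into the per-queue xarray takes a gfp mask and can fail, which is the tradeoff the comment above refers to. A minimal sketch, assuming the addr_to_vbq() helper introduced further down; vmap_block_register() is hypothetical and not part of this patch:

/*
 * Hypothetical helper: register a new vmap_block in the per-cpu xarray
 * that owns its address range. xa_insert() may need to allocate xarray
 * nodes internally, hence the gfp mask and the possible failure.
 */
static int vmap_block_register(struct vmap_block *vb, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq = addr_to_vbq(vb->va->va_start);

	/* Returns 0 on success, -EBUSY if the key is taken, or -ENOMEM. */
	return xa_insert(&vbq->vmap_blocks, vb->va->va_start, vb, gfp_mask);
}

Callers are expected to unwind on failure, as new_vmap_block() does below (free the block and release its vmap_area).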
struct vmap_block {
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
/*
- * XArray of vmap blocks, indexed by address, to quickly find a vmap block
- * in the free path. Could get rid of this if we change the API to return a
- * "cookie" from alloc, to be passed to free. But no big deal yet.
+ * In order to have fast access to any "vmap_block" associated with a
+ * specific address, we store them in a per-cpu xarray. The hash
+ * function is addr_to_vbq(), whereas the key is the vb->va->va_start
+ * value.
+ *
+ * Please note that the vmap_block_queue, although per-cpu, is not
+ * selected by the current CPU (raw_smp_processor_id()); instead
+ * it is chosen based on the CPU index the address hashes to, i.e.
+ * the per-cpu queues are used as a hash table.
+ *
+ * An example:
+ *
+ * CPU_1 CPU_2 CPU_0
+ * | | |
+ * V V V
+ * 0 10 20 30 40 50 60
+ * |------|------|------|------|------|------|...<vmap address space>
+ * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2
+ *
+ * - CPU_1 invokes vm_unmap_ram(6); address 6 falls in the CPU0 zone,
+ * so it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
+ *
+ * - CPU_2 invokes vm_unmap_ram(11); address 11 falls in the CPU1 zone,
+ * so it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
+ *
+ * - CPU_0 invokes vm_unmap_ram(20); address 20 falls in the CPU2 zone,
+ * so it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
*/
-static DEFINE_XARRAY(vmap_blocks);
+static struct vmap_block_queue *
+addr_to_vbq(unsigned long addr)
+{
+ int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
-/*
- * We should probably have a fallback mechanism to allocate virtual memory
- * out of partially filled vmap blocks. However vmap block sizing should be
- * fairly reasonable according to the vmalloc size, so it shouldn't be a
- * big problem.
- */
+ return &per_cpu(vmap_block_queue, index);
+}
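
To make the zone-to-queue mapping described above concrete, here is a small userspace model of the hash. It is illustrative only: EXAMPLE_NR_CPUS, EXAMPLE_VMAP_BLOCK_SZ and the sample addresses are made-up values, and the per-cpu lookup is reduced to a plain index.

#include <stdio.h>

#define EXAMPLE_NR_CPUS		3		/* assumed CPU count */
#define EXAMPLE_VMAP_BLOCK_SZ	(64 * 4096UL)	/* assumed block size: 64 pages */

/* Userspace model of addr_to_vbq(): which per-cpu queue owns an address. */
static int addr_to_queue_index(unsigned long addr)
{
	return (addr / EXAMPLE_VMAP_BLOCK_SZ) % EXAMPLE_NR_CPUS;
}

int main(void)
{
	/* Addresses inside zones 0, 1 and 3 of the diagram above. */
	unsigned long addrs[] = {
		0 * EXAMPLE_VMAP_BLOCK_SZ + 0x100,
		1 * EXAMPLE_VMAP_BLOCK_SZ + 0x200,
		3 * EXAMPLE_VMAP_BLOCK_SZ + 0x300,
	};

	for (int i = 0; i < 3; i++)
		printf("addr %#lx -> vmap_block_queue of CPU%d\n",
		       addrs[i], addr_to_queue_index(addrs[i]));

	/* Zones 0 and 3 both hash to CPU0's queue; zone 1 to CPU1's. */
	return 0;
}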
-static unsigned long addr_to_vb_idx(unsigned long addr)
+static unsigned long
+addr_to_vb_va_start(unsigned long addr)
{
- addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
- addr /= VMAP_BLOCK_SIZE;
- return addr;
+ return rounddown(addr, VMAP_BLOCK_SIZE);
}
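
Taken together, the two helpers give the lookup pattern used on the unmap and read paths further down. As a sketch, the open-coded call sites are equivalent to a hypothetical helper like this (find_vmap_block() is not part of the patch):

/* Hypothetical helper: find the vmap_block covering @addr, or NULL. */
static struct vmap_block *find_vmap_block(unsigned long addr)
{
	struct vmap_block_queue *vbq = addr_to_vbq(addr);

	/* The xarray key is the start address of the block containing @addr. */
	return xa_load(&vbq->vmap_blocks, addr_to_vb_va_start(addr));
}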
static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
unsigned long addr;
addr = va_start + (pages_off << PAGE_SHIFT);
- BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
+ WARN_ON_ONCE(addr_to_vb_va_start(addr) != va_start);
return (void *)addr;
}
struct vmap_block_queue *vbq;
struct vmap_block *vb;
struct vmap_area *va;
- unsigned long vb_idx;
int node, err;
void *vaddr;
bitmap_set(vb->used_map, 0, (1UL << order));
INIT_LIST_HEAD(&vb->free_list);
- vb_idx = addr_to_vb_idx(va->va_start);
- err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
+ vbq = addr_to_vbq(va->va_start);
+ err = xa_insert(&vbq->vmap_blocks, va->va_start, vb, gfp_mask);
if (err) {
kfree(vb);
free_vmap_area(va);
static void free_vmap_block(struct vmap_block *vb)
{
+ struct vmap_block_queue *vbq;
struct vmap_block *tmp;
- tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
+ vbq = addr_to_vbq(vb->va->va_start);
+ tmp = xa_erase(&vbq->vmap_blocks, vb->va->va_start);
BUG_ON(tmp != vb);
spin_lock(&vmap_area_lock);
unsigned long offset;
unsigned int order;
struct vmap_block *vb;
+ struct vmap_block_queue *vbq;
BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
order = get_order(size);
offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
- vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
+
+ vbq = addr_to_vbq(addr);
+ vb = xa_load(&vbq->vmap_blocks, addr_to_vb_va_start(addr));
+
spin_lock(&vb->lock);
bitmap_clear(vb->used_map, offset, (1UL << order));
spin_unlock(&vb->lock);
{
char *start;
struct vmap_block *vb;
+ struct vmap_block_queue *vbq;
unsigned long offset;
unsigned int rs, re;
size_t remains, n;
* Area is split into regions and tracked with vmap_block, read out
* each region and zero fill the hole between regions.
*/
- vb = xa_load(&vmap_blocks, addr_to_vb_idx((unsigned long)addr));
+ vbq = addr_to_vbq((unsigned long)addr);
+ vb = xa_load(&vbq->vmap_blocks, addr_to_vb_va_start((unsigned long)addr));
if (!vb)
goto finished_zero;
p = &per_cpu(vfree_deferred, i);
init_llist_head(&p->list);
INIT_WORK(&p->wq, delayed_vfree_work);
+ xa_init(&vbq->vmap_blocks);
}
/* Import existing vmlist entries. */