#include <asm/set_memory.h>
 #endif
 #include <sound/memalloc.h>
+#include "memalloc_local.h"
 
-/*
- *
- *  Bus-specific memory allocators
- *
- */
+static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
 
-#ifdef CONFIG_HAS_DMA
-/* allocate the coherent DMA pages */
-static void snd_malloc_dev_pages(struct snd_dma_buffer *dmab, size_t size)
-{
-       gfp_t gfp_flags;
-
-       gfp_flags = GFP_KERNEL
-               | __GFP_COMP    /* compound page lets parts be mapped */
-               | __GFP_NORETRY /* don't trigger OOM-killer */
-               | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
-       dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
-                                       gfp_flags);
-#ifdef CONFIG_X86
-       if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
-               set_memory_wc((unsigned long)dmab->area,
-                             PAGE_ALIGN(size) >> PAGE_SHIFT);
-#endif
-}
-
-/* free the coherent DMA pages */
-static void snd_free_dev_pages(struct snd_dma_buffer *dmab)
+/* the dev pointer encodes a gfp mask for the CONTINUOUS and VMALLOC types */
+static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
+                                         gfp_t default_gfp)
 {
-#ifdef CONFIG_X86
-       if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
-               set_memory_wb((unsigned long)dmab->area,
-                             PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
-#endif
-       dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+       if (!dmab->dev.dev)
+               return default_gfp;
+       else
+               return (__force gfp_t)(unsigned long)dmab->dev.dev;
 }
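+/*
+ * Illustrative caller sketch (not taken from this patch): CONTINUOUS and
+ * VMALLOC users pass the gfp mask disguised as the device pointer via the
+ * snd_dma_continuous_data() macro from <sound/memalloc.h>, e.g.
+ *
+ *   snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
+ *                       snd_dma_continuous_data(GFP_KERNEL),
+ *                       size, dmab);
+ */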
 
-#ifdef CONFIG_GENERIC_ALLOCATOR
-/**
- * snd_malloc_dev_iram - allocate memory from on-chip internal ram
- * @dmab: buffer allocation record to store the allocated data
- * @size: number of bytes to allocate from the iram
- *
- * This function requires iram phandle provided via of_node
- */
-static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
+static int __snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
 {
-       struct device *dev = dmab->dev.dev;
-       struct gen_pool *pool = NULL;
-
-       dmab->area = NULL;
-       dmab->addr = 0;
-
-       if (dev->of_node)
-               pool = of_gen_pool_get(dev->of_node, "iram", 0);
-
-       if (!pool)
-               return;
-
-       /* Assign the pool into private_data field */
-       dmab->private_data = pool;
+       const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
 
-       dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
-                                       PAGE_SIZE);
-}
-
-/**
- * snd_free_dev_iram - free allocated specific memory from on-chip internal ram
- * @dmab: buffer allocation record to store the allocated data
- */
-static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
-{
-       struct gen_pool *pool = dmab->private_data;
-
-       if (pool && dmab->area)
-               gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
-}
-#endif /* CONFIG_GENERIC_ALLOCATOR */
-#endif /* CONFIG_HAS_DMA */
-
-/*
- *
- *  ALSA generic memory management
- *
- */
-
-static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
-                                         gfp_t default_gfp)
-{
-       if (!dev)
-               return default_gfp;
-       else
-               return (__force gfp_t)(unsigned long)dev;
+       if (WARN_ON_ONCE(!ops || !ops->alloc))
+               return -EINVAL;
+       return ops->alloc(dmab, size);
 }
 
 /**
  * snd_dma_alloc_pages - allocate the buffer area according to the given type
  * @type: the DMA buffer type
  * @device: the device pointer
  * @size: the buffer size to allocate
  * @dmab: buffer allocation record to store the allocated data
  *
  * Calls the memory-allocator function for the corresponding buffer type.
  *
  * Return: Zero if the buffer with the given size is allocated successfully,
  * or a negative value on error.
  */
 int snd_dma_alloc_pages(int type, struct device *device, size_t size,
                        struct snd_dma_buffer *dmab)
 {
-       gfp_t gfp;
+       int err;
 
        if (WARN_ON(!size))
                return -ENXIO;
        dmab->area = NULL;
        dmab->addr = 0;
        dmab->private_data = NULL;
-       switch (type) {
-       case SNDRV_DMA_TYPE_CONTINUOUS:
-               gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL);
-               dmab->area = alloc_pages_exact(size, gfp);
-               break;
-       case SNDRV_DMA_TYPE_VMALLOC:
-               gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM);
-               dmab->area = __vmalloc(size, gfp);
-               break;
-#ifdef CONFIG_HAS_DMA
-#ifdef CONFIG_GENERIC_ALLOCATOR
-       case SNDRV_DMA_TYPE_DEV_IRAM:
-               snd_malloc_dev_iram(dmab, size);
-               if (dmab->area)
-                       break;
-               /* Internal memory might have limited size and no enough space,
-                * so if we fail to malloc, try to fetch memory traditionally.
-                */
-               dmab->dev.type = SNDRV_DMA_TYPE_DEV;
-               fallthrough;
-#endif /* CONFIG_GENERIC_ALLOCATOR */
-       case SNDRV_DMA_TYPE_DEV:
-       case SNDRV_DMA_TYPE_DEV_UC:
-               snd_malloc_dev_pages(dmab, size);
-               break;
-#endif
-#ifdef CONFIG_SND_DMA_SGBUF
-       case SNDRV_DMA_TYPE_DEV_SG:
-       case SNDRV_DMA_TYPE_DEV_UC_SG:
-               snd_malloc_sgbuf_pages(device, size, dmab, NULL);
-               break;
-#endif
-       default:
-               pr_err("snd-malloc: invalid device type %d\n", type);
-               return -ENXIO;
-       }
-       if (! dmab->area)
+       err = __snd_dma_alloc_pages(dmab, size);
+       if (err < 0)
+               return err;
+       if (!dmab->area)
                return -ENOMEM;
        dmab->bytes = size;
        return 0;
 }
 EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
 
-
 /**
  * snd_dma_free_pages - release the allocated buffer
  * @dmab: the buffer allocation record to release
  */
 void snd_dma_free_pages(struct snd_dma_buffer *dmab)
 {
-       switch (dmab->dev.type) {
-       case SNDRV_DMA_TYPE_CONTINUOUS:
-               free_pages_exact(dmab->area, dmab->bytes);
-               break;
-       case SNDRV_DMA_TYPE_VMALLOC:
-               vfree(dmab->area);
-               break;
+       const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+       if (ops && ops->free)
+               ops->free(dmab);
+}
+EXPORT_SYMBOL(snd_dma_free_pages);
+
+/**
+ * snd_sgbuf_get_addr - return the DMA address at the corresponding offset
+ * @dmab: buffer allocation information
+ * @offset: offset in the ring buffer
+ */
+dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
+{
+       const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+       if (ops && ops->get_addr)
+               return ops->get_addr(dmab, offset);
+       else
+               return dmab->addr + offset;
+}
+EXPORT_SYMBOL(snd_sgbuf_get_addr);
+
+/**
+ * snd_sgbuf_get_page - return the physical page at the corresponding offset
+ * @dmab: buffer allocation information
+ * @offset: offset in the ring buffer
+ */
+struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
+{
+       const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+       if (ops && ops->get_page)
+               return ops->get_page(dmab, offset);
+       else
+               return virt_to_page(dmab->area + offset);
+}
+EXPORT_SYMBOL(snd_sgbuf_get_page);
+
+/**
+ * snd_sgbuf_get_chunk_size - compute the maximum chunk size that consists
+ *	of physically continuous pages
+ * @dmab: buffer allocation information
+ * @ofs: offset in the ring buffer
+ * @size: the requested size
+ */
+unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
+                                     unsigned int ofs, unsigned int size)
+{
+       const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+       if (ops && ops->get_chunk_size)
+               return ops->get_chunk_size(dmab, ofs, size);
+       else
+               return size;
+}
+EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
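+/*
+ * Usage sketch (hypothetical driver code): walking the buffer in physically
+ * continuous chunks, e.g. to program a hardware SG list; program_hw_entry()
+ * is a made-up placeholder for the driver-specific setup:
+ *
+ *   unsigned int ofs = 0, chunk, rest = dmab->bytes;
+ *
+ *   while (rest > 0) {
+ *           chunk = snd_sgbuf_get_chunk_size(dmab, ofs, rest);
+ *           program_hw_entry(snd_sgbuf_get_addr(dmab, ofs), chunk);
+ *           ofs += chunk;
+ *           rest -= chunk;
+ *   }
+ */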
+
+/*
+ * Continuous pages allocator
+ */
+static int snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+       gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
+
+       dmab->area = alloc_pages_exact(size, gfp);
+       return 0;
+}
+
+static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
+{
+       free_pages_exact(dmab->area, dmab->bytes);
+}
+
+static const struct snd_malloc_ops snd_dma_continuous_ops = {
+       .alloc = snd_dma_continuous_alloc,
+       .free = snd_dma_continuous_free,
+};
+
+/*
+ * VMALLOC allocator
+ */
+static int snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+       gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);
+
+       dmab->area = __vmalloc(size, gfp);
+       return 0;
+}
+
+static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
+{
+       vfree(dmab->area);
+}
+
+static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
+                                          size_t offset)
+{
+       return page_to_phys(vmalloc_to_page(dmab->area + offset)) +
+               offset % PAGE_SIZE;
+}
+
+static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
+                                            size_t offset)
+{
+       return vmalloc_to_page(dmab->area + offset);
+}
+
+static unsigned int
+snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
+                              unsigned int ofs, unsigned int size)
+{
+       ofs %= PAGE_SIZE;
+       size += ofs;
+       if (size > PAGE_SIZE)
+               size = PAGE_SIZE;
+       return size - ofs;
+}
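+/*
+ * Worked example (assuming 4k pages): ofs = 0x1c00 and size = 0x1000 give
+ * an in-page offset of 0xc00, so the chunk is clamped at the page boundary
+ * and 0x400 bytes are returned; vmalloc memory is only virtually
+ * continuous, hence a chunk never crosses a page.
+ */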
+
+static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
+       .alloc = snd_dma_vmalloc_alloc,
+       .free = snd_dma_vmalloc_free,
+       .get_addr = snd_dma_vmalloc_get_addr,
+       .get_page = snd_dma_vmalloc_get_page,
+       .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
+
 #ifdef CONFIG_HAS_DMA
+/*
+ * IRAM allocator
+ */
 #ifdef CONFIG_GENERIC_ALLOCATOR
-       case SNDRV_DMA_TYPE_DEV_IRAM:
-               snd_free_dev_iram(dmab);
-               break;
+static int snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+       struct device *dev = dmab->dev.dev;
+       struct gen_pool *pool;
+
+       if (dev->of_node) {
+               pool = of_gen_pool_get(dev->of_node, "iram", 0);
+               /* store the pool in the private_data field */
+               dmab->private_data = pool;
+
+               dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
+                                                     PAGE_SIZE);
+               if (dmab->area)
+                       return 0;
+       }
+
+       /* Internal memory might have limited size and not enough space,
+        * so if the IRAM allocation fails, fall back to the standard
+        * device memory allocation.
+        */
+       dmab->dev.type = SNDRV_DMA_TYPE_DEV;
+       return __snd_dma_alloc_pages(dmab, size);
+}
+
+static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
+{
+       struct gen_pool *pool = dmab->private_data;
+
+       if (pool && dmab->area)
+               gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
+}
+
+static const struct snd_malloc_ops snd_dma_iram_ops = {
+       .alloc = snd_dma_iram_alloc,
+       .free = snd_dma_iram_free,
+};
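+/*
+ * Illustrative device-tree fragment (node and label names are made up):
+ * of_gen_pool_get() above looks up the "iram" phandle of the device node:
+ *
+ *   sound {
+ *           ...
+ *           iram = <&ocram>;
+ *   };
+ */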
 #endif /* CONFIG_GENERIC_ALLOCATOR */
-       case SNDRV_DMA_TYPE_DEV:
-       case SNDRV_DMA_TYPE_DEV_UC:
-               snd_free_dev_pages(dmab);
-               break;
+
+/*
+ * Coherent device pages allocator
+ */
+static int snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+       gfp_t gfp_flags;
+
+       gfp_flags = GFP_KERNEL
+               | __GFP_COMP    /* compound page lets parts be mapped */
+               | __GFP_NORETRY /* don't trigger OOM-killer */
+               | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
+       dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
+                                       gfp_flags);
+#ifdef CONFIG_X86
+       if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+               set_memory_wc((unsigned long)dmab->area,
+                             PAGE_ALIGN(size) >> PAGE_SHIFT);
 #endif
+       return 0;
+}
+
+static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
+{
+#ifdef CONFIG_X86
+       if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+               set_memory_wb((unsigned long)dmab->area,
+                             PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
+#endif
+       dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+}
+
+static const struct snd_malloc_ops snd_dma_dev_ops = {
+       .alloc = snd_dma_dev_alloc,
+       .free = snd_dma_dev_free,
+};
+#endif /* CONFIG_HAS_DMA */
+
+/*
+ * Entry points
+ */
+static const struct snd_malloc_ops *dma_ops[] = {
+       [SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
+       [SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
+#ifdef CONFIG_HAS_DMA
+       [SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
+       [SNDRV_DMA_TYPE_DEV_UC] = &snd_dma_dev_ops,
+#ifdef CONFIG_GENERIC_ALLOCATOR
+       [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
+#endif /* CONFIG_GENERIC_ALLOCATOR */
+#endif /* CONFIG_HAS_DMA */
 #ifdef CONFIG_SND_DMA_SGBUF
-       case SNDRV_DMA_TYPE_DEV_SG:
-       case SNDRV_DMA_TYPE_DEV_UC_SG:
-               snd_free_sgbuf_pages(dmab);
-               break;
+       [SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
+       [SNDRV_DMA_TYPE_DEV_UC_SG] = &snd_dma_sg_ops,
 #endif
-       default:
-               pr_err("snd-malloc: invalid device type %d\n", dmab->dev.type);
-       }
+};
+
+static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
+{
+       if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
+                        dmab->dev.type >= ARRAY_SIZE(dma_ops)))
+               return NULL;
+       return dma_ops[dmab->dev.type];
 }
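+/*
+ * Extension sketch (hypothetical): a new buffer type only needs its own ops
+ * and a dma_ops[] entry under its SNDRV_DMA_TYPE_* index, e.g.
+ *
+ *   static const struct snd_malloc_ops snd_dma_foo_ops = {
+ *           .alloc = snd_dma_foo_alloc,
+ *           .free  = snd_dma_foo_free,
+ *   };
+ *
+ *   and in dma_ops[]:
+ *   [SNDRV_DMA_TYPE_FOO] = &snd_dma_foo_ops,
+ */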
-EXPORT_SYMBOL(snd_dma_free_pages);
 
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <sound/memalloc.h>
-
+#include "memalloc_local.h"
+
+struct snd_sg_page {
+       void *buf;
+       dma_addr_t addr;
+};
+
+struct snd_sg_buf {
+       int size;       /* allocated byte size */
+       int pages;      /* allocated pages */
+       int tblsize;    /* allocated table size */
+       struct snd_sg_page *table;      /* address table */
+       struct page **page_table;       /* page table (for vmap/vunmap) */
+       struct device *dev;
+};
 
 /* table entries are align to 32 */
 #define SGBUF_TBL_ALIGN                32
 #define sgbuf_align_table(tbl) ALIGN((tbl), SGBUF_TBL_ALIGN)
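+/* worked example: a 45-page buffer gets its table rounded up to 64 entries */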
 
-int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
+static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
 {
        struct snd_sg_buf *sgbuf = dmab->private_data;
        struct snd_dma_buffer tmpb;
        int i;
 
-       if (! sgbuf)
-               return -EINVAL;
+       if (!sgbuf)
+               return;
 
        vunmap(dmab->area);
        dmab->area = NULL;
        kfree(sgbuf->page_table);
        kfree(sgbuf);
        dmab->private_data = NULL;
-       
-       return 0;
 }
 
 #define MAX_ALLOC_PAGES                32
 
-void *snd_malloc_sgbuf_pages(struct device *device,
-                            size_t size, struct snd_dma_buffer *dmab,
-                            size_t *res_size)
+static int snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
        struct snd_sg_buf *sgbuf;
        unsigned int i, pages, chunk, maxpages;
        int type = SNDRV_DMA_TYPE_DEV;
        pgprot_t prot = PAGE_KERNEL;
 
-       dmab->area = NULL;
-       dmab->addr = 0;
        dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
-       if (! sgbuf)
-               return NULL;
+       if (!sgbuf)
+               return -ENOMEM;
        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
                type = SNDRV_DMA_TYPE_DEV_UC;
 #ifdef pgprot_noncached
                prot = pgprot_noncached(PAGE_KERNEL);
 #endif
        }
-       sgbuf->dev = device;
+       sgbuf->dev = dmab->dev.dev;
        pages = snd_sgbuf_aligned_pages(size);
        sgbuf->tblsize = sgbuf_align_table(pages);
        table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
                if (chunk > maxpages)
                        chunk = maxpages;
                chunk <<= PAGE_SHIFT;
-               if (snd_dma_alloc_pages_fallback(type, device,
+               if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
                                                 chunk, &tmpb) < 0) {
                        if (!sgbuf->pages)
                                goto _failed;
-                       if (!res_size)
-                               goto _failed;
                        size = sgbuf->pages * PAGE_SIZE;
                        break;
                }
        dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
        if (! dmab->area)
                goto _failed;
-       if (res_size)
-               *res_size = sgbuf->size;
-       return dmab->area;
+       return 0;
 
  _failed:
-       snd_free_sgbuf_pages(dmab); /* free the table */
-       return NULL;
+       snd_dma_sg_free(dmab); /* free the table */
+       return -ENOMEM;
 }
 
-/*
- * compute the max chunk size with continuous pages on sg-buffer
- */
-unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
-                                     unsigned int ofs, unsigned int size)
+static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
+                                     size_t offset)
+{
+       struct snd_sg_buf *sgbuf = dmab->private_data;
+       dma_addr_t addr;
+
+       addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
+       addr &= ~((dma_addr_t)PAGE_SIZE - 1);
+       return addr + offset % PAGE_SIZE;
+}
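+/*
+ * Worked example (4k pages): offset 0x2345 indexes table entry 2; that
+ * entry's address is masked down to its page start and the in-page
+ * remainder 0x345 is added back.
+ */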
+
+static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
+                                       size_t offset)
+{
+       struct snd_sg_buf *sgbuf = dmab->private_data;
+       unsigned int idx = offset >> PAGE_SHIFT;
+
+       if (idx >= (unsigned int)sgbuf->pages)
+               return NULL;
+       return sgbuf->page_table[idx];
+}
+
+static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
+                                             unsigned int ofs,
+                                             unsigned int size)
 {
        struct snd_sg_buf *sg = dmab->private_data;
        unsigned int start, end, pg;
 
-       if (!sg)
-               return size;
-
        start = ofs >> PAGE_SHIFT;
        end = (ofs + size - 1) >> PAGE_SHIFT;
        /* check page continuity */
        /* ok, all on continuous pages */
        return size;
 }
-EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
+
+const struct snd_malloc_ops snd_dma_sg_ops = {
+       .alloc = snd_dma_sg_alloc,
+       .free = snd_dma_sg_free,
+       .get_addr = snd_dma_sg_get_addr,
+       .get_page = snd_dma_sg_get_page,
+       .get_chunk_size = snd_dma_sg_get_chunk_size,
+};