 struct device;
 struct page;
+struct vm_area_struct;
 
 /*
  * buffer device info
 int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
                                  struct snd_dma_buffer *dmab);
 void snd_dma_free_pages(struct snd_dma_buffer *dmab);
+int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
+                       struct vm_area_struct *area);
 
 dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset);
 struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset);
 
 }
 EXPORT_SYMBOL(snd_dma_free_pages);
 
+/**
+ * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
+ * @dmab: buffer allocation information
+ * @area: VM area information
+ *
+ * Return: zero if successful, or a negative error code
+ */
+int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
+                       struct vm_area_struct *area)
+{
+       const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+       if (ops && ops->mmap)
+               return ops->mmap(dmab, area);
+       else
+               return -ENOENT;
+}
+EXPORT_SYMBOL(snd_dma_buffer_mmap);
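
For orientation: snd_dma_get_ops() looks up the per-buffer-type ops table from the
private memalloc_local.h header, which this patch extends with an optional mmap hook.
A rough sketch of the table's shape (other members elided; exact layout may differ
across kernel versions):

    struct snd_malloc_ops {
            void *(*alloc)(struct snd_dma_buffer *dmab, size_t size);
            void (*free)(struct snd_dma_buffer *dmab);
            /* new: hand the userspace VMA to the allocator */
            int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area);
            /* get_addr/get_page/... hooks elided */
    };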
+
 /**
  * snd_sgbuf_get_addr - return the physical address at the corresponding offset
  * @dmab: buffer allocation information
                gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
 }
 
+static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
+                            struct vm_area_struct *area)
+{
+       area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+       return remap_pfn_range(area, area->vm_start,
+                              dmab->addr >> PAGE_SHIFT,
+                              area->vm_end - area->vm_start,
+                              area->vm_page_prot);
+}
+
 static const struct snd_malloc_ops snd_dma_iram_ops = {
        .alloc = snd_dma_iram_alloc,
        .free = snd_dma_iram_free,
+       .mmap = snd_dma_iram_mmap,
 };
 #endif /* CONFIG_GENERIC_ALLOCATOR */
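
To see when this hook fires: a buffer allocated with SNDRV_DMA_TYPE_DEV_IRAM is bound
to snd_dma_iram_ops, so a later snd_dma_buffer_mmap() call dispatches into the
write-combined remap_pfn_range() path above. A minimal sketch of that flow
(card->dev is illustrative and error handling is trimmed):

    struct snd_dma_buffer buf;

    /* Binds the buffer to snd_dma_iram_ops; a subsequent
     * snd_dma_buffer_mmap(&buf, area) lands in snd_dma_iram_mmap(). */
    if (!snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_IRAM, card->dev,
                             PAGE_SIZE, &buf))
            snd_dma_free_pages(&buf);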
 
        dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
 }
 
+static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
+                           struct vm_area_struct *area)
+{
+       return dma_mmap_coherent(dmab->dev.dev, area,
+                                dmab->area, dmab->addr, dmab->bytes);
+}
+
 static const struct snd_malloc_ops snd_dma_dev_ops = {
        .alloc = snd_dma_dev_alloc,
        .free = snd_dma_dev_free,
+       .mmap = snd_dma_dev_mmap,
 };
 #endif /* CONFIG_HAS_DMA */
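
Unlike the IRAM path, dma_mmap_coherent() lets the DMA API pick the right page
protection and pfn for the coherent allocation, which matters where the CPU's view of
the buffer differs from dmab->addr. A hypothetical driver-side caller tying it
together (my_pcm_mmap() is illustrative; snd_pcm_get_dma_buf() is the real accessor):

    static int my_pcm_mmap(struct snd_pcm_substream *substream,
                           struct vm_area_struct *area)
    {
            /* Dispatches to snd_dma_dev_mmap(), snd_dma_iram_mmap(), etc.,
             * depending on how the buffer was allocated. */
            return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area);
    }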
 
 
                             struct vm_area_struct *area)
 {
        area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-#ifdef CONFIG_GENERIC_ALLOCATOR
-       if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
-               area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
-               return remap_pfn_range(area, area->vm_start,
-                               substream->dma_buffer.addr >> PAGE_SHIFT,
-                               area->vm_end - area->vm_start, area->vm_page_prot);
-       }
-#endif /* CONFIG_GENERIC_ALLOCATOR */
-       if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
-           (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
-            substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
-               return dma_mmap_coherent(substream->dma_buffer.dev.dev,
-                                        area,
-                                        substream->runtime->dma_area,
-                                        substream->runtime->dma_addr,
-                                        substream->runtime->dma_bytes);
+       if (!substream->ops->page &&
+           !snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area))
+               return 0;
        /* mmap with fault handler */
        area->vm_ops = &snd_pcm_vm_ops_data_fault;
        return 0;
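
When the buffer type provides no mmap hook, or the driver supplies its own ->page
callback, snd_dma_buffer_mmap() returns -ENOENT and the code above falls back to
fault-based mapping, resolving pages lazily instead of remapping the whole range up
front. A generic sketch of that pattern (not the exact ALSA fault handler; the
vm_private_data wiring is assumed):

    static vm_fault_t my_data_fault(struct vm_fault *vmf)
    {
            struct snd_dma_buffer *dmab = vmf->vma->vm_private_data; /* assumed */
            struct page *page;

            /* Resolve one page at a time via the buffer's page lookup. */
            page = snd_sgbuf_get_page(dmab, vmf->pgoff << PAGE_SHIFT);
            if (!page)
                    return VM_FAULT_SIGBUS;
            get_page(page);
            vmf->page = page;
            return 0;
    }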