        .map_resource = intel_map_resource,
        .unmap_resource = intel_unmap_resource,
        .dma_supported = dma_direct_supported,
+       .mmap = dma_common_mmap,
+       .get_sgtable = dma_common_get_sgtable,
  };
  
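 +/*
 + * Resolve @addr back to the physical address it is mapped to and, if that
 + * address lives in the swiotlb pool, sync the bounce slot for @target.
 + */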
 +static void
 +bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
 +                 enum dma_data_direction dir, enum dma_sync_target target)
 +{
 +      struct dmar_domain *domain;
 +      phys_addr_t tlb_addr;
 +
 +      domain = find_domain(dev);
 +      if (WARN_ON(!domain))
 +              return;
 +
 +      tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr);
 +      if (is_swiotlb_buffer(tlb_addr))
 +              swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
 +}
 +
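 +/*
 + * Map a single buffer: allocate an IOVA range, bounce the data through a
 + * swiotlb slot if the buffer is not page aligned, then map the IOVA to the
 + * (possibly bounced) physical address with the required permissions.
 + */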
 +static dma_addr_t
 +bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
 +                enum dma_data_direction dir, unsigned long attrs,
 +                u64 dma_mask)
 +{
 +      size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
 +      struct dmar_domain *domain;
 +      struct intel_iommu *iommu;
 +      unsigned long iova_pfn;
 +      unsigned long nrpages;
 +      phys_addr_t tlb_addr;
 +      int prot = 0;
 +      int ret;
 +
 +      domain = find_domain(dev);
 +      if (WARN_ON(dir == DMA_NONE || !domain))
 +              return DMA_MAPPING_ERROR;
 +
 +      iommu = domain_get_iommu(domain);
 +      if (WARN_ON(!iommu))
 +              return DMA_MAPPING_ERROR;
 +
 +      nrpages = aligned_nrpages(0, size);
 +      iova_pfn = intel_alloc_iova(dev, domain,
 +                                  dma_to_mm_pfn(nrpages), dma_mask);
 +      if (!iova_pfn)
 +              return DMA_MAPPING_ERROR;
 +
 +      /*
 +       * Check if DMAR supports zero-length reads on write-only
 +       * mappings.
 +       */
 +      if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
 +                      !cap_zlr(iommu->cap))
 +              prot |= DMA_PTE_READ;
 +      if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
 +              prot |= DMA_PTE_WRITE;
 +
 +      /*
 +       * If both the physical buffer start address and size are
 +       * page aligned, we don't need to use a bounce page.
 +       */
 +      if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
 +              tlb_addr = swiotlb_tbl_map_single(dev,
 +                              __phys_to_dma(dev, io_tlb_start),
 +                              paddr, size, aligned_size, dir, attrs);
 +              if (tlb_addr == DMA_MAPPING_ERROR) {
 +                      goto swiotlb_error;
 +              } else {
 +                      /*
 +                       * Clean up the padding: if the original data was
 +                       * already copied into the bounce slot, only the
 +                       * tail beyond @size needs to be zeroed.
 +                       */
 +                      void *padding_start = phys_to_virt(tlb_addr);
 +                      size_t padding_size = aligned_size;
 +
 +                      if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 +                          (dir == DMA_TO_DEVICE ||
 +                           dir == DMA_BIDIRECTIONAL)) {
 +                              padding_start += size;
 +                              padding_size -= size;
 +                      }
 +
 +                      memset(padding_start, 0, padding_size);
 +              }
 +      } else {
 +              tlb_addr = paddr;
 +      }
 +
 +      ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
 +                               tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
 +      if (ret)
 +              goto mapping_error;
 +
 +      trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
 +
 +      return (phys_addr_t)iova_pfn << PAGE_SHIFT;
 +
 +mapping_error:
 +      if (is_swiotlb_buffer(tlb_addr))
 +              swiotlb_tbl_unmap_single(dev, tlb_addr, size,
 +                                       aligned_size, dir, attrs);
 +swiotlb_error:
 +      free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
 +      dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
 +              size, (unsigned long long)paddr, dir);
 +
 +      return DMA_MAPPING_ERROR;
 +}
 +
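 +/*
 + * Tear down a mapping created by bounce_map_single(): unmap the IOVA range
 + * and release the swiotlb slot if the buffer was bounced.
 + */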
 +static void
 +bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
 +                  enum dma_data_direction dir, unsigned long attrs)
 +{
 +      size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
 +      struct dmar_domain *domain;
 +      phys_addr_t tlb_addr;
 +
 +      domain = find_domain(dev);
 +      if (WARN_ON(!domain))
 +              return;
 +
 +      tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr);
 +      if (WARN_ON(!tlb_addr))
 +              return;
 +
 +      intel_unmap(dev, dev_addr, size);
 +      if (is_swiotlb_buffer(tlb_addr))
 +              swiotlb_tbl_unmap_single(dev, tlb_addr, size,
 +                                       aligned_size, dir, attrs);
 +
 +      trace_bounce_unmap_single(dev, dev_addr, size);
 +}
 +
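 +/*
 + * Page and resource map/unmap callbacks are thin wrappers around
 + * bounce_map_single() and bounce_unmap_single().
 + */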
 +static dma_addr_t
 +bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
 +              size_t size, enum dma_data_direction dir, unsigned long attrs)
 +{
 +      return bounce_map_single(dev, page_to_phys(page) + offset,
 +                               size, dir, attrs, *dev->dma_mask);
 +}
 +
 +static dma_addr_t
 +bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
 +                  enum dma_data_direction dir, unsigned long attrs)
 +{
 +      return bounce_map_single(dev, phys_addr, size,
 +                               dir, attrs, *dev->dma_mask);
 +}
 +
 +static void
 +bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
 +                enum dma_data_direction dir, unsigned long attrs)
 +{
 +      bounce_unmap_single(dev, dev_addr, size, dir, attrs);
 +}
 +
 +static void
 +bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
 +                    enum dma_data_direction dir, unsigned long attrs)
 +{
 +      bounce_unmap_single(dev, dev_addr, size, dir, attrs);
 +}
 +
 +static void
 +bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 +              enum dma_data_direction dir, unsigned long attrs)
 +{
 +      struct scatterlist *sg;
 +      int i;
 +
 +      for_each_sg(sglist, sg, nelems, i)
 +              bounce_unmap_page(dev, sg->dma_address,
 +                                sg_dma_len(sg), dir, attrs);
 +}
 +
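 +/*
 + * Map a scatterlist entry by entry; on failure, unmap the entries mapped
 + * so far and return 0 as the DMA API expects.
 + */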
 +static int
 +bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 +            enum dma_data_direction dir, unsigned long attrs)
 +{
 +      int i;
 +      struct scatterlist *sg;
 +
 +      for_each_sg(sglist, sg, nelems, i) {
 +              sg->dma_address = bounce_map_page(dev, sg_page(sg),
 +                                                sg->offset, sg->length,
 +                                                dir, attrs);
 +              if (sg->dma_address == DMA_MAPPING_ERROR)
 +                      goto out_unmap;
 +              sg_dma_len(sg) = sg->length;
 +      }
 +
 +      return nelems;
 +
 +out_unmap:
 +      bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
 +      return 0;
 +}
 +
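 +/*
 + * Sync callbacks for single buffers and scatterlists; all funnel into
 + * bounce_sync_single().
 + */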
 +static void
 +bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 +                         size_t size, enum dma_data_direction dir)
 +{
 +      bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
 +}
 +
 +static void
 +bounce_sync_single_for_device(struct device *dev, dma_addr_t addr,
 +                            size_t size, enum dma_data_direction dir)
 +{
 +      bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
 +}
 +
 +static void
 +bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
 +                     int nelems, enum dma_data_direction dir)
 +{
 +      struct scatterlist *sg;
 +      int i;
 +
 +      for_each_sg(sglist, sg, nelems, i)
 +              bounce_sync_single(dev, sg_dma_address(sg),
 +                                 sg_dma_len(sg), dir, SYNC_FOR_CPU);
 +}
 +
 +static void
 +bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
 +                        int nelems, enum dma_data_direction dir)
 +{
 +      struct scatterlist *sg;
 +      int i;
 +
 +      for_each_sg(sglist, sg, nelems, i)
 +              bounce_sync_single(dev, sg_dma_address(sg),
 +                                 sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
 +}
 +
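 +/*
 + * DMA ops that route map, unmap and sync requests through the bounce
 + * helpers above; coherent allocations reuse the intel_* callbacks.
 + */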
 +static const struct dma_map_ops bounce_dma_ops = {
 +      .alloc                  = intel_alloc_coherent,
 +      .free                   = intel_free_coherent,
 +      .map_sg                 = bounce_map_sg,
 +      .unmap_sg               = bounce_unmap_sg,
 +      .map_page               = bounce_map_page,
 +      .unmap_page             = bounce_unmap_page,
 +      .sync_single_for_cpu    = bounce_sync_single_for_cpu,
 +      .sync_single_for_device = bounce_sync_single_for_device,
 +      .sync_sg_for_cpu        = bounce_sync_sg_for_cpu,
 +      .sync_sg_for_device     = bounce_sync_sg_for_device,
 +      .map_resource           = bounce_map_resource,
 +      .unmap_resource         = bounce_unmap_resource,
 +      .dma_supported          = dma_direct_supported,
 +};
 +
  static inline int iommu_domain_cache_init(void)
  {
        int ret = 0;