|| !exp_info->ops->release))
                return ERR_PTR(-EINVAL);
 
-       if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
-                   (exp_info->ops->pin || exp_info->ops->unpin)))
-               return ERR_PTR(-EINVAL);
-
        if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
                return ERR_PTR(-EINVAL);
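Aside: the "!exp_info->ops->pin != !exp_info->ops->unpin" check kept above is
the usual both-or-neither idiom: each "!" collapses a callback pointer to a
boolean, and "!=" on booleans acts as XOR, so the WARN fires exactly when one
of the pair is wired up without the other. A standalone illustration, using a
hypothetical pair struct rather than the real ops table:

#include <stdio.h>

static void cb(void) { }

struct pair {
        void (*pin)(void);
        void (*unpin)(void);
};

/* true exactly when one callback of the pair is missing */
static int unpaired(const struct pair *p)
{
        return !p->pin != !p->unpin;
}

int main(void)
{
        struct pair both = { cb, cb };
        struct pair neither = { 0 };
        struct pair only_pin = { .pin = cb };

        printf("%d %d %d\n", unpaired(&both), unpaired(&neither),
               unpaired(&only_pin));   /* prints: 0 0 1 */
        return 0;
}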
 
        if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
                return;
 
        dma_resv_lock(dmabuf->resv, NULL);
-
-       if (attach->sgt) {
-               mangle_sg_table(attach->sgt);
-               attach->dmabuf->ops->unmap_dma_buf(attach, attach->sgt,
-                                                  attach->dir);
-
-               if (dma_buf_pin_on_map(attach))
-                       dma_buf_unpin(attach);
-       }
        list_del(&attach->node);
-
        dma_resv_unlock(dmabuf->resv);
 
        if (dmabuf->ops->detach)
                dmabuf->ops->detach(dmabuf, attach);
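With the cached sg_table gone, dma_buf_detach() is reduced to pure
bookkeeping: it unlinks the attachment and calls the exporter's optional
detach hook, but it no longer unmaps anything on the importer's behalf.
A minimal teardown sketch under that model; my_import_state and
my_import_teardown are hypothetical importer code, not part of this patch:

#include <linux/dma-buf.h>

struct my_import_state {                /* hypothetical importer bookkeeping */
        struct dma_buf *dmabuf;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        enum dma_data_direction dir;
};

static void my_import_teardown(struct my_import_state *st)
{
        /* unmap first: detach will not do it for us anymore */
        if (st->sgt)
                dma_buf_unmap_attachment_unlocked(st->attach, st->sgt,
                                                  st->dir);
        dma_buf_detach(st->dmabuf, st->attach);
        dma_buf_put(st->dmabuf);
}

The unmap-before-detach ordering is now entirely the importer's
responsibility.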
 
        dma_resv_assert_held(attach->dmabuf->resv);
 
-       if (attach->sgt) {
-               /*
-                * Two mappings with different directions for the same
-                * attachment are not allowed.
-                */
-               if (attach->dir != direction &&
-                   attach->dir != DMA_BIDIRECTIONAL)
-                       return ERR_PTR(-EBUSY);
-
-               return attach->sgt;
-       }
-
        if (dma_buf_pin_on_map(attach)) {
                ret = attach->dmabuf->ops->pin(attach);
                /* bail out if the exporter refused to pin the buffer */
                if (ret)
                        return ERR_PTR(ret);
        }
        mangle_sg_table(sg_table);
 
-       if (attach->dmabuf->ops->cache_sgt_mapping) {
-               attach->sgt = sg_table;
-               attach->dir = direction;
-       }
-
 #ifdef CONFIG_DMA_API_DEBUG
        {
                struct scatterlist *sg;
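Since the core no longer caches the first mapping, an importer that depended
on that behaviour can replicate it privately, including the old rule that a
second mapping with an incompatible direction is refused. A sketch under that
assumption; struct my_attachment and my_map_cached are hypothetical, and the
caller must hold the reservation lock, as dma_buf_map_attachment() requires:

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>

struct my_attachment {                  /* hypothetical importer state */
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;           /* importer's own cached mapping */
        enum dma_data_direction dir;    /* direction of that mapping */
};

static struct sg_table *my_map_cached(struct my_attachment *ma,
                                      enum dma_data_direction direction)
{
        struct sg_table *sgt;

        dma_resv_assert_held(ma->attach->dmabuf->resv);

        if (ma->sgt) {
                /* same restriction the core used to enforce */
                if (ma->dir != direction && ma->dir != DMA_BIDIRECTIONAL)
                        return ERR_PTR(-EBUSY);
                return ma->sgt;
        }

        sgt = dma_buf_map_attachment(ma->attach, direction);
        if (IS_ERR(sgt))
                return sgt;

        ma->sgt = sgt;
        ma->dir = direction;
        return sgt;
}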
 
        dma_resv_assert_held(attach->dmabuf->resv);
 
-       if (attach->sgt == sg_table)
-               return;
-
        mangle_sg_table(sg_table);
        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);

        if (dma_buf_pin_on_map(attach))
                dma_buf_unpin(attach);
 }
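The early return for a cached table is gone from the unmap path as well, so
every successful dma_buf_map_attachment() now reaches the exporter's
unmap_dma_buf exactly once. A usage sketch with the locked variants;
my_do_dma is a hypothetical caller:

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>

static int my_do_dma(struct dma_buf_attachment *attach)
{
        struct sg_table *sgt;

        dma_resv_lock(attach->dmabuf->resv, NULL);

        sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (IS_ERR(sgt)) {
                dma_resv_unlock(attach->dmabuf->resv);
                return PTR_ERR(sgt);
        }

        /* ... program the device using sgt ... */

        /* must balance the map above: no cache absorbs this anymore */
        dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
        dma_resv_unlock(attach->dmabuf->resv);
        return 0;
}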
 
 static const struct dma_buf_ops udmabuf_ops = {
-       .cache_sgt_mapping = true,
        .map_dma_buf       = map_udmabuf,
        .unmap_dma_buf     = unmap_udmabuf,
        .release           = release_udmabuf,
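For exporters, dropping the flag means map_dma_buf/unmap_dma_buf are invoked
on every map and unmap, so they must be safe and reasonably cheap to call
repeatedly, handing out a fresh table each time. A hedged sketch of what such
a callback can look like; my_buffer, its fields, and my_map_dma_buf are
illustrative, not udmabuf's actual implementation:

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct my_buffer {                      /* hypothetical exporter state */
        struct page **pages;
        unsigned long nr_pages;
};

static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
                                       enum dma_data_direction dir)
{
        struct my_buffer *buf = attach->dmabuf->priv;
        struct sg_table *sgt;
        int ret;

        /* build a fresh table per call; nothing is cached for us */
        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table_from_pages(sgt, buf->pages, buf->nr_pages, 0,
                                        buf->nr_pages << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret)
                goto err_free;

        ret = dma_map_sgtable(attach->dev, sgt, dir, 0);
        if (ret)
                goto err_table;

        return sgt;

err_table:
        sg_free_table(sgt);
err_free:
        kfree(sgt);
        return ERR_PTR(ret);
}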
 
 EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
 
 static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
-       .cache_sgt_mapping = true,
        .attach = drm_gem_map_attach,
        .detach = drm_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
 
 
 static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops =  {
        .ops = {
-               .cache_sgt_mapping = true,
                .attach = virtio_dma_buf_attach,
                .detach = drm_gem_map_detach,
                .map_dma_buf = virtgpu_gem_map_dma_buf,
 
  * @vunmap: [optional] unmaps a vmap from the buffer
  */
 struct dma_buf_ops {
-       /**
-        * @cache_sgt_mapping:
-        *
-        * If true the framework will cache the first mapping made for each
-        * attachment. This avoids creating mappings for attachments multiple
-        * times.
-        */
-       bool cache_sgt_mapping;
-
        /**
         * @attach:
         *
  * @dmabuf: buffer for this attachment.
  * @dev: device attached to the buffer.
  * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
- * @sgt: cached mapping.
- * @dir: direction of cached mapping.
  * @peer2peer: true if the importer can handle peer resources without pages.
  * @priv: exporter specific attachment data.
  * @importer_ops: importer operations for this attachment, if provided
        struct dma_buf *dmabuf;
        struct device *dev;
        struct list_head node;
-       struct sg_table *sgt;
-       enum dma_data_direction dir;
        bool peer2peer;
        const struct dma_buf_attach_ops *importer_ops;
        void *importer_priv;
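With @sgt and @dir removed, an attachment carries no mapping state at all:
whatever the importer maps, it owns. A typical bring-up sequence under that
model, complementing the teardown sketch earlier; my_import is a hypothetical
helper:

#include <linux/dma-buf.h>

static struct sg_table *my_import(struct device *dev, struct dma_buf *dmabuf,
                                  struct dma_buf_attachment **out_attach)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        attach = dma_buf_attach(dmabuf, dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt))
                dma_buf_detach(dmabuf, attach);         /* nothing to unmap */
        else
                *out_attach = attach;

        return sgt;
}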