struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
        struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
 
+       dma_descriptor_unmap(txd);
        if (!plchan->slave)
                pl08x_unmap_buffers(txd);
 
 
        list_move(&desc->desc_node, &atchan->free_list);
 
        /* unmap dma addresses (not on slave channels) */
+       dma_descriptor_unmap(txd);
        if (!atchan->chan_common.private) {
                struct device *parent = chan2parent(&atchan->chan_common);
                if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 
        list_splice_init(&desc->tx_list, &dwc->free_list);
        list_move(&desc->desc_node, &dwc->free_list);
 
+       dma_descriptor_unmap(txd);
        if (!is_slave_direction(dwc->direction)) {
                struct device *parent = chan2parent(&dwc->chan);
                if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 
                 * For the memcpy channels the API requires us to unmap the
                 * buffers unless requested otherwise.
                 */
+               dma_descriptor_unmap(&desc->txd);
                if (!edmac->chan.private)
                        ep93xx_dma_unmap_buffers(desc);
 
 
        /* Run any dependencies */
        dma_run_dependencies(txd);
 
+       dma_descriptor_unmap(txd);
        /* Unmap the dst buffer, if requested */
        if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
 
                dump_desc_dbg(ioat, desc);
                if (tx->cookie) {
                        dma_cookie_complete(tx);
+                       dma_descriptor_unmap(tx);
                        ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
                        ioat->active -= desc->hw->tx_cnt;
                        if (tx->callback) {
 
                tx = &desc->txd;
                dump_desc_dbg(ioat, desc);
                if (tx->cookie) {
+                       dma_descriptor_unmap(tx);
                        ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
                        dma_cookie_complete(tx);
                        if (tx->callback) {
 
                tx = &desc->txd;
                if (tx->cookie) {
                        dma_cookie_complete(tx);
+                       dma_descriptor_unmap(tx);
                        ioat3_dma_unmap(ioat, desc, idx + i);
                        if (tx->callback) {
                                tx->callback(tx->callback_param);
 
                if (tx->callback)
                        tx->callback(tx->callback_param);
 
+               dma_descriptor_unmap(tx);
                /* unmap dma addresses
                 * (unmap_single vs unmap_page?)
                 */
 
                        desc->async_tx.callback(
                                desc->async_tx.callback_param);
 
+               dma_descriptor_unmap(&desc->async_tx);
                /* unmap dma addresses
                 * (unmap_single vs unmap_page?)
                 */
 
                        list_move_tail(&desc->node, &pch->dmac->desc_pool);
                }
 
+               dma_descriptor_unmap(&desc->txd);
+
                if (callback) {
                        spin_unlock_irqrestore(&pch->lock, flags);
                        callback(callback_param);
 
                        desc->async_tx.callback(
                                desc->async_tx.callback_param);
 
+               dma_descriptor_unmap(&desc->async_tx);
                /* unmap dma addresses
                 * (unmap_single vs unmap_page?)
                 *
 
 
        list_move(&td_desc->desc_node, &td_chan->free_list);
 
+       dma_descriptor_unmap(txd);
        if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
                __td_unmap_descs(td_desc,
                        txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
 
        list_splice_init(&desc->tx_list, &dc->free_list);
        list_move(&desc->desc_node, &dc->free_list);
 
+       dma_descriptor_unmap(txd);
        if (!ds) {
                dma_addr_t dmaaddr;
                if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 
 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+       u8 to_cnt;              /* number of DMA_TO_DEVICE entries in addr[] */
+       u8 from_cnt;            /* number of DMA_FROM_DEVICE entries */
+       u8 bidi_cnt;            /* number of DMA_BIDIRECTIONAL entries */
+       struct device *dev;     /* device the addresses were mapped for */
+       struct kref kref;       /* shared by every descriptor using this data */
+       size_t len;             /* common length of all mappings */
+       dma_addr_t addr[0];     /* to, then from, then bidi addresses */
+};
+
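The three counters describe how the trailing addr[] array is packed: DMA_TO_DEVICE addresses first, then DMA_FROM_DEVICE, then DMA_BIDIRECTIONAL, all sharing one mapping length. Below is a minimal sketch of the release routine that dmaengine_unmap_put() could hand to kref_put() under that layout; the example_* name and the plain kfree()-backed allocation are illustrative assumptions, not part of this patch.

    #include <linux/dma-mapping.h>
    #include <linux/dmaengine.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    /* Illustrative release callback: walk addr[] in to/from/bidi order. */
    static void example_unmap_release(struct kref *kref)
    {
            struct dmaengine_unmap_data *unmap =
                    container_of(kref, struct dmaengine_unmap_data, kref);
            struct device *dev = unmap->dev;
            int i = 0, cnt;

            cnt = unmap->to_cnt;
            for (; i < cnt; i++)            /* source buffers */
                    dma_unmap_page(dev, unmap->addr[i], unmap->len,
                                   DMA_TO_DEVICE);

            cnt += unmap->from_cnt;
            for (; i < cnt; i++)            /* destination buffers */
                    dma_unmap_page(dev, unmap->addr[i], unmap->len,
                                   DMA_FROM_DEVICE);

            cnt += unmap->bidi_cnt;
            for (; i < cnt; i++)            /* bidirectional buffers */
                    dma_unmap_page(dev, unmap->addr[i], unmap->len,
                                   DMA_BIDIRECTIONAL);

            kfree(unmap);
    }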
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
  * ---dma generic offload fields---
        dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
        dma_async_tx_callback callback;
        void *callback_param;
+       struct dmaengine_unmap_data *unmap;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        struct dma_async_tx_descriptor *next;
        struct dma_async_tx_descriptor *parent;
 #endif
 };
 
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+                                struct dmaengine_unmap_data *unmap)
+{
+       kref_get(&unmap->kref);
+       tx->unmap = unmap;
+}
+
+/* Drops the reference taken in dma_set_unmap(); the final put performs
+ * the deferred dma_unmap_page() calls.
+ */
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+       if (tx->unmap) {
+               dmaengine_unmap_put(tx->unmap);
+               tx->unmap = NULL;
+       }
+}
+
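With these helpers in place, a prep routine can record its mappings once and attach them to the descriptor, and once drivers populate the unmap data the per-driver unmap branches shown in the hunks above become redundant. A hedged usage sketch follows, assuming a plain kzalloc()-backed allocation and the generic device_prep_dma_memcpy() callback; the example_* name is hypothetical and error unwinding is trimmed for brevity.

    #include <linux/dma-mapping.h>
    #include <linux/dmaengine.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    /* Map one source and one destination page for a memcpy and attach the
     * bookkeeping to the descriptor so completion can unmap generically.
     */
    static int example_issue_memcpy(struct dma_chan *chan, struct page *dst,
                                    struct page *src, size_t len)
    {
            struct device *dev = chan->device->dev;
            struct dma_async_tx_descriptor *tx;
            struct dmaengine_unmap_data *unmap;

            unmap = kzalloc(sizeof(*unmap) + 2 * sizeof(dma_addr_t), GFP_NOWAIT);
            if (!unmap)
                    return -ENOMEM;

            kref_init(&unmap->kref);        /* allocation reference */
            unmap->dev = dev;
            unmap->len = len;
            unmap->to_cnt = 1;              /* addr[0] is the source */
            unmap->from_cnt = 1;            /* addr[1] is the destination */
            unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
            unmap->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);

            tx = chan->device->device_prep_dma_memcpy(chan, unmap->addr[1],
                                                      unmap->addr[0], len, 0);
            if (!tx)
                    return -ENOMEM;

            dma_set_unmap(tx, unmap);       /* descriptor takes its own kref */
            dmaengine_unmap_put(unmap);     /* drop the allocation reference */
            dmaengine_submit(tx);
            dma_async_issue_pending(chan);
            return 0;
    }

The completion path then only needs the dma_descriptor_unmap(tx) call introduced above to drop the last reference and trigger the actual unmaps.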
 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {