        sg.dma_address = addr;
        sg.length = size;
 
-       return chan->device->device_prep_slave_sg(chan, &sg, 1,
-                                                 direction, flags);
+       return dmaengine_prep_slave_sg(chan, &sg, 1, direction, flags);
 }
 
 #else
 
                            info->len, offset_in_page(info->buf));
                sg_dma_address(&sg) = info->buf;
 
-               desc = chan->device->device_prep_slave_sg(chan,
+               desc = dmaengine_prep_slave_sg(chan,
                        &sg, 1, info->direction, DMA_PREP_INTERRUPT);
                break;
        case DMA_CYCLIC:
-               desc = chan->device->device_prep_dma_cyclic(chan,
+               desc = dmaengine_prep_dma_cyclic(chan,
                        info->buf, info->len, info->period, info->direction);
                break;
        default:
 
                sg_dma_address(sg)      = vb2_dma_contig_plane_dma_addr(vb, 0);
                sg_dma_len(sg)          = new_size;
 
-               txd = ichan->dma_chan.device->device_prep_slave_sg(
+               txd = dmaengine_prep_slave_sg(
                        &ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM,
                        DMA_PREP_INTERRUPT);
                if (!txd)
 
 
        spin_unlock_irq(&fh->queue_lock);
 
-       desc = fh->chan->device->device_prep_slave_sg(fh->chan,
+       desc = dmaengine_prep_slave_sg(fh->chan,
                buf->sg, sg_elems, DMA_DEV_TO_MEM,
                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
        if (!desc) {
 
                        data->sg_len, direction);
 
        dmaengine_slave_config(chan, &host->dma_conf);
-       desc = chan->device->device_prep_slave_sg(chan,
+       desc = dmaengine_prep_slave_sg(chan,
                        data->sg, sglen, slave_dirn,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
 
                return -EINVAL;
 
        dmaengine_slave_config(chan, &conf);
-       desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
+       desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
                                            conf.direction, DMA_CTRL_ACK);
        if (!desc)
                goto unmap_exit;
 
        if (nents != data->sg_len)
                return -EINVAL;
 
-       host->desc = host->dma->device->device_prep_slave_sg(host->dma,
+       host->desc = dmaengine_prep_slave_sg(host->dma,
                data->sg, data->sg_len, slave_dirn,
                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 
                sg_len = SSP_PIO_NUM;
        }
 
-       desc = host->dmach->device->device_prep_slave_sg(host->dmach,
+       desc = dmaengine_prep_slave_sg(host->dmach,
                                sgl, sg_len, host->slave_dirn, append);
        if (desc) {
                desc->callback = mxs_mmc_dma_irq_callback;
 
                         DMA_FROM_DEVICE);
        if (ret > 0) {
                host->dma_active = true;
-               desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+               desc = dmaengine_prep_slave_sg(chan, sg, ret,
                        DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }
 
                         DMA_TO_DEVICE);
        if (ret > 0) {
                host->dma_active = true;
-               desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+               desc = dmaengine_prep_slave_sg(chan, sg, ret,
                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }
 
 
 
        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
        if (ret > 0)
-               desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+               desc = dmaengine_prep_slave_sg(chan, sg, ret,
                        DMA_DEV_TO_MEM, DMA_CTRL_ACK);
 
        if (desc) {
 
        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
        if (ret > 0)
-               desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+               desc = dmaengine_prep_slave_sg(chan, sg, ret,
                        DMA_MEM_TO_DEV, DMA_CTRL_ACK);
 
        if (desc) {
 
                | BM_GPMI_CTRL0_ADDRESS_INCREMENT
                | BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
        pio[1] = pio[2] = 0;
-       desc = channel->device->device_prep_slave_sg(channel,
+       desc = dmaengine_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
                                        ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
        if (!desc) {
 
        sg_init_one(sgl, this->cmd_buffer, this->command_length);
        dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
-       desc = channel->device->device_prep_slave_sg(channel,
-                                       sgl, 1, DMA_MEM_TO_DEV, 1);
+       desc = dmaengine_prep_slave_sg(channel, sgl, 1, DMA_MEM_TO_DEV, 1);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
                | BF_GPMI_CTRL0_ADDRESS(address)
                | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
        pio[1] = 0;
-       desc = channel->device->device_prep_slave_sg(channel,
-                                       (struct scatterlist *)pio,
+       desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
                                        ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
        if (!desc) {
                pr_err("step 1 error\n");
 
        /* [2] send DMA request */
        prepare_data_dma(this, DMA_TO_DEVICE);
-       desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
+       desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
                                                1, DMA_MEM_TO_DEV, 1);
        if (!desc) {
                pr_err("step 2 error\n");
                | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
                | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
        pio[1] = 0;
-       desc = channel->device->device_prep_slave_sg(channel,
+       desc = dmaengine_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
                                        ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
        if (!desc) {
 
        /* [2] : send DMA request */
        prepare_data_dma(this, DMA_FROM_DEVICE);
-       desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
-                                               1, DMA_DEV_TO_MEM, 1);
+       desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
+                                       1, DMA_DEV_TO_MEM, 1);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
        pio[4] = payload;
        pio[5] = auxiliary;
 
-       desc = channel->device->device_prep_slave_sg(channel,
-                                       (struct scatterlist *)pio,
+       desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
                                        ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
        if (!desc) {
                pr_err("step 2 error\n");
                | BF_GPMI_CTRL0_ADDRESS(address)
                | BF_GPMI_CTRL0_XFER_COUNT(0);
        pio[1] = 0;
-       desc = channel->device->device_prep_slave_sg(channel,
+       desc = dmaengine_prep_slave_sg(channel,
                                (struct scatterlist *)pio, 2,
                                DMA_TRANS_NONE, 0);
        if (!desc) {
        pio[3] = geo->page_size;
        pio[4] = payload;
        pio[5] = auxiliary;
-       desc = channel->device->device_prep_slave_sg(channel,
+       desc = dmaengine_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
                                        ARRAY_SIZE(pio), DMA_TRANS_NONE, 1);
        if (!desc) {
                | BF_GPMI_CTRL0_ADDRESS(address)
                | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
        pio[1] = 0;
-       desc = channel->device->device_prep_slave_sg(channel,
+       desc = dmaengine_prep_slave_sg(channel,
                                (struct scatterlist *)pio, 2,
                                DMA_TRANS_NONE, 1);
        if (!desc) {
 
        if (sg_dma_len(&ctl->sg) % 4)
                sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
 
-       ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
+       ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
                &ctl->sg, 1, DMA_MEM_TO_DEV,
                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
        if (!ctl->adesc)
 
                sg_dma_len(sg) = DMA_BUFFER_SIZE;
 
-               ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
+               ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
                        sg, 1, DMA_DEV_TO_MEM,
                        DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
 
 
        dws->tx_sgl.dma_address = dws->tx_dma;
        dws->tx_sgl.length = dws->len;
 
-       txdesc = txchan->device->device_prep_slave_sg(txchan,
+       txdesc = dmaengine_prep_slave_sg(txchan,
                                &dws->tx_sgl,
                                1,
                                DMA_MEM_TO_DEV,
        dws->rx_sgl.dma_address = dws->rx_dma;
        dws->rx_sgl.length = dws->len;
 
-       rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
+       rxdesc = dmaengine_prep_slave_sg(rxchan,
                                &dws->rx_sgl,
                                1,
                                DMA_DEV_TO_MEM,
 
        if (!nents)
                return ERR_PTR(-ENOMEM);
 
-       txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
-                                                slave_dirn, DMA_CTRL_ACK);
+       txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents,
+                                       slave_dirn, DMA_CTRL_ACK);
        if (!txd) {
                dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
                return ERR_PTR(-ENOMEM);
 
                goto err_tx_sgmap;
 
        /* Send both scatterlists */
-       rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
+       rxdesc = dmaengine_prep_slave_sg(rxchan,
                                      pl022->sgt_rx.sgl,
                                      rx_sglen,
                                      DMA_DEV_TO_MEM,
        if (!rxdesc)
                goto err_rxdesc;
 
-       txdesc = txchan->device->device_prep_slave_sg(txchan,
+       txdesc = dmaengine_prep_slave_sg(txchan,
                                      pl022->sgt_tx.sgl,
                                      tx_sglen,
                                      DMA_MEM_TO_DEV,
 
                sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
        }
        sg = dma->sg_rx_p;
-       desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg,
+       desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg,
                                        num, DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_rx) {
                sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
        }
        sg = dma->sg_tx_p;
-       desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx,
+       desc_tx = dmaengine_prep_slave_sg(dma->chan_tx,
                                        sg, num, DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_tx) {
 
                return -EBUSY;
        }
 
-       desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
+       desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
                                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
 static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
 {
        struct dma_chan *rxchan = uap->dmarx.chan;
-       struct dma_device *dma_dev;
        struct pl011_dmarx_data *dmarx = &uap->dmarx;
        struct dma_async_tx_descriptor *desc;
        struct pl011_sgbuf *sgbuf;
        /* Start the RX DMA job */
        sgbuf = uap->dmarx.use_buf_b ?
                &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
-       dma_dev = rxchan->device;
-       desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+       desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
                                        DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        /*
 
 
        sg_dma_address(sg) = priv->rx_buf_dma;
 
-       desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx,
+       desc = dmaengine_prep_slave_sg(priv->chan_rx,
                        sg, 1, DMA_DEV_TO_MEM,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
                        sg_dma_len(sg) = size;
        }
 
-       desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx,
+       desc = dmaengine_prep_slave_sg(priv->chan_tx,
                                        priv->sg_tx_p, nent, DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
 
                struct scatterlist *sg = &s->sg_rx[i];
                struct dma_async_tx_descriptor *desc;
 
-               desc = chan->device->device_prep_slave_sg(chan,
+               desc = dmaengine_prep_slave_sg(chan,
                        sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 
                if (desc) {
 
        BUG_ON(!sg_dma_len(sg));
 
-       desc = chan->device->device_prep_slave_sg(chan,
+       desc = dmaengine_prep_slave_sg(chan,
                        sg, s->sg_len_tx, DMA_MEM_TO_DEV,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
 
        dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG,
                                             (unsigned long) &slave_conf);
 
-       dma_desc = dma_chan->device->
-                       device_prep_slave_sg(dma_chan, &sg, 1, direction,
+       dma_desc = dmaengine_prep_slave_sg(dma_chan, &sg, 1, direction,
                                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!dma_desc)
                return false;
 
        sg_dma_address(&sg) = pkt->dma + pkt->actual;
        sg_dma_len(&sg) = pkt->trans;
 
-       desc = chan->device->device_prep_slave_sg(chan, &sg, 1, dir,
-                                                 DMA_PREP_INTERRUPT |
-                                                 DMA_CTRL_ACK);
+       desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir,
+                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return;
 
 
 
        /* This enables the channel */
        if (mx3_fbi->cookie < 0) {
-               mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan,
+               mx3_fbi->txd = dmaengine_prep_slave_sg(dma_chan,
                      &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
                if (!mx3_fbi->txd) {
                        dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
        if (mx3_fbi->txd)
                async_tx_ack(mx3_fbi->txd);
 
-       txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg +
+       txd = dmaengine_prep_slave_sg(dma_chan, sg +
                mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
        if (!txd) {
                dev_err(fbi->device,
 
        return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags);
 }
 
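+/*
+ * dmaengine_prep_slave_sg - prepare a slave scatter/gather transaction.
+ * Inline wrapper so slave drivers call this helper instead of reaching
+ * through chan->device for the device_prep_slave_sg() callback.
+ */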
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
+       struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+       enum dma_transfer_direction dir, unsigned long flags)
+{
+       return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+                                                 dir, flags);
+}
+
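+/*
+ * dmaengine_prep_dma_cyclic - prepare a cyclic (circular buffer)
+ * transaction, e.g. for period-based audio streaming. Inline wrapper
+ * around the channel's device_prep_dma_cyclic() callback.
+ */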
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
+               struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+               size_t period_len, enum dma_transfer_direction dir)
+{
+       return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
+                                                       period_len, dir);
+}
+
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
        return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
 
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct ep93xx_runtime_data *rtd = runtime->private_data;
        struct dma_chan *chan = rtd->dma_chan;
-       struct dma_device *dma_dev = chan->device;
        struct dma_async_tx_descriptor *desc;
 
        rtd->pointer_bytes = 0;
-       desc = dma_dev->device_prep_dma_cyclic(chan, runtime->dma_addr,
+       desc = dmaengine_prep_dma_cyclic(chan, runtime->dma_addr,
                                               rtd->period_bytes * rtd->periods,
                                               rtd->period_bytes,
                                               rtd->dma_data.direction);
 
 
        iprtd->buf = (unsigned int *)substream->dma_buffer.area;
 
-       iprtd->desc = chan->device->device_prep_dma_cyclic(chan, dma_addr,
+       iprtd->desc = dmaengine_prep_dma_cyclic(chan, dma_addr,
                        iprtd->period_bytes * iprtd->periods,
                        iprtd->period_bytes,
                        substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
 
 
        iprtd->buf = substream->dma_buffer.area;
 
-       iprtd->desc = chan->device->device_prep_dma_cyclic(chan, dma_addr,
+       iprtd->desc = dmaengine_prep_dma_cyclic(chan, dma_addr,
                        iprtd->period_bytes * iprtd->periods,
                        iprtd->period_bytes,
                        substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
 
        sg_dma_len(&sg) = size;
        sg_dma_address(&sg) = buff;
 
-       desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan,
+       desc = dmaengine_prep_slave_sg(siu_stream->chan,
                &sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(dev, "Failed to allocate a dma descriptor\n");
        sg_dma_len(&sg) = size;
        sg_dma_address(&sg) = buff;
 
-       desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan,
+       desc = dmaengine_prep_slave_sg(siu_stream->chan,
                &sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(dev, "Failed to allocate dma descriptor\n");
 
        sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf_dma_addr)),
                    dmadata->frag_bytes, buf_dma_addr & (PAGE_SIZE - 1));
        sg_dma_address(&sg) = buf_dma_addr;
-       desc = chan->device->device_prep_slave_sg(chan, &sg, 1,
+       desc = dmaengine_prep_slave_sg(chan, &sg, 1,
                dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
                DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);