#ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct dma_chan         *dmach;
        struct list_head        async_list;
-       struct list_head        pend_list;
        spinlock_t              req_lock;
        struct mutex            dma_lock;
        struct kref             dma_ref;
 static struct class *dev_class;
 static dev_t dev_number;
 
-static struct workqueue_struct *dma_wq;
-
 static void mport_release_mapping(struct kref *ref);
 
 static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 
 struct mport_dma_req {
+       struct kref refcount;
        struct list_head node;
        struct file *filp;
        struct mport_cdev_priv *priv;
        struct completion req_comp;
 };
 
-struct mport_faf_work {
-       struct work_struct work;
-       struct mport_dma_req *req;
-};
-
 static void mport_release_def_dma(struct kref *dma_ref)
 {
        struct mport_dev *md =
        complete(&priv->comp);
 }
 
-static void dma_req_free(struct mport_dma_req *req)
+static void dma_req_free(struct kref *ref)
 {
+       struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
+                       refcount);
        struct mport_cdev_priv *priv = req->priv;
        unsigned int i;
 
        req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
                                               NULL, NULL);
        complete(&req->req_comp);
-}
-
-static void dma_faf_cleanup(struct work_struct *_work)
-{
-       struct mport_faf_work *work = container_of(_work,
-                                               struct mport_faf_work, work);
-       struct mport_dma_req *req = work->req;
-
-       dma_req_free(req);
-       kfree(work);
-}
-
-static void dma_faf_callback(void *param)
-{
-       struct mport_dma_req *req = (struct mport_dma_req *)param;
-       struct mport_faf_work *work;
-
-       work = kmalloc(sizeof(*work), GFP_ATOMIC);
-       if (!work)
-               return;
-
-       INIT_WORK(&work->work, dma_faf_cleanup);
-       work->req = req;
-       queue_work(dma_wq, &work->work);
+       kref_put(&req->refcount, dma_req_free);
 }
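
The hunks above drop the fire-and-forget workqueue and tie the lifetime of struct mport_dma_req to a plain kref: the creator holds one reference, the DMA completion callback holds another, and whichever kref_put() drops the count to zero runs dma_req_free(). A minimal sketch of that idiom, using hypothetical names (my_req, my_req_alloc, my_req_release, my_req_done) rather than the driver's own code:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct my_req {
		struct kref refcount;
		/* request payload would live here */
	};

	/* Runs only when the last reference is dropped. */
	static void my_req_release(struct kref *ref)
	{
		struct my_req *req = container_of(ref, struct my_req, refcount);

		kfree(req);
	}

	static struct my_req *my_req_alloc(void)
	{
		struct my_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

		if (req)
			kref_init(&req->refcount);	/* count = 1, owned by the creator */
		return req;
	}

	/* An asynchronous completion path drops only its own reference. */
	static void my_req_done(struct my_req *req)
	{
		kref_put(&req->refcount, my_req_release);
	}
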
 
 /*
                goto err_out;
        }
 
-       if (sync == RIO_TRANSFER_FAF)
-               tx->callback = dma_faf_callback;
-       else
-               tx->callback = dma_xfer_callback;
+       tx->callback = dma_xfer_callback;
        tx->callback_param = req;
 
        req->dmach = chan;
        req->sync = sync;
        req->status = DMA_IN_PROGRESS;
        init_completion(&req->req_comp);
+       kref_get(&req->refcount);
 
        cookie = dmaengine_submit(tx);
        req->cookie = cookie;
        if (dma_submit_error(cookie)) {
                rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
                           cookie, xfer->rio_addr, xfer->length);
+               kref_put(&req->refcount, dma_req_free);
                ret = -EIO;
                goto err_out;
        }
        if (!req)
                return -ENOMEM;
 
+       kref_init(&req->refcount);
+
        ret = get_dma_channel(priv);
        if (ret) {
                kfree(req);
        ret = do_dma_request(req, xfer, sync, nents);
 
        if (ret >= 0) {
-               if (sync == RIO_TRANSFER_SYNC)
-                       goto sync_out;
-               return ret; /* return ASYNC cookie */
-       }
-
-       if (ret == -ETIMEDOUT || ret == -EINTR) {
-               /*
-                * This can happen only in case of SYNC transfer.
-                * Do not free unfinished request structure immediately.
-                * Place it into pending list and deal with it later
-                */
-               spin_lock(&priv->req_lock);
-               list_add_tail(&req->node, &priv->pend_list);
-               spin_unlock(&priv->req_lock);
-               return ret;
+               if (sync == RIO_TRANSFER_ASYNC)
+                       return ret; /* return ASYNC cookie */
+       } else {
+               rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
        }
 
-
-       rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
-sync_out:
-       dma_unmap_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir);
-       sg_free_table(&req->sgt);
 err_pg:
-       if (page_list) {
+       if (!req->page_list) {
                for (i = 0; i < nr_pages; i++)
                        put_page(page_list[i]);
                kfree(page_list);
        }
 err_req:
-       if (req->map) {
-               mutex_lock(&md->buf_mutex);
-               kref_put(&req->map->ref, mport_release_mapping);
-               mutex_unlock(&md->buf_mutex);
-       }
-       put_dma_channel(priv);
-       kfree(req);
+       kref_put(&req->refcount, dma_req_free);
        return ret;
 }
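
Continuing the my_req sketch above, the submit path in this hunk follows the usual pairing rule: the callback's reference is taken with kref_get() before the descriptor is handed to the engine, and if submission fails (so the callback can never run) the submitter drops that reference itself; the final kref_put() at err_req then releases the creator's reference. my_hw_submit() below is a hypothetical stand-in for the dmaengine_submit() call in the real driver:

	/* Hypothetical hardware submit; on success its completion
	 * callback is expected to call kref_put() exactly once.
	 */
	extern int my_hw_submit(struct my_req *req);

	static int my_req_submit(struct my_req *req)
	{
		int err;

		/* Reference owned by the completion callback. */
		kref_get(&req->refcount);

		err = my_hw_submit(req);
		if (err)
			/* Submission failed: the callback will never run,
			 * so drop its reference here, mirroring the
			 * dma_submit_error() hunk above.
			 */
			kref_put(&req->refcount, my_req_release);

		return err;
	}
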
 
                ret = 0;
 
        if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
-               dma_req_free(req);
+               kref_put(&req->refcount, dma_req_free);
 
        return ret;
 
 
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
        INIT_LIST_HEAD(&priv->async_list);
-       INIT_LIST_HEAD(&priv->pend_list);
        spin_lock_init(&priv->req_lock);
        mutex_init(&priv->dma_lock);
 #endif
 
        md = priv->md;
 
-       flush_workqueue(dma_wq);
-
        spin_lock(&priv->req_lock);
        if (!list_empty(&priv->async_list)) {
                rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
                                   req->filp, req->cookie,
                                   completion_done(&req->req_comp)?"yes":"no");
                        list_del(&req->node);
-                       dma_req_free(req);
-               }
-       }
-
-       if (!list_empty(&priv->pend_list)) {
-               rmcd_debug(EXIT, "Free pending DMA requests for filp=%p %s(%d)",
-                          filp, current->comm, task_pid_nr(current));
-               list_for_each_entry_safe(req,
-                                        req_next, &priv->pend_list, node) {
-                       rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
-                                  req->filp, req->cookie,
-                                  completion_done(&req->req_comp)?"yes":"no");
-                       list_del(&req->node);
-                       dma_req_free(req);
+                       kref_put(&req->refcount, dma_req_free);
                }
        }
 
                        current->comm, task_pid_nr(current), wret);
        }
 
-       spin_lock(&priv->req_lock);
-
-       if (!list_empty(&priv->pend_list)) {
-               rmcd_debug(EXIT, "ATTN: pending DMA requests, filp=%p %s(%d)",
-                          filp, current->comm, task_pid_nr(current));
-       }
-
-       spin_unlock(&priv->req_lock);
-
        if (priv->dmach != priv->md->dma_chan) {
                rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
                           filp, current->comm, task_pid_nr(current));
        cdev_device_del(&md->cdev, &md->dev);
        mport_cdev_kill_fasync(md);
 
-       flush_workqueue(dma_wq);
-
        /* TODO: do we need to give clients some time to close file
         * descriptors? Simple wait for XX, or kref?
         */
                goto err_cli;
        }
 
-       dma_wq = create_singlethread_workqueue("dma_wq");
-       if (!dma_wq) {
-               rmcd_error("failed to create DMA work queue");
-               ret = -ENOMEM;
-               goto err_wq;
-       }
-
        return 0;
 
-err_wq:
-       class_interface_unregister(&rio_mport_interface);
 err_cli:
        unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
 err_chr:
        class_interface_unregister(&rio_mport_interface);
        class_destroy(dev_class);
        unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
-       destroy_workqueue(dma_wq);
 }
 
 module_init(mport_init);