        int                             errors_count;
        int                             overrun_count;
        int                             buffers_count;
+
+       /* Serialize DMA prep/submit against dmaengine_terminate_all() */
+       struct mutex                    dma_lock;
 };
 
 static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n)
                return ret;
        }
 
+       /*
+        * Prevent a dmaengine_terminate_all() call from slipping in between
+        * dmaengine_prep_slave_single() and dmaengine_submit()
+        * by locking the whole DMA submission sequence
+        */
+       mutex_lock(&dcmi->dma_lock);
+
        /* Prepare a DMA transaction */
        desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr,
                                           buf->size,
                                           DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT);
        if (!desc) {
                dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer phy=%pad size=%zu\n",
                        __func__, &buf->paddr, buf->size);
+               mutex_unlock(&dcmi->dma_lock);
                return -EINVAL;
        }
 
        dcmi->dma_cookie = dmaengine_submit(desc);
        if (dma_submit_error(dcmi->dma_cookie)) {
                dev_err(dcmi->dev, "%s: DMA submission failed\n", __func__);
+               mutex_unlock(&dcmi->dma_lock);
                return -ENXIO;
        }
 
+       mutex_unlock(&dcmi->dma_lock);
+
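+       /*
+        * dma_async_issue_pending() can safely run outside dma_lock:
+        * the descriptor is already submitted, so a concurrent
+        * dmaengine_terminate_all() simply cancels it
+        */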
        dma_async_issue_pending(dcmi->dma_chan);
 
        return 0;
        spin_unlock_irq(&dcmi->irqlock);
 
        /* Stop all pending DMA operations */
+       mutex_lock(&dcmi->dma_lock);
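+       /* Holding dma_lock keeps the terminate from racing prep/submit */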
        dmaengine_terminate_all(dcmi->dma_chan);
+       mutex_unlock(&dcmi->dma_lock);
 
        pm_runtime_put(dcmi->dev);
 
 
        spin_lock_init(&dcmi->irqlock);
        mutex_init(&dcmi->lock);
+       mutex_init(&dcmi->dma_lock);
        init_completion(&dcmi->complete);
        INIT_LIST_HEAD(&dcmi->buffers);