spin_unlock_irq(&ctx->dma.lock);
 
        addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
-       ctx->sequence = 0;
-       ctx->dma.state = CAL_DMA_RUNNING;
 
        pm_runtime_get_sync(ctx->cal->dev);
 
-       cal_ctx_csi2_config(ctx);
-       cal_ctx_pix_proc_config(ctx);
-       cal_ctx_wr_dma_config(ctx);
-       cal_ctx_wr_dma_addr(ctx, addr);
-       cal_ctx_enable_irqs(ctx);
+       cal_ctx_set_dma_addr(ctx, addr);
+       cal_ctx_start(ctx);
 
        ret = v4l2_subdev_call(&ctx->phy->subdev, video, s_stream, 1);
        if (ret)
        return 0;
 
 err:
-       cal_ctx_wr_dma_disable(ctx);
-       cal_ctx_disable_irqs(ctx);
-       ctx->dma.state = CAL_DMA_STOPPED;
+       cal_ctx_stop(ctx);
+       pm_runtime_put_sync(ctx->cal->dev);
 
        cal_release_buffers(ctx, VB2_BUF_STATE_QUEUED);
        return ret;
 {
        struct cal_ctx *ctx = vb2_get_drv_priv(vq);
 
-       cal_ctx_wr_dma_stop(ctx);
-       cal_ctx_disable_irqs(ctx);
+       cal_ctx_stop(ctx);
 
        v4l2_subdev_call(&ctx->phy->subdev, video, s_stream, 0);
 
-       cal_release_buffers(ctx, VB2_BUF_STATE_ERROR);
-
        pm_runtime_put_sync(ctx->cal->dev);
+
+       cal_release_buffers(ctx, VB2_BUF_STATE_ERROR);
 }
 
 static const struct vb2_ops cal_video_qops = {
 
  * ------------------------------------------------------------------
  */
 
-void cal_ctx_csi2_config(struct cal_ctx *ctx)
+static void cal_ctx_csi2_config(struct cal_ctx *ctx)
 {
        u32 val;
 
                cal_read(ctx->cal, CAL_CSI2_CTX0(ctx->index)));
 }
 
-void cal_ctx_pix_proc_config(struct cal_ctx *ctx)
+static void cal_ctx_pix_proc_config(struct cal_ctx *ctx)
 {
        u32 val, extract, pack;
 
                cal_read(ctx->cal, CAL_PIX_PROC(ctx->index)));
 }
 
-void cal_ctx_wr_dma_config(struct cal_ctx *ctx)
+static void cal_ctx_wr_dma_config(struct cal_ctx *ctx)
 {
        unsigned int stride = ctx->v_fmt.fmt.pix.bytesperline;
        u32 val;
        ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", cal_read(ctx->cal, CAL_CTRL));
 }
 
-void cal_ctx_wr_dma_addr(struct cal_ctx *ctx, dma_addr_t addr)
+/* Program the write DMA base address register for this context. */
+void cal_ctx_set_dma_addr(struct cal_ctx *ctx, dma_addr_t addr)
 {
        cal_write(ctx->cal, CAL_WR_DMA_ADDR(ctx->index), addr);
 }
 
-void cal_ctx_wr_dma_disable(struct cal_ctx *ctx)
+static void cal_ctx_wr_dma_disable(struct cal_ctx *ctx)
 {
        u32 val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->index));
 
        return stopped;
 }
 
-int cal_ctx_wr_dma_stop(struct cal_ctx *ctx)
+/*
+ * Start the context: reset the frame sequence counter, mark the DMA as
+ * running, program the CSI-2, pixel processing and write DMA hardware
+ * contexts, and enable the WDMA interrupts for this context.
+ */
+void cal_ctx_start(struct cal_ctx *ctx)
+{
+       ctx->sequence = 0;
+       ctx->dma.state = CAL_DMA_RUNNING;
+
+       /* Configure the CSI-2, pixel processing and write DMA contexts. */
+       cal_ctx_csi2_config(ctx);
+       cal_ctx_pix_proc_config(ctx);
+       cal_ctx_wr_dma_config(ctx);
+
+       /* Enable IRQ_WDMA_END and IRQ_WDMA_START. */
+       cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(1),
+                 CAL_HL_IRQ_MASK(ctx->index));
+       cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(2),
+                 CAL_HL_IRQ_MASK(ctx->index));
+}
+
+/*
+ * Stop the context: request a DMA stop and wait for it to complete,
+ * force-disabling the write DMA on timeout, then disable the WDMA
+ * interrupts and mark the DMA as stopped.
+ */
+void cal_ctx_stop(struct cal_ctx *ctx)
 {
        long timeout;
 
-       /* Request DMA stop and wait until it completes. */
+       /*
+        * Request DMA stop and wait until it completes. If completion times
+        * out, forcefully disable the DMA.
+        */
        spin_lock_irq(&ctx->dma.lock);
        ctx->dma.state = CAL_DMA_STOP_REQUESTED;
        spin_unlock_irq(&ctx->dma.lock);
                                     msecs_to_jiffies(500));
        if (!timeout) {
                ctx_err(ctx, "failed to disable dma cleanly\n");
-               return -ETIMEDOUT;
+               cal_ctx_wr_dma_disable(ctx);
        }
 
-       return 0;
-}
-
-void cal_ctx_enable_irqs(struct cal_ctx *ctx)
-{
-       /* Enable IRQ_WDMA_END and IRQ_WDMA_START. */
-       cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(1),
-                 CAL_HL_IRQ_MASK(ctx->index));
-       cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(2),
-                 CAL_HL_IRQ_MASK(ctx->index));
-}
-
-void cal_ctx_disable_irqs(struct cal_ctx *ctx)
-{
        /* Disable IRQ_WDMA_END and IRQ_WDMA_START. */
        cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(1),
                  CAL_HL_IRQ_MASK(ctx->index));
        cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(2),
                  CAL_HL_IRQ_MASK(ctx->index));
+
+       /*
+        * NOTE(review): dma.state is written without holding dma.lock here;
+        * presumably safe because the WDMA interrupts are now masked, so the
+        * IRQ handler can no longer race with this update — confirm.
+        */
+       ctx->dma.state = CAL_DMA_STOPPED;
 }
 
 /* ------------------------------------------------------------------
                buf = list_first_entry(&ctx->dma.queue, struct cal_buffer,
                                       list);
                addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
-               cal_ctx_wr_dma_addr(ctx, addr);
+               cal_ctx_set_dma_addr(ctx, addr);
 
                ctx->dma.pending = buf;
                list_del(&buf->list);
 
                                         unsigned int instance);
 void cal_camerarx_destroy(struct cal_camerarx *phy);
 
-void cal_ctx_csi2_config(struct cal_ctx *ctx);
-void cal_ctx_pix_proc_config(struct cal_ctx *ctx);
-void cal_ctx_wr_dma_config(struct cal_ctx *ctx);
-void cal_ctx_wr_dma_addr(struct cal_ctx *ctx, dma_addr_t addr);
-void cal_ctx_wr_dma_disable(struct cal_ctx *ctx);
-int cal_ctx_wr_dma_stop(struct cal_ctx *ctx);
-void cal_ctx_enable_irqs(struct cal_ctx *ctx);
-void cal_ctx_disable_irqs(struct cal_ctx *ctx);
+void cal_ctx_set_dma_addr(struct cal_ctx *ctx, dma_addr_t addr);
+void cal_ctx_start(struct cal_ctx *ctx);
+void cal_ctx_stop(struct cal_ctx *ctx);
 
 int cal_ctx_v4l2_register(struct cal_ctx *ctx);
 void cal_ctx_v4l2_unregister(struct cal_ctx *ctx);