                vb2_buffer_done(&buf->vb.vb2_buf, state);
        }
 
-       if (ctx->dma.pending != ctx->dma.active)
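+       /* Return the pending buffer, if any, to vb2. */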
+       if (ctx->dma.pending) {
                vb2_buffer_done(&ctx->dma.pending->vb.vb2_buf, state);
-       vb2_buffer_done(&ctx->dma.active->vb.vb2_buf, state);
+               ctx->dma.pending = NULL;
+       }
 
-       ctx->dma.active = NULL;
-       ctx->dma.pending = NULL;
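+       /* Return the active buffer, if any, to vb2. */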
+       if (ctx->dma.active) {
+               vb2_buffer_done(&ctx->dma.active->vb.vb2_buf, state);
+               ctx->dma.active = NULL;
+       }
 
        spin_unlock_irq(&ctx->dma.lock);
 }
 
        spin_lock_irq(&ctx->dma.lock);
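+       /* Pop the first buffer off the DMA queue and mark it as pending. */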
        buf = list_first_entry(&ctx->dma.queue, struct cal_buffer, list);
-       ctx->dma.active = buf;
        ctx->dma.pending = buf;
        list_del(&buf->list);
        spin_unlock_irq(&ctx->dma.lock);
 
-       addr = vb2_dma_contig_plane_dma_addr(&ctx->dma.active->vb.vb2_buf, 0);
+       addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
        ctx->sequence = 0;
        ctx->dma.state = CAL_DMA_RUNNING;
 
 
                 */
                cal_ctx_wr_dma_disable(ctx);
                ctx->dma.state = CAL_DMA_STOP_PENDING;
-       } else if (!list_empty(&ctx->dma.queue) &&
-                  ctx->dma.active == ctx->dma.pending) {
+       } else if (!list_empty(&ctx->dma.queue) && !ctx->dma.pending) {
                /*
                 * Otherwise, if a new buffer is available, queue it to the
                 * hardware.
                 */
        }
 
        /* If a new buffer was queued, complete the current buffer. */
-       if (ctx->dma.active != ctx->dma.pending) {
+       if (ctx->dma.pending) {
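+               /* The pending buffer becomes the new active buffer. */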
                buf = ctx->dma.active;
                ctx->dma.active = ctx->dma.pending;
+               ctx->dma.pending = NULL;
        }
 
        spin_unlock(&ctx->dma.lock);