* 2) at least one destination buffer has to be queued,
  * 3) streaming has to be on.
  *
+ * If a queue is buffered (for example a decoder hardware ringbuffer that has
+ * to be drained before doing streamoff), allow scheduling without v4l2 buffers
+ * on that queue.
+ *
  * There may also be additional, custom requirements. In such case the driver
  * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
  * return 1 if the instance is ready.
        }
 
        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
-       if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
+       if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
+           && !m2m_ctx->out_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                        flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                return;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-       if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
+       if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
+           && !m2m_ctx->cap_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
                                        flags_cap);
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
 
        struct list_head        rdy_queue;
        spinlock_t              rdy_spinlock;
        u8                      num_rdy;
+       bool                    buffered;
 };
 
 struct v4l2_m2m_ctx {
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
 
+/**
+ * v4l2_m2m_set_src_buffered() - set the buffered state of the source queue
+ * @m2m_ctx: m2m context
+ * @buffered: if true, allow a job to be scheduled even when no buffers are
+ *            queued on the source (OUTPUT) queue
+ *
+ * With @buffered set, the job scheduler no longer requires a ready buffer on
+ * this queue (see the list_empty()/buffered check in the scheduling path).
+ * Intended for devices such as decoders with a hardware ringbuffer that has
+ * to be drained before streamoff.
+ */
+static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
+                                            bool buffered)
+{
+       m2m_ctx->out_q_ctx.buffered = buffered;
+}
+
+/**
+ * v4l2_m2m_set_dst_buffered() - set the buffered state of the destination queue
+ * @m2m_ctx: m2m context
+ * @buffered: if true, allow a job to be scheduled even when no buffers are
+ *            queued on the destination (CAPTURE) queue
+ *
+ * With @buffered set, the job scheduler no longer requires a ready buffer on
+ * this queue (see the list_empty()/buffered check in the scheduling path).
+ * Intended for devices such as decoders with a hardware ringbuffer that has
+ * to be drained before streamoff.
+ */
+static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
+                                            bool buffered)
+{
+       m2m_ctx->cap_q_ctx.buffered = buffered;
+}
+
 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
 
 void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb);