        int ret;
        unsigned long timeout;
        struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
+       struct vchiq_mmal_port *port = dev->capture.port;
 
        v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
                 __func__, dev);
                                      &dev->capture.frame_count,
                                      sizeof(dev->capture.frame_count));
 
-       /* wait for last frame to complete */
-       timeout = wait_for_completion_timeout(&dev->capture.frame_cmplt, HZ);
-       if (timeout == 0)
-               v4l2_err(&dev->v4l2_dev,
-                        "timed out waiting for frame completion\n");
-
        v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
                 "disabling connection\n");
 
                         ret);
        }
 
+       /* wait for all buffers to be returned */
+       while (atomic_read(&port->buffers_with_vpu)) {
+               v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+                        "%s: Waiting for buffers to be returned - %d outstanding\n",
+                        __func__, atomic_read(&port->buffers_with_vpu));
+               timeout = wait_for_completion_timeout(&dev->capture.frame_cmplt,
+                                                     HZ);
+               if (timeout == 0) {
+                       v4l2_err(&dev->v4l2_dev, "%s: Timeout waiting for buffers to be returned - %d outstanding\n",
+                                __func__,
+                                atomic_read(&port->buffers_with_vpu));
+                       break;
+               }
+       }
+
        if (disable_camera(dev) < 0)
                v4l2_err(&dev->v4l2_dev, "Failed to disable camera\n");
 }
 
        struct mmal_msg_context *msg_context =
                container_of(work, struct mmal_msg_context, u.bulk.work);
 
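+       /* The VPU has returned this buffer; drop it from the outstanding count */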
+       atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
+
        msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
                                            msg_context->u.bulk.port,
                                            msg_context->u.bulk.status,
        INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
                  buffer_to_host_work_cb);
 
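+       /* Buffer is being handed to the VPU; count it as outstanding until returned */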
+       atomic_inc(&port->buffers_with_vpu);
+
        /* prep the buffer from host message */
        memset(&m, 0xbc, sizeof(m));    /* just to make debug clearer */
 
 
        struct list_head buffers;
        /* lock to serialise adding and removing buffers from list */
        spinlock_t slock;
+
+       /* Count of buffers the VPU has yet to return */
+       atomic_t buffers_with_vpu;
        /* callback on buffer completion */
        vchiq_mmal_buffer_cb buffer_cb;
        /* callback context */