                /*
                 * We already have buffers allocated, so first check if they
                 * are not in use and can be freed.
                 */
+               mutex_lock(&q->mmap_lock);
                if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
+                       mutex_unlock(&q->mmap_lock);
                        dprintk(1, "memory in use, cannot free\n");
                        return -EBUSY;
                }
                /*
                 * Call queue_cancel to clean up any buffers in the PREPARED or
                 * QUEUED state which is possible if buffers were prepared or
                 * queued with a call to VIDIOC_PREPARE_BUF or VIDIOC_QBUF respectively.
                 */
                __vb2_queue_cancel(q);
                ret = __vb2_queue_free(q, q->num_buffers);
+               mutex_unlock(&q->mmap_lock);
                if (ret)
                        return ret;
 
                /*
                 * In case of REQBUFS(0) return immediately without calling
                 * driver's queue_setup() callback and allocating resources.
                 */
        }
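
Taken together, the hunks above put the buffers-in-use check and the actual free into a single q->mmap_lock critical section. A condensed sketch of the invariant, illustrative only (not vb2 source), reusing the helpers already visible above:

	/*
	 * Sketch: the shape of the critical section the patch creates.
	 * vb2_mmap() takes the same q->mmap_lock, so a racing mmap()
	 * either finishes first (and __buffers_in_use() reports it,
	 * giving -EBUSY) or blocks until the buffers are gone and then
	 * fails to find them.
	 */
	static int reqbufs_free_buffers(struct vb2_queue *q)
	{
		int ret;

		mutex_lock(&q->mmap_lock);
		if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
			mutex_unlock(&q->mmap_lock);
			return -EBUSY;	/* userspace still holds mappings */
		}
		ret = __vb2_queue_free(q, q->num_buffers);
		mutex_unlock(&q->mmap_lock);	/* new mmap() now sees no buffers */
		return ret;
	}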
 
+       mutex_lock(&q->mmap_lock);
        q->num_buffers = allocated_buffers;
 
        if (ret < 0) {
                /*
                 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
                 * from q->num_buffers.
                 */
                __vb2_queue_free(q, allocated_buffers);
+               mutex_unlock(&q->mmap_lock);
                return ret;
        }
+       mutex_unlock(&q->mmap_lock);
 
        /*
         * Return the number of successfully allocated buffers
         * to the userspace.
         */
                /*
                 * Either the driver has accepted a smaller number of buffers,
                 * or .queue_setup() returned an error
                 */
        }
 
+       mutex_lock(&q->mmap_lock);
        q->num_buffers += allocated_buffers;
 
        if (ret < 0) {
                /*
                 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
                 * from q->num_buffers.
                 */
                __vb2_queue_free(q, allocated_buffers);
+               mutex_unlock(&q->mmap_lock);
                return -ENOMEM;
        }
+       mutex_unlock(&q->mmap_lock);
 
        /*
         * Return the number of successfully allocated buffers
         * to the userspace.
         */
 static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 {
        struct vb2_queue *q = vb->vb2_queue;
-       struct rw_semaphore *mmap_sem;
        int ret;
 
        ret = __verify_length(vb, b);
                ret = __qbuf_mmap(vb, b);
                break;
        case V4L2_MEMORY_USERPTR:
-               /*
-                * In case of user pointer buffers vb2 allocators need to get
-                * direct access to userspace pages. This requires getting
-                * the mmap semaphore for read access in the current process
-                * structure. The same semaphore is taken before calling mmap
-                * operation, while both qbuf/prepare_buf and mmap are called
-                * by the driver or v4l2 core with the driver's lock held.
-                * To avoid an AB-BA deadlock (mmap_sem then driver's lock in
-                * mmap and driver's lock then mmap_sem in qbuf/prepare_buf),
-                * the videobuf2 core releases the driver's lock, takes
-                * mmap_sem and then takes the driver's lock again.
-                */
-               mmap_sem = &current->mm->mmap_sem;
-               call_void_qop(q, wait_prepare, q);
-               down_read(mmap_sem);
-               call_void_qop(q, wait_finish, q);
-
                ret = __qbuf_userptr(vb, b);
-
-               up_read(mmap_sem);
                break;
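
The block comment removed above documented the AB-BA inversion that the old code had to dance around. A minimal sketch of the two lock orders it described; drv_lock stands in for the driver's lock and the functions are made up:

	/* Illustrative only: the inversion behind the old wait_prepare()/
	 * wait_finish() dance, not real call paths. */
	static struct mutex *drv_lock;		/* hypothetical driver lock */

	static void mmap_path(void)		/* .mmap fop, old behaviour */
	{
		down_write(&current->mm->mmap_sem);	/* really taken by the mm core */
		mutex_lock(drv_lock);			/* order A: mmap_sem -> drv_lock */
		/* ... install the mapping ... */
		mutex_unlock(drv_lock);
		up_write(&current->mm->mmap_sem);
	}

	static void qbuf_path(void)		/* QBUF on a USERPTR buffer */
	{
		mutex_lock(drv_lock);			/* taken by the v4l2 ioctl core */
		down_read(&current->mm->mmap_sem);	/* order B: drv_lock -> mmap_sem */
		/* ... get_user_pages() ... */
		up_read(&current->mm->mmap_sem);
		mutex_unlock(drv_lock);
	}

Each path can block holding the lock the other needs. Now that the .mmap fop no longer takes the driver lock (see vb2_fop_mmap() below), __qbuf_userptr() may take mmap_sem under the driver lock without closing a cycle, which is why the workaround can simply be deleted.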
        case V4L2_MEMORY_DMABUF:
                ret = __qbuf_dmabuf(vb, b);
                return -EINVAL;
        }
 
+       mutex_lock(&q->mmap_lock);
        ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
+       mutex_unlock(&q->mmap_lock);
        if (ret)
                return ret;
 
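vb2_mmap() is entered with mmap_sem already held by the mm core, so q->mmap_lock nests strictly inside it. My reading of the global order the patch establishes, as an illustrative summary rather than source:

	/*
	 * Lock ordering after this patch (summary):
	 *
	 *   QBUF (USERPTR):  driver lock -> mmap_sem
	 *   mmap():          mmap_sem    -> q->mmap_lock
	 *   REQBUFS et al.:  driver lock -> q->mmap_lock
	 *
	 * All three embed into the single chain
	 *   driver lock -> mmap_sem -> q->mmap_lock
	 * so no AB-BA cycle is possible.
	 */
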
        unsigned long off = pgoff << PAGE_SHIFT;
        struct vb2_buffer *vb;
        unsigned int buffer, plane;
+       void *vaddr;
        int ret;
 
        if (q->memory != V4L2_MEMORY_MMAP) {
 
        vb = q->bufs[buffer];
 
-       return (unsigned long)vb2_plane_vaddr(vb, plane);
+       vaddr = vb2_plane_vaddr(vb, plane);
+       return vaddr ? (unsigned long)vaddr : -EINVAL;
 }
 EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
 #endif
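
The NULL check above matters because get_unmapped_area() returns addresses with negative errnos folded into the unsigned long, tested via IS_ERR_VALUE(); returning a casted NULL would look like valid address 0. A hedged caller-side sketch (check_plane_mapping is made up):

	static int check_plane_mapping(struct vb2_queue *q, unsigned long len,
				       unsigned long pgoff, unsigned long flags)
	{
		unsigned long addr = vb2_get_unmapped_area(q, 0, len, pgoff, flags);

		if (IS_ERR_VALUE(addr))		/* negative errno in disguise */
			return (int)addr;	/* e.g. -EINVAL for a NULL vaddr */
		pr_debug("plane reachable at %#lx\n", addr);
		return 0;
	}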
        INIT_LIST_HEAD(&q->queued_list);
        INIT_LIST_HEAD(&q->done_list);
        spin_lock_init(&q->done_lock);
+       mutex_init(&q->mmap_lock);
        init_waitqueue_head(&q->done_wq);
 
        if (q->buf_struct_size == 0)
 {
        __vb2_cleanup_fileio(q);
        __vb2_queue_cancel(q);
+       mutex_lock(&q->mmap_lock);
        __vb2_queue_free(q, q->num_buffers);
+       mutex_unlock(&q->mmap_lock);
 }
 EXPORT_SYMBOL_GPL(vb2_queue_release);
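
The lock is needed even on final teardown because a queue can be reachable through more than one open file handle, so a concurrent mmap() must be excluded while the buffers are freed. A sketch of typical driver wiring; struct my_dev and its fields are hypothetical:

	static int my_release(struct file *file)
	{
		struct my_dev *dev = video_drvdata(file);

		mutex_lock(&dev->lock);			/* hypothetical driver lock */
		if (file->private_data == dev->queue_owner) {
			vb2_queue_release(&dev->queue);	/* frees under q->mmap_lock */
			dev->queue_owner = NULL;
		}
		mutex_unlock(&dev->lock);
		return v4l2_fh_release(file);
	}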
 
 int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct video_device *vdev = video_devdata(file);
-       struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
-       int err;
 
-       if (lock && mutex_lock_interruptible(lock))
-               return -ERESTARTSYS;
-       err = vb2_mmap(vdev->queue, vma);
-       if (lock)
-               mutex_unlock(lock);
-       return err;
+       return vb2_mmap(vdev->queue, vma);
 }
 EXPORT_SYMBOL_GPL(vb2_fop_mmap);
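
Since vb2_mmap() now serializes internally on q->mmap_lock, a driver built on the stock helpers gets the lock-free mmap path automatically; the same reasoning removes the lock from vb2_fop_get_unmapped_area() just below. Illustrative wiring (the helpers named are real vb2/v4l2 symbols, the table itself is a sketch):

	static const struct v4l2_file_operations my_fops = {
		.owner		= THIS_MODULE,
		.open		= v4l2_fh_open,
		.release	= vb2_fop_release,
		.read		= vb2_fop_read,
		.poll		= vb2_fop_poll,
		.unlocked_ioctl	= video_ioctl2,
		.mmap		= vb2_fop_mmap,	/* no driver lock taken any more */
	#ifndef CONFIG_MMU
		.get_unmapped_area = vb2_fop_get_unmapped_area,
	#endif
	};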
 
                unsigned long len, unsigned long pgoff, unsigned long flags)
 {
        struct video_device *vdev = video_devdata(file);
-       struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
-       int ret;
 
-       if (lock && mutex_lock_interruptible(lock))
-               return -ERESTARTSYS;
-       ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
-       if (lock)
-               mutex_unlock(lock);
-       return ret;
+       return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
 }
 EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
 #endif