map->orig_phys == INVALID_PHYS_ADDR))
                        return;
 
-               addr = page_address(map->bounce_page) + offset;
-               do_bounce(map->orig_phys + offset, addr, sz, dir);
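+               /*
+                * The bounce page may now be a user-supplied page without a
+                * kernel direct mapping (e.g. highmem), so map it temporarily
+                * instead of using page_address().
+                */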
+               addr = kmap_local_page(map->bounce_page);
+               do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
+               kunmap_local(addr);
                size -= sz;
                iova += sz;
        }
 vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
 {
        struct vduse_bounce_map *map;
-       struct page *page;
+       struct page *page = NULL;
 
+       read_lock(&domain->bounce_lock);
        map = &domain->bounce_maps[iova >> PAGE_SHIFT];
-       if (!map->bounce_page)
-               return NULL;
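+       /* Bounce pages backed by user memory are not handed out from here */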
+       if (domain->user_bounce_pages || !map->bounce_page)
+               goto out;
 
        page = map->bounce_page;
        get_page(page);
+out:
+       read_unlock(&domain->bounce_lock);
 
        return page;
 }
 
 static void
-vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
+vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
 {
        struct vduse_bounce_map *map;
        unsigned long pfn, bounce_pfns;
        }
 }
 
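+/*
+ * Replace the kernel-allocated bounce pages with pages supplied by
+ * userspace. Kernel pages that still back in-flight mappings
+ * (orig_phys != INVALID_PHYS_ADDR) are copied into the corresponding
+ * user pages before being freed.
+ */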
+int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
+                                      struct page **pages, int count)
+{
+       struct vduse_bounce_map *map;
+       int i, ret;
+
+       /* Partial mapping is not supported for now */
+       if (count != (domain->bounce_size >> PAGE_SHIFT))
+               return -EINVAL;
+
+       write_lock(&domain->bounce_lock);
+       ret = -EEXIST;
+       if (domain->user_bounce_pages)
+               goto out;
+
+       for (i = 0; i < count; i++) {
+               map = &domain->bounce_maps[i];
+               if (map->bounce_page) {
+                       /* Copy kernel page to user page if it's in use */
+                       if (map->orig_phys != INVALID_PHYS_ADDR)
+                               memcpy_to_page(pages[i], 0,
+                                              page_address(map->bounce_page),
+                                              PAGE_SIZE);
+                       __free_page(map->bounce_page);
+               }
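+               /* Take a reference on the user page that now backs this slot */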
+               map->bounce_page = pages[i];
+               get_page(pages[i]);
+       }
+       domain->user_bounce_pages = true;
+       ret = 0;
+out:
+       write_unlock(&domain->bounce_lock);
+
+       return ret;
+}
+
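+/*
+ * Detach the userspace bounce pages. Any page that still backs an
+ * in-flight mapping is copied into a newly allocated kernel page so
+ * its contents stay reachable after the user pages are released.
+ */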
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
+{
+       struct vduse_bounce_map *map;
+       unsigned long i, count;
+
+       write_lock(&domain->bounce_lock);
+       if (!domain->user_bounce_pages)
+               goto out;
+
+       count = domain->bounce_size >> PAGE_SHIFT;
+       for (i = 0; i < count; i++) {
+               struct page *page = NULL;
+
+               map = &domain->bounce_maps[i];
+               if (WARN_ON(!map->bounce_page))
+                       continue;
+
+               /* Copy user page to kernel page if it's in use */
+               if (map->orig_phys != INVALID_PHYS_ADDR) {
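+                       /*
+                        * The bounce lock is held, so the allocation can't
+                        * sleep; the bounced data must not be lost, hence
+                        * __GFP_NOFAIL.
+                        */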
+                       page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
+                       memcpy_from_page(page_address(page),
+                                        map->bounce_page, 0, PAGE_SIZE);
+               }
+               put_page(map->bounce_page);
+               map->bounce_page = page;
+       }
+       domain->user_bounce_pages = false;
+out:
+       write_unlock(&domain->bounce_lock);
+}
+
 void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain)
 {
        if (!domain->bounce_map)
        if (vduse_domain_init_bounce_map(domain))
                goto err;
 
+       read_lock(&domain->bounce_lock);
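+       /*
+        * The read lock keeps the bounce pages from being swapped between
+        * kernel and user pages while the mapping is set up and bounced.
+        */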
        if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
-               goto err;
+               goto err_unlock;
 
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
 
+       read_unlock(&domain->bounce_lock);
+
        return iova;
+err_unlock:
+       read_unlock(&domain->bounce_lock);
 err:
        vduse_domain_free_iova(iovad, iova, size);
        return DMA_MAPPING_ERROR;
 {
        struct iova_domain *iovad = &domain->stream_iovad;
 
+       read_lock(&domain->bounce_lock);
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
 
        vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
+       read_unlock(&domain->bounce_lock);
        vduse_domain_free_iova(iovad, dma_addr, size);
 }
 
 
        spin_lock(&domain->iotlb_lock);
        vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
-       vduse_domain_free_bounce_pages(domain);
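+       /* Give back any user bounce pages, then free the kernel copies */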
+       vduse_domain_remove_user_bounce_pages(domain);
+       vduse_domain_free_kernel_bounce_pages(domain);
        spin_unlock(&domain->iotlb_lock);
        put_iova_domain(&domain->stream_iovad);
        put_iova_domain(&domain->consistent_iovad);
                goto err_file;
 
        domain->file = file;
+       rwlock_init(&domain->bounce_lock);
        spin_lock_init(&domain->iotlb_lock);
        init_iova_domain(&domain->stream_iovad,
                        PAGE_SIZE, IOVA_START_PFN);