"Gerd Hoffmann <kraxel@redhat.com>");
 MODULE_DESCRIPTION("User-space granted page access driver");
 
+#define GNTDEV_COPY_BATCH 16
+
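+/*
+ * Copy batches are allocated dynamically (rather than living on the stack)
+ * and kept on a per-priv free list (priv->batch, chained via ->next), which
+ * is protected by priv->batch_lock.
+ */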
+struct gntdev_copy_batch {
+       struct gnttab_copy ops[GNTDEV_COPY_BATCH];
+       struct page *pages[GNTDEV_COPY_BATCH];
+       s16 __user *status[GNTDEV_COPY_BATCH];
+       unsigned int nr_ops;
+       unsigned int nr_pages;
+       bool writeable;
+       struct gntdev_copy_batch *next;
+};
+
 static unsigned int limit = 64*1024;
 module_param(limit, uint, 0644);
 MODULE_PARM_DESC(limit,
        INIT_LIST_HEAD(&priv->maps);
        mutex_init(&priv->lock);
 
+       mutex_init(&priv->batch_lock);
+
 #ifdef CONFIG_XEN_GNTDEV_DMABUF
        priv->dmabuf_priv = gntdev_dmabuf_init(flip);
        if (IS_ERR(priv->dmabuf_priv)) {
 {
        struct gntdev_priv *priv = flip->private_data;
        struct gntdev_grant_map *map;
+       struct gntdev_copy_batch *batch;
 
        pr_debug("priv %p\n", priv);
 
        }
        mutex_unlock(&priv->lock);
 
+       mutex_lock(&priv->batch_lock);
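+       /* Free all copy batches cached for this file handle. */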
+       while (priv->batch) {
+               batch = priv->batch;
+               priv->batch = batch->next;
+               kfree(batch);
+       }
+       mutex_unlock(&priv->batch_lock);
+
 #ifdef CONFIG_XEN_GNTDEV_DMABUF
        gntdev_dmabuf_fini(priv->dmabuf_priv);
 #endif
        return rc;
 }
 
-#define GNTDEV_COPY_BATCH 16
-
-struct gntdev_copy_batch {
-       struct gnttab_copy ops[GNTDEV_COPY_BATCH];
-       struct page *pages[GNTDEV_COPY_BATCH];
-       s16 __user *status[GNTDEV_COPY_BATCH];
-       unsigned int nr_ops;
-       unsigned int nr_pages;
-       bool writeable;
-};
-
 static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
                                unsigned long *gfn)
 {
 static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
 {
        struct ioctl_gntdev_grant_copy copy;
-       struct gntdev_copy_batch batch;
+       struct gntdev_copy_batch *batch;
        unsigned int i;
        int ret = 0;
 
        if (copy_from_user(&copy, u, sizeof(copy)))
                return -EFAULT;
 
-       batch.nr_ops = 0;
-       batch.nr_pages = 0;
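+       /* Reuse a cached batch if one is available, otherwise allocate one. */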
+       mutex_lock(&priv->batch_lock);
+       if (!priv->batch) {
+               batch = kmalloc(sizeof(*batch), GFP_KERNEL);
+       } else {
+               batch = priv->batch;
+               priv->batch = batch->next;
+       }
+       mutex_unlock(&priv->batch_lock);
+       if (!batch)
+               return -ENOMEM;
+
+       batch->nr_ops = 0;
+       batch->nr_pages = 0;
 
        for (i = 0; i < copy.count; i++) {
                struct gntdev_grant_copy_segment seg;
 
                if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
                        ret = -EFAULT;
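+                       /* Release the user pages grabbed for this batch so far. */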
+                       gntdev_put_pages(batch);
                        goto out;
                }
 
-               ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
-               if (ret < 0)
+               ret = gntdev_grant_copy_seg(batch, &seg, &copy.segments[i].status);
+               if (ret < 0) {
+                       gntdev_put_pages(batch);
                        goto out;
+               }
 
                cond_resched();
        }
-       if (batch.nr_ops)
-               ret = gntdev_copy(&batch);
-       return ret;
+       if (batch->nr_ops)
+               ret = gntdev_copy(batch);
+
+ out:
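+       /* Put the batch back on the free list for reuse by later ioctls. */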
+       mutex_lock(&priv->batch_lock);
+       batch->next = priv->batch;
+       priv->batch = batch;
+       mutex_unlock(&priv->batch_lock);
 
-  out:
-       gntdev_put_pages(&batch);
        return ret;
 }