+/*
+ * Per-attachment state for a dma-buf attached to the system heap.
+ * NOTE(review): this patch embeds the sg_table in the attachment
+ * instead of allocating it separately, dropping one kzalloc/kfree
+ * pair per attach/detach cycle.
+ */
struct dma_heap_attachment {
        struct device *dev;
-       struct sg_table *table;
+       struct sg_table table;  /* embedded copy of the buffer's SG table */
        struct list_head list;
        bool mapped;
 };
+/* Allocation orders tried largest-first: 1MiB/64KiB/4KiB with 4K pages. */
 static const unsigned int orders[] = {8, 4, 0};
 #define NUM_ORDERS ARRAY_SIZE(orders)
 
-static struct sg_table *dup_sg_table(struct sg_table *table)
+/*
+ * dup_sg_table - duplicate the page list of @from into @to
+ * @from: source table; only page/length/offset of each entry are
+ *        copied, not any DMA-mapping state
+ * @to:   caller-provided (typically embedded) table, allocated here
+ *        with sg_alloc_table()
+ *
+ * Returns 0 on success or the negative errno from sg_alloc_table()
+ * (now propagated verbatim instead of being collapsed to -ENOMEM).
+ * On success the caller owns @to and must release it with
+ * sg_free_table().
+ */
+static int dup_sg_table(struct sg_table *from, struct sg_table *to)
 {
-       struct sg_table *new_table;
-       int ret, i;
        struct scatterlist *sg, *new_sg;
+       int ret, i;
 
-       new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
-       if (!new_table)
-               return ERR_PTR(-ENOMEM);
-
-       ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
-       if (ret) {
-               kfree(new_table);
-               return ERR_PTR(-ENOMEM);
-       }
+       ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL);
+       if (ret)
+               return ret;
 
-       new_sg = new_table->sgl;
-       for_each_sgtable_sg(table, sg, i) {
+       new_sg = to->sgl;
+       /* Walk every entry of the source and mirror it into @to. */
+       for_each_sgtable_sg(from, sg, i) {
                sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
                new_sg = sg_next(new_sg);
        }
 
-       return new_table;
+       return 0;
 }
 
 static int system_heap_attach(struct dma_buf *dmabuf,
 {
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;
-       struct sg_table *table;
+       int ret;
 
        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;
 
-       table = dup_sg_table(&buffer->sg_table);
-       if (IS_ERR(table)) {
+       ret = dup_sg_table(&buffer->sg_table, &a->table);
+       if (ret) {
                kfree(a);
-               return -ENOMEM;
+               return ret;
        }
 
-       a->table = table;
        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);
        a->mapped = false;
        list_del(&a->list);
        mutex_unlock(&buffer->lock);
 
-       sg_free_table(a->table);
-       kfree(a->table);
+       sg_free_table(&a->table);
        kfree(a);
 }
 
                                                enum dma_data_direction direction)
 {
        struct dma_heap_attachment *a = attachment->priv;
-       struct sg_table *table = a->table;
+       struct sg_table *table = &a->table;
        int ret;
 
        ret = dma_map_sgtable(attachment->dev, table, direction, 0);
        list_for_each_entry(a, &buffer->attachments, list) {
                if (!a->mapped)
                        continue;
-               dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+               dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
        }
        mutex_unlock(&buffer->lock);
 
        list_for_each_entry(a, &buffer->attachments, list) {
                if (!a->mapped)
                        continue;
-               dma_sync_sgtable_for_device(a->dev, a->table, direction);
+               dma_sync_sgtable_for_device(a->dev, &a->table, direction);
        }
        mutex_unlock(&buffer->lock);