Add a drm_device argument to drm_prime_pages_to_sg(), so we can
call dma_max_mapping_size() to figure out the segment size limit
and call into __sg_alloc_table_from_pages() with the correct
limit.

This fixes virtio-gpu with SEV.  It may well fix other bugs too,
given that drm has totally ignored segment size limits so far ...
v2: place max_segment in drm driver not gem object.
v3: move max_segment next to the other gem fields.
v4: just use dma_max_mapping_size().
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20200907112425.15610-2-kraxel@redhat.com
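
For reference, the core of the change is the segment-size clamp in
drm_prime_pages_to_sg() (see the drm_prime.c hunk below).  A condensed
sketch, pulled out into a standalone helper purely for illustration
(drm_prime_max_segment() is a hypothetical name, not something the
patch adds):

        /* Sketch only: derive the per-device scatterlist segment limit.
         * dev may be NULL for callers without a drm_device at hand, in
         * which case we fall back to SCATTERLIST_MAX_SEGMENT. */
        static size_t drm_prime_max_segment(struct drm_device *dev)
        {
                size_t max_segment = 0;

                if (dev)
                        max_segment = dma_max_mapping_size(dev->dev);
                if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
                        max_segment = SCATTERLIST_MAX_SEGMENT;
                return max_segment;
        }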
 
 
        switch (bo->tbo.mem.mem_type) {
        case TTM_PL_TT:
-               sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+               sgt = drm_prime_pages_to_sg(obj->dev,
+                                           bo->tbo.ttm->pages,
                                            bo->tbo.num_pages);
                if (IS_ERR(sgt))
                        return sgt;
 
 
        WARN_ON(shmem->base.import_attach);
 
-       return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
+       return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
 
 
  *
  * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
  */
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
+struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+                                      struct page **pages, unsigned int nr_pages)
 {
        struct sg_table *sg = NULL;
+       size_t max_segment = 0;
        int ret;
 
        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!sg) {
                ret = -ENOMEM;
                goto out;
        }
 
-       ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-                               nr_pages << PAGE_SHIFT, GFP_KERNEL);
+       if (dev)
+               max_segment = dma_max_mapping_size(dev->dev);
+       if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
+               max_segment = SCATTERLIST_MAX_SEGMENT;
+       ret = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+                                         nr_pages << PAGE_SHIFT,
+                                         max_segment, GFP_KERNEL);
        if (ret)
                goto out;
 
 
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
                struct sg_table *sgt;
 
-               sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+               sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
+                                           etnaviv_obj->pages, npages);
                if (IS_ERR(sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt: %ld\n",
                                PTR_ERR(sgt));
 
        if (WARN_ON(!etnaviv_obj->pages))  /* should have already pinned! */
                return ERR_PTR(-EINVAL);
 
-       return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+       return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages);
 }
 
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
 
 
                msm_obj->pages = p;
 
-               msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+               msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
                if (IS_ERR(msm_obj->sgt)) {
                        void *ptr = ERR_CAST(msm_obj->sgt);
 
 
        if (WARN_ON(!msm_obj->pages))  /* should have already pinned! */
                return NULL;
 
-       return drm_prime_pages_to_sg(msm_obj->pages, npages);
+       return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages);
 }
 
 void *msm_gem_prime_vmap(struct drm_gem_object *obj)
 
        struct nouveau_bo *nvbo = nouveau_gem_object(obj);
        int npages = nvbo->bo.num_pages;
 
-       return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
+       return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages);
 }
 
 void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
 
        struct radeon_bo *bo = gem_to_radeon_bo(obj);
        int npages = bo->tbo.num_pages;
 
-       return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+       return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages);
 }
 
 void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
 
 
        rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
 
-       rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+       rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
+                                           rk_obj->pages, rk_obj->num_pages);
        if (IS_ERR(rk_obj->sgt)) {
                ret = PTR_ERR(rk_obj->sgt);
                goto err_put_pages;
        int ret;
 
        if (rk_obj->pages)
-               return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+               return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
 
        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
 
 
        bo->num_pages = bo->gem.size >> PAGE_SHIFT;
 
-       bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+       bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
        if (IS_ERR(bo->sgt)) {
                err = PTR_ERR(bo->sgt);
                goto put_pages;
 
 {
        struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
 
-       return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
+       return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT);
 }
 
 static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
 
        if (!xen_obj->pages)
                return ERR_PTR(-ENOMEM);
 
-       return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
+       return drm_prime_pages_to_sg(gem_obj->dev,
+                                    xen_obj->pages, xen_obj->num_pages);
 }
 
 struct drm_gem_object *
 
 int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
 
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
+struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+                                      struct page **pages, unsigned int nr_pages);
 struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
                                     int flags);