drm/i915: Add ww locking to dma-buf ops, v2.
author Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Tue, 23 Mar 2021 15:50:26 +0000 (16:50 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Wed, 24 Mar 2021 16:27:21 +0000 (17:27 +0100)
vmap uses pin_pages, but needs to take the ww lock; add
pin_pages_unlocked so the mapping is pinned under the correct lock.

Also add ww locking to begin/end cpu access.
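
The _unlocked pin variants are expected to wrap the usual ww
acquire/backoff loop, the same pattern open-coded in begin/end cpu
access below. A minimal sketch of that loop, assuming such a helper
follows the pattern (example_pin_pages_unlocked is a hypothetical
name for illustration, not the real helper body):

/*
 * Sketch only: illustrates the ww acquire/backoff retry loop this
 * patch uses, not the actual i915_gem_object_pin_pages_unlocked body.
 */
static int example_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	/* true = interruptible waits while acquiring the ww class */
	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);
	if (err == -EDEADLK) {
		/* lost the lock order to another ctx: unwind, retry */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}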

Changes since v1:
- Fix i915_gem_map_dma_buf by using pin_pages_unlocked().

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-38-maarten.lankhorst@linux.intel.com
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c

index 36e3c2765f4ccdc209006c45603e165aaf7f17b8..1d6f395d9391584faeadeb250256c7bcac8ec8a2 100644 (file)
@@ -25,7 +25,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
        struct scatterlist *src, *dst;
        int ret, i;
 
-       ret = i915_gem_object_pin_pages(obj);
+       ret = i915_gem_object_pin_pages_unlocked(obj);
        if (ret)
                goto err;
 
@@ -82,7 +82,7 @@ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        void *vaddr;
 
-       vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);
 
@@ -123,42 +123,48 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
 {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+       struct i915_gem_ww_ctx ww;
        int err;
 
-       err = i915_gem_object_pin_pages(obj);
-       if (err)
-               return err;
-
-       err = i915_gem_object_lock_interruptible(obj, NULL);
-       if (err)
-               goto out;
-
-       err = i915_gem_object_set_to_cpu_domain(obj, write);
-       i915_gem_object_unlock(obj);
-
-out:
-       i915_gem_object_unpin_pages(obj);
+       i915_gem_ww_ctx_init(&ww, true);
+retry:
+       err = i915_gem_object_lock(obj, &ww);
+       if (!err)
+               err = i915_gem_object_pin_pages(obj);
+       if (!err) {
+               err = i915_gem_object_set_to_cpu_domain(obj, write);
+               i915_gem_object_unpin_pages(obj);
+       }
+       if (err == -EDEADLK) {
+               err = i915_gem_ww_ctx_backoff(&ww);
+               if (!err)
+                       goto retry;
+       }
+       i915_gem_ww_ctx_fini(&ww);
        return err;
 }
 
 static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+       struct i915_gem_ww_ctx ww;
        int err;
 
-       err = i915_gem_object_pin_pages(obj);
-       if (err)
-               return err;
-
-       err = i915_gem_object_lock_interruptible(obj, NULL);
-       if (err)
-               goto out;
-
-       err = i915_gem_object_set_to_gtt_domain(obj, false);
-       i915_gem_object_unlock(obj);
-
-out:
-       i915_gem_object_unpin_pages(obj);
+       i915_gem_ww_ctx_init(&ww, true);
+retry:
+       err = i915_gem_object_lock(obj, &ww);
+       if (!err)
+               err = i915_gem_object_pin_pages(obj);
+       if (!err) {
+               err = i915_gem_object_set_to_gtt_domain(obj, false);
+               i915_gem_object_unpin_pages(obj);
+       }
+       if (err == -EDEADLK) {
+               err = i915_gem_ww_ctx_backoff(&ww);
+               if (!err)
+                       goto retry;
+       }
+       i915_gem_ww_ctx_fini(&ww);
        return err;
 }