update the list first to avoid redundant checks.
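
With the signaled check done first, the loop now drops finished fences
before any per-ring handling, so they are gone from the hash on all
later iterations and calls. Sketch of the resulting loop (declarations
and the per-ring path trimmed):

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                struct dma_fence *f = e->fence;

                /* Signaled fences are removed from the hash right
                 * away and never examined again.
                 */
                if (dma_fence_is_signaled(f)) {
                        hash_del(&e->node);
                        dma_fence_put(f);
                        kmem_cache_free(amdgpu_sync_slab, e);
                        continue;
                }

                /* ... per-ring scheduled-fence handling, then
                 * return f for the first unsignaled fence ...
                 */
        }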
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
                struct dma_fence *f = e->fence;
                struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
+               if (dma_fence_is_signaled(f)) {
+                       hash_del(&e->node);
+                       dma_fence_put(f);
+                       kmem_cache_free(amdgpu_sync_slab, e);
+                       continue;
+               }
                if (ring && s_fence) {
                        /* For fences from the same ring it is sufficient
                         * when they are scheduled.
                         */
                        if (s_fence->sched == &ring->sched) {
                                if (dma_fence_is_signaled(&s_fence->scheduled))
                                        continue;

                                return &s_fence->scheduled;
                        }
                }
 
-               if (dma_fence_is_signaled(f)) {
-                       hash_del(&e->node);
-                       dma_fence_put(f);
-                       kmem_cache_free(amdgpu_sync_slab, e);
-                       continue;
-               }
-
                return f;
        }