Not used any more since we now always use the scheduler.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
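
For reference only (not part of the patch): a minimal sketch of a direct
submission with the new owner-less interface. The example_submit_one_ib()
helper and its surrounding setup are hypothetical; only the
amdgpu_ib_schedule()/amdgpu_fence_emit() signatures come from this change.

        static int example_submit_one_ib(struct amdgpu_ring *ring,
                                         struct amdgpu_ib *ib,
                                         struct fence **f)
        {
                int r;

                /* No owner argument any more: for scheduled jobs the owner
                 * is carried by the amd_sched_fence and looked up through
                 * amdgpu_sync_get_owner() instead of being stored in the
                 * amdgpu_fence itself.
                 */
                r = amdgpu_ib_schedule(ring, 1, ib, NULL, f);
                if (r)
                        DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);

                return r;
        }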
        struct amdgpu_ring              *ring;
        uint64_t                        seq;
 
-       /* filp or special value for fence creator */
-       void                            *owner;
-
        wait_queue_t                    fence_wake;
 };
 
                                   unsigned irq_type);
 void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
 void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
-                     struct amdgpu_fence **fence);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
                  unsigned size, struct amdgpu_ib *ib);
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-                      struct amdgpu_ib *ib, void *owner,
-                      struct fence *last_vm_update,
+                      struct amdgpu_ib *ib, struct fence *last_vm_update,
                       struct fence **f);
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 
  * amdgpu_fence_emit - emit a fence on the requested ring
  *
  * @ring: ring the fence is associated with
- * @owner: creator of the fence
  * @fence: amdgpu fence object
  *
  * Emits a fence command on the requested ring (all asics).
  * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
-                     struct amdgpu_fence **fence)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence **fence)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       /* we are protected by the ring emission mutex */
        *fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
        if ((*fence) == NULL) {
                return -ENOMEM;
        }
        (*fence)->seq = ++ring->fence_drv.sync_seq;
        (*fence)->ring = ring;
-       (*fence)->owner = owner;
        fence_init(&(*fence)->base, &amdgpu_fence_ops,
                &ring->fence_drv.fence_queue.lock,
                adev->fence_context + ring->idx,
 
  * @adev: amdgpu_device pointer
  * @num_ibs: number of IBs to schedule
  * @ibs: IB objects to schedule
- * @owner: owner for creating the fences
  * @f: fence created during this submission
  *
  * Schedule an IB on the associated ring (all asics).
  * to SI there was just a DE IB.
  */
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-                      struct amdgpu_ib *ibs, void *owner,
-                      struct fence *last_vm_update,
+                      struct amdgpu_ib *ibs, struct fence *last_vm_update,
                       struct fence **f)
 {
        struct amdgpu_device *adev = ring->adev;
                        amdgpu_ring_emit_hdp_invalidate(ring);
        }
 
-       r = amdgpu_fence_emit(ring, owner, &ib->fence);
+       r = amdgpu_fence_emit(ring, &ib->fence);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                ring->current_ctx = old_ctx;
 
        }
 
        trace_amdgpu_sched_run_job(job);
-       r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job->owner,
+       r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
                               job->sync.last_vm_update, &fence);
        if (r) {
                DRM_ERROR("Error scheduling IBs (%d)\n", r);
 
  */
 static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
 {
-       struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
        struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
-       if (a_fence)
-               return a_fence->ring->adev == adev;
-
        if (s_fence) {
                struct amdgpu_ring *ring;
 
  */
 static void *amdgpu_sync_get_owner(struct fence *f)
 {
-       struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
        struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
        if (s_fence)
                return s_fence->owner;
-       else if (a_fence)
-               return a_fence->owner;
+
        return AMDGPU_FENCE_OWNER_UNDEFINED;
 }
 
 
        ib->length_dw = 16;
 
        if (direct) {
-               r = amdgpu_ib_schedule(ring, 1, ib,
-                                      AMDGPU_FENCE_OWNER_UNDEFINED, NULL, &f);
+               r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
                if (r)
                        goto err_free;
 
 
        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;
 
-       r = amdgpu_ib_schedule(ring, 1, ib, AMDGPU_FENCE_OWNER_UNDEFINED,
-                              NULL, &f);
+       r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
        if (r)
                goto err;
 
                ib->ptr[i] = 0x0;
 
        if (direct) {
-               r = amdgpu_ib_schedule(ring, 1, ib,
-                                      AMDGPU_FENCE_OWNER_UNDEFINED,
-                                      NULL, &f);
+               r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
                if (r)
                        goto err;
 
 
        ib.ptr[3] = 1;
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;
-       r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
-                              NULL, &f);
+       r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err1;
 
 
        ib.ptr[2] = 0xDEADBEEF;
        ib.length_dw = 3;
 
-       r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
-                              NULL, &f);
+       r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err2;
 
 
        ib.ptr[2] = 0xDEADBEEF;
        ib.length_dw = 3;
 
-       r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
-                              NULL, &f);
+       r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err2;
 
        ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
 
        /* schedule the ib on the ring */
-       r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
-                              NULL, &f);
+       r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r) {
                DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
                goto fail;
 
        ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
        ib.length_dw = 8;
 
-       r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
-                              NULL, &f);
+       r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err1;
 
 
        ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
        ib.length_dw = 8;
 
-       r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
-                              NULL, &f);
+       r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err1;