        job->uf_sequence = seq;
 
        amdgpu_job_free_resources(job);
+       amdgpu_ring_priority_get(job->ring,
+                                amd_sched_get_job_priority(&job->base));
 
        trace_amdgpu_cs_ioctl(job);
        amd_sched_entity_push_job(&job->base);
 
 {
        struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
+       amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->dep_sync);
        job->fence_ctx = entity->fence_context;
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
+       amdgpu_ring_priority_get(job->ring,
+                                amd_sched_get_job_priority(&job->base));
        amd_sched_entity_push_job(&job->base);
 
        return 0;
        /* if gpu reset, hw fence will be replaced here */
        dma_fence_put(job->fence);
        job->fence = dma_fence_get(fence);
+
        amdgpu_job_free_resources(job);
        return fence;
 }
 
                ring->funcs->end_use(ring);
 }
 
+/**
+ * amdgpu_ring_priority_put - restore a ring's priority
+ *
+ * @ring: amdgpu_ring structure holding the information
+ * @priority: priority of the request being released
+ *
+ * Release a request to execute at @priority.
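+ *
+ * Once the last request at @priority is released, the ring decays to the
+ * next lower priority level that still has jobs pending, but never below
+ * normal.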
+ */
+void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority)
+{
+       int i;
+
+       if (!ring->funcs->set_priority)
+               return;
+
+       if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
+               return;
+
+       /* nothing to restore if the job was at the default (normal) priority */
+       if (priority == AMD_SCHED_PRIORITY_NORMAL)
+               return;
+
+       mutex_lock(&ring->priority_mutex);
+       /* something higher prio is executing, no need to decay */
+       if (ring->priority > priority)
+               goto out_unlock;
+
+       /* decay priority to the next level with a job available */
+       for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
+               if (i == AMD_SCHED_PRIORITY_NORMAL
+                               || atomic_read(&ring->num_jobs[i])) {
+                       ring->priority = i;
+                       ring->funcs->set_priority(ring, i);
+                       break;
+               }
+       }
+
+out_unlock:
+       mutex_unlock(&ring->priority_mutex);
+}
+
+/**
+ * amdgpu_ring_priority_get - change the ring's priority
+ *
+ * @ring: amdgpu_ring structure holding the information
+ * @priority: target priority
+ *
+ * Request a ring's priority to be raised to @priority (refcounted).
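+ *
+ * Requests are counted per priority level in ring->num_jobs[] and are
+ * expected to be balanced by a matching amdgpu_ring_priority_put() once
+ * the job submitted at @priority is freed.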
+ */
+void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority)
+{
+       if (!ring->funcs->set_priority)
+               return;
+
+       atomic_inc(&ring->num_jobs[priority]);
+
+       mutex_lock(&ring->priority_mutex);
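+       /* already running at an equal or higher priority */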
+       if (priority <= ring->priority)
+               goto out_unlock;
+
+       ring->priority = priority;
+       ring->funcs->set_priority(ring, priority);
+
+out_unlock:
+       mutex_unlock(&ring->priority_mutex);
+}
+
 /**
  * amdgpu_ring_init - init driver ring struct.
  *
                     unsigned max_dw, struct amdgpu_irq_src *irq_src,
                     unsigned irq_type)
 {
-       int r;
+       int r, i;
        int sched_hw_submission = amdgpu_sched_hw_submission;
 
        /* Set the hw submission limit higher for KIQ because
        }
 
        ring->max_dw = max_dw;
+       ring->priority = AMD_SCHED_PRIORITY_NORMAL;
+       mutex_init(&ring->priority_mutex);
        INIT_LIST_HEAD(&ring->lru_list);
        amdgpu_ring_lru_touch(adev, ring);
 
+       for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
+               atomic_set(&ring->num_jobs[i], 0);
+
        if (amdgpu_debugfs_ring_init(adev, ring)) {
                DRM_ERROR("Failed to register debugfs file for rings !\n");
        }
 
 #ifndef __AMDGPU_RING_H__
 #define __AMDGPU_RING_H__
 
+#include <drm/amdgpu_drm.h>
 #include "gpu_scheduler.h"
 
 /* max number of rings */
 struct amdgpu_ring;
 struct amdgpu_ib;
 struct amdgpu_cs_parser;
+struct amdgpu_job;
 
 /*
  * Fences.
        void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
        void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
        void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+       /* priority functions */
+       void (*set_priority) (struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority);
 };
 
 struct amdgpu_ring {
        volatile u32            *cond_exe_cpu_addr;
        unsigned                vm_inv_eng;
        bool                    has_compute_vm_bug;
+
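+       /* outstanding priority requests per amd_sched_priority level */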
+       atomic_t                num_jobs[AMD_SCHED_PRIORITY_MAX];
+       struct mutex            priority_mutex;
+       /* protected by priority_mutex */
+       int                     priority;
+
 #if defined(CONFIG_DEBUG_FS)
        struct dentry *ent;
 #endif
 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
+void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority);
+void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned ring_size, struct amdgpu_irq_src *irq_src,
                     unsigned irq_type);
 
 bool amd_sched_dependency_optimized(struct dma_fence* fence,
                                    struct amd_sched_entity *entity);
 void amd_sched_job_kickout(struct amd_sched_job *s_job);
+
+static inline enum amd_sched_priority
+amd_sched_get_job_priority(struct amd_sched_job *job)
+{
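+       /* sched->sched_rq[] is indexed by priority, so the offset of the
+        * entity's run queue within it is the job's priority */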
+       return (job->s_entity->rq - job->sched->sched_rq);
+}
+
 #endif
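
For context, a ring backend opts in by providing the new set_priority callback
in its amdgpu_ring_funcs table; rings without it are simply skipped by the
get/put helpers above. A minimal illustrative sketch follows (the names
example_ring_set_priority and example_ring_funcs are hypothetical and not part
of this patch):

static void example_ring_set_priority(struct amdgpu_ring *ring,
                                      enum amd_sched_priority priority)
{
        /* program the hardware queue/pipe priority for this ring here */
}

static const struct amdgpu_ring_funcs example_ring_funcs = {
        /* ... existing callbacks ... */
        .set_priority = example_ring_set_priority,
};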