return -EACCES;
 }
 
-static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_compute_prio(int32_t prio)
+static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_gfx_pipe_prio(int32_t prio)
 {
        switch (prio) {
        case AMDGPU_CTX_PRIORITY_HIGH:
                        ctx->init_priority : ctx->override_priority;
 
        switch (hw_ip) {
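+       /* gfx reuses the compute pipe priority mapping */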
+       case AMDGPU_HW_IP_GFX:
        case AMDGPU_HW_IP_COMPUTE:
-               hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio);
+               hw_prio = amdgpu_ctx_prio_to_gfx_pipe_prio(ctx_prio);
                break;
        case AMDGPU_HW_IP_VCE:
        case AMDGPU_HW_IP_VCN_ENC:
                                      amdgpu_ctx_to_drm_sched_prio(priority));
 
        /* set hw priority */
-       if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
+       if (hw_ip == AMDGPU_HW_IP_COMPUTE || hw_ip == AMDGPU_HW_IP_GFX) {
                hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
                hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
                scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
 
        }
 }
 
-static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
+static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
+{
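+       /* a second gfx pipe is only usable when async gfx rings are enabled */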
+       return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
+}
+
+static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
 {
        if (amdgpu_compute_multipipe != -1) {
                DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
        return adev->gfx.mec.num_mec > 1;
 }
 
+bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
+                                               struct amdgpu_ring *ring)
+{
+       int queue = ring->queue;
+       int pipe = ring->pipe;
+
+       /* Policy: use pipe1 queue0 as the high priority graphics queue if we
+        * have more than one gfx pipe.
+        */
+       if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
+           adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
+               int me = ring->me;
+               int bit;
+
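+               /* map me/pipe/queue to its ring index and confirm this ring owns the slot */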
+               bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
+               if (ring == &adev->gfx.gfx_ring[bit])
+                       return true;
+       }
+
+       return false;
+}
+
 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
                                               struct amdgpu_ring *ring)
 {
 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
 {
        int i, queue, pipe;
-       bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
+       bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
        int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
                                     adev->gfx.mec.num_queue_per_pipe,
                                     adev->gfx.num_compute_rings);
 
 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
 {
-       int i, queue, me;
-
-       for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
-               queue = i % adev->gfx.me.num_queue_per_pipe;
-               me = (i / adev->gfx.me.num_queue_per_pipe)
-                     / adev->gfx.me.num_pipe_per_me;
+       int i, queue, pipe;
+       bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
+       int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
+                                       adev->gfx.me.num_queue_per_pipe;
 
-               if (me >= adev->gfx.me.num_me)
-                       break;
+       if (multipipe_policy) {
                /* policy: amdgpu owns the first queue per pipe at this stage;
                 * will extend to multiple queues per pipe later */
-               if (me == 0 && queue < 1)
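+               /* walk pipe-major so queue 0 of every pipe is claimed first */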
+               for (i = 0; i < max_queues_per_me; i++) {
+                       pipe = i % adev->gfx.me.num_pipe_per_me;
+                       queue = (i / adev->gfx.me.num_pipe_per_me) %
+                               adev->gfx.me.num_queue_per_pipe;
+
+                       set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
+                                       adev->gfx.me.queue_bitmap);
+               }
+       } else {
+               for (i = 0; i < max_queues_per_me; ++i)
                        set_bit(i, adev->gfx.me.queue_bitmap);
        }
 
 
                                     int pipe, int queue);
 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
                                               struct amdgpu_ring *ring);
+bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
+                                               struct amdgpu_ring *ring);
 int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
                               int pipe, int queue);
 void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
 
         */
        prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;
 
-       if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
-               if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
-                       prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
-                       prop->hqd_queue_priority =
-                               AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
-               }
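+       /* high priority compute and gfx queues both get the maximum hw queue priority */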
+       if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
+           amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) ||
+           (ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
+           amdgpu_gfx_is_high_priority_graphics_queue(adev, ring))) {
+               prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+               prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
        }
 }
 
 
  * 2. Async ring
  */
 #define GFX10_NUM_GFX_RINGS_NV1X       1
-#define GFX10_NUM_GFX_RINGS_Sienna_Cichlid     1
+#define GFX10_NUM_GFX_RINGS_Sienna_Cichlid     2
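+/* the second ring backs the high priority gfx queue */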
 #define GFX10_MEC_HPD_SIZE     2048
 
 #define F32_CE_PROGRAM_RAM_SIZE                65536
 {
        struct amdgpu_ring *ring;
        unsigned int irq_type;
+       unsigned int hw_prio;
 
        ring = &adev->gfx.gfx_ring[ring_id];
 
        sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
        irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
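+       /* rings picked by the high priority queue policy init at high pipe priority */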
+       hw_prio = amdgpu_gfx_is_high_priority_graphics_queue(adev, ring) ?
+                       AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
        return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
-                            AMDGPU_RING_PRIO_DEFAULT, NULL);
+                               hw_prio, NULL);
 }
 
 static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        }
 }
 
+static void gfx_v10_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
+                                          struct v10_gfx_mqd *mqd,
+                                          struct amdgpu_mqd_prop *prop)
+{
+       bool priority = false;
+       u32 tmp;
+
+       /* set up default queue priority level
+        * 0x0 = low priority, 0x1 = high priority
+        */
+       if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
+               priority = true;
+
+       tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY);
+       tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, priority);
+       mqd->cp_gfx_hqd_queue_priority = tmp;
+}
+
 static int gfx_v10_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
                                  struct amdgpu_mqd_prop *prop)
 {
        tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
        mqd->cp_gfx_hqd_vmid = 0;
 
-       /* set up default queue priority level
-        * 0x0 = low priority, 0x1 = high priority */
-       tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY);
-       tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
-       mqd->cp_gfx_hqd_queue_priority = tmp;
+       /* set up gfx queue priority */
+       gfx_v10_0_gfx_mqd_set_priority(adev, mqd, prop);
 
        /* set up time quantum */
        tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM);