int user_ring,
                          struct amdgpu_ring **out_ring)
 {
-       int r;
+       int r, i, j;
        int ring_type = amdgpu_hw_ip_to_ring_type(mapper->hw_ip);
+       int ring_blacklist[AMDGPU_MAX_RINGS];
+       struct amdgpu_ring *ring;
 
-       r = amdgpu_ring_lru_get(adev, ring_type, out_ring);
+       /* 0 is a valid ring index, so initialize to -1 */
+       memset(ring_blacklist, 0xff, sizeof(ring_blacklist));
+
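+       /* blacklist every ring this mapper has already handed out so each
+        * user ring maps to a distinct hw ring
+        */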
+       for (i = 0, j = 0; i < AMDGPU_MAX_RINGS; i++) {
+               ring = mapper->queue_map[i];
+               if (ring)
+                       ring_blacklist[j++] = ring->idx;
+       }
+
+       r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist,
+                               j, out_ring);
        if (r)
                return r;
 
 
        list_move_tail(&ring->lru_list, &adev->ring_lru_list);
 }
 
+static bool amdgpu_ring_is_blacklisted(struct amdgpu_ring *ring,
+                                      int *blacklist, int num_blacklist)
+{
+       int i;
+
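+       /* linear scan is fine: num_blacklist is at most AMDGPU_MAX_RINGS */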
+       for (i = 0; i < num_blacklist; i++) {
+               if (ring->idx == blacklist[i])
+                       return true;
+       }
+
+       return false;
+}
+
 /**
  * amdgpu_ring_lru_get - get the least recently used ring for a HW IP block
  *
  * @adev: amdgpu_device pointer
  * @type: amdgpu_ring_type enum
+ * @blacklist: array of blacklisted ring ids
+ * @num_blacklist: number of entries in @blacklist
  * @ring: output ring
  *
  * Retrieve the amdgpu_ring structure for the least recently used ring of
  * a specific IP block (all asics).
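+ * Rings whose ids appear in @blacklist are skipped during the search.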
  * Returns 0 on success, error on failure.
  */
-int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
-                       struct amdgpu_ring **ring)
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
+                       int num_blacklist, struct amdgpu_ring **ring)
 {
        struct amdgpu_ring *entry;
 
        *ring = NULL;
        spin_lock(&adev->ring_lru_list_lock);
        list_for_each_entry(entry, &adev->ring_lru_list, lru_list) {
-               if (entry->funcs->type == type) {
-                       *ring = entry;
-                       amdgpu_ring_lru_touch_locked(adev, *ring);
-                       break;
-               }
+               if (entry->funcs->type != type)
+                       continue;
+
+               if (amdgpu_ring_is_blacklisted(entry, blacklist, num_blacklist))
+                       continue;
+
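+               /* the list head is the least recently used ring, so take
+                * the first usable match
+                */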
+               *ring = entry;
+               amdgpu_ring_lru_touch_locked(adev, *ring);
+               break;
        }
        spin_unlock(&adev->ring_lru_list_lock);
 
 
                     unsigned ring_size, struct amdgpu_irq_src *irq_src,
                     unsigned irq_type);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
-int amdgpu_ring_lru_get(struct amdgpu_device *adev, int hw_ip,
-                       struct amdgpu_ring **ring);
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
+                       int num_blacklist, struct amdgpu_ring **ring);
 void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
 {