size_t size, loff_t *pos)
 {
        struct amdgpu_ring *ring = file_inode(f)->i_private;
-       volatile u32 *mqd;
-       u32 *kbuf;
-       int r, i;
-       uint32_t value, result;
+       ssize_t bytes = min_t(ssize_t, ring->mqd_size - *pos, size);
+       void *from = ((u8 *)ring->mqd_ptr) + *pos;
 
-       if (*pos & 3 || size & 3)
-               return -EINVAL;
-
-       kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
-       if (!kbuf)
-               return -ENOMEM;
-
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0))
-               goto err_free;
-
-       r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
-       if (r)
-               goto err_unreserve;
-
-       /*
-        * Copy to local buffer to avoid put_user(), which might fault
-        * and acquire mmap_sem, under reservation_ww_class_mutex.
-        */
-       for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
-               kbuf[i] = mqd[i];
+       if (*pos > ring->mqd_size)
+               return 0;
 
-       amdgpu_bo_kunmap(ring->mqd_obj);
-       amdgpu_bo_unreserve(ring->mqd_obj);
+       if (copy_to_user(buf, from, bytes))
+               return -EFAULT;
 
-       result = 0;
-       while (size) {
-               if (*pos >= ring->mqd_size)
-                       break;
-
-               value = kbuf[*pos/4];
-               r = put_user(value, (uint32_t *)buf);
-               if (r)
-                       goto err_free;
-               buf += 4;
-               result += 4;
-               size -= 4;
-               *pos += 4;
-       }
-
-       kfree(kbuf);
-       return result;
-
-err_unreserve:
-       amdgpu_bo_unreserve(ring->mqd_obj);
-err_free:
-       kfree(kbuf);
-       return r;
+       *pos += bytes;
+       return bytes;
 }
 
 static const struct file_operations amdgpu_debugfs_mqd_fops = {
 
 static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
 {
        int r, i;
-       struct amdgpu_ring *ring;
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
-               ring = &adev->gfx.gfx_ring[i];
-
-               r = amdgpu_bo_reserve(ring->mqd_obj, false);
-               if (unlikely(r != 0))
-                       return r;
-
-               r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-               if (!r) {
-                       r = gfx_v10_0_kgq_init_queue(ring, false);
-                       amdgpu_bo_kunmap(ring->mqd_obj);
-                       ring->mqd_ptr = NULL;
-               }
-               amdgpu_bo_unreserve(ring->mqd_obj);
+               r = gfx_v10_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false);
                if (r)
                        return r;
        }
 
 static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring;
-       int r;
-
-       ring = &adev->gfx.kiq[0].ring;
-
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0))
-               return r;
-
-       r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (unlikely(r != 0)) {
-               amdgpu_bo_unreserve(ring->mqd_obj);
-               return r;
-       }
-
-       gfx_v10_0_kiq_init_queue(ring);
-       amdgpu_bo_kunmap(ring->mqd_obj);
-       ring->mqd_ptr = NULL;
-       amdgpu_bo_unreserve(ring->mqd_obj);
+       gfx_v10_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
        return 0;
 }
 
 static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = NULL;
-       int r = 0, i;
+       int i, r;
 
        gfx_v10_0_cp_compute_enable(adev, true);
 
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-               ring = &adev->gfx.compute_ring[i];
-
-               r = amdgpu_bo_reserve(ring->mqd_obj, false);
-               if (unlikely(r != 0))
-                       goto done;
-               r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-               if (!r) {
-                       r = gfx_v10_0_kcq_init_queue(ring, false);
-                       amdgpu_bo_kunmap(ring->mqd_obj);
-                       ring->mqd_ptr = NULL;
-               }
-               amdgpu_bo_unreserve(ring->mqd_obj);
+               r = gfx_v10_0_kcq_init_queue(&adev->gfx.compute_ring[i],
+                                            false);
                if (r)
-                       goto done;
+                       return r;
        }
 
-       r = amdgpu_gfx_enable_kcq(adev, 0);
-done:
-       return r;
+       return amdgpu_gfx_enable_kcq(adev, 0);
 }
 
 static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0)) {
-               DRM_ERROR("fail to resv mqd_obj\n");
-               return r;
-       }
-       r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (!r) {
-               r = gfx_v10_0_kgq_init_queue(ring, true);
-               amdgpu_bo_kunmap(ring->mqd_obj);
-               ring->mqd_ptr = NULL;
-       }
-       amdgpu_bo_unreserve(ring->mqd_obj);
+       r = gfx_v10_0_kgq_init_queue(ring, true);
        if (r) {
-               DRM_ERROR("fail to unresv mqd_obj\n");
+               DRM_ERROR("failed to init kgq\n");
                return r;
        }
 
                return r;
        }
 
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0)) {
-               dev_err(adev->dev, "fail to resv mqd_obj\n");
-               return r;
-       }
-       r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (!r) {
-               r = gfx_v10_0_kcq_init_queue(ring, true);
-               amdgpu_bo_kunmap(ring->mqd_obj);
-               ring->mqd_ptr = NULL;
-       }
-       amdgpu_bo_unreserve(ring->mqd_obj);
+       r = gfx_v10_0_kcq_init_queue(ring, true);
        if (r) {
-               dev_err(adev->dev, "fail to unresv mqd_obj\n");
+               dev_err(adev->dev, "failed to init kcq\n");
                return r;
        }
 
 
 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
 {
        int r, i;
-       struct amdgpu_ring *ring;
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
-               ring = &adev->gfx.gfx_ring[i];
-
-               r = amdgpu_bo_reserve(ring->mqd_obj, false);
-               if (unlikely(r != 0))
-                       return r;
-
-               r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-               if (!r) {
-                       r = gfx_v11_0_kgq_init_queue(ring, false);
-                       amdgpu_bo_kunmap(ring->mqd_obj);
-                       ring->mqd_ptr = NULL;
-               }
-               amdgpu_bo_unreserve(ring->mqd_obj);
+               r = gfx_v11_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false);
                if (r)
                        return r;
        }
 
 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring;
-       int r;
-
-       ring = &adev->gfx.kiq[0].ring;
-
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0))
-               return r;
-
-       r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (unlikely(r != 0)) {
-               amdgpu_bo_unreserve(ring->mqd_obj);
-               return r;
-       }
-
-       gfx_v11_0_kiq_init_queue(ring);
-       amdgpu_bo_kunmap(ring->mqd_obj);
-       ring->mqd_ptr = NULL;
-       amdgpu_bo_unreserve(ring->mqd_obj);
-       ring->sched.ready = true;
+       gfx_v11_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
+       adev->gfx.kiq[0].ring.sched.ready = true;
        return 0;
 }
 
 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = NULL;
-       int r = 0, i;
+       int i, r;
 
        if (!amdgpu_async_gfx_ring)
                gfx_v11_0_cp_compute_enable(adev, true);
 
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-               ring = &adev->gfx.compute_ring[i];
-
-               r = amdgpu_bo_reserve(ring->mqd_obj, false);
-               if (unlikely(r != 0))
-                       goto done;
-               r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-               if (!r) {
-                       r = gfx_v11_0_kcq_init_queue(ring, false);
-                       amdgpu_bo_kunmap(ring->mqd_obj);
-                       ring->mqd_ptr = NULL;
-               }
-               amdgpu_bo_unreserve(ring->mqd_obj);
+               r = gfx_v11_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
                if (r)
-                       goto done;
+                       return r;
        }
 
-       r = amdgpu_gfx_enable_kcq(adev, 0);
-done:
-       return r;
+       return amdgpu_gfx_enable_kcq(adev, 0);
 }
 
 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0)) {
-               dev_err(adev->dev, "fail to resv mqd_obj\n");
-               return r;
-       }
-       r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (!r) {
-               r = gfx_v11_0_kgq_init_queue(ring, true);
-               amdgpu_bo_kunmap(ring->mqd_obj);
-               ring->mqd_ptr = NULL;
-       }
-       amdgpu_bo_unreserve(ring->mqd_obj);
+       r = gfx_v11_0_kgq_init_queue(ring, true);
        if (r) {
-               dev_err(adev->dev, "fail to unresv mqd_obj\n");
+               dev_err(adev->dev, "failed to init kgq\n");
                return r;
        }
 
                return r;
        }
 
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0)) {
-               dev_err(adev->dev, "fail to resv mqd_obj\n");
-               return r;
-       }
-       r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (!r) {
-               r = gfx_v11_0_kcq_init_queue(ring, true);
-               amdgpu_bo_kunmap(ring->mqd_obj);
-               ring->mqd_ptr = NULL;
-       }
-       amdgpu_bo_unreserve(ring->mqd_obj);
+       r = gfx_v11_0_kcq_init_queue(ring, true);
        if (r) {
-               dev_err(adev->dev, "fail to unresv mqd_obj\n");
+               dev_err(adev->dev, "failed to init kcq\n");
                return r;
        }
        r = amdgpu_mes_map_legacy_queue(adev, ring);
 
 
 static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
 {
-       int r, i;
-       struct amdgpu_ring *ring;
+       int i, r;
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
-               ring = &adev->gfx.gfx_ring[i];
-
-               r = amdgpu_bo_reserve(ring->mqd_obj, false);
-               if (unlikely(r != 0))
-                       goto done;
-
-               r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-               if (!r) {
-                       r = gfx_v12_0_kgq_init_queue(ring, false);
-                       amdgpu_bo_kunmap(ring->mqd_obj);
-                       ring->mqd_ptr = NULL;
-               }
-               amdgpu_bo_unreserve(ring->mqd_obj);
+               r = gfx_v12_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false);
                if (r)
-                       goto done;
+                       return r;
        }
 
        r = amdgpu_gfx_enable_kgq(adev, 0);
        if (r)
-               goto done;
-
-       r = gfx_v12_0_cp_gfx_start(adev);
-       if (r)
-               goto done;
+               return r;
 
-done:
-       return r;
+       return gfx_v12_0_cp_gfx_start(adev);
 }
 
 static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
 
 static int gfx_v12_0_kiq_resume(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring;
-       int r;
-
-       ring = &adev->gfx.kiq[0].ring;
-
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0))
-               return r;
-
-       r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (unlikely(r != 0)) {
-               amdgpu_bo_unreserve(ring->mqd_obj);
-               return r;
-       }
-
-       gfx_v12_0_kiq_init_queue(ring);
-       amdgpu_bo_kunmap(ring->mqd_obj);
-       ring->mqd_ptr = NULL;
-       amdgpu_bo_unreserve(ring->mqd_obj);
-       ring->sched.ready = true;
+       gfx_v12_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
+       adev->gfx.kiq[0].ring.sched.ready = true;
        return 0;
 }
 
 static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = NULL;
-       int r = 0, i;
+       int i, r;
 
        if (!amdgpu_async_gfx_ring)
                gfx_v12_0_cp_compute_enable(adev, true);
 
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-               ring = &adev->gfx.compute_ring[i];
-
-               r = amdgpu_bo_reserve(ring->mqd_obj, false);
-               if (unlikely(r != 0))
-                       goto done;
-               r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-               if (!r) {
-                       r = gfx_v12_0_kcq_init_queue(ring, false);
-                       amdgpu_bo_kunmap(ring->mqd_obj);
-                       ring->mqd_ptr = NULL;
-               }
-               amdgpu_bo_unreserve(ring->mqd_obj);
+               r = gfx_v12_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
                if (r)
-                       goto done;
+                       return r;
        }
 
-       r = amdgpu_gfx_enable_kcq(adev, 0);
-done:
-       return r;
+       return amdgpu_gfx_enable_kcq(adev, 0);
 }
 
 static int gfx_v12_0_cp_resume(struct amdgpu_device *adev)
                return r;
        }
 
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0)) {
-               dev_err(adev->dev, "fail to resv mqd_obj\n");
-               return r;
-       }
-       r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (!r) {
-               r = gfx_v12_0_kgq_init_queue(ring, true);
-               amdgpu_bo_kunmap(ring->mqd_obj);
-               ring->mqd_ptr = NULL;
-       }
-       amdgpu_bo_unreserve(ring->mqd_obj);
+       r = gfx_v12_0_kgq_init_queue(ring, true);
        if (r) {
-               DRM_ERROR("fail to unresv mqd_obj\n");
+               dev_err(adev->dev, "failed to init kgq\n");
                return r;
        }
 
                return r;
        }
 
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0)) {
-               DRM_ERROR("fail to resv mqd_obj\n");
-               return r;
-       }
-       r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (!r) {
-               r = gfx_v12_0_kcq_init_queue(ring, true);
-               amdgpu_bo_kunmap(ring->mqd_obj);
-               ring->mqd_ptr = NULL;
-       }
-       amdgpu_bo_unreserve(ring->mqd_obj);
+       r = gfx_v12_0_kcq_init_queue(ring, true);
        if (r) {
-               DRM_ERROR("fail to unresv mqd_obj\n");
+               dev_err(adev->dev, "failed to init kcq\n");
                return r;
        }
        r = amdgpu_mes_map_legacy_queue(adev, ring);
 
 
 static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring;
-       int r;
-
-       ring = &adev->gfx.kiq[0].ring;
-
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0))
-               return r;
-
-       r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
-       if (unlikely(r != 0)) {
-               amdgpu_bo_unreserve(ring->mqd_obj);
-               return r;
-       }
-
-       gfx_v8_0_kiq_init_queue(ring);
-       amdgpu_bo_kunmap(ring->mqd_obj);
-       ring->mqd_ptr = NULL;
-       amdgpu_bo_unreserve(ring->mqd_obj);
+       gfx_v8_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
        return 0;
 }
 
 static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = NULL;
-       int r = 0, i;
+       int i, r;
 
        gfx_v8_0_cp_compute_enable(adev, true);
 
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-               ring = &adev->gfx.compute_ring[i];
-
-               r = amdgpu_bo_reserve(ring->mqd_obj, false);
-               if (unlikely(r != 0))
-                       goto done;
-               r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
-               if (!r) {
-                       r = gfx_v8_0_kcq_init_queue(ring);
-                       amdgpu_bo_kunmap(ring->mqd_obj);
-                       ring->mqd_ptr = NULL;
-               }
-               amdgpu_bo_unreserve(ring->mqd_obj);
+               r = gfx_v8_0_kcq_init_queue(&adev->gfx.compute_ring[i]);
                if (r)
-                       goto done;
+                       return r;
        }
 
        gfx_v8_0_set_mec_doorbell_range(adev);
 
-       r = gfx_v8_0_kiq_kcq_enable(adev);
-       if (r)
-               goto done;
-
-done:
-       return r;
+       return gfx_v8_0_kiq_kcq_enable(adev);
 }
 
 static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
 
 
 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring;
-       int r;
-
-       ring = &adev->gfx.kiq[0].ring;
-
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0))
-               return r;
-
-       r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (unlikely(r != 0)) {
-               amdgpu_bo_unreserve(ring->mqd_obj);
-               return r;
-       }
-
-       gfx_v9_0_kiq_init_queue(ring);
-       amdgpu_bo_kunmap(ring->mqd_obj);
-       ring->mqd_ptr = NULL;
-       amdgpu_bo_unreserve(ring->mqd_obj);
+       gfx_v9_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
        return 0;
 }
 
 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = NULL;
-       int r = 0, i;
+       int i, r;
 
        gfx_v9_0_cp_compute_enable(adev, true);
 
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-               ring = &adev->gfx.compute_ring[i];
-
-               r = amdgpu_bo_reserve(ring->mqd_obj, false);
-               if (unlikely(r != 0))
-                       goto done;
-               r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-               if (!r) {
-                       r = gfx_v9_0_kcq_init_queue(ring, false);
-                       amdgpu_bo_kunmap(ring->mqd_obj);
-                       ring->mqd_ptr = NULL;
-               }
-               amdgpu_bo_unreserve(ring->mqd_obj);
+               r = gfx_v9_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
                if (r)
-                       goto done;
+                       return r;
        }
 
-       r = amdgpu_gfx_enable_kcq(adev, 0);
-done:
-       return r;
+       return amdgpu_gfx_enable_kcq(adev, 0);
 }
 
 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
                return r;
        }
 
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0)){
-               dev_err(adev->dev, "fail to resv mqd_obj\n");
-               return r;
-       }
-       r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (!r) {
-               r = gfx_v9_0_kcq_init_queue(ring, true);
-               amdgpu_bo_kunmap(ring->mqd_obj);
-               ring->mqd_ptr = NULL;
-       }
-       amdgpu_bo_unreserve(ring->mqd_obj);
+       r = gfx_v9_0_kcq_init_queue(ring, true);
        if (r) {
-               dev_err(adev->dev, "fail to unresv mqd_obj\n");
+               dev_err(adev->dev, "failed to init kcq\n");
                return r;
        }
        spin_lock_irqsave(&kiq->ring_lock, flags);
 
 
 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
 {
-       struct amdgpu_ring *ring;
-       int r;
-
-       ring = &adev->gfx.kiq[xcc_id].ring;
-
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0))
-               return r;
-
-       r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (unlikely(r != 0)) {
-               amdgpu_bo_unreserve(ring->mqd_obj);
-               return r;
-       }
-
-       gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id);
-       amdgpu_bo_kunmap(ring->mqd_obj);
-       ring->mqd_ptr = NULL;
-       amdgpu_bo_unreserve(ring->mqd_obj);
+       gfx_v9_4_3_xcc_kiq_init_queue(&adev->gfx.kiq[xcc_id].ring, xcc_id);
        return 0;
 }
 
 static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
 {
-       struct amdgpu_ring *ring = NULL;
-       int r = 0, i;
+       struct amdgpu_ring *ring;
+       int i, r;
 
        gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
 
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-               ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
-
-               r = amdgpu_bo_reserve(ring->mqd_obj, false);
-               if (unlikely(r != 0))
-                       goto done;
-               r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-               if (!r) {
-                       r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
-                       amdgpu_bo_kunmap(ring->mqd_obj);
-                       ring->mqd_ptr = NULL;
-               }
-               amdgpu_bo_unreserve(ring->mqd_obj);
+               ring = &adev->gfx.compute_ring[i + xcc_id *
+                       adev->gfx.num_compute_rings];
+
+               r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
                if (r)
-                       goto done;
+                       return r;
        }
 
-       r = amdgpu_gfx_enable_kcq(adev, xcc_id);
-done:
-       return r;
+       return amdgpu_gfx_enable_kcq(adev, xcc_id);
 }
 
 static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
                        return r;
        }
 
-       r = amdgpu_bo_reserve(ring->mqd_obj, false);
-       if (unlikely(r != 0)){
-               dev_err(adev->dev, "fail to resv mqd_obj\n");
-               return r;
-       }
-       r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (!r) {
-               r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
-               amdgpu_bo_kunmap(ring->mqd_obj);
-               ring->mqd_ptr = NULL;
-       }
-       amdgpu_bo_unreserve(ring->mqd_obj);
+       r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
        if (r) {
-               dev_err(adev->dev, "fail to unresv mqd_obj\n");
+               dev_err(adev->dev, "failed to init kcq\n");
                return r;
        }
        spin_lock_irqsave(&kiq->ring_lock, flags);