MODULE_FIRMWARE(FIRMWARE_VEGA20);
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo);
+
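+/*
+ * Allocate a message BO in GTT, pinned and kmapped. On hardware that
+ * cannot reach GTT with 64-bit addresses the BO is migrated into the
+ * VRAM segment UVD can access instead. Returned unreserved on success.
+ */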
+static int amdgpu_uvd_create_msg_bo_helper(struct amdgpu_device *adev,
+                                          uint32_t size,
+                                          struct amdgpu_bo **bo_ptr)
+{
+       struct ttm_operation_ctx ctx = { true, false };
+       struct amdgpu_bo *bo = NULL;
+       void *addr;
+       int r;
+
+       r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_GTT,
+                                     &bo, NULL, &addr);
+       if (r)
+               return r;
+
+       if (adev->uvd.address_64_bit)
+               goto succ;
+
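+       /*
+        * This UVD block cannot address GTT with 64-bit addresses, so
+        * migrate the BO into the VRAM segment UVD can reach and map it
+        * again from there.
+        */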
+       amdgpu_bo_kunmap(bo);
+       amdgpu_bo_unpin(bo);
+       amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+       amdgpu_uvd_force_into_uvd_segment(bo);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+       if (r)
+               goto err;
+       r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
+       if (r)
+               goto err_pin;
+       r = amdgpu_bo_kmap(bo, &addr);
+       if (r)
+               goto err_kmap;
+succ:
+       amdgpu_bo_unreserve(bo);
+       *bo_ptr = bo;
+       return 0;
+err_kmap:
+       amdgpu_bo_unpin(bo);
+err_pin:
+err:
+       amdgpu_bo_unreserve(bo);
+       amdgpu_bo_unref(&bo);
+       return r;
+}
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 {
        if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
                adev->uvd.address_64_bit = true;
 
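+       /* pre-allocate the BO reused for the UVD create/destroy messages */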
+       r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo);
+       if (r)
+               return r;
+
        switch (adev->asic_type) {
        case CHIP_TONGA:
                adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
 
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
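+       /* look up the CPU mapping so amdgpu_bo_free_kernel() can unmap it */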
+       void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo);
        int i, j;
 
        drm_sched_entity_destroy(&adev->uvd.entity);
                for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
                        amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
        }
+       amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
        release_firmware(adev->uvd.fw);
 
        return 0;
        unsigned offset_idx = 0;
        unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
 
-       amdgpu_bo_kunmap(bo);
-       amdgpu_bo_unpin(bo);
-
-       if (!ring->adev->uvd.address_64_bit) {
-               struct ttm_operation_ctx ctx = { true, false };
-
-               amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
-               amdgpu_uvd_force_into_uvd_segment(bo);
-               r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-               if (r)
-                       goto err;
-       }
-
        r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
                                     AMDGPU_IB_POOL_DELAYED, &job);
        if (r)
-               goto err;
+               return r;
 
        if (adev->asic_type >= CHIP_VEGA10) {
                offset_idx = 1 + ring->me;
                        goto err_free;
        }
 
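+       /* attaching the fence requires the BO reservation to be held */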
+       amdgpu_bo_reserve(bo, true);
        amdgpu_bo_fence(bo, f, false);
        amdgpu_bo_unreserve(bo);
-       amdgpu_bo_unref(&bo);
 
        if (fence)
                *fence = dma_fence_get(f);
 
 err_free:
        amdgpu_job_free(job);
-
-err:
-       amdgpu_bo_unreserve(bo);
-       amdgpu_bo_unref(&bo);
        return r;
 }
 
                              struct dma_fence **fence)
 {
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_bo *bo = NULL;
+       struct amdgpu_bo *bo = adev->uvd.ib_bo;
        uint32_t *msg;
-       int r, i;
-
-       r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-                                     AMDGPU_GEM_DOMAIN_GTT,
-                                     &bo, NULL, (void **)&msg);
-       if (r)
-               return r;
+       int i;
 
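+       /* the shared message BO was kmapped at creation and stays mapped */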
+       msg = amdgpu_bo_kptr(bo);
        /* stitch together an UVD create msg */
        msg[0] = cpu_to_le32(0x00000de4);
        msg[1] = cpu_to_le32(0x00000000);
                msg[i] = cpu_to_le32(0x0);
 
        return amdgpu_uvd_send_msg(ring, bo, true, fence);
 }
 
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
        uint32_t *msg;
        int r, i;
 
-       r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-                                     AMDGPU_GEM_DOMAIN_GTT,
-                                     &bo, NULL, (void **)&msg);
-       if (r)
-               return r;
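+       /*
+        * Direct submission reuses the pre-allocated ib_bo; the delayed
+        * path allocates a throwaway BO and frees it after submission.
+        */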
+       if (direct) {
+               bo = adev->uvd.ib_bo;
+       } else {
+               r = amdgpu_uvd_create_msg_bo_helper(adev, 4096, &bo);
+               if (r)
+                       return r;
+       }
 
+       msg = amdgpu_bo_kptr(bo);
        /* stitch together an UVD destroy msg */
        msg[0] = cpu_to_le32(0x00000de4);
        msg[1] = cpu_to_le32(0x00000002);
        for (i = 4; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);
 
-       return amdgpu_uvd_send_msg(ring, bo, direct, fence);
+       r = amdgpu_uvd_send_msg(ring, bo, direct, fence);
+
+       if (!direct)
+               amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
+
+       return r;
 }
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)