struct amdgpu_bo *bo = NULL;
        struct amdgpu_bo_param bp;
        int r;
-       uint64_t gpu_addr_tmp = 0;
        void *cpu_ptr_tmp = NULL;
 
        memset(&bp, 0, sizeof(bp));
                goto allocate_mem_reserve_bo_failed;
        }
 
-       r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT,
-                               &gpu_addr_tmp);
+       r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
        if (r) {
                dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
                goto allocate_mem_pin_bo_failed;
        }
 
        *mem_obj = bo;
-       *gpu_addr = gpu_addr_tmp;
+       *gpu_addr = amdgpu_bo_gpu_offset(bo);
        *cpu_ptr = cpu_ptr_tmp;
 
        amdgpu_bo_unreserve(bo);
 
                goto bo_reserve_failed;
        }
 
-       ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+       ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
        if (ret) {
                pr_err("Failed to pin bo. ret %d\n", ret);
                goto pin_failed;
 
        r = amdgpu_bo_reserve(sobj, false);
        if (unlikely(r != 0))
                goto out_cleanup;
-       r = amdgpu_bo_pin(sobj, sdomain, &saddr);
+       r = amdgpu_bo_pin(sobj, sdomain);
+       saddr = amdgpu_bo_gpu_offset(sobj);
        amdgpu_bo_unreserve(sobj);
        if (r) {
                goto out_cleanup;
        r = amdgpu_bo_reserve(dobj, false);
        if (unlikely(r != 0))
                goto out_cleanup;
-       r = amdgpu_bo_pin(dobj, ddomain, &daddr);
+       r = amdgpu_bo_pin(dobj, ddomain);
+       daddr = amdgpu_bo_gpu_offset(dobj);
        amdgpu_bo_unreserve(dobj);
        if (r) {
                goto out_cleanup;
 
                        struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
                        r = amdgpu_bo_reserve(aobj, true);
                        if (r == 0) {
-                               r = amdgpu_bo_pin(aobj,
-                                                 AMDGPU_GEM_DOMAIN_VRAM,
-                                                 &amdgpu_crtc->cursor_addr);
+                               r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
                                if (r != 0)
                                        DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+                               amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
                                amdgpu_bo_unreserve(aobj);
                        }
                }
 
        struct amdgpu_bo *new_abo;
        unsigned long flags;
        u64 tiling_flags;
-       u64 base;
        int i, r;
 
        work = kzalloc(sizeof *work, GFP_KERNEL);
                goto cleanup;
        }
 
-       r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
+       r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
        if (unlikely(r != 0)) {
                DRM_ERROR("failed to pin new abo buffer before flip\n");
                goto unreserve;
        amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
        amdgpu_bo_unreserve(new_abo);
 
-       work->base = base;
+       work->base = amdgpu_bo_gpu_offset(new_abo);
        work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
                amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
 
 
        }
 
 
-       ret = amdgpu_bo_pin(abo, domain, NULL);
+       ret = amdgpu_bo_pin(abo, domain);
        if (ret) {
                amdgpu_bo_unreserve(abo);
                goto out_unref;
 
  */
 int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
 {
-       uint64_t gpu_addr;
        int r;
 
        r = amdgpu_bo_reserve(adev->gart.robj, false);
        if (unlikely(r != 0))
                return r;
-       r = amdgpu_bo_pin(adev->gart.robj,
-                               AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
+       r = amdgpu_bo_pin(adev->gart.robj, AMDGPU_GEM_DOMAIN_VRAM);
        if (r) {
                amdgpu_bo_unreserve(adev->gart.robj);
                return r;
        if (r)
                amdgpu_bo_unpin(adev->gart.robj);
        amdgpu_bo_unreserve(adev->gart.robj);
-       adev->gart.table_addr = gpu_addr;
+       adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.robj);
        return r;
 }
 
 
                goto error_free;
        }
 
-       r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+       r = amdgpu_bo_pin(*bo_ptr, domain);
        if (r) {
                dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
                goto error_unreserve;
        }
+       if (gpu_addr)
+               *gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
 
        if (cpu_addr) {
                r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
  * @domain: domain to be pinned to
  * @min_offset: the start of requested address range
  * @max_offset: the end of requested address range
- * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
  *
  * Pins the buffer object according to requested domain and address range. If
  * the memory is unbound gart memory, binds the pages into gart table. Adjusts
  * 0 for success or a negative error code on failure.
  */
 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
-                            u64 min_offset, u64 max_offset,
-                            u64 *gpu_addr)
+                            u64 min_offset, u64 max_offset)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = { false, false };
                        return -EINVAL;
 
                bo->pin_count++;
-               if (gpu_addr)
-                       *gpu_addr = amdgpu_bo_gpu_offset(bo);
 
                if (max_offset != 0) {
                        u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
        }
 
        bo->pin_count = 1;
-       if (gpu_addr != NULL)
-               *gpu_addr = amdgpu_bo_gpu_offset(bo);
 
        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
  * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
  * @bo: &amdgpu_bo buffer object to be pinned
  * @domain: domain to be pinned to
- * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
  *
  * A simple wrapper to amdgpu_bo_pin_restricted().
  * Provides a simpler API for buffers that do not have any strict restrictions
  * Returns:
  * 0 for success or a negative error code on failure.
  */
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
 {
-       return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
+       return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
 }
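
Illustrative sketch (not part of the patch): with the @gpu_addr parameter removed from amdgpu_bo_pin() and amdgpu_bo_pin_restricted(), callers obtain the address separately via amdgpu_bo_gpu_offset() once the pin has succeeded, as the hunks above do. The helper name example_pin_vram below is hypothetical.

static int example_pin_vram(struct amdgpu_bo *bo, u64 *gpu_addr)
{
        int r;

        /* the caller is assumed to hold the bo reservation */
        r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
        if (r)
                return r;

        /* the GPU offset is queried separately, only after a successful pin */
        *gpu_addr = amdgpu_bo_gpu_offset(bo);
        return 0;
}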
 
 /**
 
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
 void amdgpu_bo_unref(struct amdgpu_bo **bo);
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
-                            u64 min_offset, u64 max_offset,
-                            u64 *gpu_addr);
+                            u64 min_offset, u64 max_offset);
 int amdgpu_bo_unpin(struct amdgpu_bo *bo);
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
 int amdgpu_bo_init(struct amdgpu_device *adev);
 
        }
 
        /* pin buffer into GTT */
-       r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+       r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
        if (r)
                goto error_unreserve;
 
 
        r = amdgpu_bo_reserve(vram_obj, false);
        if (unlikely(r != 0))
                goto out_unref;
-       r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
+       r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM);
        if (r) {
                DRM_ERROR("Failed to pin VRAM object\n");
                goto out_unres;
        }
+       vram_addr = amdgpu_bo_gpu_offset(vram_obj);
        for (i = 0; i < n; i++) {
                void *gtt_map, *vram_map;
                void **gart_start, **gart_end;
                r = amdgpu_bo_reserve(gtt_obj[i], false);
                if (unlikely(r != 0))
                        goto out_lclean_unref;
-               r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr);
+               r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT);
                if (r) {
                        DRM_ERROR("Failed to pin GTT object %d\n", i);
                        goto out_lclean_unres;
                }
+               gart_addr = amdgpu_bo_gpu_offset(gtt_obj[i]);
 
                r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
 
                        AMDGPU_GEM_DOMAIN_VRAM,
                        adev->fw_vram_usage.start_offset,
                        (adev->fw_vram_usage.start_offset +
-                       adev->fw_vram_usage.size), NULL);
+                       adev->fw_vram_usage.size));
                if (r)
                        goto error_pin;
                r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
 
        if (unlikely(r != 0))
                return r;
 
-       if (atomic) {
-               fb_location = amdgpu_bo_gpu_offset(abo);
-       } else {
-               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+       if (!atomic) {
+               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
                if (unlikely(r != 0)) {
                        amdgpu_bo_unreserve(abo);
                        return -EINVAL;
                }
        }
+       fb_location = amdgpu_bo_gpu_offset(abo);
 
        amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
        amdgpu_bo_unreserve(abo);
                return ret;
        }
 
-       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
        amdgpu_bo_unreserve(aobj);
        if (ret) {
                DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
                drm_gem_object_put_unlocked(obj);
                return ret;
        }
+       amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
        dce_v10_0_lock_cursor(crtc, true);
 
 
        if (unlikely(r != 0))
                return r;
 
-       if (atomic) {
-               fb_location = amdgpu_bo_gpu_offset(abo);
-       } else {
-               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+       if (!atomic) {
+               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
                if (unlikely(r != 0)) {
                        amdgpu_bo_unreserve(abo);
                        return -EINVAL;
                }
        }
+       fb_location = amdgpu_bo_gpu_offset(abo);
 
        amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
        amdgpu_bo_unreserve(abo);
                return ret;
        }
 
-       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
        amdgpu_bo_unreserve(aobj);
        if (ret) {
                DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
                drm_gem_object_put_unlocked(obj);
                return ret;
        }
+       amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
        dce_v11_0_lock_cursor(crtc, true);
 
 
        if (unlikely(r != 0))
                return r;
 
-       if (atomic) {
-               fb_location = amdgpu_bo_gpu_offset(abo);
-       } else {
-               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+       if (!atomic) {
+               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
                if (unlikely(r != 0)) {
                        amdgpu_bo_unreserve(abo);
                        return -EINVAL;
                }
        }
+       fb_location = amdgpu_bo_gpu_offset(abo);
 
        amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
        amdgpu_bo_unreserve(abo);
                return ret;
        }
 
-       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
        amdgpu_bo_unreserve(aobj);
        if (ret) {
                DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
                drm_gem_object_put_unlocked(obj);
                return ret;
        }
+       amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
        dce_v6_0_lock_cursor(crtc, true);
 
 
        if (unlikely(r != 0))
                return r;
 
-       if (atomic) {
-               fb_location = amdgpu_bo_gpu_offset(abo);
-       } else {
-               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+       if (!atomic) {
+               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
                if (unlikely(r != 0)) {
                        amdgpu_bo_unreserve(abo);
                        return -EINVAL;
                }
        }
+       fb_location = amdgpu_bo_gpu_offset(abo);
 
        amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
        amdgpu_bo_unreserve(abo);
                return ret;
        }
 
-       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
        amdgpu_bo_unreserve(aobj);
        if (ret) {
                DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
                drm_gem_object_put_unlocked(obj);
                return ret;
        }
+       amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
        dce_v8_0_lock_cursor(crtc, true);
 
 
        else
                domain = AMDGPU_GEM_DOMAIN_VRAM;
 
-       r = amdgpu_bo_pin(rbo, domain, &afb->address);
+       r = amdgpu_bo_pin(rbo, domain);
        amdgpu_bo_unreserve(rbo);
 
        if (unlikely(r != 0)) {
                        DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
                return r;
        }
+       afb->address = amdgpu_bo_gpu_offset(rbo);
 
        amdgpu_bo_ref(rbo);