void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
                                             uint64_t throttle_bitmask)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd;
        /*
         * ThermalThrottle msg = throttle_bitmask(8):
         *                       thermal_interrupt_count(16):
         * 1 byte event + 1 byte space + 8 byte throttle_bitmask +
         * 1 byte : + 16 byte count + 1 byte \n + 1 byte \0 = 29
         */
        char fifo_in[29];
        int len;
 
        len = snprintf(fifo_in, sizeof(fifo_in), "%x %llx:%llx\n",
                       KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask,
-                      atomic64_read(&adev->smu.throttle_int_counter));
+                      atomic64_read(&dev->adev->smu.throttle_int_counter));
 
        add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len);
 }
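
(For context: a userspace client obtains the SMI event fd via the AMDKFD_IOC_SMI_EVENTS ioctl and read()s newline-terminated records in exactly the format built by the snprintf above. A minimal parser sketch, assuming only the "%x %llx:%llx\n" layout from this hunk; parse_throttle_event is our name, not a kernel symbol.)

    #include <stdio.h>
    #include <inttypes.h>

    /* Parse one SMI record of the form "<event-hex> <mask-hex>:<count-hex>\n",
     * as emitted by kfd_smi_event_update_thermal_throttling() above. */
    static int parse_throttle_event(const char *line)
    {
            unsigned int event;
            uint64_t mask, count;

            if (sscanf(line, "%x %" SCNx64 ":%" SCNx64,
                       &event, &mask, &count) != 3)
                    return -1;
            printf("event %#x: throttle mask %#" PRIx64 ", %" PRIu64 " thermal interrupts\n",
                   event, mask, count);
            return 0;
    }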
 
 void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd;
        struct amdgpu_task_info task_info;
        /* VmFault msg = (hex)uint32_pid(8) + :(1) + task name(16) = 25 */
        /* 1 byte event + 1 byte space + 25 bytes msg + 1 byte \n +
         * 1 byte \0 = 29 */
        if (list_empty(&dev->smi_clients))
                return;
 
        memset(&task_info, 0, sizeof(struct amdgpu_task_info));
-       amdgpu_vm_get_task_info(adev, pasid, &task_info);
+       amdgpu_vm_get_task_info(dev->adev, pasid, &task_info);
        /* Report VM faults from user applications, not retry from kernel */
        if (!task_info.pid)
                return;
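
(Every hunk in this patch is the same mechanical conversion: struct kfd_dev now carries a typed amdgpu_device back-pointer, so the "(struct amdgpu_device *)dev->kgd" casts, and the local adev temporaries they forced, can be dropped. A sketch of the struct change assumed by these hunks; the member shown is illustrative, the real definition lives in kfd_priv.h.)

    struct kfd_dev {
            struct amdgpu_device *adev;     /* typed back-pointer; replaces the
                                             * opaque kgd handle casts removed
                                             * throughout this patch */
            /* ... remaining members unchanged ... */
    };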
 
 
        for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
                struct kfd_process_device *pdd;
-               struct amdgpu_device *adev;
 
                pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
                pdd = kfd_process_device_from_gpuidx(p, gpuidx);
                if (!pdd) {
                        pr_debug("failed to find device idx %d\n", gpuidx);
                        return -EINVAL;
                }
-               adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-               r = svm_range_dma_map_dev(adev, prange, offset, npages,
+               r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
                                          hmm_pfns, gpuidx);
                if (r)
                        break;
                return NULL;
        }
 
-       return (struct amdgpu_device *)pdd->dev->kgd;
+       return pdd->dev->adev;
 }
 
 struct kfd_process_device *
 svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
 {
 
        p = container_of(prange->svms, struct kfd_process, svms);
 
-       r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx);
+       r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx);
        if (r) {
                pr_debug("failed to get device id by adev %p\n", adev);
                return NULL;
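
(The kfd_process_gpuid_from_kgd -> kfd_process_gpuid_from_adev rename implies the prototype below, reconstructed from the call sites in this patch; the authoritative declaration lives in kfd_priv.h.)

    /* Look up the user-visible GPU id and the per-process device index
     * that correspond to the given amdgpu device; 0 on success, negative
     * on failure. (Assumed signature.) */
    int kfd_process_gpuid_from_adev(struct kfd_process *p,
                                    struct amdgpu_device *adev,
                                    uint32_t *gpuid, int *gpuidx);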
 {
        struct kfd_process_device *pdd;
        struct amdgpu_device *bo_adev;
-       struct amdgpu_device *adev;
        struct kfd_process *p;
        struct dma_fence *fence = NULL;
        uint32_t gpuidx;
                        pr_debug("failed to find device idx %d\n", gpuidx);
                        return -EINVAL;
                }
-               adev = (struct amdgpu_device *)pdd->dev->kgd;
 
                pdd = kfd_bind_process_to_device(pdd->dev, p);
                if (IS_ERR(pdd))
                        return -EINVAL;
 
-               if (bo_adev && adev != bo_adev &&
-                   !amdgpu_xgmi_same_hive(adev, bo_adev)) {
+               if (bo_adev && pdd->dev->adev != bo_adev &&
+                   !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
                        pr_debug("cannot map to device idx %d\n", gpuidx);
                        continue;
                }
 
-               r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
+               r = svm_range_map_to_gpu(pdd->dev->adev, drm_priv_to_vm(pdd->drm_priv),
                                         prange, offset, npages, readonly,
                                         prange->dma_addr[gpuidx],
                                         bo_adev, wait ? &fence : NULL);
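
(The bo_adev test above encodes the remote-mapping rule; restated as a standalone predicate for clarity, with a helper name of our own: a range backed by VRAM on bo_adev is mapped into another GPU's page tables only when both devices share an XGMI hive, and any GPU failing the test is skipped, to fault and migrate on first access instead.)

    static bool svm_can_map_remote(struct amdgpu_device *adev,
                                   struct amdgpu_device *bo_adev)
    {
            /* No VRAM BO yet, same device, or XGMI peers: mapping is legal. */
            return !bo_adev || adev == bo_adev ||
                   amdgpu_xgmi_same_hive(adev, bo_adev);
    }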
 static int svm_range_reserve_bos(struct svm_validate_context *ctx)
 {
        struct kfd_process_device *pdd;
-       struct amdgpu_device *adev;
        struct amdgpu_vm *vm;
        uint32_t gpuidx;
        int r;
                        pr_debug("failed to find device idx %d\n", gpuidx);
                        return -EINVAL;
                }
-               adev = (struct amdgpu_device *)pdd->dev->kgd;
                vm = drm_priv_to_vm(pdd->drm_priv);
 
                ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
                        r = -EINVAL;
                        goto unreserve_out;
                }
-               adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-               r = amdgpu_vm_validate_pt_bos(adev, drm_priv_to_vm(pdd->drm_priv),
+               r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
+                                             drm_priv_to_vm(pdd->drm_priv),
                                              svm_range_bo_validate, NULL);
                if (r) {
                        pr_debug("failed %d validate pt bos\n", r);
 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
 {
        struct kfd_process_device *pdd;
-       struct amdgpu_device *adev;
 
        pdd = kfd_process_device_from_gpuidx(p, gpuidx);
-       adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-       return SVM_ADEV_PGMAP_OWNER(adev);
+       return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
 }
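
(kfd_svm_page_owner() supplies hmm_range_fault()'s dev_private_owner key: device-private pages are only handed back in place when the owner matches, otherwise they are migrated to system memory first. The macro keys ownership by XGMI hive when one exists, so hive peers can access each other's VRAM pages directly. Sketch of the definition as we recall it from kfd_svm.h; verify there.)

    #define SVM_ADEV_PGMAP_OWNER(adev) \
            ((adev)->hive ? (void *)(adev)->hive : (void *)(adev))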
 
 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
 {
        struct kfd_process_device *pdd;
-       struct amdgpu_device *adev;
        struct kfd_process *p;
        uint32_t i;
 
                        continue;
 
                pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
-               adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-               amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1);
+               amdgpu_ih_wait_on_checkpoint_process(pdd->dev->adev,
+                                                    &pdd->dev->adev->irq.ih1);
                pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
        }
 }
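
(Semantics assumed for the wait above: amdgpu_ih_wait_on_checkpoint_process() snapshots the write pointer of the given IH ring, here ih1, which carries retry faults, and blocks until the ring's read pointer passes that checkpoint, i.e. until every retry fault already queued at call time has been handled.)

    int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
                                             struct amdgpu_ih_ring *ih);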
 
        p = container_of(prange->svms, struct kfd_process, svms);
 
-       r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, gpuidx);
+       r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx);
        if (r < 0) {
                pr_debug("failed to get gpuid from kgd\n");
                return -1;
                pr_debug("Failed to create prange in address [0x%llx]\n", addr);
                return NULL;
        }
-       if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
+       if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
                pr_debug("failed to get gpuid from kgd\n");
                svm_range_free(prange);
                return NULL;
                uint32_t gpuid;
                int r;
 
-               r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
+               r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx);
                if (r < 0)
                        return;
        }
        uint32_t best_loc = prange->prefetch_loc;
        struct kfd_process_device *pdd;
        struct amdgpu_device *bo_adev;
-       struct amdgpu_device *adev;
        struct kfd_process *p;
        uint32_t gpuidx;
 
                        pr_debug("failed to get device by idx 0x%x\n", gpuidx);
                        continue;
                }
-               adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-               if (adev == bo_adev)
+               if (pdd->dev->adev == bo_adev)
                        continue;
 
-               if (!amdgpu_xgmi_same_hive(adev, bo_adev)) {
+               if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
                        best_loc = 0;
                        break;
                }
 
        void *crat_image = NULL;
        size_t image_size = 0;
        int proximity_domain;
-       struct amdgpu_device *adev;
 
        INIT_LIST_HEAD(&temp_topology_device_list);
 
 
        proximity_domain = atomic_inc_return(&topology_crat_proximity_domain);
 
-       adev = (struct amdgpu_device *)(gpu->kgd);
-
        /* Include the CPU in xGMI hive if xGMI connected by assigning it the hive ID. */
-       if (gpu->hive_id && adev->gmc.xgmi.connected_to_cpu) {
+       if (gpu->hive_id && gpu->adev->gmc.xgmi.connected_to_cpu) {
                struct kfd_topology_device *top_dev;
 
                down_read(&topology_lock);
 
        /* kfd only concerns sram ecc on GFX and HBM ecc on UMC */
        dev->node_props.capability |=
-               ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
+               ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
                HSA_CAP_SRAM_EDCSUPPORTED : 0;
-       dev->node_props.capability |= ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
+       dev->node_props.capability |=
+               ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
                HSA_CAP_MEM_EDCSUPPORTED : 0;
 
-       if (adev->asic_type != CHIP_VEGA10)
-               dev->node_props.capability |= (adev->ras_enabled != 0) ?
+       if (dev->gpu->adev->asic_type != CHIP_VEGA10)
+               dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ?
                        HSA_CAP_RASEVENTNOTIFY : 0;
 
-       if (KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev))
+       if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev))
                dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED;
 
        kfd_debug_print_topology();
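
(These capability bits reach userspace through the KFD topology sysfs ABI as a "capability <value>" line in each node's properties file. A hedged reader sketch; the path follows the standard /sys/class/kfd layout and the helper name is ours.)

    #include <stdio.h>
    #include <string.h>

    /* Scan node 0's properties file, a list of "<name> <decimal-value>"
     * lines, for its "capability" word. */
    static int read_node0_capability(unsigned long long *cap)
    {
            FILE *f = fopen("/sys/class/kfd/kfd/topology/nodes/0/properties", "r");
            char name[64];
            unsigned long long val;
            int ret = -1;

            if (!f)
                    return -1;
            while (fscanf(f, "%63s %llu", name, &val) == 2) {
                    if (!strcmp(name, "capability")) {
                            *cap = val;
                            ret = 0;
                            break;
                    }
            }
            fclose(f);
            return ret;
    }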