return ddev->dev_private;
 }
 
+/* Return the drm_device owned by this amdgpu_device (wraps adev->ddev). */
+static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
+{
+       return adev->ddev;
+}
+
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
 {
        return container_of(bdev, struct amdgpu_device, mman.bdev);
 
 
                if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
                        if (adev->flags & AMD_IS_PX) {
-                               pm_runtime_get_sync(adev->ddev->dev);
+                               pm_runtime_get_sync(adev_to_drm(adev)->dev);
                                /* Just fire off a uevent and let userspace tell us what to do */
-                               drm_helper_hpd_irq_event(adev->ddev);
-                               pm_runtime_mark_last_busy(adev->ddev->dev);
-                               pm_runtime_put_autosuspend(adev->ddev->dev);
+                               drm_helper_hpd_irq_event(adev_to_drm(adev));
+                               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        }
                }
                /* TODO: check other events */
                        struct drm_encoder *tmp;
 
                        /* Find the encoder controlling the brightness */
-                       list_for_each_entry(tmp, &adev->ddev->mode_config.encoder_list,
+                       list_for_each_entry(tmp, &adev_to_drm(adev)->mode_config.encoder_list,
                                            head) {
                                struct amdgpu_encoder *enc = to_amdgpu_encoder(tmp);
 
 
                        .gpuvm_size = min(adev->vm_manager.max_pfn
                                          << AMDGPU_GPU_PAGE_SHIFT,
                                          AMDGPU_GMC_HOLE_START),
-                       .drm_render_minor = adev->ddev->render->index,
+                       .drm_render_minor = adev_to_drm(adev)->render->index,
                        .sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
 
                };
                                        adev->doorbell_index.last_non_cp;
                }
 
-               kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources);
+               kgd2kfd_device_init(adev->kfd.dev, adev_to_drm(adev), &gpu_resources);
        }
 }
 
                goto out_put;
 
        obj = dma_buf->priv;
-       if (obj->dev->driver != adev->ddev->driver)
+       if (obj->dev->driver != adev_to_drm(adev)->driver)
                /* Can't handle buffers from different drivers */
                goto out_put;
 
 
 
                        if (i2c.valid) {
                                sprintf(stmp, "0x%x", i2c.i2c_id);
-                               adev->i2c_bus[i] = amdgpu_i2c_create(adev->ddev, &i2c, stmp);
+                               adev->i2c_bus[i] = amdgpu_i2c_create(adev_to_drm(adev), &i2c, stmp);
                        }
                        gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
                                ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
                }
        }
 
-       amdgpu_link_encoder_connector(adev->ddev);
+       amdgpu_link_encoder_connector(adev_to_drm(adev));
 
        return true;
 }
                return -ENOMEM;
 
        adev->mode_info.atom_card_info = atom_card_info;
-       atom_card_info->dev = adev->ddev;
+       atom_card_info->dev = adev_to_drm(adev);
        atom_card_info->reg_read = cail_reg_read;
        atom_card_info->reg_write = cail_reg_write;
        /* needed for iio ops */
 
                      struct amdgpu_hpd *hpd,
                      struct amdgpu_router *router)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct amdgpu_connector *amdgpu_connector;
 
        adev->debugfs_count = i;
 #if defined(CONFIG_DEBUG_FS)
        drm_debugfs_create_files(files, nfiles,
-                                adev->ddev->primary->debugfs_root,
-                                adev->ddev->primary);
+                                adev_to_drm(adev)->primary->debugfs_root,
+                                adev_to_drm(adev)->primary);
 #endif
        return 0;
 }
        init_waitqueue_head(&adev->autodump.gpu_hang);
 
        debugfs_create_file("amdgpu_autodump", 0600,
-               adev->ddev->primary->debugfs_root,
+               adev_to_drm(adev)->primary->debugfs_root,
                adev, &autodump_debug_fops);
 }
 
 
        *pos &= (1UL << 22) - 1;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_virt_enable_access_debugfs(adev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        if (use_bank) {
                if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
                    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        amdgpu_virt_disable_access_debugfs(adev);
                        return -EINVAL;
                }
        if (pm_pg_lock)
                mutex_unlock(&adev->pm.mutex);
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        amdgpu_virt_disable_access_debugfs(adev);
        return result;
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_virt_enable_access_debugfs(adev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
                value = RREG32_PCIE(*pos >> 2);
                r = put_user(value, (uint32_t *)buf);
                if (r) {
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
                size -= 4;
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        amdgpu_virt_disable_access_debugfs(adev);
        return result;
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_virt_enable_access_debugfs(adev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
 
                r = get_user(value, (uint32_t *)buf);
                if (r) {
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
                size -= 4;
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        amdgpu_virt_disable_access_debugfs(adev);
        return result;
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_virt_enable_access_debugfs(adev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
                value = RREG32_DIDT(*pos >> 2);
                r = put_user(value, (uint32_t *)buf);
                if (r) {
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
                size -= 4;
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        amdgpu_virt_disable_access_debugfs(adev);
        return result;
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_virt_enable_access_debugfs(adev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
 
                r = get_user(value, (uint32_t *)buf);
                if (r) {
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
                size -= 4;
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        amdgpu_virt_disable_access_debugfs(adev);
        return result;
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_virt_enable_access_debugfs(adev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
                value = RREG32_SMC(*pos);
                r = put_user(value, (uint32_t *)buf);
                if (r) {
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
                size -= 4;
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        amdgpu_virt_disable_access_debugfs(adev);
        return result;
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_virt_enable_access_debugfs(adev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
 
                r = get_user(value, (uint32_t *)buf);
                if (r) {
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        amdgpu_virt_disable_access_debugfs(adev);
                        return r;
                }
                size -= 4;
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        amdgpu_virt_disable_access_debugfs(adev);
        return result;
 
        valuesize = sizeof(values);
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_virt_enable_access_debugfs(adev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (r) {
                amdgpu_virt_disable_access_debugfs(adev);
        wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
        simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_virt_enable_access_debugfs(adev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
        mutex_unlock(&adev->grbm_idx_mutex);
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (!x) {
                amdgpu_virt_disable_access_debugfs(adev);
        if (!data)
                return -ENOMEM;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0)
                goto err;
 
        amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
        mutex_unlock(&adev->grbm_idx_mutex);
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        while (size) {
                uint32_t value;
        return result;
 
 err:
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
        kfree(data);
        return r;
 }
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
 
                r = get_user(value, (uint32_t *)buf);
                if (r) {
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        return r;
                }
 
                size -= 4;
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        return result;
 }
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0)
                return r;
 
 
                r = amdgpu_get_gfx_off_status(adev, &value);
                if (r) {
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        return r;
                }
 
                r = put_user(value, (uint32_t *)buf);
                if (r) {
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        return r;
                }
 
                size -= 4;
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        return result;
 }
  */
 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 {
-       struct drm_minor *minor = adev->ddev->primary;
+       struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *ent, *root = minor->debugfs_root;
        unsigned int i;
 
 
        r = pm_runtime_get_sync(dev->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
 
        r = pm_runtime_get_sync(dev->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
 
        r = pm_runtime_get_sync(dev->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return -EINVAL;
 
-       ret = pm_runtime_get_sync(adev->ddev->dev);
+       ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (ret < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return ret;
        }
 
                return 0;
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (ret)
                return -EINVAL;
 
        adev->debugfs_preempt =
                debugfs_create_file("amdgpu_preempt_ib", 0600,
-                                   adev->ddev->primary->debugfs_root, adev,
+                                   adev_to_drm(adev)->primary->debugfs_root, adev,
                                    &fops_ib_preempt);
        if (!(adev->debugfs_preempt)) {
                DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
 
        adev->smu.debugfs_sclk =
                debugfs_create_file("amdgpu_force_sclk", 0200,
-                                   adev->ddev->primary->debugfs_root, adev,
+                                   adev_to_drm(adev)->primary->debugfs_root, adev,
                                    &fops_sclk_set);
        if (!(adev->smu.debugfs_sclk)) {
                DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
 
        adev->enable_virtual_display = false;
 
        if (amdgpu_virtual_display) {
-               struct drm_device *ddev = adev->ddev;
+               struct drm_device *ddev = adev_to_drm(adev);
                const char *pci_address_name = pci_name(ddev->pdev);
                char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
 
        if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
 
                task_barrier_enter(&hive->tb);
-               adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
+               adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
 
                if (adev->asic_reset_res)
                        goto fail;
 
                task_barrier_exit(&hive->tb);
-               adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
+               adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
 
                if (adev->asic_reset_res)
                        goto fail;
 fail:
        if (adev->asic_reset_res)
                DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
-                        adev->asic_reset_res, adev->ddev->unique);
+                        adev->asic_reset_res, adev_to_drm(adev)->unique);
        amdgpu_put_xgmi_hive(hive);
 }
 
        }
 
        /* init the mode config */
-       drm_mode_config_init(adev->ddev);
+       drm_mode_config_init(adev_to_drm(adev));
 
        r = amdgpu_device_ip_init(adev);
        if (r) {
        amdgpu_irq_disable_all(adev);
        if (adev->mode_info.mode_config_initialized){
                if (!amdgpu_device_has_dc_support(adev))
-                       drm_helper_force_disable_all(adev->ddev);
+                       drm_helper_force_disable_all(adev_to_drm(adev));
                else
-                       drm_atomic_helper_shutdown(adev->ddev);
+                       drm_atomic_helper_shutdown(adev_to_drm(adev));
        }
        amdgpu_fence_driver_fini(adev);
        if (adev->pm_sysfs_en)
             amdgpu_has_atpx_dgpu_power_cntl()) &&
            !pci_is_thunderbolt_attached(adev->pdev))
                vga_switcheroo_unregister_client(adev->pdev);
-       if (amdgpu_device_supports_boco(adev->ddev))
+       if (amdgpu_device_supports_boco(adev_to_drm(adev)))
                vga_switcheroo_fini_domain_pm_ops(adev->dev);
        vga_client_register(adev->pdev, NULL, NULL, NULL);
        if (adev->rio_mem)
 
                        if (r) {
                                dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
-                                        r, tmp_adev->ddev->unique);
+                                        r, adev_to_drm(tmp_adev)->unique);
                                break;
                        }
                }
                /*TODO Should we stop ?*/
                if (r) {
                        dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
-                                 r, tmp_adev->ddev->unique);
+                                 r, adev_to_drm(tmp_adev)->unique);
                        tmp_adev->asic_reset_res = r;
                }
        }
                }
 
                if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
-                       drm_helper_resume_force_mode(tmp_adev->ddev);
+                       drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
                }
 
                tmp_adev->asic_reset_res = 0;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
-       if (!amdgpu_device_supports_baco(adev->ddev))
+       if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
                return -ENOTSUPP;
 
        if (ras && ras->supported)
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
        int ret = 0;
 
-       if (!amdgpu_device_supports_baco(adev->ddev))
+       if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
                return -ENOTSUPP;
 
        ret = amdgpu_dpm_baco_exit(adev);
 
         * targeted by the flip
         */
        if (amdgpu_crtc->enabled &&
-           (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+           (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
                                                &vpos, &hpos, NULL, NULL,
                                                &crtc->hwmode)
             & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
        int sz;
 
        adev->mode_info.coherent_mode_property =
-               drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1);
+               drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
        if (!adev->mode_info.coherent_mode_property)
                return -ENOMEM;
 
        adev->mode_info.load_detect_property =
-               drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
+               drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
        if (!adev->mode_info.load_detect_property)
                return -ENOMEM;
 
-       drm_mode_create_scaling_mode_property(adev->ddev);
+       drm_mode_create_scaling_mode_property(adev_to_drm(adev));
 
        sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
        adev->mode_info.underscan_property =
-               drm_property_create_enum(adev->ddev, 0,
-                                   "underscan",
-                                   amdgpu_underscan_enum_list, sz);
+               drm_property_create_enum(adev_to_drm(adev), 0,
+                                        "underscan",
+                                        amdgpu_underscan_enum_list, sz);
 
        adev->mode_info.underscan_hborder_property =
-               drm_property_create_range(adev->ddev, 0,
-                                       "underscan hborder", 0, 128);
+               drm_property_create_range(adev_to_drm(adev), 0,
+                                         "underscan hborder", 0, 128);
        if (!adev->mode_info.underscan_hborder_property)
                return -ENOMEM;
 
        adev->mode_info.underscan_vborder_property =
-               drm_property_create_range(adev->ddev, 0,
-                                       "underscan vborder", 0, 128);
+               drm_property_create_range(adev_to_drm(adev), 0,
+                                         "underscan vborder", 0, 128);
        if (!adev->mode_info.underscan_vborder_property)
                return -ENOMEM;
 
        sz = ARRAY_SIZE(amdgpu_audio_enum_list);
        adev->mode_info.audio_property =
-               drm_property_create_enum(adev->ddev, 0,
+               drm_property_create_enum(adev_to_drm(adev), 0,
                                         "audio",
                                         amdgpu_audio_enum_list, sz);
 
        sz = ARRAY_SIZE(amdgpu_dither_enum_list);
        adev->mode_info.dither_property =
-               drm_property_create_enum(adev->ddev, 0,
+               drm_property_create_enum(adev_to_drm(adev), 0,
                                         "dither",
                                         amdgpu_dither_enum_list, sz);
 
        if (amdgpu_device_has_dc_support(adev)) {
                adev->mode_info.abm_level_property =
-                       drm_property_create_range(adev->ddev, 0,
-                                               "abm level", 0, 4);
+                       drm_property_create_range(adev_to_drm(adev), 0,
+                                                 "abm level", 0, 4);
                if (!adev->mode_info.abm_level_property)
                        return -ENOMEM;
        }
 
                               AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS     |
                               AMDGPU_GEM_CREATE_VRAM_CLEARED;
 
-       info = drm_get_format_info(adev->ddev, mode_cmd);
+       info = drm_get_format_info(adev_to_drm(adev), mode_cmd);
        cpp = info->cpp[0];
 
        /* need to align pitch with crtc limits */
                goto out;
        }
 
-       ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
+       ret = amdgpu_display_framebuffer_init(adev_to_drm(adev), &rfbdev->rfb,
                                              &mode_cmd, gobj);
        if (ret) {
                DRM_ERROR("failed to initialize framebuffer %d\n", ret);
        drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
 
        /* setup aperture base/size for vesafb takeover */
-       info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
+       info->apertures->ranges[0].base = adev_to_drm(adev)->mode_config.fb_base;
        info->apertures->ranges[0].size = adev->gmc.aper_size;
 
        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
        DRM_INFO("fb depth is %d\n", fb->format->depth);
        DRM_INFO("   pitch is %d\n", fb->pitches[0]);
 
-       vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
+       vga_switcheroo_client_fb_set(adev_to_drm(adev)->pdev, info);
        return 0;
 
 out:
                return 0;
 
        /* don't init fbdev if there are no connectors */
-       if (list_empty(&adev->ddev->mode_config.connector_list))
+       if (list_empty(&adev_to_drm(adev)->mode_config.connector_list))
                return 0;
 
        /* select 8 bpp console on low vram cards */
        rfbdev->adev = adev;
        adev->mode_info.rfbdev = rfbdev;
 
-       drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
-                       &amdgpu_fb_helper_funcs);
+       drm_fb_helper_prepare(adev_to_drm(adev), &rfbdev->helper,
+                             &amdgpu_fb_helper_funcs);
 
-       ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper);
+       ret = drm_fb_helper_init(adev_to_drm(adev), &rfbdev->helper);
        if (ret) {
                kfree(rfbdev);
                return ret;
 
        /* disable all the possible outputs/crtcs before entering KMS mode */
        if (!amdgpu_device_has_dc_support(adev))
-               drm_helper_disable_unused_functions(adev->ddev);
+               drm_helper_disable_unused_functions(adev_to_drm(adev));
 
        drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
        return 0;
        if (!adev->mode_info.rfbdev)
                return;
 
-       amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
+       amdgpu_fbdev_destroy(adev_to_drm(adev), adev->mode_info.rfbdev);
        kfree(adev->mode_info.rfbdev);
        adev->mode_info.rfbdev = NULL;
 }
 
                       seq);
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, flags | AMDGPU_FENCE_FLAG_INT);
-       pm_runtime_get_noresume(adev->ddev->dev);
+       pm_runtime_get_noresume(adev_to_drm(adev)->dev);
        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        if (unlikely(rcu_dereference_protected(*ptr, 1))) {
                struct dma_fence *old;
                        BUG();
 
                dma_fence_put(fence);
-               pm_runtime_mark_last_busy(adev->ddev->dev);
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
        } while (last_seq != seq);
 
        return true;
 
 
 void amdgpu_gem_force_release(struct amdgpu_device *adev)
 {
-       struct drm_device *ddev = adev->ddev;
+       struct drm_device *ddev = adev_to_drm(adev);
        struct drm_file *file;
 
        mutex_lock(&ddev->filelist_mutex);
 
                    const struct amdgpu_i2c_bus_rec *rec,
                    const char *name)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        int i;
 
        for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
 
 {
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  hotplug_work);
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
                if (!adev->enable_virtual_display)
                        /* Disable vblank IRQs aggressively for power-saving */
                        /* XXX: can this be enabled for DC? */
-                       adev->ddev->vblank_disable_immediate = true;
+                       adev_to_drm(adev)->vblank_disable_immediate = true;
 
-               r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
+               r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
                if (r)
                        return r;
 
 
        adev->irq.installed = true;
        /* Use vector 0 for MSI-X */
-       r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0));
+       r = drm_irq_install(adev_to_drm(adev), pci_irq_vector(adev->pdev, 0));
        if (r) {
                adev->irq.installed = false;
                if (!amdgpu_device_has_dc_support(adev))
                        flush_work(&adev->hotplug_work);
                return r;
        }
-       adev->ddev->max_vblank_count = 0x00ffffff;
+       adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
 
        DRM_DEBUG("amdgpu: irq initialized.\n");
        return 0;
        unsigned i, j;
 
        if (adev->irq.installed) {
-               drm_irq_uninstall(adev->ddev);
+               drm_irq_uninstall(adev_to_drm(adev));
                adev->irq.installed = false;
                if (adev->irq.msi_enabled)
                        pci_free_irq_vectors(adev->pdev);
 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type)
 {
-       if (!adev->ddev->irq_enabled)
+       if (!adev_to_drm(adev)->irq_enabled)
                return -ENOENT;
 
        if (type >= src->num_types)
 int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type)
 {
-       if (!adev->ddev->irq_enabled)
+       if (!adev_to_drm(adev)->irq_enabled)
                return -ENOENT;
 
        if (type >= src->num_types)
 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                        unsigned type)
 {
-       if (!adev->ddev->irq_enabled)
+       if (!adev_to_drm(adev)->irq_enabled)
                return false;
 
        if (type >= src->num_types)
 
        bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
-       drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
+       drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
        INIT_LIST_HEAD(&bo->shadow_list);
        bo->vm_bo = NULL;
        bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
 
        pmu_entry->pmu.attr_groups = attr_groups;
        pmu_entry->pmu_perf_type = pmu_perf_type;
        snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d",
-                               pmu_file_prefix, adev->ddev->primary->index);
+                               pmu_file_prefix, adev_to_drm(adev)->primary->index);
 
        ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);
 
 
        struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
        struct ta_rap_shared_memory *rap_shared_mem;
        struct ta_rap_cmd_output_data *rap_cmd_output;
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        uint32_t op;
        int ret;
 
 void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
-       struct drm_minor *minor = adev->ddev->primary;
+       struct drm_minor *minor = adev_to_drm(adev)->primary;
 
        if (!adev->psp.rap_context.rap_initialized)
                return;
 
 static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-       struct drm_minor *minor = adev->ddev->primary;
+       struct drm_minor *minor = adev_to_drm(adev)->primary;
 
        con->dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
        debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
 
                             struct amdgpu_ring *ring)
 {
 #if defined(CONFIG_DEBUG_FS)
-       struct drm_minor *minor = adev->ddev->primary;
+       struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *ent, *root = minor->debugfs_root;
        char name[32];
 
 
        /* No others user of address space so set it to 0 */
        r = ttm_bo_device_init(&adev->mman.bdev,
                               &amdgpu_bo_driver,
-                              adev->ddev->anon_inode->i_mapping,
-                              adev->ddev->vma_offset_manager,
+                              adev_to_drm(adev)->anon_inode->i_mapping,
+                              adev_to_drm(adev)->vma_offset_manager,
                               dma_addressing_limited(adev->dev));
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
 #if defined(CONFIG_DEBUG_FS)
        unsigned count;
 
-       struct drm_minor *minor = adev->ddev->primary;
+       struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *ent, *root = minor->debugfs_root;
 
        for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
 
        if (adev->mode_info.num_crtc == 0)
                adev->mode_info.num_crtc = 1;
        adev->enable_virtual_display = true;
-       adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
+       adev_to_drm(adev)->driver->driver_features &= ~DRIVER_ATOMIC;
        adev->cg_flags = 0;
        adev->pg_flags = 0;
 }
 
 
 
 remove_link:
-       sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
+       sysfs_remove_link(&adev->dev->kobj, adev_to_drm(adev)->unique);
 
 remove_file:
        device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
 
 void
 amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_encoder *encoder;
 
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 
  */
 static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        u32 tmp;
  */
 static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        u32 tmp;
        if (amdgpu_crtc == NULL)
                return -ENOMEM;
 
-       drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
+       drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
 
        drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
        amdgpu_crtc->crtc_id = index;
 
        amdgpu_crtc->max_cursor_width = 128;
        amdgpu_crtc->max_cursor_height = 128;
-       adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-       adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+       adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+       adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
 
        switch (amdgpu_crtc->crtc_id) {
        case 0:
        if (r)
                return r;
 
-       adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+       adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
 
-       adev->ddev->mode_config.async_page_flip = true;
+       adev_to_drm(adev)->mode_config.async_page_flip = true;
 
-       adev->ddev->mode_config.max_width = 16384;
-       adev->ddev->mode_config.max_height = 16384;
+       adev_to_drm(adev)->mode_config.max_width = 16384;
+       adev_to_drm(adev)->mode_config.max_height = 16384;
 
-       adev->ddev->mode_config.preferred_depth = 24;
-       adev->ddev->mode_config.prefer_shadow = 1;
+       adev_to_drm(adev)->mode_config.preferred_depth = 24;
+       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
-       adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+       adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
        r = amdgpu_display_modeset_create_props(adev);
        if (r)
                return r;
 
-       adev->ddev->mode_config.max_width = 16384;
-       adev->ddev->mode_config.max_height = 16384;
+       adev_to_drm(adev)->mode_config.max_width = 16384;
+       adev_to_drm(adev)->mode_config.max_height = 16384;
 
        /* allocate crtcs */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
        }
 
        if (amdgpu_atombios_get_connector_info_from_object_table(adev))
-               amdgpu_display_print_display_setup(adev->ddev);
+               amdgpu_display_print_display_setup(adev_to_drm(adev));
        else
                return -EINVAL;
 
        if (r)
                return r;
 
-       drm_kms_helper_poll_init(adev->ddev);
+       drm_kms_helper_poll_init(adev_to_drm(adev));
 
        adev->mode_info.mode_config_initialized = true;
        return 0;
 
        kfree(adev->mode_info.bios_hardcoded_edid);
 
-       drm_kms_helper_poll_fini(adev->ddev);
+       drm_kms_helper_poll_fini(adev_to_drm(adev));
 
        dce_v10_0_audio_fini(adev);
 
        dce_v10_0_afmt_fini(adev);
 
-       drm_mode_config_cleanup(adev->ddev);
+       drm_mode_config_cleanup(adev_to_drm(adev));
        adev->mode_info.mode_config_initialized = false;
 
        return 0;
        if (amdgpu_crtc == NULL)
                return 0;
 
-       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
        works = amdgpu_crtc->pflip_works;
        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
                                                 "AMDGPU_FLIP_SUBMITTED(%d)\n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED);
-               spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+               spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return 0;
        }
 
        if (works->event)
                drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
-       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+       spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
        drm_crtc_vblank_put(&amdgpu_crtc->base);
        schedule_work(&works->unpin_work);
                        DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
                if (amdgpu_irq_enabled(adev, source, irq_type)) {
-                       drm_handle_vblank(adev->ddev, crtc);
+                       drm_handle_vblank(adev_to_drm(adev), crtc);
                }
                DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
 
                                 uint32_t supported_device,
                                 u16 caps)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_encoder *encoder;
        struct amdgpu_encoder *amdgpu_encoder;
 
 
  */
 static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        u32 tmp;
  */
 static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        u32 tmp;
        if (amdgpu_crtc == NULL)
                return -ENOMEM;
 
-       drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
+       drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
 
        drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
        amdgpu_crtc->crtc_id = index;
 
        amdgpu_crtc->max_cursor_width = 128;
        amdgpu_crtc->max_cursor_height = 128;
-       adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-       adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+       adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+       adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
 
        switch (amdgpu_crtc->crtc_id) {
        case 0:
        if (r)
                return r;
 
-       adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+       adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
 
-       adev->ddev->mode_config.async_page_flip = true;
+       adev_to_drm(adev)->mode_config.async_page_flip = true;
 
-       adev->ddev->mode_config.max_width = 16384;
-       adev->ddev->mode_config.max_height = 16384;
+       adev_to_drm(adev)->mode_config.max_width = 16384;
+       adev_to_drm(adev)->mode_config.max_height = 16384;
 
-       adev->ddev->mode_config.preferred_depth = 24;
-       adev->ddev->mode_config.prefer_shadow = 1;
+       adev_to_drm(adev)->mode_config.preferred_depth = 24;
+       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
-       adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+       adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
        r = amdgpu_display_modeset_create_props(adev);
        if (r)
                return r;
 
-       adev->ddev->mode_config.max_width = 16384;
-       adev->ddev->mode_config.max_height = 16384;
+       adev_to_drm(adev)->mode_config.max_width = 16384;
+       adev_to_drm(adev)->mode_config.max_height = 16384;
 
 
        /* allocate crtcs */
        }
 
        if (amdgpu_atombios_get_connector_info_from_object_table(adev))
-               amdgpu_display_print_display_setup(adev->ddev);
+               amdgpu_display_print_display_setup(adev_to_drm(adev));
        else
                return -EINVAL;
 
        if (r)
                return r;
 
-       drm_kms_helper_poll_init(adev->ddev);
+       drm_kms_helper_poll_init(adev_to_drm(adev));
 
        adev->mode_info.mode_config_initialized = true;
        return 0;
 
        kfree(adev->mode_info.bios_hardcoded_edid);
 
-       drm_kms_helper_poll_fini(adev->ddev);
+       drm_kms_helper_poll_fini(adev_to_drm(adev));
 
        dce_v11_0_audio_fini(adev);
 
        dce_v11_0_afmt_fini(adev);
 
-       drm_mode_config_cleanup(adev->ddev);
+       drm_mode_config_cleanup(adev_to_drm(adev));
        adev->mode_info.mode_config_initialized = false;
 
        return 0;
        if(amdgpu_crtc == NULL)
                return 0;
 
-       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
        works = amdgpu_crtc->pflip_works;
        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
                                                 "AMDGPU_FLIP_SUBMITTED(%d)\n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED);
-               spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+               spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return 0;
        }
 
        if(works->event)
                drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
-       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+       spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
        drm_crtc_vblank_put(&amdgpu_crtc->base);
        schedule_work(&works->unpin_work);
                        DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
                if (amdgpu_irq_enabled(adev, source, irq_type)) {
-                       drm_handle_vblank(adev->ddev, crtc);
+                       drm_handle_vblank(adev_to_drm(adev), crtc);
                }
                DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
 
                                 uint32_t supported_device,
                                 u16 caps)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_encoder *encoder;
        struct amdgpu_encoder *amdgpu_encoder;
 
 
  */
 static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        u32 tmp;
  */
 static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        u32 tmp;
        if (amdgpu_crtc == NULL)
                return -ENOMEM;
 
-       drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
+       drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
 
        drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
        amdgpu_crtc->crtc_id = index;
 
        amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
        amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
-       adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-       adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+       adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+       adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
 
        amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
 
 
        adev->mode_info.mode_config_initialized = true;
 
-       adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
-       adev->ddev->mode_config.async_page_flip = true;
-       adev->ddev->mode_config.max_width = 16384;
-       adev->ddev->mode_config.max_height = 16384;
-       adev->ddev->mode_config.preferred_depth = 24;
-       adev->ddev->mode_config.prefer_shadow = 1;
-       adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+       adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
+       adev_to_drm(adev)->mode_config.async_page_flip = true;
+       adev_to_drm(adev)->mode_config.max_width = 16384;
+       adev_to_drm(adev)->mode_config.max_height = 16384;
+       adev_to_drm(adev)->mode_config.preferred_depth = 24;
+       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+       adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
        r = amdgpu_display_modeset_create_props(adev);
        if (r)
                return r;
 
-       adev->ddev->mode_config.max_width = 16384;
-       adev->ddev->mode_config.max_height = 16384;
+       adev_to_drm(adev)->mode_config.max_width = 16384;
+       adev_to_drm(adev)->mode_config.max_height = 16384;
 
        /* allocate crtcs */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
 
        ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
        if (ret)
-               amdgpu_display_print_display_setup(adev->ddev);
+               amdgpu_display_print_display_setup(adev_to_drm(adev));
        else
                return -EINVAL;
 
        if (r)
                return r;
 
-       drm_kms_helper_poll_init(adev->ddev);
+       drm_kms_helper_poll_init(adev_to_drm(adev));
 
        return r;
 }
 
        kfree(adev->mode_info.bios_hardcoded_edid);
 
-       drm_kms_helper_poll_fini(adev->ddev);
+       drm_kms_helper_poll_fini(adev_to_drm(adev));
 
        dce_v6_0_audio_fini(adev);
        dce_v6_0_afmt_fini(adev);
 
-       drm_mode_config_cleanup(adev->ddev);
+       drm_mode_config_cleanup(adev_to_drm(adev));
        adev->mode_info.mode_config_initialized = false;
 
        return 0;
                        DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
                if (amdgpu_irq_enabled(adev, source, irq_type)) {
-                       drm_handle_vblank(adev->ddev, crtc);
+                       drm_handle_vblank(adev_to_drm(adev), crtc);
                }
                DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
                break;
        if (amdgpu_crtc == NULL)
                return 0;
 
-       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
        works = amdgpu_crtc->pflip_works;
        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
                                                "AMDGPU_FLIP_SUBMITTED(%d)\n",
                                                amdgpu_crtc->pflip_status,
                                                AMDGPU_FLIP_SUBMITTED);
-               spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+               spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return 0;
        }
 
        if (works->event)
                drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
-       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+       spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
        drm_crtc_vblank_put(&amdgpu_crtc->base);
        schedule_work(&works->unpin_work);
                                 uint32_t supported_device,
                                 u16 caps)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_encoder *encoder;
        struct amdgpu_encoder *amdgpu_encoder;
 
 
  */
 static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        u32 tmp;
  */
 static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        u32 tmp;
        if (amdgpu_crtc == NULL)
                return -ENOMEM;
 
-       drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
+       drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
 
        drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
        amdgpu_crtc->crtc_id = index;
 
        amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
        amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
-       adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-       adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+       adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+       adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
 
        amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
 
        if (r)
                return r;
 
-       adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+       adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
 
-       adev->ddev->mode_config.async_page_flip = true;
+       adev_to_drm(adev)->mode_config.async_page_flip = true;
 
-       adev->ddev->mode_config.max_width = 16384;
-       adev->ddev->mode_config.max_height = 16384;
+       adev_to_drm(adev)->mode_config.max_width = 16384;
+       adev_to_drm(adev)->mode_config.max_height = 16384;
 
-       adev->ddev->mode_config.preferred_depth = 24;
-       adev->ddev->mode_config.prefer_shadow = 1;
+       adev_to_drm(adev)->mode_config.preferred_depth = 24;
+       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
-       adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+       adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
        r = amdgpu_display_modeset_create_props(adev);
        if (r)
                return r;
 
-       adev->ddev->mode_config.max_width = 16384;
-       adev->ddev->mode_config.max_height = 16384;
+       adev_to_drm(adev)->mode_config.max_width = 16384;
+       adev_to_drm(adev)->mode_config.max_height = 16384;
 
        /* allocate crtcs */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
        }
 
        if (amdgpu_atombios_get_connector_info_from_object_table(adev))
-               amdgpu_display_print_display_setup(adev->ddev);
+               amdgpu_display_print_display_setup(adev_to_drm(adev));
        else
                return -EINVAL;
 
        if (r)
                return r;
 
-       drm_kms_helper_poll_init(adev->ddev);
+       drm_kms_helper_poll_init(adev_to_drm(adev));
 
        adev->mode_info.mode_config_initialized = true;
        return 0;
 
        kfree(adev->mode_info.bios_hardcoded_edid);
 
-       drm_kms_helper_poll_fini(adev->ddev);
+       drm_kms_helper_poll_fini(adev_to_drm(adev));
 
        dce_v8_0_audio_fini(adev);
 
        dce_v8_0_afmt_fini(adev);
 
-       drm_mode_config_cleanup(adev->ddev);
+       drm_mode_config_cleanup(adev_to_drm(adev));
        adev->mode_info.mode_config_initialized = false;
 
        return 0;
                        DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
                if (amdgpu_irq_enabled(adev, source, irq_type)) {
-                       drm_handle_vblank(adev->ddev, crtc);
+                       drm_handle_vblank(adev_to_drm(adev), crtc);
                }
                DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
                break;
        if (amdgpu_crtc == NULL)
                return 0;
 
-       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
        works = amdgpu_crtc->pflip_works;
        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
                                                "AMDGPU_FLIP_SUBMITTED(%d)\n",
                                                amdgpu_crtc->pflip_status,
                                                AMDGPU_FLIP_SUBMITTED);
-               spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+               spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return 0;
        }
 
        if (works->event)
                drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
-       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+       spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
        drm_crtc_vblank_put(&amdgpu_crtc->base);
        schedule_work(&works->unpin_work);
                                 uint32_t supported_device,
                                 u16 caps)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_encoder *encoder;
        struct amdgpu_encoder *amdgpu_encoder;
 
 
        if (amdgpu_crtc == NULL)
                return -ENOMEM;
 
-       drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
+       drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
 
        drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
        amdgpu_crtc->crtc_id = index;
        if (r)
                return r;
 
-       adev->ddev->max_vblank_count = 0;
+       adev_to_drm(adev)->max_vblank_count = 0;
 
-       adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+       adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
 
-       adev->ddev->mode_config.max_width = 16384;
-       adev->ddev->mode_config.max_height = 16384;
+       adev_to_drm(adev)->mode_config.max_width = 16384;
+       adev_to_drm(adev)->mode_config.max_height = 16384;
 
-       adev->ddev->mode_config.preferred_depth = 24;
-       adev->ddev->mode_config.prefer_shadow = 1;
+       adev_to_drm(adev)->mode_config.preferred_depth = 24;
+       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
-       adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+       adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
        r = amdgpu_display_modeset_create_props(adev);
        if (r)
                return r;
 
-       adev->ddev->mode_config.max_width = 16384;
-       adev->ddev->mode_config.max_height = 16384;
+       adev_to_drm(adev)->mode_config.max_width = 16384;
+       adev_to_drm(adev)->mode_config.max_height = 16384;
 
        /* allocate crtcs, encoders, connectors */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                        return r;
        }
 
-       drm_kms_helper_poll_init(adev->ddev);
+       drm_kms_helper_poll_init(adev_to_drm(adev));
 
        adev->mode_info.mode_config_initialized = true;
        return 0;
 
        kfree(adev->mode_info.bios_hardcoded_edid);
 
-       drm_kms_helper_poll_fini(adev->ddev);
+       drm_kms_helper_poll_fini(adev_to_drm(adev));
 
-       drm_mode_config_cleanup(adev->ddev);
+       drm_mode_config_cleanup(adev_to_drm(adev));
        /* clear crtcs pointer to avoid dce irq finish routine access freed data */
        memset(adev->mode_info.crtcs, 0, sizeof(adev->mode_info.crtcs[0]) * AMDGPU_MAX_CRTCS);
        adev->mode_info.mode_config_initialized = false;
        if (!encoder)
                return -ENOMEM;
        encoder->possible_crtcs = 1 << index;
-       drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
+       drm_encoder_init(adev_to_drm(adev), encoder, &dce_virtual_encoder_funcs,
                         DRM_MODE_ENCODER_VIRTUAL, NULL);
        drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
 
        }
 
        /* add a new connector */
-       drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
+       drm_connector_init(adev_to_drm(adev), connector, &dce_virtual_connector_funcs,
                           DRM_MODE_CONNECTOR_VIRTUAL);
        drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
        connector->display_info.subpixel_order = SubPixelHorizontalRGB;
        if (amdgpu_crtc == NULL)
                return 0;
 
-       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
        works = amdgpu_crtc->pflip_works;
        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
                        "AMDGPU_FLIP_SUBMITTED(%d)\n",
                        amdgpu_crtc->pflip_status,
                        AMDGPU_FLIP_SUBMITTED);
-               spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+               spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return 0;
        }
 
        if (works->event)
                drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
-       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+       spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
        drm_crtc_vblank_put(&amdgpu_crtc->base);
        amdgpu_bo_unref(&works->old_abo);
 
 get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
 
                return;
        }
 
-       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 
        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc);
-               spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+               spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }
 
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;
 
-               list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
+               list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }
 
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
 
        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
-       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+       spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc_state->stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
-                               spin_lock_irqsave(&adev->ddev->event_lock, flags);
+                               spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc_state->stream,
                                    adev->dm.dc,
                                    acrtc_state->stream,
                                    &acrtc_state->vrr_params.adjust);
-                               spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+                               spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
        if (adev->family < AMDGPU_FAMILY_AI)
                return;
 
-       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 
        if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
            acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }
 
-       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+       spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 }
 
 static int dm_set_clockgating_state(void *handle,
 #endif
        int r;
 
-       adev->dm.ddev = adev->ddev;
+       adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;
 
        /* Zero all the fields */
        /* TODO: Add_display_info? */
 
        /* TODO use dynamic cursor width */
-       adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
-       adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
+       adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
+       adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
 
-       if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
+       if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        bool ret = true;
 
        if (!adev->dm.fw_dmcu)
-               return detect_mst_link_for_all_connectors(adev->ddev);
+               return detect_mst_link_for_all_connectors(adev_to_drm(adev));
 
        dmcu = adev->dm.dc->res_pool->dmcu;
 
        if (!ret)
                return -EINVAL;
 
-       return detect_mst_link_for_all_connectors(adev->ddev);
+       return detect_mst_link_for_all_connectors(adev_to_drm(adev));
 }
 
 static void s3_handle_mst(struct drm_device *dev, bool suspend)
        }
 
        WARN_ON(adev->dm.cached_state);
-       adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+       adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
 
-       s3_handle_mst(adev->ddev, true);
+       s3_handle_mst(adev_to_drm(adev), true);
 
        amdgpu_dm_irq_suspend(adev);
 
 static int dm_resume(void *handle)
 {
        struct amdgpu_device *adev = handle;
-       struct drm_device *ddev = adev->ddev;
+       struct drm_device *ddev = adev_to_drm(adev);
        struct amdgpu_display_manager *dm = &adev->dm;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
 
 static void register_hpd_handlers(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct amdgpu_dm_connector *aconnector;
        const struct dc_link *dc_link;
 
        adev->mode_info.mode_config_initialized = true;
 
-       adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
-       adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
+       adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
+       adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
 
-       adev->ddev->mode_config.max_width = 16384;
-       adev->ddev->mode_config.max_height = 16384;
+       adev_to_drm(adev)->mode_config.max_width = 16384;
+       adev_to_drm(adev)->mode_config.max_height = 16384;
 
-       adev->ddev->mode_config.preferred_depth = 24;
-       adev->ddev->mode_config.prefer_shadow = 1;
+       adev_to_drm(adev)->mode_config.preferred_depth = 24;
+       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
        /* indicates support for immediate flip */
-       adev->ddev->mode_config.async_page_flip = true;
+       adev_to_drm(adev)->mode_config.async_page_flip = true;
 
-       adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+       adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
 
        dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
 
-       drm_atomic_private_obj_init(adev->ddev,
+       drm_atomic_private_obj_init(adev_to_drm(adev),
                                    &adev->dm.atomic_obj,
                                    &state->base,
                                    &dm_atomic_state_funcs);
        props.type = BACKLIGHT_RAW;
 
        snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
-                       dm->adev->ddev->primary->index);
+                adev_to_drm(dm->adev)->primary->index);
 
        dm->backlight_dev = backlight_device_register(bl_name,
-                       dm->adev->ddev->dev,
-                       dm,
-                       &amdgpu_dm_backlight_ops,
-                       &props);
+                                                     adev_to_drm(dm->adev)->dev,
+                                                     dm,
+                                                     &amdgpu_dm_backlight_ops,
+                                                     &props);
 
        if (IS_ERR(dm->backlight_dev))
                DRM_ERROR("DM: Backlight registration failed!\n");
        if (ret == 0) {
                if (s3_state) {
                        dm_resume(adev);
-                       drm_kms_helper_hotplug_event(adev->ddev);
+                       drm_kms_helper_hotplug_event(adev_to_drm(adev));
                } else
                        dm_suspend(adev);
        }
         */
 #if defined(CONFIG_DEBUG_KERNEL_DC)
        device_create_file(
-               adev->ddev->dev,
+               adev_to_drm(adev)->dev,
                &dev_attr_s3_debug);
 #endif
 
        num_formats = get_plane_formats(plane, plane_cap, formats,
                                        ARRAY_SIZE(formats));
 
-       res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
+       res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
                                       &dm_plane_funcs, formats, num_formats,
                                       NULL, plane->type, NULL);
        if (res)
        if (!new_stream->timing.h_total || !new_stream->timing.v_total)
                return;
 
-       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
        vrr_params = new_crtc_state->vrr_params;
 
        if (surface) {
                              (int)new_crtc_state->base.vrr_enabled,
                              (int)vrr_params.state);
 
-       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+       spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 }
 
 static void pre_update_freesync_state_on_stream(
        if (!new_stream->timing.h_total || !new_stream->timing.v_total)
                return;
 
-       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
        vrr_params = new_crtc_state->vrr_params;
 
        if (new_crtc_state->vrr_supported &&
                        sizeof(vrr_params.adjust)) != 0);
 
        new_crtc_state->vrr_params = vrr_params;
-       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+       spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 }
 
 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
         * send vblank event on all events not handled in flip and
         * mark consumed event for drm_atomic_helper_commit_hw_done
         */
-       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
 
                if (new_crtc_state->event)
 
                new_crtc_state->event = NULL;
        }
-       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+       spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
        /* Signal HW programming completion */
        drm_atomic_helper_commit_hw_done(state);
 
 
        adev->dm.force_timing_sync = (bool)val;
 
-       amdgpu_dm_trigger_timing_sync(adev->ddev);
+       amdgpu_dm_trigger_timing_sync(adev_to_drm(adev));
 
        return 0;
 }
                .llseek = default_llseek
        };
 
-       struct drm_minor *minor = adev->ddev->primary;
+       struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;
        int ret;
 
 
  */
 void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
 
  */
 void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
 
 
 void
 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        int i;
 
        for (i = 0; i < adev->dm.display_indexes_num; i++) {
        aconnector->mst_mgr.cbs = &dm_mst_cbs;
        drm_dp_mst_topology_mgr_init(
                &aconnector->mst_mgr,
-               dm->adev->ddev,
+               adev_to_drm(dm->adev),
                &aconnector->dm_dp_aux.aux,
                16,
                4,
 
 
 void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
 {
-       struct drm_device *ddev = adev->ddev;
+       struct drm_device *ddev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
 
 
 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
        u32 vblank_in_pixels;
 
 u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev->ddev;
+       struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
        u32 vrefresh = 0;
 
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
        return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
-                       adev->ddev->unique,
+                       adev_to_drm(adev)->unique,
                        atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
                        adev->throttling_logging_rs.interval / HZ + 1);
 }
        if (channel >= PP_TEMP_MAX)
                return -EINVAL;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
                break;
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (r)
                return r;
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       ret = pm_runtime_get_sync(adev->ddev->dev);
+       ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (ret < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return ret;
        }
 
                pwm_mode = smu_get_fan_control_mode(&adev->smu);
        } else {
                if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        return -EINVAL;
                }
 
                pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        return sprintf(buf, "%i\n", pwm_mode);
 }
        if (err)
                return err;
 
-       ret = pm_runtime_get_sync(adev->ddev->dev);
+       ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (ret < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return ret;
        }
 
                smu_set_fan_control_mode(&adev->smu, value);
        } else {
                if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        return -EINVAL;
                }
 
                amdgpu_dpm_set_fan_control_mode(adev, value);
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        return count;
 }
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       err = pm_runtime_get_sync(adev->ddev->dev);
+       err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return err;
        }
 
 
        if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
                pr_info("manual fan speed control should be enabled first\n");
-               pm_runtime_mark_last_busy(adev->ddev->dev);
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return -EINVAL;
        }
 
        err = kstrtou32(buf, 10, &value);
        if (err) {
-               pm_runtime_mark_last_busy(adev->ddev->dev);
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return err;
        }
 
        else
                err = -EINVAL;
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (err)
                return err;
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       err = pm_runtime_get_sync(adev->ddev->dev);
+       err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return err;
        }
 
        else
                err = -EINVAL;
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (err)
                return err;
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       err = pm_runtime_get_sync(adev->ddev->dev);
+       err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return err;
        }
 
        else
                err = -EINVAL;
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (err)
                return err;
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
                                   (void *)&min_rpm, &size);
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (r)
                return r;
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
                                   (void *)&max_rpm, &size);
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (r)
                return r;
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       err = pm_runtime_get_sync(adev->ddev->dev);
+       err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return err;
        }
 
        else
                err = -EINVAL;
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (err)
                return err;
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       err = pm_runtime_get_sync(adev->ddev->dev);
+       err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return err;
        }
 
                pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
 
        if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
-               pm_runtime_mark_last_busy(adev->ddev->dev);
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return -ENODATA;
        }
 
        err = kstrtou32(buf, 10, &value);
        if (err) {
-               pm_runtime_mark_last_busy(adev->ddev->dev);
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return err;
        }
 
        else
                err = -EINVAL;
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (err)
                return err;
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       ret = pm_runtime_get_sync(adev->ddev->dev);
+       ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (ret < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return ret;
        }
 
                pwm_mode = smu_get_fan_control_mode(&adev->smu);
        } else {
                if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        return -EINVAL;
                }
 
                pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
 }
        else
                return -EINVAL;
 
-       err = pm_runtime_get_sync(adev->ddev->dev);
+       err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return err;
        }
 
                smu_set_fan_control_mode(&adev->smu, pwm_mode);
        } else {
                if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                        return -EINVAL;
                }
                amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        return count;
 }
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
                                   (void *)&vddgfx, &size);
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (r)
                return r;
        if  (!(adev->flags & AMD_IS_APU))
                return -EINVAL;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
                                   (void *)&vddnb, &size);
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (r)
                return r;
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
                                   (void *)&query, &size);
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (r)
                return r;
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
                size = snprintf(buf, PAGE_SIZE, "\n");
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        return size;
 }
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
                size = snprintf(buf, PAGE_SIZE, "\n");
        }
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        return size;
 }
        value = value / 1000000; /* convert to Watt */
 
 
-       err = pm_runtime_get_sync(adev->ddev->dev);
+       err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return err;
        }
 
        else
                err = -EINVAL;
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (err)
                return err;
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
                                   (void *)&sclk, &size);
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (r)
                return r;
        if (amdgpu_in_reset(adev))
                return -EPERM;
 
-       r = pm_runtime_get_sync(adev->ddev->dev);
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
-               pm_runtime_put_autosuspend(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
        r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
                                   (void *)&mclk, &size);
 
-       pm_runtime_mark_last_busy(adev->ddev->dev);
-       pm_runtime_put_autosuspend(adev->ddev->dev);
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
        if (r)
                return r;