static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        gfx_v9_0_write_data_to_reg(ring, 0, true,
                                   SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
 }
 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
                                         u64 seq, unsigned int flags)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        /* we only allocate 32bit for each seq wb address */
        BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 
 
 }
 
 static int
-psp_v10_0_sram_map(unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
-                 unsigned int *sram_data_reg_offset,
-                 enum AMDGPU_UCODE_ID ucode_id)
+psp_v10_0_sram_map(struct amdgpu_device *adev,
+               unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
+               unsigned int *sram_data_reg_offset,
+               enum AMDGPU_UCODE_ID ucode_id)
 {
        int ret = 0;
 
        uint32_t *ucode_mem = NULL;
        struct amdgpu_device *adev = psp->adev;
 
-       err = psp_v10_0_sram_map(&fw_sram_reg_val, &fw_sram_addr_reg_offset,
+       err = psp_v10_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
                                &fw_sram_data_reg_offset, ucode_type);
        if (err)
                return false;
 
 }
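Note: the hunk headers are not shown in this excerpt, so two separate hunks run together above. The signature change applies to psp_v10_0_sram_map itself, while the locals uint32_t *ucode_mem and struct amdgpu_device *adev = psp->adev; and the updated call belong to the caller (psp_v10_0_compare_sram_data in the upstream driver). The helper has no ring from which to derive the device, and the reworked SOC15_REG_OFFSET() now needs an adev in scope, so the patch passes adev in explicitly and the caller hands over the adev it already holds. The psp_v3_1_sram_map hunks below get the identical treatment.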
 
 static int
-psp_v3_1_sram_map(unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
-                 unsigned int *sram_data_reg_offset,
-                 enum AMDGPU_UCODE_ID ucode_id)
+psp_v3_1_sram_map(struct amdgpu_device *adev,
+               unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
+               unsigned int *sram_data_reg_offset,
+               enum AMDGPU_UCODE_ID ucode_id)
 {
        int ret = 0;
 
        uint32_t *ucode_mem = NULL;
        struct amdgpu_device *adev = psp->adev;
 
-       err = psp_v3_1_sram_map(&fw_sram_reg_val, &fw_sram_addr_reg_offset,
+       err = psp_v3_1_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
                                &fw_sram_data_reg_offset, ucode_type);
        if (err)
                return false;
 
 
 static void sdma_v4_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
        amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE));
 
        if (indexed) {
                return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
        } else {
-               switch (reg_offset) {
-               case SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG):
+               if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
                        return adev->gfx.config.gb_addr_config;
-               default:
-                       return RREG32(reg_offset);
-               }
+               return RREG32(reg_offset);
        }
 }
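The switch-to-if conversion here is a consequence of the macro rework shown in the soc15.h hunk below: once SOC15_REG_OFFSET() reads the offset out of adev->reg_offset[] at run time, it is no longer an integer constant expression and cannot be used as a case label, so the lookup for mmGB_ADDR_CONFIG becomes a plain comparison.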
 
 /* Register Access Macros */
-#define SOC15_REG_OFFSET(ip, inst, reg)       (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
-                                                (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
-                                                    (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
-                                                        (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
-                                                            (ip##_BASE__INST##inst##_SEG4 + reg)))))
+#define SOC15_REG_OFFSET(ip, inst, reg)        (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
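The old macro above resolved offsets from per-ASIC compile-time base constants; the replacement indexes a per-device table, which is why every expansion site in this patch needs an adev in scope (the ring functions take it from ring->adev, the PSP helpers now receive it as a parameter). Below is a minimal standalone sketch of the lookup, using toy types and made-up values rather than the real amdgpu structures, and with adev passed explicitly where the kernel macro picks up the local variable:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins: one hardware IP, one instance, four base-index segments.
 * The register offset and segment base are illustrative values only. */
#define GC_HWIP                         0
#define mmGB_ADDR_CONFIG                0x063e  /* offset relative to its segment */
#define mmGB_ADDR_CONFIG_BASE_IDX       0

struct toy_device {
        /* [hwip][instance][base_idx] -> segment base, filled at probe time */
        uint32_t reg_offset[1][1][4];
};

/* Same shape as the reworked SOC15_REG_OFFSET, with adev made explicit. */
#define TOY_REG_OFFSET(adev, ip, inst, reg) \
        ((adev)->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)

int main(void)
{
        struct toy_device dev = { .reg_offset = { { { 0x2000, 0, 0, 0 } } } };
        struct toy_device *adev = &dev;

        /* Resolves to 0x2000 + 0x063e at run time, not at compile time. */
        printf("GB_ADDR_CONFIG: 0x%x\n",
               TOY_REG_OFFSET(adev, GC, 0, mmGB_ADDR_CONFIG));
        return 0;
}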
 
 #define WREG32_FIELD15(ip, idx, reg, field, val)       \
        WREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg,  \
 
 static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 
        amdgpu_ring_write(ring,
 static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
                        u64 seq, unsigned flags)
 {
+
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 
        amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
  */
 static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(NBIF, 0,
                mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0));
        amdgpu_ring_write(ring, 0);
  */
 static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
        amdgpu_ring_write(ring, 1);
 }
                                  struct amdgpu_ib *ib,
                                  unsigned vm_id, bool ctx_switch)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
        amdgpu_ring_write(ring, vm_id);
 static void uvd_v7_0_vm_reg_write(struct amdgpu_ring *ring,
                                uint32_t data0, uint32_t data1)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, data0);
 static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
                                uint32_t data0, uint32_t data1, uint32_t mask)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, data0);
 
  */
 static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, 0);
  */
 static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
        amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
 static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 
        amdgpu_ring_write(ring,
  */
 static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
        amdgpu_ring_write(ring, 1);
 }
                                  struct amdgpu_ib *ib,
                                  unsigned vm_id, bool ctx_switch)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
        amdgpu_ring_write(ring, vm_id);
 static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring,
                                uint32_t data0, uint32_t data1)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, data0);
 static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
                                uint32_t data0, uint32_t data1, uint32_t mask)
 {
+       struct amdgpu_device *adev = ring->adev;
+
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, data0);