static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
-       *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
-               REG_A4XX_RBBM_PERFCTR_CP_0_HI);
+       *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
 
        return 0;
 }
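Every hunk below applies the same mechanical conversion: the _HI (or open-coded "+1") register argument is dropped and the helper derives the hi offset from the lo one. A minimal userspace sketch of the invariant this relies on (the names and offsets here are invented for illustration, not the real register map):

#include <assert.h>

/* Hypothetical dword offsets standing in for a LO/HI pair such as
 * REG_A4XX_RBBM_PERFCTR_CP_0_LO/_HI; the values are made up. */
#define DEMO_PERFCTR_LO 0x21c
#define DEMO_PERFCTR_HI 0x21d

int main(void)
{
	/* The reworked gpu_read64()/gpu_write64() assume that the hi
	 * half of every 64-bit pair sits one dword after the lo half. */
	assert(DEMO_PERFCTR_HI == DEMO_PERFCTR_LO + 1);
	return 0;
}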
 
                a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
        }
 
-       gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
-               REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
+       gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
 
-       gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
-               REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
+       gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
 
        return 0;
 }
         * memory rendering at this point in time and we don't want to block off
         * part of the virtual memory space.
         */
-       gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
-               REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+       gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
        gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
 
        /* Put the GPU into 64 bit by default */
                return ret;
 
        /* Set the ringbuffer address */
-       gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
-               gpu->rb[0]->iova);
+       gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);
 
        /*
         * If the microcode supports the WHERE_AM_I opcode then we can use that
                }
 
                gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
-                       REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0]));
+                           shadowptr(a5xx_gpu, gpu->rb[0]));
        } else if (gpu->nr_rings > 1) {
                /* Disable preemption if WHERE_AM_I isn't available */
                a5xx_preempt_fini(gpu);
                gpu_read(gpu, REG_A5XX_RBBM_STATUS),
                gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
                gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
-               gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
+               gpu_read64(gpu, REG_A5XX_CP_IB1_BASE),
                gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
-               gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
+               gpu_read64(gpu, REG_A5XX_CP_IB2_BASE),
                gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
 
        /* Turn off the hangcheck timer to keep it from bothering us */
 
 static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
-       *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
-               REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
+       *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO);
 
        return 0;
 }
        if (IS_ERR_OR_NULL(dumper->ptr))
                return -EINVAL;
 
-       gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
-               REG_A5XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+       gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
 
        gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
 
 {
        u64 busy_cycles;
 
-       busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
-                       REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
+       busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO);
        *out_sample_rate = clk_get_rate(gpu->core_clk);
 
        return busy_cycles;
 
 
        /* Set the address of the incoming preemption record */
        gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
-               REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
                a5xx_gpu->preempt_iova[ring->id]);
 
        a5xx_gpu->next_ring = ring;
        }
 
        /* Write a 0 to signal that we aren't switching pagetables */
-       gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
-               REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI, 0);
+       gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO, 0);
 
        /* Reset the preemption state */
        set_preempt_state(a5xx_gpu, PREEMPT_NONE);
 
        OUT_RING(ring, submit->seqno);
 
        trace_msm_gpu_submit_flush(submit,
-               gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
-                       REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
+               gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO));
 
        a6xx_flush(gpu, ring);
 }
                }
        }
 
-       gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE,
-               REG_A6XX_CP_SQE_INSTR_BASE+1, a6xx_gpu->sqe_iova);
+       gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
 
        return 0;
 }
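Note that the a6xx header names this register without a _LO/_HI suffix, so the removed call already open-coded the hi half as REG_A6XX_CP_SQE_INSTR_BASE+1; the reworked gpu_write64() simply internalizes that same reg + 1 arithmetic.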
         * memory rendering at this point in time and we don't want to block off
         * part of the virtual memory space.
         */
-       gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
-               REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+       gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
        gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
 
        /* Turn on 64 bit addressing for all blocks */
 
        if (!adreno_is_a650_family(adreno_gpu)) {
                /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
-               gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
-                       REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+               gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
 
                gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
-                       REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
                        0x00100000 + adreno_gpu->gmem - 1);
        }
 
                goto out;
 
        /* Set the ringbuffer address */
-       gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
-               gpu->rb[0]->iova);
+       gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
 
        /* Targets that support extended APRIV can use the RPTR shadow from
         * hardware but all the other ones need to disable the feature. Targets
                }
 
                gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
-                       REG_A6XX_CP_RB_RPTR_ADDR_HI,
                        shadowptr(a6xx_gpu, gpu->rb[0]));
        }
 
                gpu_read(gpu, REG_A6XX_RBBM_STATUS),
                gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
                gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
-               gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
+               gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
                gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
-               gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
+               gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
                gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
 
        /* Turn off the hangcheck timer to keep it from bothering us */
        /* Force the GPU power on so we can read this register */
        a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
 
-       *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
-                           REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
+       *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO);
 
        a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
 
 
        /* Make sure all pending memory writes are posted */
        wmb();
 
-       gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO,
-               REG_A6XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+       gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
 
        gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
 
 
        msm_rmw(gpu->mmio + (reg << 2), mask, or);
 }
 
-static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
+static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
 {
        u64 val;
 
        /*
         * For some lo/hi registers (like perfcounters), the hi value is latched
         * when the lo is read, so make sure to read the lo first to trigger
         * that
         */
-       val = (u64) msm_readl(gpu->mmio + (lo << 2));
-       val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
+       val = (u64) msm_readl(gpu->mmio + (reg << 2));
+       val |= ((u64) msm_readl(gpu->mmio + ((reg + 1) << 2)) << 32);
 
        return val;
 }
 
-static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
+static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
 {
        /* Why not a writeq here? Read the screed above */
-       msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
-       msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
+       msm_writel(lower_32_bits(val), gpu->mmio + (reg << 2));
+       msm_writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
 }
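To see both helpers end to end, here is a small userspace model (an illustrative sketch, not driver code: the mmio region is a plain array and the demo_* names are invented) that mirrors the lo-first read order and the reg + 1 addressing:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t mmio[16];	/* stand-in for the ioremapped region */

static uint64_t demo_read64(uint32_t reg)
{
	/* Read the lo dword first, mirroring the latching note in
	 * gpu_read64() above, then fetch the hi dword from reg + 1. */
	uint64_t val = mmio[reg];

	val |= (uint64_t)mmio[reg + 1] << 32;
	return val;
}

static void demo_write64(uint32_t reg, uint64_t val)
{
	mmio[reg] = (uint32_t)val;		/* lower_32_bits() */
	mmio[reg + 1] = (uint32_t)(val >> 32);	/* upper_32_bits() */
}

int main(void)
{
	demo_write64(4, 0x123456789abcdef0ULL);
	assert(demo_read64(4) == 0x123456789abcdef0ULL);
	printf("hi dword landed at reg + 1: 0x%08x\n", mmio[5]);
	return 0;
}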
 
 int msm_gpu_pm_suspend(struct msm_gpu *gpu);