                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = rb_bufsz << 1;
 #ifdef __BIG_ENDIAN
-               rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
+               rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
+                       SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
 #endif
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 
 
        rb_bufsz = order_base_2(ring->ring_size / 8);
        tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
-       tmp |= BUF_SWAP_32BIT;
+       tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
 #endif
        WREG32(mmCP_RB0_CNTL, tmp);
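
A quick note on the magic value being replaced (illustration only, not part of the patch): the old radeon-style BUF_SWAP_32BIT define is simply the field value 2, i.e. "swap bytes within 32-bit words", already shifted into the BUF_SWAP field, so both forms program the same register bits; the ENDIAN_SWAP field in CP_HQD_PQ_CONTROL below uses the same encoding. A minimal sketch of the equivalence, assuming the sh_mask-style shift define from the generated register headers:

	tmp |= BUF_SWAP_32BIT;                      /* old: magic constant, field value 2 pre-shifted */
	tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;   /* new: same bits, built from the sh_mask header */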
 
                mqd->queue_state.cp_hqd_pq_control |=
                        (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
 #ifdef __BIG_ENDIAN
-               mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
+               mqd->queue_state.cp_hqd_pq_control |=
+                       2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
 #endif
                mqd->queue_state.cp_hqd_pq_control &=
                        ~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |