/* get the pde for a given mc addr */
        void (*get_vm_pde)(struct amdgpu_device *adev, int level,
                           u64 *dst, u64 *flags);
-       uint32_t (*get_invalidate_req)(unsigned int vm_id);
+       uint32_t (*get_invalidate_req)(unsigned int vmid);
 };
 
 /* provided by the ih block */
        void                    *owner;
        uint64_t                fence_ctx; /* the fence_context this job uses */
        bool                    vm_needs_flush;
-       unsigned                vm_id;
+       unsigned                vmid;
        uint64_t                vm_pd_addr;
        uint32_t                gds_base, gds_size;
        uint32_t                gws_base, gws_size;
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
+#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
 
                return -EINVAL;
        }
 
-       if (vm && !job->vm_id) {
+       if (vm && !job->vmid) {
                dev_err(adev->dev, "VM IB without ID\n");
                return -EINVAL;
        }
                        !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
                        continue;
 
-               amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
+               amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
                                    need_ctx_switch);
                need_ctx_switch = false;
        }
        r = amdgpu_fence_emit(ring, f);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
-               if (job && job->vm_id)
-                       amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vm_id);
+               if (job && job->vmid)
+                       amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
                amdgpu_ring_undo(ring);
                return r;
        }
 
                dma_fence_put(id->last_flush);
                id->last_flush = NULL;
        }
-       job->vm_id = id - id_mgr->ids;
+       job->vmid = id - id_mgr->ids;
        trace_amdgpu_vm_grab_id(vm, ring, job);
 out:
        return r;
 no_flush_needed:
        list_move_tail(&id->list, &id_mgr->ids_lru);
 
-       job->vm_id = id - id_mgr->ids;
+       job->vmid = id - id_mgr->ids;
        trace_amdgpu_vm_grab_id(vm, ring, job);
 
 error:
  * amdgpu_vmid_reset - reset VMID to zero
  *
  * @adev: amdgpu device structure
- * @vm_id: vmid number to use
+ * @vmid: vmid number to use
  *
  * Reset saved GDS, GWS and OA to force switch on next flush.
  */
 
        unsigned client_id;
        unsigned src_id;
        unsigned ring_id;
-       unsigned vm_id;
-       unsigned vm_id_src;
+       unsigned vmid;
+       unsigned vmid_src;
        uint64_t timestamp;
        unsigned timestamp_src;
        unsigned pas_id;
 
                }
        }
 
-       while (fence == NULL && vm && !job->vm_id) {
+       while (fence == NULL && vm && !job->vmid) {
                struct amdgpu_ring *ring = job->ring;
 
                r = amdgpu_vmid_grab(vm, ring, &job->sync,
 
        /* command emit functions */
        void (*emit_ib)(struct amdgpu_ring *ring,
                        struct amdgpu_ib *ib,
-                       unsigned vm_id, bool ctx_switch);
+                       unsigned vmid, bool ctx_switch);
        void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
                           uint64_t seq, unsigned flags);
        void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
-       void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+       void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
                              uint64_t pd_addr);
        void (*emit_hdp_flush)(struct amdgpu_ring *ring);
        void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
 
                             __field(unsigned, client_id)
                             __field(unsigned, src_id)
                             __field(unsigned, ring_id)
-                            __field(unsigned, vm_id)
-                            __field(unsigned, vm_id_src)
+                            __field(unsigned, vmid)
+                            __field(unsigned, vmid_src)
                             __field(uint64_t, timestamp)
                             __field(unsigned, timestamp_src)
                             __field(unsigned, pas_id)
                           __entry->client_id = iv->client_id;
                           __entry->src_id = iv->src_id;
                           __entry->ring_id = iv->ring_id;
-                          __entry->vm_id = iv->vm_id;
-                          __entry->vm_id_src = iv->vm_id_src;
+                          __entry->vmid = iv->vmid;
+                          __entry->vmid_src = iv->vmid_src;
                           __entry->timestamp = iv->timestamp;
                           __entry->timestamp_src = iv->timestamp_src;
                           __entry->pas_id = iv->pas_id;
                           __entry->src_data[2] = iv->src_data[2];
                           __entry->src_data[3] = iv->src_data[3];
                           ),
-           TP_printk("client_id:%u src_id:%u ring:%u vm_id:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n",
+           TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n",
                      __entry->client_id, __entry->src_id,
-                     __entry->ring_id, __entry->vm_id,
+                     __entry->ring_id, __entry->vmid,
                      __entry->timestamp, __entry->pas_id,
                      __entry->src_data[0], __entry->src_data[1],
                      __entry->src_data[2], __entry->src_data[3])
            TP_STRUCT__entry(
                             __field(struct amdgpu_vm *, vm)
                             __field(u32, ring)
-                            __field(u32, vm_id)
+                            __field(u32, vmid)
                             __field(u32, vm_hub)
                             __field(u64, pd_addr)
                             __field(u32, needs_flush)
            TP_fast_assign(
                           __entry->vm = vm;
                           __entry->ring = ring->idx;
-                          __entry->vm_id = job->vm_id;
+                          __entry->vmid = job->vmid;
                           __entry->vm_hub = ring->funcs->vmhub;
                           __entry->pd_addr = job->vm_pd_addr;
                           __entry->needs_flush = job->vm_needs_flush;
                           ),
            TP_printk("vm=%p, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
-                     __entry->vm, __entry->ring, __entry->vm_id,
+                     __entry->vm, __entry->ring, __entry->vmid,
                      __entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
 );
 
 );
 
 TRACE_EVENT(amdgpu_vm_flush,
-           TP_PROTO(struct amdgpu_ring *ring, unsigned vm_id,
+           TP_PROTO(struct amdgpu_ring *ring, unsigned vmid,
                     uint64_t pd_addr),
-           TP_ARGS(ring, vm_id, pd_addr),
+           TP_ARGS(ring, vmid, pd_addr),
            TP_STRUCT__entry(
                             __field(u32, ring)
-                            __field(u32, vm_id)
+                            __field(u32, vmid)
                             __field(u32, vm_hub)
                             __field(u64, pd_addr)
                             ),
 
            TP_fast_assign(
                           __entry->ring = ring->idx;
-                          __entry->vm_id = vm_id;
+                          __entry->vmid = vmid;
                           __entry->vm_hub = ring->funcs->vmhub;
                           __entry->pd_addr = pd_addr;
                           ),
            TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
-                     __entry->ring, __entry->vm_id,
+                     __entry->ring, __entry->vmid,
                      __entry->vm_hub, __entry->pd_addr)
 );
 
 
  *
  */
 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-                            unsigned vm_id, bool ctx_switch)
+                            unsigned vmid, bool ctx_switch)
 {
        amdgpu_ring_write(ring, VCE_CMD_IB);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
 
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-                            unsigned vm_id, bool ctx_switch);
+                            unsigned vmid, bool ctx_switch);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                unsigned flags);
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
 
        bool gds_switch_needed;
        bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
 
-       if (job->vm_id == 0)
+       if (job->vmid == 0)
                return false;
-       id = &id_mgr->ids[job->vm_id];
+       id = &id_mgr->ids[job->vmid];
        gds_switch_needed = ring->funcs->emit_gds_switch && (
                id->gds_base != job->gds_base ||
                id->gds_size != job->gds_size ||
  * amdgpu_vm_flush - hardware flush the vm
  *
  * @ring: ring to use for flush
- * @vm_id: vmid number to use
+ * @vmid: vmid number to use
  * @pd_addr: address of the page directory
  *
  * Emit a VM flush when it is necessary.
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-       struct amdgpu_vmid *id = &id_mgr->ids[job->vm_id];
+       struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
        bool gds_switch_needed = ring->funcs->emit_gds_switch && (
                id->gds_base != job->gds_base ||
                id->gds_size != job->gds_size ||
        if (ring->funcs->emit_vm_flush && vm_flush_needed) {
                struct dma_fence *fence;
 
-               trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr);
-               amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
+               trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
+               amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
 
                r = amdgpu_fence_emit(ring, &fence);
                if (r)
                id->gws_size = job->gws_size;
                id->oa_base = job->oa_base;
                id->oa_size = job->oa_size;
-               amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
+               amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
                                            job->gds_size, job->gws_base,
                                            job->gws_size, job->oa_base,
                                            job->oa_size);
 
        entry->src_id = dw[0] & 0xff;
        entry->src_data[0] = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
-       entry->vm_id = (dw[2] >> 8) & 0xff;
+       entry->vmid = (dw[2] >> 8) & 0xff;
        entry->pas_id = (dw[2] >> 16) & 0xffff;
 
        /* wptr/rptr are in bytes! */
 
  */
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
-                                 unsigned vm_id, bool ctx_switch)
+                                 unsigned vmid, bool ctx_switch)
 {
-       u32 extra_bits = vm_id & 0xf;
+       u32 extra_bits = vmid & 0xf;
 
        /* IB packet must end on an 8 DW boundary */
        cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
  * using sDMA (CIK).
  */
 static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                       unsigned vm_id, uint64_t pd_addr)
+                                       unsigned vmid, uint64_t pd_addr)
 {
        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
 
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-       if (vm_id < 8) {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+       if (vmid < 8) {
+               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
        } else {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
        }
        amdgpu_ring_write(ring, pd_addr >> 12);
 
        /* flush TLB */
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-       amdgpu_ring_write(ring, 1 << vm_id);
+       amdgpu_ring_write(ring, 1 << vmid);
 
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
 
        entry->src_id = dw[0] & 0xff;
        entry->src_data[0] = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
-       entry->vm_id = (dw[2] >> 8) & 0xff;
+       entry->vmid = (dw[2] >> 8) & 0xff;
        entry->pas_id = (dw[2] >> 16) & 0xffff;
 
        /* wptr/rptr are in bytes! */
 
 
 static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
-                                 unsigned vm_id, bool ctx_switch)
+                                 unsigned vmid, bool ctx_switch)
 {
        u32 header, control = 0;
 
        else
                header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-       control |= ib->length_dw | (vm_id << 24);
+       control |= ib->length_dw | (vmid << 24);
 
        amdgpu_ring_write(ring, header);
        amdgpu_ring_write(ring,
 }
 
 static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                       unsigned vm_id, uint64_t pd_addr)
+                                       unsigned vmid, uint64_t pd_addr)
 {
        int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
                                 WRITE_DATA_DST_SEL(0)));
-       if (vm_id < 8) {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id ));
+       if (vmid < 8) {
+               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
        } else {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
        }
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, pd_addr >> 12);
                                 WRITE_DATA_DST_SEL(0)));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
        amdgpu_ring_write(ring, 0);
-       amdgpu_ring_write(ring, 1 << vm_id);
+       amdgpu_ring_write(ring, 1 << vmid);
 
        /* wait for the invalidate to complete */
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
 
  */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                      struct amdgpu_ib *ib,
-                                     unsigned vm_id, bool ctx_switch)
+                                     unsigned vmid, bool ctx_switch)
 {
        u32 header, control = 0;
 
        else
                header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-       control |= ib->length_dw | (vm_id << 24);
+       control |= ib->length_dw | (vmid << 24);
 
        amdgpu_ring_write(ring, header);
        amdgpu_ring_write(ring,
 
 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                                          struct amdgpu_ib *ib,
-                                         unsigned vm_id, bool ctx_switch)
+                                         unsigned vmid, bool ctx_switch)
 {
-       u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
+       u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        amdgpu_ring_write(ring,
  * using the CP (CIK).
  */
 static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                       unsigned vm_id, uint64_t pd_addr)
+                                       unsigned vmid, uint64_t pd_addr)
 {
        int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                                 WRITE_DATA_DST_SEL(0)));
-       if (vm_id < 8) {
+       if (vmid < 8) {
                amdgpu_ring_write(ring,
-                                 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+                                 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
        } else {
                amdgpu_ring_write(ring,
-                                 (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+                                 (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
        }
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, pd_addr >> 12);
                                 WRITE_DATA_DST_SEL(0)));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
        amdgpu_ring_write(ring, 0);
-       amdgpu_ring_write(ring, 1 << vm_id);
+       amdgpu_ring_write(ring, 1 << vmid);
 
        /* wait for the invalidate to complete */
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
 
 
 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                      struct amdgpu_ib *ib,
-                                     unsigned vm_id, bool ctx_switch)
+                                     unsigned vmid, bool ctx_switch)
 {
        u32 header, control = 0;
 
        else
                header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-       control |= ib->length_dw | (vm_id << 24);
+       control |= ib->length_dw | (vmid << 24);
 
        if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
                control |= INDIRECT_BUFFER_PRE_ENB(1);
 
 static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                                          struct amdgpu_ib *ib,
-                                         unsigned vm_id, bool ctx_switch)
+                                         unsigned vmid, bool ctx_switch)
 {
-       u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
+       u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        amdgpu_ring_write(ring,
 }
 
 static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                       unsigned vm_id, uint64_t pd_addr)
+                                       unsigned vmid, uint64_t pd_addr)
 {
        int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                                 WRITE_DATA_DST_SEL(0)) |
                                 WR_CONFIRM);
-       if (vm_id < 8) {
+       if (vmid < 8) {
                amdgpu_ring_write(ring,
-                                 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+                                 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
        } else {
                amdgpu_ring_write(ring,
-                                 (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+                                 (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
        }
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, pd_addr >> 12);
                                 WRITE_DATA_DST_SEL(0)));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
        amdgpu_ring_write(ring, 0);
-       amdgpu_ring_write(ring, 1 << vm_id);
+       amdgpu_ring_write(ring, 1 << vmid);
 
        /* wait for the invalidate to complete */
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
 
 
 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                       struct amdgpu_ib *ib,
-                                      unsigned vm_id, bool ctx_switch)
+                                      unsigned vmid, bool ctx_switch)
 {
        u32 header, control = 0;
 
        else
                header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-       control |= ib->length_dw | (vm_id << 24);
+       control |= ib->length_dw | (vmid << 24);
 
        if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
                control |= INDIRECT_BUFFER_PRE_ENB(1);
 
 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                                           struct amdgpu_ib *ib,
-                                          unsigned vm_id, bool ctx_switch)
+                                          unsigned vmid, bool ctx_switch)
 {
-        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
+        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
 }
 
 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                       unsigned vm_id, uint64_t pd_addr)
+                                       unsigned vmid, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
        int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
        uint64_t flags = AMDGPU_PTE_VALID;
        unsigned eng = ring->vm_inv_eng;
 
        pd_addr |= flags;
 
        gfx_v9_0_write_data_to_reg(ring, usepfp, true,
-                                  hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
+                                  hub->ctx0_ptb_addr_lo32 + (2 * vmid),
                                   lower_32_bits(pd_addr));
 
        gfx_v9_0_write_data_to_reg(ring, usepfp, true,
-                                  hub->ctx0_ptb_addr_hi32 + (2 * vm_id),
+                                  hub->ctx0_ptb_addr_hi32 + (2 * vmid),
                                   upper_32_bits(pd_addr));
 
        gfx_v9_0_write_data_to_reg(ring, usepfp, true,
 
        /* wait for the invalidate to complete */
        gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
-                             eng, 0, 1 << vm_id, 1 << vm_id, 0x20);
+                             eng, 0, 1 << vmid, 1 << vmid, 0x20);
 
        /* compute doesn't have PFP */
        if (usepfp) {
 
                                struct amdgpu_irq_src *source,
                                struct amdgpu_iv_entry *entry)
 {
-       struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
+       struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
        uint32_t status = 0;
        u64 addr;
 
 
        if (printk_ratelimit()) {
                dev_err(adev->dev,
-                       "[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
-                       entry->vm_id_src ? "mmhub" : "gfxhub",
-                       entry->src_id, entry->ring_id, entry->vm_id,
+                       "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pas_id:%u)\n",
+                       entry->vmid_src ? "mmhub" : "gfxhub",
+                       entry->src_id, entry->ring_id, entry->vmid,
                        entry->pas_id);
                dev_err(adev->dev, "  at page 0x%016llx from %d\n",
                        addr, entry->client_id);
        adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 }
 
-static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
+static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
 {
        u32 req = 0;
 
-       /* invalidate using legacy mode on vm_id*/
+       /* invalidate using legacy mode on vmid */
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
-                           PER_VMID_INVALIDATE_REQ, 1 << vm_id);
+                           PER_VMID_INVALIDATE_REQ, 1 << vmid);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
 
        entry->src_id = dw[0] & 0xff;
        entry->src_data[0] = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
-       entry->vm_id = (dw[2] >> 8) & 0xff;
+       entry->vmid = (dw[2] >> 8) & 0xff;
        entry->pas_id = (dw[2] >> 16) & 0xffff;
 
        /* wptr/rptr are in bytes! */
 
  */
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
                                   struct amdgpu_ib *ib,
-                                  unsigned vm_id, bool ctx_switch)
+                                  unsigned vmid, bool ctx_switch)
 {
-       u32 vmid = vm_id & 0xf;
-
        /* IB packet must end on an 8 DW boundary */
        sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
-                         SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+                         SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
        /* base must be 32 byte aligned */
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
  * using sDMA (VI).
  */
 static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                        unsigned vm_id, uint64_t pd_addr)
+                                        unsigned vmid, uint64_t pd_addr)
 {
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-       if (vm_id < 8) {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+       if (vmid < 8) {
+               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
        } else {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
        }
        amdgpu_ring_write(ring, pd_addr >> 12);
 
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-       amdgpu_ring_write(ring, 1 << vm_id);
+       amdgpu_ring_write(ring, 1 << vmid);
 
        /* wait for flush */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
 
  */
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
                                   struct amdgpu_ib *ib,
-                                  unsigned vm_id, bool ctx_switch)
+                                  unsigned vmid, bool ctx_switch)
 {
-       u32 vmid = vm_id & 0xf;
-
        /* IB packet must end on an 8 DW boundary */
        sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
-                         SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+                         SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
        /* base must be 32 byte aligned */
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
  * using sDMA (VI).
  */
 static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                        unsigned vm_id, uint64_t pd_addr)
+                                        unsigned vmid, uint64_t pd_addr)
 {
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-       if (vm_id < 8) {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+       if (vmid < 8) {
+               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
        } else {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
        }
        amdgpu_ring_write(ring, pd_addr >> 12);
 
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-       amdgpu_ring_write(ring, 1 << vm_id);
+       amdgpu_ring_write(ring, 1 << vmid);
 
        /* wait for flush */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
 
  */
 static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
                                        struct amdgpu_ib *ib,
-                                       unsigned vm_id, bool ctx_switch)
+                                       unsigned vmid, bool ctx_switch)
 {
-       u32 vmid = vm_id & 0xf;
-
        /* IB packet must end on an 8 DW boundary */
        sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
-                         SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+                         SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
        /* base must be 32 byte aligned */
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
  * using sDMA (VEGA10).
  */
 static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                        unsigned vm_id, uint64_t pd_addr)
+                                        unsigned vmid, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
        uint64_t flags = AMDGPU_PTE_VALID;
        unsigned eng = ring->vm_inv_eng;
 
 
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-       amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vm_id * 2);
+       amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2);
        amdgpu_ring_write(ring, lower_32_bits(pd_addr));
 
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-       amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vm_id * 2);
+       amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vmid * 2);
        amdgpu_ring_write(ring, upper_32_bits(pd_addr));
 
        /* flush TLB */
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
        amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
        amdgpu_ring_write(ring, 0);
-       amdgpu_ring_write(ring, 1 << vm_id); /* reference */
-       amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+       amdgpu_ring_write(ring, 1 << vmid); /* reference */
+       amdgpu_ring_write(ring, 1 << vmid); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
 }
 
 
 static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
                                struct amdgpu_ib *ib,
-                               unsigned vm_id, bool ctx_switch)
+                               unsigned vmid, bool ctx_switch)
 {
        /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
         * Pad as necessary with NOPs.
         */
        while ((lower_32_bits(ring->wptr) & 7) != 5)
                amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
-       amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
+       amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
        amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
        amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
 
  * using sDMA (VI).
  */
 static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                     unsigned vm_id, uint64_t pd_addr)
+                                     unsigned vmid, uint64_t pd_addr)
 {
        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
-       if (vm_id < 8)
-               amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+       if (vmid < 8)
+               amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
        else
-               amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+               amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
        amdgpu_ring_write(ring, pd_addr >> 12);
 
        /* bits 0-7 are the VM contexts0-7 */
        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
        amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
-       amdgpu_ring_write(ring, 1 << vm_id);
+       amdgpu_ring_write(ring, 1 << vmid);
 
        /* wait for invalidate to complete */
        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
        amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
        amdgpu_ring_write(ring, 0xff << 16); /* retry */
-       amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+       amdgpu_ring_write(ring, 1 << vmid); /* mask */
        amdgpu_ring_write(ring, 0); /* value */
        amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
 }
 
        entry->src_id = dw[0] & 0xff;
        entry->src_data[0] = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
-       entry->vm_id = (dw[2] >> 8) & 0xff;
+       entry->vmid = (dw[2] >> 8) & 0xff;
 
        adev->irq.ih.rptr += 16;
 }
 
        entry->src_id = dw[0] & 0xff;
        entry->src_data[0] = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
-       entry->vm_id = (dw[2] >> 8) & 0xff;
+       entry->vmid = (dw[2] >> 8) & 0xff;
        entry->pas_id = (dw[2] >> 16) & 0xffff;
 
        /* wptr/rptr are in bytes! */
 
  */
 static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
-                                 unsigned vm_id, bool ctx_switch)
+                                 unsigned vmid, bool ctx_switch)
 {
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
        amdgpu_ring_write(ring, ib->gpu_addr);
 
  */
 static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
-                                 unsigned vm_id, bool ctx_switch)
+                                 unsigned vmid, bool ctx_switch)
 {
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
 
  */
 static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
-                                 unsigned vm_id, bool ctx_switch)
+                                 unsigned vmid, bool ctx_switch)
 {
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
-       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, vmid);
 
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
 {
        amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
-       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
 }
 
 static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                        unsigned vm_id, uint64_t pd_addr)
+                                        unsigned vmid, uint64_t pd_addr)
 {
        uint32_t reg;
 
-       if (vm_id < 8)
-               reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
+       if (vmid < 8)
+               reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
        else
-               reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
+               reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
 
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, reg << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
-       amdgpu_ring_write(ring, 1 << vm_id);
+       amdgpu_ring_write(ring, 1 << vmid);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0x8);
 
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
-       amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+       amdgpu_ring_write(ring, 1 << vmid); /* mask */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0xC);
 }
 }
 
 static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
-        unsigned int vm_id, uint64_t pd_addr)
+        unsigned int vmid, uint64_t pd_addr)
 {
        amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
-       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, pd_addr >> 12);
 
        amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
-       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, vmid);
 }
 
 static bool uvd_v6_0_is_idle(void *handle)
 
  */
 static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
-                                 unsigned vm_id, bool ctx_switch)
+                                 unsigned vmid, bool ctx_switch)
 {
        struct amdgpu_device *adev = ring->adev;
 
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
-       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, vmid);
 
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
 {
        amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
-       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
 }
 
 static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                       unsigned vm_id, uint64_t pd_addr)
+                                       unsigned vmid, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
        uint64_t flags = AMDGPU_PTE_VALID;
        unsigned eng = ring->vm_inv_eng;
        uint32_t data0, data1, mask;
        amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
        pd_addr |= flags;
 
-       data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
+       data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
        data1 = upper_32_bits(pd_addr);
        uvd_v7_0_vm_reg_write(ring, data0, data1);
 
-       data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+       data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
        data1 = lower_32_bits(pd_addr);
        uvd_v7_0_vm_reg_write(ring, data0, data1);
 
-       data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+       data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
        data1 = lower_32_bits(pd_addr);
        mask = 0xffffffff;
        uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
 
        /* wait for flush */
        data0 = (hub->vm_inv_eng0_ack + eng) << 2;
-       data1 = 1 << vm_id;
-       mask =  1 << vm_id;
+       data1 = 1 << vmid;
+       mask =  1 << vmid;
        uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
 }
 
 }
 
 static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                        unsigned int vm_id, uint64_t pd_addr)
+                        unsigned int vmid, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
        uint64_t flags = AMDGPU_PTE_VALID;
        unsigned eng = ring->vm_inv_eng;
 
        pd_addr |= flags;
 
        amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
-       amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
+       amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
        amdgpu_ring_write(ring, upper_32_bits(pd_addr));
 
        amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
-       amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+       amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
        amdgpu_ring_write(ring, lower_32_bits(pd_addr));
 
        amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
-       amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+       amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
        amdgpu_ring_write(ring, 0xffffffff);
        amdgpu_ring_write(ring, lower_32_bits(pd_addr));
 
        /* wait for flush */
        amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
        amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
-       amdgpu_ring_write(ring, 1 << vm_id);
-       amdgpu_ring_write(ring, 1 << vm_id);
+       amdgpu_ring_write(ring, 1 << vmid);
+       amdgpu_ring_write(ring, 1 << vmid);
 }
 
 #if 0
 
 }
 
 static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
 {
        amdgpu_ring_write(ring, VCE_CMD_IB_VM);
-       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
 }
 
 static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
-                        unsigned int vm_id, uint64_t pd_addr)
+                        unsigned int vmid, uint64_t pd_addr)
 {
        amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
-       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, pd_addr >> 12);
 
        amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
-       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, VCE_CMD_END);
 }
 
 
 #endif
 
 static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
 {
        amdgpu_ring_write(ring, VCE_CMD_IB_VM);
-       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
 }
 
 static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
-                        unsigned int vm_id, uint64_t pd_addr)
+                        unsigned int vmid, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
        uint64_t flags = AMDGPU_PTE_VALID;
        unsigned eng = ring->vm_inv_eng;
 
        pd_addr |= flags;
 
        amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
-       amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
+       amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
        amdgpu_ring_write(ring, upper_32_bits(pd_addr));
 
        amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
-       amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+       amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
        amdgpu_ring_write(ring, lower_32_bits(pd_addr));
 
        amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
-       amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+       amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
        amdgpu_ring_write(ring, 0xffffffff);
        amdgpu_ring_write(ring, lower_32_bits(pd_addr));
 
        /* wait for flush */
        amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
        amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
-       amdgpu_ring_write(ring, 1 << vm_id);
-       amdgpu_ring_write(ring, 1 << vm_id);
+       amdgpu_ring_write(ring, 1 << vmid);
+       amdgpu_ring_write(ring, 1 << vmid);
 }
 
 static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
 
  */
 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
-                                 unsigned vm_id, bool ctx_switch)
+                                 unsigned vmid, bool ctx_switch)
 {
        struct amdgpu_device *adev = ring->adev;
 
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
-       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, vmid);
 
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
 }
 
 static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                       unsigned vm_id, uint64_t pd_addr)
+                                       unsigned vmid, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
        uint64_t flags = AMDGPU_PTE_VALID;
        unsigned eng = ring->vm_inv_eng;
        uint32_t data0, data1, mask;
        amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
        pd_addr |= flags;
 
-       data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
+       data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
        data1 = upper_32_bits(pd_addr);
        vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
 
-       data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+       data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
        data1 = lower_32_bits(pd_addr);
        vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
 
-       data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+       data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
        data1 = lower_32_bits(pd_addr);
        mask = 0xffffffff;
        vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
 
        /* wait for flush */
        data0 = (hub->vm_inv_eng0_ack + eng) << 2;
-       data1 = 1 << vm_id;
-       mask =  1 << vm_id;
+       data1 = 1 << vmid;
+       mask =  1 << vmid;
        vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
 }
 
  * Write enc ring commands to execute the indirect buffer
  */
 static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
 {
        amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
-       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
 }
 
 static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                        unsigned int vm_id, uint64_t pd_addr)
+                        unsigned int vmid, uint64_t pd_addr)
 {
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+       uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
        uint64_t flags = AMDGPU_PTE_VALID;
        unsigned eng = ring->vm_inv_eng;
 
 
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
        amdgpu_ring_write(ring,
-                         (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
+                         (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
        amdgpu_ring_write(ring, upper_32_bits(pd_addr));
 
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
        amdgpu_ring_write(ring,
-                         (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+                         (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
        amdgpu_ring_write(ring, lower_32_bits(pd_addr));
 
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
        amdgpu_ring_write(ring,
-                         (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+                         (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
        amdgpu_ring_write(ring, 0xffffffff);
        amdgpu_ring_write(ring, lower_32_bits(pd_addr));
 
        /* wait for flush */
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
        amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
-       amdgpu_ring_write(ring, 1 << vm_id);
-       amdgpu_ring_write(ring, 1 << vm_id);
+       amdgpu_ring_write(ring, 1 << vmid);
+       amdgpu_ring_write(ring, 1 << vmid);
 }
 
 static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
 
        entry->client_id = dw[0] & 0xff;
        entry->src_id = (dw[0] >> 8) & 0xff;
        entry->ring_id = (dw[0] >> 16) & 0xff;
-       entry->vm_id = (dw[0] >> 24) & 0xf;
-       entry->vm_id_src = (dw[0] >> 31);
+       entry->vmid = (dw[0] >> 24) & 0xf;
+       entry->vmid_src = (dw[0] >> 31);
        entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
        entry->timestamp_src = dw[2] >> 31;
        entry->pas_id = dw[3] & 0xffff;