/* Our framebuffer is the entirety of fbdev's system memory */
                info->fix.smem_start =
-                       (unsigned long)(ggtt->gmadr.start + vma->node.start);
+                       (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
                info->fix.smem_len = vma->size;
        }
 
 
                 const struct i915_vma *vma,
                 unsigned int flags)
 {
-       if (vma->node.size < entry->pad_to_size)
+       const u64 start = i915_vma_offset(vma);
+       const u64 size = i915_vma_size(vma);
+
+       if (size < entry->pad_to_size)
                return true;
 
-       if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
+       if (entry->alignment && !IS_ALIGNED(start, entry->alignment))
                return true;
 
        if (flags & EXEC_OBJECT_PINNED &&
-           vma->node.start != entry->offset)
+           start != entry->offset)
                return true;
 
        if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
-           vma->node.start < BATCH_OFFSET_BIAS)
+           start < BATCH_OFFSET_BIAS)
                return true;
 
        if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
-           (vma->node.start + vma->node.size + 4095) >> 32)
+           (start + size + 4095) >> 32)
                return true;
 
        if (flags & __EXEC_OBJECT_NEEDS_MAP &&
        int err;
 
        if (vma->node.size)
-               pin_flags = vma->node.start;
+       pin_flags = __i915_vma_offset(vma);
        else
                pin_flags = entry->offset & PIN_OFFSET_MASK;
 
        if (err)
                return err;
 
-       if (entry->offset != vma->node.start) {
-               entry->offset = vma->node.start | UPDATE;
+       if (entry->offset != i915_vma_offset(vma)) {
+               entry->offset = i915_vma_offset(vma) | UPDATE;
                eb->args->flags |= __EXEC_HAS_RELOC;
        }
 
                        return err;
 
                if (!err) {
-                       if (entry->offset != vma->node.start) {
-                               entry->offset = vma->node.start | UPDATE;
+                       if (entry->offset != i915_vma_offset(vma)) {
+                               entry->offset = i915_vma_offset(vma) | UPDATE;
                                eb->args->flags |= __EXEC_HAS_RELOC;
                        }
                } else {
 relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
                  const struct i915_vma *target)
 {
-       return gen8_canonical_addr((int)reloc->delta + target->node.start);
+       return gen8_canonical_addr((int)reloc->delta + i915_vma_offset(target));
 }
 
 static void reloc_cache_init(struct reloc_cache *cache,
                        if (err) /* no inactive aperture space, use cpu reloc */
                                return NULL;
                } else {
-                       cache->node.start = vma->node.start;
+                       cache->node.start = i915_ggtt_offset(vma);
                        cache->node.mm = (void *)vma;
                }
        }
         * more work needs to be done.
         */
        if (!DBG_FORCE_RELOC &&
-           gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
+           gen8_canonical_addr(i915_vma_offset(target->vma)) == reloc->presumed_offset)
                return 0;
 
        /* Check that the relocation address is valid... */
        }
 
        err = rq->context->engine->emit_bb_start(rq,
-                                                batch->node.start +
+                                                i915_vma_offset(batch) +
                                                 eb->batch_start_offset,
                                                 batch_len,
                                                 eb->batch_flags);
                GEM_BUG_ON(intel_context_is_parallel(rq->context));
                GEM_BUG_ON(eb->batch_start_offset);
                err = rq->context->engine->emit_bb_start(rq,
-                                                        eb->trampoline->node.start +
+                                                        i915_vma_offset(eb->trampoline) +
                                                         batch_len, 0, 0);
                if (err)
                        return err;
 
        /* Finally, remap it using the new GTT offset */
        ret = remap_io_mapping(area,
                               area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
-                              (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+                              (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
                               min_t(u64, vma->size, area->vm_end - area->vm_start),
                               &ggtt->iomap);
        if (ret)
 
        mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
        list_for_each_entry_safe(vma, next,
                                 &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
-               unsigned long count = vma->node.size >> PAGE_SHIFT;
+               unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (!vma->iomap || i915_vma_is_active(vma))
 
                return true;
 
        size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
-       if (vma->node.size < size)
+       if (i915_vma_size(vma) < size)
                return false;
 
        alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
-       if (!IS_ALIGNED(vma->node.start, alignment))
+       if (!IS_ALIGNED(i915_ggtt_offset(vma), alignment))
                return false;
 
        return true;
 
         * Maintaining alignment is required to utilise huge pages in the ppGGT.
         */
        if (i915_gem_object_is_lmem(obj) &&
-           IS_ALIGNED(vma->node.start, SZ_2M) &&
+           IS_ALIGNED(i915_vma_offset(vma), SZ_2M) &&
            vma->page_sizes.sg & SZ_2M &&
            vma->resource->page_sizes_gtt < SZ_2M) {
                pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
 
                *cs++ = src_4t | dst_4t | BLT_DEPTH_32 | dst_pitch;
                *cs++ = 0;
                *cs++ = t->height << 16 | t->width;
-               *cs++ = lower_32_bits(dst->vma->node.start);
-               *cs++ = upper_32_bits(dst->vma->node.start);
+               *cs++ = lower_32_bits(i915_vma_offset(dst->vma));
+               *cs++ = upper_32_bits(i915_vma_offset(dst->vma));
                *cs++ = 0;
                *cs++ = src_pitch;
-               *cs++ = lower_32_bits(src->vma->node.start);
-               *cs++ = upper_32_bits(src->vma->node.start);
+               *cs++ = lower_32_bits(i915_vma_offset(src->vma));
+               *cs++ = upper_32_bits(i915_vma_offset(src->vma));
        } else {
                if (ver >= 6) {
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
                *cs++ = 0;
                *cs++ = t->height << 16 | t->width;
-               *cs++ = lower_32_bits(dst->vma->node.start);
+               *cs++ = lower_32_bits(i915_vma_offset(dst->vma));
                if (use_64b_reloc)
-                       *cs++ = upper_32_bits(dst->vma->node.start);
+                       *cs++ = upper_32_bits(i915_vma_offset(dst->vma));
                *cs++ = 0;
                *cs++ = src_pitch;
-               *cs++ = lower_32_bits(src->vma->node.start);
+               *cs++ = lower_32_bits(i915_vma_offset(src->vma));
                if (use_64b_reloc)
-                       *cs++ = upper_32_bits(src->vma->node.start);
+                       *cs++ = upper_32_bits(i915_vma_offset(src->vma));
        }
 
        *cs++ = MI_BATCH_BUFFER_END;
 {
        int err;
 
-       if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
+       if (drm_mm_node_allocated(&vma->node) && i915_vma_offset(vma) != addr) {
                err = i915_vma_unbind_unlocked(vma);
                if (err)
                        return err;
        if (err)
                return err;
 
+       GEM_BUG_ON(i915_vma_offset(vma) != addr);
        return 0;
 }
 
                err = igt_vma_move_to_active_unlocked(dst->vma, rq, 0);
        if (!err)
                err = rq->engine->emit_bb_start(rq,
-                                               t->batch->node.start,
-                                               t->batch->node.size,
+                                               i915_vma_offset(t->batch),
+                                               i915_vma_size(t->batch),
                                                0);
        i915_request_get(rq);
        i915_request_add(rq);
 
 
        *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
        *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE(engine->mmio_base));
-       *cmd++ = lower_32_bits(vma->node.start);
-       *cmd++ = upper_32_bits(vma->node.start);
+       *cmd++ = lower_32_bits(i915_vma_offset(vma));
+       *cmd++ = upper_32_bits(i915_vma_offset(vma));
        *cmd = MI_BATCH_BUFFER_END;
 
        __i915_gem_object_flush_map(rpcs, 0, 64);
        }
 
        err = rq->engine->emit_bb_start(rq,
-                                       batch->node.start, batch->node.size,
+                                       i915_vma_offset(batch),
+                                       i915_vma_size(batch),
                                        0);
        if (err)
                goto skip_request;
                        goto skip_request;
        }
 
-       err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+       err = engine->emit_bb_start(rq, i915_vma_offset(vma),
+                                   i915_vma_size(vma), 0);
        if (err)
                goto skip_request;
 
                *cmd++ = offset;
                *cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
                *cmd++ = reg;
-               *cmd++ = vma->node.start + result;
+               *cmd++ = i915_vma_offset(vma) + result;
                *cmd = MI_BATCH_BUFFER_END;
 
                i915_gem_object_flush_map(obj);
                        goto skip_request;
        }
 
-       err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
+       err = engine->emit_bb_start(rq, i915_vma_offset(vma),
+                                   i915_vma_size(vma), flags);
        if (err)
                goto skip_request;
 
 
 
                err = i915_vma_move_to_active(vma, rq, 0);
 
-               err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
+               err = engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
                i915_request_get(rq);
                i915_request_add(rq);
 
 
                goto err;
        }
 
-       GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
-       offset += vma->node.start;
+       GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > i915_vma_size(vma));
+       offset += i915_vma_offset(vma);
 
        for (n = 0; n < count; n++) {
                if (ver >= 8) {
                flags |= I915_DISPATCH_SECURE;
 
        err = rq->engine->emit_bb_start(rq,
-                                       batch->node.start, batch->node.size,
+                                       i915_vma_offset(batch),
+                                       i915_vma_size(batch),
                                        flags);
 
 skip_request:
 
 
 static u32 batch_addr(const struct batch_chunk *bc)
 {
-       return bc->vma->node.start;
+       return i915_vma_offset(bc->vma);
 }
 
 static void batch_add(struct batch_chunk *bc, const u32 d)
 
                                return ret;
                }
 
-               fence->start = vma->node.start;
+               GEM_BUG_ON(vma->fence_size > i915_vma_size(vma));
+               fence->start = i915_ggtt_offset(vma);
                fence->size = vma->fence_size;
                fence->stride = i915_gem_object_get_stride(vma->obj);
                fence->tiling = i915_gem_object_get_tiling(vma->obj);
 
                u32 s = rodata->batch[i];
 
                if (i * 4  == rodata->reloc[reloc_index]) {
-                       u64 r = s + so->vma->node.start;
+                       u64 r = s + i915_vma_offset(so->vma);
 
                        s = lower_32_bits(r);
                        if (HAS_64BIT_RELOC(i915)) {
 
        }
 
        ret = engine->emit_bb_start(rq,
-                                   engine->wa_ctx.vma->node.start, 0,
+                                   i915_vma_offset(engine->wa_ctx.vma), 0,
                                    0);
        if (ret)
                return ret;
 
                                goto out;
 
                        err = rq->engine->emit_bb_start(rq,
-                                                       batch->node.start, 8,
+                                                       i915_vma_offset(batch), 8,
                                                        0);
                        if (err)
                                goto out;
                                goto out;
 
                        err = rq->engine->emit_bb_start(rq,
-                                                       base->node.start, 8,
+                                                       i915_vma_offset(base), 8,
                                                        0);
                        if (err)
                                goto out;
                                goto out;
 
                        err = rq->engine->emit_bb_start(rq,
-                                                       nop->node.start,
-                                                       nop->node.size,
+                                                       i915_vma_offset(nop),
+                                                       i915_vma_size(nop),
                                                        0);
                        if (err)
                                goto out;
 
                MI_SEMAPHORE_POLL |
                MI_SEMAPHORE_SAD_EQ_SDD;
        *cs++ = 0;
-       *cs++ = lower_32_bits(vma->node.start);
-       *cs++ = upper_32_bits(vma->node.start);
+       *cs++ = lower_32_bits(i915_vma_offset(vma));
+       *cs++ = upper_32_bits(i915_vma_offset(vma));
 
        if (*prev) {
-               u64 offset = (*prev)->batch->node.start;
+               u64 offset = i915_vma_offset((*prev)->batch);
 
                /* Terminate the spinner in the next lower priority batch. */
                *cs++ = MI_STORE_DWORD_IMM_GEN4;
        err = i915_vma_move_to_active(vma, rq, 0);
        if (!err)
                err = rq->engine->emit_bb_start(rq,
-                                               vma->node.start,
+                                               i915_vma_offset(vma),
                                                PAGE_SIZE, 0);
        i915_vma_unlock(vma);
        i915_request_add(rq);
                *cs++ = MI_MATH_ADD;
                *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
 
-               addr = result->node.start + offset + i * sizeof(*cs);
+               addr = i915_vma_offset(result) + offset + i * sizeof(*cs);
                *cs++ = MI_STORE_REGISTER_MEM_GEN8;
                *cs++ = CS_GPR(engine, 2 * i);
                *cs++ = lower_32_bits(addr);
                        MI_SEMAPHORE_POLL |
                        MI_SEMAPHORE_SAD_GTE_SDD;
                *cs++ = i;
-               *cs++ = lower_32_bits(result->node.start);
-               *cs++ = upper_32_bits(result->node.start);
+               *cs++ = lower_32_bits(i915_vma_offset(result));
+               *cs++ = upper_32_bits(i915_vma_offset(result));
        }
 
        *cs++ = MI_BATCH_BUFFER_END;
                err = i915_vma_move_to_active(batch, rq, 0);
        if (!err)
                err = rq->engine->emit_bb_start(rq,
-                                               batch->node.start,
+                                               i915_vma_offset(batch),
                                                PAGE_SIZE, 0);
        i915_vma_unlock(batch);
        i915_vma_unpin(batch);
                err = i915_vma_move_to_active(vma, rq, 0);
                if (!err)
                        err = rq->engine->emit_bb_start(rq,
-                                                       vma->node.start,
+                                                       i915_vma_offset(vma),
                                                        PAGE_SIZE, 0);
                i915_vma_unlock(vma);
        }
 
 static u64 hws_address(const struct i915_vma *hws,
                       const struct i915_request *rq)
 {
-       return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
+       return i915_vma_offset(hws) +
+              offset_in_page(sizeof(u32) * rq->fence.context);
 }
 
 static struct i915_request *
 
                *batch++ = MI_NOOP;
                *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-               *batch++ = lower_32_bits(vma->node.start);
-               *batch++ = upper_32_bits(vma->node.start);
+               *batch++ = lower_32_bits(i915_vma_offset(vma));
+               *batch++ = upper_32_bits(i915_vma_offset(vma));
        } else if (GRAPHICS_VER(gt->i915) >= 6) {
                *batch++ = MI_STORE_DWORD_IMM_GEN4;
                *batch++ = 0;
 
                *batch++ = MI_NOOP;
                *batch++ = MI_BATCH_BUFFER_START | 1 << 8;
-               *batch++ = lower_32_bits(vma->node.start);
+               *batch++ = lower_32_bits(i915_vma_offset(vma));
        } else if (GRAPHICS_VER(gt->i915) >= 4) {
                *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *batch++ = 0;
 
                *batch++ = MI_NOOP;
                *batch++ = MI_BATCH_BUFFER_START | 2 << 6;
-               *batch++ = lower_32_bits(vma->node.start);
+               *batch++ = lower_32_bits(i915_vma_offset(vma));
        } else {
                *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                *batch++ = lower_32_bits(hws_address(hws, rq));
 
                *batch++ = MI_NOOP;
                *batch++ = MI_BATCH_BUFFER_START | 2 << 6;
-               *batch++ = lower_32_bits(vma->node.start);
+               *batch++ = lower_32_bits(i915_vma_offset(vma));
        }
        *batch++ = MI_BATCH_BUFFER_END; /* not reached */
        intel_gt_chipset_flush(engine->gt);
        if (GRAPHICS_VER(gt->i915) <= 5)
                flags |= I915_DISPATCH_SECURE;
 
-       err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+       err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
 
 cancel_rq:
        if (err) {
 
                while (len--) {
                        *cs++ = MI_STORE_REGISTER_MEM_GEN8;
                        *cs++ = hw[dw];
-                       *cs++ = lower_32_bits(scratch->node.start + x);
-                       *cs++ = upper_32_bits(scratch->node.start + x);
+                       *cs++ = lower_32_bits(i915_vma_offset(scratch) + x);
+                       *cs++ = upper_32_bits(i915_vma_offset(scratch) + x);
 
                        dw += 2;
                        x += 4;
 
        *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
        *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
-       *cs++ = lower_32_bits(b_before->node.start);
-       *cs++ = upper_32_bits(b_before->node.start);
+       *cs++ = lower_32_bits(i915_vma_offset(b_before));
+       *cs++ = upper_32_bits(i915_vma_offset(b_before));
 
        *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
        *cs++ = MI_SEMAPHORE_WAIT |
 
        *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
        *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
-       *cs++ = lower_32_bits(b_after->node.start);
-       *cs++ = upper_32_bits(b_after->node.start);
+       *cs++ = lower_32_bits(i915_vma_offset(b_after));
+       *cs++ = upper_32_bits(i915_vma_offset(b_after));
 
        intel_ring_advance(rq, cs);
 
 
        *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
        *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
-       *cs++ = lower_32_bits(batch->node.start);
-       *cs++ = upper_32_bits(batch->node.start);
+       *cs++ = lower_32_bits(i915_vma_offset(batch));
+       *cs++ = upper_32_bits(i915_vma_offset(batch));
 
        *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
        *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
 
        } else {
                *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
        }
-       *cs++ = vma->node.start + 4000;
+       *cs++ = i915_vma_offset(vma) + 4000;
        *cs++ = STACK_MAGIC;
 
        *cs++ = MI_BATCH_BUFFER_END;
 
                if (srm) {
                        *cs++ = MI_STORE_REGISTER_MEM_GEN8;
                        *cs++ = i915_mmio_reg_offset(CS_GPR(COUNT));
-                       *cs++ = lower_32_bits(vma->node.start + end * sizeof(*cs));
-                       *cs++ = upper_32_bits(vma->node.start + end * sizeof(*cs));
+                       *cs++ = lower_32_bits(i915_vma_offset(vma) + end * sizeof(*cs));
+                       *cs++ = upper_32_bits(i915_vma_offset(vma) + end * sizeof(*cs));
                }
        }
 
        *cs++ = MI_BATCH_BUFFER_START_GEN8;
-       *cs++ = lower_32_bits(vma->node.start + loop * sizeof(*cs));
-       *cs++ = upper_32_bits(vma->node.start + loop * sizeof(*cs));
+       *cs++ = lower_32_bits(i915_vma_offset(vma) + loop * sizeof(*cs));
+       *cs++ = upper_32_bits(i915_vma_offset(vma) + loop * sizeof(*cs));
        GEM_BUG_ON(cs - base > end);
 
        i915_gem_object_flush_map(obj);
                err = i915_vma_move_to_active(vma, rq, 0);
                if (!err)
                        err = rq->engine->emit_bb_start(rq,
-                                                       vma->node.start,
+                                                       i915_vma_offset(vma),
                                                        PAGE_SIZE, 0);
                i915_request_add(rq);
                if (err)
                err = i915_vma_move_to_active(vma, rq, 0);
                if (!err)
                        err = rq->engine->emit_bb_start(rq,
-                                                       vma->node.start,
+                                                       i915_vma_offset(vma),
                                                        PAGE_SIZE, 0);
                i915_request_add(rq);
                if (err)
 
        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
                struct i915_gem_ww_ctx ww;
-               u64 addr = scratch->node.start;
+               u64 addr = i915_vma_offset(scratch);
                struct i915_request *rq;
                u32 srm, lrm, rsvd;
                u32 expect;
                        goto err_request;
 
                err = engine->emit_bb_start(rq,
-                                           batch->node.start, PAGE_SIZE,
+                                           i915_vma_offset(batch), PAGE_SIZE,
                                            0);
                if (err)
                        goto err_request;
        }
 
        for (i = 0; i < engine->whitelist.count; i++) {
-               u64 offset = results->node.start + sizeof(u32) * i;
+               u64 offset = i915_vma_offset(results) + sizeof(u32) * i;
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
 
                /* Clear non priv flags */
                goto err_request;
 
        /* Perform the writes from an unprivileged "user" batch */
-       err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
+       err = engine->emit_bb_start(rq, i915_vma_offset(batch), 0, 0);
 
 err_request:
        err = request_add_sync(rq, err);
 
                /* Defer failure until attempted use */
                jump_whitelist = alloc_whitelist(batch_length);
 
-       shadow_addr = gen8_canonical_addr(shadow->node.start);
-       batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
+       shadow_addr = gen8_canonical_addr(i915_vma_offset(shadow));
+       batch_addr = gen8_canonical_addr(i915_vma_offset(batch) + batch_offset);
 
        /*
         * We use the batch length as size because the shadow object is as
 
 
                seq_printf(m, " (%s offset: %08llx, size: %08llx, pages: %s",
                           stringify_vma_type(vma),
-                          vma->node.start, vma->node.size,
+                          i915_vma_offset(vma), i915_vma_size(vma),
                           stringify_page_sizes(vma->resource->page_sizes_gtt,
                                                NULL, 0));
                if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
 
                goto err_add_request;
 
        err = rq->engine->emit_bb_start(rq,
-                                       vma->node.start, 0,
+                                       i915_vma_offset(vma), 0,
                                        I915_DISPATCH_SECURE);
        if (err)
                goto err_add_request;
 
        i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
                               obj->mm.rsgt, i915_gem_object_is_readonly(obj),
                               i915_gem_object_is_lmem(obj), obj->mm.region,
-                              vma->ops, vma->private, vma->node.start,
-                              vma->node.size, vma->size);
+                              vma->ops, vma->private, __i915_vma_offset(vma),
+                              __i915_vma_size(vma), vma->size);
 }
 
 /**
 
        lockdep_assert_held(&vma->vm->mutex);
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-       GEM_BUG_ON(vma->size > vma->node.size);
+       GEM_BUG_ON(vma->size > i915_vma_size(vma));
 
        if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
                                              vma->node.size,
                                                          vma->obj->base.size);
                } else if (i915_vma_is_map_and_fenceable(vma)) {
                        ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
-                                               vma->node.start,
-                                               vma->node.size);
+                                               i915_vma_offset(vma),
+                                               i915_vma_size(vma));
                } else {
                        ptr = (void __iomem *)
                                i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
        if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
                return true;
 
-       if (vma->node.size < size)
+       if (i915_vma_size(vma) < size)
                return true;
 
        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
-       if (alignment && !IS_ALIGNED(vma->node.start, alignment))
+       if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
                return true;
 
        if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
                return true;
 
        if (flags & PIN_OFFSET_BIAS &&
-           vma->node.start < (flags & PIN_OFFSET_MASK))
+           i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
                return true;
 
        if (flags & PIN_OFFSET_FIXED &&
-           vma->node.start != (flags & PIN_OFFSET_MASK))
+           i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
                return true;
 
        return false;
        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!vma->fence_size);
 
-       fenceable = (vma->node.size >= vma->fence_size &&
-                    IS_ALIGNED(vma->node.start, vma->fence_alignment));
+       fenceable = (i915_vma_size(vma) >= vma->fence_size &&
+                    IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));
 
-       mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
+       mappable = i915_ggtt_offset(vma) + vma->fence_size <=
+                  i915_vm_to_ggtt(vma->vm)->mappable_end;
 
        if (mappable && fenceable)
                set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
 
        return !list_empty(&vma->closed_link);
 }
 
+/* Internal use only. */
+static inline u64 __i915_vma_size(const struct i915_vma *vma)
+{
+       return vma->node.size;
+}
+
+/**
+ * i915_vma_size - Obtain the va range size of the vma
+ * @vma: The vma
+ *
+ * GPU virtual address space may be allocated with padding. This
+ * function returns the effective virtual address range size
+ * with padding subtracted.
+ *
+ * Return: The effective virtual address range size.
+ */
+static inline u64 i915_vma_size(const struct i915_vma *vma)
+{
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+       return __i915_vma_size(vma);
+}
+
+/* Internal use only. */
+static inline u64 __i915_vma_offset(const struct i915_vma *vma)
+{
+       return vma->node.start;
+}
+
+/**
+ * i915_vma_offset - Obtain the va offset of the vma
+ * @vma: The vma
+ *
+ * GPU virtual address space may be allocated with padding. This
+ * function returns the effective virtual address offset the gpu
+ * should use to access the bound data.
+ *
+ * Return: The effective virtual address offset.
+ */
+static inline u64 i915_vma_offset(const struct i915_vma *vma)
+{
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+       return __i915_vma_offset(vma);
+}
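+
+/*
+ * A minimal usage sketch (illustrative only, mirroring the conversions in
+ * this change): callers that previously read vma->node.start/size directly
+ * go through the wrappers above instead, e.g. when emitting a batch:
+ *
+ *	err = engine->emit_bb_start(rq, i915_vma_offset(vma),
+ *				    i915_vma_size(vma), 0);
+ *
+ * The __i915_vma_offset()/__i915_vma_size() forms skip the
+ * drm_mm_node_allocated() assertion and are marked for internal use only.
+ */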
+
 static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
 {
        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-       GEM_BUG_ON(upper_32_bits(vma->node.start));
-       GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
-       return lower_32_bits(vma->node.start);
+       GEM_BUG_ON(upper_32_bits(i915_vma_offset(vma)));
+       GEM_BUG_ON(upper_32_bits(i915_vma_offset(vma) +
+                                i915_vma_size(vma) - 1));
+       return lower_32_bits(i915_vma_offset(vma));
 }
 
 static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
 
  * @mr: The memory region of the object pointed to by the vma.
  * @ops: Pointer to the backend i915_vma_ops.
  * @private: Bind backend private info.
- * @start: Offset into the address space of bind range start.
- * @node_size: Size of the allocated range manager node.
+ * @start: Offset into the address space of bind range start. Note that
+ * this is after any padding that might have been allocated.
+ * @node_size: Size of the allocated range manager node with padding
+ * subtracted.
  * @vma_size: Bind size.
  * @page_sizes_gtt: Resulting page sizes from the bind operation.
  * @bound_flags: Flags indicating binding status.
  * @mr: The memory region of the object the vma points to.
  * @ops: The backend ops.
  * @private: Bind backend private info.
- * @start: Offset into the address space of bind range start.
- * @node_size: Size of the allocated range manager node.
+ * @start: Offset into the address space of bind range start after padding.
+ * @node_size: Size of the allocated range manager node minus padding.
  * @size: Bind size.
  *
  * Initializes a vma resource allocated using i915_vma_resource_alloc().
 
                return request;
 
        err = engine->emit_bb_start(request,
-                                   batch->node.start,
-                                   batch->node.size,
+                                   i915_vma_offset(batch),
+                                   i915_vma_size(batch),
                                    I915_DISPATCH_SECURE);
        if (err)
                goto out_request;
 
        if (ver >= 8) {
                *cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-               *cmd++ = lower_32_bits(vma->node.start);
-               *cmd++ = upper_32_bits(vma->node.start);
+               *cmd++ = lower_32_bits(i915_vma_offset(vma));
+               *cmd++ = upper_32_bits(i915_vma_offset(vma));
        } else if (ver >= 6) {
                *cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
-               *cmd++ = lower_32_bits(vma->node.start);
+               *cmd++ = lower_32_bits(i915_vma_offset(vma));
        } else {
                *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
-               *cmd++ = lower_32_bits(vma->node.start);
+               *cmd++ = lower_32_bits(i915_vma_offset(vma));
        }
        *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
 
                GEM_BUG_ON(err);
 
                err = engine->emit_bb_start(request[idx],
-                                           batch->node.start,
-                                           batch->node.size,
+                                           i915_vma_offset(batch),
+                                           i915_vma_size(batch),
                                            0);
                GEM_BUG_ON(err);
                request[idx]->batch = batch;
                GEM_BUG_ON(err);
 
                err = engine->emit_bb_start(request[idx],
-                                           batch->node.start,
-                                           batch->node.size,
+                                           i915_vma_offset(batch),
+                                           i915_vma_size(batch),
                                            0);
                GEM_BUG_ON(err);
                request[idx]->batch = batch;
 
 static u64 hws_address(const struct i915_vma *hws,
                       const struct i915_request *rq)
 {
-       return hws->node.start + seqno_offset(rq->fence.context);
+       return i915_vma_offset(hws) + seqno_offset(rq->fence.context);
 }
 
 struct i915_request *
                *batch++ = MI_BATCH_BUFFER_START;
        else
                *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
-       *batch++ = lower_32_bits(vma->node.start);
-       *batch++ = upper_32_bits(vma->node.start);
+       *batch++ = lower_32_bits(i915_vma_offset(vma));
+       *batch++ = upper_32_bits(i915_vma_offset(vma));
 
        *batch++ = MI_BATCH_BUFFER_END; /* not reached */
 
        flags = 0;
        if (GRAPHICS_VER(rq->engine->i915) <= 5)
                flags |= I915_DISPATCH_SECURE;
-       err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+       err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
 
 cancel_rq:
        if (err) {