                intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                io_mapping_unmap_atomic((void __iomem *)vaddr);
 
-               if (cache->node.allocated) {
+               if (drm_mm_node_allocated(&cache->node)) {
                        ggtt->vm.clear_range(&ggtt->vm,
                                             cache->node.start,
                                             cache->node.size);
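
This first hunk appears to be the relocation-cache teardown in
i915_gem_execbuffer.c: if a scratch node had been reserved, its GGTT
range is scrubbed before the node is released. The conversion itself is
mechanical, because at this point the helper is a trivial wrapper around
the very field it hides; from memory, the definition in
include/drm/drm_mm.h is approximately:

  static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
  {
          return node->allocated;
  }

Routing every reader through the accessor is what later lets the bool be
replaced (for example, by a bit in a flags word) without touching any of
these callers again.
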
        }
 
        offset = cache->node.start;
-       if (cache->node.allocated) {
+       if (drm_mm_node_allocated(&cache->node)) {
                ggtt->vm.insert_page(&ggtt->vm,
                                     i915_gem_object_get_dma_address(obj, page),
                                     offset, I915_CACHE_NONE, 0);
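
This is the lookup side of the same cache: when the object could not be
pinned into the mappable aperture, the code falls back to a single
reserved page that is rebound for every relocation. A simplified sketch
of the two addressing cases (the else branch is paraphrased from memory,
not quoted):

  if (drm_mm_node_allocated(&cache->node)) {
          /* fallback: rebind the one reserved GGTT page to the page we need */
          ggtt->vm.insert_page(&ggtt->vm,
                               i915_gem_object_get_dma_address(obj, page),
                               cache->node.start, I915_CACHE_NONE, 0);
          offset = cache->node.start;
  } else {
          /* whole object pinned contiguously: just index into the binding */
          offset = cache->node.start + (page << PAGE_SHIFT);
  }
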
 
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
                        goto out_unlock;
-               GEM_BUG_ON(!node.allocated);
+               GEM_BUG_ON(!drm_mm_node_allocated(&node));
        }
 
        mutex_unlock(&i915->drm.struct_mutex);
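
The GEM_BUG_ON() hunks appear to be in the slow GTT pread/pwrite paths
in i915_gem.c. Setup first tries to pin the whole object into the
mappable aperture and, failing that, reserves a single page of GGTT
address space to use as a sliding window. From memory,
insert_mappable_node() is roughly the following (argument order
approximate; check the tree):

  static int insert_mappable_node(struct i915_ggtt *ggtt,
                                  struct drm_mm_node *node, u32 size)
  {
          memset(node, 0, sizeof(*node));
          return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
                                             size, 0, I915_COLOR_UNEVICTABLE,
                                             0, ggtt->mappable_end,
                                             DRM_MM_INSERT_LOW);
  }

On success the node is by definition allocated, which is exactly what
the converted GEM_BUG_ON() asserts.
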
                unsigned page_offset = offset_in_page(offset);
                unsigned page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
-               if (node.allocated) {
+               if (drm_mm_node_allocated(&node)) {
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                             node.start, I915_CACHE_NONE, 0);
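
The copy loop then advances in at most page-sized chunks, so the single
reserved window only ever needs to cover the page under the cursor. The
chunking arithmetic is easiest to see in isolation; a standalone
demonstration (userspace C, PAGE_SIZE hard-coded for illustration):

  #include <stdio.h>

  #define PAGE_SIZE 4096u
  #define offset_in_page(off) ((unsigned int)((off) & (PAGE_SIZE - 1)))

  int main(void)
  {
          unsigned long long offset = PAGE_SIZE - 100; /* straddles a page */
          unsigned int remain = 4196;

          while (remain) {
                  unsigned int page_offset = offset_in_page(offset);
                  unsigned int page_length = PAGE_SIZE - page_offset;

                  page_length = remain < page_length ? remain : page_length;
                  printf("copy %u bytes at page offset %u\n",
                         page_length, page_offset);
                  remain -= page_length;
                  offset += page_length;
          }
          return 0;
  }
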
        i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
        mutex_lock(&i915->drm.struct_mutex);
-       if (node.allocated) {
+       if (drm_mm_node_allocated(&node)) {
                ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                remove_mappable_node(&node);
        } else {
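
Teardown mirrors setup: a reserved window has its PTEs scrubbed with
clear_range() before the node is returned, so no stale PTE is left
pointing at the object's pages, while a pinned vma is simply unpinned in
the else branch. From memory, remove_mappable_node() is nothing more
than a wrapper:

  static void remove_mappable_node(struct drm_mm_node *node)
  {
          drm_mm_remove_node(node);
  }
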
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
                        goto out_rpm;
-               GEM_BUG_ON(!node.allocated);
+               GEM_BUG_ON(!drm_mm_node_allocated(&node));
        }
 
        mutex_unlock(&i915->drm.struct_mutex);
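
The pwrite side repeats the same fallback, unwinding to a different
label (out_rpm) because by this point it also holds a runtime-PM wakeref
that must be released on the way out. GEM_BUG_ON() itself is the
i915-local assert; a sketch of its shape (the real macro in i915_gem.h
also traces before hitting BUG()):

  #ifdef CONFIG_DRM_I915_DEBUG_GEM
  #define GEM_BUG_ON(cond) BUG_ON(cond)
  #else
  #define GEM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
  #endif

Because drm_mm_node_allocated() is a static inline, swapping it in keeps
these asserts free in non-debug builds: BUILD_BUG_ON_INVALID() only
type-checks its argument.
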
                unsigned int page_offset = offset_in_page(offset);
                unsigned int page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
-               if (node.allocated) {
+               if (drm_mm_node_allocated(&node)) {
                        /* flush the write before we modify the GGTT */
                        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                        ggtt->vm.insert_page(&ggtt->vm,
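
The comment kept in this hunk is the important detail: data written
through the write-combining aperture on the previous pass may still sit
in WC buffers, so it has to be flushed before insert_page() repoints the
window, or the straggling bytes could land in whatever page the window
maps next. The shape of the loop (for_each_chunk and dma_addr are
illustrative placeholders, not driver names):

  for_each_chunk(...) {
          if (drm_mm_node_allocated(&node)) {
                  /* writes from the previous pass may still be buffered;
                   * drain them before moving the window */
                  intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                  ggtt->vm.insert_page(&ggtt->vm, dma_addr,
                                       node.start, I915_CACHE_NONE, 0);
          }
          /* ... map node.start, copy the chunk, unmap ... */
  }
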
 out_unpin:
        mutex_lock(&i915->drm.struct_mutex);
        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-       if (node.allocated) {
+       if (drm_mm_node_allocated(&node)) {
                ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                remove_mappable_node(&node);
        } else {
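
The remaining hunks appear to be the inline helpers in i915_vma.h.
i915_ggtt_offset() below asserts that a GGTT binding both exists and
fits in 32 bits before truncating, since much of the hardware takes
32-bit GGTT addresses. The bit helpers it relies on are the standard
kernel ones (quoted approximately from include/linux/kernel.h):

  #define lower_32_bits(n) ((u32)(n))
  #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

The double shift in upper_32_bits() avoids undefined behaviour when the
argument happens to be a 32-bit type.
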
 
 static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
 {
        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-       GEM_BUG_ON(!vma->node.allocated);
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(upper_32_bits(vma->node.start));
        GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
        return lower_32_bits(vma->node.start);
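
A typical caller programs the returned offset straight into a register,
which is why the helper insists the upper 32 bits are zero; an
illustrative (not verbatim) example:

  /* e.g. pointing an engine's ring at its GGTT binding */
  ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
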
 static inline bool i915_node_color_differs(const struct drm_mm_node *node,
                                           unsigned long color)
 {
-       return node->allocated && node->color != color;
+       return drm_mm_node_allocated(node) && node->color != color;
 }
 
 /**
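
i915_node_color_differs() gets the same treatment, and it is the one
spot where the helper's behaviour on an unallocated node matters: the
allocator can hand the colour callback a boundary node that is not a
real neighbour, and such a node must never force padding. From memory,
the caller looks roughly like this:

  static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
                                     unsigned long color,
                                     u64 *start, u64 *end)
  {
          if (i915_node_color_differs(node, color))
                  *start += I915_GTT_PAGE_SIZE;   /* guard page below */

          node = list_next_entry(node, node_list);
          if (i915_node_color_differs(node, color))
                  *end -= I915_GTT_PAGE_SIZE;     /* guard page above */
  }
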