if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
                return false;
 
-       list_add(&vma->exec_list, unwind);
+       list_add(&vma->evict_link, unwind);
        return drm_mm_scan_add_block(scan, &vma->node);
 }
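
The change here is a mechanical rename with one idea behind it: the single i915_vma.exec_list link, previously shared between the execbuffer's eb->vmas list and the temporary eviction lists, is split into two dedicated members. The i915_gem_evict.c hunks (above and immediately below) switch to evict_link; the i915_gem_execbuffer.c hunks later in this diff switch to exec_link. A minimal sketch of the implied struct change, assuming only the two member names visible in the hunks (placement and surrounding members are not shown by the diff):

        struct i915_vma {
                /* ... */
                struct list_head exec_link;     /* entry in eb->vmas during execbuf */
                struct list_head evict_link;    /* entry in a local eviction/unwind list */
                /* ... */
        };

With separate links, putting a vma on an eviction list can no longer clobber its position on eb->vmas, which is presumably the point, since eviction can be triggered from within execbuffer reservation (see the i915_gem_evict_vm() call further down).
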
 
        } while (*++phase);
 
        /* Nothing found, clean up and bail out! */
-       list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+       list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                ret = drm_mm_scan_remove_block(&scan, &vma->node);
                BUG_ON(ret);
        }
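
The "nothing found" path must still walk the whole unwind list: drm_mm's eviction scan requires every node added with drm_mm_scan_add_block() to be handed back via drm_mm_scan_remove_block() before the scan state is abandoned, and the BUG_ON documents that, with no hole found, no block should report itself as evictable. The list discipline in mark_free() above cooperates with this; spelled out:

        /* mark_free() pushes at the head of the list ... */
        list_add(&vma->evict_link, unwind);
        drm_mm_scan_add_block(scan, &vma->node);

        /* ... so a forward walk removes blocks in reverse scan order,
         * the pairing the drm_mm scan documentation describes
         * (list_add() to add, list_for_each() over the same list to remove). */
        list_for_each_entry_safe(vma, next, &eviction_list, evict_link)
                drm_mm_scan_remove_block(&scan, &vma->node);
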
         * calling unbind (which may remove the active reference
         * of any of our objects, thus corrupting the list).
         */
-       list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+       list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                if (drm_mm_scan_remove_block(&scan, &vma->node))
                        __i915_vma_pin(vma);
                else
-                       list_del(&vma->exec_list);
+                       list_del(&vma->evict_link);
        }
 
        /* Unbinding will emit any required flushes */
        ret = 0;
-       list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+       list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                __i915_vma_unpin(vma);
                if (ret == 0)
                        ret = i915_vma_unbind(vma);
                 * reference) another in our eviction list.
                 */
                __i915_vma_pin(vma);
-               list_add(&vma->exec_list, &eviction_list);
+               list_add(&vma->evict_link, &eviction_list);
        }
 
-       list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+       list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                __i915_vma_unpin(vma);
                if (ret == 0)
                        ret = i915_vma_unbind(vma);
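
Both eviction paths (i915_gem_evict_something above, and what appears to be the evict-for-node variant here) end with the same two-pass teardown, now running over evict_link. Pass one decides which vmas actually form the hole and pins them; pass two unpins and unbinds. The temporary pin is what the earlier comment fragment is about: i915_vma_unbind() can wait and retire requests, which may drop the last active reference of a vma still queued later on eviction_list, so every candidate stays pinned until its turn comes. An annotated copy of the pattern, simplified:

        list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                if (drm_mm_scan_remove_block(&scan, &vma->node))
                        __i915_vma_pin(vma);            /* part of the hole: keep alive */
                else
                        list_del(&vma->evict_link);     /* bystander: drop it */
        }

        ret = 0;
        list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                __i915_vma_unpin(vma);                  /* every vma gets unpinned, */
                if (ret == 0)                           /* but the first failure wins */
                        ret = i915_vma_unbind(vma);
        }
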
 
 {
        struct i915_vma *vma;
 
-       list_for_each_entry(vma, &eb->vmas, exec_list) {
+       list_for_each_entry(vma, &eb->vmas, exec_link) {
                eb_unreserve_vma(vma);
                i915_vma_put(vma);
                vma->exec_entry = NULL;
 static struct i915_vma *
 eb_get_batch(struct i915_execbuffer *eb)
 {
-       struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+       struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_link);
 
        /*
         * SNA is doing fancy tricks with compressing batch buffers, which leads
                }
 
                /* Transfer ownership from the objects list to the vmas list. */
-               list_add_tail(&vma->exec_list, &eb->vmas);
+               list_add_tail(&vma->exec_link, &eb->vmas);
                list_del_init(&obj->obj_exec_link);
 
                vma->exec_entry = &eb->exec[i];
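
On the execbuffer side the rename is to exec_link, and the lookup loop above is also where list order is established: each vma is appended with list_add_tail(), so eb->vmas mirrors the order of the user's exec object array. That ordering is what eb_get_batch() (earlier hunk) relies on when it reads eb->vmas.prev, since the execbuf ABI passes the batch as the last object. The same line could be written with the stock helper:

        /* Equivalent to list_entry(eb->vmas.prev, ...) in eb_get_batch();
         * list_last_entry() is the <linux/list.h> spelling of "tail". */
        struct i915_vma *vma = list_last_entry(&eb->vmas, typeof(*vma), exec_link);
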
 {
        struct i915_vma *vma;
 
-       list_for_each_entry(vma, &eb->vmas, exec_list) {
+       list_for_each_entry(vma, &eb->vmas, exec_link) {
                if (!vma->exec_entry)
                        continue;
 
        struct i915_vma *vma;
        int ret = 0;
 
-       list_for_each_entry(vma, &eb->vmas, exec_list) {
+       list_for_each_entry(vma, &eb->vmas, exec_link) {
                ret = eb_relocate_vma(vma, eb);
                if (ret)
                        break;
                struct drm_i915_gem_exec_object2 *entry;
                bool need_fence, need_mappable;
 
-               vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+               vma = list_first_entry(&eb->vmas, struct i915_vma, exec_link);
                obj = vma->obj;
                entry = vma->exec_entry;
 
                need_mappable = need_fence || need_reloc_mappable(vma);
 
                if (entry->flags & EXEC_OBJECT_PINNED)
-                       list_move_tail(&vma->exec_list, &pinned_vmas);
+                       list_move_tail(&vma->exec_link, &pinned_vmas);
                else if (need_mappable) {
                        entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
-                       list_move(&vma->exec_list, &ordered_vmas);
+                       list_move(&vma->exec_link, &ordered_vmas);
                } else
-                       list_move_tail(&vma->exec_list, &ordered_vmas);
+                       list_move_tail(&vma->exec_link, &ordered_vmas);
 
                obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
                obj->base.pending_write_domain = 0;
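
This loop partitions eb->vmas for reservation, using the new exec_link throughout: user-pinned entries are parked on pinned_vmas, entries that need a mappable GTT placement are promoted to the front of ordered_vmas, and the rest are appended. The head-versus-tail distinction is carried entirely by which list helper is used:

        list_move(&vma->exec_link, &ordered_vmas);      /* unlink, re-add at head: bind first */
        list_move_tail(&vma->exec_link, &ordered_vmas); /* unlink, re-add at tail: bind later */

(The sublists are presumably spliced back into eb->vmas after the loop; that part of the function is not in this excerpt.)
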
                int ret = 0;
 
                /* Unbind any ill-fitting objects or pin. */
-               list_for_each_entry(vma, &eb->vmas, exec_list) {
+               list_for_each_entry(vma, &eb->vmas, exec_link) {
                        if (!drm_mm_node_allocated(&vma->node))
                                continue;
 
                }
 
                /* Bind fresh objects */
-               list_for_each_entry(vma, &eb->vmas, exec_list) {
+               list_for_each_entry(vma, &eb->vmas, exec_link) {
                        if (drm_mm_node_allocated(&vma->node))
                                continue;
 
                        return ret;
 
                /* Decrement pin count for bound objects */
-               list_for_each_entry(vma, &eb->vmas, exec_list)
+               list_for_each_entry(vma, &eb->vmas, exec_link)
                        eb_unreserve_vma(vma);
 
                ret = i915_gem_evict_vm(eb->vm, true);
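
The fragments above come from the reservation routine's retry loop: unbind whatever no longer fits, bind the fresh entries, and if the VM is simply full, release every reservation and evict the whole address space before trying again. A skeleton of that control flow, inferred from the visible pieces (only the list walks are taken from the diff; the retry bookkeeping is an assumption):

        while (1) {
                /* unbind ill-fitting vmas, then bind fresh ones ... */

                if (ret != -ENOSPC || retry++)
                        return ret;

                /* Decrement pin count for bound objects */
                list_for_each_entry(vma, &eb->vmas, exec_link)
                        eb_unreserve_vma(vma);

                ret = i915_gem_evict_vm(eb->vm, true);
                if (ret)
                        return ret;
        }
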
        if (ret)
                goto err;
 
-       list_for_each_entry(vma, &eb->vmas, exec_list) {
+       list_for_each_entry(vma, &eb->vmas, exec_link) {
                int idx = vma->exec_entry - eb->exec;
 
                ret = eb_relocate_vma_slow(vma, eb, reloc + reloc_offset[idx]);
        struct i915_vma *vma;
        int ret;
 
-       list_for_each_entry(vma, &eb->vmas, exec_list) {
+       list_for_each_entry(vma, &eb->vmas, exec_link) {
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (vma->exec_entry->flags & EXEC_OBJECT_CAPTURE) {
 {
        struct i915_vma *vma;
 
-       list_for_each_entry(vma, &eb->vmas, exec_list) {
+       list_for_each_entry(vma, &eb->vmas, exec_link) {
                struct drm_i915_gem_object *obj = vma->obj;
 
                obj->base.write_domain = obj->base.pending_write_domain;
                memset(&eb->shadow_exec_entry, 0, sizeof(*vma->exec_entry));
        vma->exec_entry = &eb->shadow_exec_entry;
        vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
        i915_gem_object_get(shadow_batch_obj);
-       list_add_tail(&vma->exec_list, &eb->vmas);
+       list_add_tail(&vma->exec_link, &eb->vmas);
 
 out:
        i915_gem_object_unpin_pages(shadow_batch_obj);
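
Finally, the parsed (shadow) batch is fed into the same machinery: it takes its own object reference and joins eb->vmas through exec_link, pre-flagged as pinned, so the common teardown walk seen near the top of the execbuffer hunks releases it like any other execbuf object. The two halves of that lifetime, side by side:

        /* publish: one ref held by the eb list */
        i915_gem_object_get(shadow_batch_obj);
        list_add_tail(&vma->exec_link, &eb->vmas);

        /* release: the common walk drops reservation and reference */
        list_for_each_entry(vma, &eb->vmas, exec_link) {
                eb_unreserve_vma(vma);
                i915_vma_put(vma);
                vma->exec_entry = NULL;
        }
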