 #include "gt/intel_gt_regs.h"
 #include "gem/i915_gem_lmem.h"
 
+#include "gem/selftests/igt_gem_utils.h"
 #include "selftests/igt_flush_test.h"
 #include "selftests/mock_drm.h"
 #include "selftests/i915_random.h"
        return ret;
 }
 
-static int move_to_active(struct i915_vma *vma,
-                         struct i915_request *rq,
-                         unsigned int flags)
-{
-       int err;
-
-       i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, flags);
-       i915_vma_unlock(vma);
-
-       return err;
-}
-
 static int pin_buffer(struct i915_vma *vma, u64 addr)
 {
        int err;
                goto err_bb;
        }
 
-       err = move_to_active(t->batch, rq, 0);
+       err = igt_vma_move_to_active_unlocked(t->batch, rq, 0);
        if (!err)
-               err = move_to_active(src->vma, rq, 0);
+               err = igt_vma_move_to_active_unlocked(src->vma, rq, 0);
        if (!err)
-               err = move_to_active(dst->vma, rq, 0);
+               err = igt_vma_move_to_active_unlocked(dst->vma, rq, 0);
        if (!err)
                err = rq->engine->emit_bb_start(rq,
                                                t->batch->node.start,
 
 
 #include <linux/types.h>
 
+#include "i915_vma.h"
+
 struct i915_request;
 struct i915_gem_context;
 struct i915_vma;
                    struct i915_vma *vma, u64 offset,
                    unsigned long count, u32 val);
 
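+/*
+ * Wrap _i915_vma_move_to_active() in the vma lock for callers that do not
+ * already hold it. Passing &rq->fence matches the i915_vma_move_to_active()
+ * wrapper, so behaviour at the converted call sites is unchanged; this
+ * replaces the move_to_active() helpers duplicated across the selftests.
+ */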
+static inline int __must_check
+igt_vma_move_to_active_unlocked(struct i915_vma *vma, struct i915_request *rq,
+                               unsigned int flags)
+{
+       int err;
+
+       i915_vma_lock(vma);
+       err = _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
+       i915_vma_unlock(vma);
+       return err;
+}
+
 #endif /* __IGT_GEM_UTILS_H__ */
 
        return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
 }
 
-static int move_to_active(struct i915_vma *vma,
-                         struct i915_request *rq,
-                         unsigned int flags)
-{
-       int err;
-
-       i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, flags);
-       i915_vma_unlock(vma);
-
-       return err;
-}
-
 static struct i915_request *
 hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 {
                goto unpin_hws;
        }
 
-       err = move_to_active(vma, rq, 0);
+       err = igt_vma_move_to_active_unlocked(vma, rq, 0);
        if (err)
                goto cancel_rq;
 
-       err = move_to_active(hws, rq, 0);
+       err = igt_vma_move_to_active_unlocked(hws, rq, 0);
        if (err)
                goto cancel_rq;
 
                }
        }
 
-       i915_vma_lock(arg.vma);
-       err = i915_vma_move_to_active(arg.vma, rq, flags);
+       err = igt_vma_move_to_active_unlocked(arg.vma, rq, flags);
        if (err)
                pr_err("[%s] Move to active failed: %d!\n", engine->name, err);
 
-       i915_vma_unlock(arg.vma);
-
        if (flags & EXEC_OBJECT_NEEDS_FENCE)
                i915_vma_unpin_fence(arg.vma);
        i915_vma_unpin(arg.vma);
 
        return batch;
 }
 
-static int move_to_active(struct i915_request *rq,
-                         struct i915_vma *vma,
-                         unsigned int flags)
-{
-       int err;
-
-       i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, flags);
-       i915_vma_unlock(vma);
-
-       return err;
-}
-
 static struct i915_request *
 record_registers(struct intel_context *ce,
                 struct i915_vma *before,
        if (IS_ERR(rq))
                goto err_after;
 
-       err = move_to_active(rq, before, EXEC_OBJECT_WRITE);
+       err = igt_vma_move_to_active_unlocked(before, rq, EXEC_OBJECT_WRITE);
        if (err)
                goto err_rq;
 
-       err = move_to_active(rq, b_before, 0);
+       err = igt_vma_move_to_active_unlocked(b_before, rq, 0);
        if (err)
                goto err_rq;
 
-       err = move_to_active(rq, after, EXEC_OBJECT_WRITE);
+       err = igt_vma_move_to_active_unlocked(after, rq, EXEC_OBJECT_WRITE);
        if (err)
                goto err_rq;
 
-       err = move_to_active(rq, b_after, 0);
+       err = igt_vma_move_to_active_unlocked(b_after, rq, 0);
        if (err)
                goto err_rq;
 
                goto err_batch;
        }
 
-       err = move_to_active(rq, batch, 0);
+       err = igt_vma_move_to_active_unlocked(batch, rq, 0);
        if (err)
                goto err_rq;
 
 
 #include "gt/intel_gpu_commands.h"
 #include "i915_selftest.h"
 
+#include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
 #include "selftests/igt_reset.h"
 #include "selftests/igt_spinner.h"
 
        return hws->node.start + seqno_offset(rq->fence.context);
 }
 
-static int move_to_active(struct i915_vma *vma,
-                         struct i915_request *rq,
-                         unsigned int flags)
-{
-       int err;
-
-       i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, flags);
-       i915_vma_unlock(vma);
-
-       return err;
-}
-
 struct i915_request *
 igt_spinner_create_request(struct igt_spinner *spin,
                           struct intel_context *ce,
        if (IS_ERR(rq))
                return ERR_CAST(rq);
 
-       err = move_to_active(vma, rq, 0);
+       err = igt_vma_move_to_active_unlocked(vma, rq, 0);
        if (err)
                goto cancel_rq;
 
-       err = move_to_active(hws, rq, 0);
+       err = igt_vma_move_to_active_unlocked(hws, rq, 0);
        if (err)
                goto cancel_rq;