 long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 {
         struct intel_gt_timelines *timelines = &gt->timelines;
        struct intel_timeline *tl, *tn;
-       unsigned long flags;
        bool interruptible;
        LIST_HEAD(free);
 
         interruptible = true;
         if (unlikely(timeout < 0))
                 timeout = -timeout, interruptible = false;
 
        flush_submission(gt); /* kick the ksoftirqd tasklets */
 
-       spin_lock_irqsave(&timelines->lock, flags);
+       spin_lock(&timelines->lock);
        list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
                if (!mutex_trylock(&tl->mutex))
                        continue;
                intel_timeline_get(tl);
                GEM_BUG_ON(!atomic_read(&tl->active_count));
                atomic_inc(&tl->active_count); /* pin the list element */
-               spin_unlock_irqrestore(&timelines->lock, flags);
+               spin_unlock(&timelines->lock);
 
                 if (timeout > 0) {
                         struct dma_fence *fence;
 
                         fence = i915_active_fence_get(&tl->last_request);
                         if (fence) {
                                 timeout = dma_fence_wait_timeout(fence, interruptible, timeout);
                                 dma_fence_put(fence);
                         }
                 }
 
                 retire_requests(tl);
 
-               spin_lock_irqsave(&timelines->lock, flags);
+               spin_lock(&timelines->lock);
 
                 /* Resume iteration after dropping lock */
                 list_safe_reset_next(tl, tn, link);
                 if (atomic_dec_and_test(&tl->active_count))
                         list_del(&tl->link);
                 mutex_unlock(&tl->mutex);
 
                 /* Defer the final release to after the spinlock */
                 if (refcount_dec_and_test(&tl->kref.refcount))
                         list_add(&tl->link, &free);
         }
-       spin_unlock_irqrestore(&timelines->lock, flags);
+       spin_unlock(&timelines->lock);
 
        list_for_each_entry_safe(tl, tn, &free, link)
                __intel_timeline_free(&tl->kref);
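 
The retirement loop above combines three list tricks: the element is pinned via tl->active_count so it cannot leave the active_list while the spinlock is dropped, the cursor is revalidated with list_safe_reset_next() on reacquire, and the final free is deferred onto a local list until after the spinlock is released. A minimal sketch of the same pattern, using hypothetical demo_* names (with the pin and the kref collapsed into the one counter), not the real i915 structures:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_elem {
        atomic_t active_count;  /* stays on demo_list while > 0 */
        struct list_head link;
};

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_list);

static void demo_retire_all(void)
{
        struct demo_elem *el, *en;
        LIST_HEAD(free);

        spin_lock(&demo_lock);
        list_for_each_entry_safe(el, en, &demo_list, link) {
                atomic_inc(&el->active_count);  /* pin the list element */
                spin_unlock(&demo_lock);

                /* lock dropped: may sleep here; the pin keeps el linked */

                spin_lock(&demo_lock);
                /* Resume iteration after dropping the lock */
                list_safe_reset_next(el, en, link);
                if (atomic_dec_and_test(&el->active_count))
                        list_move(&el->link, &free); /* defer free past lock */
        }
        spin_unlock(&demo_lock);

        list_for_each_entry_safe(el, en, &free, link)
                kfree(el);
}

Note the plain spin_lock() is only safe because, per this patch's premise, the lock is no longer taken from interrupt context.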
 
 static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 {
         struct intel_gt_timelines *timelines = &gt->timelines;
        struct intel_timeline *tl;
-       unsigned long flags;
        bool ok;
 
         if (!test_bit(I915_WEDGED, &gt->reset.flags))
                 return true;
 
         /*
          * Flush and error out any requests waiting upon third party fences.
          *
          * No more can be submitted until we reset the wedged bit.
          */
-       spin_lock_irqsave(&timelines->lock, flags);
+       spin_lock(&timelines->lock);
         list_for_each_entry(tl, &timelines->active_list, link) {
                 struct dma_fence *fence;
 
                 fence = i915_active_fence_get(&tl->last_request);
                 if (!fence)
                        continue;
 
-               spin_unlock_irqrestore(&timelines->lock, flags);
+               spin_unlock(&timelines->lock);
 
                 /*
                  * All internal dependencies (i915_requests) will have
                  * been flushed by the set-wedge; only external fences remain.
                  */
                 dma_fence_wait(fence, false);
                 dma_fence_put(fence);
 
                 /* Restart iteration after dropping lock */
-               spin_lock_irqsave(&timelines->lock, flags);
+               spin_lock(&timelines->lock);
                tl = list_entry(&timelines->active_list, typeof(*tl), link);
        }
-       spin_unlock_irqrestore(&timelines->lock, flags);
+       spin_unlock(&timelines->lock);
 
        /* We must reset pending GPU events before restoring our submission */
        ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
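 
The wait loop in the unwedge path uses a different recovery trick after dropping the lock: instead of pinning and resuming, it restarts the walk by pointing the cursor at the list head itself, so that list_for_each_entry()'s increment step yields the first element again. The same shape, continuing the hypothetical demo_* sketch above (the busy/wait helpers are stand-ins, stubbed so the sketch stands alone):

static bool demo_elem_busy(struct demo_elem *el) { return false; } /* stub */
static void demo_elem_wait(struct demo_elem *el) { }               /* stub */

static void demo_drain_all(void)
{
        struct demo_elem *el;

        spin_lock(&demo_lock);
        list_for_each_entry(el, &demo_list, link) {
                if (!demo_elem_busy(el))
                        continue;

                spin_unlock(&demo_lock);
                demo_elem_wait(el);     /* may sleep; list may change */
                spin_lock(&demo_lock);

                /*
                 * Restart iteration: el now aliases the list head, so the
                 * loop's list_next_entry() lands on the first entry again.
                 */
                el = list_entry(&demo_list, typeof(*el), link);
        }
        spin_unlock(&demo_lock);
}

Restarting from the head is quadratic in the worst case, but it is correct no matter how the list mutated while the lock was dropped, which suits a once-per-reset path.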
 
 void intel_timeline_enter(struct intel_timeline *tl)
 {
        struct intel_gt_timelines *timelines = &tl->gt->timelines;
-       unsigned long flags;
 
         /*
          * Pretend we are serialised by the timeline->mutex.
          */
         lockdep_assert_held(&tl->mutex);
 
         if (atomic_add_unless(&tl->active_count, 1, 0))
                return;
 
-       spin_lock_irqsave(&timelines->lock, flags);
+       spin_lock(&timelines->lock);
        if (!atomic_fetch_inc(&tl->active_count))
                list_add_tail(&tl->link, &timelines->active_list);
-       spin_unlock_irqrestore(&timelines->lock, flags);
+       spin_unlock(&timelines->lock);
 }
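 
intel_timeline_enter() takes the spinlock only for the 0 -> 1 transition: atomic_add_unless(..., 1, 0) increments without the lock whenever the count is already non-zero, so only the transition that must insert into active_list serialises. The same shape in the demo_* sketch:

static void demo_elem_enter(struct demo_elem *el)
{
        /* fast path: already active, list membership is stable */
        if (atomic_add_unless(&el->active_count, 1, 0))
                return;

        /* slow path: serialise the 0 -> 1 transition with the insertion */
        spin_lock(&demo_lock);
        if (!atomic_fetch_inc(&el->active_count))
                list_add_tail(&el->link, &demo_list);
        spin_unlock(&demo_lock);
}

The counter is re-tested under the lock via atomic_fetch_inc(): a concurrent enter may have completed the 0 -> 1 transition after our fast path failed, in which case the fetched value is non-zero and the insertion is skipped.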
 
 void intel_timeline_exit(struct intel_timeline *tl)
 {
        struct intel_gt_timelines *timelines = &tl->gt->timelines;
-       unsigned long flags;
 
        /* See intel_timeline_enter() */
        lockdep_assert_held(&tl->mutex);
        if (atomic_add_unless(&tl->active_count, -1, 1))
                return;
 
-       spin_lock_irqsave(&timelines->lock, flags);
+       spin_lock(&timelines->lock);
        if (atomic_dec_and_test(&tl->active_count))
                list_del(&tl->link);
-       spin_unlock_irqrestore(&timelines->lock, flags);
+       spin_unlock(&timelines->lock);
 
         /*
          * Since this timeline is idle, all barriers upon which we were
          * waiting must also be complete and so can be discarded without
          * loss of information.
          */
         i915_syncmap_free(&tl->sync);
 }
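 
intel_timeline_exit() is the mirror image: atomic_add_unless(..., -1, 1) handles every decrement except the final 1 -> 0, which must unlink from active_list under the lock. Completing the demo_* sketch:

static void demo_elem_exit(struct demo_elem *el)
{
        /* fast path: not the last activity, list membership unchanged */
        if (atomic_add_unless(&el->active_count, -1, 1))
                return;

        /* slow path: the final 1 -> 0 transition unlinks under the lock */
        spin_lock(&demo_lock);
        if (atomic_dec_and_test(&el->active_count))
                list_del(&el->link);
        spin_unlock(&demo_lock);
}

As with enter, the decrement is repeated under the lock, so an enter racing with the last exit either keeps the count above zero (the element stays on the list) or observes zero afterwards and re-inserts it.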