If CONFIG_PREEMPT_NONE is set and the task_work chains are long, we
could end up blocking other tasks for too long. Add a reschedule check
in handle_tw_list(), and flush the ctx if we need to reschedule, so we
don't hold the ring lock or ctx reference across the resched point.
Cc: stable@vger.kernel.org # 5.10+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
                        /* if not contended, grab and improve batching */
                        *locked = mutex_trylock(&(*ctx)->uring_lock);
                        percpu_ref_get(&(*ctx)->refs);
-               }
+               } else if (!*locked)
+                       *locked = mutex_trylock(&(*ctx)->uring_lock);
                req->io_task_work.func(req, locked);
                node = next;
                count++;
+               if (unlikely(need_resched())) {
+                       ctx_flush_and_put(*ctx, locked);
+                       *ctx = NULL;
+                       cond_resched();
+               }
        }
 
        return count;
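
For context (not part of the patch): below is a simplified sketch of how
the handle_tw_list() loop reads once this change is applied. It is an
illustration only; the real function in io_uring/io_uring.c also
prefetches the next request, and exact details vary across kernel
versions.

static unsigned int handle_tw_list(struct llist_node *node,
				   struct io_ring_ctx **ctx, bool *locked,
				   struct llist_node *last)
{
	unsigned int count = 0;

	while (node != last) {
		struct llist_node *next = node->next;
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);

		if (req->ctx != *ctx) {
			/* switched rings: drop old ctx lock/ref, pick up new */
			ctx_flush_and_put(*ctx, locked);
			*ctx = req->ctx;
			/* if not contended, grab and improve batching */
			*locked = mutex_trylock(&(*ctx)->uring_lock);
			percpu_ref_get(&(*ctx)->refs);
		} else if (!*locked)
			*locked = mutex_trylock(&(*ctx)->uring_lock);

		req->io_task_work.func(req, locked);
		node = next;
		count++;

		/*
		 * With CONFIG_PREEMPT_NONE a long chain never yields on its
		 * own. Release the ctx (lock + ref) before rescheduling so
		 * it isn't held across a potentially long sleep.
		 */
		if (unlikely(need_resched())) {
			ctx_flush_and_put(*ctx, locked);
			*ctx = NULL;
			cond_resched();
		}
	}

	return count;
}

The key design point is ordering: ctx_flush_and_put() runs before
cond_resched(), so pending completions are flushed and the uring_lock
and ctx reference are dropped before the task potentially sleeps.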