fuse: Allow to queue bg requests through io-uring
author Bernd Schubert <bschubert@ddn.com>
Mon, 20 Jan 2025 01:29:07 +0000 (02:29 +0100)
committer Miklos Szeredi <mszeredi@redhat.com>
Mon, 27 Jan 2025 17:01:22 +0000 (18:01 +0100)
This prepares queueing and sending background requests through
io-uring.

Signed-off-by: Bernd Schubert <bschubert@ddn.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com> # io_uring
Reviewed-by: Luis Henriques <luis@igalia.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
fs/fuse/dev.c
fs/fuse/dev_uring.c
fs/fuse/dev_uring_i.h

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ecf2f805f456222fda02598397beba41fc356460..1b593b23f7b8c319ec38c7e726dabf516965500e 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -568,7 +568,25 @@ ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
        return ret;
 }
 
-static bool fuse_request_queue_background(struct fuse_req *req)
+#ifdef CONFIG_FUSE_IO_URING
+static bool fuse_request_queue_background_uring(struct fuse_conn *fc,
+                                              struct fuse_req *req)
+{
+       struct fuse_iqueue *fiq = &fc->iq;
+
+       req->in.h.unique = fuse_get_unique(fiq);
+       req->in.h.len = sizeof(struct fuse_in_header) +
+               fuse_len_args(req->args->in_numargs,
+                             (struct fuse_arg *) req->args->in_args);
+
+       return fuse_uring_queue_bq_req(req);
+}
+#endif
+
+/*
+ * @return true if queued
+ */
+static int fuse_request_queue_background(struct fuse_req *req)
 {
        struct fuse_mount *fm = req->fm;
        struct fuse_conn *fc = fm->fc;
@@ -580,6 +598,12 @@ static bool fuse_request_queue_background(struct fuse_req *req)
                atomic_inc(&fc->num_waiting);
        }
        __set_bit(FR_ISREPLY, &req->flags);
+
+#ifdef CONFIG_FUSE_IO_URING
+       if (fuse_uring_ready(fc))
+               return fuse_request_queue_background_uring(fc, req);
+#endif
+
        spin_lock(&fc->bg_lock);
        if (likely(fc->connected)) {
                fc->num_background++;
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index 728000434589bfff5da1876658f7d8cff91492dc..27bc103c17c89c858c34a923875078232c975f6a 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -47,10 +47,53 @@ static struct fuse_ring_ent *uring_cmd_to_ring_ent(struct io_uring_cmd *cmd)
        return pdu->ent;
 }
 
+static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
+{
+       struct fuse_ring *ring = queue->ring;
+       struct fuse_conn *fc = ring->fc;
+
+       lockdep_assert_held(&queue->lock);
+       lockdep_assert_held(&fc->bg_lock);
+
+       /*
+        * Allow one bg request per queue, ignoring global fc limits.
+        * This prevents a single queue from consuming all resources and
+        * eliminates the need for remote queue wake-ups when global
+        * limits are met but this queue has no more waiting requests.
+        */
+       while ((fc->active_background < fc->max_background ||
+               !queue->active_background) &&
+              (!list_empty(&queue->fuse_req_bg_queue))) {
+               struct fuse_req *req;
+
+               req = list_first_entry(&queue->fuse_req_bg_queue,
+                                      struct fuse_req, list);
+               fc->active_background++;
+               queue->active_background++;
+
+               list_move_tail(&req->list, &queue->fuse_req_queue);
+       }
+}
+
 static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
                               int error)
 {
+       struct fuse_ring_queue *queue = ent->queue;
+       struct fuse_ring *ring = queue->ring;
+       struct fuse_conn *fc = ring->fc;
+
+       lockdep_assert_not_held(&queue->lock);
+       spin_lock(&queue->lock);
        ent->fuse_req = NULL;
+       if (test_bit(FR_BACKGROUND, &req->flags)) {
+               queue->active_background--;
+               spin_lock(&fc->bg_lock);
+               fuse_uring_flush_bg(queue);
+               spin_unlock(&fc->bg_lock);
+       }
+
+       spin_unlock(&queue->lock);
+
        if (error)
                req->out.h.error = error;
 
@@ -78,6 +121,7 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring)
 {
        int qid;
        struct fuse_ring_queue *queue;
+       struct fuse_conn *fc = ring->fc;
 
        for (qid = 0; qid < ring->nr_queues; qid++) {
                queue = READ_ONCE(ring->queues[qid]);
@@ -85,6 +129,13 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring)
                        continue;
 
                queue->stopped = true;
+
+               WARN_ON_ONCE(ring->fc->max_background != UINT_MAX);
+               spin_lock(&queue->lock);
+               spin_lock(&fc->bg_lock);
+               fuse_uring_flush_bg(queue);
+               spin_unlock(&fc->bg_lock);
+               spin_unlock(&queue->lock);
                fuse_uring_abort_end_queue_requests(queue);
        }
 }
@@ -190,6 +241,7 @@ static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring,
        INIT_LIST_HEAD(&queue->ent_w_req_queue);
        INIT_LIST_HEAD(&queue->ent_in_userspace);
        INIT_LIST_HEAD(&queue->fuse_req_queue);
+       INIT_LIST_HEAD(&queue->fuse_req_bg_queue);
 
        queue->fpq.processing = pq;
        fuse_pqueue_init(&queue->fpq);
@@ -1141,6 +1193,53 @@ err:
        fuse_request_end(req);
 }
 
+bool fuse_uring_queue_bq_req(struct fuse_req *req)
+{
+       struct fuse_conn *fc = req->fm->fc;
+       struct fuse_ring *ring = fc->ring;
+       struct fuse_ring_queue *queue;
+       struct fuse_ring_ent *ent = NULL;
+
+       queue = fuse_uring_task_to_queue(ring);
+       if (!queue)
+               return false;
+
+       spin_lock(&queue->lock);
+       if (unlikely(queue->stopped)) {
+               spin_unlock(&queue->lock);
+               return false;
+       }
+
+       list_add_tail(&req->list, &queue->fuse_req_bg_queue);
+
+       ent = list_first_entry_or_null(&queue->ent_avail_queue,
+                                      struct fuse_ring_ent, list);
+       spin_lock(&fc->bg_lock);
+       fc->num_background++;
+       if (fc->num_background == fc->max_background)
+               fc->blocked = 1;
+       fuse_uring_flush_bg(queue);
+       spin_unlock(&fc->bg_lock);
+
+       /*
+        * Due to bg_queue flush limits there might be other bg requests
+        * in the queue that need to be handled first. Or no further req
+        * might be available.
+        */
+       req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req,
+                                      list);
+       if (ent && req) {
+               fuse_uring_add_req_to_ring_ent(ent, req);
+               spin_unlock(&queue->lock);
+
+               fuse_uring_dispatch_ent(ent);
+       } else {
+               spin_unlock(&queue->lock);
+       }
+
+       return true;
+}
+
 static const struct fuse_iqueue_ops fuse_io_uring_ops = {
        /* should be send over io-uring as enhancement */
        .send_forget = fuse_dev_queue_forget,
diff --git a/fs/fuse/dev_uring_i.h b/fs/fuse/dev_uring_i.h
index 0517a6eafc9173475d34445c42a88606ceda2e0f..0182be61778b26a94bda2607289a7b668df6362f 100644
--- a/fs/fuse/dev_uring_i.h
+++ b/fs/fuse/dev_uring_i.h
@@ -82,8 +82,13 @@ struct fuse_ring_queue {
        /* fuse requests waiting for an entry slot */
        struct list_head fuse_req_queue;
 
+       /* background fuse requests */
+       struct list_head fuse_req_bg_queue;
+
        struct fuse_pqueue fpq;
 
+       unsigned int active_background;
+
        bool stopped;
 };
 
@@ -127,6 +132,7 @@ void fuse_uring_stop_queues(struct fuse_ring *ring);
 void fuse_uring_abort_end_requests(struct fuse_ring *ring);
 int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
 void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req);
+bool fuse_uring_queue_bq_req(struct fuse_req *req);
 
 static inline void fuse_uring_abort(struct fuse_conn *fc)
 {
@@ -179,6 +185,12 @@ static inline void fuse_uring_abort(struct fuse_conn *fc)
 static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
 {
 }
+
+static inline bool fuse_uring_ready(struct fuse_conn *fc)
+{
+       return false;
+}
+
 #endif /* CONFIG_FUSE_IO_URING */
 
 #endif /* _FS_FUSE_DEV_URING_I_H */
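
The throttling rule that fuse_uring_flush_bg() adds above (allow one background request per ring queue even when the connection-wide limit is reached, otherwise respect fc->max_background) can be illustrated with a short standalone C program. This is an illustrative userspace sketch, not kernel code: struct conn and struct queue are simplified stand-ins for fuse_conn and fuse_ring_queue, keeping only the counters the policy touches.

#include <stdio.h>

/* Simplified stand-ins for fuse_conn / fuse_ring_queue (illustrative only). */
struct conn {
	unsigned int max_background;	/* global limit (fc->max_background) */
	unsigned int active_background;	/* background requests active connection-wide */
};

struct queue {
	unsigned int active_background;	/* background requests active on this ring queue */
	unsigned int backlogged;	/* waiting on fuse_req_bg_queue */
	unsigned int dispatched;	/* moved to fuse_req_queue for sending */
};

/*
 * Userspace model of fuse_uring_flush_bg(): keep moving backlogged
 * background requests to the send queue while either the global limit
 * still has head-room or this queue has nothing in flight at all.
 */
static void flush_bg(struct conn *fc, struct queue *q)
{
	while ((fc->active_background < fc->max_background ||
		q->active_background == 0) && q->backlogged > 0) {
		q->backlogged--;
		q->dispatched++;
		q->active_background++;
		fc->active_background++;
	}
}

int main(void)
{
	/* Global limit already exhausted by other queues ... */
	struct conn fc = { .max_background = 2, .active_background = 2 };
	/* ... yet a queue with three backlogged requests still gets one through. */
	struct queue q = { .backlogged = 3 };

	flush_bg(&fc, &q);
	printf("dispatched=%u backlogged=%u queue_active=%u global_active=%u\n",
	       q.dispatched, q.backlogged, q.active_background, fc.active_background);
	return 0;
}

Allowing that single over-limit request per queue is what lets fuse_uring_req_end() refill a queue locally from its own backlog, avoiding remote queue wake-ups when the global limit is met but this queue has no more waiting requests, as the comment in the patch notes.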