In the GC thread we record the latest GC key in gc_done, which is
expected to be used for incremental GC, but the current code does not
implement that. While GC runs, front side I/O is blocked until GC
finishes, which can take a long time when there are many btree nodes.
This patch implements incremental GC. The main idea is: when there
are front side I/Os, after GC has processed some nodes (100), we stop
GC, release the btree node lock, and process the front side I/Os for
a while (100 ms), then go back to GC again.
With this patch, I/Os are no longer blocked for the whole duration of
GC, and the obvious problem of I/O throughput dropping to zero during
GC is gone.
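
Below is a minimal userspace sketch of that control flow (illustrative
only, not kernel code): an atomic counter stands in for the one the
request path increments and decrements around each search, and the GC
loop yields for GC_SLEEP_MS after every MIN_GC_NODES nodes while front
side I/O is pending. The names search_inflight, MIN_GC_NODES and
GC_SLEEP_MS mirror the patch; main(), gc_sleep(), nanosleep() and
TOTAL_NODES are demo scaffolding only.

#define _POSIX_C_SOURCE 199309L
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define MIN_GC_NODES   100
#define GC_SLEEP_MS    100
#define TOTAL_NODES    1000    /* pretend btree size, demo only */

/* incremented/decremented by the request path in the real code */
static atomic_int search_inflight;

static void gc_sleep(void)
{
        /* stand-in for schedule_timeout_interruptible(msecs_to_jiffies(...)) */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = GC_SLEEP_MS * 1000000L };
        nanosleep(&ts, NULL);
}

int main(void)
{
        int nodes = 0, nodes_pre = 0;

        /* pretend a front side request stays in flight for the whole run */
        atomic_store(&search_inflight, 1);

        while (nodes < TOTAL_NODES) {
                nodes++;        /* "GC one btree node" */

                /*
                 * After at least MIN_GC_NODES further nodes, and only if
                 * front side I/O is waiting, pause GC and give the request
                 * path GC_SLEEP_MS to make progress.
                 */
                if (atomic_load(&search_inflight) &&
                    nodes >= nodes_pre + MIN_GC_NODES) {
                        nodes_pre = nodes;
                        printf("GC paused at %d nodes, yielding %d ms\n",
                               nodes, GC_SLEEP_MS);
                        gc_sleep();
                }
        }

        printf("GC finished: %d nodes\n", nodes);
        return 0;
}

In the real code the same effect is reached by returning -EAGAIN from
the btree walk, which unwinds back to the GC loop, drops the btree
node lock, sleeps GC_SLEEP_MS, and then resumes GC from the last key
recorded in gc_done.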
Patch v2: Rename some variables and macros as Coly suggested.
Signed-off-by: Tang Junhui <tang.junhui@zte.com.cn>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 
 struct gc_stat {
        size_t                  nodes;
+       size_t                  nodes_pre;
        size_t                  key_bytes;
 
        size_t                  nkeys;
         * rescale; when it hits 0 we rescale all the bucket priorities.
         */
        atomic_t                rescale;
+       /*
+        * used by GC to tell whether any front side I/O is in flight
+        */
+       atomic_t                search_inflight;
        /*
         * When we invalidate buckets, we use both the priority and the amount
         * of good data to determine which buckets to reuse first - to weight
 
 
 #define MAX_NEED_GC            64
 #define MAX_SAVE_PRIO          72
+#define MIN_GC_NODES           100
+#define GC_SLEEP_MS            100
 
 #define PTR_DIRTY_BIT          (((uint64_t) 1 << 36))
 
                memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
                r->b = NULL;
 
+               if (atomic_read(&b->c->search_inflight) &&
+                   gc->nodes >= gc->nodes_pre + MIN_GC_NODES) {
+                       gc->nodes_pre = gc->nodes;
+                       ret = -EAGAIN;
+                       break;
+               }
+
                if (need_resched()) {
                        ret = -EAGAIN;
                        break;
                closure_sync(&writes);
                cond_resched();
 
-               if (ret && ret != -EAGAIN)
+               if (ret == -EAGAIN)
+                       schedule_timeout_interruptible(msecs_to_jiffies
+                                                      (GC_SLEEP_MS));
+               else if (ret)
                        pr_warn("gc failed!");
        } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
 
 
 {
        struct search *s = container_of(cl, struct search, cl);
 
+       atomic_dec(&s->d->c->search_inflight);
+
        if (s->iop.bio)
                bio_put(s->iop.bio);
 
 
        closure_init(&s->cl, NULL);
        do_bio_hook(s, bio, request_endio);
+       atomic_inc(&d->c->search_inflight);
 
        s->orig_bio             = bio;
        s->cache_miss           = NULL;