nfsd: dynamically allocate the nfsd-reply shrinker
Author:     Qi Zheng <zhengqi.arch@bytedance.com>
AuthorDate: Mon, 11 Sep 2023 09:44:32 +0000 (17:44 +0800)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Wed, 4 Oct 2023 17:32:25 +0000 (10:32 -0700)
In preparation for implementing lockless slab shrink, use the new APIs to
dynamically allocate the nfsd-reply shrinker, so that it can be freed
asynchronously via RCU. Releasing the struct nfsd_net then no longer needs
to wait for an RCU read-side critical section.
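In short, the embedded struct shrinker becomes a pointer managed by the new
shrinker lifecycle helpers. The sketch below is only a condensed view of the
hunks in this patch (error handling and the hash-table setup are trimmed);
every identifier is taken from the diff itself:

	/* init: allocate, configure, then publish the shrinker */
	nn->nfsd_reply_cache_shrinker = shrinker_alloc(0, "nfsd-reply:%s",
						       nn->nfsd_name);
	if (!nn->nfsd_reply_cache_shrinker)
		return -ENOMEM;	/* the real code frees drc_hashtbl first */

	nn->nfsd_reply_cache_shrinker->count_objects = nfsd_reply_cache_count;
	nn->nfsd_reply_cache_shrinker->scan_objects  = nfsd_reply_cache_scan;
	nn->nfsd_reply_cache_shrinker->seeks         = 1;
	nn->nfsd_reply_cache_shrinker->private_data  = nn;
	shrinker_register(nn->nfsd_reply_cache_shrinker);

	/* callbacks: recover the per-net context without container_of() */
	struct nfsd_net *nn = shrink->private_data;

	/* shutdown: shrinker_free() lets the shrinker be freed via RCU */
	shrinker_free(nn->nfsd_reply_cache_shrinker);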

Link: https://lkml.kernel.org/r/20230911094444.68966-34-zhengqi.arch@bytedance.com
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Acked-by: Chuck Lever <chuck.lever@oracle.com>
Acked-by: Jeff Layton <jlayton@kernel.org>
Acked-by: Muchun Song <songmuchun@bytedance.com>
Cc: Neil Brown <neilb@suse.de>
Cc: Olga Kornievskaia <kolga@netapp.com>
Cc: Dai Ngo <Dai.Ngo@oracle.com>
Cc: Tom Talpey <tom@talpey.com>
Cc: Abhinav Kumar <quic_abhinavk@quicinc.com>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Anna Schumaker <anna@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Bob Peterson <rpeterso@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Carlos Llamas <cmllamas@google.com>
Cc: Chandan Babu R <chandan.babu@oracle.com>
Cc: Chao Yu <chao@kernel.org>
Cc: Chris Mason <clm@fb.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Chuck Lever <cel@kernel.org>
Cc: Coly Li <colyli@suse.de>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Airlie <airlied@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Sterba <dsterba@suse.com>
Cc: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Cc: Gao Xiang <hsiangkao@linux.alibaba.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Huang Rui <ray.huang@amd.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Jeffle Xu <jefflexu@linux.alibaba.com>
Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kent Overstreet <kent.overstreet@gmail.com>
Cc: Kirill Tkhai <tkhai@ya.ru>
Cc: Marijn Suijten <marijn.suijten@somainline.org>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Mike Snitzer <snitzer@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nadav Amit <namit@vmware.com>
Cc: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Sean Paul <sean@poorly.run>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Song Liu <song@kernel.org>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: Yue Hu <huyue2@coolpad.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/nfsd/netns.h
fs/nfsd/nfscache.c

diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index f669444d5336..ab303a8b77d5 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -177,7 +177,7 @@ struct nfsd_net {
        /* size of cache when we saw the longest hash chain */
        unsigned int             longest_chain_cachesize;
 
-       struct shrinker         nfsd_reply_cache_shrinker;
+       struct shrinker         *nfsd_reply_cache_shrinker;
 
        /* tracking server-to-server copy mounts */
        spinlock_t              nfsd_ssc_lock;
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 80621a709510..fd56a52aa5fb 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -201,26 +201,29 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
 {
        unsigned int hashsize;
        unsigned int i;
-       int status = 0;
 
        nn->max_drc_entries = nfsd_cache_size_limit();
        atomic_set(&nn->num_drc_entries, 0);
        hashsize = nfsd_hashsize(nn->max_drc_entries);
        nn->maskbits = ilog2(hashsize);
 
-       nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
-       nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
-       nn->nfsd_reply_cache_shrinker.seeks = 1;
-       status = register_shrinker(&nn->nfsd_reply_cache_shrinker,
-                                  "nfsd-reply:%s", nn->nfsd_name);
-       if (status)
-               return status;
-
        nn->drc_hashtbl = kvzalloc(array_size(hashsize,
                                sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
        if (!nn->drc_hashtbl)
+               return -ENOMEM;
+
+       nn->nfsd_reply_cache_shrinker = shrinker_alloc(0, "nfsd-reply:%s",
+                                                      nn->nfsd_name);
+       if (!nn->nfsd_reply_cache_shrinker)
                goto out_shrinker;
 
+       nn->nfsd_reply_cache_shrinker->scan_objects = nfsd_reply_cache_scan;
+       nn->nfsd_reply_cache_shrinker->count_objects = nfsd_reply_cache_count;
+       nn->nfsd_reply_cache_shrinker->seeks = 1;
+       nn->nfsd_reply_cache_shrinker->private_data = nn;
+
+       shrinker_register(nn->nfsd_reply_cache_shrinker);
+
        for (i = 0; i < hashsize; i++) {
                INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
                spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
@@ -229,7 +232,7 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
 
        return 0;
 out_shrinker:
-       unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
+       kvfree(nn->drc_hashtbl);
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        return -ENOMEM;
 }
@@ -239,7 +242,7 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
        struct nfsd_cacherep *rp;
        unsigned int i;
 
-       unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
+       shrinker_free(nn->nfsd_reply_cache_shrinker);
 
        for (i = 0; i < nn->drc_hashsize; i++) {
                struct list_head *head = &nn->drc_hashtbl[i].lru_head;
@@ -323,8 +326,7 @@ nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
 static unsigned long
 nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-       struct nfsd_net *nn = container_of(shrink,
-                               struct nfsd_net, nfsd_reply_cache_shrinker);
+       struct nfsd_net *nn = shrink->private_data;
 
        return atomic_read(&nn->num_drc_entries);
 }
@@ -343,8 +345,7 @@ nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 static unsigned long
 nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-       struct nfsd_net *nn = container_of(shrink,
-                               struct nfsd_net, nfsd_reply_cache_shrinker);
+       struct nfsd_net *nn = shrink->private_data;
        unsigned long freed = 0;
        LIST_HEAD(dispose);
        unsigned int i;