www.infradead.org Git - users/hch/misc.git/commitdiff
md: switch md-cluster to use md_submodule_head
authorYu Kuai <yukuai3@huawei.com>
Sat, 15 Feb 2025 09:22:25 +0000 (17:22 +0800)
committerYu Kuai <yukuai@kernel.org>
Tue, 4 Mar 2025 16:28:39 +0000 (00:28 +0800)
To make code cleaner, and prepare to add kconfig for bitmap.

Also remove the unused global variables pers_lock, md_cluster_ops and
md_cluster_mod, and exported symbols register_md_cluster_operations(),
unregister_md_cluster_operations() and md_cluster_ops.

Link: https://lore.kernel.org/linux-raid/20250215092225.2427977-8-yukuai1@huaweicloud.com
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Su Yue <glass.su@suse.com>
drivers/md/md-cluster.c
drivers/md/md-cluster.h
drivers/md/md.c
drivers/md/md.h

index 6fd436a1d373a0066a5a0e8a354743727ef72595..94221d964d4fd692d059c131ec6dc27119009e3a 100644 (file)
@@ -1612,7 +1612,14 @@ out:
        return err;
 }
 
-static const struct md_cluster_operations cluster_ops = {
+static struct md_cluster_operations cluster_ops = {
+       .head = {
+               .type   = MD_CLUSTER,
+               .id     = ID_CLUSTER,
+               .name   = "cluster",
+               .owner  = THIS_MODULE,
+       },
+
        .join   = join,
        .leave  = leave,
        .slot_number = slot_number,
@@ -1642,13 +1649,12 @@ static int __init cluster_init(void)
 {
        pr_warn("md-cluster: support raid1 and raid10 (limited support)\n");
        pr_info("Registering Cluster MD functions\n");
-       register_md_cluster_operations(&cluster_ops, THIS_MODULE);
-       return 0;
+       return register_md_submodule(&cluster_ops.head);
 }
 
 static void cluster_exit(void)
 {
-       unregister_md_cluster_operations();
+       unregister_md_submodule(&cluster_ops.head);
 }
 
 module_init(cluster_init);
index 4e842af11fb463b9f1426882632f701a3b873ee2..8fb06d853173c34239cc761ccf362266ebe9b735 100644 (file)
@@ -37,9 +37,6 @@ struct md_cluster_operations {
        void (*update_size)(struct mddev *mddev, sector_t old_dev_sectors);
 };
 
-extern int register_md_cluster_operations(const struct md_cluster_operations *ops,
-               struct module *module);
-extern int unregister_md_cluster_operations(void);
 extern int md_setup_cluster(struct mddev *mddev, int nodes);
 extern void md_cluster_stop(struct mddev *mddev);
 extern void md_reload_sb(struct mddev *mddev, int raid_disk);
index 461c6b6c962810aecc30b9dffb6ba497b845bb29..590c9070cee55854efe9a57a3c04d2c657e63736 100644 (file)
@@ -81,13 +81,8 @@ static const char *action_name[NR_SYNC_ACTIONS] = {
 
 static DEFINE_XARRAY(md_submodule);
 
-static DEFINE_SPINLOCK(pers_lock);
-
 static const struct kobj_type md_ktype;
 
-static const struct md_cluster_operations *md_cluster_ops;
-static struct module *md_cluster_mod;
-
 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
 static struct workqueue_struct *md_wq;
 
@@ -7452,11 +7447,12 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
 
 static int get_cluster_ops(struct mddev *mddev)
 {
-       spin_lock(&pers_lock);
-       mddev->cluster_ops = md_cluster_ops;
-       if (mddev->cluster_ops && !try_module_get(md_cluster_mod))
+       xa_lock(&md_submodule);
+       mddev->cluster_ops = xa_load(&md_submodule, ID_CLUSTER);
+       if (mddev->cluster_ops &&
+           !try_module_get(mddev->cluster_ops->head.owner))
                mddev->cluster_ops = NULL;
-       spin_unlock(&pers_lock);
+       xa_unlock(&md_submodule);
 
        return mddev->cluster_ops == NULL ? -ENOENT : 0;
 }
@@ -7467,7 +7463,7 @@ static void put_cluster_ops(struct mddev *mddev)
                return;
 
        mddev->cluster_ops->leave(mddev);
-       module_put(md_cluster_mod);
+       module_put(mddev->cluster_ops->head.owner);
        mddev->cluster_ops = NULL;
 }
 
@@ -8559,31 +8555,6 @@ void unregister_md_submodule(struct md_submodule_head *msh)
 }
 EXPORT_SYMBOL_GPL(unregister_md_submodule);
 
-int register_md_cluster_operations(const struct md_cluster_operations *ops,
-                                  struct module *module)
-{
-       int ret = 0;
-       spin_lock(&pers_lock);
-       if (md_cluster_ops != NULL)
-               ret = -EALREADY;
-       else {
-               md_cluster_ops = ops;
-               md_cluster_mod = module;
-       }
-       spin_unlock(&pers_lock);
-       return ret;
-}
-EXPORT_SYMBOL(register_md_cluster_operations);
-
-int unregister_md_cluster_operations(void)
-{
-       spin_lock(&pers_lock);
-       md_cluster_ops = NULL;
-       spin_unlock(&pers_lock);
-       return 0;
-}
-EXPORT_SYMBOL(unregister_md_cluster_operations);
-
 int md_setup_cluster(struct mddev *mddev, int nodes)
 {
        int ret = get_cluster_ops(mddev);
index 873f33e2a1f6860774bc38e4add3db728c3e51d2..dd6a28f5d8e62e87a634f9ecb261ff6b25e7926a 100644 (file)
@@ -603,7 +603,7 @@ struct mddev {
        mempool_t *serial_info_pool;
        void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
        struct md_cluster_info          *cluster_info;
-       const struct md_cluster_operations *cluster_ops;
+       struct md_cluster_operations *cluster_ops;
        unsigned int                    good_device_nr; /* good device num within cluster raid */
        unsigned int                    noio_flag; /* for memalloc scope API */