rdev->recovery_offset < sectors;
 }
 
+/*
+ * md_get_active_sync_action() - report which sync action is currently
+ * active on @mddev, based on on-disk/array state rather than recovery flags.
+ *
+ * Checked in priority order:
+ *   - resync:  resync_offset below MaxSector;
+ *   - reshape: reshape_position set (not MaxSector);
+ *   - recover: any member rdev still needs recovery.
+ *
+ * Returns the matching enum sync_action, or ACTION_IDLE if none applies.
+ */
+static enum sync_action md_get_active_sync_action(struct mddev *mddev)
+{
+       struct md_rdev *rdev;
+       bool is_recover = false;
+
+       if (mddev->resync_offset < MaxSector)
+               return ACTION_RESYNC;
+
+       if (mddev->reshape_position != MaxSector)
+               return ACTION_RESHAPE;
+
+       /*
+        * Walk the rdev list under RCU (no mddev lock needed for a
+        * read-only scan); stop at the first device still needing recovery.
+        */
+       rcu_read_lock();
+       rdev_for_each_rcu(rdev, mddev) {
+               if (rdev_needs_recovery(rdev, MaxSector)) {
+                       is_recover = true;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return is_recover ? ACTION_RECOVER : ACTION_IDLE;
+}
+
 enum sync_action md_sync_action(struct mddev *mddev)
 {
        unsigned long recovery = mddev->recovery;
+       enum sync_action active_action;
 
        /*
         * frozen has the highest priority, means running sync_thread will be
            !test_bit(MD_RECOVERY_NEEDED, &recovery))
                return ACTION_IDLE;
 
-       if (test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
-           mddev->reshape_position != MaxSector)
+       /*
+        * Check if any sync operation (resync/recover/reshape) is
+        * currently active. This ensures that only one sync operation
+        * can run at a time. Returns the type of active operation, or
+        * ACTION_IDLE if none are active.
+        */
+       active_action = md_get_active_sync_action(mddev);
+       if (active_action != ACTION_IDLE)
+               return active_action;
+
+       if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
                return ACTION_RESHAPE;
 
        if (test_bit(MD_RECOVERY_RECOVER, &recovery))