rcu/kvfree: Move some functions under CONFIG_TINY_RCU
author Uladzislau Rezki (Sony) <urezki@gmail.com>
Thu, 12 Dec 2024 18:02:05 +0000 (19:02 +0100)
committer Vlastimil Babka <vbabka@suse.cz>
Sat, 11 Jan 2025 19:39:19 +0000 (20:39 +0100)
Currently, when Tiny RCU is enabled, the tree.c file is not compiled,
so duplicate function names do not conflict with each other.

Because the kvfree_rcu() functionality is being moved into the SLAB
code, we have to reorder some functions and group them together under
a single !CONFIG_TINY_RCU guard. That way, those function names will
not conflict when the kernel is compiled for the CONFIG_TINY_RCU
flavor.
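
For illustration only, here is a minimal sketch of the guard pattern
this patch applies (the function name below is hypothetical and not
part of the patch): the tree.c definition is compiled only when the
kernel is not built with CONFIG_TINY_RCU, so another definition of the
same symbol can exist elsewhere for the Tiny-RCU flavor without a
link-time clash.

#if !defined(CONFIG_TINY_RCU)
/*
 * Tree-RCU builds compile this copy; a Tiny-RCU build can provide its
 * own definition of the same (hypothetical) symbol elsewhere.
 */
void kvfree_rcu_example_hook(void)
{
	/* Tree-RCU specific work would go here. */
}
#endif /* #if !defined(CONFIG_TINY_RCU) */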

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Acked-by: Hyeonggon Yoo <hyeonggon.yoo@sk.com>
Tested-by: Hyeonggon Yoo <hyeonggon.yoo@sk.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
kernel/rcu/tree.c

index e69b867de8ef4eda93ee222cb3f5cc3814e0c58e..b3853ae6e8697623eb55924d70c9a4241fc2e670 100644
@@ -3653,16 +3653,6 @@ static void kfree_rcu_monitor(struct work_struct *work)
                schedule_delayed_monitor_work(krcp);
 }
 
-static enum hrtimer_restart
-schedule_page_work_fn(struct hrtimer *t)
-{
-       struct kfree_rcu_cpu *krcp =
-               container_of(t, struct kfree_rcu_cpu, hrtimer);
-
-       queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
-       return HRTIMER_NORESTART;
-}
-
 static void fill_page_cache_func(struct work_struct *work)
 {
        struct kvfree_rcu_bulk_data *bnode;
@@ -3698,27 +3688,6 @@ static void fill_page_cache_func(struct work_struct *work)
        atomic_set(&krcp->backoff_page_cache_fill, 0);
 }
 
-static void
-run_page_cache_worker(struct kfree_rcu_cpu *krcp)
-{
-       // If cache disabled, bail out.
-       if (!rcu_min_cached_objs)
-               return;
-
-       if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
-                       !atomic_xchg(&krcp->work_in_progress, 1)) {
-               if (atomic_read(&krcp->backoff_page_cache_fill)) {
-                       queue_delayed_work(system_unbound_wq,
-                               &krcp->page_cache_work,
-                                       msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
-               } else {
-                       hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-                       krcp->hrtimer.function = schedule_page_work_fn;
-                       hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
-               }
-       }
-}
-
 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
 // state specified by flags.  If can_alloc is true, the caller must
 // be schedulable and not be holding any locks or mutexes that might be
@@ -3779,6 +3748,51 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
        return true;
 }
 
+#if !defined(CONFIG_TINY_RCU)
+
+static enum hrtimer_restart
+schedule_page_work_fn(struct hrtimer *t)
+{
+       struct kfree_rcu_cpu *krcp =
+               container_of(t, struct kfree_rcu_cpu, hrtimer);
+
+       queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
+       return HRTIMER_NORESTART;
+}
+
+static void
+run_page_cache_worker(struct kfree_rcu_cpu *krcp)
+{
+       // If cache disabled, bail out.
+       if (!rcu_min_cached_objs)
+               return;
+
+       if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
+                       !atomic_xchg(&krcp->work_in_progress, 1)) {
+               if (atomic_read(&krcp->backoff_page_cache_fill)) {
+                       queue_delayed_work(system_unbound_wq,
+                               &krcp->page_cache_work,
+                                       msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
+               } else {
+                       hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+                       krcp->hrtimer.function = schedule_page_work_fn;
+                       hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
+               }
+       }
+}
+
+void __init kfree_rcu_scheduler_running(void)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
+
+               if (need_offload_krc(krcp))
+                       schedule_delayed_monitor_work(krcp);
+       }
+}
+
 /*
  * Queue a request for lazy invocation of the appropriate free routine
  * after a grace period.  Please note that three paths are maintained,
@@ -3944,6 +3958,8 @@ void kvfree_rcu_barrier(void)
 }
 EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
 
+#endif /* #if !defined(CONFIG_TINY_RCU) */
+
 static unsigned long
 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
@@ -3985,18 +4001,6 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        return freed == 0 ? SHRINK_STOP : freed;
 }
 
-void __init kfree_rcu_scheduler_running(void)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
-
-               if (need_offload_krc(krcp))
-                       schedule_delayed_monitor_work(krcp);
-       }
-}
-
 /*
  * During early boot, any blocking grace-period wait automatically
  * implies a grace period.