sparc64: Use cpu_poke to resume idle cpu
author	Vijay Kumar <vijay.ac.kumar@oracle.com>
	Tue, 11 Jul 2017 01:02:58 +0000 (19:02 -0600)
committer	Allen Pais <allen.pais@oracle.com>
	Tue, 18 Jul 2017 12:25:00 +0000 (17:55 +0530)
Use the cpu_poke hypervisor call to resume an idle cpu, if supported.
Resuming with a poke avoids delivering a cross-call IPI to the target:
smp_send_reschedule() now tries the poke first when the target cpu is
idle and falls back to an IPI when poke is not supported, the cpu is
not idle, or the poke hypercall fails. cpu_poke requires HV_GRP_CORE
API version 1.6, so the requested minor version is bumped and the
negotiated version is checked at runtime in smp_init_cpu_poke().
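
In outline, the new resume path is (a simplified sketch of the
smp_send_reschedule() code in the smp_64.c hunk below; the per-cpu
poke flag handling and error reporting are elided):

	void smp_send_reschedule(int cpu)
	{
		if (cpu == smp_processor_id()) {
			set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
			return;
		}

		/* Poke the target out of sun4v_cpu_yield() if we can... */
		if (cpu_poke && idle_cpu(cpu) && send_cpu_poke(cpu) == HV_EOK)
			return;

		/* ...otherwise fall back to a cross-call IPI. */
		send_cpu_ipi(cpu);
	}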

Signed-off-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Orabug: 25575672
Signed-off-by: Allen Pais <allen.pais@oracle.com>
Reviewed-by: Anthony Yznaga <anthony.yznaga@oracle.com>
arch/sparc/include/asm/smp_64.h
arch/sparc/kernel/hvapi.c
arch/sparc/kernel/process_64.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/smp_64.c

diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index 9e9af7662d62ab706c38d76eaf63c5de1259f720..a787a6889bbe2078e3deebf94e1c264e333ad309 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -33,6 +33,9 @@
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern cpumask_t cpu_core_map[NR_CPUS];
 
+void smp_init_cpu_poke(void);
+void scheduler_poke(void);
+
 void arch_send_call_function_single_ipi(int cpu);
 void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
@@ -75,6 +78,8 @@ void arch_unregister_cpu(int cpu);
 #define smp_fetch_global_regs() do { } while (0)
 #define smp_fetch_global_pmu() do { } while (0)
 #define smp_fill_in_cpu_possible_map() do { } while (0)
+#define smp_init_cpu_poke() do { } while (0)
+#define scheduler_poke() do { } while (0)
 
 #endif /* !(CONFIG_SMP) */
 
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index d06a9fcad91955f5a07ec7b2b13a5be4cb90c174..55834ad74cb7f90a1fd788a96de69f457d851374 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -194,7 +194,7 @@ void __init sun4v_hvapi_init(void)
 
        group = HV_GRP_CORE;
        major = 1;
-       minor = 2;
+       minor = 6; /* CPU POKE */
        if (sun4v_hvapi_register(group, major, &minor))
                goto bad;
 
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 6ea1a3ebe9c8ae841d7ae74b42950629dca81684..49f129e9b025039e09cb156812f18c64a36b4534 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -74,8 +74,13 @@ void arch_cpu_idle(void)
                        : "=&r" (pstate)
                        : "i" (PSTATE_IE));
 
-               if (!need_resched() && !cpu_is_offline(smp_processor_id()))
+               if (!need_resched() && !cpu_is_offline(smp_processor_id())) {
                        sun4v_cpu_yield();
+                       /* If resumed by cpu_poke then we need to explicitly
+                        * call scheduler_ipi().
+                        */
+                       scheduler_poke();
+               }
 
                /* Re-enable interrupts. */
                __asm__ __volatile__(
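
Why the explicit scheduler_poke() call above: unlike an IPI, cpu_poke
resumes the yielded cpu without delivering an interrupt, so nothing
raises PIL_SMP_RECEIVE_SIGNAL on the woken cpu and the reschedule
request would otherwise be lost. The handshake between the two sides
added by this patch, in outline (a sketch, not the verbatim code; see
the smp_64.c hunks below):

	/*
	 * Sender, smp_send_reschedule(cpu):    Woken cpu, arch_cpu_idle():
	 *
	 *   per_cpu(poke, cpu) = true;           sun4v_cpu_yield() returns
	 *   sun4v_cpu_poke(cpu);  -------->      scheduler_poke():
	 *                                          reads and clears "poke",
	 *                                          raises the reschedule softint
	 */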
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index a4fddcbfc9e45b7112a12bbc21928c4ba02b2fdf..9145a5a678c262079a490184eb8ea530bc4cdda7 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -338,6 +338,7 @@ void __init start_early_boot(void)
        check_if_starfire();
        per_cpu_patch();
        sun4v_patch();
+       smp_init_cpu_poke();
 
        cpu = hard_smp_processor_id();
        if (cpu >= NR_CPUS) {
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 96e7c6e5796f570242470eb394929933d5f42350..ff8a7501909aaa107440895ba7aac86fc4b4e917 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -75,6 +75,9 @@ EXPORT_SYMBOL(cpu_core_sib_cache_map);
 
 static cpumask_t smp_commenced_mask;
 
+static DEFINE_PER_CPU(bool, poke);
+static bool cpu_poke;
+
 void smp_info(struct seq_file *m)
 {
        int i;
@@ -1485,15 +1488,86 @@ void __init smp_cpus_done(unsigned int max_cpus)
 #endif
 }
 
+static void send_cpu_ipi(int cpu)
+{
+       xcall_deliver((u64) &xcall_receive_signal,
+                       0, 0, cpumask_of(cpu));
+}
+
+void scheduler_poke(void)
+{
+       if (!cpu_poke)
+               return;
+
+       if (!__this_cpu_read(poke))
+               return;
+
+       __this_cpu_write(poke, false);
+       set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
+}
+
+static unsigned long send_cpu_poke(int cpu)
+{
+       unsigned long hv_err;
+
+       per_cpu(poke, cpu) = true;
+       hv_err = sun4v_cpu_poke(cpu);
+       if (hv_err != HV_EOK) {
+               per_cpu(poke, cpu) = false;
+               pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
+                                   __func__, hv_err);
+       }
+
+       return hv_err;
+}
+
 void smp_send_reschedule(int cpu)
 {
        if (cpu == smp_processor_id()) {
                WARN_ON_ONCE(preemptible());
                set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
-       } else {
-               xcall_deliver((u64) &xcall_receive_signal,
-                             0, 0, cpumask_of(cpu));
+               return;
+       }
+
+       /* Use cpu poke to resume idle cpu if supported. */
+       if (cpu_poke && idle_cpu(cpu)) {
+               unsigned long ret;
+
+               ret = send_cpu_poke(cpu);
+               if (ret == HV_EOK)
+                       return;
        }
+
+       /* Use an IPI in the following cases:
+        * - cpu poke is not supported
+        * - the cpu is not idle
+        * - send_cpu_poke() returned an error.
+        */
+       send_cpu_ipi(cpu);
+}
+
+void smp_init_cpu_poke(void)
+{
+       unsigned long major;
+       unsigned long minor;
+       int ret;
+
+       if (tlb_type != hypervisor)
+               return;
+
+       ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
+       if (ret) {
+               pr_debug("HV_GRP_CORE is not registered\n");
+               return;
+       }
+
+       if (major == 1 && minor >= 6) {
+               /* cpu poke is registered. */
+               cpu_poke = true;
+               return;
+       }
+
+       pr_debug("CPU_POKE not supported\n");
 }
 
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
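
Both the poke path and the IPI path converge on the same softint
handler: the pre-existing PIL_SMP_RECEIVE_SIGNAL client (whose
signature is the last context line above, unchanged by this patch)
acknowledges the softint and calls scheduler_ipi(), roughly:

	void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
	{
		clear_softint(1 << irq);
		scheduler_ipi();
	}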