{
        SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
 }
+#else /* CONFIG_RISCV_SBI */
+/* stub for code that is only reachable under IS_ENABLED(CONFIG_RISCV_SBI): */
+void sbi_remote_fence_i(const unsigned long *hart_mask);
 #endif /* CONFIG_RISCV_SBI */
 #endif /* _ASM_RISCV_SBI_H */
 
 
 #include <asm/sbi.h>
 
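+/*
+ * IPI callback: fence.i only orders instruction fetches on the local
+ * hart, so each CPU has to flush its own instruction cache.
+ */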
+static void ipi_remote_fence_i(void *info)
+{
+       return local_flush_icache_all();
+}
+
 void flush_icache_all(void)
 {
-       sbi_remote_fence_i(NULL);
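+       /* With SBI, let the firmware fence all harts; otherwise IPI every CPU. */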
+       if (IS_ENABLED(CONFIG_RISCV_SBI))
+               sbi_remote_fence_i(NULL);
+       else
+               on_each_cpu(ipi_remote_fence_i, NULL, 1);
 }
 
 /*
 void flush_icache_mm(struct mm_struct *mm, bool local)
 {
        unsigned int cpu;
-       cpumask_t others, hmask, *mask;
+       cpumask_t others, *mask;
 
        preempt_disable();
 
         */
        cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
        local |= cpumask_empty(&others);
-       if (mm != current->active_mm || !local) {
-               riscv_cpuid_to_hartid_mask(&others, &hmask);
-               sbi_remote_fence_i(hmask.bits);
-       } else {
+       if (mm == current->active_mm && local) {
                /*
                 * It's assumed that at least one strongly ordered operation is
                 * performed on this hart between setting a hart's cpumask bit
                 * and scheduling this MM context on that hart.  Sending an SBI
                 * remote message will do this, but in the case where no
                 * messages are sent we still need to order this hart's writes
                 * with flush_icache_deferred().
                 */
                smp_mb();
+       } else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+               cpumask_t hartid_mask;
+
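+               /* The SBI call takes a mask of hart ids, not logical CPU ids. */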
+               riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
+               sbi_remote_fence_i(cpumask_bits(&hartid_mask));
+       } else {
+               on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
        }
 
        preempt_enable();