this_cpu_ptr is meant for use in kernel proper because it selects
between TPIDR_EL1 and TPIDR_EL2 based on nVHE/VHE. __hyp_this_cpu_ptr
was used in hyp code to always select TPIDR_EL2. Unify all users behind
this_cpu_ptr and friends by selecting the _EL2 register under
__KVM_NVHE_HYPERVISOR__. VHE continues selecting the register using
alternatives.
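As an illustration (a condensed sketch with hypothetical names, not the
verbatim kernel macros; the real definitions are in the percpu.h hunk
below), per-CPU access reduces to "symbol address plus per-CPU offset",
with the offset read from a TPIDR register:

	/* Sketch only: how the offset is selected after this patch. */
	static inline unsigned long my_cpu_offset(void)
	{
	#ifdef __KVM_NVHE_HYPERVISOR__
		/* nVHE hyp owns TPIDR_EL2 outright. */
		return read_sysreg(tpidr_el2);
	#else
		/* Kernel: ALTERNATIVE() patches this to tpidr_el2 on VHE. */
		return read_sysreg(tpidr_el1);
	#endif
	}

	#define my_this_cpu_ptr(sym) \
		((typeof(sym) *)((unsigned long)&(sym) + my_cpu_offset()))
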
Under CONFIG_DEBUG_PREEMPT, the kernel helpers perform a preemption
check which the hyp helpers omit. Preserve that check-free behavior for
nVHE by overriding the corresponding macros under
__KVM_NVHE_HYPERVISOR__ to their raw_* counterparts, and extend the
checks into VHE hyp code.
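For reference, this is roughly what the generic checked accessor
expands to with CONFIG_DEBUG_PREEMPT=y (simplified; the exact
definitions in linux/percpu-defs.h and asm-generic/percpu.h differ in
detail):

	/*
	 * debug_smp_processor_id() and the preemption checks live in
	 * kernel proper and cannot be linked into nVHE hyp code.
	 */
	#define this_cpu_ptr(ptr) \
		SHIFT_PERCPU_PTR(ptr, per_cpu_offset(smp_processor_id()))

Remapping this_cpu_ptr/__this_cpu_read/__this_cpu_write to their raw_*
counterparts is safe at hyp because nVHE hyp code runs with preemption
disabled.
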
Signed-off-by: David Brazdil <dbrazdil@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Andrew Scull <ascull@google.com>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20200922204910.7265-5-dbrazdil@google.com
                addr;                                                   \
        })
 
-/*
- * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
- * provided that sym is really a *symbol* and not a pointer obtained from
- * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
- * sparse quiet.
- */
-#define __hyp_this_cpu_ptr(sym)                                                \
-       ({                                                              \
-               void *__ptr;                                            \
-               __verify_pcpu_ptr(&sym);                                \
-               __ptr = hyp_symbol_addr(sym);                           \
-               __ptr += read_sysreg(tpidr_el2);                        \
-               (typeof(sym) __kernel __force *)__ptr;                  \
-        })
-
-#define __hyp_this_cpu_read(sym)                                       \
-       ({                                                              \
-               *__hyp_this_cpu_ptr(sym);                               \
-        })
-
 #define __KVM_EXTABLE(from, to)                                                \
        "       .pushsection    __kvm_ex_table, \"a\"\n"                \
        "       .align          3\n"                                    \
 
                        :: "r" (off) : "memory");
 }
 
-static inline unsigned long __my_cpu_offset(void)
+static inline unsigned long __hyp_my_cpu_offset(void)
+{
+       /*
+        * Non-VHE hyp code runs with preemption disabled. No need to hazard
+        * the register access against barrier() as in __kern_my_cpu_offset.
+        */
+       return read_sysreg(tpidr_el2);
+}
+
+static inline unsigned long __kern_my_cpu_offset(void)
 {
        unsigned long off;
 
 
        return off;
 }
-#define __my_cpu_offset __my_cpu_offset()
+
+#ifdef __KVM_NVHE_HYPERVISOR__
+#define __my_cpu_offset __hyp_my_cpu_offset()
+#else
+#define __my_cpu_offset __kern_my_cpu_offset()
+#endif
 
 #define PERCPU_RW_OPS(sz)                                              \
 static inline unsigned long __percpu_read_##sz(void *ptr)              \
 
 #include <asm-generic/percpu.h>
 
+/* Redefine macros for nVHE hyp under DEBUG_PREEMPT to avoid its dependencies. */
+#if defined(__KVM_NVHE_HYPERVISOR__) && defined(CONFIG_DEBUG_PREEMPT)
+#undef this_cpu_ptr
+#define        this_cpu_ptr            raw_cpu_ptr
+#undef __this_cpu_read
+#define        __this_cpu_read         raw_cpu_read
+#undef __this_cpu_write
+#define        __this_cpu_write        raw_cpu_write
+#endif
+
 #endif /* __ASM_PERCPU_H */
 
        if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
                return;
 
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        guest_ctxt = &vcpu->arch.ctxt;
        host_dbg = &vcpu->arch.host_debug_state.regs;
        guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
        if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
                return;
 
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        guest_ctxt = &vcpu->arch.ctxt;
        host_dbg = &vcpu->arch.host_debug_state.regs;
        guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
 
            !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
                return false;
 
-       ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        __ptrauth_save_key(ctxt, APIA);
        __ptrauth_save_key(ctxt, APIB);
        __ptrauth_save_key(ctxt, APDA);
         * guest wants it disabled, so be it...
         */
        if (__needs_ssbd_off(vcpu) &&
-           __hyp_this_cpu_read(arm64_ssbd_callback_required))
+           __this_cpu_read(arm64_ssbd_callback_required))
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
 #endif
 }
         * If the guest has disabled the workaround, bring it back on.
         */
        if (__needs_ssbd_off(vcpu) &&
-           __hyp_this_cpu_read(arm64_ssbd_callback_required))
+           __this_cpu_read(arm64_ssbd_callback_required))
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
 #endif
 }
 
        entry = hyp_symbol_addr(__start___kvm_ex_table);
        end = hyp_symbol_addr(__stop___kvm_ex_table);
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 
        while (entry < end) {
                addr = (unsigned long)&entry->insn + entry->insn;
 
 
        vcpu = kern_hyp_va(vcpu);
 
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        host_ctxt->__hyp_running_vcpu = vcpu;
        guest_ctxt = &vcpu->arch.ctxt;
 
 
        struct kvm_cpu_context *guest_ctxt;
        u64 exit_code;
 
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        host_ctxt->__hyp_running_vcpu = vcpu;
        guest_ctxt = &vcpu->arch.ctxt;
 
 
        struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
        struct kvm_cpu_context *host_ctxt;
 
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        __sysreg_save_user_state(host_ctxt);
 
        /*
        struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
        struct kvm_cpu_context *host_ctxt;
 
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        deactivate_traps_vhe_put();
 
        __sysreg_save_el1_state(guest_ctxt);