s390/fpu: provide and use lfpc, sfpc, and stfpc inline assemblies
author    Heiko Carstens <hca@linux.ibm.com>
          Sat, 3 Feb 2024 10:45:06 +0000 (11:45 +0100)
committer Heiko Carstens <hca@linux.ibm.com>
          Fri, 16 Feb 2024 13:30:15 +0000 (14:30 +0100)
Instead of open-coding lfpc, sfpc, and stfpc inline assemblies at
several locations, provide an fpu_* function for each instruction and
use those functions instead.

Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
arch/s390/include/asm/fpu-insn.h
arch/s390/kernel/fpu.c
arch/s390/kernel/process.c
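
As a quick illustration of the new API, here is a minimal, hypothetical sketch of a caller that saves, replaces, and restores the floating point control (FPC) register with the helpers introduced below; fpc_demo() is invented for this example, while fpu_stfpc(), fpu_sfpc(), and fpu_lfpc() are the functions added by this commit:

	/*
	 * Hypothetical sketch, not part of this commit: save the current
	 * FPC, clear it, run some code, then restore the saved value.
	 */
	static void fpc_demo(void)
	{
		unsigned int old_fpc;

		fpu_stfpc(&old_fpc);	/* stfpc: store FPC to memory */
		fpu_sfpc(0);		/* sfpc: set FPC from a register value */
		/* ... code that must run with a clean FPC ... */
		fpu_lfpc(&old_fpc);	/* lfpc: reload FPC from memory */
	}

The operand constraints mirror the instructions themselves: sfpc reads its value from a general purpose register (the "d" constraint), while lfpc and stfpc use a memory operand (the "Q" constraint), which is why fpu_sfpc() takes its argument by value and the other two take a pointer.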

diff --git a/arch/s390/include/asm/fpu-insn.h b/arch/s390/include/asm/fpu-insn.h
index 1ce8e2f9786c8faf66a81e5dd3ad0633fc17c18b..df2cad95b598c60f265ea1a9611f332f84ae6a88 100644
--- a/arch/s390/include/asm/fpu-insn.h
+++ b/arch/s390/include/asm/fpu-insn.h
@@ -45,6 +45,15 @@ static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg)
                     : "memory");
 }
 
+static __always_inline void fpu_lfpc(unsigned int *fpc)
+{
+       instrument_read(fpc, sizeof(*fpc));
+       asm volatile("lfpc      %[fpc]"
+                    :
+                    : [fpc] "Q" (*fpc)
+                    : "memory");
+}
+
 /**
  * fpu_lfpc_safe - Load floating point control register safely.
  * @fpc: new value for floating point control register
@@ -82,5 +91,22 @@ static __always_inline void fpu_std(unsigned short fpr, freg_t *reg)
                     : "memory");
 }
 
+static __always_inline void fpu_sfpc(unsigned int fpc)
+{
+       asm volatile("sfpc      %[fpc]"
+                    :
+                    : [fpc] "d" (fpc)
+                    : "memory");
+}
+
+static __always_inline void fpu_stfpc(unsigned int *fpc)
+{
+       instrument_write(fpc, sizeof(*fpc));
+       asm volatile("stfpc     %[fpc]"
+                    : [fpc] "=Q" (*fpc)
+                    :
+                    : "memory");
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_S390_FPU_INSN_H */
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
index f25c54caf32b06fb357ba14292cf02d56e71d995..6bfd4d0f33e1a49abb457f582159f91df62d80eb 100644
--- a/arch/s390/kernel/fpu.c
+++ b/arch/s390/kernel/fpu.c
@@ -17,10 +17,8 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
         * in use by the previous context.
         */
        flags &= state->mask;
-       if (flags & KERNEL_FPC) {
-               /* Save floating point control */
-               asm volatile("stfpc %0" : "=Q" (state->fpc));
-       }
+       if (flags & KERNEL_FPC)
+               fpu_stfpc(&state->fpc);
        if (!cpu_has_vx()) {
                if (flags & KERNEL_VXR_LOW)
                        save_fp_regs(state->fprs);
@@ -80,10 +78,8 @@ void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
         * current context.
         */
        flags &= state->mask;
-       if (flags & KERNEL_FPC) {
-               /* Restore floating-point controls */
-               asm volatile("lfpc %0" : : "Q" (state->fpc));
-       }
+       if (flags & KERNEL_FPC)
+               fpu_lfpc(&state->fpc);
        if (!cpu_has_vx()) {
                if (flags & KERNEL_VXR_LOW)
                        load_fp_regs(state->fprs);
@@ -176,7 +172,7 @@ void save_fpu_regs(void)
        state = &current->thread.fpu;
        regs = current->thread.fpu.regs;
 
-       asm volatile("stfpc %0" : "=Q" (state->fpc));
+       fpu_stfpc(&state->fpc);
        if (likely(cpu_has_vx())) {
                asm volatile("lgr       1,%0\n"
                             "VSTM      0,15,0,1\n"
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index e502192da5f7938903b1a4c133c48df90dfe8fba..b0578ea230e7348d8ec891f46db83188feef2d95 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -191,7 +191,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 void execve_tail(void)
 {
        current->thread.fpu.fpc = 0;
-       asm volatile("sfpc %0" : : "d" (0));
+       fpu_sfpc(0);
 }
 
 struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)