#define KERNEL_VXR_V16V23 8
#define KERNEL_VXR_V24V31 16
-#define KERNEL_VXR_LOW (KERNEL_VXR_V0V7|KERNEL_VXR_V8V15)
-#define KERNEL_VXR_MID (KERNEL_VXR_V8V15|KERNEL_VXR_V16V23)
-#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
+#define KERNEL_VXR_LOW (KERNEL_VXR_V0V7 | KERNEL_VXR_V8V15)
+#define KERNEL_VXR_MID (KERNEL_VXR_V8V15 | KERNEL_VXR_V16V23)
+#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23 | KERNEL_VXR_V24V31)
-#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
-#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW)
+#define KERNEL_VXR (KERNEL_VXR_LOW | KERNEL_VXR_HIGH)
+#define KERNEL_FPR (KERNEL_FPC | KERNEL_VXR_LOW)
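For reference, the composed masks work out as follows; a minimal sanity-check sketch, assuming the base values KERNEL_FPC = 1, KERNEL_VXR_V0V7 = 2 and KERNEL_VXR_V8V15 = 4, which are defined above this hunk and not visible here:

	static_assert(KERNEL_VXR_LOW  == 0x06);	/* v0-v15 */
	static_assert(KERNEL_VXR_MID  == 0x0c);	/* v8-v23 */
	static_assert(KERNEL_VXR_HIGH == 0x18);	/* v16-v31 */
	static_assert(KERNEL_VXR      == 0x1e);	/* all 32 vector registers */
	static_assert(KERNEL_FPR      == 0x07);	/* fpc plus f0-f15 (v0-v15) */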
struct kernel_fpu;
void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags);
void __kernel_fpu_end(struct kernel_fpu *state, u32 flags);
-
static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
{
preempt_disable();
state->mask = S390_lowcore.fpu_flags;
- if (!test_cpu_flag(CIF_FPU))
+ if (!test_cpu_flag(CIF_FPU)) {
/* Save user space FPU state and register contents */
save_fpu_regs();
- else if (state->mask & flags)
+ } else if (state->mask & flags) {
		/* Save FPU/vector registers in use by the kernel */
__kernel_fpu_begin(state, flags);
+ }
S390_lowcore.fpu_flags |= flags;
}
static inline void kernel_fpu_end(struct kernel_fpu *state, u32 flags)
{
S390_lowcore.fpu_flags = state->mask;
- if (state->mask & flags)
+ if (state->mask & flags) {
		/* Restore FPU/vector registers in use by the kernel */
__kernel_fpu_end(state, flags);
+ }
preempt_enable();
}
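A minimal usage sketch for the pair above (my_vector_op is a made-up name for illustration; the flags passed to kernel_fpu_end() must match those passed to kernel_fpu_begin() so that exactly the saved registers are restored):

	static void my_vector_op(void)
	{
		struct kernel_fpu state;

		kernel_fpu_begin(&state, KERNEL_FPC | KERNEL_VXR_LOW);
		/* fpc and v0-v15 may be clobbered here */
		kernel_fpu_end(&state, KERNEL_FPC | KERNEL_VXR_LOW);
	}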
static inline void save_vx_regs(__vector128 *vxrs)
{
- asm volatile(
+ asm volatile("\n"
" la 1,%0\n"
" .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
" .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
- : "=Q" (*(struct vx_array *) vxrs) : : "1");
+ : "=Q" (*(struct vx_array *)vxrs) : : "1");
}
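The .word triplets are hand-assembled VSTM (vector store multiple, VRS-a format) instructions, which keeps the header building on assemblers without z13 vector support. Decoding the second triplet as a check (my annotation, not part of the patch):

	/*
	 * .word 0xe70f,0x1100,0x0c3e == vstm 16,31,256(1):
	 *   0xe7    vector-facility opcode, high byte
	 *   0x0f    V1 = 0, V3 = 15 (low four bits of the register numbers)
	 *   0x1100  B2 = r1, D2 = 0x100 (offset 256 = 16 registers * 16 bytes)
	 *   0x0c3e  M4 = 0, RXB = 0b1100 (extends V1/V3 by 16: v16,v31), opcode 0x3e
	 */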
static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
if (cpu_has_vx())
convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
else
- memcpy((freg_t *)&fpregs->fprs, fpu->fprs,
- sizeof(fpregs->fprs));
+ memcpy((freg_t *)&fpregs->fprs, fpu->fprs, sizeof(fpregs->fprs));
}
static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
if (cpu_has_vx())
convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
else
- memcpy(fpu->fprs, (freg_t *)&fpregs->fprs,
- sizeof(fpregs->fprs));
+ memcpy(fpu->fprs, (freg_t *)&fpregs->fprs, sizeof(fpregs->fprs));
}
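convert_vx_to_fp() itself sits outside the hunks shown. The 16 floating-point registers overlay the leftmost doubleword of vector registers v0-v15, so the conversion is a copy of the high halves; a sketch, assuming __vector128 exposes 64-bit high/low members as elsewhere in arch/s390:

	/* Sketch only; the real helper lives elsewhere in this header */
	static inline void convert_vx_to_fp_sketch(freg_t *fprs, __vector128 *vxrs)
	{
		int i;

		for (i = 0; i < __NUM_FPRS; i++)
			fprs[i].ui = vxrs[i].high;	/* leftmost 64 bits of v<i> */
	}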
#endif /* _ASM_S390_FPU_INTERNAL_H */
};
/* VX array structure for address operand constraints in inline assemblies */
-struct vx_array { __vector128 _[__NUM_VXRS]; };
+struct vx_array {
+ __vector128 _[__NUM_VXRS];
+};
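The wrapper struct exists because a "Q" operand only advertises the size of its lvalue: a bare __vector128 would cover 16 bytes, while the cast through struct vx_array (as in save_vx_regs() above) tells the compiler the asm touches the whole 32 * 16 byte area. A minimal sketch of the pattern, with a made-up name:

	static inline void store_v0_sketch(__vector128 *vxrs)
	{
		/* constraint covers all of vxrs[], not just the 16 bytes stored */
		asm volatile("\n"
			"	la	1,%0\n"
			"	.word	0xe700,0x1000,0x003e\n" /* vstm 0,0,0(1) */
			: "=Q" (*(struct vx_array *)vxrs) : : "1");
	}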
/* In-kernel FPU state structure */
struct kernel_fpu {
{
/*
* Limit the save to the FPU/vector registers already
- * in use by the previous context
+ * in use by the previous context.
*/
flags &= state->mask;
-
- if (flags & KERNEL_FPC)
+ if (flags & KERNEL_FPC) {
		/* Save floating-point control */
asm volatile("stfpc %0" : "=Q" (state->fpc));
-
+ }
if (!cpu_has_vx()) {
if (flags & KERNEL_VXR_LOW) {
/* Save floating-point registers */
}
return;
}
-
/* Test and save vector registers */
asm volatile (
/*
{
/*
* Limit the restore to the FPU/vector registers of the
- * previous context that have been overwritte by the
- * current context
+ * previous context that have been overwritten by the
+ * current context.
*/
flags &= state->mask;
-
- if (flags & KERNEL_FPC)
+ if (flags & KERNEL_FPC) {
		/* Restore floating-point control */
asm volatile("lfpc %0" : : "Q" (state->fpc));
-
+ }
if (!cpu_has_vx()) {
if (flags & KERNEL_VXR_LOW) {
/* Restore floating-point registers */
}
return;
}
-
/* Test and restore (load) vector registers */
asm volatile (
/*