static __always_inline void arch_exit_to_user_mode(void)
{
- if (test_cpu_flag(CIF_FPU))
+ if (test_thread_flag(TIF_FPU))
__load_fpu_regs();
	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		debug_user_asce(1);
}

static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
{
preempt_disable();
state->mask = S390_lowcore.fpu_flags;
- if (!test_cpu_flag(CIF_FPU)) {
+ if (!test_thread_flag(TIF_FPU)) {
/* Save user space FPU state and register contents */
save_fpu_regs();
	} else if (state->mask & flags) {
		/* Save FPU/vector registers in-use by the kernel */
		__kernel_fpu_begin(state, flags);
	}
	S390_lowcore.fpu_flags |= flags;
}
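For orientation: kernel_fpu_begin()/kernel_fpu_end() bracket any in-kernel use of floating-point or vector registers, as the s390 crypto glue code does. A minimal usage sketch against that real API; the helper name and its body are hypothetical:

#include <asm/fpu/api.h>

/* Hypothetical helper: run a vector-register computation in the kernel. */
static void do_vector_work(void)
{
	struct kernel_fpu state;

	/* Saves whatever FPU/vector state is still live, per the hunk above. */
	kernel_fpu_begin(&state, KERNEL_VXR);
	/* ... use vector registers V0-V31 here ... */
	kernel_fpu_end(&state, KERNEL_VXR);
}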
#include <linux/bits.h>
#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
-#define CIF_FPU 3 /* restore FPU registers */
#define CIF_ENABLED_WAIT 5 /* in enabled wait state */
#define CIF_MCCK_GUEST 6 /* machine check happening in guest */
#define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */
#define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)
-#define _CIF_FPU BIT(CIF_FPU)
#define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
#define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU)
#define TIF_PATCH_PENDING 5 /* pending live patching update */
#define TIF_PGSTE 6 /* New mm's will use 4K page tables */
#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
+#define TIF_FPU 8 /* restore FPU registers on exit to usermode */
#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
#define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */
#define _TIF_UPROBE BIT(TIF_UPROBE)
#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE)
#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
+#define _TIF_FPU BIT(TIF_FPU)
#define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
#define _TIF_PER_TRAP BIT(TIF_PER_TRAP)
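The pairing follows the file's existing convention: TIF_FPU is a bit number fed to the atomic bitops behind test/set/clear_thread_flag(), while _TIF_FPU is the BIT() mask used by mask-based tests such as the entry assembly below. A small illustration, not part of the patch:

#include <linux/thread_info.h>

/* Illustration only: both forms inspect the same bit of current's flags. */
static bool fpu_reload_pending(void)
{
	bool by_bit  = test_thread_flag(TIF_FPU);			/* bit number */
	bool by_mask = (current_thread_info()->flags & _TIF_FPU) != 0;	/* mask */

	return by_bit || by_mask;	/* identical in practice */
}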
oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
tm __SIE_PROG20+3(%r14),3 # last exit...
jnz .Lsie_skip
- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
+ TSTMSK __SF_SIE_FLAGS(%r15),_TIF_FPU
jo .Lsie_skip # exit if fp/vx regs changed
lg %r14,__SF_SIE_CONTROL_PHYS(%r15) # get sie block phys addr
BPEXIT __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	if (likely(cpu_has_vx()))
		load_vx_regs(regs);
else
load_fp_regs(regs);
- clear_cpu_flag(CIF_FPU);
+ clear_thread_flag(TIF_FPU);
}
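The excerpt cuts off the small load_fpu_regs() wrapper that follows here and that the KVM hunks below keep calling; a sketch of its shape at this point in the series (the exact irq primitives are an assumption):

void load_fpu_regs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__load_fpu_regs();
	local_irq_restore(flags);
}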
void save_fpu_regs(void)
{
	unsigned long flags;
	struct fpu *state;
	void *regs;

local_irq_save(flags);
- if (test_cpu_flag(CIF_FPU))
+ if (test_thread_flag(TIF_FPU))
goto out;
	state = &current->thread.fpu;
	regs = current->thread.fpu.regs;
	if (likely(cpu_has_vx()))
		save_vx_regs(regs);
else
save_fp_regs(regs);
- set_cpu_flag(CIF_FPU);
+ set_thread_flag(TIF_FPU);
out:
local_irq_restore(flags);
}
{
/*
* Save the floating-point or vector register state of the current
- * task and set the CIF_FPU flag to lazy restore the FPU register
+ * task and set the TIF_FPU flag to lazy restore the FPU register
* state when returning to user space.
*/
save_fpu_regs();
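This is the pattern that makes in-kernel edits of user FPU state safe: once save_fpu_regs() has run, the authoritative copy sits in current->thread.fpu and TIF_FPU guarantees a reload on the way back to user space. A hedged sketch of such a caller; the helper and the fpc update are hypothetical:

#include <linux/sched.h>
#include <asm/fpu/api.h>

/* Hypothetical caller: change the user-visible FPU control word. */
static void set_user_fpc(u32 new_fpc)
{
	save_fpu_regs();			/* registers -> memory, TIF_FPU set */
	current->thread.fpu.fpc = new_fpc;	/* edit the saved image */
	/* No explicit reload needed: arch_exit_to_user_mode() picks it up. */
}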
struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
{
- /*
- * save_fpu_regs() sets the CIF_FPU flag, which enforces
- * a restore of the floating point / vector registers as
- * soon as the next task returns to user space.
- */
save_fpu_regs();
save_access_regs(&prev->thread.acrs[0]);
save_ri_cb(prev->thread.ri_cb);
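The deleted comment described the old cross-task trick: save_fpu_regs() set the per-cpu CIF_FPU flag, which the *next* task then saw on its way back to user space. With a per-task flag that coupling disappears, so the comment can simply go; each task carries its own TIF_FPU from the last time its registers were saved. Illustrative fragment (hypothetical helper, not from the patch):

#include <linux/sched.h>

/* Marking the outgoing task has no side effect on the incoming one. */
static void mark_fpu_state_saved(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_FPU);	/* affects only @tsk */
}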
vcpu->run->s.regs.gprs,
sizeof(sie_page->pv_grregs));
}
- if (test_cpu_flag(CIF_FPU))
+ if (test_thread_flag(TIF_FPU))
load_fpu_regs();
exit_reason = sie64a(vcpu->arch.sie_block,
vcpu->run->s.regs.gprs);
*/
vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
barrier();
- if (test_cpu_flag(CIF_FPU))
+ if (test_thread_flag(TIF_FPU))
load_fpu_regs();
if (!kvm_s390_vcpu_sie_inhibited(vcpu))
rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
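Both SIE call sites need the same rule: the lazy reload only runs on exit to user space, so a vcpu thread that still has TIF_FPU set must put the guest's FPU/vector state back into the hardware registers itself before entering the guest. The shared pattern as a sketch (wrapper name hypothetical):

/* Hypothetical wrapper showing the common pattern around sie64a(). */
static int enter_guest(struct kvm_vcpu *vcpu)
{
	/* Exit-to-user lazy restore does not run on the SIE entry path. */
	if (test_thread_flag(TIF_FPU))
		load_fpu_regs();
	return sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
}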