@@ -514,6 +514,26 @@ static inline void fpregs_activate(struct fpu *fpu)
trace_x86_fpu_regs_activated(fpu);
}
+/*
+ * Load the FPU state for the current task. Call with preemption disabled.
+ *
+ * If the FPU register state cached on this CPU is stale for @fpu (the
+ * task migrated, or another task used the FPU here since), reload it
+ * from the in-memory copy before marking the registers as active.
+ * NOTE(review): hunk line counts in the @@ header need regenerating if
+ * this comment lands in the patch (git apply --recount).
+ */
+static inline void __fpregs_load_activate(struct fpu *fpu, int cpu)
+{
+ if (!fpregs_state_valid(fpu, cpu))
+ copy_kernel_to_fpregs(&fpu->state);
+ fpregs_activate(fpu);
+}
+
+/*
+ * Begin a section that modifies the FPU register state: disable
+ * preemption so the task cannot be scheduled out (and its FPU
+ * registers saved/clobbered) mid-update. Pair with
+ * __fpregs_changes_end().
+ */
+static inline void __fpregs_changes_begin(void)
+{
+ preempt_disable();
+}
+
+/*
+ * End a section started by __fpregs_changes_begin(): re-enable
+ * preemption. May trigger an immediate reschedule if one became
+ * pending while preemption was disabled.
+ */
+static inline void __fpregs_changes_end(void)
+{
+ preempt_enable();
+}
+
/*
* FPU state switching for scheduling.
*
@@ -553,11 +573,8 @@ static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
bool preload = static_cpu_has(X86_FEATURE_FPU) &&
new_fpu->initialized;
- if (preload) {
- if (!fpregs_state_valid(new_fpu, cpu))
- copy_kernel_to_fpregs(&new_fpu->state);
- fpregs_activate(new_fpu);
- }
+ if (preload)
+ __fpregs_load_activate(new_fpu, cpu);
}
/*