diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -112,6 +112,7 @@ extern void fpu__init_cpu(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
+extern void fpstate_cache_init(void);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -59,6 +59,8 @@ static DEFINE_PER_CPU(bool, in_kernel_fpu);
*/
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
+struct kmem_cache *fpstate_cachep;
+
static bool kernel_fpu_disabled(void)
{
return this_cpu_read(in_kernel_fpu);
@@ -443,7 +445,9 @@ static void __fpstate_reset(struct fpstate *fpstate)
void fpstate_reset(struct fpu *fpu)
{
-	/* Set the fpstate pointer to the default fpstate */
-	fpu->fpstate = &fpu->__fpstate;
+	/*
+	 * Set the fpstate pointer to the default fpstate. Under ASI the
+	 * pointer is left alone: it already refers to a separately
+	 * allocated slab object.
+	 */
+	if (!cpu_feature_enabled(X86_FEATURE_ASI))
+		fpu->fpstate = &fpu->__fpstate;
+
__fpstate_reset(fpu->fpstate);
/* Initialize the permission related info in fpu */
@@ -464,6 +468,26 @@ static inline void fpu_inherit_perms(struct fpu *dst_fpu)
}
}
+void fpstate_cache_init(void)
+{
+ if (cpu_feature_enabled(X86_FEATURE_ASI)) {
+ size_t fpstate_size;
+
+ /* TODO: Is the ALIGN-64 really needed? */
+ fpstate_size = fpu_kernel_cfg.default_size +
+ ALIGN(offsetof(struct fpstate, regs), 64);
+
+ fpstate_cachep = kmem_cache_create_usercopy(
+ "fpstate",
+ fpstate_size,
+ __alignof__(struct fpstate),
+ SLAB_PANIC | SLAB_ACCOUNT,
+ offsetof(struct fpstate, regs),
+ fpu_kernel_cfg.default_size,
+ NULL);
+ }
+}
+
/* Clone current's FPU state on fork */
int fpu_clone(struct task_struct *dst, unsigned long clone_flags)
{
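
For context on fpstate_cache_init() above: kmem_cache_create_usercopy()
takes a (useroffset, usersize) pair naming the only region of each object
that hardened usercopy lets cross the user/kernel boundary. Here that is
the register area, starting at offsetof(struct fpstate, regs) and spanning
fpu_kernel_cfg.default_size bytes, so ptrace and signal paths can still
copy registers while the fpstate header stays off limits. The object size
pads the header up to 64 bytes, keeping the register save area 64-byte
aligned for XSAVE. A standalone sketch of that size arithmetic, using a
stand-in struct and a local ALIGN(); every name below is illustrative
rather than the kernel's:

#include <stdio.h>
#include <stddef.h>

/* Same rounding the kernel's ALIGN() does, for a power-of-two 'a'. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Stand-in for struct fpstate: a short header, then the register blob. */
struct toy_fpstate {
	unsigned int	size;
	unsigned int	user_size;
	unsigned char	is_valloc;
	unsigned char	regs[];		/* register save area */
};

int main(void)
{
	size_t default_size = 2696;	/* example XSAVE size; CPU dependent */
	size_t header = offsetof(struct toy_fpstate, regs);

	printf("header=%zu padded=%zu object=%zu\n",
	       header, ALIGN(header, 64), default_size + ALIGN(header, 64));
	return 0;
}
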
@@ -473,6 +497,22 @@ int fpu_clone(struct task_struct *dst, unsigned long clone_flags)
/* The new task's FPU state cannot be valid in the hardware. */
dst_fpu->last_cpu = -1;
+ if (cpu_feature_enabled(X86_FEATURE_ASI)) {
+ dst_fpu->fpstate = kmem_cache_alloc_node(
+ fpstate_cachep, GFP_KERNEL,
+ page_to_nid(virt_to_page(dst)));
+ if (!dst_fpu->fpstate)
+ return -ENOMEM;
+
+ /*
+ * TODO: We may be able to skip the copy since the registers are
+ * restored below anyway.
+ */
+ memcpy(dst_fpu->fpstate, src_fpu->fpstate,
+ fpu_kernel_cfg.default_size +
+ offsetof(struct fpstate, regs));
+ }
+
fpstate_reset(dst_fpu);
if (!cpu_feature_enabled(X86_FEATURE_FPU))
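
The allocation in fpu_clone() asks for memory on the NUMA node that
already holds the child's task_struct, via page_to_nid(virt_to_page(dst)),
so the now-detached fpstate stays as node-local as the embedded one used
to be. The same pattern in isolation, as a sketch; alloc_near() is a
hypothetical helper, not a kernel API:

#include <linux/mm.h>
#include <linux/slab.h>

/* Allocate from @cachep on the NUMA node backing @neighbour. */
static void *alloc_near(struct kmem_cache *cachep, void *neighbour)
{
	int nid = page_to_nid(virt_to_page(neighbour));

	return kmem_cache_alloc_node(cachep, GFP_KERNEL, nid);
}
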
@@ -531,7 +571,8 @@ int fpu_clone(struct task_struct *dst, unsigned long clone_flags)
void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size)
{
*offset = offsetof(struct thread_struct, fpu.__fpstate.regs);
- *size = fpu_kernel_cfg.default_size;
+ *size = cpu_feature_enabled(X86_FEATURE_ASI)
+ ? 0 : fpu_kernel_cfg.default_size;
}
/*
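
Returning a zero usersize above effectively removes the task_struct
usercopy whitelist when ASI is enabled: no byte of task_struct may then be
copied directly to or from userspace, and register-state copies are
covered by the fpstate cache's own whitelist instead. This relies on the
generic helper in kernel/fork.c zeroing the offset whenever the size is
zero; roughly like this (paraphrased from memory, not verbatim):

static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
{
	/* Fetch the thread_struct whitelist from the architecture. */
	arch_thread_struct_whitelist(offset, size);

	/*
	 * A zero-sized whitelist gets its offset zeroed as well;
	 * otherwise the offset is shifted to thread_struct's position
	 * within task_struct.
	 */
	if (*size == 0)
		*offset = 0;
	else
		*offset += offsetof(struct task_struct, thread);
}
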
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -161,9 +161,11 @@ static void __init fpu__init_task_struct_size(void)
/*
* Add back the dynamically-calculated register state
- * size.
+ * size, except when ASI is enabled, since in that case
+ * the FPU state is always allocated dynamically.
*/
- task_size += fpu_kernel_cfg.default_size;
+ if (!cpu_feature_enabled(X86_FEATURE_ASI))
+ task_size += fpu_kernel_cfg.default_size;
/*
* We dynamically size 'struct fpu', so we require that
@@ -223,6 +225,7 @@ static void __init fpu__init_init_fpstate(void)
*/
void __init fpu__init_system(struct cpuinfo_x86 *c)
{
+	current->thread.fpu.fpstate = &current->thread.fpu.__fpstate;
	fpstate_reset(&current->thread.fpu);
fpu__init_system_early_generic(c);
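
The explicit assignment added to fpu__init_system() is needed because
fpstate_reset() no longer initializes fpu->fpstate under ASI, and this
code runs long before fork_init() calls arch_task_cache_init() to create
the slab cache. The boot task therefore has to be wired to the fpstate
embedded in its task_struct by hand. A hypothetical helper spelling out
that assumption (not part of the patch):

/*
 * Point the boot task at its embedded fpstate. The fpstate slab cache
 * does not exist yet this early in boot, even when ASI is enabled.
 */
static void __init fpu__wire_boot_fpstate(struct fpu *fpu)
{
	fpu->fpstate = &fpu->__fpstate;
	fpstate_reset(fpu);
}
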
diff --git a/arch/x86/kernel/fpu/internal.h b/arch/x86/kernel/fpu/internal.h
--- a/arch/x86/kernel/fpu/internal.h
+++ b/arch/x86/kernel/fpu/internal.h
@@ -3,6 +3,7 @@
#define __X86_KERNEL_FPU_INTERNAL_H
extern struct fpstate init_fpstate;
+extern struct kmem_cache *fpstate_cachep;
/* CPU feature check wrappers */
static __always_inline __pure bool use_xsave(void)
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -13,6 +13,7 @@
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
+#include <linux/slab.h>
#include <asm/fpu/api.h>
#include <asm/fpu/regset.h>
@@ -1495,8 +1496,15 @@ arch_initcall(xfd_update_static_branch)
void fpstate_free(struct fpu *fpu)
{
- if (fpu->fpstate && fpu->fpstate != &fpu->__fpstate)
- vfree(fpu->fpstate);
+ WARN_ON_ONCE(cpu_feature_enabled(X86_FEATURE_ASI) &&
+ fpu->fpstate == &fpu->__fpstate);
+
+ if (fpu->fpstate && fpu->fpstate != &fpu->__fpstate) {
+ if (fpu->fpstate->is_valloc)
+ vfree(fpu->fpstate);
+ else
+ kmem_cache_free(fpstate_cachep, fpu->fpstate);
+ }
}
/**
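
fpstate_free() now picks between two release paths: dynamically sized
fpstates are vmalloc()ed by fpstate_realloc() and carry the pre-existing
is_valloc flag, while ASI's default-sized fpstates come from the slab
cache. The fpstate_realloc() hunk below makes the same choice when
dropping the old fpstate. A hypothetical helper the two call sites could
share (illustrative only, not part of the patch):

static void __fpstate_free(struct fpstate *fps)
{
	if (!fps)
		return;

	if (fps->is_valloc)
		vfree(fps);				/* from fpstate_realloc() */
	else
		kmem_cache_free(fpstate_cachep, fps);	/* ASI slab object */
}
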
@@ -1574,7 +1582,14 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
fpregs_unlock();
- vfree(curfps);
+ WARN_ON_ONCE(cpu_feature_enabled(X86_FEATURE_ASI) && !curfps);
+ if (curfps) {
+ if (curfps->is_valloc)
+ vfree(curfps);
+ else
+ kmem_cache_free(fpstate_cachep, curfps);
+ }
+
return 0;
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -80,6 +80,11 @@ EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
+void __init arch_task_cache_init(void)
+{
+ fpstate_cache_init();
+}
+
/*
* this gets called so that we can store lazy state into memory and copy the
* current task into the new thread.
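
arch_task_cache_init() is the hook fork_init() runs while the task_struct
caches are set up during boot, well before the first clone, so
fpstate_cachep is guaranteed to exist by the time fpu_clone() first needs
it. The generic side looks roughly like this (paraphrased from
kernel/fork.c for context, not verbatim):

/* Weak default; this patch overrides it on x86. */
void __init __weak arch_task_cache_init(void) { }

void __init fork_init(void)
{
	/* ... task_struct cache creation elided ... */

	/* do the arch specific task caches init */
	arch_task_cache_init();

	/* ... */
}
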
@@ -101,7 +106,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
#ifdef CONFIG_X86_64
void arch_release_task_struct(struct task_struct *tsk)
{
- if (fpu_state_size_dynamic())
+ if (fpu_state_size_dynamic() || cpu_feature_enabled(X86_FEATURE_ASI))
fpstate_free(&tsk->thread.fpu);
}
#endif
We are going to be mapping the task_struct in the restricted ASI address
space. However, the task_struct also contains the FPU register state
embedded inside it, which can contain sensitive information. So when ASI
is enabled, always allocate the FPU state from a separate slab cache to
keep it out of task_struct.

Signed-off-by: Junaid Shahid <junaids@google.com>
---
 arch/x86/include/asm/fpu/api.h |  1 +
 arch/x86/kernel/fpu/core.c     | 45 ++++++++++++++++++++++++++++++++--
 arch/x86/kernel/fpu/init.c     |  7 ++++--
 arch/x86/kernel/fpu/internal.h |  1 +
 arch/x86/kernel/fpu/xstate.c   | 21 +++++++++++++---
 arch/x86/kernel/process.c      |  7 +++++-
 6 files changed, 74 insertions(+), 8 deletions(-)