@@ -979,6 +979,13 @@ config HAVE_SPARSE_SYSCALL_NR
entries at 4000, 5000 and 6000 locations. This option turns on syscall
related optimizations for a given architecture.
+config HAVE_SECRETMEM_UNCACHED
+ bool
+ help
+ An architecture can select this if non-cached mappings on that
+ architecture prevent speculative loads, making them useful
+ for secret protection.
+
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
@@ -218,6 +218,7 @@ config X86
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_USER_RETURN_NOTIFIER
select HAVE_GENERIC_VDSO
+ select HAVE_SECRETMEM_UNCACHED
select HOTPLUG_SMT if SMP
select IRQ_FORCED_THREADING
select NEED_SG_DMA_LENGTH
@@ -97,5 +97,6 @@
#define DEVMEM_MAGIC 0x454d444d /* "DMEM" */
#define Z3FOLD_MAGIC 0x33
#define PPC_CMM_MAGIC 0xc7571590
+#define SECRETMEM_MAGIC 0x5345434d /* "SECM" */
#endif /* __LINUX_MAGIC_H__ */
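
The new magic makes a secretmem descriptor identifiable from userspace
with fstatfs(2). A minimal sketch, assuming the value lands in the
installed <linux/magic.h>; the constant is repeated locally here in
case the installed headers predate this patch:

    #include <sys/vfs.h>

    #define SECRETMEM_MAGIC 0x5345434d	/* "SECM", mirrors the kernel definition */

    static int is_secretmem(int fd)
    {
            struct statfs st;

            if (fstatfs(fd, &st) < 0)
                    return 0;
            return st.f_type == SECRETMEM_MAGIC;
    }
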
new file mode 100644
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_SECRETMEM_H
+#define _UAPI_LINUX_SECRETMEM_H
+
+/* secretmem operation modes */
+#define SECRETMEM_EXCLUSIVE 0x1
+#define SECRETMEM_UNCACHED 0x2
+
+#endif /* _UAPI_LINUX_SECRETMEM_H */
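
For illustration, a hedged userspace sketch of the intended call
sequence: create the descriptor, size it, and map it MAP_SHARED (the
mmap handler below rejects private mappings). The syscall number used
here is an assumption, as it is assigned per architecture elsewhere in
the series, and sizing the file with ftruncate(2) assumes the generic
inode code accepts the size change:

    #define _GNU_SOURCE
    #include <sys/syscall.h>
    #include <sys/mman.h>
    #include <string.h>
    #include <unistd.h>

    #include <linux/secretmem.h>	/* the header added above */

    #ifndef __NR_memfd_secret
    #define __NR_memfd_secret 439	/* assumption: per-arch number */
    #endif

    int main(void)
    {
            int fd = syscall(__NR_memfd_secret, SECRETMEM_EXCLUSIVE);
            char *p;

            if (fd < 0 || ftruncate(fd, 4096) < 0)
                    return 1;

            p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return 1;

            /* first touch faults the page in and drops it from the direct map */
            memcpy(p, "key material", 12);

            munmap(p, 4096);
            close(fd);
            return 0;
    }
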
@@ -349,6 +349,8 @@ COND_SYSCALL(pkey_mprotect);
COND_SYSCALL(pkey_alloc);
COND_SYSCALL(pkey_free);
+/* memfd_secret */
+COND_SYSCALL(memfd_secret);
/*
* Architecture specific weak syscall entries.
@@ -872,4 +872,8 @@ config ARCH_HAS_HUGEPD
config MAPPING_DIRTY_HELPERS
bool
+config SECRETMEM
+ def_bool ARCH_HAS_SET_DIRECT_MAP && !EMBEDDED
+ select GENERIC_ALLOCATOR
+
endmenu
@@ -121,3 +121,4 @@ obj-$(CONFIG_MEMFD_CREATE) += memfd.o
obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o
obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o
+obj-$(CONFIG_SECRETMEM) += secretmem.o
new file mode 100644
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/memfd.h>
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/pagemap.h>
+#include <linux/syscalls.h>
+#include <linux/pseudo_fs.h>
+#include <linux/set_memory.h>
+#include <linux/sched/signal.h>
+
+#include <uapi/linux/secretmem.h>
+#include <uapi/linux/magic.h>
+
+#include <asm/tlbflush.h>
+
+#include "internal.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "secretmem: " fmt
+
+#ifdef CONFIG_HAVE_SECRETMEM_UNCACHED
+#define SECRETMEM_MODE_MASK (SECRETMEM_EXCLUSIVE | SECRETMEM_UNCACHED)
+#else
+#define SECRETMEM_MODE_MASK (SECRETMEM_EXCLUSIVE)
+#endif
+
+#define SECRETMEM_FLAGS_MASK SECRETMEM_MODE_MASK
+
+struct secretmem_ctx {
+ unsigned int mode;
+};
+
+static struct page *secretmem_alloc_page(gfp_t gfp)
+{
+ /*
+ * FIXME: use a cache of large pages to reduce the direct map
+ * fragmentation
+ */
+ return alloc_page(gfp);
+}
+
+static vm_fault_t secretmem_fault(struct vm_fault *vmf)
+{
+ struct address_space *mapping = vmf->vma->vm_file->f_mapping;
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+ pgoff_t offset = vmf->pgoff;
+ unsigned long addr;
+ struct page *page;
+ int ret = 0;
+
+ if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
+ return vmf_error(-EINVAL);
+
+ page = find_get_entry(mapping, offset);
+ if (!page) {
+ page = secretmem_alloc_page(vmf->gfp_mask);
+ if (!page)
+ return vmf_error(-ENOMEM);
+
+ ret = add_to_page_cache(page, mapping, offset, vmf->gfp_mask);
+ if (unlikely(ret))
+ goto err_put_page;
+
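+ /*
+ * Drop the page from the kernel direct map so the secret is
+ * reachable only through this user mapping; the TLB flush
+ * below removes any stale kernel translation.
+ */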
+ ret = set_direct_map_invalid_noflush(page);
+ if (ret)
+ goto err_del_page_cache;
+
+ addr = (unsigned long)page_address(page);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+ __SetPageUptodate(page);
+
+ ret = VM_FAULT_LOCKED;
+ }
+
+ vmf->page = page;
+ return ret;
+
+err_del_page_cache:
+ delete_from_page_cache(page);
+err_put_page:
+ put_page(page);
+ return vmf_error(ret);
+}
+
+static const struct vm_operations_struct secretmem_vm_ops = {
+ .fault = secretmem_fault,
+};
+
+static int secretmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct secretmem_ctx *ctx = file->private_data;
+ unsigned long mode = ctx->mode;
+ unsigned long len = vma->vm_end - vma->vm_start;
+
+ if (!mode)
+ return -EINVAL;
+
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+ return -EINVAL;
+
+ if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
+ return -EAGAIN;
+
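+ /*
+ * SECRETMEM_UNCACHED additionally makes the user mapping
+ * non-cached so speculative loads cannot pull the secret into
+ * the CPU cache; both modes use the fault handler that drops
+ * pages from the kernel direct map.
+ */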
+ switch (mode) {
+ case SECRETMEM_UNCACHED:
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ fallthrough;
+ case SECRETMEM_EXCLUSIVE:
+ vma->vm_ops = &secretmem_vm_ops;
+ break;
+ default:
+ return -EINVAL;
+ }
+
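+ /* lock the pages: secret memory must never reach the swap device */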
+ vma->vm_flags |= VM_LOCKED;
+
+ return 0;
+}
+
+const struct file_operations secretmem_fops = {
+ .mmap = secretmem_mmap,
+};
+
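+/*
+ * Pages removed from the direct map cannot be migrated: refuse
+ * isolation and migration outright, and restore the direct map
+ * entry when a page finally leaves the page cache.
+ */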
+static bool secretmem_isolate_page(struct page *page, isolate_mode_t mode)
+{
+ return false;
+}
+
+static int secretmem_migratepage(struct address_space *mapping,
+ struct page *newpage, struct page *page,
+ enum migrate_mode mode)
+{
+ return -EBUSY;
+}
+
+static void secretmem_freepage(struct page *page)
+{
+ set_direct_map_default_noflush(page);
+}
+
+static const struct address_space_operations secretmem_aops = {
+ .freepage = secretmem_freepage,
+ .migratepage = secretmem_migratepage,
+ .isolate_page = secretmem_isolate_page,
+};
+
+static struct vfsmount *secretmem_mnt;
+
+static struct file *secretmem_file_create(unsigned long flags)
+{
+ struct file *file = ERR_PTR(-ENOMEM);
+ struct secretmem_ctx *ctx;
+ struct inode *inode;
+
+ inode = alloc_anon_inode(secretmem_mnt->mnt_sb);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ goto err_free_inode;
+
+ file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem",
+ O_RDWR, &secretmem_fops);
+ if (IS_ERR(file))
+ goto err_free_ctx;
+
+ mapping_set_unevictable(inode->i_mapping);
+
+ inode->i_mapping->private_data = ctx;
+ inode->i_mapping->a_ops = &secretmem_aops;
+
+ /* pretend we are a normal file with zero size */
+ inode->i_mode |= S_IFREG;
+ inode->i_size = 0;
+
+ file->private_data = ctx;
+
+ ctx->mode = flags & SECRETMEM_MODE_MASK;
+
+ return file;
+
+err_free_ctx:
+ kfree(ctx);
+err_free_inode:
+ iput(inode);
+ return file;
+}
+
+SYSCALL_DEFINE1(memfd_secret, unsigned long, flags)
+{
+ struct file *file;
+ unsigned int mode;
+ int fd, err;
+
+ /* make sure local flags do not conflict with global fcntl.h */
+ BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC);
+
+ if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC))
+ return -EINVAL;
+
+ /* modes are mutually exclusive, only one mode bit should be set */
+ mode = flags & SECRETMEM_FLAGS_MASK;
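+ /* ffs() == fls() iff at most one bit is set; zero is rejected at mmap time */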
+ if (ffs(mode) != fls(mode))
+ return -EINVAL;
+
+ fd = get_unused_fd_flags(flags & O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ file = secretmem_file_create(flags);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ goto err_put_fd;
+ }
+
+ file->f_flags |= O_LARGEFILE;
+
+ fd_install(fd, file);
+ return fd;
+
+err_put_fd:
+ put_unused_fd(fd);
+ return err;
+}
+
+static void secretmem_evict_inode(struct inode *inode)
+{
+ struct secretmem_ctx *ctx = inode->i_mapping->private_data;
+
+ truncate_inode_pages_final(&inode->i_data);
+ clear_inode(inode);
+ kfree(ctx);
+}
+
+static const struct super_operations secretmem_super_ops = {
+ .evict_inode = secretmem_evict_inode,
+};
+
+static int secretmem_init_fs_context(struct fs_context *fc)
+{
+ struct pseudo_fs_context *ctx = init_pseudo(fc, SECRETMEM_MAGIC);
+
+ if (!ctx)
+ return -ENOMEM;
+ ctx->ops = &secretmem_super_ops;
+
+ return 0;
+}
+
+static struct file_system_type secretmem_fs = {
+ .name = "secretmem",
+ .init_fs_context = secretmem_init_fs_context,
+ .kill_sb = kill_anon_super,
+};
+
+static int secretmem_init(void)
+{
+ int ret = 0;
+
+ secretmem_mnt = kern_mount(&secretmem_fs);
+ if (IS_ERR(secretmem_mnt))
+ ret = PTR_ERR(secretmem_mnt);
+
+ return ret;
+}
+fs_initcall(secretmem_init);