diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -53,3 +53,5 @@ obj-$(CONFIG_PAGE_TABLE_ISOLATION) += pti.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o
+
+obj-$(CONFIG_PRMEM) += prmem.o
diff --git a/arch/x86/mm/prmem.c b/arch/x86/mm/prmem.c
new file mode 100644
--- /dev/null
+++ b/arch/x86/mm/prmem.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * prmem.c: Memory Protection Library - x86_64 backend
+ *
+ * (C) Copyright 2018-2019 Huawei Technologies Co. Ltd.
+ * Author: Igor Stoppa <igor.stoppa@huawei.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
+
+unsigned long __init __init_wr_base(void)
+{
+ /*
+ * Place 64TB of kernel address space within 128TB of user address
+ * space, at a random page aligned offset.
+ */
+ return (((unsigned long)kaslr_get_random_long("WR Poke")) &
+ PAGE_MASK) % (64 * _BITUL(40));
+}
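
The generic write-rare code that consumes this hook is not part of this diff. As a minimal sketch only, assuming the common kernel convention of a weak default on the arch-independent side that the definition above overrides (the fallback below is hypothetical and not taken from this patch), the generic side could look like:

#include <linux/init.h>
#include <linux/compiler.h>

/*
 * Hypothetical generic-side fallback: no randomization unless the
 * architecture provides its own __init_wr_base(), as x86_64 does above.
 */
unsigned long __init __weak __init_wr_base(void)
{
	return 0;
}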
Define, for x86_64, the base address for the alternate mapping used by write-rare. Since the kernel address space spans 64TB and it is mapped into a user address space of 128TB, the kernel address space can be shifted by a random, page-aligned offset of up to 64TB. This is accomplished by providing an arch-specific version of the function __init_wr_base().

Signed-off-by: Igor Stoppa <igor.stoppa@huawei.com>

CC: Andy Lutomirski <luto@amacapital.net>
CC: Nadav Amit <nadav.amit@gmail.com>
CC: Matthew Wilcox <willy@infradead.org>
CC: Peter Zijlstra <peterz@infradead.org>
CC: Kees Cook <keescook@chromium.org>
CC: Dave Hansen <dave.hansen@linux.intel.com>
CC: Mimi Zohar <zohar@linux.vnet.ibm.com>
CC: Thiago Jung Bauermann <bauerman@linux.ibm.com>
CC: Ahmed Soliman <ahmedsoliman@mena.vt.edu>
CC: linux-integrity@vger.kernel.org
CC: kernel-hardening@lists.openwall.com
CC: linux-mm@kvack.org
CC: linux-kernel@vger.kernel.org
---
 arch/x86/mm/Makefile      |  2 ++
 arch/x86/mm/prmem.c (new) | 20 ++++++++++++++++++++
 2 files changed, 22 insertions(+)
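
The offset produced by __init_wr_base() stays both page aligned and below 64TB: 64TB (2^46) is a multiple of PAGE_SIZE, so reducing a PAGE_MASK-aligned value modulo 64TB preserves the alignment. The following self-contained userspace sketch demonstrates the same arithmetic; it is illustrative only, rand_long() is a stand-in for kaslr_get_random_long(), and the 4KB page size and 64-bit unsigned long are assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define SIZE_64TB	(64UL * (1UL << 40))

/* Stand-in for the kernel's kaslr_get_random_long(). */
static unsigned long rand_long(void)
{
	return ((unsigned long)rand() << 32) | (unsigned long)rand();
}

int main(void)
{
	for (int i = 0; i < 1000; i++) {
		/* Same computation as __init_wr_base(): align, then bound. */
		unsigned long base = (rand_long() & PAGE_MASK) % SIZE_64TB;

		/* Page aligned and strictly below 64TB. */
		assert((base & ~PAGE_MASK) == 0);
		assert(base < SIZE_64TB);
	}
	printf("all sampled bases are page aligned and below 64TB\n");
	return 0;
}

Since 64TB is a power of two, the modulo is equivalent to masking with (SIZE_64TB - 1); the patch keeps the modulo form.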