diff --git a/arch/arm64/include/asm/rsi.h b/arch/arm64/include/asm/rsi.h
new file mode 100644
--- /dev/null
+++ b/arch/arm64/include/asm/rsi.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 ARM Ltd.
+ */
+
+#ifndef __ASM_RSI_H_
+#define __ASM_RSI_H_
+
+#include <linux/jump_label.h>
+#include <asm/rsi_cmds.h>
+
+DECLARE_STATIC_KEY_FALSE(rsi_present);
+
+void __init arm64_rsi_init(void);
+void __init arm64_rsi_setup_memory(void);
+static inline bool is_realm_world(void)
+{
+ return static_branch_unlikely(&rsi_present);
+}
+
+static inline int rsi_set_memory_range(phys_addr_t start, phys_addr_t end,
+ enum ripas state, unsigned long flags)
+{
+ unsigned long ret;
+ phys_addr_t top;
+
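+ /*
+ * RSI_IPA_STATE_SET may complete only part of the requested range; the
+ * first unprocessed address is returned in @top. Retry from there, and
+ * treat a non-advancing @top as an error so that we cannot livelock.
+ */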
+ while (start != end) {
+ ret = rsi_set_addr_range_state(start, end, state, flags, &top);
+ if (WARN_ON(ret || top <= start || top > end))
+ return -EINVAL;
+ start = top;
+ }
+
+ return 0;
+}
+
+/*
+ * Convert the specified range to RAM. Do not use this if you rely on the
+ * contents of a page that may already be in RAM state.
+ */
+static inline int rsi_set_memory_range_protected(phys_addr_t start,
+ phys_addr_t end)
+{
+ return rsi_set_memory_range(start, end, RSI_RIPAS_RAM,
+ RSI_CHANGE_DESTROYED);
+}
+
+/*
+ * Convert the specified range to RAM. Refuse to convert any page that may
+ * have been DESTROYED without our permission.
+ */
+static inline int rsi_set_memory_range_protected_safe(phys_addr_t start,
+ phys_addr_t end)
+{
+ return rsi_set_memory_range(start, end, RSI_RIPAS_RAM,
+ RSI_NO_CHANGE_DESTROYED);
+}
+
+static inline int rsi_set_memory_range_shared(phys_addr_t start,
+ phys_addr_t end)
+{
+ return rsi_set_memory_range(start, end, RSI_RIPAS_EMPTY, 0);
+}
+#endif /* __ASM_RSI_H_ */
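
As a usage sketch, not part of the patch: a realm guest that needs the host to access a page-aligned buffer could drop the range's protected backing with the helper above. The function name here is hypothetical, and alignment and error handling are elided.

#include <asm/rsi.h>

/* Hypothetical example: make a page-aligned physical range host-accessible */
static int example_share_buffer(phys_addr_t pa, size_t size)
{
	/* Outside a realm there is no RIPAS to manage */
	if (!is_realm_world())
		return 0;

	/*
	 * RIPAS_EMPTY drops the protected backing; the guest must then
	 * access the range through its unprotected (shared) alias.
	 */
	return rsi_set_memory_range_shared(pa, pa + size);
}
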
diff --git a/arch/arm64/include/asm/rsi_cmds.h b/arch/arm64/include/asm/rsi_cmds.h
--- a/arch/arm64/include/asm/rsi_cmds.h
+++ b/arch/arm64/include/asm/rsi_cmds.h
@@ -10,6 +10,11 @@
#include <asm/rsi_smc.h>
+enum ripas {
+ RSI_RIPAS_EMPTY,
+ RSI_RIPAS_RAM,
+};
+
static inline unsigned long rsi_request_version(unsigned long req,
unsigned long *out_lower,
unsigned long *out_higher)
@@ -35,4 +40,21 @@ static inline unsigned long rsi_get_realm_config(struct realm_config *cfg)
return res.a0;
}
+static inline unsigned long rsi_set_addr_range_state(phys_addr_t start,
+ phys_addr_t end,
+ enum ripas state,
+ unsigned long flags,
+ phys_addr_t *top)
+{
+ struct arm_smccc_res res;
+
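+ /*
+ * SMC_RSI_IPA_STATE_SET takes the base address, top address, requested
+ * RIPAS value and flags in X1-X4. On return, res.a0 is the command
+ * status and res.a1 the first unprocessed address, i.e. the new base
+ * if the caller needs to retry.
+ */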
+ arm_smccc_smc(SMC_RSI_IPA_STATE_SET, start, end, state,
+ flags, 0, 0, 0, &res);
+
+ if (top)
+ *top = res.a1;
+
+ return res.a0;
+}
+
#endif
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -33,7 +33,8 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
- syscall.o proton-pack.o idle.o patching.o pi/
+ syscall.o proton-pack.o idle.o patching.o pi/ \
+ rsi.o
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
sys_compat.o
diff --git a/arch/arm64/kernel/rsi.c b/arch/arm64/kernel/rsi.c
new file mode 100644
--- /dev/null
+++ b/arch/arm64/kernel/rsi.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 ARM Ltd.
+ */
+
+#include <linux/jump_label.h>
+#include <linux/memblock.h>
+#include <linux/psci.h>
+#include <asm/rsi.h>
+
+DEFINE_STATIC_KEY_FALSE_RO(rsi_present);
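+/* Exported: is_realm_world() is inlined into module code via <asm/rsi.h> */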
+EXPORT_SYMBOL(rsi_present);
+
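+/*
+ * Version negotiation: rsi_request_version() asks the RMM for RSI_ABI_VERSION
+ * and, on failure, reports the lowest and highest ABI versions the RMM
+ * supports. The GET_MAJOR/GET_MINOR helpers assume the <asm/rsi_smc.h>
+ * encoding: major version in bits [31:16], minor in bits [15:0].
+ */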
+static bool rsi_version_matches(void)
+{
+ unsigned long ver_lower, ver_higher;
+ unsigned long ret = rsi_request_version(RSI_ABI_VERSION,
+ &ver_lower,
+ &ver_higher);
+
+ if (ret == SMCCC_RET_NOT_SUPPORTED)
+ return false;
+
+ if (ret != RSI_SUCCESS) {
+ pr_err("RME: RMM doesn't support RSI version %u.%u. Supported range: %lu.%lu-%lu.%lu\n",
+ RSI_ABI_VERSION_MAJOR, RSI_ABI_VERSION_MINOR,
+ RSI_ABI_VERSION_GET_MAJOR(ver_lower),
+ RSI_ABI_VERSION_GET_MINOR(ver_lower),
+ RSI_ABI_VERSION_GET_MAJOR(ver_higher),
+ RSI_ABI_VERSION_GET_MINOR(ver_higher));
+ return false;
+ }
+
+ pr_info("RME: Using RSI version %lu.%lu\n",
+ RSI_ABI_VERSION_GET_MAJOR(ver_lower),
+ RSI_ABI_VERSION_GET_MINOR(ver_lower));
+
+ return true;
+}
+
+void __init arm64_rsi_setup_memory(void)
+{
+ u64 i;
+ phys_addr_t start, end;
+
+ if (!is_realm_world())
+ return;
+
+ /*
+ * Iterate over the available memory ranges and convert the state to
+ * protected memory. We should take extra care to ensure that we DO NOT
+ * permit any "DESTROYED" pages to be converted to "RAM".
+ *
+ * BUG_ON is used because if the attempt to switch the memory to
+ * protected has failed here, then future accesses to the memory are
+ * simply going to be reflected as a fault which we can't handle.
+ * Bailing out early prevents the guest limping on and dying later.
+ */
+ for_each_mem_range(i, &start, &end) {
+ BUG_ON(rsi_set_memory_range_protected_safe(start, end));
+ }
+}
+
+void __init arm64_rsi_init(void)
+{
+ /*
+ * If PSCI isn't using SMC, RMM isn't present. Don't try to execute an
+ * SMC as it could be UNDEFINED.
+ */
+ if (!psci_early_test_conduit(SMCCC_CONDUIT_SMC))
+ return;
+ if (!rsi_version_matches())
+ return;
+
+ static_branch_enable(&rsi_present);
+}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -43,6 +43,7 @@
#include <asm/cpu_ops.h>
#include <asm/kasan.h>
#include <asm/numa.h>
+#include <asm/rsi.h>
#include <asm/scs.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -293,6 +294,11 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
* cpufeature code and early parameters.
*/
jump_label_init();
+ /*
+ * Initialise RSI before parsing early parameters so that an "earlycon"
+ * console uses the shared (unprotected) alias of I/O memory when
+ * running in a realm.
+ */
+ arm64_rsi_init();
parse_early_param();
dynamic_scs_init();
@@ -328,6 +334,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
arm64_memblock_init();
+ arm64_rsi_setup_memory();
+
paging_init();
acpi_table_upgrade();