@@ -265,6 +265,15 @@ struct midr_range {
#define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r)
#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
+struct target_impl_cpu {
+ u64 midr;
+ u64 revidr;
+ u64 aidr;
+};
+
+extern u32 target_impl_cpu_num;
+extern struct target_impl_cpu *target_impl_cpus;
+
static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
u32 rv_max)
{
@@ -276,8 +285,19 @@ static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
static inline bool is_midr_in_range(struct midr_range const *range)
{
- return midr_is_cpu_model_range(read_cpuid_id(), range->model,
- range->rv_min, range->rv_max);
+ int i;
+
+ if (!target_impl_cpu_num)
+ return midr_is_cpu_model_range(read_cpuid_id(), range->model,
+ range->rv_min, range->rv_max);
+
+ for (i = 0; i < target_impl_cpu_num; i++) {
+ if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
+ range->model,
+ range->rv_min, range->rv_max))
+ return true;
+ }
+ return false;
}
static inline bool
@@ -19,11 +19,14 @@ static inline u64 paravirt_steal_clock(int cpu)
}
int __init pv_time_init(void);
+void __init pv_target_impl_cpu_init(void);
#else
#define pv_time_init() do {} while (0)
+#define pv_target_impl_cpu_init() do {} while (0)
+
#endif // CONFIG_PARAVIRT
#endif
@@ -14,6 +14,9 @@
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>
+u32 target_impl_cpu_num;
+struct target_impl_cpu *target_impl_cpus;
+
static bool __maybe_unused
__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
u32 midr, u32 revidr)
@@ -32,9 +35,20 @@ __is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
- WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return __is_affected_midr_range(entry, read_cpuid_id(),
- read_cpuid(REVIDR_EL1));
+ int i;
+
+ if (!target_impl_cpu_num) {
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return __is_affected_midr_range(entry, read_cpuid_id(),
+ read_cpuid(REVIDR_EL1));
+ }
+
+ for (i = 0; i < target_impl_cpu_num; i++) {
+ if (__is_affected_midr_range(entry, target_impl_cpus[i].midr,
+ target_impl_cpus[i].revidr))
+ return true;
+ }
+ return false;
}
static bool __maybe_unused
@@ -85,6 +85,7 @@
#include <asm/kvm_host.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
+#include <asm/paravirt.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
@@ -3642,6 +3643,7 @@ unsigned long cpu_get_elf_hwcap3(void)
static void __init setup_boot_cpu_capabilities(void)
{
+ pv_target_impl_cpu_init();
/*
* The boot CPU's feature register values have been recorded. Detect
* boot cpucaps and local cpucaps for the boot CPU, then enable and
@@ -49,6 +49,8 @@ PROVIDE(__pi_arm64_sw_feature_override = arm64_sw_feature_override);
PROVIDE(__pi_arm64_use_ng_mappings = arm64_use_ng_mappings);
#ifdef CONFIG_CAVIUM_ERRATUM_27456
PROVIDE(__pi_cavium_erratum_27456_cpus = cavium_erratum_27456_cpus);
+PROVIDE(__pi_target_impl_cpu_num = target_impl_cpu_num);
+PROVIDE(__pi_target_impl_cpus = target_impl_cpus);
#endif
PROVIDE(__pi__ctype = _ctype);
PROVIDE(__pi_memstart_offset_seed = memstart_offset_seed);
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/io.h>
#include <linux/jump_label.h>
+#include <linux/memblock.h>
#include <linux/printk.h>
#include <linux/psci.h>
#include <linux/reboot.h>
@@ -20,6 +21,7 @@
#include <linux/types.h>
#include <linux/static_call.h>
+#include <asm/hypervisor.h>
#include <asm/paravirt.h>
#include <asm/pvclock-abi.h>
#include <asm/smp_plat.h>
@@ -153,6 +155,62 @@ static bool __init has_pv_steal_clock(void)
return (res.a0 == SMCCC_RET_SUCCESS);
}
+void __init pv_target_impl_cpu_init(void)
+{
+ int i;
+ unsigned long max_cpus;
+ struct arm_smccc_res res;
+ const u32 funcs[] = {
+ ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER,
+ ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS,
+ };
+
+ /* Nothing to do if the target CPU list has already been set up */
+ if (target_impl_cpu_num)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(funcs); ++i) {
+ if (!kvm_arm_hyp_service_available(funcs[i]))
+ return;
+ }
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID,
+ 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return;
+
+ if (res.a1 != ARM_SMCCC_KVM_DISCOVER_IMPL_VER_1_0 || !res.a2) {
+ pr_warn("Unsupported target CPU implementation version or no target CPUs\n");
+ return;
+ }
+
+ max_cpus = res.a2;
+ target_impl_cpus = memblock_alloc(sizeof(*target_impl_cpus) * max_cpus,
+ __alignof__(*target_impl_cpus));
+ if (!target_impl_cpus) {
+ pr_warn("Not enough memory for struct target_impl_cpu\n");
+ return;
+ }
+
+ for (i = 0; i < max_cpus; i++) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
+ i, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ memblock_free(target_impl_cpus,
+ sizeof(*target_impl_cpus) * max_cpus);
+ target_impl_cpus = NULL;
+ pr_warn("Discovering target implementation CPUs failed\n");
+ return;
+ }
+ target_impl_cpus[i].midr = res.a1;
+ target_impl_cpus[i].revidr = res.a2;
+ target_impl_cpus[i].aidr = res.a3;
+ }
+
+ target_impl_cpu_num = max_cpus;
+ pr_info("Number of target implementation CPUs is %u\n", target_impl_cpu_num);
+}
+
int __init pv_time_init(void)
{
int ret;
@@ -228,6 +228,8 @@
ARM_SMCCC_OWNER_VENDOR_HYP, \
ARM_SMCCC_KVM_FUNC_MMIO_GUARD)
+#define ARM_SMCCC_KVM_DISCOVER_IMPL_VER_1_0 0x100000000
+
#define ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
ARM_SMCCC_SMC_64, \
Retrieve any migration target implementation CPUs using the discovery
hypercalls and enable the associated errata.

Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
---
 arch/arm64/include/asm/cputype.h  | 24 +++++++++++--
 arch/arm64/include/asm/paravirt.h |  3 ++
 arch/arm64/kernel/cpu_errata.c    | 20 +++++++++--
 arch/arm64/kernel/cpufeature.c    |  2 ++
 arch/arm64/kernel/image-vars.h    |  2 ++
 arch/arm64/kernel/paravirt.c      | 58 +++++++++++++++++++++++++++++++
 include/linux/arm-smccc.h         |  2 ++
 7 files changed, 106 insertions(+), 5 deletions(-)
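
For reviewers: a rough summary of the register-level ABI that
pv_target_impl_cpu_init() above appears to rely on, written as a C comment.
This is only my reading of the code in this patch, not an authoritative
description of the KVM vendor hypercall ABI:

/*
 * ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID:
 *   res.a0 - SMCCC_RET_SUCCESS on success
 *   res.a1 - interface version (only 1.0 is accepted by this patch)
 *   res.a2 - number of target implementation CPU entries
 *
 * ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID:
 *   arg1   - index of the entry to fetch (0 .. number of entries - 1)
 *   res.a0 - SMCCC_RET_SUCCESS on success
 *   res.a1 - MIDR_EL1 of the target implementation CPU
 *   res.a2 - REVIDR_EL1 of the target implementation CPU
 *   res.a3 - AIDR_EL1 of the target implementation CPU
 */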