new file mode 100644
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017-2024 Kalray Inc.
+ * Author(s): Clement Leger
+ * Jonathan Borne
+ * Yann Sionneau
+ */
+
+#ifndef _ASM_KVX_SMP_H
+#define _ASM_KVX_SMP_H
+
+#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
+
+#include <asm/sfr.h>
+
+#ifdef CONFIG_SMP
+
+/* Discover possible CPUs from the device tree and pick the boot method. */
+void smp_init_cpus(void);
+
+/* Register the IPI-raising hook provided by the irqchip driver. */
+extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
+
+/* C entry point for secondary cores, reached from the startup asm. */
+asmlinkage void __init start_kernel_secondary(void);
+
+/* Hook for the generic smp_call_function_many() routine. */
+void arch_send_call_function_ipi_mask(struct cpumask *mask);
+
+/* Hook for the generic smp_call_function_single() routine. */
+void arch_send_call_function_single_ipi(int cpu);
+
+void __init setup_processor(void);
+int __init setup_smp(void);
+
+/*
+ * The current CPU id is held in the PID field of the Processing
+ * Control Register (PCR).
+ */
+#define raw_smp_processor_id() ((int) \
+	((kvx_sfr_get(PCR) & KVX_SFR_PCR_PID_MASK) \
+	>> KVX_SFR_PCR_PID_SHIFT))
+
+/*
+ * NOTE(review): flush_cache_vmap()/flush_cache_vunmap() conventionally
+ * live in asm/cacheflush.h, not smp.h -- consider moving them.
+ */
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
+/* IPI demultiplexer called by the irqchip driver with a bitmask of ops. */
+extern void handle_IPI(unsigned long ops);
+
+/* Platform hooks for booting secondary cores. */
+struct smp_operations {
+	int (*smp_boot_secondary)(unsigned int cpu);
+};
+
+/* One entry of the DT "enable-method" -> smp_operations lookup table. */
+struct of_cpu_method {
+	const char *method;
+	const struct smp_operations *ops;
+};
+
+#define CPU_METHOD_OF_DECLARE(name, _method, _ops) \
+	static const struct of_cpu_method __cpu_method_of_table_##name \
+	__used __section("__cpu_method_of_table") \
+	= { .method = _method, .ops = _ops }
+
+extern void smp_set_ops(const struct smp_operations *ops);
+
+#else /* !CONFIG_SMP */
+
+/*
+ * Fix: the stub must be static inline. A plain definition here is
+ * emitted in every translation unit that includes this header, causing
+ * multiple-definition link errors, and it also conflicted with the
+ * earlier unconditional extern declaration.
+ */
+static inline void smp_init_cpus(void) {}
+
+#endif /* CONFIG_SMP */
+
+#endif
new file mode 100644
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2024 Kalray Inc.
+ * Author(s): Clement Leger
+ * Jonathan Borne
+ * Yann Sionneau
+ */
+
+#include <linux/cpumask.h>
+#include <linux/irq_work.h>
+
+/*
+ * Cross-call hook installed by the irqchip driver; used to actually
+ * raise an IPI on a set of CPUs.
+ */
+static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+
+/* IPI operations; each value is a bit index in the mask given to handle_IPI(). */
+enum ipi_message_type {
+	IPI_RESCHEDULE,
+	IPI_CALL_FUNC,
+	IPI_IRQ_WORK,
+	IPI_MAX
+};
+
+/* Register the IPI-raising callback (called once by the irqchip driver). */
+void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
+{
+	smp_cross_call = fn;
+}
+
+/*
+ * Raise the IPI @operation on every CPU in @mask via the registered
+ * cross-call hook.
+ */
+static void send_ipi_message(const struct cpumask *mask,
+			     enum ipi_message_type operation)
+{
+	/* IPIs are unusable until the irqchip has registered its hook. */
+	if (smp_cross_call == NULL)
+		panic("ipi controller init failed\n");
+
+	smp_cross_call(mask, (unsigned int)operation);
+}
+
+/* Hook for the generic smp_call_function_many() routine. */
+void arch_send_call_function_ipi_mask(struct cpumask *mask)
+{
+	send_ipi_message(mask, IPI_CALL_FUNC);
+}
+
+/* Hook for the generic smp_call_function_single() routine. */
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
+}
+
+#ifdef CONFIG_IRQ_WORK
+/*
+ * Self-IPI used by the irq_work framework to get a pending callback
+ * run in interrupt context on the local CPU.
+ */
+void arch_irq_work_raise(void)
+{
+	send_ipi_message(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
+/* smp_call_function callback: park the calling CPU. */
+static void ipi_stop(void *unused)
+{
+	local_cpu_stop();
+}
+
+/*
+ * Stop all other CPUs (shutdown/panic path): make every other online
+ * CPU execute ipi_stop().
+ *
+ * NOTE(review): wait=0, so we do not wait for the targets to actually
+ * reach ipi_stop() -- presumably acceptable on this path, confirm
+ * against the callers (e.g. machine_halt/panic expectations).
+ */
+void smp_send_stop(void)
+{
+	struct cpumask targets;
+
+	/* Target every online CPU except ourselves. */
+	cpumask_copy(&targets, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &targets);
+
+	smp_call_function_many(&targets, ipi_stop, NULL, 0);
+}
+
+/* Scheduler hook: kick @cpu so it re-evaluates its runqueue. */
+void arch_smp_send_reschedule(int cpu)
+{
+	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+/*
+ * IPI demultiplexer, called from the irqchip driver with the bitmask
+ * of pending operations (bit indices from enum ipi_message_type).
+ */
+void handle_IPI(unsigned long ops)
+{
+	unsigned long pending = ops;
+
+	if (pending & (1UL << IPI_RESCHEDULE))
+		scheduler_ipi();
+	if (pending & (1UL << IPI_CALL_FUNC))
+		generic_smp_call_function_interrupt();
+	if (pending & (1UL << IPI_IRQ_WORK))
+		irq_work_run();
+
+	/* Any bit at or above IPI_MAX is an unknown request. */
+	WARN_ON_ONCE(pending >> IPI_MAX);
+}
new file mode 100644
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2024 Kalray Inc.
+ * Author(s): Clement Leger
+ * Julian Vetter
+ * Yann Sionneau
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/sched/mm.h>
+#include <linux/mm_types.h>
+#include <linux/of_platform.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/tlbflush.h>
+#include <asm/ipi.h>
+
+/*
+ * Per-CPU bring-up parameters consumed by the secondary boot path:
+ * initial stack pointer and idle task for each CPU.
+ */
+void *__cpu_up_stack_pointer[NR_CPUS];
+void *__cpu_up_task_pointer[NR_CPUS];
+/* Boot method operations (from the DT enable-method), set once at init. */
+static struct smp_operations smp_ops __ro_after_init;
+/* Linker-built table of CPU_METHOD_OF_DECLARE() entries. */
+extern struct of_cpu_method __cpu_method_of_table[];
+
+/* Nothing to do on kvx: the boot CPU needs no extra preparation. */
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+/*
+ * Record the platform SMP boot operations. A NULL @ops is ignored so
+ * callers may pass an unmatched lookup result directly.
+ */
+void __init smp_set_ops(const struct smp_operations *ops)
+{
+	if (ops)
+		smp_ops = *ops;
+}
+
+/*
+ * Boot secondary CPU @cpu with idle task @tidle.
+ *
+ * Publishes the CPU's initial stack and task for the secondary entry
+ * code, asks the platform smp_ops to start the core, then waits for it
+ * to come online. Returns 0 on success or a negative errno.
+ */
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+	int ret;
+
+	/* Fix: bail out before publishing boot data if we cannot boot anyway. */
+	if (!smp_ops.smp_boot_secondary) {
+		pr_err_once("No smp_ops registered: could not bring up secondary CPUs\n");
+		return -ENOSYS;
+	}
+
+	__cpu_up_stack_pointer[cpu] = task_stack_page(tidle) + THREAD_SIZE;
+	__cpu_up_task_pointer[cpu] = tidle;
+	/* Make sure the boot data is visible before the secondary starts. */
+	smp_mb();
+
+	ret = smp_ops.smp_boot_secondary(cpu);
+	if (ret == 0) {
+		/*
+		 * Wait for the secondary to mark itself online in
+		 * start_kernel_secondary().
+		 * NOTE(review): unbounded wait -- a core that never comes
+		 * up hangs the boot CPU; consider a bounded wait.
+		 */
+		while (!cpu_online(cpu))
+			cpu_relax();
+	} else {
+		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
+	}
+
+	return ret;
+}
+
+
+
+/*
+ * Look up @node's "enable-method" in the CPU_METHOD_OF_DECLARE() table
+ * and install the matching smp_operations.
+ *
+ * Returns 1 when a method was found and installed, 0 otherwise.
+ * NOTE(review): assumes the linker table is terminated by an entry with
+ * a NULL method -- verify the sentinel exists in the linker script.
+ */
+static int __init set_smp_ops_by_method(struct device_node *node)
+{
+	const char *method;
+	const struct of_cpu_method *m;
+
+	if (of_property_read_string(node, "enable-method", &method))
+		return 0;
+
+	for (m = __cpu_method_of_table; m->method; m++) {
+		if (strcmp(m->method, method) == 0) {
+			smp_set_ops(m->ops);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/* Nothing left to do once all secondary CPUs are up. */
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+/*
+ * Walk the device tree, mark every available CPU node with a valid
+ * hwid as possible, and select the SMP boot method from the first
+ * matching "enable-method" (per-cpu nodes first, /cpus as fallback).
+ */
+void __init smp_init_cpus(void)
+{
+	struct device_node *cpu, *cpus;
+	u32 cpu_id;
+	unsigned int nr_cpus = 0;
+	int found_method = 0;
+
+	cpus = of_find_node_by_path("/cpus");
+	for_each_of_cpu_node(cpu) {
+		if (!of_device_is_available(cpu))
+			continue;
+
+		cpu_id = of_get_cpu_hwid(cpu, 0);
+		if ((cpu_id < NR_CPUS) && (nr_cpus < nr_cpu_ids)) {
+			nr_cpus++;
+			set_cpu_possible(cpu_id, true);
+			if (!found_method)
+				found_method = set_smp_ops_by_method(cpu);
+		}
+	}
+
+	/* Fall back to an enable-method on the /cpus node itself. */
+	if (!found_method)
+		set_smp_ops_by_method(cpus);
+
+	/* Fix: drop the reference taken by of_find_node_by_path(). */
+	of_node_put(cpus);
+
+	pr_info("%d possible cpus\n", nr_cpus);
+}
+
+/*
+ * Called before bringing up secondaries. With no platform-specific
+ * detection of present CPUs, treat every possible CPU as present.
+ */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	if (num_present_cpus() <= 1)
+		init_cpu_present(cpu_possible_mask);
+}
+
+/*
+ * C entry point for a secondary processor.
+ *
+ * Runs on the freshly started core: sets up the processor and MMU,
+ * adopts init_mm, marks itself online (which releases the boot CPU
+ * spinning in __cpu_up()), then enters the idle loop.
+ */
+asmlinkage void __init start_kernel_secondary(void)
+{
+	struct mm_struct *mm = &init_mm;
+	unsigned int cpu = smp_processor_id();
+
+	setup_processor();
+	kvx_mmu_early_setup();
+
+	/* All kernel threads share the same mm context. */
+	mmgrab(mm);
+	current->active_mm = mm;
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
+
+	/* Run CPUHP "starting" callbacks, then publish ourselves online. */
+	notify_cpu_starting(cpu);
+	set_cpu_online(cpu, true);
+	trace_hardirqs_off();
+
+	/* Start from a clean TLB on this core. */
+	local_flush_tlb_all();
+
+	local_irq_enable();
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
@@ -147,6 +147,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_LOONGARCH_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
+ CPUHP_AP_IRQ_KVX_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
@@ -176,6 +177,7 @@ enum cpuhp_state {
CPUHP_AP_CSKY_TIMER_STARTING,
CPUHP_AP_TI_GP_TIMER_STARTING,
CPUHP_AP_HYPERV_TIMER_STARTING,
+ CPUHP_AP_KVX_TIMER_STARTING,
/* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,