@@ -20,6 +20,7 @@ struct alt_instr {
u8 alt_len; /* size of new instruction(s), <= orig_len */
};
+void __init apply_alternatives_early(void);
void __init apply_alternatives_all(void);
void apply_alternatives(void *start, size_t length);
@@ -28,6 +28,18 @@
#include <asm/sections.h>
#include <linux/stop_machine.h>
+/*
+ * Early-apply features are detected using only the boot CPU and are
+ * checked again when secondary CPUs come up. Even then, the
+ * early-apply set should only include features where we must patch
+ * the kernel very early in the boot process.
+ *
+ * Note that the cpufeature logic *must* be made aware of early-apply
+ * features to ensure they are reported as enabled without waiting
+ * for other CPUs to boot.
+ */
+#define EARLY_APPLY_FEATURE_MASK BIT(ARM64_HAS_SYSREG_GIC_CPUIF)
+
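+/*
+ * Each __apply_alternatives() caller passes a mask of the features it
+ * is allowed to patch: apply_alternatives_early() passes
+ * EARLY_APPLY_FEATURE_MASK, the stop_machine() pass uses its
+ * complement, and apply_alternatives() passes -1 (all features), so
+ * every callsite is patched exactly once.
+ */
+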
#define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
@@ -105,7 +117,8 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp
return insn;
}
-static void __apply_alternatives(void *alt_region, bool use_linear_alias)
+static void __apply_alternatives(void *alt_region, bool use_linear_alias,
+ unsigned long feature_mask)
{
struct alt_instr *alt;
struct alt_region *region = alt_region;
@@ -115,6 +128,9 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
u32 insn;
int i, nr_inst;
+ if ((BIT(alt->cpufeature) & feature_mask) == 0)
+ continue;
+
if (!cpus_have_cap(alt->cpufeature))
continue;
@@ -138,6 +154,21 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
}
/*
+ * This is called very early in the boot process (directly after feature
+ * detection runs on the boot CPU). There is no need to worry about other
+ * CPUs here.
+ */
+void __init apply_alternatives_early(void)
+{
+ struct alt_region region = {
+ .begin = (struct alt_instr *)__alt_instructions,
+ .end = (struct alt_instr *)__alt_instructions_end,
+ };
+
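+ /*
+ * Like the stop_machine() pass, patch through the linear-map alias
+ * (use_linear_alias == true), since the core kernel's text may
+ * already be mapped read-only at this point.
+ */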
+ __apply_alternatives(&region, true, EARLY_APPLY_FEATURE_MASK);
+}
+
+/*
* We might be patching the stop_machine state machine, so implement a
* really simple polling protocol here.
*/
@@ -156,7 +187,9 @@ static int __apply_alternatives_multi_stop(void *unused)
isb();
} else {
BUG_ON(patched);
- __apply_alternatives(&region, true);
+
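+ /*
+ * Early-apply features were already patched on the boot CPU, so
+ * exclude them here and patch only the remaining features.
+ */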
+ __apply_alternatives(&region, true, ~EARLY_APPLY_FEATURE_MASK);
+
/* Barriers provided by the cache flushing */
WRITE_ONCE(patched, 1);
}
@@ -177,5 +210,5 @@ void apply_alternatives(void *start, size_t length)
.end = start + length,
};
- __apply_alternatives(&region, false);
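+ /* Runtime callers (e.g. module load) consider all features: all-ones mask */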
+ __apply_alternatives(&region, false, -1);
}
@@ -453,6 +453,12 @@ void __init smp_prepare_boot_cpu(void)
* cpuinfo_store_boot_cpu() above.
*/
update_cpu_errata_workarounds();
+ /*
+ * We now know enough about the boot CPU to apply the
+ * alternatives that cannot wait until interrupt handling
+ * and/or scheduling is enabled.
+ */
+ apply_alternatives_early();
}
static u64 __init of_get_cpu_mpidr(struct device_node *dn)