@@ -232,4 +232,12 @@ static inline bool arch_timer_have_evtstrm_feature(void)
{
return cpu_have_named_feature(EVTSTRM);
}
+
+#ifdef CONFIG_APPLE_AIC
+#define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2 sys_reg(3, 5, 15, 1, 3)
+DECLARE_STATIC_KEY_FALSE(aic_impdef_timer_control);
+#endif
+
+void __aic_timer_fiq_clear_set(u64 clear, u64 set);
+
#endif
@@ -75,6 +75,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr,
__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
+ __KVM_HOST_SMCCC_FUNC___aic_timer_fiq_clear_set,
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
@@ -106,6 +106,9 @@ KVM_NVHE_ALIAS(__hyp_rodata_end);
/* pKVM static key */
KVM_NVHE_ALIAS(kvm_protected_mode_initialized);
+/* Hack for M1 timer control in hVHE mode */
+KVM_NVHE_ALIAS(aic_impdef_timer_control);
+
#endif /* CONFIG_KVM */
#ifdef CONFIG_EFI_ZBOOT
@@ -1259,6 +1259,11 @@ static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
return 0;
}
+void __aic_timer_fiq_clear_set(u64 clear, u64 set)
+{
+ kvm_call_hyp_nvhe(__aic_timer_fiq_clear_set, clear, set);
+}
+
static int timer_irq_set_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which, bool val)
{
@@ -192,6 +192,16 @@ static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
}
+#define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2 sys_reg(3, 5, 15, 1, 3)
+
+static void handle___aic_timer_fiq_clear_set(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(u64, clear, host_ctxt, 1);
+ DECLARE_REG(u64, set, host_ctxt, 2);
+
+ __aic_timer_fiq_clear_set(clear, set);
+}
+
static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
@@ -322,6 +332,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__vgic_v3_write_vmcr),
HANDLE_FUNC(__vgic_v3_save_aprs),
HANDLE_FUNC(__vgic_v3_restore_aprs),
+ HANDLE_FUNC(__aic_timer_fiq_clear_set),
HANDLE_FUNC(__pkvm_vcpu_init_traps),
HANDLE_FUNC(__pkvm_init_vm),
HANDLE_FUNC(__pkvm_init_vcpu),
@@ -60,3 +60,12 @@ void __timer_enable_traps(struct kvm_vcpu *vcpu)
sysreg_clear_set(cnthctl_el2, clr, set);
}
+
+
+void __aic_timer_fiq_clear_set(u64 clear, u64 set)
+{
+#ifdef CONFIG_APPLE_AIC
+ if (has_hvhe() && static_branch_likely(&aic_impdef_timer_control))
+ sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, clear, set);
+#endif
+}
@@ -180,7 +180,6 @@
#define IPI_SR_PENDING BIT(0)
/* Guest timer FIQ enable register */
-#define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2 sys_reg(3, 5, 15, 1, 3)
#define VM_TMR_FIQ_ENABLE_V BIT(0)
#define VM_TMR_FIQ_ENABLE_P BIT(1)
@@ -236,6 +235,8 @@ enum fiq_hwirq {
static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
+DEFINE_STATIC_KEY_FALSE(aic_impdef_timer_control);
+
struct aic_info {
int version;
@@ -458,6 +459,40 @@ static unsigned long aic_fiq_get_idx(struct irq_data *d)
return AIC_HWIRQ_IRQ(irqd_to_hwirq(d));
}
+void __weak __aic_timer_fiq_clear_set(u64 clear, u64 set) { }
+
+static bool aic_check_timer_enabled(int timer)
+{
+ if (IS_ENABLED(CONFIG_KVM) &&
+ static_branch_unlikely(&aic_impdef_timer_control))
+ return __this_cpu_read(aic_fiq_unmasked) & BIT(timer);
+ return true;
+}
+
+static void aic_hvhe_timer_mask(int timer, bool mask)
+{
+ u64 clr, set, bit;
+
+ if (!(IS_ENABLED(CONFIG_KVM) &&
+ static_branch_unlikely(&aic_impdef_timer_control)))
+ return;
+
+ if (timer == AIC_TMR_EL0_VIRT)
+ bit = VM_TMR_FIQ_ENABLE_V;
+ else
+ bit = VM_TMR_FIQ_ENABLE_P;
+
+ if (mask) {
+ clr = bit;
+ set = 0;
+ } else {
+ clr = 0;
+ set = bit;
+ }
+
+ __aic_timer_fiq_clear_set(clr, set);
+}
+
static void aic_fiq_set_mask(struct irq_data *d)
{
/* Only the guest timers have real mask bits, unfortunately. */
@@ -470,6 +505,9 @@ static void aic_fiq_set_mask(struct irq_data *d)
sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_V, 0);
isb();
break;
+ case AIC_TMR_EL0_VIRT:
+ aic_hvhe_timer_mask(AIC_TMR_EL0_VIRT, true);
+ break;
default:
break;
}
@@ -486,6 +524,9 @@ static void aic_fiq_clear_mask(struct irq_data *d)
sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_V);
isb();
break;
+ case AIC_TMR_EL0_VIRT:
+ aic_hvhe_timer_mask(AIC_TMR_EL0_VIRT, false);
+ break;
default:
break;
}
@@ -545,7 +586,8 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
generic_handle_domain_irq(aic_irqc->hw_domain,
AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS));
- if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
+ if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)) &&
+ aic_check_timer_enabled(AIC_TMR_EL0_VIRT))
generic_handle_domain_irq(aic_irqc->hw_domain,
AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT));
@@ -1041,6 +1083,10 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
if (static_branch_likely(&use_fast_ipi))
pr_info("Using Fast IPIs");
+ /* Caps are not final at this stage :-/ */
+ if (cpus_have_cap(ARM64_KVM_HVHE))
+ static_branch_enable(&aic_impdef_timer_control);
+
cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
"irqchip/apple-aic/ipi:starting",
aic_init_cpu, NULL);
As our M1 friend doesn't have a GIC, it relies on a special hack to
deal with masking the guest timers, in the form of an IMPDEF system
register. Unfortunately, this sysreg is EL2-only, which means that the
kernel cannot mask the interrupts itself, but has to kindly ask EL2 to
do it.

Yes, this is terrible, but we should be used to it by now.

Add an M1-specific hypercall to deal with this.

No, I'm not seriously suggesting we merge this crap.

Not-seriously-suggested-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/arch_timer.h |  8 +++++
 arch/arm64/include/asm/kvm_asm.h    |  1 +
 arch/arm64/kernel/image-vars.h      |  3 ++
 arch/arm64/kvm/arch_timer.c         |  5 +++
 arch/arm64/kvm/hyp/nvhe/hyp-main.c  | 11 +++++++
 arch/arm64/kvm/hyp/nvhe/timer-sr.c  |  9 ++++++
 drivers/irqchip/irq-apple-aic.c     | 50 +++++++++++++++++++++++++++--
 7 files changed, 85 insertions(+), 2 deletions(-)