@@ -78,6 +78,13 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
+/*
+ * There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs
+ * are potentially stolen by the secure side. Some code, especially code dealing
+ * with hwirq IDs, is simplified by accounting for all 16.
+ */
+#define SGI_NR 16
+
/*
* The behaviours of RPR and PMR registers differ depending on the value of
* SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
@@ -125,8 +132,8 @@ EXPORT_SYMBOL(gic_nonsecure_priorities);
__priority; \
})
-/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
-static refcount_t *ppi_nmi_refs;
+/* rdist_nmi_refs[n] == number of cpus having the rdist interrupt n set as NMI */
+static refcount_t *rdist_nmi_refs;
static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);
@@ -519,9 +526,22 @@ static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
}
}
-static u32 gic_get_ppi_index(struct irq_data *d)
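+/*
+ * Map a redistributor-private hwirq (SGI, PPI or EPPI) to a linear
+ * per-redistributor index: SGIs occupy slots 0-15, PPIs 16-31, and
+ * EPPIs start at 32.
+ */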
+static u32 __gic_get_rdist_index(irq_hw_number_t hwirq)
+{
+ switch (__get_intid_range(hwirq)) {
+ case SGI_RANGE:
+ case PPI_RANGE:
+ return hwirq;
+ case EPPI_RANGE:
+ return hwirq - EPPI_BASE_INTID + 32;
+ default:
+ unreachable();
+ }
+}
+
+static u32 gic_get_rdist_index(struct irq_data *d)
{
- return __gic_get_ppi_index(d->hwirq);
+ return __gic_get_rdist_index(d->hwirq);
}
static int gic_irq_nmi_setup(struct irq_data *d)
@@ -545,11 +565,14 @@ static int gic_irq_nmi_setup(struct irq_data *d)
/* desc lock should already be held */
if (gic_irq_in_rdist(d)) {
- u32 idx = gic_get_ppi_index(d);
+ u32 idx = gic_get_rdist_index(d);
- /* Setting up PPI as NMI, only switch handler for first NMI */
- if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
- refcount_set(&ppi_nmi_refs[idx], 1);
+	/*
+	 * Setting up a percpu interrupt as NMI: only switch the
+	 * handler for the first NMI
+	 */
+ if (!refcount_inc_not_zero(&rdist_nmi_refs[idx])) {
+ refcount_set(&rdist_nmi_refs[idx], 1);
desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
}
} else {
@@ -582,10 +605,10 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
/* desc lock should already be held */
if (gic_irq_in_rdist(d)) {
- u32 idx = gic_get_ppi_index(d);
+ u32 idx = gic_get_rdist_index(d);
/* Tearing down NMI, only switch handler for last NMI */
- if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
+ if (refcount_dec_and_test(&rdist_nmi_refs[idx]))
desc->handle_irq = handle_percpu_devid_irq;
} else {
desc->handle_irq = handle_fasteoi_irq;
@@ -1279,10 +1302,10 @@ static void gic_cpu_init(void)
rbase = gic_data_rdist_sgi_base();
/* Configure SGIs/PPIs as non-secure Group-1 */
- for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
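+	/* One group bit per interrupt, i.e. 32 interrupts per GICR_IGROUPR register */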
+ for (i = 0; i < gic_data.ppi_nr + SGI_NR; i += 32)
writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
- gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);
+ gic_cpu_config(rbase, gic_data.ppi_nr + SGI_NR, gic_redist_wait_for_rwp);
/* initialise system registers */
gic_cpu_sys_reg_init();
@@ -1939,12 +1962,13 @@ static void gic_enable_nmi_support(void)
return;
}
- ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
- if (!ppi_nmi_refs)
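+	/* Account for SGIs as well as (E)PPIs so that IPIs can be set up as NMIs */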
+ rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
+ sizeof(*rdist_nmi_refs), GFP_KERNEL);
+ if (!rdist_nmi_refs)
return;
- for (i = 0; i < gic_data.ppi_nr; i++)
- refcount_set(&ppi_nmi_refs[i], 0);
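+	/* No interrupt starts out as an NMI */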
+ for (i = 0; i < gic_data.ppi_nr + SGI_NR; i++)
+ refcount_set(&rdist_nmi_refs[i], 0);
pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");
@@ -2061,6 +2085,7 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base,
gic_dist_init();
gic_cpu_init();
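+	/*
+	 * Enable NMI support before gic_smp_init() so that the IPIs set up
+	 * there can be requested as pseudo-NMIs.
+	 */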
+ gic_enable_nmi_support();
gic_smp_init();
gic_cpu_pm_init();
@@ -2073,8 +2098,6 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base,
gicv2m_init(handle, gic_data.domain);
}
- gic_enable_nmi_support();
-
return 0;
out_free: