--- a/drivers/irqchip/irq-riscv-imsic-early.c
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -77,6 +77,12 @@ static void imsic_handle_irq(struct irq_desc *desc)
struct imsic_vector *vec;
unsigned long local_id;
+ /*
+ * Process pending local synchronization instead of waiting
+ * for the per-CPU local timer to expire.
+ */
+ imsic_local_sync_all(false);
+
chained_irq_enter(chip, desc);
while ((local_id = csr_swap(CSR_TOPEI, 0))) {
@@ -120,7 +126,7 @@ static int imsic_starting_cpu(unsigned int cpu)
* Interrupt identities might have been enabled/disabled while
* this CPU was not running, so sync up the local enable/disable state.
*/
- imsic_local_sync_all();
+ imsic_local_sync_all(true);
/* Enable local interrupt delivery */
imsic_local_delivery(true);
--- a/drivers/irqchip/irq-riscv-imsic-state.c
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -124,10 +124,11 @@ void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend,
}
}
-static void __imsic_local_sync(struct imsic_local_priv *lpriv)
+static bool __imsic_local_sync(struct imsic_local_priv *lpriv)
{
struct imsic_local_config *mlocal;
struct imsic_vector *vec, *mvec;
+ bool ret = true;
int i;
lockdep_assert_held(&lpriv->lock);
@@ -143,35 +144,75 @@ static void __imsic_local_sync(struct imsic_local_priv *lpriv)
__imsic_id_clear_enable(i);
/*
- * If the ID was being moved to a new ID on some other CPU
- * then we can get a MSI during the movement so check the
- * ID pending bit and re-trigger the new ID on other CPU
- * using MMIO write.
+ * Clear the previous vector pointer of the new vector only
+ * after the movement is complete on the old CPU.
*/
- mvec = READ_ONCE(vec->move);
- WRITE_ONCE(vec->move, NULL);
- if (mvec && mvec != vec) {
+ mvec = READ_ONCE(vec->move_prev);
+ if (mvec) {
+ /*
+ * If the old CPU has not completed the movement
+ * yet, then try again in the next sync-up call.
+ */
+ if (READ_ONCE(mvec->move_next)) {
+ ret = false;
+ continue;
+ }
+
+ WRITE_ONCE(vec->move_prev, NULL);
+ }
+
+ /*
+ * If the vector was being moved to a new vector on some
+ * other CPU, then we can get an MSI during the movement,
+ * so check the ID pending bit and re-trigger the new ID
+ * on the other CPU using an MMIO write.
+ */
+ mvec = READ_ONCE(vec->move_next);
+ if (mvec) {
if (__imsic_id_read_clear_pending(i)) {
mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
writel_relaxed(mvec->local_id, mlocal->msi_va);
}
+ WRITE_ONCE(vec->move_next, NULL);
imsic_vector_free(&lpriv->vectors[i]);
}
skip:
bitmap_clear(lpriv->dirty_bitmap, i, 1);
}
+
+ return ret;
}
-void imsic_local_sync_all(void)
+#ifdef CONFIG_SMP
+static void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu)
+{
+ lockdep_assert_held(&lpriv->lock);
+
+ if (!timer_pending(&lpriv->timer)) {
+ lpriv->timer.expires = jiffies + 1;
+ add_timer_on(&lpriv->timer, cpu);
+ }
+}
+#else
+static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu)
+{
+}
+#endif
+
+void imsic_local_sync_all(bool force_all)
{
struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
unsigned long flags;
raw_spin_lock_irqsave(&lpriv->lock, flags);
- bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
- __imsic_local_sync(lpriv);
+
+ if (force_all)
+ bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
+ if (!__imsic_local_sync(lpriv))
+ __imsic_local_timer_start(lpriv, smp_processor_id());
+
raw_spin_unlock_irqrestore(&lpriv->lock, flags);
}
@@ -190,12 +231,7 @@ void imsic_local_delivery(bool enable)
#ifdef CONFIG_SMP
static void imsic_local_timer_callback(struct timer_list *timer)
{
- struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&lpriv->lock, flags);
- __imsic_local_sync(lpriv);
- raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+ imsic_local_sync_all(false);
}
static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
@@ -216,14 +252,11 @@ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu
*/
if (cpu_online(cpu)) {
if (cpu == smp_processor_id()) {
- __imsic_local_sync(lpriv);
- return;
+ if (__imsic_local_sync(lpriv))
+ return;
}
- if (!timer_pending(&lpriv->timer)) {
- lpriv->timer.expires = jiffies + 1;
- add_timer_on(&lpriv->timer, cpu);
- }
+ __imsic_local_timer_start(lpriv, cpu);
}
}
#else
@@ -278,8 +311,9 @@ void imsic_vector_unmask(struct imsic_vector *vec)
raw_spin_unlock(&lpriv->lock);
}
-static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec,
- bool new_enable, struct imsic_vector *new_move)
+static bool imsic_vector_move_update(struct imsic_local_priv *lpriv,
+ struct imsic_vector *vec, bool is_old_vec,
+ bool new_enable, struct imsic_vector *move_vec)
{
unsigned long flags;
bool enabled;
@@ -289,7 +323,10 @@ static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsi
/* Update enable and move details */
enabled = READ_ONCE(vec->enable);
WRITE_ONCE(vec->enable, new_enable);
- WRITE_ONCE(vec->move, new_move);
+ if (is_old_vec)
+ WRITE_ONCE(vec->move_next, move_vec);
+ else
+ WRITE_ONCE(vec->move_prev, move_vec);
/* Mark the vector as dirty and synchronize */
bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
@@ -322,8 +359,8 @@ void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_ve
* interrupt on the old vector while the device was being moved
* to the new vector.
*/
- enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec);
- imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec);
+ enabled = imsic_vector_move_update(old_lpriv, old_vec, true, false, new_vec);
+ imsic_vector_move_update(new_lpriv, new_vec, false, enabled, old_vec);
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
@@ -386,7 +423,8 @@ struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask
vec = &lpriv->vectors[local_id];
vec->hwirq = hwirq;
vec->enable = false;
- vec->move = NULL;
+ vec->move_next = NULL;
+ vec->move_prev = NULL;
return vec;
}
--- a/drivers/irqchip/irq-riscv-imsic-state.h
+++ b/drivers/irqchip/irq-riscv-imsic-state.h
@@ -23,7 +23,8 @@ struct imsic_vector {
unsigned int hwirq;
/* Details accessed using local lock held */
bool enable;
- struct imsic_vector *move;
+ struct imsic_vector *move_next;
+ struct imsic_vector *move_prev;
};
struct imsic_local_priv {
@@ -74,7 +75,7 @@ static inline void __imsic_id_clear_enable(unsigned long id)
__imsic_eix_update(id, 1, false, false);
}
-void imsic_local_sync_all(void);
+void imsic_local_sync_all(bool force_all);
void imsic_local_delivery(bool enable);
void imsic_vector_mask(struct imsic_vector *vec);
@@ -87,7 +88,7 @@ static inline bool imsic_vector_isenabled(struct imsic_vector *vec)
static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec)
{
- return READ_ONCE(vec->move);
+ return READ_ONCE(vec->move_prev);
}
void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec);
Currently, there is only one "move" pointer in the struct imsic_vector,
so during vector movement the old vector points to the new vector and
the new vector points to itself.

To support forced cleanup of the old vector, add separate "move_next"
and "move_prev" pointers in the struct imsic_vector, where during vector
movement the "move_next" pointer of the old vector points to the new
vector and the "move_prev" pointer of the new vector points to the old
vector. Both "move_next" and "move_prev" pointers are cleared separately
by __imsic_local_sync(), with the restriction that "move_prev" on the
new CPU is cleared only after the old CPU has cleared "move_next".

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
---
 drivers/irqchip/irq-riscv-imsic-early.c |  8 ++-
 drivers/irqchip/irq-riscv-imsic-state.c | 96 +++++++++++++++++--------
 drivers/irqchip/irq-riscv-imsic-state.h |  7 +-
 3 files changed, 78 insertions(+), 33 deletions(-)
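As an illustration (not part of the patch): here is a minimal,
single-threaded C sketch of the "move_next"/"move_prev" handshake
described above. All names in it (struct vec, move_setup(), sync_one())
are hypothetical stand-ins; the real driver performs these steps under
lpriv->lock with READ_ONCE()/WRITE_ONCE() and retries via the per-CPU
lazy timer instead of returning a status to its caller.

	/* Stand-alone model of the two-pointer movement handshake. */
	#include <stdbool.h>
	#include <stdio.h>

	struct vec {
		unsigned int cpu;
		struct vec *move_next;	/* on the old vector: points to the new one */
		struct vec *move_prev;	/* on the new vector: points to the old one */
	};

	/* Mirrors the linkage set up by imsic_vector_move(). */
	static void move_setup(struct vec *old_vec, struct vec *new_vec)
	{
		old_vec->move_next = new_vec;
		new_vec->move_prev = old_vec;
	}

	/*
	 * Mirrors the ordering rule enforced by __imsic_local_sync():
	 * "move_prev" may be cleared on the new CPU only after the old
	 * CPU has cleared "move_next"; until then the sync reports
	 * failure so that the caller retries later.
	 */
	static bool sync_one(struct vec *v)
	{
		if (v->move_prev) {
			if (v->move_prev->move_next)
				return false;	/* old CPU not done yet, retry */
			v->move_prev = NULL;
		}
		v->move_next = NULL;		/* movement complete on the old CPU */
		return true;
	}

	int main(void)
	{
		struct vec old_vec = { .cpu = 0 }, new_vec = { .cpu = 1 };

		move_setup(&old_vec, &new_vec);
		printf("new CPU: %s\n", sync_one(&new_vec) ? "done" : "retry"); /* retry */
		printf("old CPU: %s\n", sync_one(&old_vec) ? "done" : "retry"); /* done */
		printf("new CPU: %s\n", sync_one(&new_vec) ? "done" : "retry"); /* done */
		return 0;
	}

Note how a sync attempt on the new CPU that still sees "move_next" set
on the old vector simply backs off: this is why __imsic_local_sync()
now returns bool and why imsic_local_sync_all() arms the lazy timer
when the sync-up is incomplete.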