From patchwork Tue Mar 26 11:18:18 2013
X-Patchwork-Submitter: Bastian Hecht
X-Patchwork-Id: 2335761
From: Bastian Hecht
To: linux-sh@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org, Magnus Damm, Simon Horman,
 Arnd Bergmann, Kuninori Morimoto
Subject: [PATCH v3 1/2] irqchip: intc-irqpin: Add support for shared interrupt lines
Date: Tue, 26 Mar 2013 12:18:18 +0100
Message-Id: <1364296699-20919-1-git-send-email-hechtb+renesas@gmail.com>

On some hardware we don't have a 1:1 mapping from the external interrupts
coming from the INTC to the GIC SPI pins. On these SoCs we can instead share
interrupt lines and demux the incoming IRQs.

This patch enables the intc_irqpin driver to detect requests for shared
interrupt lines and to demux them properly by querying the INTC INTREQx0A
registers.

If you need multiple shared intc_irqpin device instances, be sure to mask out
all interrupts on the INTC that share the one line before you start to
register them. Otherwise you run into IRQ floods caused by interrupts for
which no handler has been set up yet when the first intc_irqpin device is
registered.

Signed-off-by: Bastian Hecht
Acked-by: Magnus Damm
---
v3: no changes.
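As a quick illustration of the demux bookkeeping used below: the INTREQx0A
source register reports pin k in bit (7 - k), while the driver-local
shared_irq_mask tracks disabled pins in bit k, so a pin that is pending but
currently disabled is silently skipped. The following minimal, standalone
user-space sketch (not part of the patch; the register value and pin numbers
are made up for illustration) shows the same logic:

#include <stdint.h>
#include <stdio.h>

#define NPINS	8
#define BIT(n)	(1u << (n))

/* Pins whose bit is set here are currently disabled and must be skipped. */
static uint8_t shared_irq_mask;

/* Stand-in for reading the INTREQx0A source register. */
static uint8_t read_source(void)
{
	/* Example: pins 1 and 6 pending -> bits (7 - 1) and (7 - 6). */
	return BIT(6) | BIT(1);
}

static void demux(void)
{
	uint8_t source = read_source();
	int k;

	for (k = 0; k < NPINS; k++) {
		if (!(source & BIT(7 - k)))
			continue;	/* pin k not pending */
		if (shared_irq_mask & BIT(k))
			continue;	/* pin k disabled, ignore it */
		printf("dispatch handler for pin %d\n", k);
	}
}

int main(void)
{
	shared_irq_mask |= BIT(6);	/* pin 6 disabled, as after irq_disable() */
	demux();			/* dispatches pin 1 only */
	return 0;
}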
 drivers/irqchip/irq-renesas-intc-irqpin.c | 90 ++++++++++++++++++++++++++---
 1 file changed, 83 insertions(+), 7 deletions(-)

diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index fd5dabc..5a68e5a 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -74,6 +74,8 @@ struct intc_irqpin_priv {
 	struct platform_device *pdev;
 	struct irq_chip irq_chip;
 	struct irq_domain *irq_domain;
+	bool shared_irqs;
+	u8 shared_irq_mask;
 };
 
 static unsigned long intc_irqpin_read32(void __iomem *iomem)
@@ -193,6 +195,28 @@ static void intc_irqpin_irq_disable(struct irq_data *d)
 	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);
 }
 
+static void intc_irqpin_shared_irq_enable(struct irq_data *d)
+{
+	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+	int hw_irq = irqd_to_hwirq(d);
+
+	intc_irqpin_dbg(&p->irq[hw_irq], "shared enable");
+	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);
+
+	p->shared_irq_mask &= ~BIT(hw_irq);
+}
+
+static void intc_irqpin_shared_irq_disable(struct irq_data *d)
+{
+	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+	int hw_irq = irqd_to_hwirq(d);
+
+	intc_irqpin_dbg(&p->irq[hw_irq], "shared disable");
+	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);
+
+	p->shared_irq_mask |= BIT(hw_irq);
+}
+
 static void intc_irqpin_irq_enable_force(struct irq_data *d)
 {
 	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
@@ -261,6 +285,25 @@ static irqreturn_t intc_irqpin_irq_handler(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
+static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
+{
+	struct intc_irqpin_priv *p = dev_id;
+	unsigned int reg_source = intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE);
+	irqreturn_t status = IRQ_NONE;
+	int k;
+
+	for (k = 0; k < 8; k++) {
+		if (reg_source & BIT(7 - k)) {
+			if (BIT(k) & p->shared_irq_mask)
+				continue;
+
+			status |= intc_irqpin_irq_handler(irq, &p->irq[k]);
+		}
+	}
+
+	return status;
+}
+
 static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
 				      irq_hw_number_t hw)
 {
@@ -292,6 +335,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
 	void (*enable_fn)(struct irq_data *d);
 	void (*disable_fn)(struct irq_data *d);
 	const char *name = dev_name(&pdev->dev);
+	int ref_irq;
 	int ret;
 	int k;
 
@@ -372,13 +416,29 @@ static int intc_irqpin_probe(struct platform_device *pdev)
 	for (k = 0; k < p->number_of_irqs; k++)
 		intc_irqpin_mask_unmask_prio(p, k, 1);
 
+	/* clear all pending interrupts */
+	intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, 0x0);
+
+	/* scan for shared interrupt lines */
+	ref_irq = p->irq[0].requested_irq;
+	p->shared_irqs = true;
+	for (k = 1; k < p->number_of_irqs; k++) {
+		if (ref_irq != p->irq[k].requested_irq) {
+			p->shared_irqs = false;
+			break;
+		}
+	}
+
 	/* use more severe masking method if requested */
 	if (p->config.control_parent) {
 		enable_fn = intc_irqpin_irq_enable_force;
 		disable_fn = intc_irqpin_irq_disable_force;
-	} else {
+	} else if (!p->shared_irqs) {
 		enable_fn = intc_irqpin_irq_enable;
 		disable_fn = intc_irqpin_irq_disable;
+	} else {
+		enable_fn = intc_irqpin_shared_irq_enable;
+		disable_fn = intc_irqpin_shared_irq_disable;
 	}
 
 	irq_chip = &p->irq_chip;
@@ -400,18 +460,34 @@ static int intc_irqpin_probe(struct platform_device *pdev)
 		goto err0;
 	}
 
-	/* request and set priority on interrupts one by one */
-	for (k = 0; k < p->number_of_irqs; k++) {
-		if (devm_request_irq(&pdev->dev, p->irq[k].requested_irq,
-				     intc_irqpin_irq_handler,
-				     0, name, &p->irq[k])) {
+	if (p->shared_irqs) {
+		/* request one shared interrupt */
+		if (devm_request_irq(&pdev->dev, p->irq[0].requested_irq,
+				intc_irqpin_shared_irq_handler,
+				IRQF_SHARED, name, p)) {
 			dev_err(&pdev->dev, "failed to request low IRQ\n");
 			ret = -ENOENT;
 			goto err1;
 		}
-		intc_irqpin_mask_unmask_prio(p, k, 0);
+	} else {
+		/* request interrupts one by one */
+		for (k = 0; k < p->number_of_irqs; k++) {
+			if (devm_request_irq(&pdev->dev,
+					p->irq[k].requested_irq,
+					intc_irqpin_irq_handler,
+					0, name, &p->irq[k])) {
+				dev_err(&pdev->dev,
+					"failed to request low IRQ\n");
+				ret = -ENOENT;
+				goto err1;
+			}
+		}
 	}
 
+	/* unmask all interrupts on prio level */
+	for (k = 0; k < p->number_of_irqs; k++)
+		intc_irqpin_mask_unmask_prio(p, k, 0);
+
 	dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs);
 
 	/* warn in case of mismatch if irq base is specified */