From patchwork Thu Jan 2 15:13:02 2014
X-Patchwork-Submitter: Russell King
X-Patchwork-Id: 3425211
From: Russell King
To: dmaengine@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
 linux-omap@vger.kernel.org
Cc: Vinod Koul, Dan Williams
Subject: [PATCH RFC 25/26] dmaengine: omap-dma: move IRQ handling to omap-dma
Date: Thu, 02 Jan 2014 15:13:02 +0000
In-Reply-To: <20140102150836.GA3826@n2100.arm.linux.org.uk>
References: <20140102150836.GA3826@n2100.arm.linux.org.uk>

Signed-off-by: Russell King
---
 drivers/dma/omap-dma.c | 121 +++++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 115 insertions(+), 6 deletions(-)

diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 611963e82623..f219c9a4dab7 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -30,6 +30,10 @@ struct omap_dmadev {
 	void __iomem *base;
 	const struct omap_dma_reg *reg_map;
 	struct omap_system_dma_plat_info *plat;
+	bool legacy;
+	spinlock_t irq_lock;
+	uint32_t irq_enable_mask;
+	struct omap_chan *lch_map[32];
 };
 
 struct omap_chan {
@@ -253,10 +257,22 @@ static void omap_dma_clear_csr(struct omap_chan *c)
 		omap_dma_chan_write(c, CSR, ~0);
 }
 
+static unsigned omap_dma_get_csr(struct omap_chan *c)
+{
+	unsigned val = omap_dma_chan_read(c, CSR);
+
+	if (!dma_omap1())
+		omap_dma_chan_write(c, CSR, val);
+
+	return val;
+}
+
 static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
 	unsigned lch)
 {
 	c->channel_base = od->base + od->plat->channel_stride * lch;
+
+	od->lch_map[lch] = c;
 }
 
 static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
@@ -460,32 +476,103 @@ static void omap_dma_sched(unsigned long data)
 	}
 }
 
+static irqreturn_t omap_dma_irq(int irq, void *devid)
+{
+	struct omap_dmadev *od = devid;
+	unsigned status, channel;
+
+	spin_lock(&od->irq_lock);
+
+	status = omap_dma_glbl_read(od, IRQSTATUS_L1);
+	status &= od->irq_enable_mask;
+	if (status == 0) {
+		spin_unlock(&od->irq_lock);
+		return IRQ_NONE;
+	}
+
+	while ((channel = ffs(status)) != 0) {
+		unsigned mask, csr;
+		struct omap_chan *c;
+
+		channel -= 1;
+		mask = BIT(channel);
+		status &= ~mask;
+
+		c = od->lch_map[channel];
+		if (c == NULL) {
+			/* This should never happen */
+			dev_err(od->ddev.dev, "invalid channel %u\n", channel);
+			continue;
+		}
+
+		csr = omap_dma_get_csr(c);
+		omap_dma_glbl_write(od, IRQSTATUS_L1, mask);
+
+		omap_dma_callback(channel, csr, c);
+	}
+
+	spin_unlock(&od->irq_lock);
+
+	return IRQ_HANDLED;
+}
+
 static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
 	struct omap_chan *c = to_omap_dma_chan(chan);
 	int ret;
 
-	dev_info(od->ddev.dev, "allocating channel for %u\n", c->dma_sig);
+	if (od->legacy) {
+		ret = omap_request_dma(c->dma_sig, "DMA engine",
+				       omap_dma_callback, c, &c->dma_ch);
+	} else {
+		ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
+				       &c->dma_ch);
+	}
 
-	ret = omap_request_dma(c->dma_sig, "DMA engine", omap_dma_callback,
-		c, &c->dma_ch);
+	dev_info(od->ddev.dev, "allocating channel %u for %u\n",
+		 c->dma_ch, c->dma_sig);
 
-	if (ret >= 0)
+	if (ret >= 0) {
 		omap_dma_assign(od, c, c->dma_ch);
 
+		if (!od->legacy) {
+			unsigned val;
+
+			spin_lock_irq(&od->irq_lock);
+			val = BIT(c->dma_ch);
+			omap_dma_glbl_write(od, IRQSTATUS_L1, val);
+			od->irq_enable_mask |= val;
+			omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
+
+			val = omap_dma_glbl_read(od, IRQENABLE_L0);
+			val &= ~BIT(c->dma_ch);
+			omap_dma_glbl_write(od, IRQENABLE_L0, val);
+			spin_unlock_irq(&od->irq_lock);
+		}
+	}
+
 	return ret;
 }
 
 static void omap_dma_free_chan_resources(struct dma_chan *chan)
 {
+	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
 	struct omap_chan *c = to_omap_dma_chan(chan);
 
+	if (!od->legacy) {
+		spin_lock_irq(&od->irq_lock);
+		od->irq_enable_mask &= ~BIT(c->dma_ch);
+		omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
+		spin_unlock_irq(&od->irq_lock);
+	}
+
 	c->channel_base = NULL;
+	od->lch_map[c->dma_ch] = NULL;
 	vchan_free_chan_resources(&c->vc);
 	omap_free_dma(c->dma_ch);
 
-	dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+	dev_info(od->ddev.dev, "freeing channel for %u\n", c->dma_sig);
 }
 
 static size_t omap_dma_sg_size(struct omap_sg *sg)
@@ -999,7 +1086,7 @@ static int omap_dma_probe(struct platform_device *pdev)
 {
 	struct omap_dmadev *od;
 	struct resource *res;
-	int rc, i;
+	int rc, i, irq;
 
 	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
 	if (!od)
@@ -1029,6 +1116,7 @@ static int omap_dma_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&od->ddev.channels);
 	INIT_LIST_HEAD(&od->pending);
 	spin_lock_init(&od->lock);
+	spin_lock_init(&od->irq_lock);
 
 	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
 
@@ -1040,6 +1128,21 @@ static int omap_dma_probe(struct platform_device *pdev)
 		}
 	}
 
+	irq = platform_get_irq(pdev, 1);
+	if (irq <= 0) {
+		dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
+		od->legacy = true;
+	} else {
+		/* Disable all interrupts */
+		od->irq_enable_mask = 0;
+		omap_dma_glbl_write(od, IRQENABLE_L1, 0);
+
+		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
+				      IRQF_SHARED, "omap-dma-engine", od);
+		if (rc)
+			return rc;
+	}
+
 	rc = dma_async_device_register(&od->ddev);
 	if (rc) {
 		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
@@ -1076,6 +1179,12 @@ static int omap_dma_remove(struct platform_device *pdev)
 		of_dma_controller_free(pdev->dev.of_node);
 
 	dma_async_device_unregister(&od->ddev);
+
+	if (!od->legacy) {
+		/* Disable all interrupts */
+		omap_dma_glbl_write(od, IRQENABLE_L0, 0);
+	}
+
 	omap_dma_free(od);
 
 	return 0;
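
For anyone following the handler logic: omap_dma_irq() drains the L1 status
word one set bit at a time using ffs(), which returns the 1-based index of
the lowest set bit (0 once the word is empty). Below is a minimal standalone
sketch of that dispatch pattern in plain userspace C; dispatch() and the
string table standing in for od->lch_map[] are illustrative assumptions, not
the driver's API.

#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

#define NR_CHANNELS 32

/* Stand-in for the driver's od->lch_map[]: channel number -> owner */
static const char *lch_map[NR_CHANNELS] = {
	[3] = "chan3", [7] = "chan7", [20] = "chan20",
};

/*
 * The dispatch pattern used by omap_dma_irq(): ffs() gives the 1-based
 * index of the lowest set bit (0 if none), so the loop converts it to
 * 0-based, clears that bit, and services the matching channel.
 */
static void dispatch(uint32_t status)
{
	unsigned int channel;

	while ((channel = ffs(status)) != 0) {
		channel -= 1;
		status &= ~(1U << channel);	/* BIT(channel) in kernel terms */

		if (lch_map[channel] == NULL) {
			/* mirrors the driver's "should never happen" branch */
			printf("invalid channel %u\n", channel);
			continue;
		}
		printf("IRQ for channel %u (%s)\n", channel, lch_map[channel]);
	}
}

int main(void)
{
	/* bits 3, 5 and 20 pending; bit 5 has no registered channel */
	dispatch((1U << 3) | (1U << 5) | (1U << 20));
	return 0;
}

Note that the real handler snapshots IRQSTATUS_L1 once under od->irq_lock,
masks it with irq_enable_mask, and acknowledges each channel individually
via IRQSTATUS_L1 as it walks the word.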
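
The alloc/free paths, meanwhile, maintain irq_enable_mask as a software
shadow of IRQENABLE_L1 and clear the channel's bit in IRQENABLE_L0, so the
legacy handler on the first interrupt line stops seeing that channel. A toy
model of the shadow-mask bookkeeping, with the two registers stubbed as an
array (route_to_l1()/unroute_from_l1() are illustrative names, not the
driver's API):

#include <stdio.h>
#include <stdint.h>

/* Stubbed "registers": [0] models IRQENABLE_L0, [1] models IRQENABLE_L1 */
static uint32_t irqenable[2];

/* Software shadow of IRQENABLE_L1, like irq_enable_mask in omap_dmadev */
static uint32_t irq_enable_mask;

/* What alloc_chan_resources() does for a non-legacy channel */
static void route_to_l1(unsigned int ch)
{
	uint32_t bit = 1U << ch;

	irq_enable_mask |= bit;		/* remember the enable in software */
	irqenable[1] = irq_enable_mask;	/* enable the channel on L1 */
	irqenable[0] &= ~bit;		/* and hide it from the L0 line */
}

/* What free_chan_resources() undoes */
static void unroute_from_l1(unsigned int ch)
{
	irq_enable_mask &= ~(1U << ch);
	irqenable[1] = irq_enable_mask;
}

int main(void)
{
	irqenable[0] = ~0U;	/* pretend L0 starts with everything enabled */

	route_to_l1(3);
	route_to_l1(7);
	unroute_from_l1(3);

	printf("L0=%08x L1=%08x\n",
	       (unsigned int)irqenable[0], (unsigned int)irqenable[1]);
	return 0;
}

Keeping the shadow copy avoids a read-modify-write of IRQENABLE_L1 in the
hot path and lets the interrupt handler cheaply reject status bits for
channels the dmaengine driver does not own.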