From patchwork Tue Jul 21 03:01:06 2015
X-Patchwork-Submitter: Jun Nie <jun.nie@linaro.org>
X-Patchwork-Id: 6832011
From: Jun Nie <jun.nie@linaro.org>
To: maxime.ripard@free-electrons.com, vinod.koul@intel.com,
	dmaengine@vger.kernel.org
Cc: shawn.guo@linaro.org, wan.zhijun@zte.com.cn,
	Jun Nie <jun.nie@linaro.org>
Subject: [PATCH 2/2] dmaengine: zxdma: Support cyclic dma
Date: Tue, 21 Jul 2015 11:01:06 +0800
Message-Id: <1437447666-7012-2-git-send-email-jun.nie@linaro.org>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1437447666-7012-1-git-send-email-jun.nie@linaro.org>
References: <1437447666-7012-1-git-send-email-jun.nie@linaro.org>

Support cyclic DMA for audio playback.

Signed-off-by: Jun Nie <jun.nie@linaro.org>
---
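[Note, not part of the commit: for reviewers unfamiliar with the cyclic
path, a minimal sketch of how an audio client would be expected to drive
it. The zx_pcm_* names are hypothetical; only the dmaengine/ALSA calls
are existing API. The controller raises one interrupt per period, and
vchan_cyclic_callback() in the interrupt handler below is what fires
tx->callback each time, keeping the ALSA ring buffer advancing.]

	#include <linux/dmaengine.h>
	#include <sound/pcm.h>

	/* Invoked once per completed period via vchan_cyclic_callback(). */
	static void zx_pcm_period_elapsed(void *arg)	/* hypothetical */
	{
		snd_pcm_period_elapsed(arg);
	}

	static int zx_pcm_start(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				struct snd_pcm_substream *substream)
	{
		struct dma_async_tx_descriptor *tx;

		/* Maps to zx_dma_prep_dma_cyclic() below; the prep code
		 * assumes buf_len is a multiple of period_len and rejects
		 * periods larger than DMA_MAX_SIZE. */
		tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					       DMA_MEM_TO_DEV,
					       DMA_PREP_INTERRUPT);
		if (!tx)
			return -ENOMEM;

		tx->callback = zx_pcm_period_elapsed;
		tx->callback_param = substream;
		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		return 0;
	}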
 drivers/dma/zx296702_dma.c | 93 +++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 84 insertions(+), 9 deletions(-)

diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 4757f74..9e5d1fb 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -101,6 +101,7 @@ struct zx_dma_chan {
 	struct dma_slave_config slave_cfg;
 	int			id; /* Request phy chan id */
 	u32			ccfg;
+	u32			cyclic;
 	struct virt_dma_chan	vc;
 	struct zx_dma_phy	*phy;
 	struct list_head	node;
@@ -279,7 +280,7 @@ static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
 	u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
 	u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
 	u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
-	u32 i, irq_chan = 0;
+	u32 i, irq_chan = 0, task = 0;
 
 	while (tc) {
 		i = __ffs(tc);
@@ -290,11 +291,16 @@ static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
 			unsigned long flags;
 
 			spin_lock_irqsave(&c->vc.lock, flags);
-			vchan_cookie_complete(&p->ds_run->vd);
-			p->ds_done = p->ds_run;
+			if (c->cyclic) {
+				vchan_cyclic_callback(&p->ds_run->vd);
+			} else {
+				vchan_cookie_complete(&p->ds_run->vd);
+				p->ds_done = p->ds_run;
+				task = 1;
+			}
 			spin_unlock_irqrestore(&c->vc.lock, flags);
+			irq_chan |= BIT(i);
 		}
-		irq_chan |= BIT(i);
 	}
 
 	if (serr || derr || cfg)
@@ -306,12 +312,9 @@ static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
 	writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
 	writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
 
-	if (irq_chan) {
+	if (task)
 		zx_dma_task(d);
-		return IRQ_HANDLED;
-	} else {
-		return IRQ_NONE;
-	}
+	return IRQ_HANDLED;
 }
 
 static void zx_dma_free_chan_resources(struct dma_chan *chan)
@@ -534,6 +537,7 @@ static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
 		len -= copy;
 	} while (len);
 
+	c->cyclic = 0;
 	ds->desc_hw[num - 1].lli = 0;	/* end of link */
 	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
 	return vchan_tx_prep(&c->vc, &ds->vd, flags);
@@ -566,6 +570,7 @@ static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
 	if (!ds)
 		return NULL;
 
+	c->cyclic = 0;
 	num = 0;
 	for_each_sg(sgl, sg, sglen, i) {
 		addr = sg_dma_address(sg);
@@ -596,6 +601,49 @@ static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
 	return vchan_tx_prep(&c->vc, &ds->vd, flags);
 }
 
+static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+		size_t period_len, enum dma_transfer_direction dir,
+		unsigned long flags)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_desc_sw *ds;
+	dma_addr_t src = 0, dst = 0;
+	int num_periods = buf_len / period_len;
+	int buf = 0, num = 0;
+
+	if (period_len > DMA_MAX_SIZE) {
+		dev_err(chan->device->dev, "maximum period size exceeded\n");
+		return NULL;
+	}
+
+	if (zx_pre_config(c, dir))
+		return NULL;
+
+	ds = zx_alloc_desc_resource(num_periods, chan);
+	if (!ds)
+		return NULL;
+	c->cyclic = 1;
+
+	while (buf < buf_len) {
+		if (dir == DMA_MEM_TO_DEV) {
+			src = dma_addr;
+			dst = c->dev_addr;
+		} else if (dir == DMA_DEV_TO_MEM) {
+			src = c->dev_addr;
+			dst = dma_addr;
+		}
+		zx_dma_fill_desc(ds, dst, src, period_len, num++,
+				 c->ccfg | ZX_IRQ_ENABLE_ALL);
+		dma_addr += period_len;
+		buf += period_len;
+	}
+
+	ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
+	ds->size = buf_len;
+	return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
 static int zx_dma_config(struct dma_chan *chan,
 			 struct dma_slave_config *cfg)
 {
@@ -641,6 +689,30 @@ static int zx_dma_terminate_all(struct dma_chan *chan)
 	return 0;
 }
 
+static int zx_dma_transfer_pause(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	u32 val = 0;
+
+	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
+	val &= ~ZX_CH_ENABLE;
+	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
+
+	return 0;
+}
+
+static int zx_dma_transfer_resume(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	u32 val = 0;
+
+	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
+	val |= ZX_CH_ENABLE;
+	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
+
+	return 0;
+}
+
 static void zx_dma_free_desc(struct virt_dma_desc *vd)
 {
 	struct zx_dma_desc_sw *ds =
@@ -745,9 +817,12 @@ static int zx_dma_probe(struct platform_device *op)
 	d->slave.device_tx_status = zx_dma_tx_status;
 	d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
 	d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
+	d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
 	d->slave.device_issue_pending = zx_dma_issue_pending;
 	d->slave.device_config = zx_dma_config;
 	d->slave.device_terminate_all = zx_dma_terminate_all;
+	d->slave.device_pause = zx_dma_transfer_pause;
+	d->slave.device_resume = zx_dma_transfer_resume;
 	d->slave.copy_align = DMA_ALIGN;
 	d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
 	d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
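[Also not part of the commit: the new pause/resume hooks are reached
through the generic dmaengine wrappers, e.g. from an ALSA trigger path.
A sketch under the same assumptions as above, with a hypothetical,
simplified zx_pcm_trigger(); dmaengine_pause() and dmaengine_resume()
land in zx_dma_transfer_pause()/zx_dma_transfer_resume(), which clear
and set ZX_CH_ENABLE in the channel control register.]

	static int zx_pcm_trigger(struct dma_chan *chan, int cmd)
	{
		switch (cmd) {
		case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
			return dmaengine_pause(chan);	/* clears ZX_CH_ENABLE */
		case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
			return dmaengine_resume(chan);	/* sets ZX_CH_ENABLE */
		case SNDRV_PCM_TRIGGER_STOP:
			return dmaengine_terminate_all(chan);
		default:
			return -EINVAL;
		}
	}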