From patchwork Mon Jun 8 08:33:14 2015
X-Patchwork-Submitter: Ludovic Desroches
X-Patchwork-Id: 6563371
From: Ludovic Desroches
Subject: [PATCH v2 1/3] dmaengine: at_xdmac: lock fixes
Date: Mon, 8 Jun 2015 10:33:14 +0200
Message-ID: <1433752399-26231-1-git-send-email-ludovic.desroches@atmel.com>
In-Reply-To: <1433751820-26142-1-git-send-email-ludovic.desroches@atmel.com>
References: <1433751820-26142-1-git-send-email-ludovic.desroches@atmel.com>
X-Mailing-List: dmaengine@vger.kernel.org

Using the _bh variant for spin locks causes this kind of warning:

Starting logging: ------------[ cut here ]------------
WARNING: CPU: 0 PID: 3 at /ssd_drive/linux/kernel/softirq.c:151 __local_bh_enable_ip+0xe8/0xf4()
Modules linked in:
CPU: 0 PID: 3 Comm: ksoftirqd/0 Not tainted 4.1.0-rc2+ #94
Hardware name: Atmel SAMA5
[] (unwind_backtrace) from [] (show_stack+0x10/0x14)
[] (show_stack) from [] (warn_slowpath_common+0x80/0xac)
[] (warn_slowpath_common) from [] (warn_slowpath_null+0x1c/0x24)
[] (warn_slowpath_null) from [] (__local_bh_enable_ip+0xe8/0xf4)
[] (__local_bh_enable_ip) from [] (at_xdmac_device_terminate_all+0xf4/0x100)
[] (at_xdmac_device_terminate_all) from [] (atmel_complete_tx_dma+0x34/0xf4)
[] (atmel_complete_tx_dma) from [] (at_xdmac_tasklet+0x14c/0x1ac)
[] (at_xdmac_tasklet) from [] (tasklet_action+0x68/0xb4)
[] (tasklet_action) from [] (__do_softirq+0xfc/0x238)
[] (__do_softirq) from [] (run_ksoftirqd+0x28/0x34)
[] (run_ksoftirqd) from [] (smpboot_thread_fn+0x138/0x18c)
[] (smpboot_thread_fn) from [] (kthread+0xdc/0xf0)
[] (kthread) from [] (ret_from_fork+0x14/0x34)
---[ end trace b57b14a99c1d8812 ]---

It comes from the fact that client devices can call code of the DMA
controller with IRQs disabled. The _bh variant is not intended to be used
in this case since it unconditionally re-enables bottom-half processing.
Switch to the irqsave/irqrestore variants to avoid this situation.
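To illustrate the change (this sketch is not part of the patch; the lock
and function names below are made up for the example): the _bh variants
disable and re-enable bottom-half processing, and the re-enable path ends
in __local_bh_enable_ip(), which warns when it runs with interrupts
disabled, as in the trace above. The irqsave/irqrestore variants instead
save the caller's interrupt state and put it back on unlock, so they are
safe whatever context the client calls from:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* stands in for atchan->lock */

/*
 * Hypothetical teardown path; it may be reached from a client callback
 * that already runs with interrupts disabled.
 */
static void demo_terminate(void)
{
	unsigned long flags;

	/* Save the caller's IRQ state and restore it on unlock. */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... channel shutdown would happen here ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}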
Signed-off-by: Ludovic Desroches
Cc: stable@vger.kernel.org # 4.0 and later
---
 drivers/dma/at_xdmac.c | 75 +++++++++++++++++++++++++++++---------------------
 1 file changed, 43 insertions(+), 32 deletions(-)

diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 9b602a6..ec931a8 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -421,8 +421,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct at_xdmac_desc *desc = txd_to_at_desc(tx);
 	struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
 	dma_cookie_t cookie;
+	unsigned long irqflags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, irqflags);
 	cookie = dma_cookie_assign(tx);
 
 	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
@@ -431,7 +432,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (list_is_singular(&atchan->xfers_list))
 		at_xdmac_start_xfer(atchan, desc);
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, irqflags);
 	return cookie;
 }
 
@@ -596,6 +597,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct scatterlist *sg;
 	int i;
 	unsigned int xfer_size = 0;
+	unsigned long irqflags;
+	struct dma_async_tx_descriptor *ret = NULL;
 
 	if (!sgl)
 		return NULL;
@@ -611,7 +614,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		 flags);
 
 	/* Protect dma_sconfig field that can be modified by set_slave_conf. */
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, irqflags);
 
 	/* Prepare descriptors. */
 	for_each_sg(sgl, sg, sg_len, i) {
@@ -622,8 +625,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		mem = sg_dma_address(sg);
 		if (unlikely(!len)) {
 			dev_err(chan2dev(chan), "sg data length is zero\n");
-			spin_unlock_bh(&atchan->lock);
-			return NULL;
+			goto spin_unlock;
 		}
 		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
 			__func__, i, len, mem);
@@ -633,8 +635,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
-			spin_unlock_bh(&atchan->lock);
-			return NULL;
+			goto spin_unlock;
 		}
 
 		/* Linked list descriptor setup. */
@@ -673,13 +674,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		xfer_size += len;
 	}
 
-	spin_unlock_bh(&atchan->lock);
 
 	first->tx_dma_desc.flags = flags;
 	first->xfer_size = xfer_size;
 	first->direction = direction;
+	ret = &first->tx_dma_desc;
 
-	return &first->tx_dma_desc;
+spin_unlock:
+	spin_unlock_irqrestore(&atchan->lock, irqflags);
+	return ret;
 }
 
 static struct dma_async_tx_descriptor *
@@ -692,6 +695,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 	struct at_xdmac_desc *first = NULL, *prev = NULL;
 	unsigned int periods = buf_len / period_len;
 	int i;
+	unsigned long irqflags;
 
 	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
 		__func__, &buf_addr, buf_len, period_len,
@@ -710,16 +714,16 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 	for (i = 0; i < periods; i++) {
 		struct at_xdmac_desc *desc = NULL;
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, irqflags);
 		desc = at_xdmac_get_desc(atchan);
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
-			spin_unlock_bh(&atchan->lock);
+			spin_unlock_irqrestore(&atchan->lock, irqflags);
 			return NULL;
 		}
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, irqflags);
 		dev_dbg(chan2dev(chan),
 			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
 			__func__, desc, &desc->tx_dma_desc.phys);
@@ -1036,6 +1040,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 					| AT_XDMAC_CC_SIF(0)
 					| AT_XDMAC_CC_MBSIZE_SIXTEEN
 					| AT_XDMAC_CC_TYPE_MEM_TRAN;
+	unsigned long irqflags;
 
 	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
 		__func__, &src, &dest, len, flags);
@@ -1051,9 +1056,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n",
 			__func__, remaining_size);
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, irqflags);
 		desc = at_xdmac_get_desc(atchan);
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, irqflags);
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
@@ -1123,6 +1128,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	int residue;
 	u32 cur_nda, mask, value;
 	u8 dwidth = 0;
+	unsigned long flags;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
@@ -1131,7 +1137,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	if (!txstate)
 		return ret;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
 
@@ -1141,8 +1147,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	 */
 	if (!desc->active_xfer) {
 		dma_set_residue(txstate, desc->xfer_size);
-		spin_unlock_bh(&atchan->lock);
-		return ret;
+		goto spin_unlock;
 	}
 
 	residue = desc->xfer_size;
@@ -1173,14 +1178,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	}
 
 	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
 
-	spin_unlock_bh(&atchan->lock);
-
 	dma_set_residue(txstate, residue);
 
 	dev_dbg(chan2dev(chan),
 		 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
 		 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
 
+spin_unlock:
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	return ret;
 }
 
@@ -1201,8 +1206,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
 	struct at_xdmac_desc *desc;
+	unsigned long flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	/*
 	 * If channel is enabled, do nothing, advance_work will be triggered
@@ -1217,7 +1223,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 		at_xdmac_start_xfer(atchan, desc);
 	}
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1353,12 +1359,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
 {
 	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
 	int ret;
+	unsigned long flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	ret = at_xdmac_set_slave_config(chan, config);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return ret;
 }
@@ -1367,18 +1374,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
 {
 	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
 	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
 		return 0;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
 	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
 	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
 		cpu_relax();
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1387,18 +1395,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
 {
 	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	if (!at_xdmac_chan_is_paused(atchan)) {
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 		return 0;
 	}
 
 	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
 	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1408,10 +1417,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
 	struct at_xdmac_desc *desc, *_desc;
 	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
 	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
 		cpu_relax();
@@ -1421,7 +1431,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
 		at_xdmac_remove_xfer(atchan, desc);
 
 	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1431,8 +1441,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
 	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac_desc *desc;
 	int i;
+	unsigned long flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	if (at_xdmac_chan_is_enabled(atchan)) {
 		dev_err(chan2dev(chan),
@@ -1463,7 +1474,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
 	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
 
 spin_unlock:
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return i;
 }