[PATCH/RFC v3 2/4] serial: sh-sci: Get rid of the workqueue to handle receive DMA requests

Message ID: 1440181546-7334-3-git-send-email-geert+renesas@glider.be
State: Superseded
Delegated to: Geert Uytterhoeven

Commit Message

Geert Uytterhoeven Aug. 21, 2015, 6:25 p.m. UTC
The receive DMA workqueue function work_fn_rx() handles two things:
  1. Reception of a full buffer on completion of a receive DMA request,
  2. Reception of a partial buffer on receive DMA time-out.
The workqueue is kicked both by the receive DMA completion handler and
by a timer to handle DMA time-outs.

As there are always two receive DMA requests active, it's possible that
the receive DMA completion handler is called a second time before the
workqueue function runs.

As the time-out handler re-enables the receive interrupt, an interrupt
may come in before the time-out has been fully handled.

Move part 1 into the receive DMA completion handler, and move part 2
into the receive DMA time-out handler, to fix these race conditions.
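
In short, the restructured flow looks roughly like this (a condensed
sketch for reviewers, not the literal driver code: push_full_buffer()
and push_partial_buffer() are hypothetical stand-ins for the
sci_dma_rx_push()/tty_flip_buffer_push() sequence, and locking and
error paths are elided):

	/* 1. Full buffer: handled directly in the DMA completion callback */
	static void sci_dma_rx_complete(void *arg)
	{
		struct sci_port *s = arg;

		/* push the completed buffer to the tty layer */
		push_full_buffer(s);
		/* re-arm the time-out for the other, still active request */
		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
		/* re-submit this buffer so two requests stay in flight */
		resubmit_descriptor(s);
	}

	/* 2. Partial buffer: handled directly in the time-out handler */
	static void rx_timer_fn(unsigned long arg)
	{
		struct sci_port *s = (struct sci_port *)arg;

		/* stop the in-flight transfer and compute the residue */
		dmaengine_terminate_all(s->chan_rx);
		/* push whatever was received so far to the tty layer */
		push_partial_buffer(s);
		/* restart both receive DMA requests */
		sci_submit_rx(s);
	}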

Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
---
v3:
  - New.
---
 drivers/tty/serial/sh-sci.c | 165 ++++++++++++++++++++------------------------
 1 file changed, 76 insertions(+), 89 deletions(-)

Patch

diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 0622cafaf1c71cab..0b6a367ac343221c 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -115,7 +115,6 @@  struct sci_port {
 	struct sh_dmae_slave		param_tx;
 	struct sh_dmae_slave		param_rx;
 	struct work_struct		work_tx;
-	struct work_struct		work_rx;
 	struct timer_list		rx_timer;
 	unsigned int			rx_timeout;
 #endif
@@ -1336,10 +1335,29 @@  static int sci_dma_rx_find_active(struct sci_port *s)
 	return -1;
 }
 
+static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
+{
+	struct dma_chan *chan = s->chan_rx;
+	struct uart_port *port = &s->port;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+	s->chan_rx = NULL;
+	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
+	spin_unlock_irqrestore(&port->lock, flags);
+	dmaengine_terminate_all(chan);
+	dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
+			  sg_dma_address(&s->sg_rx[0]));
+	dma_release_channel(chan);
+	if (enable_pio)
+		sci_start_rx(port);
+}
+
 static void sci_dma_rx_complete(void *arg)
 {
 	struct sci_port *s = arg;
 	struct uart_port *port = &s->port;
+	struct dma_async_tx_descriptor *desc;
 	unsigned long flags;
 	int active, count = 0;
 
@@ -1354,30 +1372,32 @@  static void sci_dma_rx_complete(void *arg)
 
 	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
 
-	spin_unlock_irqrestore(&port->lock, flags);
-
 	if (count)
 		tty_flip_buffer_push(&port->state->port);
 
-	schedule_work(&s->work_rx);
-}
+	desc = dmaengine_prep_slave_sg(s->chan_rx, &s->sg_rx[active], 1,
+				       DMA_DEV_TO_MEM,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc)
+		goto fail;
 
-static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
-{
-	struct dma_chan *chan = s->chan_rx;
-	struct uart_port *port = &s->port;
-	unsigned long flags;
+	desc->callback = sci_dma_rx_complete;
+	desc->callback_param = s;
+	s->cookie_rx[active] = dmaengine_submit(desc);
+	if (dma_submit_error(s->cookie_rx[active]))
+		goto fail;
 
-	spin_lock_irqsave(&port->lock, flags);
-	s->chan_rx = NULL;
-	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
+	s->active_rx = s->cookie_rx[!active];
+
+	dev_dbg(port->dev, "  cookie %d #%d, new active cookie %d\n",
+		s->cookie_rx[active], active, s->active_rx);
 	spin_unlock_irqrestore(&port->lock, flags);
-	dmaengine_terminate_all(chan);
-	dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
-			  sg_dma_address(&s->sg_rx[0]));
-	dma_release_channel(chan);
-	if (enable_pio)
-		sci_start_rx(port);
+	return;
+
+fail:
+	spin_unlock_irqrestore(&port->lock, flags);
+	dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
+	sci_rx_dma_release(s, true);
 }
 
 static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
@@ -1438,72 +1458,6 @@  fail:
 	sci_rx_dma_release(s, true);
 }
 
-static void work_fn_rx(struct work_struct *work)
-{
-	struct sci_port *s = container_of(work, struct sci_port, work_rx);
-	struct uart_port *port = &s->port;
-	struct dma_async_tx_descriptor *desc;
-	struct dma_tx_state state;
-	enum dma_status status;
-	unsigned long flags;
-	int new;
-
-	spin_lock_irqsave(&port->lock, flags);
-	new = sci_dma_rx_find_active(s);
-	if (new < 0) {
-		spin_unlock_irqrestore(&port->lock, flags);
-		return;
-	}
-
-	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
-	if (status != DMA_COMPLETE) {
-		/* Handle incomplete DMA receive */
-		struct dma_chan *chan = s->chan_rx;
-		unsigned int read;
-		int count;
-
-		dmaengine_terminate_all(chan);
-		read = sg_dma_len(&s->sg_rx[new]) - state.residue;
-		dev_dbg(port->dev, "Read %u bytes with cookie %d\n", read,
-			s->active_rx);
-
-		if (read) {
-			count = sci_dma_rx_push(s, s->rx_buf[new], read);
-			if (count)
-				tty_flip_buffer_push(&port->state->port);
-		}
-
-		spin_unlock_irqrestore(&port->lock, flags);
-
-		sci_submit_rx(s);
-		return;
-	}
-
-	desc = dmaengine_prep_slave_sg(s->chan_rx, &s->sg_rx[new], 1,
-				       DMA_DEV_TO_MEM,
-				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!desc)
-		goto fail;
-
-	desc->callback = sci_dma_rx_complete;
-	desc->callback_param = s;
-	s->cookie_rx[new] = dmaengine_submit(desc);
-	if (dma_submit_error(s->cookie_rx[new]))
-		goto fail;
-
-	s->active_rx = s->cookie_rx[!new];
-
-	dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
-		__func__, s->cookie_rx[new], new, s->active_rx);
-	spin_unlock_irqrestore(&port->lock, flags);
-	return;
-
-fail:
-	spin_unlock_irqrestore(&port->lock, flags);
-	dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
-	sci_rx_dma_release(s, true);
-}
-
 static void work_fn_tx(struct work_struct *work)
 {
 	struct sci_port *s = container_of(work, struct sci_port, work_tx);
@@ -1676,15 +1630,49 @@  static void rx_timer_fn(unsigned long arg)
 {
 	struct sci_port *s = (struct sci_port *)arg;
 	struct uart_port *port = &s->port;
-	u16 scr = serial_port_in(port, SCSCR);
+	struct dma_tx_state state;
+	enum dma_status status;
+	unsigned long flags;
+	unsigned int read;
+	int active, count;
+	u16 scr;
 
+	spin_lock_irqsave(&port->lock, flags);
+
+	dev_dbg(port->dev, "DMA Rx timed out\n");
+	scr = serial_port_in(port, SCSCR);
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
 		scr &= ~SCSCR_RDRQE;
 		enable_irq(s->irqs[SCIx_RXI_IRQ]);
 	}
 	serial_port_out(port, SCSCR, scr | SCSCR_RIE);
-	dev_dbg(port->dev, "DMA Rx timed out\n");
-	schedule_work(&s->work_rx);
+
+	active = sci_dma_rx_find_active(s);
+	if (active < 0) {
+		spin_unlock_irqrestore(&port->lock, flags);
+		return;
+	}
+
+	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
+	if (status == DMA_COMPLETE)
+		dev_dbg(port->dev, "Cookie %d #%d has already completed\n",
+			s->active_rx, active);
+
+	/* Handle incomplete DMA receive */
+	dmaengine_terminate_all(s->chan_rx);
+	read = sg_dma_len(&s->sg_rx[active]) - state.residue;
+	dev_dbg(port->dev, "Read %u bytes with cookie %d\n", read,
+		s->active_rx);
+
+	if (read) {
+		count = sci_dma_rx_push(s, s->rx_buf[active], read);
+		if (count)
+			tty_flip_buffer_push(&port->state->port);
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	sci_submit_rx(s);
 }
 
 static void sci_request_dma(struct uart_port *port)
@@ -1768,7 +1756,6 @@  static void sci_request_dma(struct uart_port *port)
 			dma += s->buf_len_rx;
 		}
 
-		INIT_WORK(&s->work_rx, work_fn_rx);
 		setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
 
 		sci_submit_rx(s);