
[2/3] dma: xilinx_dpdma: Remove unnecessary use of irqsave/restore

Message ID 20240308210034.3634938-3-sean.anderson@linux.dev (mailing list archive)
State New, archived
Series dma: xilinx_dpdma: Fix locking

Commit Message

Sean Anderson March 8, 2024, 9 p.m. UTC
xilinx_dpdma_chan_done_irq and xilinx_dpdma_chan_vsync_irq are always
called with IRQs disabled from xilinx_dpdma_irq_handler. Therefore we
don't need to save/restore the IRQ flags.
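
For reference, the calling context looks roughly like this (a simplified
sketch only; the real handler dispatches based on the interrupt status and
handles more than these two events):

	static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
	{
		/* hypothetical: the real handler walks all channels */
		struct xilinx_dpdma_chan *chan = data;

		/*
		 * Hardirq context: local interrupts are disabled on this CPU,
		 * so the callees can take chan->lock with a plain spin_lock().
		 */
		xilinx_dpdma_chan_done_irq(chan);
		xilinx_dpdma_chan_vsync_irq(chan);

		return IRQ_HANDLED;
	}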

Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
---

 drivers/dma/xilinx/xilinx_dpdma.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

Comments

Tomi Valkeinen March 27, 2024, 12:27 p.m. UTC | #1
Hi,

On 08/03/2024 23:00, Sean Anderson wrote:
> xilinx_dpdma_chan_done_irq and xilinx_dpdma_chan_vsync_irq are always
> called with IRQs disabled from xilinx_dpdma_irq_handler. Therefore we
> don't need to save/restore the IRQ flags.

I think this is fine, but a few thoughts:

- Is spin_lock clearly faster than the irqsave variant, or is this a 
pointless optimization? It's safer to just use the irqsave variant, instead 
of making sure the code is always called from the expected contexts.
- Is this style documented/recommended anywhere? Going through the docs, I 
only found advice to use irqsave when mixing irq and non-irq contexts.
- Does this cause issues on PREEMPT_RT?

  Tomi

Sean Anderson March 28, 2024, 3 p.m. UTC | #2
On 3/27/24 08:27, Tomi Valkeinen wrote:
> Hi,
> 
> On 08/03/2024 23:00, Sean Anderson wrote:
>> xilinx_dpdma_chan_done_irq and xilinx_dpdma_chan_vsync_irq are always
>> called with IRQs disabled from xilinx_dpdma_irq_handler. Therefore we
>> don't need to save/restore the IRQ flags.
> 
> I think this is fine, but a few thoughts:
> 
> - Is spin_lock clearly faster than the irqsave variant, or is this a pointless optimization? It's safer to just use the irqsave variant, instead of making sure the code is always called from the expected contexts.

It's not an optimization. Technically this will save a few instructions,
but...

> - Is this style documented/recommended anywhere? Going through the docs, I only found advice to use irqsave when mixing irq and non-irq contexts.

The purpose is mainly to make it clear that this is meant to be called
in IRQ context. With irqsave, there's an implication that this could be
called in non-IRQ context, which it never is.
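
As a generic illustration of that convention (made-up example code, not
this driver's):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	/*
	 * Only ever called from a hardirq handler: interrupts are already
	 * disabled, so the plain variant both suffices and documents the
	 * expected context.
	 */
	static void only_from_irq(void)
	{
		spin_lock(&example_lock);
		/* ... update state shared with process context ... */
		spin_unlock(&example_lock);
	}

	/*
	 * May also be called from process context (or from a path whose
	 * IRQ state is unknown), so the flags must be saved and restored.
	 */
	static void from_any_context(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		/* ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}

That of course assumes the non-IRQ users of the lock keep disabling
interrupts (the irqsave/irq variants), as they must for any lock shared
with a hardirq handler.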

> - Does this cause issues on PREEMPT_RT?

Why would it?

--Sean

Tomi Valkeinen March 28, 2024, 4:37 p.m. UTC | #3
On 28/03/2024 17:00, Sean Anderson wrote:
> On 3/27/24 08:27, Tomi Valkeinen wrote:
>> Hi,
>>
>> On 08/03/2024 23:00, Sean Anderson wrote:
>>> xilinx_dpdma_chan_done_irq and xilinx_dpdma_chan_vsync_irq are always
>>> called with IRQs disabled from xilinx_dpdma_irq_handler. Therefore we
>>> don't need to save/restore the IRQ flags.
>>
>> I think this is fine, but a few thoughts:
>>
>> - Is spin_lock clearly faster than the irqsave variant, or is this a pointless optimization? It's safer to just use the irqsave variant, instead of making sure the code is always called from the expected contexts.
> 
> It's not an optimization. Technically this will save a few instructions,
> but...
> 
>> - Is this style documented/recommended anywhere? Going through the docs, I only found advice to use irqsave when mixing irq and non-irq contexts.
> 
> The purpose is mainly to make it clear that this is meant to be called
> in IRQ context. With irqsave, there's an implication that this could be
> called in non-IRQ context, which it never is.

Hmm, I see. Yes, I think that makes sense.

>> - Does this cause issues on PREEMPT_RT?
> 
> Why would it?

I was reading locktypes.rst and started wondering what it means if 
spinlocks are changed into sleeping locks. But thinking about it again, 
it doesn't matter, as the irq will still be masked when in IRQ context.

So:

Reviewed-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>

  Tomi

Patch

diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index eb0637d90342..36bd4825d389 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -1043,9 +1043,8 @@  static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
 static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
 {
 	struct xilinx_dpdma_tx_desc *active;
-	unsigned long flags;
 
-	spin_lock_irqsave(&chan->lock, flags);
+	spin_lock(&chan->lock);
 
 	xilinx_dpdma_debugfs_desc_done_irq(chan);
 
@@ -1057,7 +1056,7 @@  static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
 			 "chan%u: DONE IRQ with no active descriptor!\n",
 			 chan->id);
 
-	spin_unlock_irqrestore(&chan->lock, flags);
+	spin_unlock(&chan->lock);
 }
 
 /**
@@ -1072,10 +1071,9 @@  static void xilinx_dpdma_chan_vsync_irq(struct  xilinx_dpdma_chan *chan)
 {
 	struct xilinx_dpdma_tx_desc *pending;
 	struct xilinx_dpdma_sw_desc *sw_desc;
-	unsigned long flags;
 	u32 desc_id;
 
-	spin_lock_irqsave(&chan->lock, flags);
+	spin_lock(&chan->lock);
 
 	pending = chan->desc.pending;
 	if (!chan->running || !pending)
@@ -1108,7 +1106,7 @@  static void xilinx_dpdma_chan_vsync_irq(struct  xilinx_dpdma_chan *chan)
 	spin_unlock(&chan->vchan.lock);
 
 out:
-	spin_unlock_irqrestore(&chan->lock, flags);
+	spin_unlock(&chan->lock);
 }
 
 /**