
[-next] net: wwan: t7xx: use GFP_ATOMIC under spin lock in t7xx_cldma_gpd_set_next_ptr()

Message ID 20220518090738.2694556-1-yangyingliang@huawei.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series [-next] net: wwan: t7xx: use GFP_ATOMIC under spin lock in t7xx_cldma_gpd_set_next_ptr()

Checks

Context Check Description
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix warning Target tree name not specified in the subject
netdev/cover_letter success Single patches do not need cover letters
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers fail 2 blamed authors not CCed: ilpo.jarvinen@linux.intel.com ryazanov.s.a@gmail.com; 9 maintainers not CCed: johannes@sipsolutions.net chiranjeevi.rapolu@linux.intel.com linuxwwan@intel.com ilpo.jarvinen@linux.intel.com ryazanov.s.a@gmail.com pabeni@redhat.com edumazet@google.com matthias.bgg@gmail.com m.chetan.kumar@linux.intel.com
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning WARNING: line length of 95 exceeds 80 columns WARNING: line length of 98 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/tree_selection success Guessing tree name failed - patch did not apply

Commit Message

Yang Yingliang May 18, 2022, 9:07 a.m. UTC
t7xx_cldma_alloc_and_map_skb() is sometimes called with a spin lock
held, so add a parameter that tells it whether to use the GFP_ATOMIC
flag for the skb allocation.
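
The problematic call chain, sketched from the hunks below, is:

t7xx_cldma_clear_rxq()
  spin_lock_irqsave(&rxq->ring_lock, flags)
  t7xx_cldma_alloc_and_map_skb()
    __dev_alloc_skb(size, GFP_KERNEL)  <-- may sleep under spin lock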

Fixes: 39d439047f1d ("net: wwan: t7xx: Add control DMA interface")
Reported-by: Hulk Robot <hulkci@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

Comments

Loic Poulain May 18, 2022, 9:13 a.m. UTC | #1
Hi Yang,

On Wed, 18 May 2022 at 10:57, Yang Yingliang <yangyingliang@huawei.com> wrote:
>
> t7xx_cldma_alloc_and_map_skb() is sometimes called with a spin lock
> held, so add a parameter that tells it whether to use the GFP_ATOMIC
> flag for the skb allocation.
>
> Fixes: 39d439047f1d ("net: wwan: t7xx: Add control DMA interface")
> Reported-by: Hulk Robot <hulkci@huawei.com>
> Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
> ---
>  drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 13 ++++++++-----
>  1 file changed, 8 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
> index 0c52801ed0de..1fa9bb763831 100644
> --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
> +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
> @@ -91,9 +91,12 @@ static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_p
>  }
>
>  static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
> -                                       size_t size)
> +                                       size_t size, bool is_atomic)

Would be simpler to directly pass the gfp_mask as a parameter.
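
Something along these lines (an untested sketch, reusing the existing
names):

static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
					size_t size, gfp_t gfp_mask)
{
	/* gfp_mask comes from the caller: GFP_KERNEL normally,
	 * GFP_ATOMIC when the queue lock is held.
	 */
	req->skb = __dev_alloc_skb(size, gfp_mask);
	if (!req->skb)
		return -ENOMEM;

	/* DMA mapping of req->skb stays as it is today */
	...
}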


>  {
> -       req->skb = __dev_alloc_skb(size, GFP_KERNEL);
> +       if (is_atomic)
> +               req->skb = __dev_alloc_skb(size, GFP_ATOMIC);
> +       else
> +               req->skb = __dev_alloc_skb(size, GFP_KERNEL);
>         if (!req->skb)
>                 return -ENOMEM;
>
> @@ -174,7 +177,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
>                 spin_unlock_irqrestore(&queue->ring_lock, flags);
>                 req = queue->rx_refill;
>
> -               ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
> +               ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, false);
>                 if (ret)
>                         return ret;
>
> @@ -402,7 +405,7 @@ static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, s
>         if (!req->gpd)
>                 goto err_free_req;
>
> -       val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size);
> +       val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, false);
>         if (val)
>                 goto err_free_pool;
>
> @@ -801,7 +804,7 @@ static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
>                 if (req->skb)
>                         continue;
>
> -               ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size);
> +               ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, true);
>                 if (ret)
>                         break;
>
> --
> 2.25.1
>
Yang Yingliang May 19, 2022, 1:52 a.m. UTC | #2
Hi,

On 2022/5/18 17:13, Loic Poulain wrote:
> Hi Yang,
>
> On Wed, 18 May 2022 at 10:57, Yang Yingliang <yangyingliang@huawei.com> wrote:
>> t7xx_cldma_alloc_and_map_skb() is sometimes called with a spin lock
>> held, so add a parameter that tells it whether to use the GFP_ATOMIC
>> flag for the skb allocation.
>>
>> Fixes: 39d439047f1d ("net: wwan: t7xx: Add control DMA interface")
>> Reported-by: Hulk Robot <hulkci@huawei.com>
>> Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
>> ---
>>   drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 13 ++++++++-----
>>   1 file changed, 8 insertions(+), 5 deletions(-)
>>
>> diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
>> index 0c52801ed0de..1fa9bb763831 100644
>> --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
>> +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
>> @@ -91,9 +91,12 @@ static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_p
>>   }
>>
>>   static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
>> -                                       size_t size)
>> +                                       size_t size, bool is_atomic)
> Would be simpler to directly pass the gfp_mask as a parameter.
Yes, I will send a v2 with this change later.
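
For v2 the call sites would then read roughly (sketch):

	ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);

and in t7xx_cldma_clear_rxq(), where the ring lock is held:

	ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);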

Thanks,
Yang
>
>
>>   {
>> -       req->skb = __dev_alloc_skb(size, GFP_KERNEL);
>> +       if (is_atomic)
>> +               req->skb = __dev_alloc_skb(size, GFP_ATOMIC);
>> +       else
>> +               req->skb = __dev_alloc_skb(size, GFP_KERNEL);
>>          if (!req->skb)
>>                  return -ENOMEM;
>>
>> @@ -174,7 +177,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
>>                  spin_unlock_irqrestore(&queue->ring_lock, flags);
>>                  req = queue->rx_refill;
>>
>> -               ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
>> +               ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, false);
>>                  if (ret)
>>                          return ret;
>>
>> @@ -402,7 +405,7 @@ static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, s
>>          if (!req->gpd)
>>                  goto err_free_req;
>>
>> -       val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size);
>> +       val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, false);
>>          if (val)
>>                  goto err_free_pool;
>>
>> @@ -801,7 +804,7 @@ static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
>>                  if (req->skb)
>>                          continue;
>>
>> -               ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size);
>> +               ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, true);
>>                  if (ret)
>>                          break;
>>
>> --
>> 2.25.1
>>
> .

Patch

diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index 0c52801ed0de..1fa9bb763831 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -91,9 +91,12 @@  static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_p
 }
 
 static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
-					size_t size)
+					size_t size, bool is_atomic)
 {
-	req->skb = __dev_alloc_skb(size, GFP_KERNEL);
+	if (is_atomic)
+		req->skb = __dev_alloc_skb(size, GFP_ATOMIC);
+	else
+		req->skb = __dev_alloc_skb(size, GFP_KERNEL);
 	if (!req->skb)
 		return -ENOMEM;
 
@@ -174,7 +177,7 @@  static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
 		spin_unlock_irqrestore(&queue->ring_lock, flags);
 		req = queue->rx_refill;
 
-		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
+		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, false);
 		if (ret)
 			return ret;
 
@@ -402,7 +405,7 @@  static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, s
 	if (!req->gpd)
 		goto err_free_req;
 
-	val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size);
+	val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, false);
 	if (val)
 		goto err_free_pool;
 
@@ -801,7 +804,7 @@  static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
 		if (req->skb)
 			continue;
 
-		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size);
+		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, true);
 		if (ret)
 			break;