
[12/21] dmaengine: stm32-dma: Annotate struct stm32_dma_desc with __counted_by

Message ID 20230817235859.49846-12-keescook@chromium.org (mailing list archive)
State Mainlined
Commit 195e46df2d996ff4bbf624891b1d3ae8ea9f315d
Series dmaengine: Annotate with __counted_by

Commit Message

Kees Cook Aug. 17, 2023, 11:58 p.m. UTC
Prepare for the coming implementation by GCC and Clang of the __counted_by
attribute. Flexible array members annotated with __counted_by can have
their accesses bounds-checked at run-time via CONFIG_UBSAN_BOUNDS
(for array indexing) and CONFIG_FORTIFY_SOURCE (for strcpy/memcpy-family
functions).

As found with Coccinelle[1], add __counted_by for struct stm32_dma_desc.
Additionally, since the element count member must be set before accessing
the annotated flexible array member, move its initialization earlier.

[1] https://github.com/kees/kernel-tools/blob/trunk/coccinelle/examples/counted_by.cocci
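
For readers unfamiliar with the annotation, the following is a minimal
sketch of the allocation pattern this series applies: allocate with
struct_size(), assign the count member immediately, and only then touch
the flexible array. The example_* names are hypothetical stand-ins for
illustration and are not code from this patch.

/*
 * Minimal sketch of the __counted_by pattern; example_sg_req,
 * example_desc and example_alloc are hypothetical names standing in
 * for the driver's real structures.
 */
#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>		/* kzalloc(), GFP_NOWAIT */

struct example_sg_req {
	u32 len;		/* stand-in for stm32_dma_sg_req */
};

struct example_desc {
	u32 num_sgs;
	struct example_sg_req sg_req[] __counted_by(num_sgs);
};

static struct example_desc *example_alloc(u32 sg_len)
{
	struct example_desc *desc;

	desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
	if (!desc)
		return NULL;

	/*
	 * Set the counter before any sg_req[] element is touched, so
	 * CONFIG_UBSAN_BOUNDS and CONFIG_FORTIFY_SOURCE see a valid
	 * bound for the flexible array member.
	 */
	desc->num_sgs = sg_len;

	return desc;
}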

Cc: Vinod Koul <vkoul@kernel.org>
Cc: Maxime Coquelin <mcoquelin.stm32@gmail.com>
Cc: Alexandre Torgue <alexandre.torgue@foss.st.com>
Cc: dmaengine@vger.kernel.org
Cc: linux-stm32@st-md-mailman.stormreply.com
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: Kees Cook <keescook@chromium.org>
---
 drivers/dma/stm32-dma.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

Comments

Gustavo A. R. Silva Aug. 18, 2023, 1:25 a.m. UTC | #1
On 8/17/23 17:58, Kees Cook wrote:
> Prepare for the coming implementation by GCC and Clang of the __counted_by
> attribute. Flexible array members annotated with __counted_by can have
> their accesses bounds-checked at run-time via CONFIG_UBSAN_BOUNDS
> (for array indexing) and CONFIG_FORTIFY_SOURCE (for strcpy/memcpy-family
> functions).
> 
> As found with Coccinelle[1], add __counted_by for struct stm32_dma_desc.
> Additionally, since the element count member must be set before accessing
> the annotated flexible array member, move its initialization earlier.
> 
> [1] https://github.com/kees/kernel-tools/blob/trunk/coccinelle/examples/counted_by.cocci
> 
> Cc: Vinod Koul <vkoul@kernel.org>
> Cc: Maxime Coquelin <mcoquelin.stm32@gmail.com>
> Cc: Alexandre Torgue <alexandre.torgue@foss.st.com>
> Cc: dmaengine@vger.kernel.org
> Cc: linux-stm32@st-md-mailman.stormreply.com
> Cc: linux-arm-kernel@lists.infradead.org
> Signed-off-by: Kees Cook <keescook@chromium.org>

Reviewed-by: Gustavo A. R. Silva <gustavoars@kernel.org>

Thanks
--
Gustavo

> ---
>   drivers/dma/stm32-dma.c | 11 ++++-------
>   1 file changed, 4 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
> index 5c36811aa134..a732b3807b11 100644
> --- a/drivers/dma/stm32-dma.c
> +++ b/drivers/dma/stm32-dma.c
> @@ -191,7 +191,7 @@ struct stm32_dma_desc {
>   	struct virt_dma_desc vdesc;
>   	bool cyclic;
>   	u32 num_sgs;
> -	struct stm32_dma_sg_req sg_req[];
> +	struct stm32_dma_sg_req sg_req[] __counted_by(num_sgs);
>   };
>   
>   /**
> @@ -1105,6 +1105,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
>   	desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
>   	if (!desc)
>   		return NULL;
> +	desc->num_sgs = sg_len;
>   
>   	/* Set peripheral flow controller */
>   	if (chan->dma_sconfig.device_fc)
> @@ -1141,8 +1142,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
>   			desc->sg_req[i].chan_reg.dma_sm1ar += sg_dma_len(sg);
>   		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
>   	}
> -
> -	desc->num_sgs = sg_len;
>   	desc->cyclic = false;
>   
>   	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
> @@ -1216,6 +1215,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
>   	desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
>   	if (!desc)
>   		return NULL;
> +	desc->num_sgs = num_periods;
>   
>   	for (i = 0; i < num_periods; i++) {
>   		desc->sg_req[i].len = period_len;
> @@ -1232,8 +1232,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
>   		if (!chan->trig_mdma)
>   			buf_addr += period_len;
>   	}
> -
> -	desc->num_sgs = num_periods;
>   	desc->cyclic = true;
>   
>   	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
> @@ -1254,6 +1252,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
>   	desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
>   	if (!desc)
>   		return NULL;
> +	desc->num_sgs = num_sgs;
>   
>   	threshold = chan->threshold;
>   
> @@ -1283,8 +1282,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
>   		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
>   		desc->sg_req[i].len = xfer_count;
>   	}
> -
> -	desc->num_sgs = num_sgs;
>   	desc->cyclic = false;
>   
>   	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

Patch

diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 5c36811aa134..a732b3807b11 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -191,7 +191,7 @@  struct stm32_dma_desc {
 	struct virt_dma_desc vdesc;
 	bool cyclic;
 	u32 num_sgs;
-	struct stm32_dma_sg_req sg_req[];
+	struct stm32_dma_sg_req sg_req[] __counted_by(num_sgs);
 };
 
 /**
@@ -1105,6 +1105,7 @@  static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
 	desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
 	if (!desc)
 		return NULL;
+	desc->num_sgs = sg_len;
 
 	/* Set peripheral flow controller */
 	if (chan->dma_sconfig.device_fc)
@@ -1141,8 +1142,6 @@  static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
 			desc->sg_req[i].chan_reg.dma_sm1ar += sg_dma_len(sg);
 		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
 	}
-
-	desc->num_sgs = sg_len;
 	desc->cyclic = false;
 
 	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
@@ -1216,6 +1215,7 @@  static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
 	desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
 	if (!desc)
 		return NULL;
+	desc->num_sgs = num_periods;
 
 	for (i = 0; i < num_periods; i++) {
 		desc->sg_req[i].len = period_len;
@@ -1232,8 +1232,6 @@  static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
 		if (!chan->trig_mdma)
 			buf_addr += period_len;
 	}
-
-	desc->num_sgs = num_periods;
 	desc->cyclic = true;
 
 	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
@@ -1254,6 +1252,7 @@  static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
 	desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
 	if (!desc)
 		return NULL;
+	desc->num_sgs = num_sgs;
 
 	threshold = chan->threshold;
 
@@ -1283,8 +1282,6 @@  static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
 		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
 		desc->sg_req[i].len = xfer_count;
 	}
-
-	desc->num_sgs = num_sgs;
 	desc->cyclic = false;
 
 	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);