diff mbox series

[v2] drm/panthor: Fix access to uninitialized variable in tick_ctx_cleanup()

Message ID 20240930163742.87036-1-boris.brezillon@collabora.com (mailing list archive)
State New, archived
Headers show
Series [v2] drm/panthor: Fix access to uninitialized variable in tick_ctx_cleanup() | expand

Commit Message

Boris Brezillon Sept. 30, 2024, 4:37 p.m. UTC
The group variable can't be used to retrieve ptdev in our second loop:
once the first list_for_each_entry_safe() iteration has run to completion,
group no longer points to a valid group object — it is a bogus pointer
derived from the list head itself. Get the ptdev object from the
scheduler instead.

Cc: <stable@vger.kernel.org>
Fixes: d72f049087d4 ("drm/panthor: Allow driver compilation")
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Julia Lawall <julia.lawall@inria.fr>
Closes: https://lore.kernel.org/r/202409302306.UDikqa03-lkp@intel.com/
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
---
 drivers/gpu/drm/panthor/panthor_sched.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

Comments

Liviu Dudau Sept. 30, 2024, 5:39 p.m. UTC | #1
On Mon, Sep 30, 2024 at 06:37:42PM +0200, Boris Brezillon wrote:
> The group variable can't be used to retrieve ptdev in our second loop,
> because it points to the previously iterated list_head, not a valid
> group. Get the ptdev object from the scheduler instead.
> 
> Cc: <stable@vger.kernel.org>
> Fixes: d72f049087d4 ("drm/panthor: Allow driver compilation")
> Reported-by: kernel test robot <lkp@intel.com>
> Reported-by: Julia Lawall <julia.lawall@inria.fr>
> Closes: https://lore.kernel.org/r/202409302306.UDikqa03-lkp@intel.com/
> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>

Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>

Best regards,
Liviu

> ---
>  drivers/gpu/drm/panthor/panthor_sched.c | 5 +++--
>  1 file changed, 3 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index 201d5e7a921e..24ff91c084e4 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -2052,6 +2052,7 @@ static void
>  tick_ctx_cleanup(struct panthor_scheduler *sched,
>  		 struct panthor_sched_tick_ctx *ctx)
>  {
> +	struct panthor_device *ptdev = sched->ptdev;
>  	struct panthor_group *group, *tmp;
>  	u32 i;
>  
> @@ -2060,7 +2061,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
>  			/* If everything went fine, we should only have groups
>  			 * to be terminated in the old_groups lists.
>  			 */
> -			drm_WARN_ON(&group->ptdev->base, !ctx->csg_upd_failed_mask &&
> +			drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
>  				    group_can_run(group));
>  
>  			if (!group_can_run(group)) {
> @@ -2083,7 +2084,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
>  		/* If everything went fine, the groups to schedule lists should
>  		 * be empty.
>  		 */
> -		drm_WARN_ON(&group->ptdev->base,
> +		drm_WARN_ON(&ptdev->base,
>  			    !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
>  
>  		list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
> -- 
> 2.46.0
>
Boris Brezillon Oct. 1, 2024, 4:44 p.m. UTC | #2
On Mon, 30 Sep 2024 18:37:42 +0200
Boris Brezillon <boris.brezillon@collabora.com> wrote:

> The group variable can't be used to retrieve ptdev in our second loop,
> because it points to the previously iterated list_head, not a valid
> group. Get the ptdev object from the scheduler instead.
> 
> Cc: <stable@vger.kernel.org>
> Fixes: d72f049087d4 ("drm/panthor: Allow driver compilation")
> Reported-by: kernel test robot <lkp@intel.com>
> Reported-by: Julia Lawall <julia.lawall@inria.fr>
> Closes: https://lore.kernel.org/r/202409302306.UDikqa03-lkp@intel.com/
> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>

Queued to drm-misc-fixes.

> ---
>  drivers/gpu/drm/panthor/panthor_sched.c | 5 +++--
>  1 file changed, 3 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index 201d5e7a921e..24ff91c084e4 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -2052,6 +2052,7 @@ static void
>  tick_ctx_cleanup(struct panthor_scheduler *sched,
>  		 struct panthor_sched_tick_ctx *ctx)
>  {
> +	struct panthor_device *ptdev = sched->ptdev;
>  	struct panthor_group *group, *tmp;
>  	u32 i;
>  
> @@ -2060,7 +2061,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
>  			/* If everything went fine, we should only have groups
>  			 * to be terminated in the old_groups lists.
>  			 */
> -			drm_WARN_ON(&group->ptdev->base, !ctx->csg_upd_failed_mask &&
> +			drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
>  				    group_can_run(group));
>  
>  			if (!group_can_run(group)) {
> @@ -2083,7 +2084,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
>  		/* If everything went fine, the groups to schedule lists should
>  		 * be empty.
>  		 */
> -		drm_WARN_ON(&group->ptdev->base,
> +		drm_WARN_ON(&ptdev->base,
>  			    !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
>  
>  		list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
diff mbox series

Patch

diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 201d5e7a921e..24ff91c084e4 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -2052,6 +2052,7 @@  static void
 tick_ctx_cleanup(struct panthor_scheduler *sched,
 		 struct panthor_sched_tick_ctx *ctx)
 {
+	struct panthor_device *ptdev = sched->ptdev;
 	struct panthor_group *group, *tmp;
 	u32 i;
 
@@ -2060,7 +2061,7 @@  tick_ctx_cleanup(struct panthor_scheduler *sched,
 			/* If everything went fine, we should only have groups
 			 * to be terminated in the old_groups lists.
 			 */
-			drm_WARN_ON(&group->ptdev->base, !ctx->csg_upd_failed_mask &&
+			drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
 				    group_can_run(group));
 
 			if (!group_can_run(group)) {
@@ -2083,7 +2084,7 @@  tick_ctx_cleanup(struct panthor_scheduler *sched,
 		/* If everything went fine, the groups to schedule lists should
 		 * be empty.
 		 */
-		drm_WARN_ON(&group->ptdev->base,
+		drm_WARN_ON(&ptdev->base,
 			    !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
 
 		list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {