@@ -233,6 +233,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_file_priv *file_priv = file->driver_priv;
struct drm_panfrost_submit *args = data;
struct drm_syncobj *sync_out = NULL;
struct panfrost_job *job;
@@ -262,12 +263,12 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
job->jc = args->jc;
job->requirements = args->requirements;
job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
- job->file_priv = file->driver_priv;
+ job->mmu = file_priv->mmu;
slot = panfrost_job_get_slot(job);
ret = drm_sched_job_init(&job->base,
- &job->file_priv->sched_entity[slot],
+ &file_priv->sched_entity[slot],
NULL);
if (ret)
goto out_put_job;
@@ -201,7 +201,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
return;
}
- cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
+ cfg = panfrost_mmu_as_get(pfdev, job->mmu);
job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
@@ -435,7 +435,7 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev,
job->jc = 0;
}
- panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
+ panfrost_mmu_as_put(pfdev, job->mmu);
panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
if (signal_fence)
@@ -456,7 +456,7 @@ static void panfrost_job_handle_done(struct panfrost_device *pfdev,
* happen when we receive the DONE interrupt while doing a GPU reset).
*/
job->jc = 0;
- panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
+ panfrost_mmu_as_put(pfdev, job->mmu);
panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
dma_fence_signal_locked(job->done_fence);
@@ -17,7 +17,7 @@ struct panfrost_job {
struct kref refcount;
struct panfrost_device *pfdev;
- struct panfrost_file_priv *file_priv;
+ struct panfrost_mmu *mmu;
/* Fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *done_fence;
For a while now an MMU context has been allowed to outlive its
corresponding panfrost_priv, but the job structure still references
panfrost_priv to get hold of the MMU context. If panfrost_priv has been
freed, this is a use-after-free which I've been able to trigger,
resulting in a splat.

To fix this, drop the reference to panfrost_priv in the job structure
and instead store a direct reference to the MMU structure, which is
what's actually needed.

Fixes: 7fdc48cc63a3 ("drm/panfrost: Make sure MMU context lifetime is not bound to panfrost_priv")
Signed-off-by: Steven Price <steven.price@arm.com>
---
 drivers/gpu/drm/panfrost/panfrost_drv.c | 5 +++--
 drivers/gpu/drm/panfrost/panfrost_job.c | 6 +++---
 drivers/gpu/drm/panfrost/panfrost_job.h | 2 +-
 3 files changed, 7 insertions(+), 6 deletions(-)
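
As a standalone illustration of the lifetime pattern involved (plain C
with hypothetical names, not taken from the driver): a long-lived
consumer should hold its own pointer/reference to the refcounted object
it actually needs, rather than reaching through a shorter-lived owner
that may be freed first.

/* lifetime_sketch.c - illustrative only; hypothetical names, not driver code. */
#include <stdio.h>
#include <stdlib.h>

struct ctx {            /* stands in for a refcounted context (e.g. an MMU context) */
	int refcount;
};

struct owner {          /* stands in for shorter-lived per-file private data */
	struct ctx *ctx;
};

struct job {
	/*
	 * Holding 'struct owner *' here would become a use-after-free once
	 * the owner is freed, even though the context itself is still alive.
	 * Keep a pointer to the context directly instead.
	 */
	struct ctx *ctx;
};

static struct ctx *ctx_get(struct ctx *c)
{
	c->refcount++;
	return c;
}

static void ctx_put(struct ctx *c)
{
	if (--c->refcount == 0)
		free(c);
}

int main(void)
{
	struct ctx *c = calloc(1, sizeof(*c));
	struct owner *o = calloc(1, sizeof(*o));
	struct job j;

	c->refcount = 1;
	o->ctx = c;

	/* The job takes its own reference to the context... */
	j.ctx = ctx_get(o->ctx);

	/* ...so the owner can go away while the job is still in flight. */
	ctx_put(o->ctx);
	free(o);

	/* Still safe: the job never dereferences the freed owner. */
	printf("context refcount: %d\n", j.ctx->refcount);
	ctx_put(j.ctx);

	return 0;
}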