diff mbox series

[v3,7/8] drm/panfrost: Implement generic DRM object RSS reporting function

Message ID 20230905184533.959171-8-adrian.larumbe@collabora.com (mailing list archive)
State New, archived
Headers show
Series Add fdinfo support to Panfrost | expand

Commit Message

Adrián Larumbe Sept. 5, 2023, 6:45 p.m. UTC
A BO's RSS is updated every time new pages are allocated on demand and mapped
for the object in the GPU page fault IRQ handler, but only for heap buffers.
The reason this is unnecessary for non-heap buffers is that they are mapped
onto the GPU's VA space and backed by physical memory in their entirety at
BO creation time.

This calculation is unnecessary for imported PRIME objects, since heap
buffers cannot be exported by our driver, and the actual BO RSS size is the
one reported in its attached dmabuf structure.

Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
---
 drivers/gpu/drm/panfrost/panfrost_gem.c | 14 ++++++++++++++
 drivers/gpu/drm/panfrost/panfrost_gem.h |  5 +++++
 drivers/gpu/drm/panfrost/panfrost_mmu.c | 12 ++++++++----
 3 files changed, 27 insertions(+), 4 deletions(-)

Comments

Boris Brezillon Sept. 6, 2023, 8:01 a.m. UTC | #1
On Tue,  5 Sep 2023 19:45:23 +0100
Adrián Larumbe <adrian.larumbe@collabora.com> wrote:

> BO's RSS is updated every time new pages are allocated on demand and mapped
> for the object at GPU page fault's IRQ handler, but only for heap buffers.
> The reason this is unnecessary for non-heap buffers is that they are mapped
> onto the GPU's VA space and backed by physical memory in their entirety at
> BO creation time.
> 
> This calculation is unnecessary for imported PRIME objects, since heap
> buffers cannot be exported by our driver, and the actual BO RSS size is the
> one reported in its attached dmabuf structure.
> 
> Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
> ---
>  drivers/gpu/drm/panfrost/panfrost_gem.c | 14 ++++++++++++++
>  drivers/gpu/drm/panfrost/panfrost_gem.h |  5 +++++
>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 12 ++++++++----
>  3 files changed, 27 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
> index 7d8f83d20539..cb92c0ed7615 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_gem.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
> @@ -208,6 +208,19 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
>  	return res;
>  }
>  
> +static size_t panfrost_gem_rss(struct drm_gem_object *obj)
> +{
> +	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
> +
> +	if (bo->is_heap)
> +		return bo->heap_rss_size;
> +	else if (bo->base.pages) {
> +		WARN_ON(bo->heap_rss_size);
> +		return bo->base.base.size;
> +	} else
> +		return 0;

Nit: please add brackets on all conditional blocks, even if only the
second one needs it.

> +}
> +
>  static const struct drm_gem_object_funcs panfrost_gem_funcs = {
>  	.free = panfrost_gem_free_object,
>  	.open = panfrost_gem_open,
> @@ -220,6 +233,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
>  	.vunmap = drm_gem_shmem_object_vunmap,
>  	.mmap = drm_gem_shmem_object_mmap,
>  	.status = panfrost_gem_status,
> +	.rss = panfrost_gem_rss,
>  	.vm_ops = &drm_gem_shmem_vm_ops,
>  };
>  
> diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
> index ad2877eeeccd..13c0a8149c3a 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_gem.h
> +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
> @@ -36,6 +36,11 @@ struct panfrost_gem_object {
>  	 */
>  	atomic_t gpu_usecount;
>  
> +	/*
> +	 * Object chunk size currently mapped onto physical memory
> +	 */
> +	size_t heap_rss_size;
> +
>  	bool noexec		:1;
>  	bool is_heap		:1;
>  };
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index d54d4e7b2195..67c206124781 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -285,17 +285,19 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
>  	pm_runtime_put_autosuspend(pfdev->dev);
>  }
>  
> -static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
> +static size_t mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
>  		      u64 iova, int prot, struct sg_table *sgt)
>  {
>  	unsigned int count;
>  	struct scatterlist *sgl;
>  	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
>  	u64 start_iova = iova;
> +	size_t total = 0;
>  
>  	for_each_sgtable_dma_sg(sgt, sgl, count) {
>  		unsigned long paddr = sg_dma_address(sgl);
>  		size_t len = sg_dma_len(sgl);
> +		total += len;
>  
>  		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
>  
> @@ -315,7 +317,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
>  
>  	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
>  
> -	return 0;
> +	return total;
>  }
>  
>  int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
> @@ -447,6 +449,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>  	pgoff_t page_offset;
>  	struct sg_table *sgt;
>  	struct page **pages;
> +	size_t mapped_size;
>  
>  	bomapping = addr_to_mapping(pfdev, as, addr);
>  	if (!bomapping)
> @@ -518,10 +521,11 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>  	if (ret)
>  		goto err_map;
>  
> -	mmu_map_sg(pfdev, bomapping->mmu, addr,
> -		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
> +	mapped_size = mmu_map_sg(pfdev, bomapping->mmu, addr,
> +				 IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
>  
>  	bomapping->active = true;
> +	bo->heap_rss_size += mapped_size;

The alloc-on-fault granularity is set static (2MB), so no need to
make mmu_map_sg() return the mapped size, we can just do += SZ_2M if
things worked.

>  
>  	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
>
Adrián Larumbe Sept. 9, 2023, 4:42 p.m. UTC | #2
On 06.09.2023 10:01, Boris Brezillon wrote:
>On Tue,  5 Sep 2023 19:45:23 +0100
>Adrián Larumbe <adrian.larumbe@collabora.com> wrote:
>
>> BO's RSS is updated every time new pages are allocated on demand and mapped
>> for the object at GPU page fault's IRQ handler, but only for heap buffers.
>> The reason this is unnecessary for non-heap buffers is that they are mapped
>> onto the GPU's VA space and backed by physical memory in their entirety at
>> BO creation time.
>> 
>> This calculation is unnecessary for imported PRIME objects, since heap
>> buffers cannot be exported by our driver, and the actual BO RSS size is the
>> one reported in its attached dmabuf structure.
>> 
>> Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
>> ---
>>  drivers/gpu/drm/panfrost/panfrost_gem.c | 14 ++++++++++++++
>>  drivers/gpu/drm/panfrost/panfrost_gem.h |  5 +++++
>>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 12 ++++++++----
>>  3 files changed, 27 insertions(+), 4 deletions(-)
>> 
>> diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
>> index 7d8f83d20539..cb92c0ed7615 100644
>> --- a/drivers/gpu/drm/panfrost/panfrost_gem.c
>> +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
>> @@ -208,6 +208,19 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
>>  	return res;
>>  }
>>  
>> +static size_t panfrost_gem_rss(struct drm_gem_object *obj)
>> +{
>> +	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
>> +
>> +	if (bo->is_heap)
>> +		return bo->heap_rss_size;
>> +	else if (bo->base.pages) {
>> +		WARN_ON(bo->heap_rss_size);
>> +		return bo->base.base.size;
>> +	} else
>> +		return 0;
>
>Nit: please add brackets on all conditional blocks, even if only the
>second one needs it.
>
>> +}
>> +
>>  static const struct drm_gem_object_funcs panfrost_gem_funcs = {
>>  	.free = panfrost_gem_free_object,
>>  	.open = panfrost_gem_open,
>> @@ -220,6 +233,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
>>  	.vunmap = drm_gem_shmem_object_vunmap,
>>  	.mmap = drm_gem_shmem_object_mmap,
>>  	.status = panfrost_gem_status,
>> +	.rss = panfrost_gem_rss,
>>  	.vm_ops = &drm_gem_shmem_vm_ops,
>>  };
>>  
>> diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
>> index ad2877eeeccd..13c0a8149c3a 100644
>> --- a/drivers/gpu/drm/panfrost/panfrost_gem.h
>> +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
>> @@ -36,6 +36,11 @@ struct panfrost_gem_object {
>>  	 */
>>  	atomic_t gpu_usecount;
>>  
>> +	/*
>> +	 * Object chunk size currently mapped onto physical memory
>> +	 */
>> +	size_t heap_rss_size;
>> +
>>  	bool noexec		:1;
>>  	bool is_heap		:1;
>>  };
>> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
>> index d54d4e7b2195..67c206124781 100644
>> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
>> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
>> @@ -285,17 +285,19 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
>>  	pm_runtime_put_autosuspend(pfdev->dev);
>>  }
>>  
>> -static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
>> +static size_t mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
>>  		      u64 iova, int prot, struct sg_table *sgt)
>>  {
>>  	unsigned int count;
>>  	struct scatterlist *sgl;
>>  	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
>>  	u64 start_iova = iova;
>> +	size_t total = 0;
>>  
>>  	for_each_sgtable_dma_sg(sgt, sgl, count) {
>>  		unsigned long paddr = sg_dma_address(sgl);
>>  		size_t len = sg_dma_len(sgl);
>> +		total += len;
>>  
>>  		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
>>  
>> @@ -315,7 +317,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
>>  
>>  	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
>>  
>> -	return 0;
>> +	return total;
>>  }
>>  
>>  int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
>> @@ -447,6 +449,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>>  	pgoff_t page_offset;
>>  	struct sg_table *sgt;
>>  	struct page **pages;
>> +	size_t mapped_size;
>>  
>>  	bomapping = addr_to_mapping(pfdev, as, addr);
>>  	if (!bomapping)
>> @@ -518,10 +521,11 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>>  	if (ret)
>>  		goto err_map;
>>  
>> -	mmu_map_sg(pfdev, bomapping->mmu, addr,
>> -		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
>> +	mapped_size = mmu_map_sg(pfdev, bomapping->mmu, addr,
>> +				 IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
>>  
>>  	bomapping->active = true;
>> +	bo->heap_rss_size += mapped_size;
>
>The alloc-on-fault granularity is set static (2MB), so no need to
>make mmu_map_sg() return the mapped size, we can just do += SZ_2M if
>things worked.

At the moment mmu_map_sg is treated as though it always succeeds in mapping the
page. Would it be alright if I changed it so that we take into account the
unlikely case that ops->map_pages might fail?
Something like this: https://gitlab.collabora.com/-/snippets/323

>>  
>>  	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
>>
Boris Brezillon Sept. 11, 2023, 7:31 a.m. UTC | #3
On Sat, 9 Sep 2023 17:42:02 +0100
Adrián Larumbe <adrian.larumbe@collabora.com> wrote:

> On 06.09.2023 10:01, Boris Brezillon wrote:
> >On Tue,  5 Sep 2023 19:45:23 +0100
> >Adrián Larumbe <adrian.larumbe@collabora.com> wrote:
> >  
> >> BO's RSS is updated every time new pages are allocated on demand and mapped
> >> for the object at GPU page fault's IRQ handler, but only for heap buffers.
> >> The reason this is unnecessary for non-heap buffers is that they are mapped
> >> onto the GPU's VA space and backed by physical memory in their entirety at
> >> BO creation time.
> >> 
> >> This calculation is unnecessary for imported PRIME objects, since heap
> >> buffers cannot be exported by our driver, and the actual BO RSS size is the
> >> one reported in its attached dmabuf structure.
> >> 
> >> Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
> >> ---
> >>  drivers/gpu/drm/panfrost/panfrost_gem.c | 14 ++++++++++++++
> >>  drivers/gpu/drm/panfrost/panfrost_gem.h |  5 +++++
> >>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 12 ++++++++----
> >>  3 files changed, 27 insertions(+), 4 deletions(-)
> >> 
> >> diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
> >> index 7d8f83d20539..cb92c0ed7615 100644
> >> --- a/drivers/gpu/drm/panfrost/panfrost_gem.c
> >> +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
> >> @@ -208,6 +208,19 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
> >>  	return res;
> >>  }
> >>  
> >> +static size_t panfrost_gem_rss(struct drm_gem_object *obj)
> >> +{
> >> +	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
> >> +
> >> +	if (bo->is_heap)
> >> +		return bo->heap_rss_size;
> >> +	else if (bo->base.pages) {
> >> +		WARN_ON(bo->heap_rss_size);
> >> +		return bo->base.base.size;
> >> +	} else
> >> +		return 0;  
> >
> >Nit: please add brackets on all conditional blocks, even if only the
> >second one needs it.
> >  
> >> +}
> >> +
> >>  static const struct drm_gem_object_funcs panfrost_gem_funcs = {
> >>  	.free = panfrost_gem_free_object,
> >>  	.open = panfrost_gem_open,
> >> @@ -220,6 +233,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
> >>  	.vunmap = drm_gem_shmem_object_vunmap,
> >>  	.mmap = drm_gem_shmem_object_mmap,
> >>  	.status = panfrost_gem_status,
> >> +	.rss = panfrost_gem_rss,
> >>  	.vm_ops = &drm_gem_shmem_vm_ops,
> >>  };
> >>  
> >> diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
> >> index ad2877eeeccd..13c0a8149c3a 100644
> >> --- a/drivers/gpu/drm/panfrost/panfrost_gem.h
> >> +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
> >> @@ -36,6 +36,11 @@ struct panfrost_gem_object {
> >>  	 */
> >>  	atomic_t gpu_usecount;
> >>  
> >> +	/*
> >> +	 * Object chunk size currently mapped onto physical memory
> >> +	 */
> >> +	size_t heap_rss_size;
> >> +
> >>  	bool noexec		:1;
> >>  	bool is_heap		:1;
> >>  };
> >> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> >> index d54d4e7b2195..67c206124781 100644
> >> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> >> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> >> @@ -285,17 +285,19 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
> >>  	pm_runtime_put_autosuspend(pfdev->dev);
> >>  }
> >>  
> >> -static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
> >> +static size_t mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
> >>  		      u64 iova, int prot, struct sg_table *sgt)
> >>  {
> >>  	unsigned int count;
> >>  	struct scatterlist *sgl;
> >>  	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
> >>  	u64 start_iova = iova;
> >> +	size_t total = 0;
> >>  
> >>  	for_each_sgtable_dma_sg(sgt, sgl, count) {
> >>  		unsigned long paddr = sg_dma_address(sgl);
> >>  		size_t len = sg_dma_len(sgl);
> >> +		total += len;
> >>  
> >>  		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
> >>  
> >> @@ -315,7 +317,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
> >>  
> >>  	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
> >>  
> >> -	return 0;
> >> +	return total;
> >>  }
> >>  
> >>  int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
> >> @@ -447,6 +449,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
> >>  	pgoff_t page_offset;
> >>  	struct sg_table *sgt;
> >>  	struct page **pages;
> >> +	size_t mapped_size;
> >>  
> >>  	bomapping = addr_to_mapping(pfdev, as, addr);
> >>  	if (!bomapping)
> >> @@ -518,10 +521,11 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
> >>  	if (ret)
> >>  		goto err_map;
> >>  
> >> -	mmu_map_sg(pfdev, bomapping->mmu, addr,
> >> -		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
> >> +	mapped_size = mmu_map_sg(pfdev, bomapping->mmu, addr,
> >> +				 IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
> >>  
> >>  	bomapping->active = true;
> >> +	bo->heap_rss_size += mapped_size;  
> >
> >The alloc-on-fault granularity is set static (2MB), so no need to
> >make mmu_map_sg() return the mapped size, we can just do += SZ_2M if
> >things worked.  
> 
> At the moment mmu_map_sg is treated as though it always succeeds in mapping the
> page. Would it be alright if I changed it so that we take into account the
> unlikely case that ops->map_pages might fail?

Yep, that would probably be a good thing to gracefully handle
allocation failures happening in ops->map_pages(), but I'd do that in a
follow-up patch, because that's orthogonal to the fdinfo stuff.

> Something like this: https://gitlab.collabora.com/-/snippets/323

Nit: I would change the mmu_unmap_range() prototype for something like:

static void mmu_unmap_range(struct panfrost_mmu *mmu,
			    u64 iova, size_t len);

No need for this is_heap argument if you pass rss_size to
mmu_unmap_range() for heap BOs.

Note that ops->unmap_pages() can fail on mem allocation too, when an
unmap triggers a 2M -> 4k page table split. But I don't think this can
happen in panfrost, because, for regular BOs, we always map/unmap the
whole BO, and for heaps, we map/unmap 2M at a time.
diff mbox series

Patch

diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 7d8f83d20539..cb92c0ed7615 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -208,6 +208,19 @@  static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
 	return res;
 }
 
+static size_t panfrost_gem_rss(struct drm_gem_object *obj)
+{
+	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+
+	if (bo->is_heap)
+		return bo->heap_rss_size;
+	else if (bo->base.pages) {
+		WARN_ON(bo->heap_rss_size);
+		return bo->base.base.size;
+	} else
+		return 0;
+}
+
 static const struct drm_gem_object_funcs panfrost_gem_funcs = {
 	.free = panfrost_gem_free_object,
 	.open = panfrost_gem_open,
@@ -220,6 +233,7 @@  static const struct drm_gem_object_funcs panfrost_gem_funcs = {
 	.vunmap = drm_gem_shmem_object_vunmap,
 	.mmap = drm_gem_shmem_object_mmap,
 	.status = panfrost_gem_status,
+	.rss = panfrost_gem_rss,
 	.vm_ops = &drm_gem_shmem_vm_ops,
 };
 
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index ad2877eeeccd..13c0a8149c3a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -36,6 +36,11 @@  struct panfrost_gem_object {
 	 */
 	atomic_t gpu_usecount;
 
+	/*
+	 * Object chunk size currently mapped onto physical memory
+	 */
+	size_t heap_rss_size;
+
 	bool noexec		:1;
 	bool is_heap		:1;
 };
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index d54d4e7b2195..67c206124781 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -285,17 +285,19 @@  static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
 	pm_runtime_put_autosuspend(pfdev->dev);
 }
 
-static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
+static size_t mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
 		      u64 iova, int prot, struct sg_table *sgt)
 {
 	unsigned int count;
 	struct scatterlist *sgl;
 	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
 	u64 start_iova = iova;
+	size_t total = 0;
 
 	for_each_sgtable_dma_sg(sgt, sgl, count) {
 		unsigned long paddr = sg_dma_address(sgl);
 		size_t len = sg_dma_len(sgl);
+		total += len;
 
 		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
 
@@ -315,7 +317,7 @@  static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
 
 	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
 
-	return 0;
+	return total;
 }
 
 int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
@@ -447,6 +449,7 @@  static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	pgoff_t page_offset;
 	struct sg_table *sgt;
 	struct page **pages;
+	size_t mapped_size;
 
 	bomapping = addr_to_mapping(pfdev, as, addr);
 	if (!bomapping)
@@ -518,10 +521,11 @@  static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	if (ret)
 		goto err_map;
 
-	mmu_map_sg(pfdev, bomapping->mmu, addr,
-		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+	mapped_size = mmu_map_sg(pfdev, bomapping->mmu, addr,
+				 IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
 
 	bomapping->active = true;
+	bo->heap_rss_size += mapped_size;
 
 	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);