
[v5,1/2] drm/panthor: Expose size of driver internal BO's over fdinfo

Message ID 20241218181844.886043-2-adrian.larumbe@collabora.com (mailing list archive)
State New
Series drm/panthor: Display size of internal kernel BOs through fdinfo

Commit Message

Adrián Larumbe Dec. 18, 2024, 6:18 p.m. UTC
From: Adrián Larumbe <adrian.larumbe@collabora.com>

This will display the sizes of kernel BO's bound to an open file, which are
otherwise not exposed to UM through a handle.

The sizes recorded are as follows:
 - Per group: suspend buffer, protm-suspend buffer, syncobjs
 - Per queue: ringbuffer, profiling slots, firmware interface
 - For all heaps in all heap pools across all VM's bound to an open file,
 record size of all heap chunks, and for each pool the gpu_context BO too.

This does not record the size of FW regions, as these aren't bound to a
specific open file and remain active through the whole life of the driver.

Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
---
 drivers/gpu/drm/panthor/panthor_drv.c   | 12 ++++++
 drivers/gpu/drm/panthor/panthor_heap.c  | 26 +++++++++++++
 drivers/gpu/drm/panthor/panthor_heap.h  |  2 +
 drivers/gpu/drm/panthor/panthor_mmu.c   | 35 +++++++++++++++++
 drivers/gpu/drm/panthor/panthor_mmu.h   |  4 ++
 drivers/gpu/drm/panthor/panthor_sched.c | 52 ++++++++++++++++++++++++-
 drivers/gpu/drm/panthor/panthor_sched.h |  4 ++
 7 files changed, 134 insertions(+), 1 deletion(-)
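
With DRM_GEM_OBJECT_RESIDENT passed as the supported-status mask,
drm_print_memory_stats() emits total/shared/active/resident keys for the
new "internal" region, so the fdinfo output gains lines roughly like these
(illustrative sizes, not taken from a real run):

	drm-total-internal:	4096 KiB
	drm-shared-internal:	0
	drm-active-internal:	1024 KiB
	drm-resident-internal:	4096 KiB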

Comments

Adrián Larumbe Dec. 18, 2024, 7:14 p.m. UTC | #1
On 19.12.2024 02:18, Adrián Martínez Larumbe wrote:
> From: Adrián Larumbe <adrian.larumbe@collabora.com>
> 
> This will display the sizes of kernel BO's bound to an open file, which are
> otherwise not exposed to UM through a handle.
> 
> The sizes recorded are as follows:
>  - Per group: suspend buffer, protm-suspend buffer, syncobjs
>  - Per queue: ringbuffer, profiling slots, firmware interface
>  - For all heaps in all heap pools across all VM's bound to an open file,
>  record size of all heap chunks, and for each pool the gpu_context BO too.
> 
> This does not record the size of FW regions, as these aren't bound to a
> specific open file and remain active through the whole life of the driver.
> 
> Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
> Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
> ---
>  drivers/gpu/drm/panthor/panthor_drv.c   | 12 ++++++
>  drivers/gpu/drm/panthor/panthor_heap.c  | 26 +++++++++++++
>  drivers/gpu/drm/panthor/panthor_heap.h  |  2 +
>  drivers/gpu/drm/panthor/panthor_mmu.c   | 35 +++++++++++++++++
>  drivers/gpu/drm/panthor/panthor_mmu.h   |  4 ++
>  drivers/gpu/drm/panthor/panthor_sched.c | 52 ++++++++++++++++++++++++-
>  drivers/gpu/drm/panthor/panthor_sched.h |  4 ++
>  7 files changed, 134 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
> index d5dcd3d1b33a..277babcdae12 100644
> --- a/drivers/gpu/drm/panthor/panthor_drv.c
> +++ b/drivers/gpu/drm/panthor/panthor_drv.c
> @@ -1457,12 +1457,24 @@ static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
>  	drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
>  }
>  
> +static void panthor_show_internal_memory_stats(struct drm_printer *p, struct drm_file *file)
> +{
> +	struct panthor_file *pfile = file->driver_priv;
> +	struct drm_memory_stats status = {0};
> +
> +	panthor_group_kbo_sizes(pfile, &status);
> +	panthor_vm_heaps_sizes(pfile, &status);
> +
> +	drm_print_memory_stats(p, &status, DRM_GEM_OBJECT_RESIDENT, "internal");
> +}
> +
>  static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
>  {
>  	struct drm_device *dev = file->minor->dev;
>  	struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
>  
>  	panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);
> +	panthor_show_internal_memory_stats(p, file);
>  
>  	drm_show_memory_stats(p, file);
>  }
> diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
> index 3796a9eb22af..49e426fc2a31 100644
> --- a/drivers/gpu/drm/panthor/panthor_heap.c
> +++ b/drivers/gpu/drm/panthor/panthor_heap.c
> @@ -603,3 +603,29 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
>  
>  	panthor_heap_pool_put(pool);
>  }
> +
> +/**
> + * panthor_heap_pool_size() - Calculate size of all chunks across all heaps in a pool
> + * @pool: Pool whose total chunk size to calculate.
> + *
> + * This function adds the size of all heap chunks across all heaps in the
> + * argument pool. It also adds the size of the gpu contexts kernel bo.
> + * It is meant to be used by fdinfo for displaying the size of internal
> + * driver BO's that aren't exposed to userspace through a GEM handle.
> + *
> + */
> +size_t panthor_heap_pool_size(struct panthor_heap_pool *pool)
> +{
> +	struct panthor_heap *heap;
> +	unsigned long i;
> +	size_t size = 0;
> +
> +	down_read(&pool->lock);
> +	xa_for_each(&pool->xa, i, heap)
> +		size += heap->chunk_size * heap->chunk_count;
> +	up_write(&pool->lock);

Oh well, just realised I forgot to change this to up_read(), so will do in a final revision.
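That is, the fix for the next revision is the one-liner:

-	up_write(&pool->lock);
+	up_read(&pool->lock);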

> +	size += pool->gpu_contexts->obj->size;
> +
> +	return size;
> +}
> diff --git a/drivers/gpu/drm/panthor/panthor_heap.h b/drivers/gpu/drm/panthor/panthor_heap.h
> index 25a5f2bba445..e3358d4e8edb 100644
> --- a/drivers/gpu/drm/panthor/panthor_heap.h
> +++ b/drivers/gpu/drm/panthor/panthor_heap.h
> @@ -27,6 +27,8 @@ struct panthor_heap_pool *
>  panthor_heap_pool_get(struct panthor_heap_pool *pool);
>  void panthor_heap_pool_put(struct panthor_heap_pool *pool);
>  
> +size_t panthor_heap_pool_size(struct panthor_heap_pool *pool);
> +
>  int panthor_heap_grow(struct panthor_heap_pool *pool,
>  		      u64 heap_gpu_va,
>  		      u32 renderpasses_in_flight,
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
> index c39e3eb1c15d..51f6e66df3f5 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
> @@ -1941,6 +1941,41 @@ struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool c
>  	return pool;
>  }
>  
> +/**
> + * panthor_vm_heaps_size() - Calculate size of all heap chunks across all
> + * heaps over all the heap pools in a VM
> + * @pfile: File.
> + * @status: Memory status to be updated.
> + *
> + * Calculate all heap chunk sizes in all heap pools bound to a VM. If the VM
> + * is active, record the size as active as well.
> + */
> +void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *status)
> +{
> +	struct panthor_vm *vm;
> +	unsigned long i;
> +
> +	if (!pfile->vms)
> +		return;
> +
> +	xa_for_each(&pfile->vms->xa, i, vm) {
> +		size_t size;
> +
> +		mutex_lock(&vm->heaps.lock);
> +		if (!vm->heaps.pool) {
> +			mutex_unlock(&vm->heaps.lock);
> +			continue;
> +		}
> +		size = panthor_heap_pool_size(vm->heaps.pool);
> +		mutex_unlock(&vm->heaps.lock);
> +
> +		status->resident += size;
> +		status->private += size;
> +		if (vm->as.id >= 0)
> +			status->active += size;
> +	}
> +}
> +
>  static u64 mair_to_memattr(u64 mair, bool coherent)
>  {
>  	u64 memattr = 0;
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
> index 8d21e83d8aba..2aeb2522cdfa 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.h
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.h
> @@ -5,10 +5,12 @@
>  #ifndef __PANTHOR_MMU_H__
>  #define __PANTHOR_MMU_H__
>  
> +#include <linux/types.h>
>  #include <linux/dma-resv.h>
>  
>  struct drm_exec;
>  struct drm_sched_job;
> +struct drm_memory_stats;
>  struct panthor_gem_object;
>  struct panthor_heap_pool;
>  struct panthor_vm;
> @@ -37,6 +39,8 @@ int panthor_vm_flush_all(struct panthor_vm *vm);
>  struct panthor_heap_pool *
>  panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
>  
> +void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *status);
> +
>  struct panthor_vm *panthor_vm_get(struct panthor_vm *vm);
>  void panthor_vm_put(struct panthor_vm *vm);
>  struct panthor_vm *panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index 77b184c3fb0c..bb4b3ffadcd1 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -628,7 +628,7 @@ struct panthor_group {
>  	 */
>  	struct panthor_kernel_bo *syncobjs;
>  
> -	/** @fdinfo: Per-file total cycle and timestamp values reference. */
> +	/** @fdinfo: Per-group total cycle and timestamp values and kernel BO sizes. */
>  	struct {
>  		/** @data: Total sampled values for jobs in queues from this group. */
>  		struct panthor_gpu_usage data;
> @@ -638,6 +638,9 @@ struct panthor_group {
>  		 * and job post-completion processing function
>  		 */
>  		struct mutex lock;
> +
> +		/** @bo_sizes: Aggregate size of private kernel BO's held by the group. */
> +		size_t kbo_sizes;
>  	} fdinfo;
>  
>  	/** @state: Group state. */
> @@ -3381,6 +3384,29 @@ group_create_queue(struct panthor_group *group,
>  	return ERR_PTR(ret);
>  }
>  
> +static void add_group_kbo_sizes(struct panthor_device *ptdev,
> +				struct panthor_group *group)
> +{
> +	struct panthor_queue *queue;
> +	int i;
> +
> +	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group)))
> +		return;
> +	if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev))
> +		return;
> +
> +	group->fdinfo.kbo_sizes += group->suspend_buf->obj->size;
> +	group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size;
> +	group->fdinfo.kbo_sizes += group->syncobjs->obj->size;
> +
> +	for (i = 0; i < group->queue_count; i++) {
> +		queue =	group->queues[i];
> +		group->fdinfo.kbo_sizes += queue->ringbuf->obj->size;
> +		group->fdinfo.kbo_sizes += queue->iface.mem->obj->size;
> +		group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size;
> +	}
> +}
> +
>  #define MAX_GROUPS_PER_POOL		128
>  
>  int panthor_group_create(struct panthor_file *pfile,
> @@ -3505,6 +3531,7 @@ int panthor_group_create(struct panthor_file *pfile,
>  	}
>  	mutex_unlock(&sched->reset.lock);
>  
> +	add_group_kbo_sizes(group->ptdev, group);
>  	mutex_init(&group->fdinfo.lock);
>  
>  	return gid;
> @@ -3624,6 +3651,29 @@ void panthor_group_pool_destroy(struct panthor_file *pfile)
>  	pfile->groups = NULL;
>  }
>  
> +/**
> + * panthor_group_kbo_sizes() - Retrieve aggregate size of all private kernel BO's
> + * belonging to all the groups owned by an open Panthor file
> + * @pfile: File.
> + * @status: Memory status to be updated.
> + *
> + */
> +void panthor_group_kbo_sizes(struct panthor_file *pfile, struct drm_memory_stats *status)
> +{
> +	struct panthor_group_pool *gpool = pfile->groups;
> +	struct panthor_group *group;
> +	unsigned long i;
> +
> +	if (IS_ERR_OR_NULL(gpool))
> +		return;
> +	xa_for_each(&gpool->xa, i, group) {
> +		status->resident += group->fdinfo.kbo_sizes;
> +		status->private += group->fdinfo.kbo_sizes;
> +		if (group->csg_id >= 0)
> +			status->active += group->fdinfo.kbo_sizes;
> +	}
> +}
> +
>  static void job_release(struct kref *ref)
>  {
>  	struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
> index 5ae6b4bde7c5..4dd6a7fc8fbd 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.h
> +++ b/drivers/gpu/drm/panthor/panthor_sched.h
> @@ -4,11 +4,14 @@
>  #ifndef __PANTHOR_SCHED_H__
>  #define __PANTHOR_SCHED_H__
>  
> +#include <linux/types.h>
> +
>  struct drm_exec;
>  struct dma_fence;
>  struct drm_file;
>  struct drm_gem_object;
>  struct drm_sched_job;
> +struct drm_memory_stats;
>  struct drm_panthor_group_create;
>  struct drm_panthor_queue_create;
>  struct drm_panthor_group_get_state;
> @@ -36,6 +39,7 @@ void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *job);
>  
>  int panthor_group_pool_create(struct panthor_file *pfile);
>  void panthor_group_pool_destroy(struct panthor_file *pfile);
> +void panthor_group_kbo_sizes(struct panthor_file *pfile, struct drm_memory_stats *status);
>  
>  int panthor_sched_init(struct panthor_device *ptdev);
>  void panthor_sched_unplug(struct panthor_device *ptdev);
> -- 
> 2.47.0

Adrian Larumbe
Mihail Atanassov Dec. 19, 2024, 4:30 p.m. UTC | #2
On 18/12/2024 18:18, Adrián Martínez Larumbe wrote:
> From: Adrián Larumbe <adrian.larumbe@collabora.com>
> 
> This will display the sizes of kernel BO's bound to an open file, which are
> otherwise not exposed to UM through a handle.
> 
> The sizes recorded are as follows:
>   - Per group: suspend buffer, protm-suspend buffer, syncobjs
>   - Per queue: ringbuffer, profiling slots, firmware interface
>   - For all heaps in all heap pools across all VM's bound to an open file,
>   record size of all heap chunks, and for each pool the gpu_context BO too.
> 
> This does not record the size of FW regions, as these aren't bound to a
> specific open file and remain active through the whole life of the driver.
> 
> Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
> Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
> ---
>   drivers/gpu/drm/panthor/panthor_drv.c   | 12 ++++++
>   drivers/gpu/drm/panthor/panthor_heap.c  | 26 +++++++++++++
>   drivers/gpu/drm/panthor/panthor_heap.h  |  2 +
>   drivers/gpu/drm/panthor/panthor_mmu.c   | 35 +++++++++++++++++
>   drivers/gpu/drm/panthor/panthor_mmu.h   |  4 ++
>   drivers/gpu/drm/panthor/panthor_sched.c | 52 ++++++++++++++++++++++++-
>   drivers/gpu/drm/panthor/panthor_sched.h |  4 ++
>   7 files changed, 134 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
> index d5dcd3d1b33a..277babcdae12 100644
> --- a/drivers/gpu/drm/panthor/panthor_drv.c
> +++ b/drivers/gpu/drm/panthor/panthor_drv.c
> @@ -1457,12 +1457,24 @@ static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
>   	drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
>   }
>   
> +static void panthor_show_internal_memory_stats(struct drm_printer *p, struct drm_file *file)
> +{
> +	struct panthor_file *pfile = file->driver_priv;
> +	struct drm_memory_stats status = {0};
> +
> +	panthor_group_kbo_sizes(pfile, &status);
> +	panthor_vm_heaps_sizes(pfile, &status);
> +
> +	drm_print_memory_stats(p, &status, DRM_GEM_OBJECT_RESIDENT, "internal");
> +}
> +
>   static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
>   {
>   	struct drm_device *dev = file->minor->dev;
>   	struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
>   
>   	panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);
> +	panthor_show_internal_memory_stats(p, file);
>   
>   	drm_show_memory_stats(p, file);
>   }
> diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
> index 3796a9eb22af..49e426fc2a31 100644
> --- a/drivers/gpu/drm/panthor/panthor_heap.c
> +++ b/drivers/gpu/drm/panthor/panthor_heap.c
> @@ -603,3 +603,29 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
>   
>   	panthor_heap_pool_put(pool);
>   }
> +
> +/**
> + * panthor_heap_pool_size() - Calculate size of all chunks across all heaps in a pool
> + * @pool: Pool whose total chunk size to calculate.
> + *
> + * This function adds the size of all heap chunks across all heaps in the
> + * argument pool. It also adds the size of the gpu contexts kernel bo.
> + * It is meant to be used by fdinfo for displaying the size of internal
> + * driver BO's that aren't exposed to userspace through a GEM handle.
> + *
> + */
> +size_t panthor_heap_pool_size(struct panthor_heap_pool *pool)
> +{
> +	struct panthor_heap *heap;
> +	unsigned long i;
> +	size_t size = 0;
> +
> +	down_read(&pool->lock);
> +	xa_for_each(&pool->xa, i, heap)
> +		size += heap->chunk_size * heap->chunk_count;
> +	up_write(&pool->lock);
> +
> +	size += pool->gpu_contexts->obj->size;
> +
> +	return size;
> +}
> diff --git a/drivers/gpu/drm/panthor/panthor_heap.h b/drivers/gpu/drm/panthor/panthor_heap.h
> index 25a5f2bba445..e3358d4e8edb 100644
> --- a/drivers/gpu/drm/panthor/panthor_heap.h
> +++ b/drivers/gpu/drm/panthor/panthor_heap.h
> @@ -27,6 +27,8 @@ struct panthor_heap_pool *
>   panthor_heap_pool_get(struct panthor_heap_pool *pool);
>   void panthor_heap_pool_put(struct panthor_heap_pool *pool);
>   
> +size_t panthor_heap_pool_size(struct panthor_heap_pool *pool);
> +
>   int panthor_heap_grow(struct panthor_heap_pool *pool,
>   		      u64 heap_gpu_va,
>   		      u32 renderpasses_in_flight,
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
> index c39e3eb1c15d..51f6e66df3f5 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
> @@ -1941,6 +1941,41 @@ struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool c
>   	return pool;
>   }
>   
> +/**
> + * panthor_vm_heaps_size() - Calculate size of all heap chunks across all
> + * heaps over all the heap pools in a VM
> + * @pfile: File.
> + * @status: Memory status to be updated.
> + *
> + * Calculate all heap chunk sizes in all heap pools bound to a VM. If the VM
> + * is active, record the size as active as well.
> + */
> +void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *status)
> +{
> +	struct panthor_vm *vm;
> +	unsigned long i;
> +
> +	if (!pfile->vms)
> +		return;
> +
> +	xa_for_each(&pfile->vms->xa, i, vm) {
> +		size_t size;
> +
> +		mutex_lock(&vm->heaps.lock);

Use `scoped_guard` instead?

#include <linux/cleanup.h>

/* ... */

	xa_for_each(...) {
		size_t size;

		scoped_guard(mutex, &vm->heaps.lock) {
			if (!vm->heaps.pool)
				continue;

			size = panthor_heap_pool_size(vm->heaps.pool);
		}
		/* ... */

> +		if (!vm->heaps.pool) {
> +			mutex_unlock(&vm->heaps.lock);
> +			continue;
> +		}
> +		size = panthor_heap_pool_size(vm->heaps.pool);
> +		mutex_unlock(&vm->heaps.lock);
> +
> +		status->resident += size;
> +		status->private += size;
> +		if (vm->as.id >= 0)
> +			status->active += size;
> +	}
> +}
> +
>   static u64 mair_to_memattr(u64 mair, bool coherent)
>   {
>   	u64 memattr = 0;
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
> index 8d21e83d8aba..2aeb2522cdfa 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.h
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.h
> @@ -5,10 +5,12 @@
>   #ifndef __PANTHOR_MMU_H__
>   #define __PANTHOR_MMU_H__
>   
> +#include <linux/types.h>

[nit] Is this related? Nothing in this file's other hunks suggest that 
it's required.

>   #include <linux/dma-resv.h>
>   
>   struct drm_exec;
>   struct drm_sched_job;
> +struct drm_memory_stats;
>   struct panthor_gem_object;
>   struct panthor_heap_pool;
>   struct panthor_vm;
> @@ -37,6 +39,8 @@ int panthor_vm_flush_all(struct panthor_vm *vm);
>   struct panthor_heap_pool *
>   panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
>   
> +void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *status);
> +
>   struct panthor_vm *panthor_vm_get(struct panthor_vm *vm);
>   void panthor_vm_put(struct panthor_vm *vm);
>   struct panthor_vm *panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index 77b184c3fb0c..bb4b3ffadcd1 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -628,7 +628,7 @@ struct panthor_group {
>   	 */
>   	struct panthor_kernel_bo *syncobjs;
>   
> -	/** @fdinfo: Per-file total cycle and timestamp values reference. */
> +	/** @fdinfo: Per-group total cycle and timestamp values and kernel BO sizes. */
>   	struct {
>   		/** @data: Total sampled values for jobs in queues from this group. */
>   		struct panthor_gpu_usage data;
> @@ -638,6 +638,9 @@ struct panthor_group {
>   		 * and job post-completion processing function
>   		 */
>   		struct mutex lock;
> +
> +		/** @bo_sizes: Aggregate size of private kernel BO's held by the group. */
> +		size_t kbo_sizes;
>   	} fdinfo;
>   
>   	/** @state: Group state. */
> @@ -3381,6 +3384,29 @@ group_create_queue(struct panthor_group *group,
>   	return ERR_PTR(ret);
>   }
>   
> +static void add_group_kbo_sizes(struct panthor_device *ptdev,
> +				struct panthor_group *group)
> +{
> +	struct panthor_queue *queue;
> +	int i;
> +
> +	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group)))
> +		return;
> +	if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev))
> +		return;
> +
> +	group->fdinfo.kbo_sizes += group->suspend_buf->obj->size;
> +	group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size;
> +	group->fdinfo.kbo_sizes += group->syncobjs->obj->size;
> +
> +	for (i = 0; i < group->queue_count; i++) {
> +		queue =	group->queues[i];
> +		group->fdinfo.kbo_sizes += queue->ringbuf->obj->size;
> +		group->fdinfo.kbo_sizes += queue->iface.mem->obj->size;
> +		group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size;
> +	}
> +}
> +
>   #define MAX_GROUPS_PER_POOL		128
>   
>   int panthor_group_create(struct panthor_file *pfile,
> @@ -3505,6 +3531,7 @@ int panthor_group_create(struct panthor_file *pfile,
>   	}
>   	mutex_unlock(&sched->reset.lock);
>   
> +	add_group_kbo_sizes(group->ptdev, group);
>   	mutex_init(&group->fdinfo.lock);
>   
>   	return gid;
> @@ -3624,6 +3651,29 @@ void panthor_group_pool_destroy(struct panthor_file *pfile)
>   	pfile->groups = NULL;
>   }
>   
> +/**
> + * panthor_group_kbo_sizes() - Retrieve aggregate size of all private kernel BO's
> + * belonging to all the groups owned by an open Panthor file
> + * @pfile: File.
> + * @status: Memory status to be updated.
> + *
> + */
> +void panthor_group_kbo_sizes(struct panthor_file *pfile, struct drm_memory_stats *status)
> +{
> +	struct panthor_group_pool *gpool = pfile->groups;
> +	struct panthor_group *group;
> +	unsigned long i;
> +
> +	if (IS_ERR_OR_NULL(gpool))
> +		return;
> +	xa_for_each(&gpool->xa, i, group) {
> +		status->resident += group->fdinfo.kbo_sizes;
> +		status->private += group->fdinfo.kbo_sizes;
> +		if (group->csg_id >= 0)
> +			status->active += group->fdinfo.kbo_sizes;
> +	}
> +}
> +
>   static void job_release(struct kref *ref)
>   {
>   	struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
> index 5ae6b4bde7c5..4dd6a7fc8fbd 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.h
> +++ b/drivers/gpu/drm/panthor/panthor_sched.h
> @@ -4,11 +4,14 @@
>   #ifndef __PANTHOR_SCHED_H__
>   #define __PANTHOR_SCHED_H__
>   
> +#include <linux/types.h>
> +

As above.

>   struct drm_exec;
>   struct dma_fence;
>   struct drm_file;
>   struct drm_gem_object;
>   struct drm_sched_job;
> +struct drm_memory_stats;
>   struct drm_panthor_group_create;
>   struct drm_panthor_queue_create;
>   struct drm_panthor_group_get_state;
> @@ -36,6 +39,7 @@ void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *job);
>   
>   int panthor_group_pool_create(struct panthor_file *pfile);
>   void panthor_group_pool_destroy(struct panthor_file *pfile);
> +void panthor_group_kbo_sizes(struct panthor_file *pfile, struct drm_memory_stats *status);
>   
>   int panthor_sched_init(struct panthor_device *ptdev);
>   void panthor_sched_unplug(struct panthor_device *ptdev);

With the mutex cleanup 'modernised',

Reviewed-by: Mihail Atanassov <mihail.atanassov@arm.com>
Steven Price Dec. 20, 2024, 11:08 a.m. UTC | #3
On 19/12/2024 16:30, Mihail Atanassov wrote:
> 
> 
> On 18/12/2024 18:18, Adrián Martínez Larumbe wrote:
>> From: Adrián Larumbe <adrian.larumbe@collabora.com>
>>
>> This will display the sizes of kernel BO's bound to an open file,
>> which are
>> otherwise not exposed to UM through a handle.
>>
>> The sizes recorded are as follows:
>>   - Per group: suspend buffer, protm-suspend buffer, syncobjs
>>   - Per queue: ringbuffer, profiling slots, firmware interface
>>   - For all heaps in all heap pools across all VM's bound to an open
>> file,
>>   record size of all heap chunks, and for each pool the gpu_context BO
>> too.
>>
>> This does not record the size of FW regions, as these aren't bound to a
>> specific open file and remain active through the whole life of the
>> driver.
>>
>> Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
>> Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
>> ---

[...]

>> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/
>> panthor/panthor_mmu.c
>> index c39e3eb1c15d..51f6e66df3f5 100644
>> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
>> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
>> @@ -1941,6 +1941,41 @@ struct panthor_heap_pool
>> *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool c
>>       return pool;
>>   }
>>   +/**
>> + * panthor_vm_heaps_size() - Calculate size of all heap chunks across
>> all
>> + * heaps over all the heap pools in a VM
>> + * @pfile: File.
>> + * @status: Memory status to be updated.
>> + *
>> + * Calculate all heap chunk sizes in all heap pools bound to a VM. If
>> the VM
>> + * is active, record the size as active as well.
>> + */
>> +void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct
>> drm_memory_stats *status)
>> +{
>> +    struct panthor_vm *vm;
>> +    unsigned long i;
>> +
>> +    if (!pfile->vms)
>> +        return;
>> +
>> +    xa_for_each(&pfile->vms->xa, i, vm) {
>> +        size_t size;
>> +
>> +        mutex_lock(&vm->heaps.lock);
> 
> Use `scoped_guard` instead?
> 
> #include <linux/cleanup.h>
> 
> /* ... */
> 
>     xa_for_each(...) {
>         size_t size;
> 
>         scoped_guard(mutex, &vm->heaps.lock) {
>             if (!vm->heaps.pool)
>                 continue;
> 
>             size = panthor_heap_pool_size(vm->heaps.pool);
>         }
>         /* ... */

I don't believe this actually works. The implementation of scoped_guard
uses a for() loop, so the "continue" will be applied to this (hidden)
internal loop rather than the intended xa_for_each() loop.
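
For the record, scoped_guard() in <linux/cleanup.h> is defined roughly
like this (simplified sketch, omitting the conditional-lock handling):

	#define scoped_guard(_name, args...)				\
		for (CLASS(_name, scope)(args),				\
		     *done = NULL; !done; done = (void *)1)

so the guarded block becomes the body of a single-iteration for() loop,
and a "continue" inside it merely terminates that hidden loop and falls
through to the code after the guard.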

An alternative would be:

	xa_for_each(&pfile->vms->xa, i, vm) {
		size_t size = 0;

		mutex_lock(&vm->heaps.lock);
		if (vm->heaps.pool)
			size = panthor_heap_pool_size(vm->heaps.pool);
		mutex_unlock(&vm->heaps.lock);

		status->resident += size;
		status->private += size;
		if (vm->as.id >= 0)
			status->active += size;
	}

(relying on size=0 being a no-op for the additions). Although I was
personally also happy with the original - but perhaps that's just
because I'm old and still feel anxious when I see scoped_guard() ;)

Steve
Steven Price Dec. 20, 2024, 11:12 a.m. UTC | #4
On 18/12/2024 19:14, Adrián Larumbe wrote:
> On 19.12.2024 02:18, Adrián Martínez Larumbe wrote:
>> From: Adrián Larumbe <adrian.larumbe@collabora.com>
>>
>> This will display the sizes of kernel BO's bound to an open file, which are
>> otherwise not exposed to UM through a handle.
>>
>> The sizes recorded are as follows:
>>  - Per group: suspend buffer, protm-suspend buffer, syncobjs
>>  - Per queue: ringbuffer, profiling slots, firmware interface
>>  - For all heaps in all heap pools across all VM's bound to an open file,
>>  record size of all heap chunks, and for each pool the gpu_context BO too.
>>
>> This does not record the size of FW regions, as these aren't bound to a
>> specific open file and remain active through the whole life of the driver.
>>
>> Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
>> Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
>> ---

[...]

>> +size_t panthor_heap_pool_size(struct panthor_heap_pool *pool)
>> +{
>> +	struct panthor_heap *heap;
>> +	unsigned long i;
>> +	size_t size = 0;
>> +
>> +	down_read(&pool->lock);
>> +	xa_for_each(&pool->xa, i, heap)
>> +		size += heap->chunk_size * heap->chunk_count;
>> +	up_write(&pool->lock);
> 
> Oh well, just realised I forgot to change this to up_read() so will do in a final revision.

With that fixed, feel free to add:

Reviewed-by: Steven Price <steven.price@arm.com>

Thanks,
Steve
Mihail Atanassov Dec. 20, 2024, 11:14 a.m. UTC | #5
On 20/12/2024 11:08, Steven Price wrote:
> On 19/12/2024 16:30, Mihail Atanassov wrote:
>>
>>
>> On 18/12/2024 18:18, Adrián Martínez Larumbe wrote:
>>> From: Adrián Larumbe <adrian.larumbe@collabora.com>
>>>
>>> This will display the sizes of kernel BO's bound to an open file,
>>> which are
>>> otherwise not exposed to UM through a handle.
>>>
>>> The sizes recorded are as follows:
>>>    - Per group: suspend buffer, protm-suspend buffer, syncobjs
>>>    - Per queue: ringbuffer, profiling slots, firmware interface
>>>    - For all heaps in all heap pools across all VM's bound to an open
>>> file,
>>>    record size of all heap chunks, and for each pool the gpu_context BO
>>> too.
>>>
>>> This does not record the size of FW regions, as these aren't bound to a
>>> specific open file and remain active through the whole life of the
>>> driver.
>>>
>>> Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
>>> Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
>>> ---
> 
> [...]
> 
>>> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/
>>> panthor/panthor_mmu.c
>>> index c39e3eb1c15d..51f6e66df3f5 100644
>>> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
>>> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
>>> @@ -1941,6 +1941,41 @@ struct panthor_heap_pool
>>> *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool c
>>>        return pool;
>>>    }
>>>    +/**
>>> + * panthor_vm_heaps_size() - Calculate size of all heap chunks across
>>> all
>>> + * heaps over all the heap pools in a VM
>>> + * @pfile: File.
>>> + * @status: Memory status to be updated.
>>> + *
>>> + * Calculate all heap chunk sizes in all heap pools bound to a VM. If
>>> the VM
>>> + * is active, record the size as active as well.
>>> + */
>>> +void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct
>>> drm_memory_stats *status)
>>> +{
>>> +    struct panthor_vm *vm;
>>> +    unsigned long i;
>>> +
>>> +    if (!pfile->vms)
>>> +        return;
>>> +
>>> +    xa_for_each(&pfile->vms->xa, i, vm) {
>>> +        size_t size;
>>> +
>>> +        mutex_lock(&vm->heaps.lock);
>>
>> Use `scoped_guard` instead?
>>
>> #include <linux/cleanup.h>
>>
>> /* ... */
>>
>>      xa_for_each(...) {
>>          size_t size;
>>
>>          scoped_guard(mutex, &vm->heaps.lock) {
>>              if (!vm->heaps.pool)
>>                  continue;
>>
>>              size = panthor_heap_pool_size(vm->heaps.pool);
>>          }
>>          /* ... */
> 
> I don't believe this actually works. The implementation of scoped_guard
> uses a for() loop, so the "continue" will be applied to this (hidden)
> internal loop rather than the intended xa_for_each() loop.

Yikes, good call-out! I ought to have checked... I'll make a mental note 
of that limitation.

> 
> An alternative would be:
> 
> 	xa_for_each(&pfile->vms->xa, i, vm) {
> 		size_t size = 0;
> 
> 		mutex_lock(&vm->heaps.lock);
> 		if (vm->heaps.pool)
> 			size = panthor_heap_pool_size(vm->heaps.pool);
> 		mutex_unlock(&vm->heaps.lock);

Well then you can do a:

		scoped_guard(mutex, &vm->heaps.lock) {
			if (vm->heaps.pool)
				size = panthor_heap_pool_size(vm->heaps.pool);
		}

		/* ;) */

> 
> 		status->resident += size;
> 		status->private += size;
> 		if (vm->as.id >= 0)
> 			status->active += size;
> 	}
> 
> (relying on size=0 being a no-op for the additions). Although I was
> personally also happy with the original - but perhaps that's just
> because I'm old and still feel anxious when I see scoped_guard() ;)
> 
> Steve
>
kernel test robot Dec. 20, 2024, 9:06 p.m. UTC | #6
Hi Adrián,

kernel test robot noticed the following build warnings:

[auto build test WARNING on 6a8d72b80807ad45229c0f5a17e3be843b15a703]

url:    https://github.com/intel-lab-lkp/linux/commits/Adri-n-Mart-nez-Larumbe/drm-panthor-Expose-size-of-driver-internal-BO-s-over-fdinfo/20241219-022128
base:   6a8d72b80807ad45229c0f5a17e3be843b15a703
patch link:    https://lore.kernel.org/r/20241218181844.886043-2-adrian.larumbe%40collabora.com
patch subject: [PATCH v5 1/2] drm/panthor: Expose size of driver internal BO's over fdinfo
config: loongarch-allyesconfig (https://download.01.org/0day-ci/archive/20241221/202412210405.NpsdRCQY-lkp@intel.com/config)
compiler: loongarch64-linux-gcc (GCC) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20241221/202412210405.NpsdRCQY-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202412210405.NpsdRCQY-lkp@intel.com/

All warnings (new ones prefixed by >>):

   drivers/gpu/drm/panthor/panthor_mmu.c:102: warning: Function parameter or struct member 'as' not described in 'panthor_mmu'
   drivers/gpu/drm/panthor/panthor_mmu.c:102: warning: Excess struct member 'slots_lock' description in 'panthor_mmu'
   drivers/gpu/drm/panthor/panthor_mmu.c:102: warning: Excess struct member 'alloc_mask' description in 'panthor_mmu'
   drivers/gpu/drm/panthor/panthor_mmu.c:102: warning: Excess struct member 'faulty_mask' description in 'panthor_mmu'
   drivers/gpu/drm/panthor/panthor_mmu.c:102: warning: Excess struct member 'slots' description in 'panthor_mmu'
   drivers/gpu/drm/panthor/panthor_mmu.c:102: warning: Excess struct member 'lru_list' description in 'panthor_mmu'
   drivers/gpu/drm/panthor/panthor_mmu.c:102: warning: Excess struct member 'lock' description in 'panthor_mmu'
   drivers/gpu/drm/panthor/panthor_mmu.c:102: warning: Excess struct member 'list' description in 'panthor_mmu'
   drivers/gpu/drm/panthor/panthor_mmu.c:102: warning: Excess struct member 'reset_in_progress' description in 'panthor_mmu'
   drivers/gpu/drm/panthor/panthor_mmu.c:102: warning: Excess struct member 'wq' description in 'panthor_mmu'
   drivers/gpu/drm/panthor/panthor_mmu.c:217: warning: Excess struct member 'count' description in 'panthor_vm_op_ctx'
   drivers/gpu/drm/panthor/panthor_mmu.c:217: warning: Excess struct member 'ptr' description in 'panthor_vm_op_ctx'
   drivers/gpu/drm/panthor/panthor_mmu.c:217: warning: Excess struct member 'page' description in 'panthor_vm_op_ctx'
   drivers/gpu/drm/panthor/panthor_mmu.c:217: warning: Excess struct member 'addr' description in 'panthor_vm_op_ctx'
   drivers/gpu/drm/panthor/panthor_mmu.c:217: warning: Excess struct member 'range' description in 'panthor_vm_op_ctx'
   drivers/gpu/drm/panthor/panthor_mmu.c:217: warning: Excess struct member 'vm_bo' description in 'panthor_vm_op_ctx'
   drivers/gpu/drm/panthor/panthor_mmu.c:217: warning: Excess struct member 'bo_offset' description in 'panthor_vm_op_ctx'
   drivers/gpu/drm/panthor/panthor_mmu.c:217: warning: Excess struct member 'sgt' description in 'panthor_vm_op_ctx'
   drivers/gpu/drm/panthor/panthor_mmu.c:217: warning: Excess struct member 'new_vma' description in 'panthor_vm_op_ctx'
   drivers/gpu/drm/panthor/panthor_mmu.c:389: warning: Excess struct member 'start' description in 'panthor_vm'
   drivers/gpu/drm/panthor/panthor_mmu.c:389: warning: Excess struct member 'size' description in 'panthor_vm'
   drivers/gpu/drm/panthor/panthor_mmu.c:389: warning: Excess struct member 'id' description in 'panthor_vm'
   drivers/gpu/drm/panthor/panthor_mmu.c:389: warning: Excess struct member 'active_cnt' description in 'panthor_vm'
   drivers/gpu/drm/panthor/panthor_mmu.c:389: warning: Excess struct member 'lru_node' description in 'panthor_vm'
   drivers/gpu/drm/panthor/panthor_mmu.c:389: warning: Excess struct member 'pool' description in 'panthor_vm'
   drivers/gpu/drm/panthor/panthor_mmu.c:389: warning: Excess struct member 'lock' description in 'panthor_vm'
   drivers/gpu/drm/panthor/panthor_mmu.c:412: warning: Cannot understand  * @pt_cache: Cache used to allocate MMU page tables.
    on line 412 - I thought it was a doc line
   drivers/gpu/drm/panthor/panthor_mmu.c:481: warning: Cannot understand  * @free_pt() - Custom page table free function
    on line 481 - I thought it was a doc line
   drivers/gpu/drm/panthor/panthor_mmu.c:707: warning: Function parameter or struct member 'vm' not described in 'panthor_vm_active'
   drivers/gpu/drm/panthor/panthor_mmu.c:707: warning: Excess function parameter 'VM' description in 'panthor_vm_active'
   drivers/gpu/drm/panthor/panthor_mmu.c:816: warning: Function parameter or struct member 'vm' not described in 'panthor_vm_idle'
   drivers/gpu/drm/panthor/panthor_mmu.c:816: warning: Excess function parameter 'VM' description in 'panthor_vm_idle'
   drivers/gpu/drm/panthor/panthor_mmu.c:1037: warning: Function parameter or struct member 'vm' not described in 'panthor_vm_alloc_va'
   drivers/gpu/drm/panthor/panthor_mmu.c:1037: warning: Excess function parameter 'VM' description in 'panthor_vm_alloc_va'
   drivers/gpu/drm/panthor/panthor_mmu.c:1070: warning: Function parameter or struct member 'vm' not described in 'panthor_vm_free_va'
   drivers/gpu/drm/panthor/panthor_mmu.c:1070: warning: Excess function parameter 'VM' description in 'panthor_vm_free_va'
   drivers/gpu/drm/panthor/panthor_mmu.c:1504: warning: Function parameter or struct member 'ptdev' not described in 'panthor_vm_pool_create_vm'
   drivers/gpu/drm/panthor/panthor_mmu.c:1504: warning: Function parameter or struct member 'args' not described in 'panthor_vm_pool_create_vm'
   drivers/gpu/drm/panthor/panthor_mmu.c:1504: warning: Excess function parameter 'kernel_va_start' description in 'panthor_vm_pool_create_vm'
   drivers/gpu/drm/panthor/panthor_mmu.c:1504: warning: Excess function parameter 'kernel_va_range' description in 'panthor_vm_pool_create_vm'
>> drivers/gpu/drm/panthor/panthor_mmu.c:1954: warning: expecting prototype for panthor_vm_heaps_size(). Prototype was for panthor_vm_heaps_sizes() instead
--
   drivers/gpu/drm/panthor/panthor_sched.c:320: warning: Excess struct member 'runnable' description in 'panthor_scheduler'
   drivers/gpu/drm/panthor/panthor_sched.c:320: warning: Excess struct member 'idle' description in 'panthor_scheduler'
   drivers/gpu/drm/panthor/panthor_sched.c:320: warning: Excess struct member 'waiting' description in 'panthor_scheduler'
   drivers/gpu/drm/panthor/panthor_sched.c:320: warning: Excess struct member 'has_ref' description in 'panthor_scheduler'
   drivers/gpu/drm/panthor/panthor_sched.c:320: warning: Excess struct member 'in_progress' description in 'panthor_scheduler'
   drivers/gpu/drm/panthor/panthor_sched.c:320: warning: Excess struct member 'stopped_groups' description in 'panthor_scheduler'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'mem' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'input' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'output' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'input_fw_va' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'output_fw_va' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'gpu_va' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'ref' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'gt' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'sync64' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'bo' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'offset' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'kmap' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'lock' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'id' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'seqno' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'last_fence' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'in_flight_jobs' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'slots' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'slot_count' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:492: warning: Excess struct member 'seqno' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:701: warning: Excess struct member 'data' description in 'panthor_group'
   drivers/gpu/drm/panthor/panthor_sched.c:701: warning: Excess struct member 'lock' description in 'panthor_group'
>> drivers/gpu/drm/panthor/panthor_sched.c:701: warning: Excess struct member 'bo_sizes' description in 'panthor_group'
   drivers/gpu/drm/panthor/panthor_sched.c:837: warning: Excess struct member 'start' description in 'panthor_job'
   drivers/gpu/drm/panthor/panthor_sched.c:837: warning: Excess struct member 'size' description in 'panthor_job'
   drivers/gpu/drm/panthor/panthor_sched.c:837: warning: Excess struct member 'latest_flush' description in 'panthor_job'
   drivers/gpu/drm/panthor/panthor_sched.c:837: warning: Excess struct member 'start' description in 'panthor_job'
   drivers/gpu/drm/panthor/panthor_sched.c:837: warning: Excess struct member 'end' description in 'panthor_job'
   drivers/gpu/drm/panthor/panthor_sched.c:837: warning: Excess struct member 'mask' description in 'panthor_job'
   drivers/gpu/drm/panthor/panthor_sched.c:837: warning: Excess struct member 'slot' description in 'panthor_job'
   drivers/gpu/drm/panthor/panthor_sched.c:1766: warning: Function parameter or struct member 'ptdev' not described in 'panthor_sched_report_fw_events'
   drivers/gpu/drm/panthor/panthor_sched.c:1766: warning: Function parameter or struct member 'events' not described in 'panthor_sched_report_fw_events'
   drivers/gpu/drm/panthor/panthor_sched.c:2659: warning: Function parameter or struct member 'ptdev' not described in 'panthor_sched_report_mmu_fault'


vim +1954 drivers/gpu/drm/panthor/panthor_mmu.c

  1943	
  1944	/**
  1945	 * panthor_vm_heaps_size() - Calculate size of all heap chunks across all
  1946	 * heaps over all the heap pools in a VM
  1947	 * @pfile: File.
  1948	 * @status: Memory status to be updated.
  1949	 *
  1950	 * Calculate all heap chunk sizes in all heap pools bound to a VM. If the VM
  1951	 * is active, record the size as active as well.
  1952	 */
  1953	void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *status)
> 1954	{
  1955		struct panthor_vm *vm;
  1956		unsigned long i;
  1957	
  1958		if (!pfile->vms)
  1959			return;
  1960	
  1961		xa_for_each(&pfile->vms->xa, i, vm) {
  1962			size_t size;
  1963	
  1964			mutex_lock(&vm->heaps.lock);
  1965			if (!vm->heaps.pool) {
  1966				mutex_unlock(&vm->heaps.lock);
  1967				continue;
  1968			}
  1969			size = panthor_heap_pool_size(vm->heaps.pool);
  1970			mutex_unlock(&vm->heaps.lock);
  1971	
  1972			status->resident += size;
  1973			status->private += size;
  1974			if (vm->as.id >= 0)
  1975				status->active += size;
  1976		}
  1977	}
  1978
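
The two ">>"-prefixed warnings are the kerneldoc mismatches introduced by
this patch; the likely fixes for a next revision are renaming the comments
to match the code:

- * panthor_vm_heaps_size() - Calculate size of all heap chunks across all
+ * panthor_vm_heaps_sizes() - Calculate size of all heap chunks across all

and, in panthor_sched.c:

-		/** @bo_sizes: Aggregate size of private kernel BO's held by the group. */
+		/** @kbo_sizes: Aggregate size of private kernel BO's held by the group. */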

Patch

diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index d5dcd3d1b33a..277babcdae12 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -1457,12 +1457,24 @@  static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
 	drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
 }
 
+static void panthor_show_internal_memory_stats(struct drm_printer *p, struct drm_file *file)
+{
+	struct panthor_file *pfile = file->driver_priv;
+	struct drm_memory_stats status = {0};
+
+	panthor_group_kbo_sizes(pfile, &status);
+	panthor_vm_heaps_sizes(pfile, &status);
+
+	drm_print_memory_stats(p, &status, DRM_GEM_OBJECT_RESIDENT, "internal");
+}
+
 static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
 {
 	struct drm_device *dev = file->minor->dev;
 	struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
 
 	panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);
+	panthor_show_internal_memory_stats(p, file);
 
 	drm_show_memory_stats(p, file);
 }
diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
index 3796a9eb22af..49e426fc2a31 100644
--- a/drivers/gpu/drm/panthor/panthor_heap.c
+++ b/drivers/gpu/drm/panthor/panthor_heap.c
@@ -603,3 +603,29 @@  void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
 
 	panthor_heap_pool_put(pool);
 }
+
+/**
+ * panthor_heap_pool_size() - Calculate size of all chunks across all heaps in a pool
+ * @pool: Pool whose total chunk size to calculate.
+ *
+ * This function adds the size of all heap chunks across all heaps in the
+ * argument pool. It also adds the size of the gpu contexts kernel bo.
+ * It is meant to be used by fdinfo for displaying the size of internal
+ * driver BO's that aren't exposed to userspace through a GEM handle.
+ *
+ */
+size_t panthor_heap_pool_size(struct panthor_heap_pool *pool)
+{
+	struct panthor_heap *heap;
+	unsigned long i;
+	size_t size = 0;
+
+	down_read(&pool->lock);
+	xa_for_each(&pool->xa, i, heap)
+		size += heap->chunk_size * heap->chunk_count;
+	up_write(&pool->lock);
+
+	size += pool->gpu_contexts->obj->size;
+
+	return size;
+}
diff --git a/drivers/gpu/drm/panthor/panthor_heap.h b/drivers/gpu/drm/panthor/panthor_heap.h
index 25a5f2bba445..e3358d4e8edb 100644
--- a/drivers/gpu/drm/panthor/panthor_heap.h
+++ b/drivers/gpu/drm/panthor/panthor_heap.h
@@ -27,6 +27,8 @@  struct panthor_heap_pool *
 panthor_heap_pool_get(struct panthor_heap_pool *pool);
 void panthor_heap_pool_put(struct panthor_heap_pool *pool);
 
+size_t panthor_heap_pool_size(struct panthor_heap_pool *pool);
+
 int panthor_heap_grow(struct panthor_heap_pool *pool,
 		      u64 heap_gpu_va,
 		      u32 renderpasses_in_flight,
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index c39e3eb1c15d..51f6e66df3f5 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1941,6 +1941,41 @@  struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool c
 	return pool;
 }
 
+/**
+ * panthor_vm_heaps_size() - Calculate size of all heap chunks across all
+ * heaps over all the heap pools in a VM
+ * @pfile: File.
+ * @status: Memory status to be updated.
+ *
+ * Calculate all heap chunk sizes in all heap pools bound to a VM. If the VM
+ * is active, record the size as active as well.
+ */
+void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *status)
+{
+	struct panthor_vm *vm;
+	unsigned long i;
+
+	if (!pfile->vms)
+		return;
+
+	xa_for_each(&pfile->vms->xa, i, vm) {
+		size_t size;
+
+		mutex_lock(&vm->heaps.lock);
+		if (!vm->heaps.pool) {
+			mutex_unlock(&vm->heaps.lock);
+			continue;
+		}
+		size = panthor_heap_pool_size(vm->heaps.pool);
+		mutex_unlock(&vm->heaps.lock);
+
+		status->resident += size;
+		status->private += size;
+		if (vm->as.id >= 0)
+			status->active += size;
+	}
+}
+
 static u64 mair_to_memattr(u64 mair, bool coherent)
 {
 	u64 memattr = 0;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
index 8d21e83d8aba..2aeb2522cdfa 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.h
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -5,10 +5,12 @@ 
 #ifndef __PANTHOR_MMU_H__
 #define __PANTHOR_MMU_H__
 
+#include <linux/types.h>
 #include <linux/dma-resv.h>
 
 struct drm_exec;
 struct drm_sched_job;
+struct drm_memory_stats;
 struct panthor_gem_object;
 struct panthor_heap_pool;
 struct panthor_vm;
@@ -37,6 +39,8 @@  int panthor_vm_flush_all(struct panthor_vm *vm);
 struct panthor_heap_pool *
 panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
 
+void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *status);
+
 struct panthor_vm *panthor_vm_get(struct panthor_vm *vm);
 void panthor_vm_put(struct panthor_vm *vm);
 struct panthor_vm *panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 77b184c3fb0c..bb4b3ffadcd1 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -628,7 +628,7 @@  struct panthor_group {
 	 */
 	struct panthor_kernel_bo *syncobjs;
 
-	/** @fdinfo: Per-file total cycle and timestamp values reference. */
+	/** @fdinfo: Per-group total cycle and timestamp values and kernel BO sizes. */
 	struct {
 		/** @data: Total sampled values for jobs in queues from this group. */
 		struct panthor_gpu_usage data;
@@ -638,6 +638,9 @@  struct panthor_group {
 		 * and job post-completion processing function
 		 */
 		struct mutex lock;
+
+		/** @bo_sizes: Aggregate size of private kernel BO's held by the group. */
+		size_t kbo_sizes;
 	} fdinfo;
 
 	/** @state: Group state. */
@@ -3381,6 +3384,29 @@  group_create_queue(struct panthor_group *group,
 	return ERR_PTR(ret);
 }
 
+static void add_group_kbo_sizes(struct panthor_device *ptdev,
+				struct panthor_group *group)
+{
+	struct panthor_queue *queue;
+	int i;
+
+	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group)))
+		return;
+	if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev))
+		return;
+
+	group->fdinfo.kbo_sizes += group->suspend_buf->obj->size;
+	group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size;
+	group->fdinfo.kbo_sizes += group->syncobjs->obj->size;
+
+	for (i = 0; i < group->queue_count; i++) {
+		queue =	group->queues[i];
+		group->fdinfo.kbo_sizes += queue->ringbuf->obj->size;
+		group->fdinfo.kbo_sizes += queue->iface.mem->obj->size;
+		group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size;
+	}
+}
+
 #define MAX_GROUPS_PER_POOL		128
 
 int panthor_group_create(struct panthor_file *pfile,
@@ -3505,6 +3531,7 @@  int panthor_group_create(struct panthor_file *pfile,
 	}
 	mutex_unlock(&sched->reset.lock);
 
+	add_group_kbo_sizes(group->ptdev, group);
 	mutex_init(&group->fdinfo.lock);
 
 	return gid;
@@ -3624,6 +3651,29 @@  void panthor_group_pool_destroy(struct panthor_file *pfile)
 	pfile->groups = NULL;
 }
 
+/**
+ * panthor_group_kbo_sizes() - Retrieve aggregate size of all private kernel BO's
+ * belonging to all the groups owned by an open Panthor file
+ * @pfile: File.
+ * @status: Memory status to be updated.
+ *
+ */
+void panthor_group_kbo_sizes(struct panthor_file *pfile, struct drm_memory_stats *status)
+{
+	struct panthor_group_pool *gpool = pfile->groups;
+	struct panthor_group *group;
+	unsigned long i;
+
+	if (IS_ERR_OR_NULL(gpool))
+		return;
+	xa_for_each(&gpool->xa, i, group) {
+		status->resident += group->fdinfo.kbo_sizes;
+		status->private += group->fdinfo.kbo_sizes;
+		if (group->csg_id >= 0)
+			status->active += group->fdinfo.kbo_sizes;
+	}
+}
+
 static void job_release(struct kref *ref)
 {
 	struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
index 5ae6b4bde7c5..4dd6a7fc8fbd 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.h
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -4,11 +4,14 @@ 
 #ifndef __PANTHOR_SCHED_H__
 #define __PANTHOR_SCHED_H__
 
+#include <linux/types.h>
+
 struct drm_exec;
 struct dma_fence;
 struct drm_file;
 struct drm_gem_object;
 struct drm_sched_job;
+struct drm_memory_stats;
 struct drm_panthor_group_create;
 struct drm_panthor_queue_create;
 struct drm_panthor_group_get_state;
@@ -36,6 +39,7 @@  void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *job);
 
 int panthor_group_pool_create(struct panthor_file *pfile);
 void panthor_group_pool_destroy(struct panthor_file *pfile);
+void panthor_group_kbo_sizes(struct panthor_file *pfile, struct drm_memory_stats *status);
 
 int panthor_sched_init(struct panthor_device *ptdev);
 void panthor_sched_unplug(struct panthor_device *ptdev);