@@ -192,6 +192,11 @@ struct panthor_device {
unsigned long fast_rate;
};
+/**
+ * struct panthor_gpu_usage - Track granular GPU utilization metrics
+ */
+struct panthor_gpu_usage {
+ /** @time: Total time, in arch timer ticks, spent executing accounted jobs. */
+ u64 time;
+
+ /** @cycles: Total number of GPU cycles spent executing accounted jobs. */
+ u64 cycles;
+};
+
/**
* struct panthor_file - Panthor file
*/
@@ -204,6 +209,9 @@ struct panthor_file {
/** @groups: Scheduling group pool attached to this file. */
struct panthor_group_pool *groups;
+
+ /** @stats: Cycle and timestamp measures for job execution. */
+ struct panthor_gpu_usage stats;
};
int panthor_device_init(struct panthor_device *ptdev);
@@ -13,6 +13,7 @@
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/time64.h>
#include <drm/drm_auth.h>
#include <drm/drm_debugfs.h>
@@ -1414,6 +1415,37 @@ static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
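+/*
+ * Emit the per-file usage counters in the DRM fdinfo key:value format. With
+ * all profiling bits enabled, the output looks roughly like this
+ * (illustrative values):
+ *
+ *   drm-engine-panthor:  318492812 ns
+ *   drm-cycles-panthor:  94439687187
+ *   drm-maxfreq-panthor: 1000000000 Hz
+ *   drm-curfreq-panthor: 1000000000 Hz
+ */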
+static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
+ struct panthor_file *pfile,
+ struct drm_printer *p)
+{
+ if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_ALL)
+ panthor_fdinfo_gather_group_samples(pfile);
+
+ if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) {
+#ifdef CONFIG_ARM_ARCH_TIMER
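+ /* stats.time is accumulated in arch timer ticks; scale to ns using the counter frequency. */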
+ drm_printf(p, "drm-engine-panthor:\t%llu ns\n",
+ DIV_ROUND_UP_ULL((pfile->stats.time * NSEC_PER_SEC),
+ arch_timer_get_cntfrq()));
+#endif
+ }
+ if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_CYCLES)
+ drm_printf(p, "drm-cycles-panthor:\t%llu\n", pfile->stats.cycles);
+
+ drm_printf(p, "drm-maxfreq-panthor:\t%lu Hz\n", ptdev->fast_rate);
+ drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
+}
+
+static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
+{
+ struct drm_device *dev = file->minor->dev;
+ struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
+
+ panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);
+
+ drm_show_memory_stats(p, file);
+}
+
static const struct file_operations panthor_drm_driver_fops = {
.open = drm_open,
.release = drm_release,
@@ -1423,6 +1455,7 @@ static const struct file_operations panthor_drm_driver_fops = {
.read = drm_read,
.llseek = noop_llseek,
.mmap = panthor_mmap,
+ .show_fdinfo = drm_show_fdinfo,
};
#ifdef CONFIG_DEBUG_FS
@@ -1442,6 +1475,7 @@ static const struct drm_driver panthor_drm_driver = {
DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
.open = panthor_open,
.postclose = panthor_postclose,
+ .show_fdinfo = panthor_show_fdinfo,
.ioctls = panthor_drm_driver_ioctls,
.num_ioctls = ARRAY_SIZE(panthor_drm_driver_ioctls),
.fops = &panthor_drm_driver_fops,
@@ -619,6 +619,18 @@ struct panthor_group {
*/
struct panthor_kernel_bo *syncobjs;
+ /** @fdinfo: Per-group cycle and timestamp totals that feed the per-file fdinfo stats. */
+ struct {
+ /** @data: Total sampled values for jobs in queues from this group. */
+ struct panthor_gpu_usage data;
+
+ /**
+ * @lock: Mutex guarding concurrent access from the DRM file's fdinfo
+ * callback and the job post-completion processing function.
+ */
+ struct mutex lock;
+ } fdinfo;
+
/** @state: Group state. */
enum panthor_group_state state;
@@ -889,6 +901,8 @@ static void group_release_work(struct work_struct *work)
release_work);
u32 i;
+ mutex_destroy(&group->fdinfo.lock);
+
for (i = 0; i < group->queue_count; i++)
group_free_queue(group, group->queues[i]);
@@ -2811,6 +2825,44 @@ void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
}
}
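+/*
+ * Fold a completed job's cycle/timestamp samples into its group's fdinfo
+ * totals. The samples are read back from the queue's profiling slot that
+ * was used for this job.
+ */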
+static void update_fdinfo_stats(struct panthor_job *job)
+{
+ struct panthor_group *group = job->group;
+ struct panthor_queue *queue = group->queues[job->queue_idx];
+ struct panthor_gpu_usage *fdinfo = &group->fdinfo.data;
+ struct panthor_job_profiling_data *times;
+
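+ /* Locate this job's sample slot in the queue's profiling buffer. */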
+ times = (struct panthor_job_profiling_data *)
+ ((unsigned long)queue->profiling.slots->kmap +
+ (job->profiling.slot * sizeof(struct panthor_job_profiling_data)));
+
+ mutex_lock(&group->fdinfo.lock);
+ if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
+ fdinfo->cycles += times->cycles.after - times->cycles.before;
+ if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
+ fdinfo->time += times->time.after - times->time.before;
+ mutex_unlock(&group->fdinfo.lock);
+}
+
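+/*
+ * Fold the totals of every group in the file's pool into the per-file stats,
+ * then reset the group counters so each sample is only accounted once across
+ * successive fdinfo reads.
+ */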
+void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
+{
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_group *group;
+ unsigned long i;
+
+ if (IS_ERR_OR_NULL(gpool))
+ return;
+
+ xa_for_each(&gpool->xa, i, group) {
+ mutex_lock(&group->fdinfo.lock);
+ pfile->stats.cycles += group->fdinfo.data.cycles;
+ pfile->stats.time += group->fdinfo.data.time;
+ group->fdinfo.data.cycles = 0;
+ group->fdinfo.data.time = 0;
+ mutex_unlock(&group->fdinfo.lock);
+ }
+}
+
static void group_sync_upd_work(struct work_struct *work)
{
struct panthor_group *group =
@@ -2843,6 +2895,8 @@ static void group_sync_upd_work(struct work_struct *work)
dma_fence_end_signalling(cookie);
list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
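+ /* Only jobs sampled by the profiling layer contribute to fdinfo stats. */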
+ if (job->profiling.mask)
+ update_fdinfo_stats(job);
list_del_init(&job->node);
panthor_job_put(&job->base);
}
@@ -3421,6 +3475,8 @@ int panthor_group_create(struct panthor_file *pfile,
}
mutex_unlock(&sched->reset.lock);
+ mutex_init(&group->fdinfo.lock);
+
return gid;
err_put_group:
@@ -47,4 +47,6 @@ void panthor_sched_resume(struct panthor_device *ptdev);
void panthor_sched_report_mmu_fault(struct panthor_device *ptdev);
void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events);
+void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile);
+
#endif