| Message ID | 20240903202541.430225-3-adrian.larumbe@collabora.com (mailing list archive) |
|---|---|
| State | New |
| Series | Support fdinfo runtime and memory stats on Panthor |
On Tue, 3 Sep 2024 21:25:36 +0100
Adrián Larumbe <adrian.larumbe@collabora.com> wrote:

> Drawing from the FW-calculated values in the previous commit, we can
> increase the numbers for an open file by collecting them from finished jobs
> when updating their group synchronisation objects.
>
> Display of fdinfo key-value pairs is governed by a flag that is by default
> disabled in the present commit, and supporting manual toggle of it will be
> the matter of a later commit.
>
> Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
> ---
>  drivers/gpu/drm/panthor/panthor_devfreq.c | 18 ++++++++-
>  drivers/gpu/drm/panthor/panthor_device.h  | 14 +++++++
>  drivers/gpu/drm/panthor/panthor_drv.c     | 35 ++++++++++++++++++
>  drivers/gpu/drm/panthor/panthor_sched.c   | 45 +++++++++++++++++++++++
>  4 files changed, 111 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c
> index c6d3c327cc24..9d0f891b9b53 100644
> --- a/drivers/gpu/drm/panthor/panthor_devfreq.c
> +++ b/drivers/gpu/drm/panthor/panthor_devfreq.c
> @@ -62,14 +62,20 @@ static void panthor_devfreq_update_utilization(struct panthor_devfreq *pdevfreq)
>  static int panthor_devfreq_target(struct device *dev, unsigned long *freq,
>  				  u32 flags)
>  {
> +	struct panthor_device *ptdev = dev_get_drvdata(dev);
>  	struct dev_pm_opp *opp;
> +	int err;
>
>  	opp = devfreq_recommended_opp(dev, freq, flags);
>  	if (IS_ERR(opp))
>  		return PTR_ERR(opp);
>  	dev_pm_opp_put(opp);
>
> -	return dev_pm_opp_set_rate(dev, *freq);
> +	err = dev_pm_opp_set_rate(dev, *freq);
> +	if (!err)
> +		ptdev->current_frequency = *freq;
> +
> +	return err;
>  }
>
>  static void panthor_devfreq_reset(struct panthor_devfreq *pdevfreq)
> @@ -130,6 +136,7 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
>  	struct panthor_devfreq *pdevfreq;
>  	struct dev_pm_opp *opp;
>  	unsigned long cur_freq;
> +	unsigned long freq = ULONG_MAX;
>  	int ret;
>
>  	pdevfreq = drmm_kzalloc(&ptdev->base, sizeof(*ptdev->devfreq), GFP_KERNEL);
> @@ -161,6 +168,7 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
>  		return PTR_ERR(opp);
>
>  	panthor_devfreq_profile.initial_freq = cur_freq;
> +	ptdev->current_frequency = cur_freq;
>
>  	/* Regulator coupling only takes care of synchronizing/balancing voltage
>  	 * updates, but the coupled regulator needs to be enabled manually.
> @@ -204,6 +212,14 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
>
>  	dev_pm_opp_put(opp);
>
> +	/* Find the fastest defined rate */
> +	opp = dev_pm_opp_find_freq_floor(dev, &freq);
> +	if (IS_ERR(opp))
> +		return PTR_ERR(opp);
> +	ptdev->fast_rate = freq;
> +
> +	dev_pm_opp_put(opp);
> +
>  	/*
>  	 * Setup default thresholds for the simple_ondemand governor.
>  	 * The values are chosen based on experiments.
> diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
> index a48e30d0af30..0e68f5a70d20 100644
> --- a/drivers/gpu/drm/panthor/panthor_device.h
> +++ b/drivers/gpu/drm/panthor/panthor_device.h
> @@ -184,6 +184,17 @@ struct panthor_device {
>
>  	/** @profile_mask: User-set profiling flags for job accounting. */
>  	u32 profile_mask;
> +
> +	/** @current_frequency: Device clock frequency at present. Set by DVFS*/
> +	unsigned long current_frequency;
> +
> +	/** @fast_rate: Maximum device clock frequency. Set by DVFS */
> +	unsigned long fast_rate;
> +};

Can we move the current_frequency/fast_rate retrieval in a separate patch?

> +
> +struct panthor_gpu_usage {
> +	u64 time;
> +	u64 cycles;
>  };
>
>  /**
> @@ -198,6 +209,9 @@ struct panthor_file {
>
>  	/** @groups: Scheduling group pool attached to this file. */
>  	struct panthor_group_pool *groups;
> +
> +	/** @stats: cycle and timestamp measures for job execution. */
> +	struct panthor_gpu_usage stats;
>  };
>
>  int panthor_device_init(struct panthor_device *ptdev);
> diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
> index b5e7b919f241..e18838754963 100644
> --- a/drivers/gpu/drm/panthor/panthor_drv.c
> +++ b/drivers/gpu/drm/panthor/panthor_drv.c
> @@ -3,12 +3,17 @@
>  /* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
>  /* Copyright 2019 Collabora ltd. */
>
> +#ifdef CONFIG_ARM_ARCH_TIMER
> +#include <asm/arch_timer.h>
> +#endif
> +
>  #include <linux/list.h>
>  #include <linux/module.h>
>  #include <linux/of_platform.h>
>  #include <linux/pagemap.h>
>  #include <linux/platform_device.h>
>  #include <linux/pm_runtime.h>
> +#include <linux/time64.h>
>
>  #include <drm/drm_debugfs.h>
>  #include <drm/drm_drv.h>
> @@ -1351,6 +1356,34 @@ static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
>  	return ret;
>  }
>
> +static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
> +				    struct panthor_file *pfile,
> +				    struct drm_printer *p)
> +{
> +	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) {
> +#ifdef CONFIG_ARM_ARCH_TIMER
> +		drm_printf(p, "drm-engine-panthor:\t%llu ns\n",
> +			   DIV_ROUND_UP_ULL((pfile->stats.time * NSEC_PER_SEC),
> +					    arch_timer_get_cntfrq()));
> +#endif
> +	}
> +	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_CYCLES)
> +		drm_printf(p, "drm-cycles-panthor:\t%llu\n", pfile->stats.cycles);

Don't know if that's an issue, but another thread might be updating the
stats while show_fdinfo() is run, which means the data you return might
be coming from two different sampling points.

> +
> +	drm_printf(p, "drm-maxfreq-panthor:\t%lu Hz\n", ptdev->fast_rate);
> +	drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
> +}
> +
> +static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
> +{
> +	struct drm_device *dev = file->minor->dev;
> +	struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
> +
> +	panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);
> +
> +	drm_show_memory_stats(p, file);
> +}
> +
>  static const struct file_operations panthor_drm_driver_fops = {
>  	.open = drm_open,
>  	.release = drm_release,
> @@ -1360,6 +1393,7 @@ static const struct file_operations panthor_drm_driver_fops = {
>  	.read = drm_read,
>  	.llseek = noop_llseek,
>  	.mmap = panthor_mmap,
> +	.show_fdinfo = drm_show_fdinfo,
>  };
>
>  #ifdef CONFIG_DEBUG_FS
> @@ -1378,6 +1412,7 @@ static const struct drm_driver panthor_drm_driver = {
>  			   DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
>  	.open = panthor_open,
>  	.postclose = panthor_postclose,
> +	.show_fdinfo = panthor_show_fdinfo,
>  	.ioctls = panthor_drm_driver_ioctls,
>  	.num_ioctls = ARRAY_SIZE(panthor_drm_driver_ioctls),
>  	.fops = &panthor_drm_driver_fops,
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index b087648bf59a..e69ab5175ae8 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -619,6 +619,18 @@ struct panthor_group {
>  	 */
>  	struct panthor_kernel_bo *syncobjs;
>
> +	/** @fdinfo: Per-file total cycle and timestamp values reference. */
> +	struct {
> +		/** @data: Pointer to actual per-file sample data. */
> +		struct panthor_gpu_usage *data;
> +
> +		/**
> +		 * @lock: Mutex to govern concurrent access from drm file's fdinfo callback
> +		 * and job post-completion processing function
> +		 */
> +		struct mutex lock;
> +	} fdinfo;
> +
>  	/** @state: Group state. */
>  	enum panthor_group_state state;
>
> @@ -886,6 +898,8 @@ static void group_release_work(struct work_struct *work)
>  						 release_work);
>  	u32 i;
>
> +	mutex_destroy(&group->fdinfo.lock);
> +
>  	for (i = 0; i < group->queue_count; i++)
>  		group_free_queue(group, group->queues[i]);
>
> @@ -2808,6 +2822,28 @@ void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
>  	}
>  }
>
> +static void update_fdinfo_stats(struct panthor_job *job)
> +{
> +	struct panthor_group *group = job->group;
> +	struct panthor_queue *queue = group->queues[job->queue_idx];
> +	struct panthor_gpu_usage *fdinfo;
> +	struct panthor_job_profiling_data *times;
> +
> +	times = (struct panthor_job_profiling_data *)
> +		((unsigned long)queue->profiling_info.slots->kmap +
> +		 (job->profiling_slot * sizeof(struct panthor_job_profiling_data)));
> +
> +	mutex_lock(&group->fdinfo.lock);
> +	if ((group->fdinfo.data)) {
> +		fdinfo = group->fdinfo.data;
> +		if (job->profile_mask & PANTHOR_DEVICE_PROFILING_CYCLES)
> +			fdinfo->cycles += times->cycles.after - times->cycles.before;
> +		if (job->profile_mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
> +			fdinfo->time += times->time.after - times->time.before;
> +	}
> +	mutex_unlock(&group->fdinfo.lock);
> +}
> +
>  static void group_sync_upd_work(struct work_struct *work)
>  {
>  	struct panthor_group *group =
> @@ -2840,6 +2876,8 @@ static void group_sync_upd_work(struct work_struct *work)
>  	dma_fence_end_signalling(cookie);
>
>  	list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
> +		if (job->profile_mask)
> +			update_fdinfo_stats(job);
>  		list_del_init(&job->node);
>  		panthor_job_put(&job->base);
>  	}
> @@ -3430,6 +3468,9 @@ int panthor_group_create(struct panthor_file *pfile,
>  	}
>  	mutex_unlock(&sched->reset.lock);
>
> +	group->fdinfo.data = &pfile->stats;
> +	mutex_init(&group->fdinfo.lock);
> +
>  	return gid;
>
>  err_put_group:
> @@ -3469,6 +3510,10 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
>  	mutex_unlock(&sched->lock);
>  	mutex_unlock(&sched->reset.lock);
>
> +	mutex_lock(&group->fdinfo.lock);
> +	group->fdinfo.data = NULL;
> +	mutex_unlock(&group->fdinfo.lock);
> +
>  	group_put(group);
>  	return 0;
>  }
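Following up on the sampling-consistency concern above: one possible direction, sketched below purely for illustration, is for the fdinfo callback to take a single snapshot of the per-file counters under a lock that the job-completion path also holds while accumulating. The stats_lock member is hypothetical and not part of this series; a seqcount- or u64_stats_sync-based scheme could achieve the same without blocking the reader.

/* Illustrative sketch only -- not what the patch implements. */
struct panthor_file {
	/* ... other members unchanged ... */

	/** @stats_lock: Hypothetical lock serializing readers and writers of @stats. */
	struct mutex stats_lock;

	/** @stats: cycle and timestamp measures for job execution. */
	struct panthor_gpu_usage stats;
};

static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
				    struct panthor_file *pfile,
				    struct drm_printer *p)
{
	struct panthor_gpu_usage snap;

	/* One coherent sample instead of two independent loads. */
	mutex_lock(&pfile->stats_lock);
	snap = pfile->stats;
	mutex_unlock(&pfile->stats_lock);

	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) {
#ifdef CONFIG_ARM_ARCH_TIMER
		drm_printf(p, "drm-engine-panthor:\t%llu ns\n",
			   DIV_ROUND_UP_ULL(snap.time * NSEC_PER_SEC,
					    arch_timer_get_cntfrq()));
#endif
	}
	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_CYCLES)
		drm_printf(p, "drm-cycles-panthor:\t%llu\n", snap.cycles);

	drm_printf(p, "drm-maxfreq-panthor:\t%lu Hz\n", ptdev->fast_rate);
	drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
}

In such a scheme update_fdinfo_stats() would take the same lock around the cycles/time accumulation, and the lock would be initialized in panthor_open() and destroyed in panthor_postclose(); reusing the existing group->fdinfo.lock instead would also work, at the cost of the reader having to walk the group pool.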
Hi Adrián,

kernel test robot noticed the following build warnings:

[auto build test WARNING on drm-misc/drm-misc-next]
[also build test WARNING on linus/master v6.11-rc6 next-20240904]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Adri-n-Larumbe/drm-panthor-introduce-job-cycle-and-timestamp-accounting/20240904-042645
base:   git://anongit.freedesktop.org/drm/drm-misc drm-misc-next
patch link:    https://lore.kernel.org/r/20240903202541.430225-3-adrian.larumbe%40collabora.com
patch subject: [PATCH v5 2/4] drm/panthor: add DRM fdinfo support
config: x86_64-buildonly-randconfig-002-20240904 (https://download.01.org/0day-ci/archive/20240905/202409050134.uxrIkhqc-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240905/202409050134.uxrIkhqc-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202409050134.uxrIkhqc-lkp@intel.com/

All warnings (new ones prefixed by >>):

   drivers/gpu/drm/panthor/panthor_sched.c:322: warning: Excess struct member 'runnable' description in 'panthor_scheduler'
   drivers/gpu/drm/panthor/panthor_sched.c:322: warning: Excess struct member 'idle' description in 'panthor_scheduler'
   drivers/gpu/drm/panthor/panthor_sched.c:322: warning: Excess struct member 'waiting' description in 'panthor_scheduler'
   drivers/gpu/drm/panthor/panthor_sched.c:322: warning: Excess struct member 'has_ref' description in 'panthor_scheduler'
   drivers/gpu/drm/panthor/panthor_sched.c:322: warning: Excess struct member 'in_progress' description in 'panthor_scheduler'
   drivers/gpu/drm/panthor/panthor_sched.c:322: warning: Excess struct member 'stopped_groups' description in 'panthor_scheduler'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'mem' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'input' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'output' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'input_fw_va' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'output_fw_va' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'gpu_va' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'ref' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'gt' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'sync64' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'bo' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'offset' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'kmap' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'lock' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'id' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'seqno' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'last_fence' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'in_flight_jobs' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'slots' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'slot_count' description in 'panthor_queue'
   drivers/gpu/drm/panthor/panthor_sched.c:494: warning: Excess struct member 'profiling_seqno' description in 'panthor_queue'
>> drivers/gpu/drm/panthor/panthor_sched.c:689: warning: Excess struct member 'data' description in 'panthor_group'
>> drivers/gpu/drm/panthor/panthor_sched.c:689: warning: Excess struct member 'lock' description in 'panthor_group'
   drivers/gpu/drm/panthor/panthor_sched.c:822: warning: Function parameter or struct member 'profiling_slot' not described in 'panthor_job'
   drivers/gpu/drm/panthor/panthor_sched.c:822: warning: Excess struct member 'start' description in 'panthor_job'
   drivers/gpu/drm/panthor/panthor_sched.c:822: warning: Excess struct member 'size' description in 'panthor_job'
   drivers/gpu/drm/panthor/panthor_sched.c:822: warning: Excess struct member 'latest_flush' description in 'panthor_job'
   drivers/gpu/drm/panthor/panthor_sched.c:822: warning: Excess struct member 'start' description in 'panthor_job'
   drivers/gpu/drm/panthor/panthor_sched.c:822: warning: Excess struct member 'end' description in 'panthor_job'
   drivers/gpu/drm/panthor/panthor_sched.c:822: warning: Excess struct member 'profile_slot' description in 'panthor_job'
   drivers/gpu/drm/panthor/panthor_sched.c:1745: warning: Function parameter or struct member 'ptdev' not described in 'panthor_sched_report_fw_events'
   drivers/gpu/drm/panthor/panthor_sched.c:1745: warning: Function parameter or struct member 'events' not described in 'panthor_sched_report_fw_events'
   drivers/gpu/drm/panthor/panthor_sched.c:2637: warning: Function parameter or struct member 'ptdev' not described in 'panthor_sched_report_mmu_fault'

vim +689 drivers/gpu/drm/panthor/panthor_sched.c

de85488138247d0 Boris Brezillon 2024-02-29  531  
de85488138247d0 Boris Brezillon 2024-02-29  532  /**
de85488138247d0 Boris Brezillon 2024-02-29  533   * struct panthor_group - Scheduling group object
de85488138247d0 Boris Brezillon 2024-02-29  534   */
de85488138247d0 Boris Brezillon 2024-02-29  535  struct panthor_group {
de85488138247d0 Boris Brezillon 2024-02-29  536  	/** @refcount: Reference count */
de85488138247d0 Boris Brezillon 2024-02-29  537  	struct kref refcount;
de85488138247d0 Boris Brezillon 2024-02-29  538  
de85488138247d0 Boris Brezillon 2024-02-29  539  	/** @ptdev: Device. */
de85488138247d0 Boris Brezillon 2024-02-29  540  	struct panthor_device *ptdev;
de85488138247d0 Boris Brezillon 2024-02-29  541  
de85488138247d0 Boris Brezillon 2024-02-29  542  	/** @vm: VM bound to the group. */
de85488138247d0 Boris Brezillon 2024-02-29  543  	struct panthor_vm *vm;
de85488138247d0 Boris Brezillon 2024-02-29  544  
de85488138247d0 Boris Brezillon 2024-02-29  545  	/** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */
de85488138247d0 Boris Brezillon 2024-02-29  546  	u64 compute_core_mask;
de85488138247d0 Boris Brezillon 2024-02-29  547  
de85488138247d0 Boris Brezillon 2024-02-29  548  	/** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */
de85488138247d0 Boris Brezillon 2024-02-29  549  	u64 fragment_core_mask;
de85488138247d0 Boris Brezillon 2024-02-29  550  
de85488138247d0 Boris Brezillon 2024-02-29  551  	/** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */
de85488138247d0 Boris Brezillon 2024-02-29  552  	u64 tiler_core_mask;
de85488138247d0 Boris Brezillon 2024-02-29  553  
de85488138247d0 Boris Brezillon 2024-02-29  554  	/** @max_compute_cores: Maximum number of shader cores used for compute jobs. */
de85488138247d0 Boris Brezillon 2024-02-29  555  	u8 max_compute_cores;
de85488138247d0 Boris Brezillon 2024-02-29  556  
be7ffc821f5fc2e Liviu Dudau     2024-04-02  557  	/** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */
de85488138247d0 Boris Brezillon 2024-02-29  558  	u8 max_fragment_cores;
de85488138247d0 Boris Brezillon 2024-02-29  559  
de85488138247d0 Boris Brezillon 2024-02-29  560  	/** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */
de85488138247d0 Boris Brezillon 2024-02-29  561  	u8 max_tiler_cores;
de85488138247d0 Boris Brezillon 2024-02-29  562  
de85488138247d0 Boris Brezillon 2024-02-29  563  	/** @priority: Group priority (check panthor_csg_priority). */
de85488138247d0 Boris Brezillon 2024-02-29  564  	u8 priority;
de85488138247d0 Boris Brezillon 2024-02-29  565  
de85488138247d0 Boris Brezillon 2024-02-29  566  	/** @blocked_queues: Bitmask reflecting the blocked queues. */
de85488138247d0 Boris Brezillon 2024-02-29  567  	u32 blocked_queues;
de85488138247d0 Boris Brezillon 2024-02-29  568  
de85488138247d0 Boris Brezillon 2024-02-29  569  	/** @idle_queues: Bitmask reflecting the idle queues. */
de85488138247d0 Boris Brezillon 2024-02-29  570  	u32 idle_queues;
de85488138247d0 Boris Brezillon 2024-02-29  571  
de85488138247d0 Boris Brezillon 2024-02-29  572  	/** @fatal_lock: Lock used to protect access to fatal fields. */
de85488138247d0 Boris Brezillon 2024-02-29  573  	spinlock_t fatal_lock;
de85488138247d0 Boris Brezillon 2024-02-29  574  
de85488138247d0 Boris Brezillon 2024-02-29  575  	/** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */
de85488138247d0 Boris Brezillon 2024-02-29  576  	u32 fatal_queues;
de85488138247d0 Boris Brezillon 2024-02-29  577  
de85488138247d0 Boris Brezillon 2024-02-29  578  	/** @tiler_oom: Mask of queues that have a tiler OOM event to process. */
de85488138247d0 Boris Brezillon 2024-02-29  579  	atomic_t tiler_oom;
de85488138247d0 Boris Brezillon 2024-02-29  580  
de85488138247d0 Boris Brezillon 2024-02-29  581  	/** @queue_count: Number of queues in this group. */
de85488138247d0 Boris Brezillon 2024-02-29  582  	u32 queue_count;
de85488138247d0 Boris Brezillon 2024-02-29  583  
de85488138247d0 Boris Brezillon 2024-02-29  584  	/** @queues: Queues owned by this group. */
de85488138247d0 Boris Brezillon 2024-02-29  585  	struct panthor_queue *queues[MAX_CS_PER_CSG];
de85488138247d0 Boris Brezillon 2024-02-29  586  
de85488138247d0 Boris Brezillon 2024-02-29  587  	/**
de85488138247d0 Boris Brezillon 2024-02-29  588  	 * @csg_id: ID of the FW group slot.
de85488138247d0 Boris Brezillon 2024-02-29  589  	 *
de85488138247d0 Boris Brezillon 2024-02-29  590  	 * -1 when the group is not scheduled/active.
de85488138247d0 Boris Brezillon 2024-02-29  591  	 */
de85488138247d0 Boris Brezillon 2024-02-29  592  	int csg_id;
de85488138247d0 Boris Brezillon 2024-02-29  593  
de85488138247d0 Boris Brezillon 2024-02-29  594  	/**
de85488138247d0 Boris Brezillon 2024-02-29  595  	 * @destroyed: True when the group has been destroyed.
de85488138247d0 Boris Brezillon 2024-02-29  596  	 *
de85488138247d0 Boris Brezillon 2024-02-29  597  	 * If a group is destroyed it becomes useless: no further jobs can be submitted
de85488138247d0 Boris Brezillon 2024-02-29  598  	 * to its queues. We simply wait for all references to be dropped so we can
de85488138247d0 Boris Brezillon 2024-02-29  599  	 * release the group object.
de85488138247d0 Boris Brezillon 2024-02-29  600  	 */
de85488138247d0 Boris Brezillon 2024-02-29  601  	bool destroyed;
de85488138247d0 Boris Brezillon 2024-02-29  602  
de85488138247d0 Boris Brezillon 2024-02-29  603  	/**
de85488138247d0 Boris Brezillon 2024-02-29  604  	 * @timedout: True when a timeout occurred on any of the queues owned by
de85488138247d0 Boris Brezillon 2024-02-29  605  	 * this group.
de85488138247d0 Boris Brezillon 2024-02-29  606  	 *
de85488138247d0 Boris Brezillon 2024-02-29  607  	 * Timeouts can be reported by drm_sched or by the FW. In any case, any
de85488138247d0 Boris Brezillon 2024-02-29  608  	 * timeout situation is unrecoverable, and the group becomes useless.
de85488138247d0 Boris Brezillon 2024-02-29  609  	 * We simply wait for all references to be dropped so we can release the
de85488138247d0 Boris Brezillon 2024-02-29  610  	 * group object.
de85488138247d0 Boris Brezillon 2024-02-29  611  	 */
de85488138247d0 Boris Brezillon 2024-02-29  612  	bool timedout;
de85488138247d0 Boris Brezillon 2024-02-29  613  
de85488138247d0 Boris Brezillon 2024-02-29  614  	/**
de85488138247d0 Boris Brezillon 2024-02-29  615  	 * @syncobjs: Pool of per-queue synchronization objects.
de85488138247d0 Boris Brezillon 2024-02-29  616  	 *
de85488138247d0 Boris Brezillon 2024-02-29  617  	 * One sync object per queue. The position of the sync object is
de85488138247d0 Boris Brezillon 2024-02-29  618  	 * determined by the queue index.
de85488138247d0 Boris Brezillon 2024-02-29  619  	 */
de85488138247d0 Boris Brezillon 2024-02-29  620  	struct panthor_kernel_bo *syncobjs;
de85488138247d0 Boris Brezillon 2024-02-29  621  
d7baaf2591f58fc Adrián Larumbe  2024-09-03  622  	/** @fdinfo: Per-file total cycle and timestamp values reference. */
d7baaf2591f58fc Adrián Larumbe  2024-09-03  623  	struct {
d7baaf2591f58fc Adrián Larumbe  2024-09-03  624  		/** @data: Pointer to actual per-file sample data. */
d7baaf2591f58fc Adrián Larumbe  2024-09-03  625  		struct panthor_gpu_usage *data;
d7baaf2591f58fc Adrián Larumbe  2024-09-03  626  
d7baaf2591f58fc Adrián Larumbe  2024-09-03  627  		/**
d7baaf2591f58fc Adrián Larumbe  2024-09-03  628  		 * @lock: Mutex to govern concurrent access from drm file's fdinfo callback
d7baaf2591f58fc Adrián Larumbe  2024-09-03  629  		 * and job post-completion processing function
d7baaf2591f58fc Adrián Larumbe  2024-09-03  630  		 */
d7baaf2591f58fc Adrián Larumbe  2024-09-03  631  		struct mutex lock;
d7baaf2591f58fc Adrián Larumbe  2024-09-03  632  	} fdinfo;
d7baaf2591f58fc Adrián Larumbe  2024-09-03  633  
de85488138247d0 Boris Brezillon 2024-02-29  634  	/** @state: Group state. */
de85488138247d0 Boris Brezillon 2024-02-29  635  	enum panthor_group_state state;
de85488138247d0 Boris Brezillon 2024-02-29  636  
de85488138247d0 Boris Brezillon 2024-02-29  637  	/**
de85488138247d0 Boris Brezillon 2024-02-29  638  	 * @suspend_buf: Suspend buffer.
de85488138247d0 Boris Brezillon 2024-02-29  639  	 *
de85488138247d0 Boris Brezillon 2024-02-29  640  	 * Stores the state of the group and its queues when a group is suspended.
de85488138247d0 Boris Brezillon 2024-02-29  641  	 * Used at resume time to restore the group in its previous state.
de85488138247d0 Boris Brezillon 2024-02-29  642  	 *
de85488138247d0 Boris Brezillon 2024-02-29  643  	 * The size of the suspend buffer is exposed through the FW interface.
de85488138247d0 Boris Brezillon 2024-02-29  644  	 */
de85488138247d0 Boris Brezillon 2024-02-29  645  	struct panthor_kernel_bo *suspend_buf;
de85488138247d0 Boris Brezillon 2024-02-29  646  
de85488138247d0 Boris Brezillon 2024-02-29  647  	/**
de85488138247d0 Boris Brezillon 2024-02-29  648  	 * @protm_suspend_buf: Protection mode suspend buffer.
de85488138247d0 Boris Brezillon 2024-02-29  649  	 *
de85488138247d0 Boris Brezillon 2024-02-29  650  	 * Stores the state of the group and its queues when a group that's in
de85488138247d0 Boris Brezillon 2024-02-29  651  	 * protection mode is suspended.
de85488138247d0 Boris Brezillon 2024-02-29  652  	 *
de85488138247d0 Boris Brezillon 2024-02-29  653  	 * Used at resume time to restore the group in its previous state.
de85488138247d0 Boris Brezillon 2024-02-29  654  	 *
de85488138247d0 Boris Brezillon 2024-02-29  655  	 * The size of the protection mode suspend buffer is exposed through the
de85488138247d0 Boris Brezillon 2024-02-29  656  	 * FW interface.
de85488138247d0 Boris Brezillon 2024-02-29  657  	 */
de85488138247d0 Boris Brezillon 2024-02-29  658  	struct panthor_kernel_bo *protm_suspend_buf;
de85488138247d0 Boris Brezillon 2024-02-29  659  
de85488138247d0 Boris Brezillon 2024-02-29  660  	/** @sync_upd_work: Work used to check/signal job fences. */
de85488138247d0 Boris Brezillon 2024-02-29  661  	struct work_struct sync_upd_work;
de85488138247d0 Boris Brezillon 2024-02-29  662  
de85488138247d0 Boris Brezillon 2024-02-29  663  	/** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */
de85488138247d0 Boris Brezillon 2024-02-29  664  	struct work_struct tiler_oom_work;
de85488138247d0 Boris Brezillon 2024-02-29  665  
de85488138247d0 Boris Brezillon 2024-02-29  666  	/** @term_work: Work used to finish the group termination procedure. */
de85488138247d0 Boris Brezillon 2024-02-29  667  	struct work_struct term_work;
de85488138247d0 Boris Brezillon 2024-02-29  668  
de85488138247d0 Boris Brezillon 2024-02-29  669  	/**
de85488138247d0 Boris Brezillon 2024-02-29  670  	 * @release_work: Work used to release group resources.
de85488138247d0 Boris Brezillon 2024-02-29  671  	 *
de85488138247d0 Boris Brezillon 2024-02-29  672  	 * We need to postpone the group release to avoid a deadlock when
de85488138247d0 Boris Brezillon 2024-02-29  673  	 * the last ref is released in the tick work.
de85488138247d0 Boris Brezillon 2024-02-29  674  	 */
de85488138247d0 Boris Brezillon 2024-02-29  675  	struct work_struct release_work;
de85488138247d0 Boris Brezillon 2024-02-29  676  
de85488138247d0 Boris Brezillon 2024-02-29  677  	/**
de85488138247d0 Boris Brezillon 2024-02-29  678  	 * @run_node: Node used to insert the group in the
de85488138247d0 Boris Brezillon 2024-02-29  679  	 * panthor_group::groups::{runnable,idle} and
de85488138247d0 Boris Brezillon 2024-02-29  680  	 * panthor_group::reset.stopped_groups lists.
de85488138247d0 Boris Brezillon 2024-02-29  681  	 */
de85488138247d0 Boris Brezillon 2024-02-29  682  	struct list_head run_node;
de85488138247d0 Boris Brezillon 2024-02-29  683  
de85488138247d0 Boris Brezillon 2024-02-29  684  	/**
de85488138247d0 Boris Brezillon 2024-02-29  685  	 * @wait_node: Node used to insert the group in the
de85488138247d0 Boris Brezillon 2024-02-29  686  	 * panthor_group::groups::waiting list.
de85488138247d0 Boris Brezillon 2024-02-29  687  	 */
de85488138247d0 Boris Brezillon 2024-02-29  688  	struct list_head wait_node;
de85488138247d0 Boris Brezillon 2024-02-29 @689  };
de85488138247d0 Boris Brezillon 2024-02-29  690  
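The two warnings flagged as new (@data and @lock reported as excess members of panthor_group at line 689) come from the kernel-doc comments placed on members of the anonymous fdinfo struct. A minimal sketch of one way such warnings are sometimes avoided, namely giving the nested struct a name with its own kernel-doc block, is shown below; the type name panthor_fdinfo_group_stats is invented for this example and this is not necessarily how the author will resolve the warning.

/**
 * struct panthor_fdinfo_group_stats - Per-file usage totals referenced by a group
 *
 * Hypothetical illustration only: with a named type, kernel-doc matches
 * @data and @lock against this struct rather than expecting them to be
 * members of struct panthor_group.
 */
struct panthor_fdinfo_group_stats {
	/** @data: Pointer to the per-file sample data. */
	struct panthor_gpu_usage *data;

	/**
	 * @lock: Mutex to govern concurrent access from the drm file's fdinfo
	 * callback and the job post-completion processing function.
	 */
	struct mutex lock;
};

struct panthor_group {
	/* ... existing members ... */

	/** @fdinfo: Per-file total cycle and timestamp values reference. */
	struct panthor_fdinfo_group_stats fdinfo;
};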
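For context on what the series exposes to userspace: once the profiling flags are enabled, the new per-client keys appear in /proc/<pid>/fdinfo/<fd> for any file descriptor open on the panthor render node, next to the common keys emitted by the DRM core and the memory keys added by drm_show_memory_stats(). A rough sketch of such an entry, with purely illustrative values:

drm-driver:             panthor
drm-client-id:          42
drm-engine-panthor:     1484630541 ns
drm-cycles-panthor:     1043452560
drm-maxfreq-panthor:    1000000000 Hz
drm-curfreq-panthor:    799999988 Hz

Tools that aggregate per-client GPU usage from DRM fdinfo (gputop-style utilities) consume exactly these key-value pairs.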