diff mbox series

drm/msm: Add gpu devfreq tuning debugfs

Message ID 20230109221938.1803322-1-robdclark@gmail.com (mailing list archive)
State New, archived
Headers show
Series drm/msm: Add gpu devfreq tuning debugfs | expand

Commit Message

Rob Clark Jan. 9, 2023, 10:19 p.m. UTC
From: Rob Clark <robdclark@chromium.org>

Make the handful of tuning knobs available visible via debugfs.

Signed-off-by: Rob Clark <robdclark@chromium.org>
---
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c |  2 +-
 drivers/gpu/drm/msm/msm_debugfs.c     | 12 ++++++++++++
 drivers/gpu/drm/msm/msm_drv.h         |  9 +++++++++
 drivers/gpu/drm/msm/msm_gpu.h         |  3 ---
 drivers/gpu/drm/msm/msm_gpu_devfreq.c |  6 ++++--
 5 files changed, 26 insertions(+), 6 deletions(-)

Comments

kernel test robot Jan. 10, 2023, 9:48 p.m. UTC | #1
Hi Rob,

I love your patch! Yet something to improve:

[auto build test ERROR on drm/drm-next]
[also build test ERROR on drm-exynos/exynos-drm-next drm-intel/for-linux-next drm-intel/for-linux-next-fixes drm-tip/drm-tip linus/master v6.2-rc3 next-20230110]
[cannot apply to drm-misc/drm-misc-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Rob-Clark/drm-msm-Add-gpu-devfreq-tuning-debugfs/20230110-062022
base:   git://anongit.freedesktop.org/drm/drm drm-next
patch link:    https://lore.kernel.org/r/20230109221938.1803322-1-robdclark%40gmail.com
patch subject: [PATCH] drm/msm: Add gpu devfreq tuning debugfs
config: s390-buildonly-randconfig-r002-20230109
compiler: s390-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/92628bf5ed1631fe692c34ca5632da8cf69ac37b
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Rob-Clark/drm-msm-Add-gpu-devfreq-tuning-debugfs/20230110-062022
        git checkout 92628bf5ed1631fe692c34ca5632da8cf69ac37b
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=s390 olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=s390 SHELL=/bin/bash drivers/gpu/

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   In file included from drivers/gpu/drm/msm/msm_gpu.h:18,
                    from drivers/gpu/drm/msm/adreno/adreno_gpu.h:15,
                    from drivers/gpu/drm/msm/adreno/adreno_device.c:9:
>> drivers/gpu/drm/msm/msm_drv.h:238:45: error: field 'gpu_devfreq_config' has incomplete type
     238 |         struct devfreq_simple_ondemand_data gpu_devfreq_config;
         |                                             ^~~~~~~~~~~~~~~~~~


vim +/gpu_devfreq_config +238 drivers/gpu/drm/msm/msm_drv.h

   107	
   108		struct drm_device *dev;
   109	
   110		struct msm_kms *kms;
   111		int (*kms_init)(struct drm_device *dev);
   112	
   113		/* subordinate devices, if present: */
   114		struct platform_device *gpu_pdev;
   115	
   116		/* possibly this should be in the kms component, but it is
   117		 * shared by both mdp4 and mdp5..
   118		 */
   119		struct hdmi *hdmi;
   120	
   121		/* DSI is shared by mdp4 and mdp5 */
   122		struct msm_dsi *dsi[2];
   123	
   124		struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT];
   125	
   126		/* when we have more than one 'msm_gpu' these need to be an array: */
   127		struct msm_gpu *gpu;
   128	
   129		/* gpu is only set on open(), but we need this info earlier */
   130		bool is_a2xx;
   131		bool has_cached_coherent;
   132	
   133		struct drm_fb_helper *fbdev;
   134	
   135		struct msm_rd_state *rd;       /* debugfs to dump all submits */
   136		struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
   137		struct msm_perf_state *perf;
   138	
   139		/**
   140		 * List of all GEM objects (mainly for debugfs, protected by obj_lock
   141		 * (acquire before per GEM object lock)
   142		 */
   143		struct list_head objects;
   144		struct mutex obj_lock;
   145	
   146		/**
   147		 * lru:
   148		 *
   149		 * The various LRU's that a GEM object is in at various stages of
   150		 * its lifetime.  Objects start out in the unbacked LRU.  When
   151		 * pinned (for scanout or permanently mapped GPU buffers, like
   152		 * ringbuffer, memptr, fw, etc) it moves to the pinned LRU.  When
   153		 * unpinned, it moves into willneed or dontneed LRU depending on
   154		 * madvise state.  When backing pages are evicted (willneed) or
   155		 * purged (dontneed) it moves back into the unbacked LRU.
   156		 *
   157		 * The dontneed LRU is considered by the shrinker for objects
   158		 * that are candidate for purging, and the willneed LRU is
   159		 * considered for objects that could be evicted.
   160		 */
   161		struct {
   162			/**
   163			 * unbacked:
   164			 *
   165			 * The LRU for GEM objects without backing pages allocated.
   166		 * This mostly exists so that objects are always in one
   167			 * LRU.
   168			 */
   169			struct drm_gem_lru unbacked;
   170	
   171			/**
   172			 * pinned:
   173			 *
   174			 * The LRU for pinned GEM objects
   175			 */
   176			struct drm_gem_lru pinned;
   177	
   178			/**
   179			 * willneed:
   180			 *
   181			 * The LRU for unpinned GEM objects which are in madvise
   182			 * WILLNEED state (ie. can be evicted)
   183			 */
   184			struct drm_gem_lru willneed;
   185	
   186			/**
   187			 * dontneed:
   188			 *
   189			 * The LRU for unpinned GEM objects which are in madvise
   190			 * DONTNEED state (ie. can be purged)
   191			 */
   192			struct drm_gem_lru dontneed;
   193	
   194			/**
   195			 * lock:
   196			 *
   197			 * Protects manipulation of all of the LRUs.
   198			 */
   199			struct mutex lock;
   200		} lru;
   201	
   202		struct workqueue_struct *wq;
   203	
   204		unsigned int num_crtcs;
   205		struct drm_crtc *crtcs[MAX_CRTCS];
   206	
   207		struct msm_drm_thread event_thread[MAX_CRTCS];
   208	
   209		unsigned int num_bridges;
   210		struct drm_bridge *bridges[MAX_BRIDGES];
   211	
   212		/* VRAM carveout, used when no IOMMU: */
   213		struct {
   214			unsigned long size;
   215			dma_addr_t paddr;
   216			/* NOTE: mm managed at the page level, size is in # of pages
   217			 * and position mm_node->start is in # of pages:
   218			 */
   219			struct drm_mm mm;
   220			spinlock_t lock; /* Protects drm_mm node allocation/removal */
   221		} vram;
   222	
   223		struct notifier_block vmap_notifier;
   224		struct shrinker shrinker;
   225	
   226		struct drm_atomic_state *pm_state;
   227	
   228		/**
   229		 * hangcheck_period: For hang detection, in ms
   230		 *
   231		 * Note that in practice, a submit/job will get at least two hangcheck
   232		 * periods, due to checking for progress being implemented as simply
   233		 * "have the CP position registers changed since last time?"
   234		 */
   235		unsigned int hangcheck_period;
   236	
   237		/** gpu_devfreq_config: Devfreq tuning config for the GPU. */
 > 238		struct devfreq_simple_ondemand_data gpu_devfreq_config;
   239	
   240		/**
   241		 * gpu_clamp_to_idle: Enable clamping to idle freq when inactive
   242		 */
   243		bool gpu_clamp_to_idle;
   244	
   245		/**
   246		 * disable_err_irq:
   247		 *
   248		 * Disable handling of GPU hw error interrupts, to force fallback to
   249		 * sw hangcheck timer.  Written (via debugfs) by igt tests to test
   250		 * the sw hangcheck mechanism.
   251		 */
   252		bool disable_err_irq;
   253	};
   254
diff mbox series

Patch

diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 36c8fb699b56..6f7401f2acda 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -2021,7 +2021,7 @@  struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 	 * to cause power supply issues:
 	 */
 	if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu))
-		gpu->clamp_to_idle = true;
+		priv->gpu_clamp_to_idle = true;
 
 	/* Check if there is a GMU phandle and set it up */
 	node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 95f4374ae21c..d6ecff0ab618 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -305,6 +305,7 @@  void msm_debugfs_init(struct drm_minor *minor)
 {
 	struct drm_device *dev = minor->dev;
 	struct msm_drm_private *priv = dev->dev_private;
+	struct dentry *gpu_devfreq;
 
 	drm_debugfs_create_files(msm_debugfs_list,
 				 ARRAY_SIZE(msm_debugfs_list),
@@ -325,6 +326,17 @@  void msm_debugfs_init(struct drm_minor *minor)
 	debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
 		dev, &shrink_fops);
 
+	gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root);
+
+	debugfs_create_bool("idle_clamp", 0600, gpu_devfreq,
+			    &priv->gpu_clamp_to_idle);
+
+	debugfs_create_u32("upthreshold", 0600, gpu_devfreq,
+			   &priv->gpu_devfreq_config.upthreshold);
+
+	debugfs_create_u32("downdifferential", 0600, gpu_devfreq,
+			   &priv->gpu_devfreq_config.downdifferential);
+
 	if (priv->kms && priv->kms->funcs->debugfs_init)
 		priv->kms->funcs->debugfs_init(priv->kms, minor);
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 876d8d5eec2f..6cb1c6d230e8 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -11,6 +11,7 @@ 
 #include <linux/kernel.h>
 #include <linux/clk.h>
 #include <linux/cpufreq.h>
+#include <linux/devfreq.h>
 #include <linux/module.h>
 #include <linux/component.h>
 #include <linux/platform_device.h>
@@ -234,6 +235,14 @@  struct msm_drm_private {
 	 */
 	unsigned int hangcheck_period;
 
+	/** gpu_devfreq_config: Devfreq tuning config for the GPU. */
+	struct devfreq_simple_ondemand_data gpu_devfreq_config;
+
+	/**
+	 * gpu_clamp_to_idle: Enable clamping to idle freq when inactive
+	 */
+	bool gpu_clamp_to_idle;
+
 	/**
 	 * disable_err_irq:
 	 *
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 651786bc55e5..9e36f6c9bc29 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -275,9 +275,6 @@  struct msm_gpu {
 
 	struct msm_gpu_state *crashstate;
 
-	/* Enable clamping to idle freq when inactive: */
-	bool clamp_to_idle;
-
 	/* True if the hardware supports expanded apriv (a650 and newer) */
 	bool hw_apriv;
 
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 025940eb08d1..0d7ff7ddc029 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -183,6 +183,7 @@  static bool has_devfreq(struct msm_gpu *gpu)
 void msm_devfreq_init(struct msm_gpu *gpu)
 {
 	struct msm_gpu_devfreq *df = &gpu->devfreq;
+	struct msm_drm_private *priv = gpu->dev->dev_private;
 
 	/* We need target support to do devfreq */
 	if (!gpu->funcs->gpu_busy)
@@ -209,7 +210,7 @@  void msm_devfreq_init(struct msm_gpu *gpu)
 
 	df->devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
 			&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
-			NULL);
+			&priv->gpu_devfreq_config);
 
 	if (IS_ERR(df->devfreq)) {
 		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
@@ -358,10 +359,11 @@  static void msm_devfreq_idle_work(struct kthread_work *work)
 	struct msm_gpu_devfreq *df = container_of(work,
 			struct msm_gpu_devfreq, idle_work.work);
 	struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
+	struct msm_drm_private *priv = gpu->dev->dev_private;
 
 	df->idle_time = ktime_get();
 
-	if (gpu->clamp_to_idle)
+	if (priv->gpu_clamp_to_idle)
 		dev_pm_qos_update_request(&df->idle_freq, 0);
 }