[09/20] drm/i915/slpc: Allocate/Release/Initialize SLPC shared data

Message ID 1504250723-32018-10-git-send-email-sagar.a.kamble@intel.com (mailing list archive)

Commit Message

sagar.a.kamble@intel.com Sept. 1, 2017, 7:25 a.m. UTC
Populate SLPC shared data with required default values for
Slice count, Power source/plan, IA Perf MSRs.
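
The IA Perf levels above are taken from two MSR reads in
slpc_shared_data_init() in the diff below. As a minimal reference sketch
of that decode (bit positions match the shifts in the patch; the helper
name and the explicit low-byte masking are illustrative additions, not
part of the patch):

/* Illustrative decode of the IA Perf MSRs used to seed platform_info.
 * The ratios are in units of 100 MHz; the patch stores the shifted
 * values directly, masking is shown here only for clarity.
 */
static void slpc_decode_ia_perf_msrs(u8 *p0, u8 *p1, u8 *pe, u8 *pn)
{
	u64 val;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, val);
	*p0 = val & 0xff;		/* bits 7:0: 1-core max turbo ratio */

	rdmsrl(MSR_PLATFORM_INFO, val);
	*p1 = (val >> 8) & 0xff;	/* bits 15:8: max non-turbo ratio */
	*pe = (val >> 40) & 0xff;	/* bits 47:40: max efficiency ratio */
	*pn = (val >> 48) & 0xff;	/* bits 55:48: min operating ratio */
}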

v1: Update for SLPC interface version 2015.2.4
    intel_slpc_active() returns 1 if SLPC is initialized (Paulo)
    change default host_os to "Windows"
    Spelling fixes (Sagar Kamble and Nick Hoath)
    Added WARN to check that the upper 32 bits of the GTT offset
    of the shared object are zero. (ChrisW)
    Changed function call from gem_allocate/release_guc_obj to
    i915_guc_allocate/release_gem_obj. (Sagar)
    Updated commit message and moved POWER_PLAN and POWER_SOURCE
    definition from later patch. (Akash)
    Added struct_mutex locking while allocating/releasing the SLPC
    shared object; this was caught by CI BAT. Added an SLPC state
    variable to determine whether it is active, as that is not
    dependent on shared data setup alone.
    Rebase with guc_allocate_vma related changes.

v2: WARN_ON for platform_sku validity and space changes. (David)
    Checkpatch update.

v3: Fixed WARNING in igt@drv_module_reload_basic found in trybot BAT
    with SLPC enabled.

v4: Updated support for GuC v9. s/slice_total/hweight8(slice_mask)/ (Dave).

v5: SLPC vma mapping changes and removed explicit type conversions. (Chris)
    s/freq_unslice_max|min/unslice__max|min_freq.

v6: Commit message update. s/msr_value/val for reuse later.

v7: Set default values for tasks and min frequency parameters
    (see the parameter-override sketch after the diffstat below).
    Moved initialization to the point of data allocation so that
    parameters set before GuC load persist afterwards.

v8: Added check for SLPC status during cleanup of shared data. SLPC
    disabling is asynchronous and should complete within 10us.

v9: Enabled the Balancer task in SLPC.

Signed-off-by: Tom O'Rourke <Tom.O'Rourke@intel.com>
Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
---
 drivers/gpu/drm/i915/intel_drv.h  |   1 +
 drivers/gpu/drm/i915/intel_pm.c   |   2 +-
 drivers/gpu/drm/i915/intel_slpc.c | 167 ++++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_slpc.h |   1 +
 4 files changed, 170 insertions(+), 1 deletion(-)
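
The task and min-frequency defaults noted in v7 are written through the
parameter-override area this patch adds to the shared data, rather than
through dedicated fields. As a companion to slpc_mem_set_param() /
slpc_mem_unset_param() in the diff, a minimal sketch of how an override
is tracked (the query helper is illustrative and not part of the patch):

/* Each parameter id owns bit (id % 32) of dword (id >> 5) in
 * override_parameters_set_bits[], with its value stored at
 * override_parameters_values[id].  Illustrative helper, not in the patch.
 */
static bool slpc_mem_param_is_set(struct slpc_shared_data *data, u32 id)
{
	return data->override_parameters_set_bits[id >> 5] & BIT(id % 32);
}

slpc_mem_task_control() then maps SLPC_PARAM_TASK_DEFAULT/ENABLED/DISABLED
onto an enable/disable parameter pair: one override is set and the other
cleared, or both cleared for the default case.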

Patch

diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3292221..4633f19 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1866,6 +1866,7 @@  bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct drm_i915_gem_request *rq,
 		    struct intel_rps_client *rps);
+void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv);
 void g4x_wm_get_hw_state(struct drm_device *dev);
 void vlv_wm_get_hw_state(struct drm_device *dev);
 void ilk_wm_get_hw_state(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2452e2d..c6494a9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6378,7 +6378,7 @@  int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
 	return INTEL_RC6_ENABLE;
 }
 
-static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
+void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
 {
 	/* All of these values are in units of 50MHz */
 
diff --git a/drivers/gpu/drm/i915/intel_slpc.c b/drivers/gpu/drm/i915/intel_slpc.c
index a3db63c..818b9bc 100644
--- a/drivers/gpu/drm/i915/intel_slpc.c
+++ b/drivers/gpu/drm/i915/intel_slpc.c
@@ -22,6 +22,7 @@ 
  *
  */
 #include <linux/firmware.h>
+#include <asm/msr-index.h>
 #include "i915_drv.h"
 #include "intel_uc.h"
 
@@ -64,12 +65,178 @@  struct slpc_param slpc_paramlist[SLPC_MAX_PARAM] = {
 				"Enable IBC when non-Gaming Mode is enabled"}
 };
 
+static unsigned int slpc_get_platform_sku(struct drm_i915_private *dev_priv)
+{
+	enum slpc_platform_sku platform_sku;
+
+	if (IS_SKL_ULX(dev_priv))
+		platform_sku = SLPC_PLATFORM_SKU_ULX;
+	else if (IS_SKL_ULT(dev_priv))
+		platform_sku = SLPC_PLATFORM_SKU_ULT;
+	else
+		platform_sku = SLPC_PLATFORM_SKU_DT;
+
+	WARN_ON(platform_sku > 0xFF);
+
+	return platform_sku;
+}
+
+static unsigned int slpc_get_slice_count(struct drm_i915_private *dev_priv)
+{
+	unsigned int slice_count = 1;
+
+	if (IS_SKYLAKE(dev_priv))
+		slice_count = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask);
+
+	return slice_count;
+}
+
+void slpc_mem_set_param(struct slpc_shared_data *data,
+			      u32 id,
+			      u32 value)
+{
+	data->override_parameters_set_bits[id >> 5]
+						|= (1 << (id % 32));
+	data->override_parameters_values[id] = value;
+}
+
+void slpc_mem_unset_param(struct slpc_shared_data *data,
+				u32 id)
+{
+	data->override_parameters_set_bits[id >> 5]
+						&= (~(1 << (id % 32)));
+	data->override_parameters_values[id] = 0;
+}
+
+int slpc_mem_task_control(struct slpc_shared_data *data, u64 val,
+			  u32 enable_id, u32 disable_id)
+{
+	int ret = 0;
+
+	if (val == SLPC_PARAM_TASK_DEFAULT) {
+		/* set default */
+		slpc_mem_unset_param(data, enable_id);
+		slpc_mem_unset_param(data, disable_id);
+	} else if (val == SLPC_PARAM_TASK_ENABLED) {
+		/* set enable */
+		slpc_mem_set_param(data, enable_id, 1);
+		slpc_mem_unset_param(data, disable_id);
+	} else if (val == SLPC_PARAM_TASK_DISABLED) {
+		/* set disable */
+		slpc_mem_set_param(data, disable_id, 1);
+		slpc_mem_unset_param(data, enable_id);
+	} else {
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static void slpc_shared_data_init(struct intel_slpc *slpc)
+{
+	struct drm_i915_private *dev_priv = slpc_to_i915(slpc);
+	struct page *page;
+	struct slpc_shared_data *data;
+	u64 val;
+
+	page = i915_vma_first_page(slpc->vma);
+	data = kmap_atomic(page);
+
+	memset(data, 0, sizeof(struct slpc_shared_data));
+
+	data->shared_data_size = sizeof(struct slpc_shared_data);
+	data->global_state = SLPC_GLOBAL_STATE_NOT_RUNNING;
+	data->platform_info.platform_sku =
+				slpc_get_platform_sku(dev_priv);
+	data->platform_info.slice_count =
+				slpc_get_slice_count(dev_priv);
+	data->platform_info.power_plan_source =
+		SLPC_POWER_PLAN_SOURCE(SLPC_POWER_PLAN_BALANCED,
+					    SLPC_POWER_SOURCE_AC);
+	rdmsrl(MSR_TURBO_RATIO_LIMIT, val);
+	data->platform_info.P0_freq = val;
+	rdmsrl(MSR_PLATFORM_INFO, val);
+	data->platform_info.P1_freq = val >> 8;
+	data->platform_info.Pe_freq = val >> 40;
+	data->platform_info.Pn_freq = val >> 48;
+
+	/* Enable only GTPERF task, Disable others */
+	val = SLPC_PARAM_TASK_ENABLED;
+	slpc_mem_task_control(data, val,
+			      SLPC_PARAM_TASK_ENABLE_GTPERF,
+			      SLPC_PARAM_TASK_DISABLE_GTPERF);
+
+	slpc_mem_task_control(data, val,
+			      SLPC_PARAM_TASK_ENABLE_BALANCER,
+			      SLPC_PARAM_TASK_DISABLE_BALANCER);
+
+	val = SLPC_PARAM_TASK_DISABLED;
+	slpc_mem_task_control(data, val,
+			      SLPC_PARAM_TASK_ENABLE_DCC,
+			      SLPC_PARAM_TASK_DISABLE_DCC);
+
+	slpc_mem_set_param(data, SLPC_PARAM_GTPERF_THRESHOLD_MAX_FPS, 0);
+
+	slpc_mem_set_param(data, SLPC_PARAM_GTPERF_ENABLE_FRAMERATE_STALLING,
+			   0);
+
+	slpc_mem_set_param(data, SLPC_PARAM_GLOBAL_ENABLE_IA_GT_BALANCING,
+			   1);
+
+	slpc_mem_set_param(data,
+			   SLPC_PARAM_GLOBAL_ENABLE_ADAPTIVE_BURST_TURBO,
+			   0);
+
+	slpc_mem_set_param(data, SLPC_PARAM_GLOBAL_ENABLE_EVAL_MODE, 0);
+
+	slpc_mem_set_param(data,
+			   SLPC_PARAM_GLOBAL_ENABLE_BALANCER_IN_NON_GAMING_MODE,
+			   1);
+
+	slpc_mem_set_param(data,
+			   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+			   intel_gpu_freq(dev_priv,
+				dev_priv->rps.efficient_freq));
+	slpc_mem_set_param(data,
+			   SLPC_PARAM_GLOBAL_MIN_GT_SLICE_FREQ_MHZ,
+			   intel_gpu_freq(dev_priv,
+				dev_priv->rps.efficient_freq));
+
+	kunmap_atomic(data);
+}
+
 void intel_slpc_init(struct intel_slpc *slpc)
 {
+	struct intel_guc *guc = slpc_to_guc(slpc);
+	struct drm_i915_private *dev_priv = slpc_to_i915(slpc);
+	struct i915_vma *vma;
+
+	slpc->active = false;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	gen6_init_rps_frequencies(dev_priv);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	/* Allocate shared data structure */
+	vma = slpc->vma;
+	if (!vma) {
+		vma = intel_guc_allocate_vma(guc,
+			       PAGE_ALIGN(sizeof(struct slpc_shared_data)));
+		if (IS_ERR(vma)) {
+			DRM_ERROR("slpc_shared_data allocation failed\n");
+			i915.enable_slpc = 0;
+			return;
+		}
+
+		slpc->vma = vma;
+		slpc_shared_data_init(slpc);
+	}
 }
 
 void intel_slpc_cleanup(struct intel_slpc *slpc)
 {
+	/* Release shared data structure */
+	i915_vma_unpin_and_release(&slpc->vma);
 }
 
 void intel_slpc_enable(struct intel_slpc *slpc)
diff --git a/drivers/gpu/drm/i915/intel_slpc.h b/drivers/gpu/drm/i915/intel_slpc.h
index c6836b6..c894b05 100644
--- a/drivers/gpu/drm/i915/intel_slpc.h
+++ b/drivers/gpu/drm/i915/intel_slpc.h
@@ -26,6 +26,7 @@ 
 
 struct intel_slpc {
 	bool active;
+	struct i915_vma *vma;
 };
 
 enum slpc_status {