@@ -1876,6 +1876,7 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_gem_request *rq,
struct intel_rps_client *rps);
+void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv);
void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
@@ -6486,7 +6486,7 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
return INTEL_RC6_ENABLE;
}
-static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
+void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
/* All of these values are in units of 50MHz */
@@ -22,6 +22,7 @@
*
*/
#include <linux/firmware.h>
+#include <asm/msr-index.h>
#include "i915_drv.h"
#include "intel_uc.h"
@@ -64,12 +65,178 @@ struct slpc_param slpc_paramlist[SLPC_MAX_PARAM] = {
"Enable IBC when non-Gaming Mode is enabled"}
};
+/*
+ * slpc_get_platform_sku - classify the device into a SLPC platform SKU id.
+ *
+ * Skylake ULX and ULT parts get their dedicated SKU ids; everything else
+ * falls back to the desktop (DT) SKU.  The WARN_ON flags values that do
+ * not fit in a byte -- presumably because the corresponding shared-data
+ * field is 8 bits wide; TODO confirm against struct slpc_shared_data.
+ */
+static unsigned int slpc_get_platform_sku(struct drm_i915_private *dev_priv)
+{
+	enum slpc_platform_sku platform_sku;
+
+	if (IS_SKL_ULX(dev_priv))
+		platform_sku = SLPC_PLATFORM_SKU_ULX;
+	else if (IS_SKL_ULT(dev_priv))
+		platform_sku = SLPC_PLATFORM_SKU_ULT;
+	else
+		platform_sku = SLPC_PLATFORM_SKU_DT;
+
+	WARN_ON(platform_sku > 0xFF);
+
+	return platform_sku;
+}
+
+/*
+ * slpc_get_slice_count - number of enabled GT slices to report to SLPC.
+ *
+ * On Skylake the count is derived from the fused slice mask; all other
+ * platforms report a single slice.
+ */
+static unsigned int slpc_get_slice_count(struct drm_i915_private *dev_priv)
+{
+	unsigned int slice_count = 1;
+
+	if (IS_SKYLAKE(dev_priv))
+		slice_count = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask);
+
+	return slice_count;
+}
+
+/*
+ * slpc_mem_set_param - latch a parameter override in the shared data.
+ *
+ * Marks parameter @id as overridden and records @value for the firmware
+ * to pick up.  The set-bits array packs one bit per parameter id:
+ * id >> 5 selects the 32-bit word, id % 32 the bit within that word.
+ */
+void slpc_mem_set_param(struct slpc_shared_data *data,
+			u32 id,
+			u32 value)
+{
+	data->override_parameters_set_bits[id >> 5]
+		|= (1 << (id % 32));
+	data->override_parameters_values[id] = value;
+}
+
+/*
+ * slpc_mem_unset_param - revert a parameter to its firmware default.
+ *
+ * Clears the override bit for @id (inverse of slpc_mem_set_param's
+ * word/bit indexing) and zeroes the stale override value.
+ */
+void slpc_mem_unset_param(struct slpc_shared_data *data,
+			  u32 id)
+{
+	data->override_parameters_set_bits[id >> 5]
+		&= (~(1 << (id % 32)));
+	data->override_parameters_values[id] = 0;
+}
+
+/*
+ * slpc_mem_task_control - apply a tri-state control to a task's
+ * enable/disable override parameter pair.
+ *
+ * @val selects the state: DEFAULT clears both overrides (firmware
+ * decides), ENABLED sets the enable override and clears the disable
+ * one, DISABLED does the opposite.
+ *
+ * Returns 0 on success, -EINVAL if @val is not one of the three
+ * recognised SLPC_PARAM_TASK_* states.
+ */
+int slpc_mem_task_control(struct slpc_shared_data *data, u64 val,
+			  u32 enable_id, u32 disable_id)
+{
+	int ret = 0;
+
+	if (val == SLPC_PARAM_TASK_DEFAULT) {
+		/* set default */
+		slpc_mem_unset_param(data, enable_id);
+		slpc_mem_unset_param(data, disable_id);
+	} else if (val == SLPC_PARAM_TASK_ENABLED) {
+		/* set enable */
+		slpc_mem_set_param(data, enable_id, 1);
+		slpc_mem_unset_param(data, disable_id);
+	} else if (val == SLPC_PARAM_TASK_DISABLED) {
+		/* set disable */
+		slpc_mem_set_param(data, disable_id, 1);
+		slpc_mem_unset_param(data, enable_id);
+	} else {
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * slpc_shared_data_init - populate the GuC SLPC shared page.
+ *
+ * Zeroes the shared structure, then fills in static platform info
+ * (SKU, slice count, power plan/source, frequency points read from
+ * MSRs) and the default set of task/parameter overrides.
+ *
+ * NOTE(review): the page is mapped with kmap_atomic(), so everything
+ * between here and kunmap_atomic() runs in atomic context -- no
+ * sleeping calls may be added inside this window.
+ */
+static void slpc_shared_data_init(struct intel_slpc *slpc)
+{
+	struct drm_i915_private *dev_priv = slpc_to_i915(slpc);
+	struct page *page;
+	struct slpc_shared_data *data;
+	u64 val;
+
+	page = i915_vma_first_page(slpc->vma);
+	data = kmap_atomic(page);
+
+	memset(data, 0, sizeof(struct slpc_shared_data));
+
+	data->shared_data_size = sizeof(struct slpc_shared_data);
+	data->global_state = SLPC_GLOBAL_STATE_NOT_RUNNING;
+	data->platform_info.platform_sku =
+				slpc_get_platform_sku(dev_priv);
+	data->platform_info.slice_count =
+				slpc_get_slice_count(dev_priv);
+	data->platform_info.power_plan_source =
+		SLPC_POWER_PLAN_SOURCE(SLPC_POWER_PLAN_BALANCED,
+					    SLPC_POWER_SOURCE_AC);
+	/*
+	 * Frequency points from CPU MSRs: P0 from the turbo ratio limit,
+	 * P1/Pe/Pn from bit fields of MSR_PLATFORM_INFO at offsets
+	 * 8/40/48.  The shifts leave high bits in place, so the struct
+	 * fields presumably truncate to their declared widths -- TODO
+	 * confirm field sizes in struct slpc_platform_info.
+	 */
+	rdmsrl(MSR_TURBO_RATIO_LIMIT, val);
+	data->platform_info.P0_freq = val;
+	rdmsrl(MSR_PLATFORM_INFO, val);
+	data->platform_info.P1_freq = val >> 8;
+	data->platform_info.Pe_freq = val >> 40;
+	data->platform_info.Pn_freq = val >> 48;
+
+	/* Enable only GTPERF task, Disable others */
+	val = SLPC_PARAM_TASK_ENABLED;
+	slpc_mem_task_control(data, val,
+			      SLPC_PARAM_TASK_ENABLE_GTPERF,
+			      SLPC_PARAM_TASK_DISABLE_GTPERF);
+
+	slpc_mem_task_control(data, val,
+			      SLPC_PARAM_TASK_ENABLE_BALANCER,
+			      SLPC_PARAM_TASK_DISABLE_BALANCER);
+
+	val = SLPC_PARAM_TASK_DISABLED;
+	slpc_mem_task_control(data, val,
+			      SLPC_PARAM_TASK_ENABLE_DCC,
+			      SLPC_PARAM_TASK_DISABLE_DCC);
+
+	/* Individual parameter defaults for the enabled tasks */
+	slpc_mem_set_param(data, SLPC_PARAM_GTPERF_THRESHOLD_MAX_FPS, 0);
+
+	slpc_mem_set_param(data, SLPC_PARAM_GTPERF_ENABLE_FRAMERATE_STALLING,
+			   0);
+
+	slpc_mem_set_param(data, SLPC_PARAM_GLOBAL_ENABLE_IA_GT_BALANCING,
+			   1);
+
+	slpc_mem_set_param(data,
+			   SLPC_PARAM_GLOBAL_ENABLE_ADAPTIVE_BURST_TURBO,
+			   0);
+
+	slpc_mem_set_param(data, SLPC_PARAM_GLOBAL_ENABLE_EVAL_MODE, 0);
+
+	slpc_mem_set_param(data,
+			   SLPC_PARAM_GLOBAL_ENABLE_BALANCER_IN_NON_GAMING_MODE,
+			   1);
+
+	/*
+	 * Seed the min unslice/slice frequencies from the RPS efficient
+	 * frequency (converted to MHz); relies on the RPS frequency data
+	 * having been initialised beforehand.
+	 */
+	slpc_mem_set_param(data,
+			   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+			   intel_gpu_freq(dev_priv,
+				dev_priv->pm.rps.efficient_freq));
+	slpc_mem_set_param(data,
+			   SLPC_PARAM_GLOBAL_MIN_GT_SLICE_FREQ_MHZ,
+			   intel_gpu_freq(dev_priv,
+				dev_priv->pm.rps.efficient_freq));
+
+	kunmap_atomic(data);
+}
+
+/*
+ * intel_slpc_init - set up SLPC state and its GuC shared buffer.
+ *
+ * Initialises RPS frequency bookkeeping first (under pcu_lock) so the
+ * efficient-frequency values consumed by slpc_shared_data_init() are
+ * valid, then allocates the shared-data vma once (reused on repeated
+ * init) and populates it.  On allocation failure SLPC is disabled
+ * globally via the i915.enable_slpc modparam and init bails out.
+ */
void intel_slpc_init(struct intel_slpc *slpc)
{
+	struct intel_guc *guc = slpc_to_guc(slpc);
+	struct drm_i915_private *dev_priv = slpc_to_i915(slpc);
+	struct i915_vma *vma;
+
+	slpc->active = false;
+
+	mutex_lock(&dev_priv->pm.pcu_lock);
+	gen6_init_rps_frequencies(dev_priv);
+	mutex_unlock(&dev_priv->pm.pcu_lock);
+
+	/* Allocate shared data structure */
+	vma = slpc->vma;
+	if (!vma) {
+		vma = intel_guc_allocate_vma(guc,
+			       PAGE_ALIGN(sizeof(struct slpc_shared_data)));
+		if (IS_ERR(vma)) {
+			DRM_ERROR("slpc_shared_data allocation failed\n");
+			i915.enable_slpc = 0;
+			return;
+		}
+
+		slpc->vma = vma;
+		slpc_shared_data_init(slpc);
+	}
}
+/*
+ * intel_slpc_cleanup - tear down SLPC state.
+ *
+ * Unpins and releases the shared-data vma allocated by
+ * intel_slpc_init(); safe to call when no vma was allocated.
+ */
void intel_slpc_cleanup(struct intel_slpc *slpc)
{
+	/* Release shared data structure */
+	i915_vma_unpin_and_release(&slpc->vma);
}
void intel_slpc_enable(struct intel_slpc *slpc)
@@ -26,6 +26,7 @@
struct intel_slpc {
	bool active;
+	/* GuC SLPC shared data buffer; NULL until intel_slpc_init() */
+	struct i915_vma *vma;
};
static inline int intel_slpc_enabled(void)