@@ -1816,6 +1816,7 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
void gen6_rps_boost(struct drm_i915_private *dev_priv,
struct intel_rps_client *rps,
unsigned long submitted);
+void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv);
void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
@@ -5555,7 +5555,7 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
return INTEL_RC6_ENABLE;
}
-static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
+void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
/* All of these values are in units of 50MHz */
@@ -22,6 +22,7 @@
*
*/
#include <linux/firmware.h>
+#include <asm/msr-index.h>
#include "i915_drv.h"
#include "intel_uc.h"
@@ -288,12 +289,137 @@ int intel_slpc_task_status(struct drm_i915_private *dev_priv, u64 *val,
return ret;
}
+/*
+ * Map the running device to the SLPC platform SKU value reported to the
+ * GuC firmware. Only the Skylake ULX/ULT low-power variants are
+ * distinguished; every other part is reported as a desktop (DT) SKU.
+ */
+static unsigned int slpc_get_platform_sku(struct drm_i915_private *dev_priv)
+{
+	enum slpc_platform_sku platform_sku;
+
+	if (IS_SKL_ULX(dev_priv))
+		platform_sku = SLPC_PLATFORM_SKU_ULX;
+	else if (IS_SKL_ULT(dev_priv))
+		platform_sku = SLPC_PLATFORM_SKU_ULT;
+	else
+		platform_sku = SLPC_PLATFORM_SKU_DT;
+
+	/*
+	 * The shared-data field this lands in is presumably a single byte
+	 * (hence the 0xFF check) -- warn if the enum ever outgrows it.
+	 */
+	WARN_ON(platform_sku > 0xFF);
+
+	return platform_sku;
+}
+
+/*
+ * Number of enabled GT slices to report to SLPC: derived from the sseu
+ * slice mask on Skylake, defaulting to 1 on everything else.
+ */
+static unsigned int slpc_get_slice_count(struct drm_i915_private *dev_priv)
+{
+	unsigned int slice_count = 1;
+
+	if (IS_SKYLAKE(dev_priv))
+		slice_count = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask);
+
+	return slice_count;
+}
+
+/*
+ * Populate the SLPC shared-data page that the GuC firmware reads:
+ * platform SKU/slice info, CPU frequency points sampled from host MSRs,
+ * and the initial task/parameter configuration (only the GTPERF task is
+ * enabled; balancer and DCC are disabled).
+ *
+ * Caller must have allocated guc.slpc.vma and initialised
+ * dev_priv->rps.efficient_freq (see intel_slpc_init), since that value
+ * is handed to GuC as the minimum GT slice/unslice frequency below.
+ */
+static void slpc_shared_data_init(struct drm_i915_private *dev_priv)
+{
+	struct page *page;
+	struct slpc_shared_data *data;
+	u64 val;
+
+	page = i915_vma_first_page(dev_priv->guc.slpc.vma);
+	data = kmap_atomic(page);
+
+	memset(data, 0, sizeof(struct slpc_shared_data));
+
+	data->shared_data_size = sizeof(struct slpc_shared_data);
+	data->global_state = SLPC_GLOBAL_STATE_NOT_RUNNING;
+	data->platform_info.platform_sku =
+				slpc_get_platform_sku(dev_priv);
+	data->platform_info.slice_count =
+				slpc_get_slice_count(dev_priv);
+	data->platform_info.power_plan_source =
+		SLPC_POWER_PLAN_SOURCE(SLPC_POWER_PLAN_BALANCED,
+					    SLPC_POWER_SOURCE_AC);
+	/*
+	 * Frequency points come from host CPU MSRs: P0 from
+	 * MSR_TURBO_RATIO_LIMIT (low byte = 1-core turbo ratio), P1/Pe/Pn
+	 * from MSR_PLATFORM_INFO (bits 15:8 / 47:40 / 55:48 per the SDM --
+	 * TODO confirm). NOTE(review): the shifts rely on the destination
+	 * fields being 8 bits wide so the higher MSR bits are truncated;
+	 * verify the field widths in struct slpc_platform_info.
+	 * rdmsrl() does not sleep, so it is usable inside this
+	 * kmap_atomic section.
+	 */
+	rdmsrl(MSR_TURBO_RATIO_LIMIT, val);
+	data->platform_info.P0_freq = val;
+	rdmsrl(MSR_PLATFORM_INFO, val);
+	data->platform_info.P1_freq = val >> 8;
+	data->platform_info.Pe_freq = val >> 40;
+	data->platform_info.Pn_freq = val >> 48;
+
+	/* Enable only GTPERF task, Disable others */
+	val = SLPC_PARAM_TASK_ENABLED;
+	slpc_mem_task_control(data, val,
+			      SLPC_PARAM_TASK_ENABLE_GTPERF,
+			      SLPC_PARAM_TASK_DISABLE_GTPERF);
+
+	/* val is reused for the two disables below */
+	val = SLPC_PARAM_TASK_DISABLED;
+	slpc_mem_task_control(data, val,
+			      SLPC_PARAM_TASK_ENABLE_BALANCER,
+			      SLPC_PARAM_TASK_DISABLE_BALANCER);
+
+	slpc_mem_task_control(data, val,
+			      SLPC_PARAM_TASK_ENABLE_DCC,
+			      SLPC_PARAM_TASK_DISABLE_DCC);
+
+	/* 0 = no FPS cap on the GTPERF task */
+	slpc_mem_set_param(data, SLPC_PARAM_GTPERF_THRESHOLD_MAX_FPS, 0);
+
+	slpc_mem_set_param(data, SLPC_PARAM_GTPERF_ENABLE_FRAMERATE_STALLING,
+			   0);
+
+	slpc_mem_set_param(data, SLPC_PARAM_GLOBAL_ENABLE_IA_GT_BALANCING,
+			   0);
+
+	slpc_mem_set_param(data,
+			   SLPC_PARAM_GLOBAL_ENABLE_ADAPTIVE_BURST_TURBO,
+			   0);
+
+	slpc_mem_set_param(data, SLPC_PARAM_GLOBAL_ENABLE_EVAL_MODE, 0);
+
+	slpc_mem_set_param(data,
+			   SLPC_PARAM_GLOBAL_ENABLE_BALANCER_IN_NON_GAMING_MODE,
+			   0);
+
+	/*
+	 * Floor both the slice and unslice GT frequency at the RPe
+	 * (efficient) frequency, converted from rps units to MHz.
+	 */
+	slpc_mem_set_param(data,
+			   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+			   intel_gpu_freq(dev_priv,
+				dev_priv->rps.efficient_freq));
+	slpc_mem_set_param(data,
+			   SLPC_PARAM_GLOBAL_MIN_GT_SLICE_FREQ_MHZ,
+			   intel_gpu_freq(dev_priv,
+				dev_priv->rps.efficient_freq));
+
+	kunmap_atomic(data);
+}
+
void intel_slpc_init(struct drm_i915_private *dev_priv)
{
+	struct intel_guc *guc = &dev_priv->guc;
+	struct i915_vma *vma;
+
+	dev_priv->guc.slpc.active = false;
+
+	/*
+	 * gen6_init_rps_frequencies() fills in the rps frequency fields
+	 * (notably rps.efficient_freq, which slpc_shared_data_init() below
+	 * reports to GuC as the minimum GT frequency), so it must run
+	 * first, under the rps lock.
+	 */
+	mutex_lock(&dev_priv->rps.hw_lock);
+	gen6_init_rps_frequencies(dev_priv);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	/*
+	 * Allocate shared data structure. Done only once: a non-NULL vma
+	 * from an earlier init is reused as-is.
+	 */
+	vma = dev_priv->guc.slpc.vma;
+	if (!vma) {
+		vma = intel_guc_allocate_vma(guc,
+			       PAGE_ALIGN(sizeof(struct slpc_shared_data)));
+		if (IS_ERR(vma)) {
+			DRM_ERROR("slpc_shared_data allocation failed\n");
+			/* Drop back to running without SLPC */
+			i915.enable_slpc = 0;
+			return;
+		}
+
+		dev_priv->guc.slpc.vma = vma;
+		slpc_shared_data_init(dev_priv);
+	}
}
void intel_slpc_cleanup(struct drm_i915_private *dev_priv)
{
+	struct intel_guc *guc = &dev_priv->guc;
+
+	/*
+	 * Release shared data structure. Presumably this also NULLs
+	 * guc->slpc.vma (matching the !vma re-allocation guard in
+	 * intel_slpc_init) -- confirm i915_vma_unpin_and_release()
+	 * semantics.
+	 */
+	i915_vma_unpin_and_release(&guc->slpc.vma);
}
void intel_slpc_enable(struct drm_i915_private *dev_priv)
@@ -26,6 +26,7 @@
struct intel_slpc {
	bool active;
+	/* GEM buffer backing the SLPC shared-data page handed to GuC */
+	struct i915_vma *vma;
};
enum slpc_status {