@@ -6941,6 +6941,15 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
intel_runtime_pm_get(dev_priv);
}
+ if (i915.enable_slpc) {
+ dev_priv->guc.slpc.active = intel_slpc_get_status(dev_priv);
+ if (!dev_priv->guc.slpc.active) {
+ i915.enable_slpc = 0;
+ intel_sanitize_gt_powersave(dev_priv);
+ } else
+ dev_priv->pm_rps_events = 0;
+ }
+
mutex_lock(&dev_priv->drm.struct_mutex);
mutex_lock(&dev_priv->rps.hw_lock);
@@ -82,6 +82,133 @@ static void slpc_shared_data_init(struct drm_i915_private *dev_priv)
kunmap_atomic(data);
}
+/*
+ * Send an SLPC event to GuC. The first input dword is overwritten with the
+ * H2G SLPC request action; on failure the event ID and status are logged.
+ */
+static void host2guc_slpc(struct drm_i915_private *dev_priv,
+ struct slpc_event_input *input, u32 len)
+{
+ u32 *data;
+ u32 output[SLPC_EVENT_MAX_OUTPUT_ARGS] = {0};
+ int ret = 0;
+
+ /*
+ * We have only 15 scratch registers for communication.
+ * the first we will use for the event ID in input and
+ * output data. Event processing status will be present
+ * in SOFT_SCRATCH(1) register.
+ */
+ BUILD_BUG_ON(SLPC_EVENT_MAX_INPUT_ARGS > 14);
+ BUILD_BUG_ON(SLPC_EVENT_MAX_OUTPUT_ARGS < 1);
+ BUILD_BUG_ON(SLPC_EVENT_MAX_OUTPUT_ARGS > 14);
+
+ data = (u32 *) input;
+ data[0] = INTEL_GUC_ACTION_SLPC_REQUEST;
+ ret = __intel_guc_send(&dev_priv->guc, data, len, output);
+
+ /*
+ * __intel_guc_send() fills output[] only on success, so output[] is
+ * zero-initialized above; the event ID decoded here reads as 0 when
+ * the send failed before any GuC response was captured.
+ */
+ if (ret)
+ DRM_ERROR("event 0x%x status %d\n",
+ ((output[0] & 0xFF00) >> 8), ret);
+}
+
+/* Ask GuC to reset SLPC, passing the GGTT offset of the shared-data page. */
+static void host2guc_slpc_reset(struct drm_i915_private *dev_priv)
+{
+ struct slpc_event_input data = {
+ .header.value = SLPC_EVENT(SLPC_EVENT_RESET, 2),
+ };
+
+ /* args[1] stays 0 from the initializer above. */
+ data.args[0] = i915_ggtt_offset(dev_priv->guc.slpc.vma);
+
+ host2guc_slpc(dev_priv, &data, 4);
+}
+
+/* Ask GuC to dump current SLPC task state into the shared-data page. */
+static void host2guc_slpc_query_task_state(struct drm_i915_private *dev_priv)
+{
+ struct slpc_event_input data = {
+ .header.value = SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
+ };
+
+ /* args[1] stays 0 from the initializer above. */
+ data.args[0] = i915_ggtt_offset(dev_priv->guc.slpc.vma);
+
+ host2guc_slpc(dev_priv, &data, 4);
+}
+
+/* Refresh SLPC task state in shared memory; no-op when SLPC is inactive. */
+void intel_slpc_query_task_state(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->guc.slpc.active)
+ return;
+
+ host2guc_slpc_query_task_state(dev_priv);
+}
+
+/*
+ * Read the state updates from GuC SLPC into *data: first triggers the
+ * QUERY_TASK_STATE H2G action (when SLPC is active) so GuC refreshes the
+ * shared page, then copies a snapshot of the shared data to the caller.
+ * Note this returns nothing; the snapshot is delivered through *data.
+ */
+void intel_slpc_read_shared_data(struct drm_i915_private *dev_priv,
+ struct slpc_shared_data *data)
+{
+ struct page *page;
+ void *pv = NULL;
+
+ intel_slpc_query_task_state(dev_priv);
+
+ page = i915_vma_first_page(dev_priv->guc.slpc.vma);
+ pv = kmap_atomic(page);
+
+ /* Flush CPU caches so we read GuC's latest writes, then snapshot. */
+ drm_clflush_virt_range(pv, sizeof(struct slpc_shared_data));
+ memcpy(data, pv, sizeof(struct slpc_shared_data));
+
+ kunmap_atomic(pv);
+}
+
+/* Human-readable name for an SLPC global state, for logs and debugfs. */
+const char *intel_slpc_get_state_str(enum slpc_global_state state)
+{
+ /* switch over the enum lets the compiler flag unhandled states. */
+ switch (state) {
+ case SLPC_GLOBAL_STATE_NOT_RUNNING:
+ return "not running";
+ case SLPC_GLOBAL_STATE_INITIALIZING:
+ return "initializing";
+ case SLPC_GLOBAL_STATE_RESETTING:
+ return "resetting";
+ case SLPC_GLOBAL_STATE_RUNNING:
+ return "running";
+ case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
+ return "shutting down";
+ case SLPC_GLOBAL_STATE_ERROR:
+ return "error";
+ default:
+ return "unknown";
+ }
+}
+
+/*
+ * Query GuC SLPC state from the shared-data page and decide whether SLPC
+ * should be treated as active. Returns true for RUNNING (and, optimistically,
+ * for RESETTING — see below), false otherwise.
+ */
+bool intel_slpc_get_status(struct drm_i915_private *dev_priv)
+{
+ struct slpc_shared_data data;
+ bool ret = false;
+
+ intel_slpc_read_shared_data(dev_priv, &data);
+ DRM_INFO("SLPC state: %s\n",
+ intel_slpc_get_state_str(data.global_state));
+
+ switch (data.global_state) {
+ case SLPC_GLOBAL_STATE_RUNNING:
+ /* Capture required state from SLPC here */
+ ret = true;
+ break;
+ case SLPC_GLOBAL_STATE_ERROR:
+ DRM_ERROR("SLPC in error state.\n");
+ break;
+ case SLPC_GLOBAL_STATE_RESETTING:
+ /*
+ * SLPC enabling in GuC should complete quickly since the
+ * reset event is sent during GuC load itself, so treat a
+ * still-resetting SLPC as active.
+ * TODO: wait until the state changes to RUNNING.
+ */
+ ret = true;
+ DRM_ERROR("SLPC not running yet!\n");
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
void intel_slpc_init(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
@@ -118,6 +245,16 @@ void intel_slpc_cleanup(struct drm_i915_private *dev_priv)
void intel_slpc_enable(struct drm_i915_private *dev_priv)
{
+ struct slpc_shared_data *data;
+
+ /* Mark shared state NOT_RUNNING before asking GuC to reset SLPC. */
+ data = kmap_atomic(i915_vma_first_page(dev_priv->guc.slpc.vma));
+ data->global_state = SLPC_GLOBAL_STATE_NOT_RUNNING;
+ kunmap_atomic(data);
+
+ host2guc_slpc_reset(dev_priv);
+ dev_priv->guc.slpc.active = true;
}
void intel_slpc_suspend(struct drm_i915_private *dev_priv)
@@ -195,6 +195,10 @@ enum slpc_status {
};
/* intel_slpc.c */
+void intel_slpc_read_shared_data(struct drm_i915_private *dev_priv,
+ struct slpc_shared_data *data);
+const char *intel_slpc_get_state_str(enum slpc_global_state state);
+bool intel_slpc_get_status(struct drm_i915_private *dev_priv);
void intel_slpc_init(struct drm_i915_private *dev_priv);
void intel_slpc_cleanup(struct drm_i915_private *dev_priv);
void intel_slpc_enable(struct drm_i915_private *dev_priv);
@@ -228,9 +228,11 @@ static bool intel_guc_recv(struct intel_guc *guc, u32 *status)
return INTEL_GUC_RECV_IS_RESPONSE(val);
}
-int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+int __intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 *output)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ union slpc_event_output_header header;
u32 status;
int i;
int ret;
@@ -277,12 +279,29 @@ int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
}
dev_priv->guc.action_status = status;
+ /*
+ * Output data from Host to GuC SLPC actions is populated in scratch
+ * registers SOFT_SCRATCH(1) to SOFT_SCRATCH(14) based on event.
+ * Currently only SLPC action status in GuC is meaningful as Host
+ * can query only overridden parameters and that are fetched from
+ * Host-GuC SLPC shared data.
+ */
+ if (output && !ret) {
+ output[0] = header.value = I915_READ(SOFT_SCRATCH(1));
+ ret = header.status;
+ }
+
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&guc->send_mutex);
return ret;
}
+/* Send a GuC action without collecting any SOFT_SCRATCH output data. */
+int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+{
+ return __intel_guc_send(guc, action, len, NULL);
+}
+
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -193,6 +193,8 @@ struct intel_huc {
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
struct intel_uc_fw *uc_fw);
+int __intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 *output);
int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);