@@ -266,6 +266,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
struct i915_gem_context *ctx;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -314,6 +316,13 @@ __create_hw_context(struct drm_i915_private *dev_priv,
* is no remap info, it will be a NOP. */
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
+ /* On all engines, use the whole device by default */
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_context *ce = to_intel_context(ctx, engine);
+
+ ce->sseu = intel_sseu_from_device_sseu(&INTEL_INFO(dev_priv)->sseu);
+ }
+
i915_gem_context_set_bannable(ctx);
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template =
@@ -30,6 +30,7 @@
#include <linux/radix-tree.h>
#include "i915_gem.h"
+#include "intel_device_info.h"
struct pid;
@@ -149,6 +150,8 @@ struct i915_gem_context {
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
+ /** sseu: Control EU/slice partitioning */
+ union intel_sseu sseu;
} __engine[I915_NUM_ENGINES];
/** ring_size: size for allocating the per-engine ring buffer */
@@ -326,4 +329,17 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
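+/*
+ * Build a context's default sseu configuration from the device's topology:
+ * the "whole device" configuration that __create_hw_context() installs on
+ * every engine.
+ */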
+static inline union intel_sseu
+intel_sseu_from_device_sseu(const struct sseu_dev_info *sseu)
+{
+ union intel_sseu value = {
+ .slice_mask = sseu->slice_mask,
+ .subslice_mask = sseu->subslice_mask[0],
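+ /* min == max: by default a context requests all available EUs */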
+ .min_eus_per_subslice = sseu->max_eus_per_subslice,
+ .max_eus_per_subslice = sseu->max_eus_per_subslice,
+ };
+
+ return value;
+}
+
#endif /* !__I915_GEM_CONTEXT_H__ */
@@ -39,6 +39,19 @@ struct drm_i915_gem_object;
struct i915_request;
struct i915_timeline;
+/*
+ * Powergating configuration for a particular (context, engine) pair: which
+ * slices/subslices are enabled and how many EUs per subslice may be used.
+ */
+union intel_sseu {
+ struct {
+ u8 slice_mask;
+ u8 subslice_mask;
+ u8 min_eus_per_subslice;
+ u8 max_eus_per_subslice;
+ };
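+ /* The fields above, packed into a single integer */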
+ u32 value;
+};
+
struct intel_wait {
struct rb_node node;
struct task_struct *tsk;
@@ -2392,8 +2392,8 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine)
return logical_ring_init(engine);
}
-static u32
-make_rpcs(struct drm_i915_private *dev_priv)
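+/*
+ * Compute the value programmed into the R_PWR_CLK_STATE register: the
+ * device sseu describes which powergating features exist, while ctx_sseu
+ * carries the slice/subslice masks and EU bounds requested by the context.
+ */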
+static u32 make_rpcs(const struct sseu_dev_info *sseu,
+ union intel_sseu ctx_sseu)
{
u32 rpcs = 0;
@@ -2403,24 +2403,23 @@ make_rpcs(struct drm_i915_private *dev_priv)
* must make an explicit request through RPCS for full
* enablement.
*/
- if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
+ if (sseu->has_slice_pg) {
rpcs |= GEN8_RPCS_S_CNT_ENABLE;
- rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
- GEN8_RPCS_S_CNT_SHIFT;
+ rpcs |= hweight8(ctx_sseu.slice_mask) << GEN8_RPCS_S_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
- if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
+ if (sseu->has_subslice_pg) {
rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
- rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]) <<
+ rpcs |= hweight8(ctx_sseu.subslice_mask) <<
GEN8_RPCS_SS_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
- if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
- rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
+ if (sseu->has_eu_pg) {
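+ /* MIN/MAX bound how many EUs per subslice are kept powered */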
+ rpcs |= ctx_sseu.min_eus_per_subslice <<
GEN8_RPCS_EU_MIN_SHIFT;
- rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
+ rpcs |= ctx_sseu.max_eus_per_subslice <<
GEN8_RPCS_EU_MAX_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
@@ -2544,7 +2543,8 @@ static void execlists_init_reg_state(u32 *regs,
if (rcs) {
regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- make_rpcs(dev_priv));
+ make_rpcs(&INTEL_INFO(dev_priv)->sseu,
+ to_intel_context(ctx, engine)->sseu));
i915_oa_init_reg_state(engine, ctx, regs);
}