@@ -800,25 +800,6 @@ struct intel_csr {
func(overlay_needs_physical); \
func(supports_tv);
-struct sseu_dev_info {
- u8 slice_mask;
- u8 subslice_mask;
- u8 eu_total;
- u8 min_eu_per_subslice;
- u8 max_eu_per_subslice;
- u8 min_eu_in_pool;
- /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
- u8 subslice_7eu[3];
- u8 has_slice_pg:1;
- u8 has_subslice_pg:1;
- u8 has_eu_pg:1;
-};
-
-static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
-{
- return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
-}
-
/* Keep in gen based order, and chronological order within a gen */
enum intel_platform {
INTEL_PLATFORM_UNINITIALIZED = 0,
@@ -257,6 +257,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
struct i915_gem_context *ctx;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -305,6 +307,10 @@ __create_hw_context(struct drm_i915_private *dev_priv,
* is no remap info, it will be a NOP. */
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
+ /* On all engines, use the whole device by default */
+ for_each_engine(engine, dev_priv, id)
+ ctx->engine[id].sseu = INTEL_INFO(dev_priv)->sseu;
+
i915_gem_context_set_bannable(ctx);
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template =
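
Note that ctx->engine[id].sseu = INTEL_INFO(dev_priv)->sseu above is a struct assignment, i.e. a copy by value: later per-context adjustments cannot leak back into the device-wide topology. A minimal standalone sketch of that property (reduced types and userspace asserts are assumptions for illustration, not kernel code):

#include <assert.h>
#include <stdint.h>

struct sseu_dev_info {
	uint8_t slice_mask;
	uint8_t subslice_mask;
};

int main(void)
{
	/* Stand-in for the device-wide INTEL_INFO(dev_priv)->sseu */
	const struct sseu_dev_info device = {
		.slice_mask = 0x7,
		.subslice_mask = 0xf,
	};
	struct sseu_dev_info ctx_sseu = device;	/* per-engine default */

	ctx_sseu.slice_mask = 0x1;		/* context-local change */
	assert(device.slice_mask == 0x7);	/* template unaffected */
	return 0;
}
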
@@ -40,6 +40,25 @@ struct i915_hw_ppgtt;
struct i915_vma;
struct intel_ring;
+struct sseu_dev_info {
+ u8 slice_mask;
+ u8 subslice_mask;
+ u8 eu_total;
+ u8 min_eu_per_subslice;
+ u8 max_eu_per_subslice;
+ u8 min_eu_in_pool;
+ /* For each slice, a bitfield of which subslices have 7 EUs */
+ u8 subslice_7eu[3];
+ u8 has_slice_pg:1;
+ u8 has_subslice_pg:1;
+ u8 has_eu_pg:1;
+};
+
+static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+ return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
+}
+
#define DEFAULT_CONTEXT_HANDLE 0
/**
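
For reference, sseu_subslice_total() relies on subslice_mask being replicated across every enabled slice, and subslice_7eu[] is indexed by slice with one bit per subslice. A standalone illustration of both, assuming a hypothetical 2-slice / 3-subslice part and modelling the kernel's hweight8() with __builtin_popcount():

#include <assert.h>
#include <stdint.h>

static unsigned int hweight8(uint8_t v)
{
	return (unsigned int)__builtin_popcount(v);
}

int main(void)
{
	uint8_t slice_mask = 0x3;		/* slices 0 and 1 */
	uint8_t subslice_mask = 0x7;		/* subslices 0-2 per slice */
	uint8_t subslice_7eu[3] = { 0x4, 0x0, 0x0 };

	/* sseu_subslice_total(): per-slice subslices times slices */
	assert(hweight8(slice_mask) * hweight8(subslice_mask) == 6);

	/* subslice 2 of slice 0 has 7 EUs */
	assert(subslice_7eu[0] & (1 << 2));
	return 0;
}
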
@@ -158,6 +177,8 @@ struct i915_gem_context {
u64 lrc_desc;
int pin_count;
bool initialised;
+ /** sseu: Control the slice/subslice/EU partitioning for this engine */
+ struct sseu_dev_info sseu;
} engine[I915_NUM_ENGINES];
/** ring_size: size for allocating the per-engine ring buffer */
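
With the field above, each engine of a context carries its own sseu copy, so two engines of the same context may run with different configurations. A pared-down sketch of the layout (names mirror the kernel structures, but the reduced definitions and the engine count are assumptions for the sake of a standalone example):

#include <stdint.h>
#include <stdio.h>

#define I915_NUM_ENGINES 5	/* RCS, BCS, VCS, VCS2, VECS at the time */

struct sseu_dev_info {
	uint8_t slice_mask;
};

struct i915_gem_context {
	struct {
		int pin_count;
		struct sseu_dev_info sseu;	/* added by this patch */
	} engine[I915_NUM_ENGINES];
};

int main(void)
{
	struct i915_gem_context ctx = { 0 };

	ctx.engine[0].sseu.slice_mask = 0x1;	/* e.g. restrict RCS */
	ctx.engine[1].sseu.slice_mask = 0x3;	/* other engines differ */
	printf("rcs slice_mask=%#x\n", ctx.engine[0].sseu.slice_mask);
	return 0;
}
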
@@ -1850,8 +1850,7 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine)
return logical_ring_init(engine);
}
-static u32
-make_rpcs(struct drm_i915_private *dev_priv)
+static u32 make_rpcs(const struct sseu_dev_info *sseu)
{
u32 rpcs = 0;
@@ -1861,25 +1860,21 @@ make_rpcs(struct drm_i915_private *dev_priv)
* must make an explicit request through RPCS for full
* enablement.
*/
- if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
+ if (sseu->has_slice_pg) {
rpcs |= GEN8_RPCS_S_CNT_ENABLE;
- rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
- GEN8_RPCS_S_CNT_SHIFT;
+ rpcs |= hweight8(sseu->slice_mask) << GEN8_RPCS_S_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
- if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
+ if (sseu->has_subslice_pg) {
rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
- rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
- GEN8_RPCS_SS_CNT_SHIFT;
+ rpcs |= hweight8(sseu->subslice_mask) << GEN8_RPCS_SS_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
- if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
- rpcs |= INTEL_INFO(dev_priv)->sseu.min_eu_per_subslice <<
- GEN8_RPCS_EU_MIN_SHIFT;
- rpcs |= INTEL_INFO(dev_priv)->sseu.max_eu_per_subslice <<
- GEN8_RPCS_EU_MAX_SHIFT;
+ if (sseu->has_eu_pg) {
+ rpcs |= sseu->min_eu_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
+ rpcs |= sseu->max_eu_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
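
Taking the sseu by pointer decouples the RPCS computation from dev_priv, so the same helper now serves either the device-wide topology or a per-context one. A compilable model of the reworked helper; the GEN8_RPCS_* encodings are reproduced from i915_reg.h of this era as an assumption and should be verified against the tree:

#include <stdint.h>
#include <stdio.h>

#define GEN8_RPCS_ENABLE	(1u << 31)
#define GEN8_RPCS_S_CNT_ENABLE	(1u << 18)
#define GEN8_RPCS_S_CNT_SHIFT	15
#define GEN8_RPCS_SS_CNT_ENABLE	(1u << 11)
#define GEN8_RPCS_SS_CNT_SHIFT	8
#define GEN8_RPCS_EU_MAX_SHIFT	4
#define GEN8_RPCS_EU_MIN_SHIFT	0

struct sseu_dev_info {
	uint8_t slice_mask;
	uint8_t subslice_mask;
	uint8_t min_eu_per_subslice;
	uint8_t max_eu_per_subslice;
	uint8_t has_slice_pg:1;
	uint8_t has_subslice_pg:1;
	uint8_t has_eu_pg:1;
};

static unsigned int hweight8(uint8_t v)
{
	return (unsigned int)__builtin_popcount(v);
}

/* Mirrors the patched make_rpcs() above */
static uint32_t make_rpcs(const struct sseu_dev_info *sseu)
{
	uint32_t rpcs = 0;

	if (sseu->has_slice_pg) {
		rpcs |= GEN8_RPCS_S_CNT_ENABLE;
		rpcs |= hweight8(sseu->slice_mask) << GEN8_RPCS_S_CNT_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}
	if (sseu->has_subslice_pg) {
		rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
		rpcs |= hweight8(sseu->subslice_mask) << GEN8_RPCS_SS_CNT_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}
	if (sseu->has_eu_pg) {
		rpcs |= sseu->min_eu_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
		rpcs |= sseu->max_eu_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}
	return rpcs;
}

int main(void)
{
	/* Hypothetical 1-slice, 3-subslice part with subslice/EU gating */
	const struct sseu_dev_info sseu = {
		.slice_mask = 0x1,
		.subslice_mask = 0x7,
		.min_eu_per_subslice = 8,
		.max_eu_per_subslice = 8,
		.has_subslice_pg = 1,
		.has_eu_pg = 1,
	};

	printf("RPCS = 0x%08x\n", (unsigned int)make_rpcs(&sseu));
	return 0;
}
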
@@ -1993,7 +1988,7 @@ static void execlists_init_reg_state(u32 *regs,
if (rcs) {
regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- make_rpcs(dev_priv));
+ make_rpcs(&ctx->engine[engine->id].sseu));
i915_oa_init_reg_state(engine, ctx, regs);
}