Message ID | 20220804232125.211449-1-umesh.nerlige.ramappa@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | i915/pmu: Wire GuC backend to per-client busyness | expand |
On 05/08/2022 00:21, Umesh Nerlige Ramappa wrote: > From: John Harrison <John.C.Harrison@Intel.com> > > GuC provides engine_id and last_switch_in ticks for an active context in > the pphwsp. The context image provides a 32 bit total ticks which is the > accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This > information is used to calculate the context busyness as follows: > > If the engine_id is valid, then busyness is the sum of accumulated total > ticks and active ticks. Active ticks is calculated with current gt time > as reference. > > If engine_id is invalid, busyness is equal to accumulated total ticks. > > Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a > potential race was highlighted in an earlier review that can lead to > double accounting of busyness. While the solution to this is a wip, > busyness is still usable for platforms running GuC submission. > > Remaining work: Enable and test context busyness for > virtual_parent_context_ops and virtual_child_context_ops. I meant track the IGT work in the jira internally. :) Otherwise: Acked-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Also, can someone else please do the full review? I'm afraid with the passage of time I forgot what little I knew about how GuC tracks this data. :( Some nits and questions below. > v2: (Tvrtko) > - Use COPS_RUNTIME_ACTIVE_TOTAL > - Add code comment for the race > - Undo local variables initializations > > v3: > - Add support for virtual engines based on > https://patchwork.freedesktop.org/series/105227/ > > v4: > - Update commit message with remaining work. 
> - Rebase > > Signed-off-by: John Harrison <John.C.Harrison@Intel.com> > Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com> > --- > drivers/gpu/drm/i915/gt/intel_context.c | 12 +++- > drivers/gpu/drm/i915/gt/intel_context.h | 6 +- > drivers/gpu/drm/i915/gt/intel_context_types.h | 6 ++ > drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ > .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 65 ++++++++++++++++++- > drivers/gpu/drm/i915/i915_drm_client.c | 6 +- > 6 files changed, 89 insertions(+), 11 deletions(-) > > diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c > index 654a092ed3d6..e2d70a9fdac0 100644 > --- a/drivers/gpu/drm/i915/gt/intel_context.c > +++ b/drivers/gpu/drm/i915/gt/intel_context.c > @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, > child->parallel.parent = parent; > } > > -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) > +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) > { > u64 total, active; > > + if (ce->ops->update_stats) > + ce->ops->update_stats(ce); > + > total = ce->stats.runtime.total; > if (ce->ops->flags & COPS_RUNTIME_CYCLES) > total *= ce->engine->gt->clock_period_ns; > > active = READ_ONCE(ce->stats.active); > - if (active) > + /* > + * When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend > + * already provides the total active time of the context, so skip this > + * calculation when this flag is set. 
> + */ > + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) > active = intel_context_clock() - active; > > return total + active; > diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h > index 8e2d70630c49..3d1d7436c1a4 100644 > --- a/drivers/gpu/drm/i915/gt/intel_context.h > +++ b/drivers/gpu/drm/i915/gt/intel_context.h > @@ -58,7 +58,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) > return !!ce->parallel.number_children; > } > > -static inline bool intel_context_is_pinned(struct intel_context *ce); > +static inline bool intel_context_is_pinned(const struct intel_context *ce); > > static inline struct intel_context * > intel_context_to_parent(struct intel_context *ce) > @@ -118,7 +118,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce) > * Returns: true if the context is currently pinned for use by the GPU. > */ > static inline bool > -intel_context_is_pinned(struct intel_context *ce) > +intel_context_is_pinned(const struct intel_context *ce) > { > return atomic_read(&ce->pin_count); > } > @@ -362,7 +362,7 @@ intel_context_clear_nopreempt(struct intel_context *ce) > clear_bit(CONTEXT_NOPREEMPT, &ce->flags); > } > > -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce); > +u64 intel_context_get_total_runtime_ns(struct intel_context *ce); > u64 intel_context_get_avg_runtime_ns(struct intel_context *ce); > > static inline u64 intel_context_clock(void) > diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h > index 04eacae1aca5..f7ff4c7d81c7 100644 > --- a/drivers/gpu/drm/i915/gt/intel_context_types.h > +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h > @@ -38,6 +38,9 @@ struct intel_context_ops { > #define COPS_RUNTIME_CYCLES_BIT 1 > #define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT) > > +#define COPS_RUNTIME_ACTIVE_TOTAL_BIT 2 > +#define COPS_RUNTIME_ACTIVE_TOTAL 
BIT(COPS_RUNTIME_ACTIVE_TOTAL_BIT) > + > int (*alloc)(struct intel_context *ce); > > void (*revoke)(struct intel_context *ce, struct i915_request *rq, > @@ -56,6 +59,8 @@ struct intel_context_ops { > > void (*sched_disable)(struct intel_context *ce); > > + void (*update_stats)(struct intel_context *ce); > + > void (*reset)(struct intel_context *ce); > void (*destroy)(struct kref *kref); > > @@ -148,6 +153,7 @@ struct intel_context { > struct ewma_runtime avg; > u64 total; > u32 last; > + u64 start_gt_clk; Nit - put u64 next to u64 and u32 next to u32 to avoid holes. > I915_SELFTEST_DECLARE(u32 num_underflow); > I915_SELFTEST_DECLARE(u32 max_underflow); > } runtime; > diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h > index 323b055e5db9..c7b54f1631b9 100644 > --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h > +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h > @@ -196,6 +196,11 @@ static inline u8 guc_class_to_engine_class(u8 guc_class) > return guc_class_engine_class_map[guc_class]; > } > > +/* Per context engine usage stats: */ > +#define PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO (0x500 / sizeof(u32)) > +#define PPHWSP_GUC_CONTEXT_USAGE_STAMP_HI (PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO + 1) > +#define PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID (PPHWSP_GUC_CONTEXT_USAGE_STAMP_HI + 1) > + > /* Work item for submitting workloads into work queue of GuC. 
*/ > struct guc_wq_item { > u32 header; > diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c > index 0d17da77e787..c9fefa254a7e 100644 > --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c > +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c > @@ -378,7 +378,7 @@ static inline void set_context_guc_id_invalid(struct intel_context *ce) > ce->guc_id.id = GUC_INVALID_CONTEXT_ID; > } > > -static inline struct intel_guc *ce_to_guc(struct intel_context *ce) > +static inline struct intel_guc *ce_to_guc(const struct intel_context *ce) This is odd since the helper now takes away constness. I can't really figure out why the change is needed? > { > return &ce->engine->gt->uc.guc; > } > @@ -1376,13 +1376,16 @@ static void __update_guc_busyness_stats(struct intel_guc *guc) > spin_unlock_irqrestore(&guc->timestamp.lock, flags); > } > > +static void __guc_context_update_clks(struct intel_context *ce); > static void guc_timestamp_ping(struct work_struct *wrk) > { > struct intel_guc *guc = container_of(wrk, typeof(*guc), > timestamp.work.work); > struct intel_uc *uc = container_of(guc, typeof(*uc), guc); > struct intel_gt *gt = guc_to_gt(guc); > + struct intel_context *ce; > intel_wakeref_t wakeref; > + unsigned long index; > int srcu, ret; > > /* > @@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct *wrk) > with_intel_runtime_pm(>->i915->runtime_pm, wakeref) > __update_guc_busyness_stats(guc); > > + /* adjust context stats for overflow */ > + xa_for_each(&guc->context_lookup, index, ce) > + __guc_context_update_clks(ce); > + > intel_gt_reset_unlock(gt, srcu); > > mod_delayed_work(system_highpri_wq, &guc->timestamp.work, > @@ -1469,6 +1476,56 @@ void intel_guc_busyness_unpark(struct intel_gt *gt) > guc->timestamp.ping_delay); > } > > +static void __guc_context_update_clks(struct intel_context *ce) > +{ > + struct intel_guc *guc = ce_to_guc(ce); > + struct intel_gt *gt = ce->engine->gt; > + 
u32 *pphwsp, last_switch, engine_id; > + u64 start_gt_clk, active; > + unsigned long flags; > + ktime_t unused; > + > + spin_lock_irqsave(&guc->timestamp.lock, flags); > + > + /* > + * GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched > + * out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) > + * relies on GuC and GPU for busyness calculations. Due to this, A > + * potential race was highlighted in an earlier review that can lead to > + * double accounting of busyness. While the solution to this is a wip, > + * busyness is still usable for platforms running GuC submission. > + */ > + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; > + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); What about PPHWSP_GUC_CONTEXT_USAGE_STAMP_HI? I see it defined but isn't used so is the timestmap 32 bit just ABI reserved 64 bits for future proofing or something? Regards, Tvrtko > + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); > + > + guc_update_pm_timestamp(guc, &unused); > + > + if (engine_id != 0xffffffff && last_switch) { > + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); > + __extend_last_switch(guc, &start_gt_clk, last_switch); > + active = intel_gt_clock_interval_to_ns(gt, guc->timestamp.gt_stamp - start_gt_clk); > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); > + WRITE_ONCE(ce->stats.active, active); > + } else { > + lrc_update_runtime(ce); > + } > + > + spin_unlock_irqrestore(&guc->timestamp.lock, flags); > +} > + > +static void guc_context_update_stats(struct intel_context *ce) > +{ > + if (!intel_context_pin_if_active(ce)) { > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, 0); > + WRITE_ONCE(ce->stats.active, 0); > + return; > + } > + > + __guc_context_update_clks(ce); > + intel_context_unpin(ce); > +} > + > static inline bool > submission_disabled(struct intel_guc *guc) > { > @@ -2723,6 +2780,7 @@ static void guc_context_unpin(struct intel_context *ce) > { > struct 
intel_guc *guc = ce_to_guc(ce); > > + lrc_update_runtime(ce); > unpin_guc_id(guc, ce); > lrc_unpin(ce); > > @@ -3344,6 +3402,7 @@ static void remove_from_context(struct i915_request *rq) > } > > static const struct intel_context_ops guc_context_ops = { > + .flags = COPS_RUNTIME_CYCLES | COPS_RUNTIME_ACTIVE_TOTAL, > .alloc = guc_context_alloc, > > .pre_pin = guc_context_pre_pin, > @@ -3360,6 +3419,8 @@ static const struct intel_context_ops guc_context_ops = { > > .sched_disable = guc_context_sched_disable, > > + .update_stats = guc_context_update_stats, > + > .reset = lrc_reset, > .destroy = guc_context_destroy, > > @@ -3593,6 +3654,7 @@ static int guc_virtual_context_alloc(struct intel_context *ce) > } > > static const struct intel_context_ops virtual_guc_context_ops = { > + .flags = COPS_RUNTIME_CYCLES | COPS_RUNTIME_ACTIVE_TOTAL, > .alloc = guc_virtual_context_alloc, > > .pre_pin = guc_virtual_context_pre_pin, > @@ -3608,6 +3670,7 @@ static const struct intel_context_ops virtual_guc_context_ops = { > .exit = guc_virtual_context_exit, > > .sched_disable = guc_context_sched_disable, > + .update_stats = guc_context_update_stats, > > .destroy = guc_context_destroy, > > diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c > index b09d1d386574..8d81119fff14 100644 > --- a/drivers/gpu/drm/i915/i915_drm_client.c > +++ b/drivers/gpu/drm/i915/i915_drm_client.c > @@ -147,11 +147,7 @@ void i915_drm_client_fdinfo(struct seq_file *m, struct file *f) > PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); > seq_printf(m, "drm-client-id:\t%u\n", client->id); > > - /* > - * Temporarily skip showing client engine information with GuC submission till > - * fetching engine busyness is implemented in the GuC submission backend > - */ > - if (GRAPHICS_VER(i915) < 8 || intel_uc_uses_guc_submission(&i915->gt0.uc)) > + if (GRAPHICS_VER(i915) < 8) > return; > > for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)
On Fri, Aug 05, 2022 at 10:45:30AM +0100, Tvrtko Ursulin wrote: > >On 05/08/2022 00:21, Umesh Nerlige Ramappa wrote: >>From: John Harrison <John.C.Harrison@Intel.com> >> >>GuC provides engine_id and last_switch_in ticks for an active context in >>the pphwsp. The context image provides a 32 bit total ticks which is the >>accumulated by the context (a.k.a. context[CTX_TIMESTAMP]). This >>information is used to calculate the context busyness as follows: >> >>If the engine_id is valid, then busyness is the sum of accumulated total >>ticks and active ticks. Active ticks is calculated with current gt time >>as reference. >> >>If engine_id is invalid, busyness is equal to accumulated total ticks. >> >>Since KMD (CPU) retrieves busyness data from 2 sources - GPU and GuC, a >>potential race was highlighted in an earlier review that can lead to >>double accounting of busyness. While the solution to this is a wip, >>busyness is still usable for platforms running GuC submission. >> >>Remaining work: Enable and test context busyness for >>virtual_parent_context_ops and virtual_child_context_ops. > >I meant track the IGT work in the jira internally. :) Oh, I did do that and added this here as well. Note that I have not enabled the busyness in i915 for the parent/child context ops since I was not able to verify it yet. > >Otherwise: > >Acked-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> > >Also, can someone else please do the full review? I'm afraid with the >passage of time I forgot what little I knew about how GuC tracks this >data. :( I will ask around > >Some nits and questions below. > >>v2: (Tvrtko) >>- Use COPS_RUNTIME_ACTIVE_TOTAL >>- Add code comment for the race >>- Undo local variables initializations >> >>v3: >>- Add support for virtual engines based on >> https://patchwork.freedesktop.org/series/105227/ >> >>v4: >>- Update commit message with remaining work. 
>>- Rebase >> >>Signed-off-by: John Harrison <John.C.Harrison@Intel.com> >>Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com> >>--- >> drivers/gpu/drm/i915/gt/intel_context.c | 12 +++- >> drivers/gpu/drm/i915/gt/intel_context.h | 6 +- >> drivers/gpu/drm/i915/gt/intel_context_types.h | 6 ++ >> drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 5 ++ >> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 65 ++++++++++++++++++- >> drivers/gpu/drm/i915/i915_drm_client.c | 6 +- >> 6 files changed, 89 insertions(+), 11 deletions(-) >> >>diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c >>index 654a092ed3d6..e2d70a9fdac0 100644 >>--- a/drivers/gpu/drm/i915/gt/intel_context.c >>+++ b/drivers/gpu/drm/i915/gt/intel_context.c >>@@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, >> child->parallel.parent = parent; >> } >>-u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) >>+u64 intel_context_get_total_runtime_ns(struct intel_context *ce) >> { >> u64 total, active; >>+ if (ce->ops->update_stats) >>+ ce->ops->update_stats(ce); >>+ >> total = ce->stats.runtime.total; >> if (ce->ops->flags & COPS_RUNTIME_CYCLES) >> total *= ce->engine->gt->clock_period_ns; >> active = READ_ONCE(ce->stats.active); >>- if (active) >>+ /* >>+ * When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend >>+ * already provides the total active time of the context, so skip this >>+ * calculation when this flag is set. 
>>+ */ >>+ if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) >> active = intel_context_clock() - active; >> return total + active; >>diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h >>index 8e2d70630c49..3d1d7436c1a4 100644 >>--- a/drivers/gpu/drm/i915/gt/intel_context.h >>+++ b/drivers/gpu/drm/i915/gt/intel_context.h >>@@ -58,7 +58,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) >> return !!ce->parallel.number_children; >> } >>-static inline bool intel_context_is_pinned(struct intel_context *ce); >>+static inline bool intel_context_is_pinned(const struct intel_context *ce); >> static inline struct intel_context * >> intel_context_to_parent(struct intel_context *ce) >>@@ -118,7 +118,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce) >> * Returns: true if the context is currently pinned for use by the GPU. >> */ >> static inline bool >>-intel_context_is_pinned(struct intel_context *ce) >>+intel_context_is_pinned(const struct intel_context *ce) >> { >> return atomic_read(&ce->pin_count); >> } >>@@ -362,7 +362,7 @@ intel_context_clear_nopreempt(struct intel_context *ce) >> clear_bit(CONTEXT_NOPREEMPT, &ce->flags); >> } >>-u64 intel_context_get_total_runtime_ns(const struct intel_context *ce); >>+u64 intel_context_get_total_runtime_ns(struct intel_context *ce); >> u64 intel_context_get_avg_runtime_ns(struct intel_context *ce); >> static inline u64 intel_context_clock(void) >>diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h >>index 04eacae1aca5..f7ff4c7d81c7 100644 >>--- a/drivers/gpu/drm/i915/gt/intel_context_types.h >>+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h >>@@ -38,6 +38,9 @@ struct intel_context_ops { >> #define COPS_RUNTIME_CYCLES_BIT 1 >> #define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT) >>+#define COPS_RUNTIME_ACTIVE_TOTAL_BIT 2 >>+#define COPS_RUNTIME_ACTIVE_TOTAL 
BIT(COPS_RUNTIME_ACTIVE_TOTAL_BIT) >>+ >> int (*alloc)(struct intel_context *ce); >> void (*revoke)(struct intel_context *ce, struct i915_request *rq, >>@@ -56,6 +59,8 @@ struct intel_context_ops { >> void (*sched_disable)(struct intel_context *ce); >>+ void (*update_stats)(struct intel_context *ce); >>+ >> void (*reset)(struct intel_context *ce); >> void (*destroy)(struct kref *kref); >>@@ -148,6 +153,7 @@ struct intel_context { >> struct ewma_runtime avg; >> u64 total; >> u32 last; >>+ u64 start_gt_clk; > >Nit - put u64 next to u64 and u32 next to u32 to avoid holes. > >> I915_SELFTEST_DECLARE(u32 num_underflow); >> I915_SELFTEST_DECLARE(u32 max_underflow); >> } runtime; >>diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h >>index 323b055e5db9..c7b54f1631b9 100644 >>--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h >>+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h >>@@ -196,6 +196,11 @@ static inline u8 guc_class_to_engine_class(u8 guc_class) >> return guc_class_engine_class_map[guc_class]; >> } >>+/* Per context engine usage stats: */ >>+#define PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO (0x500 / sizeof(u32)) >>+#define PPHWSP_GUC_CONTEXT_USAGE_STAMP_HI (PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO + 1) >>+#define PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID (PPHWSP_GUC_CONTEXT_USAGE_STAMP_HI + 1) >>+ >> /* Work item for submitting workloads into work queue of GuC. 
*/ >> struct guc_wq_item { >> u32 header; >>diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c >>index 0d17da77e787..c9fefa254a7e 100644 >>--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c >>+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c >>@@ -378,7 +378,7 @@ static inline void set_context_guc_id_invalid(struct intel_context *ce) >> ce->guc_id.id = GUC_INVALID_CONTEXT_ID; >> } >>-static inline struct intel_guc *ce_to_guc(struct intel_context *ce) >>+static inline struct intel_guc *ce_to_guc(const struct intel_context *ce) > >This is odd since the helper now takes away constness. I can't really >figure out why the change is needed? > >> { >> return &ce->engine->gt->uc.guc; >> } >>@@ -1376,13 +1376,16 @@ static void __update_guc_busyness_stats(struct intel_guc *guc) >> spin_unlock_irqrestore(&guc->timestamp.lock, flags); >> } >>+static void __guc_context_update_clks(struct intel_context *ce); >> static void guc_timestamp_ping(struct work_struct *wrk) >> { >> struct intel_guc *guc = container_of(wrk, typeof(*guc), >> timestamp.work.work); >> struct intel_uc *uc = container_of(guc, typeof(*uc), guc); >> struct intel_gt *gt = guc_to_gt(guc); >>+ struct intel_context *ce; >> intel_wakeref_t wakeref; >>+ unsigned long index; >> int srcu, ret; >> /* >>@@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct *wrk) >> with_intel_runtime_pm(>->i915->runtime_pm, wakeref) >> __update_guc_busyness_stats(guc); >>+ /* adjust context stats for overflow */ >>+ xa_for_each(&guc->context_lookup, index, ce) >>+ __guc_context_update_clks(ce); >>+ >> intel_gt_reset_unlock(gt, srcu); >> mod_delayed_work(system_highpri_wq, &guc->timestamp.work, >>@@ -1469,6 +1476,56 @@ void intel_guc_busyness_unpark(struct intel_gt *gt) >> guc->timestamp.ping_delay); >> } >>+static void __guc_context_update_clks(struct intel_context *ce) >>+{ >>+ struct intel_guc *guc = ce_to_guc(ce); >>+ struct intel_gt *gt = 
ce->engine->gt; >>+ u32 *pphwsp, last_switch, engine_id; >>+ u64 start_gt_clk, active; >>+ unsigned long flags; >>+ ktime_t unused; >>+ >>+ spin_lock_irqsave(&guc->timestamp.lock, flags); >>+ >>+ /* >>+ * GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched >>+ * out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) >>+ * relies on GuC and GPU for busyness calculations. Due to this, A >>+ * potential race was highlighted in an earlier review that can lead to >>+ * double accounting of busyness. While the solution to this is a wip, >>+ * busyness is still usable for platforms running GuC submission. >>+ */ >>+ pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; >>+ last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); > >What about PPHWSP_GUC_CONTEXT_USAGE_STAMP_HI? I see it defined but >isn't used so is the timestmap 32 bit just ABI reserved 64 bits for >future proofing or something? Yes, the _HI is not populated by GuC yet, but reserved for future extension to 64 bits. 
Thanks, Umesh > >Regards, > >Tvrtko > >>+ engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); >>+ >>+ guc_update_pm_timestamp(guc, &unused); >>+ >>+ if (engine_id != 0xffffffff && last_switch) { >>+ start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); >>+ __extend_last_switch(guc, &start_gt_clk, last_switch); >>+ active = intel_gt_clock_interval_to_ns(gt, guc->timestamp.gt_stamp - start_gt_clk); >>+ WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); >>+ WRITE_ONCE(ce->stats.active, active); >>+ } else { >>+ lrc_update_runtime(ce); >>+ } >>+ >>+ spin_unlock_irqrestore(&guc->timestamp.lock, flags); >>+} >>+ >>+static void guc_context_update_stats(struct intel_context *ce) >>+{ >>+ if (!intel_context_pin_if_active(ce)) { >>+ WRITE_ONCE(ce->stats.runtime.start_gt_clk, 0); >>+ WRITE_ONCE(ce->stats.active, 0); >>+ return; >>+ } >>+ >>+ __guc_context_update_clks(ce); >>+ intel_context_unpin(ce); >>+} >>+ >> static inline bool >> submission_disabled(struct intel_guc *guc) >> { >>@@ -2723,6 +2780,7 @@ static void guc_context_unpin(struct intel_context *ce) >> { >> struct intel_guc *guc = ce_to_guc(ce); >>+ lrc_update_runtime(ce); >> unpin_guc_id(guc, ce); >> lrc_unpin(ce); >>@@ -3344,6 +3402,7 @@ static void remove_from_context(struct i915_request *rq) >> } >> static const struct intel_context_ops guc_context_ops = { >>+ .flags = COPS_RUNTIME_CYCLES | COPS_RUNTIME_ACTIVE_TOTAL, >> .alloc = guc_context_alloc, >> .pre_pin = guc_context_pre_pin, >>@@ -3360,6 +3419,8 @@ static const struct intel_context_ops guc_context_ops = { >> .sched_disable = guc_context_sched_disable, >>+ .update_stats = guc_context_update_stats, >>+ >> .reset = lrc_reset, >> .destroy = guc_context_destroy, >>@@ -3593,6 +3654,7 @@ static int guc_virtual_context_alloc(struct intel_context *ce) >> } >> static const struct intel_context_ops virtual_guc_context_ops = { >>+ .flags = COPS_RUNTIME_CYCLES | COPS_RUNTIME_ACTIVE_TOTAL, >> .alloc = guc_virtual_context_alloc, >> .pre_pin = 
guc_virtual_context_pre_pin, >>@@ -3608,6 +3670,7 @@ static const struct intel_context_ops virtual_guc_context_ops = { >> .exit = guc_virtual_context_exit, >> .sched_disable = guc_context_sched_disable, >>+ .update_stats = guc_context_update_stats, >> .destroy = guc_context_destroy, >>diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c >>index b09d1d386574..8d81119fff14 100644 >>--- a/drivers/gpu/drm/i915/i915_drm_client.c >>+++ b/drivers/gpu/drm/i915/i915_drm_client.c >>@@ -147,11 +147,7 @@ void i915_drm_client_fdinfo(struct seq_file *m, struct file *f) >> PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); >> seq_printf(m, "drm-client-id:\t%u\n", client->id); >>- /* >>- * Temporarily skip showing client engine information with GuC submission till >>- * fetching engine busyness is implemented in the GuC submission backend >>- */ >>- if (GRAPHICS_VER(i915) < 8 || intel_uc_uses_guc_submission(&i915->gt0.uc)) >>+ if (GRAPHICS_VER(i915) < 8) >> return; >> for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)
On Thu, 04 Aug 2022 16:21:25 -0700, Umesh Nerlige Ramappa wrote: > Hi Umesh, Still reviewing but I have a question below. > diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c > index 654a092ed3d6..e2d70a9fdac0 100644 > --- a/drivers/gpu/drm/i915/gt/intel_context.c > +++ b/drivers/gpu/drm/i915/gt/intel_context.c > @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, > child->parallel.parent = parent; > } > > -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) > +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) > { > u64 total, active; > > + if (ce->ops->update_stats) > + ce->ops->update_stats(ce); > + /snip/ > @@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct *wrk) > with_intel_runtime_pm(>->i915->runtime_pm, wakeref) > __update_guc_busyness_stats(guc); > > + /* adjust context stats for overflow */ > + xa_for_each(&guc->context_lookup, index, ce) > + __guc_context_update_clks(ce); > + The question is why do we have 2 functions: __guc_context_update_clks() (which we call periodically from guc_timestamp_ping()) and guc_context_update_stats() (which we call non-periodically from intel_context_get_total_runtime_ns()? Why don't we have just one function which is called from both places? Or rather why don't we call guc_context_update_stats() from both places? If we don't call guc_context_update_stats() periodically from guc_timestamp_ping() how e.g. does ce->stats.runtime.start_gt_clk get reset to 0? If it gets reset to 0 in __guc_context_update_clks() then why do we need to reset it in guc_context_update_stats()? Also IMO guc->timestamp.lock should be taken by this single function, (otherwise guc_context_update_stats() is modifying ce->stats.runtime.start_gt_clk without taking the lock). Thanks. 
-- Ashutosh > +static void __guc_context_update_clks(struct intel_context *ce) > +{ > + struct intel_guc *guc = ce_to_guc(ce); > + struct intel_gt *gt = ce->engine->gt; > + u32 *pphwsp, last_switch, engine_id; > + u64 start_gt_clk, active; > + unsigned long flags; > + ktime_t unused; > + > + spin_lock_irqsave(&guc->timestamp.lock, flags); > + > + /* > + * GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched > + * out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) > + * relies on GuC and GPU for busyness calculations. Due to this, A > + * potential race was highlighted in an earlier review that can lead to > + * double accounting of busyness. While the solution to this is a wip, > + * busyness is still usable for platforms running GuC submission. > + */ > + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; > + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); > + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); > + > + guc_update_pm_timestamp(guc, &unused); > + > + if (engine_id != 0xffffffff && last_switch) { > + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); > + __extend_last_switch(guc, &start_gt_clk, last_switch); > + active = intel_gt_clock_interval_to_ns(gt, guc->timestamp.gt_stamp - start_gt_clk); > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); > + WRITE_ONCE(ce->stats.active, active); > + } else { > + lrc_update_runtime(ce); > + } > + > + spin_unlock_irqrestore(&guc->timestamp.lock, flags); > +} > + > +static void guc_context_update_stats(struct intel_context *ce) > +{ > + if (!intel_context_pin_if_active(ce)) { > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, 0); > + WRITE_ONCE(ce->stats.active, 0); > + return; > + } > + > + __guc_context_update_clks(ce); > + intel_context_unpin(ce); > +}
On Wed, 24 Aug 2022 22:03:19 -0700, Dixit, Ashutosh wrote: > > On Thu, 04 Aug 2022 16:21:25 -0700, Umesh Nerlige Ramappa wrote: > > > > Hi Umesh, > > Still reviewing but I have a question below. Please ignore this mail for now, mostly a result of my misunderstanding the code. I will ask again if I have any questions. Thanks. > > > diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c > > index 654a092ed3d6..e2d70a9fdac0 100644 > > --- a/drivers/gpu/drm/i915/gt/intel_context.c > > +++ b/drivers/gpu/drm/i915/gt/intel_context.c > > @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, > > child->parallel.parent = parent; > > } > > > > -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) > > +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) > > { > > u64 total, active; > > > > + if (ce->ops->update_stats) > > + ce->ops->update_stats(ce); > > + > > /snip/ > > > @@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct *wrk) > > with_intel_runtime_pm(>->i915->runtime_pm, wakeref) > > __update_guc_busyness_stats(guc); > > > > + /* adjust context stats for overflow */ > > + xa_for_each(&guc->context_lookup, index, ce) > > + __guc_context_update_clks(ce); > > + > > The question is why do we have 2 functions: __guc_context_update_clks() > (which we call periodically from guc_timestamp_ping()) and > guc_context_update_stats() (which we call non-periodically from > intel_context_get_total_runtime_ns()? Why don't we have just one function > which is called from both places? Or rather why don't we call > guc_context_update_stats() from both places? > > If we don't call guc_context_update_stats() periodically from > guc_timestamp_ping() how e.g. does ce->stats.runtime.start_gt_clk get reset > to 0? If it gets reset to 0 in __guc_context_update_clks() then why do we > need to reset it in guc_context_update_stats()? 
> > Also IMO guc->timestamp.lock should be taken by this single function, > (otherwise guc_context_update_stats() is modifying > ce->stats.runtime.start_gt_clk without taking the lock). > > Thanks. > -- > Ashutosh > > > +static void __guc_context_update_clks(struct intel_context *ce) > > +{ > > + struct intel_guc *guc = ce_to_guc(ce); > > + struct intel_gt *gt = ce->engine->gt; > > + u32 *pphwsp, last_switch, engine_id; > > + u64 start_gt_clk, active; > > + unsigned long flags; > > + ktime_t unused; > > + > > + spin_lock_irqsave(&guc->timestamp.lock, flags); > > + > > + /* > > + * GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched > > + * out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) > > + * relies on GuC and GPU for busyness calculations. Due to this, A > > + * potential race was highlighted in an earlier review that can lead to > > + * double accounting of busyness. While the solution to this is a wip, > > + * busyness is still usable for platforms running GuC submission. 
> > + */ > > + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; > > + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); > > + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); > > + > > + guc_update_pm_timestamp(guc, &unused); > > + > > + if (engine_id != 0xffffffff && last_switch) { > > + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); > > + __extend_last_switch(guc, &start_gt_clk, last_switch); > > + active = intel_gt_clock_interval_to_ns(gt, guc->timestamp.gt_stamp - start_gt_clk); > > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); > > + WRITE_ONCE(ce->stats.active, active); > > + } else { > > + lrc_update_runtime(ce); > > + } > > + > > + spin_unlock_irqrestore(&guc->timestamp.lock, flags); > > +} > > + > > +static void guc_context_update_stats(struct intel_context *ce) > > +{ > > + if (!intel_context_pin_if_active(ce)) { > > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, 0); > > + WRITE_ONCE(ce->stats.active, 0); > > + return; > > + } > > + > > + __guc_context_update_clks(ce); > > + intel_context_unpin(ce); > > +}
On Thu, 04 Aug 2022 16:21:25 -0700, Umesh Nerlige Ramappa wrote: Hi Umesh, I am fairly new to this code so some of the questions below will be newbie questions, thanks for bearing with me. > diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c > index 654a092ed3d6..e2d70a9fdac0 100644 > --- a/drivers/gpu/drm/i915/gt/intel_context.c > +++ b/drivers/gpu/drm/i915/gt/intel_context.c > @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, > child->parallel.parent = parent; > } > > -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) > +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) > { > u64 total, active; > > + if (ce->ops->update_stats) > + ce->ops->update_stats(ce); > + > total = ce->stats.runtime.total; > if (ce->ops->flags & COPS_RUNTIME_CYCLES) > total *= ce->engine->gt->clock_period_ns; > > active = READ_ONCE(ce->stats.active); > - if (active) > + /* > + * When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend > + * already provides the total active time of the context, so skip this > + * calculation when this flag is set. > + */ > + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) > active = intel_context_clock() - active; > > return total + active; /snip/ > @@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct *wrk) > with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) > __update_guc_busyness_stats(guc); > > + /* adjust context stats for overflow */ > + xa_for_each(&guc->context_lookup, index, ce) > + __guc_context_update_clks(ce); What is the reason for calling __guc_context_update_clks() periodically from guc_timestamp_ping() since it appears we should just be able to call __guc_context_update_clks() from intel_context_get_total_runtime_ns() to update 'active'? 
Is the reason for calling __guc_context_update_clks() periodically that the calculations in __guc_context_update_clks() become invalid if the counters overflow? > + > intel_gt_reset_unlock(gt, srcu); > > mod_delayed_work(system_highpri_wq, &guc->timestamp.work, > @@ -1469,6 +1476,56 @@ void intel_guc_busyness_unpark(struct intel_gt *gt) > guc->timestamp.ping_delay); > } > > +static void __guc_context_update_clks(struct intel_context *ce) > +{ > + struct intel_guc *guc = ce_to_guc(ce); > + struct intel_gt *gt = ce->engine->gt; > + u32 *pphwsp, last_switch, engine_id; > + u64 start_gt_clk, active; > + unsigned long flags; > + ktime_t unused; > + > + spin_lock_irqsave(&guc->timestamp.lock, flags); > + > + /* > + * GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched > + * out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) > + * relies on GuC and GPU for busyness calculations. Due to this, A > + * potential race was highlighted in an earlier review that can lead to > + * double accounting of busyness. While the solution to this is a wip, > + * busyness is still usable for platforms running GuC submission. > + */ > + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; > + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); > + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); > + > + guc_update_pm_timestamp(guc, &unused); > + > + if (engine_id != 0xffffffff && last_switch) { > + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); > + __extend_last_switch(guc, &start_gt_clk, last_switch); > + active = intel_gt_clock_interval_to_ns(gt, guc->timestamp.gt_stamp - start_gt_clk); > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); > + WRITE_ONCE(ce->stats.active, active); Should not need WRITE_ONCE to update regular memory. Not even sure we need READ_ONCE above. > + } else { > + lrc_update_runtime(ce); As was being discussed, should not need this here in this function. See below too. 
> + } > + > + spin_unlock_irqrestore(&guc->timestamp.lock, flags); > +} > + > +static void guc_context_update_stats(struct intel_context *ce) > +{ > + if (!intel_context_pin_if_active(ce)) { > + WRITE_ONCE(ce->stats.runtime.start_gt_clk, 0); > + WRITE_ONCE(ce->stats.active, 0); Why do these need to be initialized to 0? Looks like the calculations in __guc_context_update_clks() will work even if we don't do this? Also I didn't follow the 'if (!intel_context_pin_if_active(ce))' check. > + return; > + } > + > + __guc_context_update_clks(ce); > + intel_context_unpin(ce); > +} > + > static inline bool > submission_disabled(struct intel_guc *guc) > { > @@ -2723,6 +2780,7 @@ static void guc_context_unpin(struct intel_context *ce) > { > struct intel_guc *guc = ce_to_guc(ce); > > + lrc_update_runtime(ce); How about moving this into lrc_unpin() since that gets called from all guc context types (parent/child/virtual). > unpin_guc_id(guc, ce); > lrc_unpin(ce); > > @@ -3344,6 +3402,7 @@ static void remove_from_context(struct i915_request *rq) > } > > static const struct intel_context_ops guc_context_ops = { > + .flags = COPS_RUNTIME_CYCLES | COPS_RUNTIME_ACTIVE_TOTAL, > .alloc = guc_context_alloc, > > .pre_pin = guc_context_pre_pin, > @@ -3360,6 +3419,8 @@ static const struct intel_context_ops guc_context_ops = { > > .sched_disable = guc_context_sched_disable, > > + .update_stats = guc_context_update_stats, > + > .reset = lrc_reset, > .destroy = guc_context_destroy, > > @@ -3593,6 +3654,7 @@ static int guc_virtual_context_alloc(struct intel_context *ce) > } > > static const struct intel_context_ops virtual_guc_context_ops = { > + .flags = COPS_RUNTIME_CYCLES | COPS_RUNTIME_ACTIVE_TOTAL, > .alloc = guc_virtual_context_alloc, > > .pre_pin = guc_virtual_context_pre_pin, > @@ -3608,6 +3670,7 @@ static const struct intel_context_ops virtual_guc_context_ops = { > .exit = guc_virtual_context_exit, > > .sched_disable = guc_context_sched_disable, > + .update_stats = 
guc_context_update_stats, > > .destroy = guc_context_destroy, > > diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c > index b09d1d386574..8d81119fff14 100644 > --- a/drivers/gpu/drm/i915/i915_drm_client.c > +++ b/drivers/gpu/drm/i915/i915_drm_client.c > @@ -147,11 +147,7 @@ void i915_drm_client_fdinfo(struct seq_file *m, struct file *f) > PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); > seq_printf(m, "drm-client-id:\t%u\n", client->id); > > - /* > - * Temporarily skip showing client engine information with GuC submission till > - * fetching engine busyness is implemented in the GuC submission backend > - */ > - if (GRAPHICS_VER(i915) < 8 || intel_uc_uses_guc_submission(&i915->gt0.uc)) > + if (GRAPHICS_VER(i915) < 8) > return; > > for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++) > -- > 2.37.1 >
On Wed, Aug 24, 2022 at 06:17:19PM -0700, Dixit, Ashutosh wrote: >On Fri, 05 Aug 2022 08:18:48 -0700, Umesh Nerlige Ramappa wrote: >> >> On Fri, Aug 05, 2022 at 10:45:30AM +0100, Tvrtko Ursulin wrote: >> > >> > On 05/08/2022 00:21, Umesh Nerlige Ramappa wrote: >> >> -static inline struct intel_guc *ce_to_guc(struct intel_context *ce) >> >> +static inline struct intel_guc *ce_to_guc(const struct intel_context *ce) >> > >> > This is odd since the helper now takes away constness. I can't really >> > figure out why the change is needed? > >Hi Umesh, I am also wondering about this, I think you missed answering this >question from Tvrtko. This helper 'adds' constness, so wasn't sure if the comment was intended for this helper. Thanks, Umesh > >Thanks. >-- >Ashutosh
On Thu, Aug 25, 2022 at 06:44:50PM -0700, Dixit, Ashutosh wrote: >On Thu, 04 Aug 2022 16:21:25 -0700, Umesh Nerlige Ramappa wrote: > >Hi Umesh, I am fairly new to this code so some questions will be below will >be newbie questions, thanks for bearing with me. > >> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c >> index 654a092ed3d6..e2d70a9fdac0 100644 >> --- a/drivers/gpu/drm/i915/gt/intel_context.c >> +++ b/drivers/gpu/drm/i915/gt/intel_context.c >> @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, >> child->parallel.parent = parent; >> } >> >> -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) >> +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) >> { >> u64 total, active; >> >> + if (ce->ops->update_stats) >> + ce->ops->update_stats(ce); >> + >> total = ce->stats.runtime.total; >> if (ce->ops->flags & COPS_RUNTIME_CYCLES) >> total *= ce->engine->gt->clock_period_ns; >> >> active = READ_ONCE(ce->stats.active); >> - if (active) >> + /* >> + * When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend >> + * already provides the total active time of the context, so skip this >> + * calculation when this flag is set. >> + */ >> + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) >> active = intel_context_clock() - active; >> >> return total + active; > >/snip/ > >> @@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct *wrk) >> with_intel_runtime_pm(>->i915->runtime_pm, wakeref) >> __update_guc_busyness_stats(guc); >> >> + /* adjust context stats for overflow */ >> + xa_for_each(&guc->context_lookup, index, ce) >> + __guc_context_update_clks(ce); > >What is the reason for calling __guc_context_update_clks() periodically >from guc_timestamp_ping() since it appears we should just be able to call >__guc_context_update_clks() from intel_context_get_total_runtime_ns() to >update 'active'? 
Is the reason for calling __guc_context_update_clks() >periodically that the calculations in __guc_context_update_clks() become >invalid if the counters overflow? Correct, these are 32-bit counters and the worker just tracks overflow. > >> + >> intel_gt_reset_unlock(gt, srcu); >> >> mod_delayed_work(system_highpri_wq, &guc->timestamp.work, >> @@ -1469,6 +1476,56 @@ void intel_guc_busyness_unpark(struct intel_gt *gt) >> guc->timestamp.ping_delay); >> } >> >> +static void __guc_context_update_clks(struct intel_context *ce) >> +{ >> + struct intel_guc *guc = ce_to_guc(ce); >> + struct intel_gt *gt = ce->engine->gt; >> + u32 *pphwsp, last_switch, engine_id; >> + u64 start_gt_clk, active; >> + unsigned long flags; >> + ktime_t unused; >> + >> + spin_lock_irqsave(&guc->timestamp.lock, flags); >> + >> + /* >> + * GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched >> + * out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) >> + * relies on GuC and GPU for busyness calculations. Due to this, A >> + * potential race was highlighted in an earlier review that can lead to >> + * double accounting of busyness. While the solution to this is a wip, >> + * busyness is still usable for platforms running GuC submission. >> + */ >> + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; >> + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); >> + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); >> + >> + guc_update_pm_timestamp(guc, &unused); >> + >> + if (engine_id != 0xffffffff && last_switch) { >> + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); >> + __extend_last_switch(guc, &start_gt_clk, last_switch); >> + active = intel_gt_clock_interval_to_ns(gt, guc->timestamp.gt_stamp - start_gt_clk); >> + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); >> + WRITE_ONCE(ce->stats.active, active); > >Should not need WRITE_ONCE to update regular memory. Not even sure we need >READ_ONCE above. 
Not sure I checked what they do. I was thinking these are needed for the memory ordering (as in be sure that start_gt_clk is updated before active). > >> + } else { >> + lrc_update_runtime(ce); > >As was being discussed, should not need this here in this function. See >below too. In short, I added this here so that a query for busyness following idle can be obtained immediately. For GuC backend, the context is unpinned after disabling scheduling on that context and that is asynchronous. Also if there are more requests on that context, the scheduling may not be disabled and unpin may not happen, so updated runtime would only be seen much much later. It is still safe to call from here because we know that the context is not active and has switched out. If it did switch in while we were reading this, that's still fine, we would only report the value stored in the context image. > >> + } >> + >> + spin_unlock_irqrestore(&guc->timestamp.lock, flags); >> +} >> + >> +static void guc_context_update_stats(struct intel_context *ce) >> +{ >> + if (!intel_context_pin_if_active(ce)) { >> + WRITE_ONCE(ce->stats.runtime.start_gt_clk, 0); >> + WRITE_ONCE(ce->stats.active, 0); > >Why do these need to be initialized to 0? Looks like the calculations in >__guc_context_update_clks() will work even if we don't do this? Also I >didn't follow the 'if (!intel_context_pin_if_active(ce))' check. __guc_context_update_clks accesses the context image, so we need to make sure it's pinned. pin if active will not sleep/wait, so we can use it in this path. if context is not active, then we update the active stats to 0. 
> >> + return; >> + } >> + >> + __guc_context_update_clks(ce); >> + intel_context_unpin(ce); >> +} >> + >> static inline bool >> submission_disabled(struct intel_guc *guc) >> { >> @@ -2723,6 +2780,7 @@ static void guc_context_unpin(struct intel_context *ce) >> { >> struct intel_guc *guc = ce_to_guc(ce); >> >> + lrc_update_runtime(ce); > >How about moving this into lrc_unpin() since that gets called from all guc >context types (parent/child/virtual). Looks like lrc_unpin is called from context_unpin path. Same as above: for GuC, the context_unpin is an async operation and may not happen if there are multiple requests in queue. Thanks, Umesh > >> unpin_guc_id(guc, ce); >> lrc_unpin(ce); >> >> @@ -3344,6 +3402,7 @@ static void remove_from_context(struct i915_request *rq) >> } >> >> static const struct intel_context_ops guc_context_ops = { >> + .flags = COPS_RUNTIME_CYCLES | COPS_RUNTIME_ACTIVE_TOTAL, >> .alloc = guc_context_alloc, >> >> .pre_pin = guc_context_pre_pin, >> @@ -3360,6 +3419,8 @@ static const struct intel_context_ops guc_context_ops = { >> >> .sched_disable = guc_context_sched_disable, >> >> + .update_stats = guc_context_update_stats, >> + >> .reset = lrc_reset, >> .destroy = guc_context_destroy, >> >> @@ -3593,6 +3654,7 @@ static int guc_virtual_context_alloc(struct intel_context *ce) >> } >> >> static const struct intel_context_ops virtual_guc_context_ops = { >> + .flags = COPS_RUNTIME_CYCLES | COPS_RUNTIME_ACTIVE_TOTAL, >> .alloc = guc_virtual_context_alloc, >> >> .pre_pin = guc_virtual_context_pre_pin, >> @@ -3608,6 +3670,7 @@ static const struct intel_context_ops virtual_guc_context_ops = { >> .exit = guc_virtual_context_exit, >> >> .sched_disable = guc_context_sched_disable, >> + .update_stats = guc_context_update_stats, >> >> .destroy = guc_context_destroy, >> >> diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c >> index b09d1d386574..8d81119fff14 100644 >> --- a/drivers/gpu/drm/i915/i915_drm_client.c >> 
+++ b/drivers/gpu/drm/i915/i915_drm_client.c >> @@ -147,11 +147,7 @@ void i915_drm_client_fdinfo(struct seq_file *m, struct file *f) >> PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); >> seq_printf(m, "drm-client-id:\t%u\n", client->id); >> >> - /* >> - * Temporarily skip showing client engine information with GuC submission till >> - * fetching engine busyness is implemented in the GuC submission backend >> - */ >> - if (GRAPHICS_VER(i915) < 8 || intel_uc_uses_guc_submission(&i915->gt0.uc)) >> + if (GRAPHICS_VER(i915) < 8) >> return; >> >> for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++) >> -- >> 2.37.1 >>
On Fri, 26 Aug 2022 09:33:08 -0700, Umesh Nerlige Ramappa wrote: > Hi Umesh, Just to communicate my thoughts I have posted this patch on top of your patch: [1] https://patchwork.freedesktop.org/series/107983/ Could you please take a look at that and see if it makes sense. > On Thu, Aug 25, 2022 at 06:44:50PM -0700, Dixit, Ashutosh wrote: > > On Thu, 04 Aug 2022 16:21:25 -0700, Umesh Nerlige Ramappa wrote: > > > > Hi Umesh, I am fairly new to this code so some questions will be below will > > be newbie questions, thanks for bearing with me. > > > >> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c > >> index 654a092ed3d6..e2d70a9fdac0 100644 > >> --- a/drivers/gpu/drm/i915/gt/intel_context.c > >> +++ b/drivers/gpu/drm/i915/gt/intel_context.c > >> @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, > >> child->parallel.parent = parent; > >> } > >> > >> -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) > >> +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) > >> { > >> u64 total, active; > >> > >> + if (ce->ops->update_stats) > >> + ce->ops->update_stats(ce); > >> + > >> total = ce->stats.runtime.total; > >> if (ce->ops->flags & COPS_RUNTIME_CYCLES) > >> total *= ce->engine->gt->clock_period_ns; > >> > >> active = READ_ONCE(ce->stats.active); > >> - if (active) > >> + /* > >> + * When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend > >> + * already provides the total active time of the context, so skip this > >> + * calculation when this flag is set. 
> >> + */ > >> + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) > >> active = intel_context_clock() - active; > >> > >> return total + active; > > > > /snip/ > > > >> @@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct *wrk) > >> with_intel_runtime_pm(>->i915->runtime_pm, wakeref) > >> __update_guc_busyness_stats(guc); > >> > >> + /* adjust context stats for overflow */ > >> + xa_for_each(&guc->context_lookup, index, ce) > >> + __guc_context_update_clks(ce); > > > > What is the reason for calling __guc_context_update_clks() periodically > > from guc_timestamp_ping() since it appears we should just be able to call > > __guc_context_update_clks() from intel_context_get_total_runtime_ns() to > > update 'active'? Is the reason for calling __guc_context_update_clks() > > periodically that the calculations in __guc_context_update_clks() become > > invalid if the counters overflow? > > Correct, these are 32-bit counters and the worker just tracks overflow. OK. > > > > >> + > >> intel_gt_reset_unlock(gt, srcu); > >> > >> mod_delayed_work(system_highpri_wq, &guc->timestamp.work, > >> @@ -1469,6 +1476,56 @@ void intel_guc_busyness_unpark(struct intel_gt *gt) > >> guc->timestamp.ping_delay); > >> } > >> > >> +static void __guc_context_update_clks(struct intel_context *ce) > >> +{ > >> + struct intel_guc *guc = ce_to_guc(ce); > >> + struct intel_gt *gt = ce->engine->gt; > >> + u32 *pphwsp, last_switch, engine_id; > >> + u64 start_gt_clk, active; > >> + unsigned long flags; > >> + ktime_t unused; > >> + > >> + spin_lock_irqsave(&guc->timestamp.lock, flags); > >> + > >> + /* > >> + * GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched > >> + * out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) > >> + * relies on GuC and GPU for busyness calculations. Due to this, A > >> + * potential race was highlighted in an earlier review that can lead to > >> + * double accounting of busyness. 
While the solution to this is a wip, > >> + * busyness is still usable for platforms running GuC submission. > >> + */ > >> + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; > >> + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); > >> + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); > >> + > >> + guc_update_pm_timestamp(guc, &unused); > >> + > >> + if (engine_id != 0xffffffff && last_switch) { > >> + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); > >> + __extend_last_switch(guc, &start_gt_clk, last_switch); > >> + active = intel_gt_clock_interval_to_ns(gt, guc->timestamp.gt_stamp - start_gt_clk); > >> + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); > >> + WRITE_ONCE(ce->stats.active, active); > > > > Should not need WRITE_ONCE to update regular memory. Not even sure we need > > READ_ONCE above. > > Not sure I checked what they do. I was thinking these are needed for the > memory ordering (as in be sure that start_gt_clk is updated before > active). As long as our operations are done under correct locks we don't have to worry about memory ordering. That is one of the reasons I am doing everything under the spinlock in [1]. > > > > >> + } else { > >> + lrc_update_runtime(ce); > > > > As was being discussed, should not need this here in this function. See > > below too. > > In short, I added this here so that a query for busyness following idle can > be obtained immediately. For GuC backend, the context is unpinned after > disabling scheduling on that context and that is asynchronous. Also if > there are more requests on that context, the scheduling may not be disabled > and unpin may not happen, so updated runtime would only be seen much much > later. > > It is still safe to call from here because we know that the context is not > active and has switched out. If it did switch in while we were reading > this, that's still fine, we would only report the value stored in the > context image. 
Agreed, but in [1] I have made this unconditional, not sure if you will agree or see problems with that. > > > > >> + } > >> + > >> + spin_unlock_irqrestore(&guc->timestamp.lock, flags); > >> +} > >> + > >> +static void guc_context_update_stats(struct intel_context *ce) > >> +{ > >> + if (!intel_context_pin_if_active(ce)) { > >> + WRITE_ONCE(ce->stats.runtime.start_gt_clk, 0); > >> + WRITE_ONCE(ce->stats.active, 0); > > > > Why do these need to be initialized to 0? Looks like the calculations in > > __guc_context_update_clks() will work even if we don't do this? Also I > > didn't follow the 'if (!intel_context_pin_if_active(ce))' check. > > __guc_context_update_clks accesses the context image, so we need to make > sure it's pinned. pin if active will not sleep/wait, so we can use it in > this path. I have added pinning in [1]. > if context is not active, then we update the active stats to 0. In [1] active is just a local variable and I don't touch ce->stats.active at all. > >> + return; > >> + } > >> + > >> + __guc_context_update_clks(ce); > >> + intel_context_unpin(ce); > >> +} > >> + > >> static inline bool > >> submission_disabled(struct intel_guc *guc) > >> { > >> @@ -2723,6 +2780,7 @@ static void guc_context_unpin(struct intel_context *ce) > >> { > >> struct intel_guc *guc = cce_to_guc(ce); > >> > >> + lrc_update_runtime(ce); > > > > How about moving this into lrc_unpin() since that gets called from all guc > > context types (parent/child/virtual). > > looks like lrc_unpin is called from context_unpin path. > > Same as above: for GuC, the context_unpin is an async operation and may not > happen if there are multiple requests in queue. In [1] I have left lrc_unpin in guc_context_unpin but changed to lrc_update_runtime_locked. Thanks. -- Ashutosh
On Wed, Aug 31, 2022 at 01:25:11PM -0700, Dixit, Ashutosh wrote: >On Fri, 26 Aug 2022 09:33:08 -0700, Umesh Nerlige Ramappa wrote: >> > >Hi Umesh, > >Just to communicate my thoughts I have posted this patch on top of your >patch: > >[1] https://patchwork.freedesktop.org/series/107983/ > >Could you please take a look at that and see if it makes sense. > >> On Thu, Aug 25, 2022 at 06:44:50PM -0700, Dixit, Ashutosh wrote: >> > On Thu, 04 Aug 2022 16:21:25 -0700, Umesh Nerlige Ramappa wrote: >> > >> > Hi Umesh, I am fairly new to this code so some questions will be below will >> > be newbie questions, thanks for bearing with me. >> > >> >> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c >> >> index 654a092ed3d6..e2d70a9fdac0 100644 >> >> --- a/drivers/gpu/drm/i915/gt/intel_context.c >> >> +++ b/drivers/gpu/drm/i915/gt/intel_context.c >> >> @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, >> >> child->parallel.parent = parent; >> >> } >> >> >> >> -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) >> >> +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) >> >> { >> >> u64 total, active; >> >> >> >> + if (ce->ops->update_stats) >> >> + ce->ops->update_stats(ce); >> >> + >> >> total = ce->stats.runtime.total; >> >> if (ce->ops->flags & COPS_RUNTIME_CYCLES) >> >> total *= ce->engine->gt->clock_period_ns; >> >> >> >> active = READ_ONCE(ce->stats.active); >> >> - if (active) >> >> + /* >> >> + * When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend >> >> + * already provides the total active time of the context, so skip this >> >> + * calculation when this flag is set. 
>> >> + */ >> >> + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) >> >> active = intel_context_clock() - active; >> >> >> >> return total + active; >> > >> > /snip/ >> > >> >> @@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct *wrk) >> >> with_intel_runtime_pm(>->i915->runtime_pm, wakeref) >> >> __update_guc_busyness_stats(guc); >> >> >> >> + /* adjust context stats for overflow */ >> >> + xa_for_each(&guc->context_lookup, index, ce) >> >> + __guc_context_update_clks(ce); >> > >> > What is the reason for calling __guc_context_update_clks() periodically >> > from guc_timestamp_ping() since it appears we should just be able to call >> > __guc_context_update_clks() from intel_context_get_total_runtime_ns() to >> > update 'active'? Is the reason for calling __guc_context_update_clks() >> > periodically that the calculations in __guc_context_update_clks() become >> > invalid if the counters overflow? >> >> Correct, these are 32-bit counters and the worker just tracks overflow. > >OK. > >> >> > >> >> + >> >> intel_gt_reset_unlock(gt, srcu); >> >> >> >> mod_delayed_work(system_highpri_wq, &guc->timestamp.work, >> >> @@ -1469,6 +1476,56 @@ void intel_guc_busyness_unpark(struct intel_gt *gt) >> >> guc->timestamp.ping_delay); >> >> } >> >> >> >> +static void __guc_context_update_clks(struct intel_context *ce) >> >> +{ >> >> + struct intel_guc *guc = ce_to_guc(ce); >> >> + struct intel_gt *gt = ce->engine->gt; >> >> + u32 *pphwsp, last_switch, engine_id; >> >> + u64 start_gt_clk, active; >> >> + unsigned long flags; >> >> + ktime_t unused; >> >> + >> >> + spin_lock_irqsave(&guc->timestamp.lock, flags); >> >> + >> >> + /* >> >> + * GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched >> >> + * out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) >> >> + * relies on GuC and GPU for busyness calculations. 
Due to this, A >> >> + * potential race was highlighted in an earlier review that can lead to >> >> + * double accounting of busyness. While the solution to this is a wip, >> >> + * busyness is still usable for platforms running GuC submission. >> >> + */ >> >> + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; >> >> + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); >> >> + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); >> >> + >> >> + guc_update_pm_timestamp(guc, &unused); >> >> + >> >> + if (engine_id != 0xffffffff && last_switch) { >> >> + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); >> >> + __extend_last_switch(guc, &start_gt_clk, last_switch); >> >> + active = intel_gt_clock_interval_to_ns(gt, guc->timestamp.gt_stamp - start_gt_clk); >> >> + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); >> >> + WRITE_ONCE(ce->stats.active, active); >> > >> > Should not need WRITE_ONCE to update regular memory. Not even sure we need >> > READ_ONCE above. >> >> Not sure I checked what they do. I was thinking these are needed for the >> memory ordering (as in be sure that start_gt_clk is updated before >> active). > >As long as our operations are done under correct locks we don't have to >worry about memory ordering. That is one of the reasons I am doing >everything under the spinlock in [1]. > >> >> > >> >> + } else { >> >> + lrc_update_runtime(ce); >> > >> > As was being discussed, should not need this here in this function. See >> > below too. >> >> In short, I added this here so that a query for busyness following idle can >> be obtained immediately. For GuC backend, the context is unpinned after >> disabling scheduling on that context and that is asynchronous. Also if >> there are more requests on that context, the scheduling may not be disabled >> and unpin may not happen, so updated runtime would only be seen much much >> later. 
>> >> It is still safe to call from here because we know that the context is not >> active and has switched out. If it did switch in while we were reading >> this, that's still fine, we would only report the value stored in the >> context image. >Agreed, but in [1] I have made this unconditional, not sure if you will >agree or see problems with that. That would get called every second (default intel_gpu_top query interval) for a long running workload. Multiply that with all active contexts. > >> >> > >> >> + } >> >> + >> >> + spin_unlock_irqrestore(&guc->timestamp.lock, flags); >> >> +} >> >> + >> >> +static void guc_context_update_stats(struct intel_context *ce) >> >> +{ >> >> + if (!intel_context_pin_if_active(ce)) { >> >> + WRITE_ONCE(ce->stats.runtime.start_gt_clk, 0); >> >> + WRITE_ONCE(ce->stats.active, 0); >> > >> > Why do these need to be initialized to 0? Looks like the calculations in >> > __guc_context_update_clks() will work even if we don't do this? Also I >> > didn't follow the 'if (!intel_context_pin_if_active(ce))' check. >> >> __guc_context_update_clks accesses the context image, so we need to make >> sure it's pinned. pin if active will not sleep/wait, so we can use it in >> this path. >I have added pinning in [1]. >> if context is not active, then we update the active stats to 0. >In [1] active is just a local variable and I don't touch ce->stats.active >at all. >> >> + return; >> >> + } >> >> + >> >> + __guc_context_update_clks(ce); >> >> + intel_context_unpin(ce); >> >> +} >> >> + >> >> static inline bool >> >> submission_disabled(struct intel_guc *guc) >> >> { >> >> @@ -2723,6 +2780,7 @@ static void guc_context_unpin(struct intel_context *ce) >> >> { >> >> struct intel_guc *guc = ce_to_guc(ce); >> >> >> >> + lrc_update_runtime(ce); >> > >> > How about moving this into lrc_unpin() since that gets called from all guc >> > context types (parent/child/virtual). >> >> looks like lrc_unpin is called from context_unpin path. 
>> >> Same as above: for GuC, the context_unpin is an async operation and may not >> happen if there are multiple requests in queue. > >In [1] I have left lrc_unpin in guc_context_unpin but changed to >lrc_update_runtime_locked. From your rfc patch, I like - the idea of not touching ce->stats.active - having the update_stats return u64 - not doing a rmw for start_gt_clk With those changes, we are only accessing total in ce->stats, so we don't really need a lrc_update_runtime_locked. Thanks, Umesh > >Thanks. >-- >Ashutosh
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 654a092ed3d6..e2d70a9fdac0 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -576,16 +576,24 @@ void intel_context_bind_parent_child(struct intel_context *parent, child->parallel.parent = parent; } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce) +u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { u64 total, active; + if (ce->ops->update_stats) + ce->ops->update_stats(ce); + total = ce->stats.runtime.total; if (ce->ops->flags & COPS_RUNTIME_CYCLES) total *= ce->engine->gt->clock_period_ns; active = READ_ONCE(ce->stats.active); - if (active) + /* + * When COPS_RUNTIME_ACTIVE_TOTAL is set for ce->cops, the backend + * already provides the total active time of the context, so skip this + * calculation when this flag is set. + */ + if (active && !(ce->ops->flags & COPS_RUNTIME_ACTIVE_TOTAL)) active = intel_context_clock() - active; return total + active; diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 8e2d70630c49..3d1d7436c1a4 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -58,7 +58,7 @@ static inline bool intel_context_is_parent(struct intel_context *ce) return !!ce->parallel.number_children; } -static inline bool intel_context_is_pinned(struct intel_context *ce); +static inline bool intel_context_is_pinned(const struct intel_context *ce); static inline struct intel_context * intel_context_to_parent(struct intel_context *ce) @@ -118,7 +118,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce) * Returns: true if the context is currently pinned for use by the GPU. 
*/ static inline bool -intel_context_is_pinned(struct intel_context *ce) +intel_context_is_pinned(const struct intel_context *ce) { return atomic_read(&ce->pin_count); } @@ -362,7 +362,7 @@ intel_context_clear_nopreempt(struct intel_context *ce) clear_bit(CONTEXT_NOPREEMPT, &ce->flags); } -u64 intel_context_get_total_runtime_ns(const struct intel_context *ce); +u64 intel_context_get_total_runtime_ns(struct intel_context *ce); u64 intel_context_get_avg_runtime_ns(struct intel_context *ce); static inline u64 intel_context_clock(void) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 04eacae1aca5..f7ff4c7d81c7 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -38,6 +38,9 @@ struct intel_context_ops { #define COPS_RUNTIME_CYCLES_BIT 1 #define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT) +#define COPS_RUNTIME_ACTIVE_TOTAL_BIT 2 +#define COPS_RUNTIME_ACTIVE_TOTAL BIT(COPS_RUNTIME_ACTIVE_TOTAL_BIT) + int (*alloc)(struct intel_context *ce); void (*revoke)(struct intel_context *ce, struct i915_request *rq, @@ -56,6 +59,8 @@ struct intel_context_ops { void (*sched_disable)(struct intel_context *ce); + void (*update_stats)(struct intel_context *ce); + void (*reset)(struct intel_context *ce); void (*destroy)(struct kref *kref); @@ -148,6 +153,7 @@ struct intel_context { struct ewma_runtime avg; u64 total; u32 last; + u64 start_gt_clk; I915_SELFTEST_DECLARE(u32 num_underflow); I915_SELFTEST_DECLARE(u32 max_underflow); } runtime; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 323b055e5db9..c7b54f1631b9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -196,6 +196,11 @@ static inline u8 guc_class_to_engine_class(u8 guc_class) return guc_class_engine_class_map[guc_class]; } +/* Per context engine usage stats: */ +#define 
PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO (0x500 / sizeof(u32)) +#define PPHWSP_GUC_CONTEXT_USAGE_STAMP_HI (PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO + 1) +#define PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID (PPHWSP_GUC_CONTEXT_USAGE_STAMP_HI + 1) + /* Work item for submitting workloads into work queue of GuC. */ struct guc_wq_item { u32 header; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 0d17da77e787..c9fefa254a7e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -378,7 +378,7 @@ static inline void set_context_guc_id_invalid(struct intel_context *ce) ce->guc_id.id = GUC_INVALID_CONTEXT_ID; } -static inline struct intel_guc *ce_to_guc(struct intel_context *ce) +static inline struct intel_guc *ce_to_guc(const struct intel_context *ce) { return &ce->engine->gt->uc.guc; } @@ -1376,13 +1376,16 @@ static void __update_guc_busyness_stats(struct intel_guc *guc) spin_unlock_irqrestore(&guc->timestamp.lock, flags); } +static void __guc_context_update_clks(struct intel_context *ce); static void guc_timestamp_ping(struct work_struct *wrk) { struct intel_guc *guc = container_of(wrk, typeof(*guc), timestamp.work.work); struct intel_uc *uc = container_of(guc, typeof(*uc), guc); struct intel_gt *gt = guc_to_gt(guc); + struct intel_context *ce; intel_wakeref_t wakeref; + unsigned long index; int srcu, ret; /* @@ -1396,6 +1399,10 @@ static void guc_timestamp_ping(struct work_struct *wrk) with_intel_runtime_pm(>->i915->runtime_pm, wakeref) __update_guc_busyness_stats(guc); + /* adjust context stats for overflow */ + xa_for_each(&guc->context_lookup, index, ce) + __guc_context_update_clks(ce); + intel_gt_reset_unlock(gt, srcu); mod_delayed_work(system_highpri_wq, &guc->timestamp.work, @@ -1469,6 +1476,56 @@ void intel_guc_busyness_unpark(struct intel_gt *gt) guc->timestamp.ping_delay); } +static void __guc_context_update_clks(struct intel_context *ce) +{ + 
struct intel_guc *guc = ce_to_guc(ce); + struct intel_gt *gt = ce->engine->gt; + u32 *pphwsp, last_switch, engine_id; + u64 start_gt_clk, active; + unsigned long flags; + ktime_t unused; + + spin_lock_irqsave(&guc->timestamp.lock, flags); + + /* + * GPU updates ce->lrc_reg_state[CTX_TIMESTAMP] when context is switched + * out, however GuC updates PPHWSP offsets below. Hence KMD (CPU) + * relies on GuC and GPU for busyness calculations. Due to this, A + * potential race was highlighted in an earlier review that can lead to + * double accounting of busyness. While the solution to this is a wip, + * busyness is still usable for platforms running GuC submission. + */ + pphwsp = ((void *)ce->lrc_reg_state) - LRC_STATE_OFFSET; + last_switch = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_STAMP_LO]); + engine_id = READ_ONCE(pphwsp[PPHWSP_GUC_CONTEXT_USAGE_ENGINE_ID]); + + guc_update_pm_timestamp(guc, &unused); + + if (engine_id != 0xffffffff && last_switch) { + start_gt_clk = READ_ONCE(ce->stats.runtime.start_gt_clk); + __extend_last_switch(guc, &start_gt_clk, last_switch); + active = intel_gt_clock_interval_to_ns(gt, guc->timestamp.gt_stamp - start_gt_clk); + WRITE_ONCE(ce->stats.runtime.start_gt_clk, start_gt_clk); + WRITE_ONCE(ce->stats.active, active); + } else { + lrc_update_runtime(ce); + } + + spin_unlock_irqrestore(&guc->timestamp.lock, flags); +} + +static void guc_context_update_stats(struct intel_context *ce) +{ + if (!intel_context_pin_if_active(ce)) { + WRITE_ONCE(ce->stats.runtime.start_gt_clk, 0); + WRITE_ONCE(ce->stats.active, 0); + return; + } + + __guc_context_update_clks(ce); + intel_context_unpin(ce); +} + static inline bool submission_disabled(struct intel_guc *guc) { @@ -2723,6 +2780,7 @@ static void guc_context_unpin(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce); + lrc_update_runtime(ce); unpin_guc_id(guc, ce); lrc_unpin(ce); @@ -3344,6 +3402,7 @@ static void remove_from_context(struct i915_request *rq) } static const struct 
intel_context_ops guc_context_ops = { + .flags = COPS_RUNTIME_CYCLES | COPS_RUNTIME_ACTIVE_TOTAL, .alloc = guc_context_alloc, .pre_pin = guc_context_pre_pin, @@ -3360,6 +3419,8 @@ static const struct intel_context_ops guc_context_ops = { .sched_disable = guc_context_sched_disable, + .update_stats = guc_context_update_stats, + .reset = lrc_reset, .destroy = guc_context_destroy, @@ -3593,6 +3654,7 @@ static int guc_virtual_context_alloc(struct intel_context *ce) } static const struct intel_context_ops virtual_guc_context_ops = { + .flags = COPS_RUNTIME_CYCLES | COPS_RUNTIME_ACTIVE_TOTAL, .alloc = guc_virtual_context_alloc, .pre_pin = guc_virtual_context_pre_pin, @@ -3608,6 +3670,7 @@ static const struct intel_context_ops virtual_guc_context_ops = { .exit = guc_virtual_context_exit, .sched_disable = guc_context_sched_disable, + .update_stats = guc_context_update_stats, .destroy = guc_context_destroy, diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c index b09d1d386574..8d81119fff14 100644 --- a/drivers/gpu/drm/i915/i915_drm_client.c +++ b/drivers/gpu/drm/i915/i915_drm_client.c @@ -147,11 +147,7 @@ void i915_drm_client_fdinfo(struct seq_file *m, struct file *f) PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); seq_printf(m, "drm-client-id:\t%u\n", client->id); - /* - * Temporarily skip showing client engine information with GuC submission till - * fetching engine busyness is implemented in the GuC submission backend - */ - if (GRAPHICS_VER(i915) < 8 || intel_uc_uses_guc_submission(&i915->gt0.uc)) + if (GRAPHICS_VER(i915) < 8) return; for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)