
[RFC,04/10] drm/i915: Move intel_engine_context_in/out into intel_lrc.c

Message ID 20180522123020.31624-5-tvrtko.ursulin@linux.intel.com (mailing list archive)

Commit Message

Tvrtko Ursulin May 22, 2018, 12:30 p.m. UTC
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

intel_lrc.c is the only caller, so to avoid some header file ordering
issues in future patches, move these two helpers over there.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c        | 57 +++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_ringbuffer.h | 55 ------------------------
 2 files changed, 57 insertions(+), 55 deletions(-)

Comments

Chris Wilson May 22, 2018, 12:46 p.m. UTC | #1
Quoting Tvrtko Ursulin (2018-05-22 13:30:14)
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> intel_lrc.c is the only caller, so to avoid some header file ordering
> issues in future patches, move these two helpers over there.
> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Expectation was that we would be using these in guc. Brief highlight of
how the plan changed?
-Chris
Tvrtko Ursulin May 22, 2018, 1:02 p.m. UTC | #2
On 22/05/2018 13:46, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2018-05-22 13:30:14)
>> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>
>> intel_lrc.c is the only caller, so to avoid some header file ordering
>> issues in future patches, move these two helpers over there.
>>
>> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> Expectation was that we would be using these in guc. Brief highlight of
> how the plan changed?

"Let's postpone the disaster" ? :) I am talking about header file 
ordering disaster, not GuC, so someone doesn't get me wrong.

But in actuality, since we have postponed GuC until Gen11, and there we 
get a different kind of GuC, the kind where will be no way for i915 to 
have visibility on when a context get in and out, and which physical 
engines. So I think we really won't have a need for these hooks in the 
Gen11 GuC code.

Regards,

Tvrtko
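
For context on what is being moved: the two helpers accumulate per-engine
busy time under a seqlock write side, and they pair with a retrying reader
when the accumulated time is sampled. Below is a minimal sketch of that
read side, modelled on i915's intel_engine_get_busy_time() but simplified
here, so treat the exact shape as an assumption rather than verbatim tree
code:

/* Simplified sketch of the seqlock read side; not verbatim i915 code. */
static ktime_t __engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total = engine->stats.total;

	/*
	 * If a context is active right now, add the time elapsed since
	 * the last context-in to the accumulated total.
	 */
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(),
					    engine->stats.start));

	return total;
}

static ktime_t engine_get_busy_time(struct intel_engine_cs *engine)
{
	unsigned int seq;
	ktime_t total;

	/* Retry the sample if a context in/out event raced with us. */
	do {
		seq = read_seqbegin(&engine->stats.lock);
		total = __engine_get_busy_time(engine);
	} while (read_seqretry(&engine->stats.lock, seq));

	return total;
}

The seqlock lets the sampling path retry instead of blocking the
context-switch hot path that the two moved helpers sit on.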

Patch

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 8480c1534c4b..3947bdcd8ea6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -375,6 +375,63 @@  execlists_context_status_change(struct i915_request *rq, unsigned long status)
 				   status, rq);
 }
 
+static inline void
+intel_engine_context_in(struct intel_engine_cs *engine)
+{
+	unsigned long flags;
+
+	if (READ_ONCE(engine->stats.enabled) == 0)
+		return;
+
+	write_seqlock_irqsave(&engine->stats.lock, flags);
+
+	if (engine->stats.enabled > 0) {
+		if (engine->stats.active++ == 0)
+			engine->stats.start = ktime_get();
+		GEM_BUG_ON(engine->stats.active == 0);
+	}
+
+	write_sequnlock_irqrestore(&engine->stats.lock, flags);
+}
+
+static inline void
+intel_engine_context_out(struct intel_engine_cs *engine)
+{
+	unsigned long flags;
+
+	if (READ_ONCE(engine->stats.enabled) == 0)
+		return;
+
+	write_seqlock_irqsave(&engine->stats.lock, flags);
+
+	if (engine->stats.enabled > 0) {
+		ktime_t last;
+
+		if (engine->stats.active && --engine->stats.active == 0) {
+			/*
+			 * Decrement the active context count and in case GPU
+			 * is now idle add up to the running total.
+			 */
+			last = ktime_sub(ktime_get(), engine->stats.start);
+
+			engine->stats.total = ktime_add(engine->stats.total,
+							last);
+		} else if (engine->stats.active == 0) {
+			/*
+			 * After turning on engine stats, context out might be
+			 * the first event in which case we account from the
+			 * time stats gathering was turned on.
+			 */
+			last = ktime_sub(ktime_get(), engine->stats.enabled_at);
+
+			engine->stats.total = ktime_add(engine->stats.total,
+							last);
+		}
+	}
+
+	write_sequnlock_irqrestore(&engine->stats.lock, flags);
+}
+
 inline void
 execlists_user_begin(struct intel_engine_execlists *execlists,
 		     const struct execlist_port *port)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 7c25db5bcaaa..1bdc42deca3c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1091,61 +1091,6 @@  void intel_engine_dump(struct intel_engine_cs *engine,
 struct intel_engine_cs *
 intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
 
-static inline void intel_engine_context_in(struct intel_engine_cs *engine)
-{
-	unsigned long flags;
-
-	if (READ_ONCE(engine->stats.enabled) == 0)
-		return;
-
-	write_seqlock_irqsave(&engine->stats.lock, flags);
-
-	if (engine->stats.enabled > 0) {
-		if (engine->stats.active++ == 0)
-			engine->stats.start = ktime_get();
-		GEM_BUG_ON(engine->stats.active == 0);
-	}
-
-	write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
-static inline void intel_engine_context_out(struct intel_engine_cs *engine)
-{
-	unsigned long flags;
-
-	if (READ_ONCE(engine->stats.enabled) == 0)
-		return;
-
-	write_seqlock_irqsave(&engine->stats.lock, flags);
-
-	if (engine->stats.enabled > 0) {
-		ktime_t last;
-
-		if (engine->stats.active && --engine->stats.active == 0) {
-			/*
-			 * Decrement the active context count and in case GPU
-			 * is now idle add up to the running total.
-			 */
-			last = ktime_sub(ktime_get(), engine->stats.start);
-
-			engine->stats.total = ktime_add(engine->stats.total,
-							last);
-		} else if (engine->stats.active == 0) {
-			/*
-			 * After turning on engine stats, context out might be
-			 * the first event in which case we account from the
-			 * time stats gathering was turned on.
-			 */
-			last = ktime_sub(ktime_get(), engine->stats.enabled_at);
-
-			engine->stats.total = ktime_add(engine->stats.total,
-							last);
-		}
-	}
-
-	write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
 int intel_enable_engine_stats(struct intel_engine_cs *engine);
 void intel_disable_engine_stats(struct intel_engine_cs *engine);
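
Stats gathering is reference-counted through the enable/disable pair
declared above. A hypothetical consumer might bracket a sampling window
like the sketch below; the helper name and the 100 ms window are
illustrative, not from this patch, and intel_engine_get_busy_time() is
assumed as the sampling call:

/* Hypothetical example: measure engine busyness over a 100 ms window.
 * Needs <linux/delay.h> for msleep().
 */
static int sample_engine_busyness(struct intel_engine_cs *engine,
				  ktime_t *busy)
{
	ktime_t t0, t1;
	int err;

	err = intel_enable_engine_stats(engine); /* may fail, e.g. -ENODEV */
	if (err)
		return err;

	t0 = intel_engine_get_busy_time(engine);
	msleep(100);
	t1 = intel_engine_get_busy_time(engine);

	intel_disable_engine_stats(engine);

	*busy = ktime_sub(t1, t0);
	return 0;
}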