[1/8] drm/i915/guc: Do not conflate lrc_desc with GuC id for registration

Message ID 20220217235207.930153-2-John.C.Harrison@Intel.com (mailing list archive)
State New, archived
Series Prep work for next GuC release

Commit Message

John Harrison Feb. 17, 2022, 11:52 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

The LRC descriptor pool is going away. So stop using it as a check for
context registration; use the GuC id instead (being the thing that
actually gets registered with the GuC).

Also, rename the set/clear/query helper functions for context id
mappings to better reflect their purpose and to differentiate from
other registration related helper functions.

Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
---
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 69 ++++++++++---------
 1 file changed, 38 insertions(+), 31 deletions(-)
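
For illustration, the change is about which structure answers "is this context registered?": no longer the GuC-visible LRC descriptor pool (which is on its way out), but the guc_id -> intel_context mapping kept in the guc->context_lookup xarray. Below is a minimal sketch of that check, assuming __get_context() is a thin xa_load() wrapper over the xarray (an assumption; its body is not shown in this patch) and using stand-in type definitions:

#include <linux/types.h>
#include <linux/xarray.h>

/* Minimal stand-ins for the driver types (illustration only). */
struct intel_context;
struct intel_guc {
	struct xarray context_lookup;	/* guc_id -> intel_context */
};

/* Assumed shape of the lookup helper: NULL when nothing is mapped. */
static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
{
	return xa_load(&guc->context_lookup, id);
}

/*
 * The renamed predicate asks "is a context mapped to this GuC id?"
 * rather than "is an LRC descriptor registered?".
 */
static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
{
	return __get_context(guc, id);
}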

Comments

Daniele Ceraolo Spurio Feb. 18, 2022, 9:13 p.m. UTC | #1
On 2/17/2022 3:52 PM, John.C.Harrison@Intel.com wrote:
> From: John Harrison <John.C.Harrison@Intel.com>
>
> The LRC descriptor pool is going away. So stop using it as a check for
> context registration; use the GuC id instead (being the thing that
> actually gets registered with the GuC).
>
> Also, rename the set/clear/query helper functions for context id
> mappings to better reflect their purpose and to differentiate from
> other registration related helper functions.
>
> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>

Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>

Daniele

Patch

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index b3a429a92c0d..7fb889e14995 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -514,31 +514,20 @@ static inline bool guc_submission_initialized(struct intel_guc *guc)
 	return !!guc->lrc_desc_pool_vaddr;
 }
 
-static inline void reset_lrc_desc(struct intel_guc *guc, u32 id)
+static inline void _reset_lrc_desc(struct intel_guc *guc, u32 id)
 {
-	if (likely(guc_submission_initialized(guc))) {
-		struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
-		unsigned long flags;
-
-		memset(desc, 0, sizeof(*desc));
+	struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
 
-		/*
-		 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
-		 * the lower level functions directly.
-		 */
-		xa_lock_irqsave(&guc->context_lookup, flags);
-		__xa_erase(&guc->context_lookup, id);
-		xa_unlock_irqrestore(&guc->context_lookup, flags);
-	}
+	memset(desc, 0, sizeof(*desc));
 }
 
-static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id)
+static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
 {
 	return __get_context(guc, id);
 }
 
-static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
-					   struct intel_context *ce)
+static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id,
+				      struct intel_context *ce)
 {
 	unsigned long flags;
 
@@ -551,6 +540,24 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
 	xa_unlock_irqrestore(&guc->context_lookup, flags);
 }
 
+static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
+{
+	unsigned long flags;
+
+	if (unlikely(!guc_submission_initialized(guc)))
+		return;
+
+	_reset_lrc_desc(guc, id);
+
+	/*
+	 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
+	 * the lower level functions directly.
+	 */
+	xa_lock_irqsave(&guc->context_lookup, flags);
+	__xa_erase(&guc->context_lookup, id);
+	xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
 static void decr_outstanding_submission_g2h(struct intel_guc *guc)
 {
 	if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
@@ -795,7 +802,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
 	GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
 	GEM_BUG_ON(context_guc_id_invalid(ce));
 	GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
-	GEM_BUG_ON(!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id));
+	GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
 
 	/* Insert NOOP if this work queue item will wrap the tail pointer. */
 	if (wqi_size > wq_space_until_wrap(ce)) {
@@ -923,7 +930,7 @@ static int guc_dequeue_one_context(struct intel_guc *guc)
 	if (submit) {
 		struct intel_context *ce = request_to_scheduling_context(last);
 
-		if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id) &&
+		if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
 			     !intel_context_is_banned(ce))) {
 			ret = guc_lrc_desc_pin(ce, false);
 			if (unlikely(ret == -EPIPE)) {
@@ -1897,7 +1904,7 @@ static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
 
 	return submission_disabled(guc) || guc->stalled_request ||
 		!i915_sched_engine_is_empty(sched_engine) ||
-		!lrc_desc_registered(guc, ce->guc_id.id);
+		!ctx_id_mapped(guc, ce->guc_id.id);
 }
 
 static void guc_submit_request(struct i915_request *rq)
@@ -1954,7 +1961,7 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
 		else
 			ida_simple_remove(&guc->submission_state.guc_ids,
 					  ce->guc_id.id);
-		reset_lrc_desc(guc, ce->guc_id.id);
+		clr_ctx_id_mapping(guc, ce->guc_id.id);
 		set_context_guc_id_invalid(ce);
 	}
 	if (!list_empty(&ce->guc_id.link))
@@ -2250,10 +2257,10 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 	GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
 		   i915_gem_object_is_lmem(ce->ring->vma->obj));
 
-	context_registered = lrc_desc_registered(guc, desc_idx);
+	context_registered = ctx_id_mapped(guc, desc_idx);
 
-	reset_lrc_desc(guc, desc_idx);
-	set_lrc_desc_registered(guc, desc_idx, ce);
+	clr_ctx_id_mapping(guc, desc_idx);
+	set_ctx_id_mapping(guc, desc_idx, ce);
 
 	desc = __get_lrc_desc(guc, desc_idx);
 	desc->engine_class = engine_class_to_guc_class(engine->class);
@@ -2324,7 +2331,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 		}
 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 		if (unlikely(disabled)) {
-			reset_lrc_desc(guc, desc_idx);
+			clr_ctx_id_mapping(guc, desc_idx);
 			return 0;	/* Will get registered later */
 		}
 
@@ -2340,9 +2347,9 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 		with_intel_runtime_pm(runtime_pm, wakeref)
 			ret = register_context(ce, loop);
 		if (unlikely(ret == -EBUSY)) {
-			reset_lrc_desc(guc, desc_idx);
+			clr_ctx_id_mapping(guc, desc_idx);
 		} else if (unlikely(ret == -ENODEV)) {
-			reset_lrc_desc(guc, desc_idx);
+			clr_ctx_id_mapping(guc, desc_idx);
 			ret = 0;	/* Will get registered later */
 		}
 	}
@@ -2529,7 +2536,7 @@ static bool context_cant_unblock(struct intel_context *ce)
 
 	return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
 		context_guc_id_invalid(ce) ||
-		!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) ||
+		!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
 		!intel_context_is_pinned(ce);
 }
 
@@ -2699,7 +2706,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
 	bool disabled;
 
 	GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
-	GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
+	GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
 	GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
 	GEM_BUG_ON(context_enabled(ce));
 
@@ -2816,7 +2823,7 @@ static void guc_context_destroy(struct kref *kref)
 	 */
 	spin_lock_irqsave(&guc->submission_state.lock, flags);
 	destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
-		!lrc_desc_registered(guc, ce->guc_id.id);
+		!ctx_id_mapped(guc, ce->guc_id.id);
 	if (likely(!destroy)) {
 		if (!list_empty(&ce->guc_id.link))
 			list_del_init(&ce->guc_id.link);
@@ -3059,7 +3066,7 @@ static void guc_signal_context_fence(struct intel_context *ce)
 static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
 {
 	return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
-		!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) &&
+		!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
 		!submission_disabled(ce_to_guc(ce));
 }
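
A note on the locking comment in clr_ctx_id_mapping() above: the xarray API provides xa_erase() and xa_erase_irq() but no _irqsave variant, which is why the patch takes the lock explicitly around the internal __xa_erase(). The same pattern as a hypothetical standalone wrapper (not part of this patch, shown only to make the comment concrete):

#include <linux/xarray.h>

/*
 * Hypothetical xa_erase_irqsave(), open-coding the pattern used by
 * clr_ctx_id_mapping(). __xa_erase() returns the entry that was
 * removed, or NULL if the index held nothing.
 */
static void *xa_erase_irqsave(struct xarray *xa, unsigned long index)
{
	unsigned long flags;
	void *entry;

	xa_lock_irqsave(xa, flags);
	entry = __xa_erase(xa, index);
	xa_unlock_irqrestore(xa, flags);

	return entry;
}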