From patchwork Tue Feb 4 12:49:02 2025 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Maarten Lankhorst X-Patchwork-Id: 13959110 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from gabe.freedesktop.org (gabe.freedesktop.org [131.252.210.177]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.lore.kernel.org (Postfix) with ESMTPS id 9B185C02197 for ; Tue, 4 Feb 2025 12:48:46 +0000 (UTC) Received: from gabe.freedesktop.org (localhost [127.0.0.1]) by gabe.freedesktop.org (Postfix) with ESMTP id A400E10E60B; Tue, 4 Feb 2025 12:48:40 +0000 (UTC) Received: from mblankhorst.nl (lankhorst.se [141.105.120.124]) by gabe.freedesktop.org (Postfix) with ESMTPS id A594C10E339; Tue, 4 Feb 2025 12:48:38 +0000 (UTC) From: Maarten Lankhorst To: intel-gfx@lists.freedesktop.org Cc: dri-devel@lists.freedesktop.org, linux-kernel@vger.kernel.org, Maarten Lankhorst , Ingo Molnar , David Lechner , Peter Zijlstra , Will Deacon , Waiman Long , Boqun Feng Subject: [PATCH 1/8] header/cleanup.h: Add _init_args to DEFINE_LOCK_GUARD_1(_COND) Date: Tue, 4 Feb 2025 13:49:02 +0100 Message-ID: <20250204124909.158315-2-dev@lankhorst.se> X-Mailer: git-send-email 2.47.1 In-Reply-To: <20250204124909.158315-1-dev@lankhorst.se> References: <20250204124909.158315-1-dev@lankhorst.se> MIME-Version: 1.0 X-BeenThere: intel-gfx@lists.freedesktop.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: Intel graphics driver community testing & development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: intel-gfx-bounces@lists.freedesktop.org Sender: "Intel-gfx" This makes it possible to use the lock guards for guards that need extra arguments. I've been attempting to add a guard to xe_force_wake handling, but that required an extra argument specifying the domain. 
For nested spinlock handling, it could also be beneficial to be able to do something like this. For example: DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _nested, spin_lock_irqsave_nested(_T->lock, _T->flags, nest), unsigned nest); guard(spinlock_irqsave_nested, &lock, SINGLE_DEPTH_NESTING); The first optional argument in DEFINE_LOCK_GUARD_1 is now used for the struct members, the remainder goes to init_args to allow the same usage in the base case. Abusing the preprocessor to add an extra meaning to the first optional argument is done by creating a __DO_DEFINE_LOCK_GUARD_1, and passing __VA_ARGS__ not ##__VA_ARGS__ to it to ensure _struct_members is empty when not passed explicitly. Cc: Ingo Molnar Cc: David Lechner Cc: Peter Zijlstra Cc: Will Deacon Cc: Waiman Long Cc: Boqun Feng --- include/linux/cleanup.h | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h index ec00e3f7af2b3..dbaf02447f206 100644 --- a/include/linux/cleanup.h +++ b/include/linux/cleanup.h @@ -349,19 +349,23 @@ _label: \ * locks that don't have a native type (eg. RCU, preempt) or those that need a * 'fat' pointer (eg. spin_lock_irqsave). * - * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...) - * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...) - * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock) + * DEFINE_LOCK_GUARD_0(name, lock, unlock, _lock_members...) + * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, (opt)_lock_members, _init_args...) + * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock, _init_args...) * * will result in the following type: * * typedef struct { * type *lock; // 'type := void' for the _0 variant - * __VA_ARGS__; + * _lock_members; // use ; as separator to add multiple members * } class_##name##_t; * * As above, both _lock and _unlock are statements, except this time '_T' will * be a pointer to the above struct. 
+ * + * For DEFINE_LOCK_GUARD_1 and DEFINE_LOCK_GUARD_1_COND, it adds all + * _init_args as local variables available to the lock statement. + * They need to be passed to all guard() functions as extra argument. */ #define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...) \ @@ -381,8 +385,8 @@ static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \ } -#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock) \ -static inline class_##_name##_t class_##_name##_constructor(_type *l) \ +#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock, ...) \ +static inline class_##_name##_t class_##_name##_constructor(_type *l, ##__VA_ARGS__) \ { \ class_##_name##_t _t = { .lock = l }, *_T = &_t; \ _lock; \ @@ -398,23 +402,27 @@ static inline class_##_name##_t class_##_name##_constructor(void) \ return _t; \ } -#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \ +#define __DO_DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, _lock_members, _init_args...) \ __DEFINE_CLASS_IS_CONDITIONAL(_name, false); \ -__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \ -__DEFINE_LOCK_GUARD_1(_name, _type, _lock) +__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, _lock_members) \ +__DEFINE_LOCK_GUARD_1(_name, _type, _lock, ##_init_args) + +/* Call __DO_DEFINE_LOCK_GUARD_1 here because of the 2 optional arguments */ +#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \ + __DO_DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, __VA_ARGS__) #define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) \ __DEFINE_CLASS_IS_CONDITIONAL(_name, false); \ __DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \ __DEFINE_LOCK_GUARD_0(_name, _lock) -#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \ +#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock, ...) 
\ __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \ EXTEND_CLASS(_name, _ext, \ ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\ if (_T->lock && !(_condlock)) _T->lock = NULL; \ _t; }), \ - typeof_member(class_##_name##_t, lock) l) \ + typeof_member(class_##_name##_t, lock) l, ##__VA_ARGS__) \ static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \ { return class_##_name##_lock_ptr(_T); } From patchwork Tue Feb 4 12:49:03 2025 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Maarten Lankhorst X-Patchwork-Id: 13959111 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from gabe.freedesktop.org (gabe.freedesktop.org [131.252.210.177]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.lore.kernel.org (Postfix) with ESMTPS id 3BC29C02194 for ; Tue, 4 Feb 2025 12:48:47 +0000 (UTC) Received: from gabe.freedesktop.org (localhost [127.0.0.1]) by gabe.freedesktop.org (Postfix) with ESMTP id F08BE10E60E; Tue, 4 Feb 2025 12:48:40 +0000 (UTC) Received: from mblankhorst.nl (lankhorst.se [141.105.120.124]) by gabe.freedesktop.org (Postfix) with ESMTPS id B943110E18D; Tue, 4 Feb 2025 12:48:39 +0000 (UTC) From: Maarten Lankhorst To: intel-gfx@lists.freedesktop.org Cc: dri-devel@lists.freedesktop.org, linux-kernel@vger.kernel.org, Maarten Lankhorst , Ingo Molnar , David Lechner , Peter Zijlstra , Will Deacon , Waiman Long , Boqun Feng Subject: [PATCH 2/8] drm/xe/gt: Unify xe_hw_fence_irq_finish() calls. 
Date: Tue, 4 Feb 2025 13:49:03 +0100 Message-ID: <20250204124909.158315-3-dev@lankhorst.se> X-Mailer: git-send-email 2.47.1 In-Reply-To: <20250204124909.158315-1-dev@lankhorst.se> References: <20250204124909.158315-1-dev@lankhorst.se> MIME-Version: 1.0 X-BeenThere: intel-gfx@lists.freedesktop.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: Intel graphics driver community testing & development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: intel-gfx-bounces@lists.freedesktop.org Sender: "Intel-gfx" Those calls should be from xe_gt_init, not the diverse amount of places they are called. Signed-off-by: Maarten Lankhorst --- drivers/gpu/drm/xe/xe_gt.c | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 01a4a852b8f43..943bab94119fa 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -408,13 +408,11 @@ static void dump_pat_on_error(struct xe_gt *gt) static int gt_fw_domain_init(struct xe_gt *gt) { unsigned int fw_ref; - int err, i; + int err; fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (!fw_ref) { - err = -ETIMEDOUT; - goto err_hw_fence_irq; - } + if (!fw_ref) + return -ETIMEDOUT; if (!xe_gt_is_media_type(gt)) { err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt); @@ -455,9 +453,6 @@ static int gt_fw_domain_init(struct xe_gt *gt) err_force_wake: dump_pat_on_error(gt); xe_force_wake_put(gt_to_fw(gt), fw_ref); -err_hw_fence_irq: - for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) - xe_hw_fence_irq_finish(>->fence_irq[i]); return err; } @@ -465,7 +460,7 @@ static int gt_fw_domain_init(struct xe_gt *gt) static int all_fw_domain_init(struct xe_gt *gt) { unsigned int fw_ref; - int err, i; + int err; fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { @@ -543,8 +538,6 @@ static int all_fw_domain_init(struct xe_gt *gt) err_force_wake: 
xe_force_wake_put(gt_to_fw(gt), fw_ref); - for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) - xe_hw_fence_irq_finish(>->fence_irq[i]); return err; } @@ -596,35 +589,39 @@ int xe_gt_init(struct xe_gt *gt) err = xe_gt_pagefault_init(gt); if (err) - return err; + goto err; xe_mocs_init_early(gt); err = xe_gt_sysfs_init(gt); if (err) - return err; + goto err; err = gt_fw_domain_init(gt); if (err) - return err; + goto err; err = xe_gt_idle_init(>->gtidle); if (err) - return err; + goto err; err = xe_gt_freq_init(gt); if (err) - return err; + goto err; xe_force_wake_init_engines(gt, gt_to_fw(gt)); err = all_fw_domain_init(gt); if (err) - return err; + goto err; xe_gt_record_user_engines(gt); return 0; +err: + for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) + xe_hw_fence_irq_finish(>->fence_irq[i]); + return err; } /** From patchwork Tue Feb 4 12:49:04 2025 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Maarten Lankhorst X-Patchwork-Id: 13959112 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from gabe.freedesktop.org (gabe.freedesktop.org [131.252.210.177]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.lore.kernel.org (Postfix) with ESMTPS id 07355C02196 for ; Tue, 4 Feb 2025 12:48:50 +0000 (UTC) Received: from gabe.freedesktop.org (localhost [127.0.0.1]) by gabe.freedesktop.org (Postfix) with ESMTP id 2096B10E612; Tue, 4 Feb 2025 12:48:43 +0000 (UTC) Received: from mblankhorst.nl (lankhorst.se [141.105.120.124]) by gabe.freedesktop.org (Postfix) with ESMTPS id BFED810E60C; Tue, 4 Feb 2025 12:48:40 +0000 (UTC) From: Maarten Lankhorst To: intel-gfx@lists.freedesktop.org Cc: dri-devel@lists.freedesktop.org, linux-kernel@vger.kernel.org, Maarten Lankhorst , Ingo Molnar , David Lechner , Peter Zijlstra , Will Deacon , Waiman Long , Boqun Feng Subject: [PATCH 3/8] drm/xe: Add 
scoped guards for xe_force_wake Date: Tue, 4 Feb 2025 13:49:04 +0100 Message-ID: <20250204124909.158315-4-dev@lankhorst.se> X-Mailer: git-send-email 2.47.1 In-Reply-To: <20250204124909.158315-1-dev@lankhorst.se> References: <20250204124909.158315-1-dev@lankhorst.se> MIME-Version: 1.0 X-BeenThere: intel-gfx@lists.freedesktop.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: Intel graphics driver community testing & development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: intel-gfx-bounces@lists.freedesktop.org Sender: "Intel-gfx" Instead of finding bugs where we may or may not release force_wake, I've decided to be inspired by the spinlock guards, and use the same ones to do xe_force_wake handling. Examples are added as documentation in xe_force_wake.c Signed-off-by: Maarten Lankhorst --- drivers/gpu/drm/xe/xe_force_wake.c | 51 ++++++++++++++++++++++++++++++ drivers/gpu/drm/xe/xe_force_wake.h | 15 +++++++++ 2 files changed, 66 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c index 4f6784e5abf88..805c19f6de9e7 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.c +++ b/drivers/gpu/drm/xe/xe_force_wake.c @@ -16,6 +16,57 @@ #define XE_FORCE_WAKE_ACK_TIMEOUT_MS 50 +/** + * DOC: Force wake handling + * + * Traditionally, the force wake handling has been done using the error prone + * set of calls: + * + * int func(struct xe_force_wake *fw) + * { + * unsigned int fw_ref = xe_force_wake_get(fw, XE_FORCEWAKE_ALL); + * if (!fw_ref) + * return -ETIMEDOUT; + * + * err = do_something(); + * + * xe_force_wake_put(fw, fw_ref); + * return err; + * } + * + * A new, failure-safe approach is by using the scoped helpers, + * which changes the function to this: + * + * int func(struct xe_force_wake *fw) + * { + * scoped_cond_guard(xe_force_wake_get, return -ETIMEDOUT, fw, XE_FORCEWAKE_ALL) { + * return do_something(); + * } + * } + * + * For completeness, the following options also work: + * void 
func(struct xe_force_wake *fw) + * { + * scoped_guard(xe_force_wake_get, fw, XE_FORCEWAKE_ALL) { + * do_something_only_if_fw_acquired(); + * } + * } + * + * You can use xe_force_wake instead of force_wake_get, if the code + * must run but errors acquiring ignored: + * void func(struct xe_force_wake *fw) + * { + * scoped_guard(xe_force_wake, fw, XE_FORCEWAKE_ALL) { + * always_do_something_maybe_fw(); + * } + * + * do_something_no_fw(); + * + * guard(xe_force_wake)(fw, XE_FORCEWAKE_ALL); + * always_do_something_maybe_fw(); + * } + */ + static const char *str_wake_sleep(bool wake) { return wake ? "wake" : "sleep"; diff --git a/drivers/gpu/drm/xe/xe_force_wake.h b/drivers/gpu/drm/xe/xe_force_wake.h index 0e3e84bfa51c3..0fb1baae0a3a3 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.h +++ b/drivers/gpu/drm/xe/xe_force_wake.h @@ -9,6 +9,8 @@ #include "xe_assert.h" #include "xe_force_wake_types.h" +#include + struct xe_gt; void xe_force_wake_init_gt(struct xe_gt *gt, @@ -61,4 +63,17 @@ xe_force_wake_ref_has_domain(unsigned int fw_ref, enum xe_force_wake_domains dom return fw_ref & domain; } +DEFINE_LOCK_GUARD_1(xe_force_wake, struct xe_force_wake, + _T->fw_ref = xe_force_wake_get(_T->lock, domain), + xe_force_wake_put(_T->lock, _T->fw_ref), + unsigned int fw_ref, enum xe_force_wake_domains domain); + +DEFINE_LOCK_GUARD_1_COND(xe_force_wake, _get, + _T->fw_ref = xe_force_wake_get_all(_T->lock, domain), + enum xe_force_wake_domains domain); + +/* Only useful for guard xe_force_wake, guard xe_force_wake_get gets all or nothing */ +#define xe_force_wake_scope_has_domain(domain) \ + (xe_force_wake_ref_has_domain(scope.fw_ref, domain)) + #endif From patchwork Tue Feb 4 12:49:05 2025 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Maarten Lankhorst X-Patchwork-Id: 13959114 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from 
gabe.freedesktop.org (gabe.freedesktop.org [131.252.210.177]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.lore.kernel.org (Postfix) with ESMTPS id 7B0D6C02198 for ; Tue, 4 Feb 2025 12:48:52 +0000 (UTC) Received: from gabe.freedesktop.org (localhost [127.0.0.1]) by gabe.freedesktop.org (Postfix) with ESMTP id 6123310E611; Tue, 4 Feb 2025 12:48:44 +0000 (UTC) Received: from mblankhorst.nl (lankhorst.se [141.105.120.124]) by gabe.freedesktop.org (Postfix) with ESMTPS id 8BB6F10E60C; Tue, 4 Feb 2025 12:48:41 +0000 (UTC) From: Maarten Lankhorst To: intel-gfx@lists.freedesktop.org Cc: dri-devel@lists.freedesktop.org, linux-kernel@vger.kernel.org, Maarten Lankhorst , Ingo Molnar , David Lechner , Peter Zijlstra , Will Deacon , Waiman Long , Boqun Feng Subject: [PATCH 4/8] drm/xe: Add xe_force_wake_get_all Date: Tue, 4 Feb 2025 13:49:05 +0100 Message-ID: <20250204124909.158315-5-dev@lankhorst.se> X-Mailer: git-send-email 2.47.1 In-Reply-To: <20250204124909.158315-1-dev@lankhorst.se> References: <20250204124909.158315-1-dev@lankhorst.se> MIME-Version: 1.0 X-BeenThere: intel-gfx@lists.freedesktop.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: Intel graphics driver community testing & development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: intel-gfx-bounces@lists.freedesktop.org Sender: "Intel-gfx" For most usecases, we want to get all the forcewakes, and failing to grab any is similar to failure to grab all. This makes the next patch to add cond guards a lot easier. 
Signed-off-by: Maarten Lankhorst --- drivers/gpu/drm/xe/xe_force_wake.c | 110 ++++++++++++++++++++--------- drivers/gpu/drm/xe/xe_force_wake.h | 2 + 2 files changed, 77 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c index 805c19f6de9e7..cc00d5de8f0ae 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.c +++ b/drivers/gpu/drm/xe/xe_force_wake.c @@ -211,27 +211,36 @@ static int domain_sleep_wait(struct xe_gt *gt, (ffs(tmp__) - 1))) && \ domain__->reg_ctl.addr) -/** - * xe_force_wake_get() : Increase the domain refcount - * @fw: struct xe_force_wake - * @domains: forcewake domains to get refcount on - * - * This function wakes up @domains if they are asleep and takes references. - * If requested domain is XE_FORCEWAKE_ALL then only applicable/initialized - * domains will be considered for refcount and it is a caller responsibility - * to check returned ref if it includes any specific domain by using - * xe_force_wake_ref_has_domain() function. Caller must call - * xe_force_wake_put() function to decrease incremented refcounts. - * - * Return: opaque reference to woken domains or zero if none of requested - * domains were awake. 
- */ -unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw, - enum xe_force_wake_domains domains) +static void __xe_force_wake_put_inner(struct xe_force_wake *fw, + unsigned int fw_ref, unsigned int *ack_fail) +{ + struct xe_gt *gt = fw->gt; + struct xe_force_wake_domain *domain; + unsigned int tmp, sleep = 0; + + for_each_fw_domain_masked(domain, fw_ref, fw, tmp) { + xe_gt_assert(gt, domain->ref); + + if (!--domain->ref) { + sleep |= BIT(domain->id); + domain_sleep(gt, domain); + } + } + for_each_fw_domain_masked(domain, sleep, fw, tmp) { + if (domain_sleep_wait(gt, domain) == 0) + fw->awake_domains &= ~BIT(domain->id); + else + *ack_fail |= BIT(domain->id); + } +} + +static int __must_check __xe_force_wake_get(struct xe_force_wake *fw, + enum xe_force_wake_domains domains, + bool all_or_nothing) { struct xe_gt *gt = fw->gt; struct xe_force_wake_domain *domain; - unsigned int ref_incr = 0, awake_rqst = 0, awake_failed = 0; + unsigned int ref_incr = 0, awake_rqst = 0, awake_failed = 0, sleep_failed = 0; unsigned int tmp, ref_rqst; unsigned long flags; @@ -257,6 +266,12 @@ unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw, } } ref_incr &= ~awake_failed; + + if (all_or_nothing && awake_failed && ref_incr) { + __xe_force_wake_put_inner(fw, ref_incr, &sleep_failed); + ref_incr = 0; + } + spin_unlock_irqrestore(&fw->lock, flags); xe_gt_WARN(gt, awake_failed, "Forcewake domain%s %#x failed to acknowledge awake request\n", @@ -268,6 +283,46 @@ unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw, return ref_incr; } +/** + * xe_force_wake_get() : Increase the domain refcount + * @fw: struct xe_force_wake + * @domains: forcewake domains to get refcount on + * + * This function wakes up @domains if they are asleep and takes references. 
+ * If requested domain is XE_FORCEWAKE_ALL then only applicable/initialized + * domains will be considered for refcount and it is a caller responsibility + * to check returned ref if it includes any specific domain by using + * xe_force_wake_ref_has_domain() function. Caller must call + * xe_force_wake_put() function to decrease incremented refcounts. + * + * Return: opaque reference to woken domains or zero if none of requested + * domains were awake. + */ +unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw, + enum xe_force_wake_domains domains) +{ + return __xe_force_wake_get(fw, domains, false); +} + +/** + * xe_force_wake_get_all() : Increase the domain refcount + * @fw: struct xe_force_wake + * @domains: forcewake domains to get refcount on + * + * This function wakes up @domains if they are asleep and takes references. + * Unlike xe_force_wake_get(), this function fails if any of the domains + * could not be woken up. It's all or nothing. This makes it always safe + * to check for 0 only. + * + * Return: opaque reference to woken domains or zero if not all of the requested + * domains could be woken up. 
+ */ +unsigned int __must_check xe_force_wake_get_all(struct xe_force_wake *fw, + enum xe_force_wake_domains domains) +{ + return __xe_force_wake_get(fw, domains, true); +} + /** * xe_force_wake_put - Decrement the refcount and put domain to sleep if refcount becomes 0 * @fw: Pointer to the force wake structure @@ -281,10 +336,8 @@ unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw, void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref) { struct xe_gt *gt = fw->gt; - struct xe_force_wake_domain *domain; - unsigned int tmp, sleep = 0; unsigned long flags; - int ack_fail = 0; + unsigned int ack_fail = 0; /* * Avoid unnecessary lock and unlock when the function is called @@ -297,20 +350,7 @@ void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref) fw_ref = fw->initialized_domains; spin_lock_irqsave(&fw->lock, flags); - for_each_fw_domain_masked(domain, fw_ref, fw, tmp) { - xe_gt_assert(gt, domain->ref); - - if (!--domain->ref) { - sleep |= BIT(domain->id); - domain_sleep(gt, domain); - } - } - for_each_fw_domain_masked(domain, sleep, fw, tmp) { - if (domain_sleep_wait(gt, domain) == 0) - fw->awake_domains &= ~BIT(domain->id); - else - ack_fail |= BIT(domain->id); - } + __xe_force_wake_put_inner(fw, fw_ref, &ack_fail); spin_unlock_irqrestore(&fw->lock, flags); xe_gt_WARN(gt, ack_fail, "Forcewake domain%s %#x failed to acknowledge sleep request\n", diff --git a/drivers/gpu/drm/xe/xe_force_wake.h b/drivers/gpu/drm/xe/xe_force_wake.h index 0fb1baae0a3a3..7102547260f67 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.h +++ b/drivers/gpu/drm/xe/xe_force_wake.h @@ -19,6 +19,8 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw); unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw, enum xe_force_wake_domains domains); +unsigned int __must_check xe_force_wake_get_all(struct xe_force_wake *fw, + enum xe_force_wake_domains domains); void xe_force_wake_put(struct xe_force_wake *fw, unsigned int 
fw_ref); static inline int From patchwork Tue Feb 4 12:49:06 2025 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Maarten Lankhorst X-Patchwork-Id: 13959113 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from gabe.freedesktop.org (gabe.freedesktop.org [131.252.210.177]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.lore.kernel.org (Postfix) with ESMTPS id 636D4C02197 for ; Tue, 4 Feb 2025 12:48:51 +0000 (UTC) Received: from gabe.freedesktop.org (localhost [127.0.0.1]) by gabe.freedesktop.org (Postfix) with ESMTP id 8673810E615; Tue, 4 Feb 2025 12:48:43 +0000 (UTC) Received: from mblankhorst.nl (lankhorst.se [141.105.120.124]) by gabe.freedesktop.org (Postfix) with ESMTPS id 6551610E610; Tue, 4 Feb 2025 12:48:42 +0000 (UTC) From: Maarten Lankhorst To: intel-gfx@lists.freedesktop.org Cc: dri-devel@lists.freedesktop.org, linux-kernel@vger.kernel.org, Maarten Lankhorst , Ingo Molnar , David Lechner , Peter Zijlstra , Will Deacon , Waiman Long , Boqun Feng Subject: [PATCH 5/8] drm/xe/coredump: Use guard helpers for xe_force_wake. 
Date: Tue, 4 Feb 2025 13:49:06 +0100 Message-ID: <20250204124909.158315-6-dev@lankhorst.se> X-Mailer: git-send-email 2.47.1 In-Reply-To: <20250204124909.158315-1-dev@lankhorst.se> References: <20250204124909.158315-1-dev@lankhorst.se> MIME-Version: 1.0 X-BeenThere: intel-gfx@lists.freedesktop.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: Intel graphics driver community testing & development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: intel-gfx-bounces@lists.freedesktop.org Sender: "Intel-gfx" --- drivers/gpu/drm/xe/xe_devcoredump.c | 36 ++++++++++++++--------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index 39fe485d20858..afe229fba8a9c 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -233,7 +233,6 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work) struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work); struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot); struct xe_device *xe = coredump_to_xe(coredump); - unsigned int fw_ref; /* * NB: Despite passing a GFP_ flags parameter here, more allocations are done @@ -247,12 +246,13 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work) xe_pm_runtime_get(xe); /* keep going if fw fails as we still want to save the memory and SW data */ - fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL); - if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) - xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n"); - xe_vm_snapshot_capture_delayed(ss->vm); - xe_guc_exec_queue_snapshot_capture_delayed(ss->ge); - xe_force_wake_put(gt_to_fw(ss->gt), fw_ref); + scoped_guard(xe_force_wake, gt_to_fw(ss->gt), XE_FORCEWAKE_ALL) { + if (!xe_force_wake_scope_has_domain(XE_FORCEWAKE_ALL)) + xe_gt_info(ss->gt, "failed to get forcewake 
for coredump capture\n"); + + xe_vm_snapshot_capture_delayed(ss->vm); + xe_guc_exec_queue_snapshot_capture_delayed(ss->ge); + } xe_pm_runtime_put(xe); @@ -277,7 +277,6 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, u32 width_mask = (0x1 << q->width) - 1; const char *process_name = "no process"; - unsigned int fw_ref; bool cookie; int i; @@ -305,20 +304,19 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, } /* keep going if fw fails as we still want to save the memory and SW data */ - fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); - - ss->guc.log = xe_guc_log_snapshot_capture(&guc->log, true); - ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct); - ss->ge = xe_guc_exec_queue_snapshot_capture(q); - if (job) - ss->job = xe_sched_job_snapshot_capture(job); - ss->vm = xe_vm_snapshot_capture(q->vm); - - xe_engine_snapshot_capture_for_queue(q); + scoped_guard(xe_force_wake, gt_to_fw(ss->gt), XE_FORCEWAKE_ALL) { + ss->guc.log = xe_guc_log_snapshot_capture(&guc->log, true); + ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct); + ss->ge = xe_guc_exec_queue_snapshot_capture(q); + if (job) + ss->job = xe_sched_job_snapshot_capture(job); + ss->vm = xe_vm_snapshot_capture(q->vm); + + xe_engine_snapshot_capture_for_queue(q); + } queue_work(system_unbound_wq, &ss->work); - xe_force_wake_put(gt_to_fw(q->gt), fw_ref); dma_fence_end_signalling(cookie); } From patchwork Tue Feb 4 12:49:07 2025 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Maarten Lankhorst X-Patchwork-Id: 13959115 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from gabe.freedesktop.org (gabe.freedesktop.org [131.252.210.177]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.lore.kernel.org (Postfix) with ESMTPS id 96707C0219B for ; Tue, 4 Feb 2025 
12:48:53 +0000 (UTC) Received: from gabe.freedesktop.org (localhost [127.0.0.1]) by gabe.freedesktop.org (Postfix) with ESMTP id 12B9710E618; Tue, 4 Feb 2025 12:48:45 +0000 (UTC) Received: from mblankhorst.nl (lankhorst.se [141.105.120.124]) by gabe.freedesktop.org (Postfix) with ESMTPS id BC7CD10E611; Tue, 4 Feb 2025 12:48:43 +0000 (UTC) From: Maarten Lankhorst To: intel-gfx@lists.freedesktop.org Cc: dri-devel@lists.freedesktop.org, linux-kernel@vger.kernel.org, Maarten Lankhorst , Ingo Molnar , David Lechner , Peter Zijlstra , Will Deacon , Waiman Long , Boqun Feng Subject: [PATCH 6/8] drm/xe/gsc: Use guard helper for xe_gsc_print_info. Date: Tue, 4 Feb 2025 13:49:07 +0100 Message-ID: <20250204124909.158315-7-dev@lankhorst.se> X-Mailer: git-send-email 2.47.1 In-Reply-To: <20250204124909.158315-1-dev@lankhorst.se> References: <20250204124909.158315-1-dev@lankhorst.se> MIME-Version: 1.0 X-BeenThere: intel-gfx@lists.freedesktop.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: Intel graphics driver community testing & development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: intel-gfx-bounces@lists.freedesktop.org Sender: "Intel-gfx" As an example on how it works. 
Signed-off-by: Maarten Lankhorst --- drivers/gpu/drm/xe/xe_gsc.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c index 1eb791ddc375c..aee9f58b1c3c6 100644 --- a/drivers/gpu/drm/xe/xe_gsc.c +++ b/drivers/gpu/drm/xe/xe_gsc.c @@ -600,7 +600,6 @@ void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p) { struct xe_gt *gt = gsc_to_gt(gsc); struct xe_mmio *mmio = >->mmio; - unsigned int fw_ref; xe_uc_fw_print(&gsc->fw, p); @@ -609,17 +608,12 @@ void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p) if (!xe_uc_fw_is_enabled(&gsc->fw)) return; - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC); - if (!fw_ref) - return; - - drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", - xe_mmio_read32(mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(mmio, HECI_FWSTS2(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(mmio, HECI_FWSTS3(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(mmio, HECI_FWSTS4(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(mmio, HECI_FWSTS5(MTL_GSC_HECI1_BASE)), - xe_mmio_read32(mmio, HECI_FWSTS6(MTL_GSC_HECI1_BASE))); - - xe_force_wake_put(gt_to_fw(gt), fw_ref); + scoped_guard(xe_force_wake_get, gt_to_fw(gt), XE_FW_GSC) + drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + xe_mmio_read32(mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS2(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS3(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS4(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS5(MTL_GSC_HECI1_BASE)), + xe_mmio_read32(mmio, HECI_FWSTS6(MTL_GSC_HECI1_BASE))); } From patchwork Tue Feb 4 12:49:08 2025 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Maarten Lankhorst X-Patchwork-Id: 13959116 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on 
aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from gabe.freedesktop.org (gabe.freedesktop.org [131.252.210.177]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.lore.kernel.org (Postfix) with ESMTPS id 54876C0219C for ; Tue, 4 Feb 2025 12:48:54 +0000 (UTC) Received: from gabe.freedesktop.org (localhost [127.0.0.1]) by gabe.freedesktop.org (Postfix) with ESMTP id C501610E625; Tue, 4 Feb 2025 12:48:47 +0000 (UTC) Received: from mblankhorst.nl (lankhorst.se [141.105.120.124]) by gabe.freedesktop.org (Postfix) with ESMTPS id 5EDD010E61A; Tue, 4 Feb 2025 12:48:45 +0000 (UTC) From: Maarten Lankhorst To: intel-gfx@lists.freedesktop.org Cc: dri-devel@lists.freedesktop.org, linux-kernel@vger.kernel.org, Maarten Lankhorst , Ingo Molnar , David Lechner , Peter Zijlstra , Will Deacon , Waiman Long , Boqun Feng Subject: [PATCH 7/8] drm/xe/vram: Use xe_force_wake guard helper Date: Tue, 4 Feb 2025 13:49:08 +0100 Message-ID: <20250204124909.158315-8-dev@lankhorst.se> X-Mailer: git-send-email 2.47.1 In-Reply-To: <20250204124909.158315-1-dev@lankhorst.se> References: <20250204124909.158315-1-dev@lankhorst.se> MIME-Version: 1.0 X-BeenThere: intel-gfx@lists.freedesktop.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: Intel graphics driver community testing & development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: intel-gfx-bounces@lists.freedesktop.org Sender: "Intel-gfx" Signed-off-by: Maarten Lankhorst --- drivers/gpu/drm/xe/xe_vram.c | 45 ++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c index b1f81dca610dc..9180bb4d29971 100644 --- a/drivers/gpu/drm/xe/xe_vram.c +++ b/drivers/gpu/drm/xe/xe_vram.c @@ -220,7 +220,6 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size, { struct xe_device *xe = tile_to_xe(tile); struct xe_gt *gt = 
tile->primary_gt; - unsigned int fw_ref; u64 offset; u32 reg; @@ -240,33 +239,29 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size, return 0; } - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (!fw_ref) - return -ETIMEDOUT; - - /* actual size */ - if (unlikely(xe->info.platform == XE_DG1)) { - *tile_size = pci_resource_len(to_pci_dev(xe->drm.dev), LMEM_BAR); - *tile_offset = 0; - } else { - reg = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE_ADDR_RANGE(gt->info.id)); - *tile_size = (u64)REG_FIELD_GET(GENMASK(14, 8), reg) * SZ_1G; - *tile_offset = (u64)REG_FIELD_GET(GENMASK(7, 1), reg) * SZ_1G; - } - - /* minus device usage */ - if (xe->info.has_flat_ccs) { - offset = get_flat_ccs_offset(gt, *tile_size); - } else { - offset = xe_mmio_read64_2x32(&tile->mmio, GSMBASE); - } + scoped_cond_guard(xe_force_wake_get, return -ETIMEDOUT, gt_to_fw(gt), XE_FW_GT) { + /* actual size */ + if (unlikely(xe->info.platform == XE_DG1)) { + *tile_size = pci_resource_len(to_pci_dev(xe->drm.dev), LMEM_BAR); + *tile_offset = 0; + } else { + reg = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE_ADDR_RANGE(gt->info.id)); + *tile_size = (u64)REG_FIELD_GET(GENMASK(14, 8), reg) * SZ_1G; + *tile_offset = (u64)REG_FIELD_GET(GENMASK(7, 1), reg) * SZ_1G; + } - /* remove the tile offset so we have just the available size */ - *vram_size = offset - *tile_offset; + /* minus device usage */ + if (xe->info.has_flat_ccs) { + offset = get_flat_ccs_offset(gt, *tile_size); + } else { + offset = xe_mmio_read64_2x32(&tile->mmio, GSMBASE); + } - xe_force_wake_put(gt_to_fw(gt), fw_ref); + /* remove the tile offset so we have just the available size */ + *vram_size = offset - *tile_offset; - return 0; + return 0; + } } static void vram_fini(void *arg) From patchwork Tue Feb 4 12:49:09 2025 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Maarten Lankhorst X-Patchwork-Id: 13959117 Return-Path: X-Spam-Checker-Version: SpamAssassin 
3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from gabe.freedesktop.org (gabe.freedesktop.org [131.252.210.177]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.lore.kernel.org (Postfix) with ESMTPS id E8694C02194 for ; Tue, 4 Feb 2025 12:48:53 +0000 (UTC) Received: from gabe.freedesktop.org (localhost [127.0.0.1]) by gabe.freedesktop.org (Postfix) with ESMTP id 7A1E010E620; Tue, 4 Feb 2025 12:48:47 +0000 (UTC) Received: from mblankhorst.nl (lankhorst.se [141.105.120.124]) by gabe.freedesktop.org (Postfix) with ESMTPS id 3B4E810E61A; Tue, 4 Feb 2025 12:48:46 +0000 (UTC) From: Maarten Lankhorst To: intel-gfx@lists.freedesktop.org Cc: dri-devel@lists.freedesktop.org, linux-kernel@vger.kernel.org, Maarten Lankhorst , Ingo Molnar , David Lechner , Peter Zijlstra , Will Deacon , Waiman Long , Boqun Feng Subject: [PATCH 8/8] drm/xe/gt: Convert to xe_force_wake guard helpers Date: Tue, 4 Feb 2025 13:49:09 +0100 Message-ID: <20250204124909.158315-9-dev@lankhorst.se> X-Mailer: git-send-email 2.47.1 In-Reply-To: <20250204124909.158315-1-dev@lankhorst.se> References: <20250204124909.158315-1-dev@lankhorst.se> MIME-Version: 1.0 X-BeenThere: intel-gfx@lists.freedesktop.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: Intel graphics driver community testing & development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: intel-gfx-bounces@lists.freedesktop.org Sender: "Intel-gfx" Signed-off-by: Maarten Lankhorst --- drivers/gpu/drm/xe/xe_gt.c | 216 +++++++++++++------------------------ 1 file changed, 74 insertions(+), 142 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 943bab94119fa..c71041087a735 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -98,29 +98,24 @@ void xe_gt_sanitize(struct xe_gt *gt) static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) { - unsigned int 
fw_ref; u32 reg; if (!XE_WA(gt, 16023588340)) return; - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (!fw_ref) - return; + scoped_guard(xe_force_wake_get, gt_to_fw(gt), XE_FW_GT) { + if (!xe_gt_is_media_type(gt)) { + reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL); + reg |= CG_DIS_CNTLBUS; + xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg); + } - if (!xe_gt_is_media_type(gt)) { - reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL); - reg |= CG_DIS_CNTLBUS; - xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg); + xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3); } - - xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3); - xe_force_wake_put(gt_to_fw(gt), fw_ref); } static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) { - unsigned int fw_ref; u32 reg; if (!XE_WA(gt, 16023588340)) @@ -129,15 +124,11 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) if (xe_gt_is_media_type(gt)) return; - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (!fw_ref) - return; - - reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL); - reg &= ~CG_DIS_CNTLBUS; - xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg); - - xe_force_wake_put(gt_to_fw(gt), fw_ref); + scoped_guard(xe_force_wake_get, gt_to_fw(gt), XE_FW_GT) { + reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL); + reg &= ~CG_DIS_CNTLBUS; + xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg); + } } /** @@ -407,17 +398,12 @@ static void dump_pat_on_error(struct xe_gt *gt) static int gt_fw_domain_init(struct xe_gt *gt) { - unsigned int fw_ref; int err; - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (!fw_ref) - return -ETIMEDOUT; - if (!xe_gt_is_media_type(gt)) { err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt); if (err) - goto err_force_wake; + goto err_pat; if (IS_SRIOV_PF(gt_to_xe(gt))) xe_lmtt_init(>_to_tile(gt)->sriov.pf.lmtt); } @@ -430,63 +416,53 @@ static int gt_fw_domain_init(struct xe_gt *gt) err = xe_hw_engines_init_early(gt); 
if (err) - goto err_force_wake; + goto err_pat; err = xe_hw_engine_class_sysfs_init(gt); if (err) - goto err_force_wake; + goto err_pat; /* Initialize CCS mode sysfs after early initialization of HW engines */ err = xe_gt_ccs_mode_sysfs_init(gt); if (err) - goto err_force_wake; + goto err_pat; /* * Stash hardware-reported version. Since this register does not exist * on pre-MTL platforms, reading it there will (correctly) return 0. */ gt->info.gmdid = xe_mmio_read32(>->mmio, GMD_ID); - - xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; -err_force_wake: +err_pat: dump_pat_on_error(gt); - xe_force_wake_put(gt_to_fw(gt), fw_ref); return err; } static int all_fw_domain_init(struct xe_gt *gt) { - unsigned int fw_ref; int err; - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { - err = -ETIMEDOUT; - goto err_force_wake; - } - xe_gt_mcr_set_implicit_defaults(gt); xe_reg_sr_apply_mmio(>->reg_sr, gt); err = xe_gt_clock_init(gt); if (err) - goto err_force_wake; + return err; xe_mocs_init(gt); err = xe_execlist_init(gt); if (err) - goto err_force_wake; + return err; err = xe_hw_engines_init(gt); if (err) - goto err_force_wake; + return err; err = xe_uc_init_post_hwconfig(>->uc); if (err) - goto err_force_wake; + return err; if (!xe_gt_is_media_type(gt)) { /* @@ -497,10 +473,8 @@ static int all_fw_domain_init(struct xe_gt *gt) gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), IS_DGFX(xe) ? 
SZ_1M : SZ_512K, 16); - if (IS_ERR(gt->usm.bb_pool)) { - err = PTR_ERR(gt->usm.bb_pool); - goto err_force_wake; - } + if (IS_ERR(gt->usm.bb_pool)) + return PTR_ERR(gt->usm.bb_pool); } } @@ -508,15 +482,13 @@ static int all_fw_domain_init(struct xe_gt *gt) struct xe_tile *tile = gt_to_tile(gt); tile->migrate = xe_migrate_init(tile); - if (IS_ERR(tile->migrate)) { - err = PTR_ERR(tile->migrate); - goto err_force_wake; - } + if (IS_ERR(tile->migrate)) + return PTR_ERR(tile->migrate); } err = xe_uc_init_hw(>->uc); if (err) - goto err_force_wake; + return err; /* Configure default CCS mode of 1 engine with all resources */ if (xe_gt_ccs_mode_enabled(gt)) { @@ -532,14 +504,7 @@ static int all_fw_domain_init(struct xe_gt *gt) xe_gt_sriov_pf_init_hw(gt); } - xe_force_wake_put(gt_to_fw(gt), fw_ref); - return 0; - -err_force_wake: - xe_force_wake_put(gt_to_fw(gt), fw_ref); - - return err; } /* @@ -548,31 +513,25 @@ static int all_fw_domain_init(struct xe_gt *gt) */ int xe_gt_init_hwconfig(struct xe_gt *gt) { - unsigned int fw_ref; int err; - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (!fw_ref) - return -ETIMEDOUT; - - xe_gt_mcr_init_early(gt); - xe_pat_init(gt); - - err = xe_uc_init(>->uc); - if (err) - goto out_fw; + scoped_cond_guard(xe_force_wake_get, return -ETIMEDOUT, gt_to_fw(gt), XE_FW_GT) { + xe_gt_mcr_init_early(gt); + xe_pat_init(gt); - err = xe_uc_init_hwconfig(>->uc); - if (err) - goto out_fw; + err = xe_uc_init(>->uc); + if (err) + return err; - xe_gt_topology_init(gt); - xe_gt_mcr_init(gt); - xe_gt_enable_host_l2_vram(gt); + err = xe_uc_init_hwconfig(>->uc); + if (err) + return err; -out_fw: - xe_force_wake_put(gt_to_fw(gt), fw_ref); - return err; + xe_gt_topology_init(gt); + xe_gt_mcr_init(gt); + xe_gt_enable_host_l2_vram(gt); + } + return 0; } int xe_gt_init(struct xe_gt *gt) @@ -597,7 +556,8 @@ int xe_gt_init(struct xe_gt *gt) if (err) goto err; - err = gt_fw_domain_init(gt); + scoped_cond_guard(xe_force_wake_get, err = -ETIMEDOUT, 
gt_to_fw(gt), XE_FW_GT) + err = gt_fw_domain_init(gt); if (err) goto err; @@ -611,7 +571,8 @@ int xe_gt_init(struct xe_gt *gt) xe_force_wake_init_engines(gt, gt_to_fw(gt)); - err = all_fw_domain_init(gt); + scoped_cond_guard(xe_force_wake_get, err = -ETIMEDOUT, gt_to_fw(gt), XE_FORCEWAKE_ALL) + err = all_fw_domain_init(gt); if (err) goto err; @@ -767,7 +728,6 @@ static int do_gt_restart(struct xe_gt *gt) static int gt_reset(struct xe_gt *gt) { - unsigned int fw_ref; int err; if (xe_device_wedged(gt_to_xe(gt))) @@ -788,29 +748,24 @@ static int gt_reset(struct xe_gt *gt) xe_gt_sanitize(gt); - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { - err = -ETIMEDOUT; - goto err_out; - } - - xe_uc_gucrc_disable(>->uc); - xe_uc_stop_prepare(>->uc); - xe_gt_pagefault_reset(gt); + scoped_cond_guard(xe_force_wake_get, err = -ETIMEDOUT; goto err_out, gt_to_fw(gt), XE_FORCEWAKE_ALL) { + xe_uc_gucrc_disable(>->uc); + xe_uc_stop_prepare(>->uc); + xe_gt_pagefault_reset(gt); - xe_uc_stop(>->uc); + xe_uc_stop(>->uc); - xe_gt_tlb_invalidation_reset(gt); + xe_gt_tlb_invalidation_reset(gt); - err = do_gt_reset(gt); - if (err) - goto err_out; + err = do_gt_reset(gt); + if (err) + goto err_out; - err = do_gt_restart(gt); - if (err) - goto err_out; + err = do_gt_restart(gt); + if (err) + goto err_out; + } - xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(gt_to_xe(gt)); xe_gt_info(gt, "reset done\n"); @@ -818,7 +773,6 @@ static int gt_reset(struct xe_gt *gt) return 0; err_out: - xe_force_wake_put(gt_to_fw(gt), fw_ref); XE_WARN_ON(xe_uc_start(>->uc)); err_fail: xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err)); @@ -850,44 +804,32 @@ void xe_gt_reset_async(struct xe_gt *gt) void xe_gt_suspend_prepare(struct xe_gt *gt) { - unsigned int fw_ref; - - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + guard(xe_force_wake)(gt_to_fw(gt), XE_FORCEWAKE_ALL); xe_uc_stop_prepare(>->uc); - - 
xe_force_wake_put(gt_to_fw(gt), fw_ref); } int xe_gt_suspend(struct xe_gt *gt) { - unsigned int fw_ref; - int err; + int err = -ETIMEDOUT; xe_gt_dbg(gt, "suspending\n"); xe_gt_sanitize(gt); - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) - goto err_msg; - - err = xe_uc_suspend(>->uc); - if (err) - goto err_force_wake; - - xe_gt_idle_disable_pg(gt); + scoped_cond_guard(xe_force_wake_get, goto err_msg, gt_to_fw(gt), XE_FORCEWAKE_ALL) { + err = xe_uc_suspend(>->uc); + if (err) + goto err_msg; - xe_gt_disable_host_l2_vram(gt); + xe_gt_idle_disable_pg(gt); - xe_force_wake_put(gt_to_fw(gt), fw_ref); + xe_gt_disable_host_l2_vram(gt); + } xe_gt_dbg(gt, "suspended\n"); return 0; err_msg: - err = -ETIMEDOUT; -err_force_wake: - xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err)); return err; @@ -895,11 +837,8 @@ int xe_gt_suspend(struct xe_gt *gt) void xe_gt_shutdown(struct xe_gt *gt) { - unsigned int fw_ref; - - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + guard(xe_force_wake)(gt_to_fw(gt), XE_FORCEWAKE_ALL); do_gt_reset(gt); - xe_force_wake_put(gt_to_fw(gt), fw_ref); } /** @@ -924,29 +863,22 @@ int xe_gt_sanitize_freq(struct xe_gt *gt) int xe_gt_resume(struct xe_gt *gt) { - unsigned int fw_ref; - int err; + int err = -ETIMEDOUT; xe_gt_dbg(gt, "resuming\n"); - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) - goto err_msg; - - err = do_gt_restart(gt); - if (err) - goto err_force_wake; + scoped_cond_guard(xe_force_wake_get, goto err_msg, gt_to_fw(gt), XE_FORCEWAKE_ALL) { + err = do_gt_restart(gt); + if (err) + goto err_msg; - xe_gt_idle_enable_pg(gt); + xe_gt_idle_enable_pg(gt); + } - xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_dbg(gt, "resumed\n"); return 0; err_msg: - err = -ETIMEDOUT; -err_force_wake: - xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_gt_err(gt, 
"resume failed (%pe)\n", ERR_PTR(err)); return err;