From patchwork Fri Nov 18 01:58:54 2022
X-Patchwork-Submitter: John Harrison
X-Patchwork-Id: 13047635
From: John.C.Harrison@Intel.com
To: Intel-GFX@Lists.FreeDesktop.Org
Subject: [PATCH v2 1/5] drm/i915/gt: Start adding module oriented dmesg output
Date: Thu, 17 Nov 2022 17:58:54 -0800
Message-Id: <20221118015858.2548106-2-John.C.Harrison@Intel.com>
In-Reply-To: <20221118015858.2548106-1-John.C.Harrison@Intel.com>
References: <20221118015858.2548106-1-John.C.Harrison@Intel.com>
Cc: John Harrison, DRI-Devel@Lists.FreeDesktop.Org

From: John Harrison

When trying to analyse bug reports from CI, customers, etc. it can be
difficult to work out exactly what is happening on which GT in a
multi-GT system. So add GT oriented debug/error message wrappers. If
used instead of the drm_ equivalents, you get the same output but with
a GT# prefix on it.

v2: Go back to using lower case names (combined review feedback).
Convert intel_gt.c as a first step.
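As a purely illustrative sketch (not taken from the patch itself), this is what a converted call site looks like and what the wrapper expands to, assuming a valid struct intel_gt pointer 'gt' and an error code 'ret' as in the hunks below:

    /* Before: nothing says which GT the message refers to. */
    drm_err(&gt->i915->drm, "Enabling PPGTT failed (%d)\n", ret);

    /* After: same message, now tagged with the GT number. Per the gt_err()
     * definition added to intel_gt.h, this expands to
     *   drm_err(&gt->i915->drm, "GT%u: " "Enabling PPGTT failed (%d)\n",
     *           gt->info.id, ret);
     * so a multi-GT system logs e.g. "GT1: Enabling PPGTT failed (-5)"
     * instead of an untagged message.
     */
    gt_err(gt, "Enabling PPGTT failed (%d)\n", ret);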
Signed-off-by: John Harrison --- drivers/gpu/drm/i915/gt/intel_gt.c | 96 ++++++++++++++---------------- drivers/gpu/drm/i915/gt/intel_gt.h | 35 +++++++++++ 2 files changed, 81 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 0325f071046ca..349fcfdd14a6d 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -90,9 +90,8 @@ static int intel_gt_probe_lmem(struct intel_gt *gt) if (err == -ENODEV) return 0; - drm_err(&i915->drm, - "Failed to setup region(%d) type=%d\n", - err, INTEL_MEMORY_LOCAL); + gt_err(gt, "Failed to setup region(%d) type=%d\n", + err, INTEL_MEMORY_LOCAL); return err; } @@ -192,14 +191,14 @@ int intel_gt_init_hw(struct intel_gt *gt) ret = i915_ppgtt_init_hw(gt); if (ret) { - drm_err(&i915->drm, "Enabling PPGTT failed (%d)\n", ret); + gt_err(gt, "Enabling PPGTT failed (%d)\n", ret); goto out; } /* We can't enable contexts until all firmware is loaded */ ret = intel_uc_init_hw(>->uc); if (ret) { - i915_probe_error(i915, "Enabling uc failed (%d)\n", ret); + gt_probe_error(gt, "Enabling uc failed (%d)\n", ret); goto out; } @@ -264,7 +263,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt, * some errors might have become stuck, * mask them. */ - drm_dbg(>->i915->drm, "EIR stuck: 0x%08x, masking\n", eir); + gt_dbg(gt, "EIR stuck: 0x%08x, masking\n", eir); rmw_set(uncore, EMR, eir); intel_uncore_write(uncore, GEN2_IIR, I915_MASTER_ERROR_INTERRUPT); @@ -298,16 +297,16 @@ static void gen6_check_faults(struct intel_gt *gt) for_each_engine(engine, gt, id) { fault = GEN6_RING_FAULT_REG_READ(engine); if (fault & RING_FAULT_VALID) { - drm_dbg(&engine->i915->drm, "Unexpected fault\n" - "\tAddr: 0x%08lx\n" - "\tAddress space: %s\n" - "\tSource ID: %d\n" - "\tType: %d\n", - fault & PAGE_MASK, - fault & RING_FAULT_GTTSEL_MASK ? - "GGTT" : "PPGTT", - RING_FAULT_SRCID(fault), - RING_FAULT_FAULT_TYPE(fault)); + gt_dbg(gt, "Unexpected fault\n" + "\tAddr: 0x%08lx\n" + "\tAddress space: %s\n" + "\tSource ID: %d\n" + "\tType: %d\n", + fault & PAGE_MASK, + fault & RING_FAULT_GTTSEL_MASK ? + "GGTT" : "PPGTT", + RING_FAULT_SRCID(fault), + RING_FAULT_FAULT_TYPE(fault)); } } } @@ -334,17 +333,17 @@ static void xehp_check_faults(struct intel_gt *gt) fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | ((u64)fault_data0 << 12); - drm_dbg(>->i915->drm, "Unexpected fault\n" - "\tAddr: 0x%08x_%08x\n" - "\tAddress space: %s\n" - "\tEngine ID: %d\n" - "\tSource ID: %d\n" - "\tType: %d\n", - upper_32_bits(fault_addr), lower_32_bits(fault_addr), - fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", - GEN8_RING_FAULT_ENGINE_ID(fault), - RING_FAULT_SRCID(fault), - RING_FAULT_FAULT_TYPE(fault)); + gt_dbg(gt, "Unexpected fault\n" + "\tAddr: 0x%08x_%08x\n" + "\tAddress space: %s\n" + "\tEngine ID: %d\n" + "\tSource ID: %d\n" + "\tType: %d\n", + upper_32_bits(fault_addr), lower_32_bits(fault_addr), + fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", + GEN8_RING_FAULT_ENGINE_ID(fault), + RING_FAULT_SRCID(fault), + RING_FAULT_FAULT_TYPE(fault)); } } @@ -375,17 +374,17 @@ static void gen8_check_faults(struct intel_gt *gt) fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | ((u64)fault_data0 << 12); - drm_dbg(&uncore->i915->drm, "Unexpected fault\n" - "\tAddr: 0x%08x_%08x\n" - "\tAddress space: %s\n" - "\tEngine ID: %d\n" - "\tSource ID: %d\n" - "\tType: %d\n", - upper_32_bits(fault_addr), lower_32_bits(fault_addr), - fault_data1 & FAULT_GTT_SEL ? 
"GGTT" : "PPGTT", - GEN8_RING_FAULT_ENGINE_ID(fault), - RING_FAULT_SRCID(fault), - RING_FAULT_FAULT_TYPE(fault)); + gt_dbg(gt, "Unexpected fault\n" + "\tAddr: 0x%08x_%08x\n" + "\tAddress space: %s\n" + "\tEngine ID: %d\n" + "\tSource ID: %d\n" + "\tType: %d\n", + upper_32_bits(fault_addr), lower_32_bits(fault_addr), + fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", + GEN8_RING_FAULT_ENGINE_ID(fault), + RING_FAULT_SRCID(fault), + RING_FAULT_FAULT_TYPE(fault)); } } @@ -479,7 +478,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) if (IS_ERR(obj)) obj = i915_gem_object_create_internal(i915, size); if (IS_ERR(obj)) { - drm_err(&i915->drm, "Failed to allocate scratch page\n"); + gt_err(gt, "Failed to allocate scratch page\n"); return PTR_ERR(obj); } @@ -729,8 +728,7 @@ int intel_gt_init(struct intel_gt *gt) err = intel_gt_init_hwconfig(gt); if (err) - drm_err(>->i915->drm, "Failed to retrieve hwconfig table: %pe\n", - ERR_PTR(err)); + gt_err(gt, "Failed to retrieve hwconfig table: %pe\n", ERR_PTR(err)); err = __engines_record_defaults(gt); if (err) @@ -891,7 +889,7 @@ int intel_gt_probe_all(struct drm_i915_private *i915) gt->name = "Primary GT"; gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask; - drm_dbg(&i915->drm, "Setting up %s\n", gt->name); + gt_dbg(gt, "Setting up %s\n", gt->name); ret = intel_gt_tile_setup(gt, phys_addr); if (ret) return ret; @@ -916,7 +914,7 @@ int intel_gt_probe_all(struct drm_i915_private *i915) gt->info.engine_mask = gtdef->engine_mask; gt->info.id = i; - drm_dbg(&i915->drm, "Setting up %s\n", gt->name); + gt_dbg(gt, "Setting up %s\n", gt->name); if (GEM_WARN_ON(range_overflows_t(resource_size_t, gtdef->mapping_base, SZ_16M, @@ -951,7 +949,7 @@ int intel_gt_probe_all(struct drm_i915_private *i915) return 0; err: - i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret); + gt_probe_error(gt, "Failed to initialize %s! (%d)\n", gtdef->name, ret); intel_gt_release_all(i915); return ret; @@ -1004,8 +1002,7 @@ get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8, const unsigned int class = engine->class; struct reg_and_bit rb = { }; - if (drm_WARN_ON_ONCE(&engine->i915->drm, - class >= num || !regs[class].reg)) + if (gt_WARN_ON_ONCE(engine->gt, class >= num || !regs[class].reg)) return rb; rb.reg = regs[class]; @@ -1088,8 +1085,7 @@ static void mmio_invalidate_full(struct intel_gt *gt) return; } - if (drm_WARN_ONCE(&i915->drm, !num, - "Platform does not implement TLB invalidation!")) + if (gt_WARN_ONCE(gt, !num, "Platform does not implement TLB invalidation!")) return; intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); @@ -1142,8 +1138,8 @@ static void mmio_invalidate_full(struct intel_gt *gt) if (wait_for_invalidate(gt, rb)) drm_err_ratelimited(>->i915->drm, - "%s TLB invalidation did not complete in %ums!\n", - engine->name, TLB_INVAL_TIMEOUT_MS); + "GT%d: %s TLB invalidation did not complete in %ums!\n", + gt->info.id, engine->name, TLB_INVAL_TIMEOUT_MS); } /* diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index e0365d5562484..5bf2d53f4b4d0 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -13,6 +13,41 @@ struct drm_i915_private; struct drm_printer; +#define gt_err(_gt, _fmt, ...) \ + drm_err(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) + +#define gt_warn(_gt, _fmt, ...) \ + drm_warn(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) + +#define gt_notice(_gt, _fmt, ...) 
\ + drm_notice(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) + +#define gt_info(_gt, _fmt, ...) \ + drm_info(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) + +#define gt_dbg(_gt, _fmt, ...) \ + drm_dbg(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) + +#define gt_probe_error(_gt, _fmt, ...) \ + do { \ + if (i915_error_injected()) \ + gt_dbg(_gt, _fmt, ##__VA_ARGS__); \ + else \ + gt_err(_gt, _fmt, ##__VA_ARGS__); \ + } while (0) + +#define gt_WARN_ON(_gt, _condition) \ + gt_WARN(_gt, _condition, "%s", "gt_WARN_ON(" __stringify(_condition) ")") + +#define gt_WARN_ON_ONCE(_gt, _condition) \ + gt_WARN_ONCE(_gt, _condition, "%s", "gt_WARN_ONCE(" __stringify(_condition) ")") + +#define gt_WARN(_gt, _condition, _fmt, ...) \ + drm_WARN(&(_gt)->i915->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) + +#define gt_WARN_ONCE(_gt, _condition, _fmt, ...) \ + drm_WARN_ONCE(&(_gt)->i915->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) + #define GT_TRACE(gt, fmt, ...) do { \ const struct intel_gt *gt__ __maybe_unused = (gt); \ GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \

From patchwork Fri Nov 18 01:58:55 2022
X-Patchwork-Submitter: John Harrison
X-Patchwork-Id: 13047638
From: John.C.Harrison@Intel.com
To: Intel-GFX@Lists.FreeDesktop.Org
Subject: [PATCH v2 2/5] drm/i915/huc: Add HuC specific debug print wrappers
Date: Thu, 17 Nov 2022 17:58:55 -0800
Message-Id: <20221118015858.2548106-3-John.C.Harrison@Intel.com>
X-Mailer: git-send-email
2.37.3 In-Reply-To: <20221118015858.2548106-1-John.C.Harrison@Intel.com> References: <20221118015858.2548106-1-John.C.Harrison@Intel.com> MIME-Version: 1.0 Organization: Intel Corporation (UK) Ltd. - Co. Reg. #1134945 - Pipers Way, Swindon SN3 1RJ X-BeenThere: dri-devel@lists.freedesktop.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: Direct Rendering Infrastructure - Development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Cc: John Harrison , DRI-Devel@Lists.FreeDesktop.Org Errors-To: dri-devel-bounces@lists.freedesktop.org Sender: "dri-devel" From: John Harrison Create a set of HuC printers and start using them. Signed-off-by: John Harrison --- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 31 ++++++++++---------------- drivers/gpu/drm/i915/gt/uc/intel_huc.h | 23 +++++++++++++++++++ 2 files changed, 35 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index be855811d85df..0bbbc7192da63 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -107,11 +107,9 @@ static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrti if (!intel_huc_is_authenticated(huc)) { if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC) - drm_notice(&huc_to_gt(huc)->i915->drm, - "timed out waiting for MEI GSC init to load HuC\n"); + huc_notice(huc, "Timed out waiting for MEI GSC init to load FW\n"); else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP) - drm_notice(&huc_to_gt(huc)->i915->drm, - "timed out waiting for MEI PXP init to load HuC\n"); + huc_notice(huc, "Timed out waiting for MEI PXP init to load FW\n"); else MISSING_CASE(huc->delayed_load.status); @@ -174,8 +172,7 @@ static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *d case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver fails to be bound */ case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */ - drm_info(&huc_to_gt(huc)->i915->drm, - "mei driver not bound, disabling HuC load\n"); + huc_info(huc, "- mei driver not bound, disabling HuC load\n"); gsc_init_error(huc); break; } @@ -193,8 +190,7 @@ void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus huc->delayed_load.nb.notifier_call = gsc_notifier; ret = bus_register_notifier(bus, &huc->delayed_load.nb); if (ret) { - drm_err(&huc_to_gt(huc)->i915->drm, - "failed to register GSC notifier\n"); + huc_err(huc, "Failed to register GSC notifier\n"); huc->delayed_load.nb.notifier_call = NULL; gsc_init_error(huc); } @@ -284,8 +280,7 @@ static int check_huc_loading_mode(struct intel_huc *huc) GSC_LOADS_HUC; if (fw_needs_gsc != hw_uses_gsc) { - drm_err(>->i915->drm, - "mismatch between HuC FW (%s) and HW (%s) load modes\n", + huc_err(huc, "Mismatch between FW (%s) and HW (%s) load modes\n", HUC_LOAD_MODE_STRING(fw_needs_gsc), HUC_LOAD_MODE_STRING(hw_uses_gsc)); return -ENOEXEC; @@ -294,19 +289,17 @@ static int check_huc_loading_mode(struct intel_huc *huc) /* make sure we can access the GSC via the mei driver if we need it */ if (!(IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC)) && fw_needs_gsc) { - drm_info(>->i915->drm, - "Can't load HuC due to missing MEI modules\n"); + huc_info(huc, "Can't load due to missing MEI modules\n"); return -EIO; } - drm_dbg(>->i915->drm, "GSC loads huc=%s\n", str_yes_no(fw_needs_gsc)); + huc_dbg(huc, "GSC loads huc=%s\n", str_yes_no(fw_needs_gsc)); return 0; } int intel_huc_init(struct intel_huc *huc) { - struct 
drm_i915_private *i915 = huc_to_gt(huc)->i915; int err; err = check_huc_loading_mode(huc); @@ -323,7 +316,7 @@ int intel_huc_init(struct intel_huc *huc) out: intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL); - drm_info(&i915->drm, "HuC init failed with %d\n", err); + huc_info(huc, "init failed with %d\n", err); return err; } @@ -366,13 +359,13 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc) delayed_huc_load_complete(huc); if (ret) { - drm_err(>->i915->drm, "HuC: Firmware not verified %d\n", ret); + huc_err(huc, "firmware not verified %d\n", ret); intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL); return ret; } intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING); - drm_info(>->i915->drm, "HuC authenticated\n"); + huc_info(huc, "authenticated\n"); return 0; } @@ -407,7 +400,7 @@ int intel_huc_auth(struct intel_huc *huc) ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data)); if (ret) { - DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret); + huc_err(huc, "auth request not acked by GuC: %d\n", ret); goto fail; } @@ -419,7 +412,7 @@ int intel_huc_auth(struct intel_huc *huc) return 0; fail: - i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret); + huc_probe_error(huc, "authentication failed %d\n", ret); return ret; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h index 52db03620c609..f253c1c19f12f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -16,6 +16,29 @@ struct bus_type; +#define huc_err(_huc, _fmt, ...) \ + gt_err(huc_to_gt(_huc), "HuC " _fmt, ##__VA_ARGS__) + +#define huc_warn(_huc, _fmt, ...) \ + gt_warn(huc_to_gt(_huc), "HuC " _fmt, ##__VA_ARGS__) + +#define huc_notice(_huc, _fmt, ...) \ + gt_notice(huc_to_gt(_huc), "HuC " _fmt, ##__VA_ARGS__) + +#define huc_info(_huc, _fmt, ...) \ + gt_info(huc_to_gt(_huc), "HuC " _fmt, ##__VA_ARGS__) + +#define huc_dbg(_huc, _fmt, ...) \ + gt_dbg(huc_to_gt(_huc), "HuC " _fmt, ##__VA_ARGS__) + +#define huc_probe_error(_huc, _fmt, ...) 
\ + do { \ + if (i915_error_injected()) \ + huc_dbg(_huc, _fmt, ##__VA_ARGS__); \ + else \ + huc_err(_huc, _fmt, ##__VA_ARGS__); \ + } while (0) + enum intel_huc_delayed_load_status { INTEL_HUC_WAITING_ON_GSC = 0, INTEL_HUC_WAITING_ON_PXP,

From patchwork Fri Nov 18 01:58:56 2022
X-Patchwork-Submitter: John Harrison
X-Patchwork-Id: 13047637
From: John.C.Harrison@Intel.com
To: Intel-GFX@Lists.FreeDesktop.Org
Subject: [PATCH v2 3/5] drm/i915/guc: Add GuC specific debug print wrappers
Date: Thu, 17 Nov 2022 17:58:56 -0800
Message-Id: <20221118015858.2548106-4-John.C.Harrison@Intel.com>
In-Reply-To: <20221118015858.2548106-1-John.C.Harrison@Intel.com>
References: <20221118015858.2548106-1-John.C.Harrison@Intel.com>
Cc: John Harrison, DRI-Devel@Lists.FreeDesktop.Org

From: John Harrison

Create a set of GuC printers and start using them.
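For illustration only (not part of the diff below): the GuC wrappers layer on top of the GT wrappers from the first patch, so both prefixes end up in the output. Taking one of the calls converted in intel_guc.c:

    guc_err(guc, "mmio request %#x: no reply %x\n", request[0], header);

    /* Expands via guc_err() -> gt_err() -> drm_err() to roughly:
     *   drm_err(&guc_to_gt(guc)->i915->drm,
     *           "GT%u: " "GuC " "mmio request %#x: no reply %x\n",
     *           guc_to_gt(guc)->info.id, request[0], header);
     * i.e. dmesg shows something like
     * "GT0: GuC mmio request 0x...: no reply 0x...".
     */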
Signed-off-by: John Harrison --- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 32 ++++------ drivers/gpu/drm/i915/gt/uc/intel_guc.h | 35 +++++++++++ drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 8 +-- .../gpu/drm/i915/gt/uc/intel_guc_capture.c | 48 +++++--------- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 19 +++--- drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 37 ++++++----- drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c | 7 +-- drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 55 +++++++--------- .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 62 +++++++++---------- drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 34 +++++----- .../drm/i915/gt/uc/selftest_guc_hangcheck.c | 22 +++---- .../drm/i915/gt/uc/selftest_guc_multi_lrc.c | 10 +-- 12 files changed, 179 insertions(+), 190 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 52aede324788e..d9972510ee29b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -94,8 +94,8 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc) assert_rpm_wakelock_held(>->i915->runtime_pm); spin_lock_irq(gt->irq_lock); - WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) & - gt->pm_guc_events); + guc_WARN_ON_ONCE(guc, intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) & + gt->pm_guc_events); gen6_gt_pm_enable_irq(gt, gt->pm_guc_events); spin_unlock_irq(gt->irq_lock); @@ -339,7 +339,7 @@ static void guc_init_params(struct intel_guc *guc) params[GUC_CTL_DEVID] = guc_ctl_devid(guc); for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) - DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]); + guc_dbg(guc, "init param[%2d] = %#x\n", i, params[i]); } /* @@ -451,7 +451,7 @@ int intel_guc_init(struct intel_guc *guc) intel_uc_fw_fini(&guc->fw); out: intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_INIT_FAIL); - i915_probe_error(gt->i915, "failed with %d\n", ret); + guc_probe_error(guc, "init failed with %d\n", ret); return ret; } @@ -484,7 +484,6 @@ void intel_guc_fini(struct intel_guc *guc) int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len, u32 *response_buf, u32 response_buf_size) { - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; struct intel_uncore *uncore = guc_to_gt(guc)->uncore; u32 header; int i; @@ -519,8 +518,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len, 10, 10, &header); if (unlikely(ret)) { timeout: - drm_err(&i915->drm, "mmio request %#x: no reply %x\n", - request[0], header); + guc_err(guc, "mmio request %#x: no reply %x\n", request[0], header); goto out; } @@ -541,8 +539,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len, if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) { u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header); - drm_dbg(&i915->drm, "mmio request %#x: retrying, reason %u\n", - request[0], reason); + guc_dbg(guc, "mmio request %#x: retrying, reason %u\n", request[0], reason); goto retry; } @@ -550,16 +547,14 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len, u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header); u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header); - drm_err(&i915->drm, "mmio request %#x: failure %x/%u\n", - request[0], error, hint); + guc_err(guc, "mmio request %#x: failure %x/%u\n", request[0], error, hint); ret = -ENXIO; goto out; } if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) { proto: - drm_err(&i915->drm, "mmio request %#x: unexpected 
reply %#x\n", - request[0], header); + guc_err(guc, "mmio request %#x: unexpected reply %#x\n", request[0], header); ret = -EPROTO; goto out; } @@ -601,9 +596,9 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, msg = payload[0] & guc->msg_enabled_mask; if (msg & INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED) - drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC crash dump notification!\n"); + guc_err(guc, "early notification: Crash dump!\n"); if (msg & INTEL_GUC_RECV_MSG_EXCEPTION) - drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC exception notification!\n"); + guc_err(guc, "early notification: Exception!\n"); return 0; } @@ -657,7 +652,7 @@ int intel_guc_suspend(struct intel_guc *guc) */ ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); if (ret) - DRM_ERROR("GuC suspend: RESET_CLIENT action failed with error %d!\n", ret); + guc_err(guc, "suspend: RESET_CLIENT action failed with error %d!\n", ret); } /* Signal that the GuC isn't running. */ @@ -832,12 +827,11 @@ static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 va static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value) { - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; int err = __guc_action_self_cfg(guc, key, len, value); if (unlikely(err)) - i915_probe_error(i915, "Unsuccessful self-config (%pe) key %#hx value %#llx\n", - ERR_PTR(err), key, value); + guc_probe_error(guc, "self-config; Unsuccessful (%pe) key %#hx value %#llx\n", + ERR_PTR(err), key, value); return err; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 1bb3f98292866..8c02d22bc7e82 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -24,6 +24,41 @@ struct __guc_ads_blob; struct intel_guc_state_capture; +#define guc_err(_guc, _fmt, ...) \ + gt_err(guc_to_gt(_guc), "GuC " _fmt, ##__VA_ARGS__) + +#define guc_warn(_guc, _fmt, ...) \ + gt_warn(guc_to_gt(_guc), "GuC " _fmt, ##__VA_ARGS__) + +#define guc_notice(_guc, _fmt, ...) \ + gt_notice(guc_to_gt(_guc), "GuC " _fmt, ##__VA_ARGS__) + +#define guc_info(_guc, _fmt, ...) \ + gt_info(guc_to_gt(_guc), "GuC " _fmt, ##__VA_ARGS__) + +#define guc_dbg(_guc, _fmt, ...) \ + gt_dbg(guc_to_gt(_guc), "GuC " _fmt, ##__VA_ARGS__) + +#define guc_probe_error(_guc, _fmt, ...) \ + do { \ + if (i915_error_injected()) \ + guc_dbg(_guc, _fmt, ##__VA_ARGS__); \ + else \ + guc_err(_guc, _fmt, ##__VA_ARGS__); \ + } while (0) + +#define guc_WARN_ON(_guc, _condition) \ + guc_WARN(_guc, _condition, "%s", "guc_WARN_ON(" __stringify(_condition) ")") + +#define guc_WARN_ON_ONCE(_guc, _condition) \ + guc_WARN_ONCE(_guc, _condition, "%s", "guc_WARN_ON_ONCE(" __stringify(_condition) ")") + +#define guc_WARN(_guc, _condition, _fmt, ...) \ + gt_WARN(guc_to_gt(_guc), _condition, "GuC " _fmt, ##__VA_ARGS__) + +#define guc_WARN_ONCE(_guc, _condition, _fmt, ...) \ + gt_WARN_ONCE(guc_to_gt(_guc), _condition, "GuC " _fmt, ##__VA_ARGS__) + /** * struct intel_guc - Top level structure of GuC. 
* diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index a7f737c4792e2..fa9a31176b4a7 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -427,7 +427,7 @@ static long guc_mmio_reg_state_create(struct intel_guc *guc) guc->ads_regset = temp_set.storage; - drm_dbg(&guc_to_gt(guc)->i915->drm, "Used %zu KB for temporary ADS regset\n", + guc_dbg(guc, "used %zu KB for temporary ADS regset\n", (temp_set.storage_max * sizeof(struct guc_mmio_reg)) >> 10); return total * sizeof(struct guc_mmio_reg); @@ -621,8 +621,7 @@ static void guc_init_golden_context(struct intel_guc *guc) engine = find_engine_state(gt, engine_class); if (!engine) { - drm_err(>->i915->drm, "No engine state recorded for class %d!\n", - engine_class); + guc_err(guc, "No engine state recorded for class %d!\n", engine_class); ads_blob_write(guc, ads.eng_state_size[guc_class], 0); ads_blob_write(guc, ads.golden_context_lrca[guc_class], 0); continue; @@ -646,7 +645,6 @@ static int guc_capture_prep_lists(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0; struct guc_gt_system_info local_info; struct iosys_map info_map; @@ -751,7 +749,7 @@ guc_capture_prep_lists(struct intel_guc *guc) } if (guc->ads_capture_size && guc->ads_capture_size != PAGE_ALIGN(total_size)) - drm_warn(&i915->drm, "GuC->ADS->Capture alloc size changed from %d to %d\n", + guc_warn(guc, "capture alloc size changed from %d to %d\n", guc->ads_capture_size, PAGE_ALIGN(total_size)); return PAGE_ALIGN(total_size); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c index 1d49a7ec0bd8f..a8876178da6ff 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c @@ -353,7 +353,6 @@ guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc, u32 ipver) { struct intel_gt *gt = guc_to_gt(guc); - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; struct sseu_dev_info *sseu; int slice, subslice, i, iter, num_steer_regs, num_tot_regs = 0; const struct __guc_mmio_reg_descr_group *list; @@ -402,7 +401,7 @@ guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc, } } - drm_dbg(&i915->drm, "GuC-capture found %d-ext-regs.\n", num_tot_regs); + guc_dbg(guc, "capture found %d ext-regs.\n", num_tot_regs); guc->capture->extlists = extlists; } @@ -477,7 +476,6 @@ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid, struct guc_mmio_reg *ptr, u16 num_entries) { u32 i = 0, j = 0; - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists; struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists; const struct __guc_mmio_reg_descr_group *match; @@ -509,8 +507,7 @@ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid, } } if (i < num_entries) - drm_dbg(&i915->drm, "GuC-capture: Init reglist short %d out %d.\n", - (int)i, (int)num_entries); + guc_dbg(guc, "capture: Init reglist short %d out %d.\n", i, num_entries); return 0; } @@ -540,12 +537,11 @@ guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid, size_t *size, bool is_purpose_est) { struct intel_guc_state_capture *gc = guc->capture; - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; struct __guc_capture_ads_cache *cache = 
&gc->ads_cache[owner][type][classid]; int num_regs; if (!gc->reglists) { - drm_warn(&i915->drm, "GuC-capture: No reglist on this device\n"); + guc_warn(guc, "capture: No reglist on this device\n"); return -ENODEV; } @@ -557,9 +553,9 @@ guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid, if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF && !guc_capture_get_one_list(gc->reglists, owner, type, classid)) { if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL) - drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist Global!\n"); + guc_warn(guc, "capture: Missing reglist: Global!\n"); else - drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist %s(%u):%s(%u)!\n", + guc_warn(guc, "capture: Missing reglist: %s(%u):%s(%u)!\n", __stringify_type(type), type, __stringify_engclass(classid), classid); return -ENODATA; @@ -592,7 +588,6 @@ intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classi { struct intel_guc_state_capture *gc = guc->capture; struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid]; - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; struct guc_debug_capture_list *listnode; int ret, num_regs; u8 *caplist, *tmp; @@ -623,7 +618,7 @@ intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classi caplist = kzalloc(size, GFP_KERNEL); if (!caplist) { - drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached caplist"); + guc_dbg(guc, "capture: Failed to alloc cached caplist\n"); return -ENOMEM; } @@ -653,7 +648,6 @@ intel_guc_capture_getnullheader(struct intel_guc *guc, void **outptr, size_t *size) { struct intel_guc_state_capture *gc = guc->capture; - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; int tmp = sizeof(u32) * 4; void *null_header; @@ -665,7 +659,7 @@ intel_guc_capture_getnullheader(struct intel_guc *guc, null_header = kzalloc(tmp, GFP_KERNEL); if (!null_header) { - drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached nulllist"); + guc_dbg(guc, "capture: Failed to alloc cached null list\n"); return -ENOMEM; } @@ -727,7 +721,6 @@ guc_capture_output_min_size_est(struct intel_guc *guc) static void check_guc_capture_size(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; int min_size = guc_capture_output_min_size_est(guc); int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER; u32 buffer_size = intel_guc_log_section_size_capture(&guc->log); @@ -741,13 +734,13 @@ static void check_guc_capture_size(struct intel_guc *guc) * INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE. 
*/ if (min_size < 0) - drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n", + guc_warn(guc, "error state capture buffer minimum size calculation failed: %d!\n", min_size); else if (min_size > buffer_size) - drm_warn(&i915->drm, "GuC error state capture buffer maybe small: %d < %d\n", + guc_warn(guc, "error state capture buffer maybe small: %d < %d\n", buffer_size, min_size); else if (spare_size > buffer_size) - drm_dbg(&i915->drm, "GuC error state capture buffer lacks spare size: %d < %d (min = %d)\n", + guc_dbg(guc, "error state capture buffer lacks spare size: %d < %d (min = %d)\n", buffer_size, spare_size, min_size); } @@ -848,7 +841,6 @@ static int guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *buf, u32 *dw) { - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; int tries = 2; int avail = 0; u32 *src_data; @@ -865,7 +857,7 @@ guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate * return 4; } if (avail) - drm_dbg(&i915->drm, "GuC-Cap-Logs not dword aligned, skipping.\n"); + guc_dbg(guc, "capture: Log not dword aligned, skipping.\n"); buf->rd = 0; } @@ -1118,13 +1110,12 @@ static void __guc_capture_create_prealloc_nodes(struct intel_guc *guc) { struct __guc_capture_parsed_output *node = NULL; - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; int i; for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) { node = guc_capture_alloc_one_node(guc); if (!node) { - drm_warn(&i915->drm, "GuC Capture pre-alloc-cache failure\n"); + guc_warn(guc, "capture pre-alloc-cache failure\n"); /* dont free the priors, use what we got and cleanup at shutdown */ return; } @@ -1169,7 +1160,6 @@ guc_capture_create_prealloc_nodes(struct intel_guc *guc) static int guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf) { - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; struct guc_state_capture_group_header_t ghdr = {0}; struct guc_state_capture_header_t hdr = {0}; struct __guc_capture_parsed_output *node = NULL; @@ -1183,7 +1173,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat if (!i) return -ENODATA; if (i % sizeof(u32)) { - drm_warn(&i915->drm, "GuC Capture new entries unaligned\n"); + guc_warn(guc, "capture new entries unaligned\n"); ret = -EIO; goto bailout; } @@ -1301,8 +1291,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat break; } if (datatype != GUC_CAPTURE_LIST_TYPE_GLOBAL) - drm_dbg(&i915->drm, "GuC Capture missing global dump: %08x!\n", - datatype); + guc_dbg(guc, "cpture missing global dump: %08x!\n", datatype); } node->is_partial = is_partial; node->reginfo[datatype].vfid = FIELD_GET(CAP_HDR_CAPTURE_VFID, hdr.owner); @@ -1322,7 +1311,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios); if (numregs > guc->capture->max_mmio_per_node) { - drm_dbg(&i915->drm, "GuC Capture list extraction clipped by prealloc!\n"); + guc_dbg(guc, "capture list extraction clipped by prealloc!\n"); numregs = guc->capture->max_mmio_per_node; } node->reginfo[datatype].num_regs = numregs; @@ -1367,7 +1356,6 @@ static void __guc_capture_process_output(struct intel_guc *guc) { unsigned int buffer_size, read_offset, write_offset, full_count; struct intel_uc *uc = container_of(guc, typeof(*uc), guc); - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; struct guc_log_buffer_state log_buf_state_local; struct guc_log_buffer_state 
*log_buf_state; struct __guc_capture_bufstate buf; @@ -1403,7 +1391,7 @@ static void __guc_capture_process_output(struct intel_guc *guc) write_offset = buffer_size; } else if (unlikely((read_offset > buffer_size) || (write_offset > buffer_size))) { - drm_err(&i915->drm, "invalid GuC log capture buffer state!\n"); + guc_err(guc, "capture: invalid buffer state!\n"); /* copy whole buffer as offsets are unreliable */ read_offset = 0; write_offset = buffer_size; @@ -1586,13 +1574,11 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt, struct intel_context *ce) { struct __guc_capture_parsed_output *n, *ntmp; - struct drm_i915_private *i915; struct intel_guc *guc; if (!gt || !ee || !ce) return; - i915 = gt->i915; guc = >->uc.guc; if (!guc->capture) return; @@ -1615,7 +1601,7 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt, return; } } - drm_dbg(&i915->drm, "GuC capture can't match ee to node\n"); + guc_dbg(guc, "capture can't match ee to node\n"); } void intel_guc_capture_process(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 5b86b2e286e07..4a2811aa3d5e1 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -103,8 +103,9 @@ static inline bool guc_ready(struct intel_uncore *uncore, u32 *status) return uk_val == INTEL_GUC_LOAD_STATUS_READY; } -static int guc_wait_ucode(struct intel_uncore *uncore) +static int guc_wait_ucode(struct intel_gt *gt) { + struct intel_uncore *uncore = gt->uncore; u32 status; int ret; @@ -127,12 +128,8 @@ static int guc_wait_ucode(struct intel_uncore *uncore) */ ret = wait_for(guc_ready(uncore, &status), 200); if (ret) { - struct drm_device *drm = &uncore->i915->drm; - - drm_info(drm, "GuC load failed: status = 0x%08X\n", status); - drm_info(drm, "GuC load failed: status: Reset = %d, " - "BootROM = 0x%02X, UKernel = 0x%02X, " - "MIA = 0x%02X, Auth = 0x%02X\n", + gt_info(gt, "GuC load failed: status = 0x%08X\n", status); + gt_info(gt, "GuC load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n", REG_FIELD_GET(GS_MIA_IN_RESET, status), REG_FIELD_GET(GS_BOOTROM_MASK, status), REG_FIELD_GET(GS_UKERNEL_MASK, status), @@ -140,13 +137,13 @@ static int guc_wait_ucode(struct intel_uncore *uncore) REG_FIELD_GET(GS_AUTH_STATUS_MASK, status)); if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) { - drm_info(drm, "GuC firmware signature verification failed\n"); + gt_info(gt, "GuC firmware signature verification failed\n"); ret = -ENOEXEC; } if (REG_FIELD_GET(GS_UKERNEL_MASK, status) == INTEL_GUC_LOAD_STATUS_EXCEPTION) { - drm_info(drm, "GuC firmware exception. EIP: %#x\n", - intel_uncore_read(uncore, SOFT_SCRATCH(13))); + gt_info(gt, "GuC firmware exception. 
EIP: %#x\n", + intel_uncore_read(uncore, SOFT_SCRATCH(13))); ret = -ENXIO; } } @@ -194,7 +191,7 @@ int intel_guc_fw_upload(struct intel_guc *guc) if (ret) goto out; - ret = guc_wait_ucode(uncore); + ret = guc_wait_ucode(gt); if (ret) goto out; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index 68331c538b0a7..8c9a020700b52 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -39,7 +39,6 @@ struct guc_log_section { static void _guc_log_init_sizes(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; static const struct guc_log_section sections[GUC_LOG_SECTIONS_LIMIT] = { { GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT, @@ -82,12 +81,12 @@ static void _guc_log_init_sizes(struct intel_guc_log *log) } if (!IS_ALIGNED(log->sizes[i].bytes, log->sizes[i].units)) - drm_err(&i915->drm, "Mis-aligned GuC log %s size: 0x%X vs 0x%X!", + guc_err(guc, "log: Mis-aligned %s size: 0x%X vs 0x%X!\n", sections[i].name, log->sizes[i].bytes, log->sizes[i].units); log->sizes[i].count = log->sizes[i].bytes / log->sizes[i].units; if (!log->sizes[i].count) { - drm_err(&i915->drm, "Zero GuC log %s size!", sections[i].name); + guc_err(guc, "log: zero %s size!\n", sections[i].name); } else { /* Size is +1 unit */ log->sizes[i].count--; @@ -95,14 +94,14 @@ static void _guc_log_init_sizes(struct intel_guc_log *log) /* Clip to field size */ if (log->sizes[i].count > sections[i].max) { - drm_err(&i915->drm, "GuC log %s size too large: %d vs %d!", + guc_err(guc, "log: %s size too large: %d vs %d!\n", sections[i].name, log->sizes[i].count + 1, sections[i].max + 1); log->sizes[i].count = sections[i].max; } } if (log->sizes[GUC_LOG_SECTIONS_CRASH].units != log->sizes[GUC_LOG_SECTIONS_DEBUG].units) { - drm_err(&i915->drm, "Unit mis-match for GuC log crash and debug sections: %d vs %d!", + guc_err(guc, "log: Unit mis-match for crash and debug sections: %d vs %d!\n", log->sizes[GUC_LOG_SECTIONS_CRASH].units, log->sizes[GUC_LOG_SECTIONS_DEBUG].units); log->sizes[GUC_LOG_SECTIONS_CRASH].units = log->sizes[GUC_LOG_SECTIONS_DEBUG].units; @@ -383,7 +382,7 @@ static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log) mutex_lock(&log->relay.lock); - if (WARN_ON(!intel_guc_log_relay_created(log))) + if (guc_WARN_ON(log_to_guc(log), !intel_guc_log_relay_created(log))) goto out_unlock; /* Get the pointer to shared GuC log buffer */ @@ -451,7 +450,7 @@ static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log) write_offset = buffer_size; } else if (unlikely((read_offset > buffer_size) || (write_offset > buffer_size))) { - DRM_ERROR("invalid log buffer state\n"); + guc_err(log_to_guc(log), "log: invalid buffer state\n"); /* copy whole buffer as offsets are unreliable */ read_offset = 0; write_offset = buffer_size; @@ -520,7 +519,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log) static int guc_log_relay_create(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; + struct intel_gt *gt = guc_to_gt(guc); struct rchan *guc_log_relay_chan; size_t n_subbufs, subbuf_size; int ret; @@ -543,11 +542,11 @@ static int guc_log_relay_create(struct intel_guc_log *log) n_subbufs = 8; guc_log_relay_chan = relay_open("guc_log", - dev_priv->drm.primary->debugfs_root, + gt->i915->drm.primary->debugfs_root, subbuf_size, n_subbufs, - &relay_callbacks, dev_priv); + 
&relay_callbacks, gt->i915); if (!guc_log_relay_chan) { - DRM_ERROR("Couldn't create relay chan for GuC logging\n"); + guc_err(guc, "log: Failed to create relay chan\n"); ret = -ENOMEM; return ret; @@ -596,7 +595,7 @@ static u32 __get_default_log_level(struct intel_guc_log *log) } if (i915->params.guc_log_level > GUC_LOG_LEVEL_MAX) { - DRM_WARN("Incompatible option detected: %s=%d, %s!\n", + guc_warn(guc, "log: Incompatible options detected: %s=%d, %s!\n", "guc_log_level", i915->params.guc_log_level, "verbosity too high"); return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || @@ -641,15 +640,15 @@ int intel_guc_log_create(struct intel_guc_log *log) log->buf_addr = vaddr; log->level = __get_default_log_level(log); - DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n", - log->level, str_enabled_disabled(log->level), - str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)), - GUC_LOG_LEVEL_TO_VERBOSITY(log->level)); + guc_dbg(guc, "guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n", + log->level, str_enabled_disabled(log->level), + str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)), + GUC_LOG_LEVEL_TO_VERBOSITY(log->level)); return 0; err: - DRM_ERROR("Failed to allocate or map GuC log buffer. %d\n", ret); + guc_err(guc, "log: Failed to allocate or map buffer: %d\n", ret); return ret; } @@ -687,7 +686,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) GUC_LOG_LEVEL_IS_ENABLED(level), GUC_LOG_LEVEL_TO_VERBOSITY(level)); if (ret) { - DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret); + guc_dbg(guc, "guc_log_control action failed %d\n", ret); goto out_unlock; } @@ -905,7 +904,7 @@ int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p, map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); if (IS_ERR(map)) { - DRM_DEBUG("Failed to pin object\n"); + guc_dbg(guc, "log: Failed to pin buffer object\n"); drm_puts(p, "(log data unaccessible)\n"); free_page((unsigned long)page); return PTR_ERR(map); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c index b5855091cf6a9..62f3b97111f64 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c @@ -70,13 +70,12 @@ static int __guc_rc_control(struct intel_guc *guc, bool enable) ret = guc_action_control_gucrc(guc, enable); if (ret) { - i915_probe_error(guc_to_gt(guc)->i915, "Failed to %s GuC RC (%pe)\n", - str_enable_disable(enable), ERR_PTR(ret)); + guc_probe_error(guc, "RC: Failed to %s: %pe\n", + str_enable_disable(enable), ERR_PTR(ret)); return ret; } - drm_info(>->i915->drm, "GuC RC: %s\n", - str_enabled_disabled(enable)); + guc_info(guc, "RC %s\n", str_enabled_disabled(enable)); return 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 63464933cbceb..b9ef0b4a123e1 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -171,14 +171,12 @@ static int guc_action_slpc_query(struct intel_guc *guc, u32 offset) static int slpc_query_task_state(struct intel_guc_slpc *slpc) { struct intel_guc *guc = slpc_to_guc(slpc); - struct drm_i915_private *i915 = slpc_to_i915(slpc); u32 offset = intel_guc_ggtt_offset(guc, slpc->vma); int ret; ret = guc_action_slpc_query(guc, offset); if (unlikely(ret)) - i915_probe_error(i915, "Failed to query task state (%pe)\n", - ERR_PTR(ret)); + guc_probe_error(guc, "SLPC: Failed to query task state (%pe)\n", ERR_PTR(ret)); drm_clflush_virt_range(slpc->vaddr, 
SLPC_PAGE_SIZE_BYTES); @@ -188,15 +186,14 @@ static int slpc_query_task_state(struct intel_guc_slpc *slpc) static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value) { struct intel_guc *guc = slpc_to_guc(slpc); - struct drm_i915_private *i915 = slpc_to_i915(slpc); int ret; GEM_BUG_ON(id >= SLPC_MAX_PARAM); ret = guc_action_slpc_set_param(guc, id, value); if (ret) - i915_probe_error(i915, "Failed to set param %d to %u (%pe)\n", - id, value, ERR_PTR(ret)); + guc_probe_error(guc, "SLPC: Failed to set param %d to %u (%pe)\n", + id, value, ERR_PTR(ret)); return ret; } @@ -236,8 +233,7 @@ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq) SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ, freq); if (ret) - drm_notice(&i915->drm, - "Failed to send set_param for min freq(%d): (%d)\n", + guc_notice(guc, "SLPC: Failed to send set_param for min freq(%d): (%d)\n", freq, ret); } @@ -267,7 +263,6 @@ static void slpc_boost_work(struct work_struct *work) int intel_guc_slpc_init(struct intel_guc_slpc *slpc) { struct intel_guc *guc = slpc_to_guc(slpc); - struct drm_i915_private *i915 = slpc_to_i915(slpc); u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); int err; @@ -275,9 +270,8 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc) err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr); if (unlikely(err)) { - i915_probe_error(i915, - "Failed to allocate SLPC struct (err=%pe)\n", - ERR_PTR(err)); + guc_probe_error(guc, "SLPC: Failed to allocate SLPC struct (err=%pe)\n", + ERR_PTR(err)); return err; } @@ -338,7 +332,6 @@ static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset) static int slpc_reset(struct intel_guc_slpc *slpc) { - struct drm_i915_private *i915 = slpc_to_i915(slpc); struct intel_guc *guc = slpc_to_guc(slpc); u32 offset = intel_guc_ggtt_offset(guc, slpc->vma); int ret; @@ -346,15 +339,14 @@ static int slpc_reset(struct intel_guc_slpc *slpc) ret = guc_action_slpc_reset(guc, offset); if (unlikely(ret < 0)) { - i915_probe_error(i915, "SLPC reset action failed (%pe)\n", - ERR_PTR(ret)); + guc_probe_error(guc, "SLPC: Reset action failed (%pe)\n", ERR_PTR(ret)); return ret; } if (!ret) { if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) { - i915_probe_error(i915, "SLPC not enabled! State = %s\n", - slpc_get_state_string(slpc)); + guc_probe_error(guc, "SLPC: Not enabled! 
State = %s\n", + slpc_get_state_string(slpc)); return -EIO; } } @@ -495,8 +487,8 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val) SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY, val < slpc->rp1_freq); if (ret) { - i915_probe_error(i915, "Failed to toggle efficient freq (%pe)\n", - ERR_PTR(ret)); + guc_probe_error(slpc_to_guc(slpc), "SLPC: Failed to toggle efficient freq (%pe)\n", + ERR_PTR(ret)); goto out; } @@ -611,15 +603,12 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc) static bool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc) { - struct drm_i915_private *i915 = slpc_to_i915(slpc); int slpc_min_freq; int ret; ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq); if (ret) { - drm_err(&i915->drm, - "Failed to get min freq: (%d)\n", - ret); + guc_err(slpc_to_guc(slpc), "SLPC: Failed to get min freq: (%d)\n", ret); return false; } @@ -685,8 +674,7 @@ int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode) with_intel_runtime_pm(&i915->runtime_pm, wakeref) { ret = slpc_set_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE, mode); if (ret) - drm_err(&i915->drm, - "Override gucrc mode %d failed %d\n", + guc_err(slpc_to_guc(slpc), "SLPC: Override gucrc mode %d failed %d\n", mode, ret); } @@ -702,9 +690,7 @@ int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc) with_intel_runtime_pm(&i915->runtime_pm, wakeref) { ret = slpc_unset_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE); if (ret) - drm_err(&i915->drm, - "Unsetting gucrc mode failed %d\n", - ret); + guc_err(slpc_to_guc(slpc), "SLPC: Unsetting gucrc mode failed %d\n", ret); } return ret; @@ -726,6 +712,7 @@ int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc) int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) { struct drm_i915_private *i915 = slpc_to_i915(slpc); + struct intel_guc *guc = slpc_to_guc(slpc); int ret; GEM_BUG_ON(!slpc->vma); @@ -734,8 +721,8 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) ret = slpc_reset(slpc); if (unlikely(ret < 0)) { - i915_probe_error(i915, "SLPC Reset event returned (%pe)\n", - ERR_PTR(ret)); + guc_probe_error(guc, "SLPC: Reset event returned (%pe)\n", + ERR_PTR(ret)); return ret; } @@ -753,16 +740,16 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) /* Set SLPC max limit to RP0 */ ret = slpc_use_fused_rp0(slpc); if (unlikely(ret)) { - i915_probe_error(i915, "Failed to set SLPC max to RP0 (%pe)\n", - ERR_PTR(ret)); + guc_probe_error(guc, "SLPC: Failed to set max to RP0 (%pe)\n", + ERR_PTR(ret)); return ret; } /* Revert SLPC min/max to softlimits if necessary */ ret = slpc_set_softlimits(slpc); if (unlikely(ret)) { - i915_probe_error(i915, "Failed to set SLPC softlimits (%pe)\n", - ERR_PTR(ret)); + guc_probe_error(guc, "SLPC: Failed to set softlimits (%pe)\n", + ERR_PTR(ret)); return ret; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 0a42f1807f52c..da8ab18aa4ce7 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1443,8 +1443,7 @@ static void guc_init_engine_stats(struct intel_guc *guc) int ret = guc_action_enable_usage_stats(guc); if (ret) - drm_err(>->i915->drm, - "Failed to enable usage stats: %d!\n", ret); + guc_err(guc, "Failed to enable usage stats: %d!\n", ret); } } @@ -3584,8 +3583,7 @@ static int guc_request_alloc(struct i915_request *rq) intel_context_sched_disable_unpin(ce); else if (intel_context_is_closed(ce)) if (wait_for(context_close_done(ce), 1500)) - 
drm_warn(&guc_to_gt(guc)->i915->drm, - "timed out waiting on context sched close before realloc\n"); + guc_warn(guc, "timed out waiting on context sched close before realloc\n"); /* * Call pin_guc_id here rather than in the pinning step as with * dma_resv, contexts can be repeatedly pinned / unpinned trashing the @@ -4350,7 +4348,7 @@ static int __guc_action_set_scheduling_policies(struct intel_guc *guc, return ret; if (ret != policy->count) { - drm_warn(&guc_to_gt(guc)->i915->drm, "GuC global scheduler policy processed %d of %d KLVs!", + guc_warn(guc, "H2G: global scheduler policy processed %d of %d KLVs!\n", ret, policy->count); if (ret > policy->count) return -EPROTO; @@ -4383,9 +4381,9 @@ static int guc_init_global_schedule_policy(struct intel_guc *guc) ret = __guc_action_set_scheduling_policies(guc, &policy); if (ret) - i915_probe_error(gt->i915, - "Failed to configure global scheduling policies: %pe!\n", - ERR_PTR(ret)); + guc_probe_error(guc, + "Failed to configure global scheduling policies: %pe!\n", + ERR_PTR(ret)); } return ret; @@ -4484,21 +4482,18 @@ g2h_context_lookup(struct intel_guc *guc, u32 ctx_id) struct intel_context *ce; if (unlikely(ctx_id >= GUC_MAX_CONTEXT_ID)) { - drm_err(&guc_to_gt(guc)->i915->drm, - "Invalid ctx_id %u\n", ctx_id); + guc_err(guc, "Invalid ctx_id %u\n", ctx_id); return NULL; } ce = __get_context(guc, ctx_id); if (unlikely(!ce)) { - drm_err(&guc_to_gt(guc)->i915->drm, - "Context is NULL, ctx_id %u\n", ctx_id); + guc_err(guc, "Context is NULL, ctx_id %u\n", ctx_id); return NULL; } if (unlikely(intel_context_is_child(ce))) { - drm_err(&guc_to_gt(guc)->i915->drm, - "Context is child, ctx_id %u\n", ctx_id); + guc_err(guc, "Context is child, ctx_id %u\n", ctx_id); return NULL; } @@ -4513,7 +4508,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc, u32 ctx_id; if (unlikely(len < 1)) { - drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len); + guc_err(guc, "notification: Invalid length %u for deregister done\n", len); return -EPROTO; } ctx_id = msg[0]; @@ -4565,7 +4560,7 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc, u32 ctx_id; if (unlikely(len < 2)) { - drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len); + guc_err(guc, "notification: Invalid length %u for sched done\n", len); return -EPROTO; } ctx_id = msg[0]; @@ -4577,8 +4572,7 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc, if (unlikely(context_destroyed(ce) || (!context_pending_enable(ce) && !context_pending_disable(ce)))) { - drm_err(&guc_to_gt(guc)->i915->drm, - "Bad context sched_state 0x%x, ctx_id %u\n", + guc_err(guc, "notification: Bad context sched_state 0x%x, ctx_id %u\n", ce->guc_state.sched_state, ctx_id); return -EPROTO; } @@ -4666,8 +4660,8 @@ static void guc_handle_context_reset(struct intel_guc *guc, capture_error_state(guc, ce); guc_context_replay(ce); } else { - drm_info(&guc_to_gt(guc)->i915->drm, - "Ignoring context reset notification of exiting context 0x%04X on %s", + guc_info(guc, + "Ignoring context reset notification of exiting context 0x%04X on %s\n", ce->guc_id.id, ce->engine->name); } } @@ -4680,7 +4674,7 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc, int ctx_id; if (unlikely(len != 1)) { - drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len); + guc_err(guc, "notification: Invalid length %u for context reset\n", len); return -EPROTO; } @@ -4713,13 +4707,13 @@ int intel_guc_error_capture_process_msg(struct intel_guc *guc, u32 status; if (unlikely(len != 1)) { - 
drm_dbg(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len); + guc_err(guc, "notification: Invalid length %u for error capture\n", len); return -EPROTO; } status = msg[0] & INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK; if (status == INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE) - drm_warn(&guc_to_gt(guc)->i915->drm, "G2H-Error capture no space"); + guc_warn(guc, "notification: Error capture buffer overflow\n"); intel_guc_capture_process(guc); @@ -4762,13 +4756,12 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc, const u32 *msg, u32 len) { struct intel_engine_cs *engine; - struct intel_gt *gt = guc_to_gt(guc); u8 guc_class, instance; u32 reason; unsigned long flags; if (unlikely(len != 3)) { - drm_err(>->i915->drm, "Invalid length %u", len); + guc_err(guc, "notification: Invalid length %u for engine failure\n", len); return -EPROTO; } @@ -4778,8 +4771,8 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc, engine = intel_guc_lookup_engine(guc, guc_class, instance); if (unlikely(!engine)) { - drm_err(>->i915->drm, - "Invalid engine %d:%d", guc_class, instance); + guc_err(guc, "notification: Invalid engine %d:%d for engine failure\n", + guc_class, instance); return -EPROTO; } @@ -4787,7 +4780,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc, * This is an unexpected failure of a hardware feature. So, log a real * error message not just the informational that comes with the reset. */ - drm_err(>->i915->drm, "GuC engine reset request failed on %d:%d (%s) because 0x%08X", + guc_err(guc, "notification: Engine reset request failed on %d:%d (%s) because 0x%08X\n", guc_class, instance, engine->name, reason); spin_lock_irqsave(&guc->submission_state.lock, flags); @@ -5297,6 +5290,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count, unsigned long flags) { struct guc_virtual_engine *ve; + struct intel_gt *gt; struct intel_guc *guc; unsigned int n; int err; @@ -5305,10 +5299,11 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count, if (!ve) return ERR_PTR(-ENOMEM); - guc = &siblings[0]->gt->uc.guc; + gt = siblings[0]->gt; + guc = >->uc.guc; ve->base.i915 = siblings[0]->i915; - ve->base.gt = siblings[0]->gt; + ve->base.gt = gt; ve->base.uncore = siblings[0]->uncore; ve->base.id = -1; @@ -5336,8 +5331,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count, GEM_BUG_ON(!is_power_of_2(sibling->mask)); if (sibling->mask & ve->base.mask) { - DRM_DEBUG("duplicate %s entry in load balancer\n", - sibling->name); + guc_dbg(guc, "Duplicate %s entry in load balancer\n", sibling->name); err = -EINVAL; goto err_put; } @@ -5346,8 +5340,8 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count, ve->base.logical_mask |= sibling->logical_mask; if (n != 0 && ve->base.class != sibling->class) { - DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n", - sibling->class, ve->base.class); + guc_dbg(guc, "Invalid mixing of engine class, sibling %d, already %d\n", + sibling->class, ve->base.class); err = -EINVAL; goto err_put; } else if (n == 0) { diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c index e28518fe8b908..d588c32d65c54 100644 --- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c @@ -65,7 +65,7 @@ static int intel_guc_scrub_ctbs(void *arg) ce = intel_context_create(engine); if (IS_ERR(ce)) { ret = PTR_ERR(ce); - drm_err(>->i915->drm, "Failed to create context, %d: %d\n", i, ret); 
+ gt_err(gt, "Failed to create context, %d: %d\n", i, ret); goto err; } @@ -86,7 +86,7 @@ static int intel_guc_scrub_ctbs(void *arg) if (IS_ERR(rq)) { ret = PTR_ERR(rq); - drm_err(>->i915->drm, "Failed to create request, %d: %d\n", i, ret); + gt_err(gt, "Failed to create request, %d: %d\n", i, ret); goto err; } @@ -96,7 +96,7 @@ static int intel_guc_scrub_ctbs(void *arg) for (i = 0; i < 3; ++i) { ret = i915_request_wait(last[i], 0, HZ); if (ret < 0) { - drm_err(>->i915->drm, "Last request failed to complete: %d\n", ret); + gt_err(gt, "Last request failed to complete: %d\n", ret); goto err; } i915_request_put(last[i]); @@ -113,7 +113,7 @@ static int intel_guc_scrub_ctbs(void *arg) /* GT will not idle if G2H are lost */ ret = intel_gt_wait_for_idle(gt, HZ); if (ret < 0) { - drm_err(>->i915->drm, "GT failed to idle: %d\n", ret); + gt_err(gt, "GT failed to idle: %d\n", ret); goto err; } @@ -153,7 +153,7 @@ static int intel_guc_steal_guc_ids(void *arg) ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL); if (!ce) { - drm_err(>->i915->drm, "Context array allocation failed\n"); + guc_err(guc, "Context array allocation failed\n"); return -ENOMEM; } @@ -167,24 +167,24 @@ static int intel_guc_steal_guc_ids(void *arg) if (IS_ERR(ce[context_index])) { ret = PTR_ERR(ce[context_index]); ce[context_index] = NULL; - drm_err(>->i915->drm, "Failed to create context: %d\n", ret); + guc_err(guc, "Failed to create context: %d\n", ret); goto err_wakeref; } ret = igt_spinner_init(&spin, engine->gt); if (ret) { - drm_err(>->i915->drm, "Failed to create spinner: %d\n", ret); + guc_err(guc, "Failed to create spinner: %d\n", ret); goto err_contexts; } spin_rq = igt_spinner_create_request(&spin, ce[context_index], MI_ARB_CHECK); if (IS_ERR(spin_rq)) { ret = PTR_ERR(spin_rq); - drm_err(>->i915->drm, "Failed to create spinner request: %d\n", ret); + guc_err(guc, "Failed to create spinner request: %d\n", ret); goto err_contexts; } ret = request_add_spin(spin_rq, &spin); if (ret) { - drm_err(>->i915->drm, "Failed to add Spinner request: %d\n", ret); + guc_err(guc, "Failed to add Spinner request: %d\n", ret); goto err_spin_rq; } @@ -194,7 +194,7 @@ static int intel_guc_steal_guc_ids(void *arg) if (IS_ERR(ce[context_index])) { ret = PTR_ERR(ce[context_index--]); ce[context_index] = NULL; - drm_err(>->i915->drm, "Failed to create context: %d\n", ret); + guc_err(guc, "Failed to create context: %d\n", ret); goto err_spin_rq; } @@ -203,7 +203,7 @@ static int intel_guc_steal_guc_ids(void *arg) ret = PTR_ERR(rq); rq = NULL; if (ret != -EAGAIN) { - drm_err(>->i915->drm, "Failed to create request, %d: %d\n", + guc_err(guc, "Failed to create request, %d: %d\n", context_index, ret); goto err_spin_rq; } @@ -218,7 +218,7 @@ static int intel_guc_steal_guc_ids(void *arg) igt_spinner_end(&spin); ret = intel_selftest_wait_for_rq(spin_rq); if (ret) { - drm_err(>->i915->drm, "Spin request failed to complete: %d\n", ret); + guc_err(guc, "Spin request failed to complete: %d\n", ret); i915_request_put(last); goto err_spin_rq; } @@ -230,7 +230,7 @@ static int intel_guc_steal_guc_ids(void *arg) ret = i915_request_wait(last, 0, HZ * 30); i915_request_put(last); if (ret < 0) { - drm_err(>->i915->drm, "Last request failed to complete: %d\n", ret); + guc_err(guc, "Last request failed to complete: %d\n", ret); goto err_spin_rq; } @@ -238,7 +238,7 @@ static int intel_guc_steal_guc_ids(void *arg) rq = nop_user_request(ce[context_index], NULL); if (IS_ERR(rq)) { ret = PTR_ERR(rq); - drm_err(>->i915->drm, "Failed to steal guc_id, %d: %d\n", 
context_index, ret); + guc_err(guc, "Failed to steal guc_id, %d: %d\n", context_index, ret); goto err_spin_rq; } @@ -246,20 +246,20 @@ static int intel_guc_steal_guc_ids(void *arg) ret = i915_request_wait(rq, 0, HZ); i915_request_put(rq); if (ret < 0) { - drm_err(>->i915->drm, "Request with stolen guc_id failed to complete: %d\n", ret); + guc_err(guc, "Request with stolen guc_id failed to complete: %d\n", ret); goto err_spin_rq; } /* Wait for idle */ ret = intel_gt_wait_for_idle(gt, HZ * 30); if (ret < 0) { - drm_err(>->i915->drm, "GT failed to idle: %d\n", ret); + guc_err(guc, "GT failed to idle: %d\n", ret); goto err_spin_rq; } /* Verify a guc_id was stolen */ if (guc->number_guc_id_stolen == number_guc_id_stolen) { - drm_err(>->i915->drm, "No guc_id was stolen"); + guc_err(guc, "No guc_id was stolen\n"); ret = -EINVAL; } else { ret = 0; diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c index d91b58f704039..07018ec75c21f 100644 --- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c @@ -45,7 +45,7 @@ static int intel_hang_guc(void *arg) ctx = kernel_context(gt->i915, NULL); if (IS_ERR(ctx)) { - drm_err(>->i915->drm, "Failed get kernel context: %ld\n", PTR_ERR(ctx)); + gt_err(gt, "Failed get kernel context: %ld\n", PTR_ERR(ctx)); return PTR_ERR(ctx); } @@ -54,7 +54,7 @@ static int intel_hang_guc(void *arg) ce = intel_context_create(engine); if (IS_ERR(ce)) { ret = PTR_ERR(ce); - drm_err(>->i915->drm, "Failed to create spinner request: %d\n", ret); + gt_err(gt, "Failed to create spinner request: %d\n", ret); goto err; } @@ -63,13 +63,13 @@ static int intel_hang_guc(void *arg) old_beat = engine->props.heartbeat_interval_ms; ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL); if (ret) { - drm_err(>->i915->drm, "Failed to boost heatbeat interval: %d\n", ret); + gt_err(gt, "Failed to boost heatbeat interval: %d\n", ret); goto err; } ret = igt_spinner_init(&spin, engine->gt); if (ret) { - drm_err(>->i915->drm, "Failed to create spinner: %d\n", ret); + gt_err(gt, "Failed to create spinner: %d\n", ret); goto err; } @@ -77,28 +77,28 @@ static int intel_hang_guc(void *arg) intel_context_put(ce); if (IS_ERR(rq)) { ret = PTR_ERR(rq); - drm_err(>->i915->drm, "Failed to create spinner request: %d\n", ret); + gt_err(gt, "Failed to create spinner request: %d\n", ret); goto err_spin; } ret = request_add_spin(rq, &spin); if (ret) { i915_request_put(rq); - drm_err(>->i915->drm, "Failed to add Spinner request: %d\n", ret); + gt_err(gt, "Failed to add Spinner request: %d\n", ret); goto err_spin; } ret = intel_reset_guc(gt); if (ret) { i915_request_put(rq); - drm_err(>->i915->drm, "Failed to reset GuC, ret = %d\n", ret); + gt_err(gt, "Failed to reset GuC, ret = %d\n", ret); goto err_spin; } guc_status = intel_uncore_read(gt->uncore, GUC_STATUS); if (!(guc_status & GS_MIA_IN_RESET)) { i915_request_put(rq); - drm_err(>->i915->drm, "GuC failed to reset: status = 0x%08X\n", guc_status); + gt_err(gt, "GuC failed to reset: status = 0x%08X\n", guc_status); ret = -EIO; goto err_spin; } @@ -107,12 +107,12 @@ static int intel_hang_guc(void *arg) ret = intel_selftest_wait_for_rq(rq); i915_request_put(rq); if (ret) { - drm_err(>->i915->drm, "Request failed to complete: %d\n", ret); + gt_err(gt, "Request failed to complete: %d\n", ret); goto err_spin; } if (i915_reset_count(global) == reset_count) { - drm_err(>->i915->drm, "Failed to record a GPU reset\n"); + gt_err(gt, "Failed to record a 
GPU reset\n"); ret = -EINVAL; goto err_spin; }
@@ -132,7 +132,7 @@ static int intel_hang_guc(void *arg) ret = intel_selftest_wait_for_rq(rq); i915_request_put(rq); if (ret) { - drm_err(&gt->i915->drm, "No-op failed to complete: %d\n", ret); + gt_err(gt, "No-op failed to complete: %d\n", ret); goto err; } }
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
index d17982c36d256..04e28735a2ee6 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -115,30 +115,30 @@ static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class) parent = multi_lrc_create_parent(gt, class, 0); if (IS_ERR(parent)) { - drm_err(&gt->i915->drm, "Failed creating contexts: %ld", PTR_ERR(parent)); + gt_err(gt, "Failed creating contexts: %ld", PTR_ERR(parent)); return PTR_ERR(parent); } else if (!parent) { - drm_dbg(&gt->i915->drm, "Not enough engines in class: %d", class); + gt_dbg(gt, "Not enough engines in class: %d", class); return 0; } rq = multi_lrc_nop_request(parent); if (IS_ERR(rq)) { ret = PTR_ERR(rq); - drm_err(&gt->i915->drm, "Failed creating requests: %d", ret); + gt_err(gt, "Failed creating requests: %d", ret); goto out; } ret = intel_selftest_wait_for_rq(rq); if (ret) - drm_err(&gt->i915->drm, "Failed waiting on request: %d", ret); + gt_err(gt, "Failed waiting on request: %d", ret); i915_request_put(rq); if (ret >= 0) { ret = intel_gt_wait_for_idle(gt, HZ * 5); if (ret < 0) - drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret); + gt_err(gt, "GT failed to idle: %d\n", ret); } out:
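The guc_dbg/guc_info/guc_warn/guc_err/guc_probe_error helpers used in the conversions above are introduced by an earlier patch in this series and their definitions are not part of this excerpt. As a rough, illustrative sketch only (the exact prefix text and layering are assumptions, not the series' actual code), such wrappers could simply forward to the gt_* helpers through the existing guc_to_gt() accessor:

	/* Illustrative sketch only - not the definitions added by this series. */
	#define guc_err(_guc, _fmt, ...) \
		gt_err(guc_to_gt(_guc), "GuC " _fmt, ##__VA_ARGS__)
	#define guc_warn(_guc, _fmt, ...) \
		gt_warn(guc_to_gt(_guc), "GuC " _fmt, ##__VA_ARGS__)
	#define guc_info(_guc, _fmt, ...) \
		gt_info(guc_to_gt(_guc), "GuC " _fmt, ##__VA_ARGS__)
	#define guc_dbg(_guc, _fmt, ...) \
		gt_dbg(guc_to_gt(_guc), "GuC " _fmt, ##__VA_ARGS__)
	#define guc_probe_error(_guc, _fmt, ...) \
		gt_probe_error(guc_to_gt(_guc), "GuC " _fmt, ##__VA_ARGS__)

With something of that shape in place, a converted call such as guc_err(guc, "Failed to enable usage stats: %d!\n", ret) keeps the original message text, while the wrapper supplies the per-GT/GuC context that the old drm_err() call sites had to spell out by hand.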
From patchwork Fri Nov 18 01:58:57 2022
X-Patchwork-Submitter: John Harrison
X-Patchwork-Id: 13047640
From: John.C.Harrison@Intel.com
To: Intel-GFX@Lists.FreeDesktop.Org
Subject: [PATCH v2 4/5] drm/i915/guc: Add GuC CT specific debug print wrappers
Date: Thu, 17 Nov 2022 17:58:57 -0800
Message-Id: <20221118015858.2548106-5-John.C.Harrison@Intel.com>
In-Reply-To: <20221118015858.2548106-1-John.C.Harrison@Intel.com>
References: <20221118015858.2548106-1-John.C.Harrison@Intel.com>
Cc: John Harrison , DRI-Devel@Lists.FreeDesktop.Org
From: John Harrison
Re-work the existing GuC CT printers and extend as required to match the new wrapping scheme.
Signed-off-by: John Harrison
---
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 222 +++++++++++-----------
1 file changed, 113 insertions(+), 109 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 2b22065e87bf9..9d404fb377637 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -18,31 +18,49 @@ static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) return container_of(ct, struct intel_guc, ct); } -static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct) -{ - return guc_to_gt(ct_to_guc(ct)); -} - static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct) { - return ct_to_gt(ct)->i915; -} + struct intel_guc *guc = ct_to_guc(ct); + struct intel_gt *gt = guc_to_gt(guc); -static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct) -{ - return &ct_to_i915(ct)->drm; + return gt->i915; } -#define CT_ERROR(_ct, _fmt, ...) \ - drm_err(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__) +#define ct_err(_ct, _fmt, ...) \ + guc_err(ct_to_guc(_ct), "CT " _fmt, ##__VA_ARGS__) + +#define ct_warn(_ct, _fmt, ...) \ + guc_warn(ct_to_guc(_ct), "CT " _fmt, ##__VA_ARGS__) + +#define ct_notice(_ct, _fmt, ...) \ + guc_notice(ct_to_guc(_ct), "CT " _fmt, ##__VA_ARGS__) + +#define ct_info(_ct, _fmt, ...) \ + guc_info(ct_to_guc(_ct), "CT " _fmt, ##__VA_ARGS__) + #ifdef CONFIG_DRM_I915_DEBUG_GUC -#define CT_DEBUG(_ct, _fmt, ...) \ - drm_dbg(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__) +#define ct_dbg(_ct, _fmt, ...) \ + guc_dbg(ct_to_guc(_ct), "CT " _fmt, ##__VA_ARGS__) #else -#define CT_DEBUG(...) do { } while (0) +#define ct_dbg(...) do { } while (0) #endif -#define CT_PROBE_ERROR(_ct, _fmt, ...) \ - i915_probe_error(ct_to_i915(ct), "CT: " _fmt, ##__VA_ARGS__) + +#define ct_probe_error(_ct, _fmt, ...) \ + do { \ + if (i915_error_injected()) \ + ct_dbg(_ct, _fmt, ##__VA_ARGS__); \ + else \ + ct_err(_ct, _fmt, ##__VA_ARGS__); \ + } while (0) + +#define ct_WARN_ON(_ct, _condition) \ + ct_WARN(_ct, _condition, "%s", "ct_WARN_ON(" __stringify(_condition) ")") + +#define ct_WARN(_ct, _condition, _fmt, ...) \ + guc_WARN(ct_to_guc(_ct), _condition, "CT " _fmt, ##__VA_ARGS__) + +#define ct_WARN_ONCE(_ct, _condition, _fmt, ...)
\ + guc_WARN_ONCE(ct_to_guc(_ct), _condition, "CT " _fmt, ##__VA_ARGS__) /** * DOC: CTB Blob @@ -170,7 +188,7 @@ static int ct_control_enable(struct intel_guc_ct *ct, bool enable) err = guc_action_control_ctb(ct_to_guc(ct), enable ? GUC_CTB_CONTROL_ENABLE : GUC_CTB_CONTROL_DISABLE); if (unlikely(err)) - CT_PROBE_ERROR(ct, "Failed to control/%s CTB (%pe)\n", + ct_probe_error(ct, "Failed to control/%s CTB (%pe)\n", str_enable_disable(enable), ERR_PTR(err)); return err; @@ -201,7 +219,7 @@ static int ct_register_buffer(struct intel_guc_ct *ct, bool send, size); if (unlikely(err)) failed: - CT_PROBE_ERROR(ct, "Failed to register %s buffer (%pe)\n", + ct_probe_error(ct, "Failed to register %s buffer (%pe)\n", send ? "SEND" : "RECV", ERR_PTR(err)); return err; @@ -235,21 +253,21 @@ int intel_guc_ct_init(struct intel_guc_ct *ct) blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE; err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob); if (unlikely(err)) { - CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n", + ct_probe_error(ct, "Failed to allocate %u for CTB data (%pe)\n", blob_size, ERR_PTR(err)); return err; } - CT_DEBUG(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size); + ct_dbg(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size); /* store pointers to desc and cmds for send ctb */ desc = blob; cmds = blob + 2 * CTB_DESC_SIZE; cmds_size = CTB_H2G_BUFFER_SIZE; resv_space = 0; - CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "send", - ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size, - resv_space); + ct_dbg(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "send", + ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size, + resv_space); guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size, resv_space); @@ -258,9 +276,9 @@ int intel_guc_ct_init(struct intel_guc_ct *ct) cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE; cmds_size = CTB_G2H_BUFFER_SIZE; resv_space = G2H_ROOM_BUFFER_SIZE; - CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "recv", - ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size, - resv_space); + ct_dbg(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "recv", + ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size, + resv_space); guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size, resv_space); @@ -338,7 +356,7 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct) return 0; err_out: - CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err)); + ct_probe_error(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err)); return err; } @@ -387,14 +405,12 @@ static int ct_write(struct intel_guc_ct *ct, #ifdef CONFIG_DRM_I915_DEBUG_GUC if (unlikely(tail != READ_ONCE(desc->tail))) { - CT_ERROR(ct, "Tail was modified %u != %u\n", - desc->tail, tail); + ct_err(ct, "Tail was modified %u != %u\n", desc->tail, tail); desc->status |= GUC_CTB_STATUS_MISMATCH; goto corrupted; } if (unlikely(READ_ONCE(desc->head) >= size)) { - CT_ERROR(ct, "Invalid head offset %u >= %u)\n", - desc->head, size); + ct_err(ct, "Invalid head offset %u >= %u)\n", desc->head, size); desc->status |= GUC_CTB_STATUS_OVERFLOW; goto corrupted; } @@ -415,8 +431,8 @@ static int ct_write(struct intel_guc_ct *ct, FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | GUC_HXG_EVENT_MSG_0_DATA0, action[0]); - CT_DEBUG(ct, "writing (tail %u) %*ph %*ph %*ph\n", - tail, 4, &header, 4, &hxg, 4 * (len - 1), &action[1]); + ct_dbg(ct, "writing (tail %u) %*ph %*ph %*ph\n", + tail, 4, &header, 4, &hxg, 4 * (len - 1), &action[1]); cmds[tail] = header; tail 
= (tail + 1) % size; @@ -447,8 +463,8 @@ static int ct_write(struct intel_guc_ct *ct, return 0; corrupted: - CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n", - desc->head, desc->tail, desc->status); + ct_err(ct, "Corrupted descriptor on write head=%u tail=%u status=%#x\n", + desc->head, desc->tail, desc->status); ctb->broken = true; return -EPIPE; } @@ -507,17 +523,14 @@ static inline bool ct_deadlocked(struct intel_guc_ct *ct) struct guc_ct_buffer_desc *send = ct->ctbs.send.desc; struct guc_ct_buffer_desc *recv = ct->ctbs.send.desc; - CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n", - ktime_ms_delta(ktime_get(), ct->stall_time), - send->status, recv->status); - CT_ERROR(ct, "H2G Space: %u (Bytes)\n", - atomic_read(&ct->ctbs.send.space) * 4); - CT_ERROR(ct, "Head: %u (Dwords)\n", ct->ctbs.send.desc->head); - CT_ERROR(ct, "Tail: %u (Dwords)\n", ct->ctbs.send.desc->tail); - CT_ERROR(ct, "G2H Space: %u (Bytes)\n", - atomic_read(&ct->ctbs.recv.space) * 4); - CT_ERROR(ct, "Head: %u\n (Dwords)", ct->ctbs.recv.desc->head); - CT_ERROR(ct, "Tail: %u\n (Dwords)", ct->ctbs.recv.desc->tail); + ct_err(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n", + ktime_ms_delta(ktime_get(), ct->stall_time), send->status, recv->status); + ct_err(ct, "H2G Space: %u (Bytes)\n", atomic_read(&ct->ctbs.send.space) * 4); + ct_err(ct, "Head: %u (Dwords)\n", ct->ctbs.send.desc->head); + ct_err(ct, "Tail: %u (Dwords)\n", ct->ctbs.send.desc->tail); + ct_err(ct, "G2H Space: %u (Bytes)\n", atomic_read(&ct->ctbs.recv.space) * 4); + ct_err(ct, "Head: %u\n (Dwords)", ct->ctbs.recv.desc->head); + ct_err(ct, "Tail: %u\n (Dwords)", ct->ctbs.recv.desc->tail); ct->ctbs.send.broken = true; } @@ -563,8 +576,7 @@ static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw) head = READ_ONCE(desc->head); if (unlikely(head > ctb->size)) { - CT_ERROR(ct, "Invalid head offset %u >= %u)\n", - head, ctb->size); + ct_err(ct, "Invalid head offset %u >= %u)\n", head, ctb->size); desc->status |= GUC_CTB_STATUS_OVERFLOW; ctb->broken = true; return false; @@ -715,17 +727,17 @@ static int ct_send(struct intel_guc_ct *ct, /* wait_for_ct_request_update returns -ENODEV on reset/suspend in progress. 
* In this case, output is debug rather than error info */ - CT_DEBUG(ct, "Request %#x (fence %u) cancelled as CTB is disabled\n", - action[0], request.fence); + ct_dbg(ct, "Request %#x (fence %u) cancelled as CTB is disabled\n", + action[0], request.fence); else - CT_ERROR(ct, "No response for request %#x (fence %u)\n", - action[0], request.fence); + ct_err(ct, "No response for request %#x (fence %u)\n", + action[0], request.fence); goto unlink; } if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) { - CT_DEBUG(ct, "retrying request %#x (%u)\n", *action, - FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, *status)); + ct_dbg(ct, "retrying request %#x (%u)\n", *action, + FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, *status)); send_again = true; goto unlink; } @@ -737,12 +749,12 @@ static int ct_send(struct intel_guc_ct *ct, if (response_buf) { /* There shall be no data in the status */ - WARN_ON(FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, request.status)); + ct_WARN_ON(ct, FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, request.status)); /* Return actual response len */ err = request.response_len; } else { /* There shall be no response payload */ - WARN_ON(request.response_len); + ct_WARN_ON(ct, request.response_len); /* Return data decoded from the status dword */ err = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, *status); } @@ -771,7 +783,7 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len, struct intel_guc *guc = ct_to_guc(ct); struct intel_uc *uc = container_of(guc, struct intel_uc, guc); - WARN(!uc->reset_in_progress, "Unexpected send: action=%#x\n", *action); + ct_WARN(ct, !uc->reset_in_progress, "Unexpected send: action=%#x\n", *action); return -ENODEV; } @@ -784,11 +796,11 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len, ret = ct_send(ct, action, len, response_buf, response_buf_size, &status); if (unlikely(ret < 0)) { if (ret != -ENODEV) - CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n", - action[0], ERR_PTR(ret), status); + ct_err(ct, "sending action %#x failed (%pe) status=%#X\n", + action[0], ERR_PTR(ret), status); } else if (unlikely(ret)) { - CT_DEBUG(ct, "send action %#x returned %d (%#x)\n", - action[0], ret, ret); + ct_dbg(ct, "send action %#x returned %d (%#x)\n", + action[0], ret, ret); } return ret; @@ -838,7 +850,7 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg) * contexts/engines being reset. But should never happen as * no contexts should be active when CLIENT_RESET is sent. 
*/ - CT_ERROR(ct, "Unexpected G2H after GuC has stopped!\n"); + ct_err(ct, "Unexpected G2H after GuC has stopped!\n"); status &= ~GUC_CTB_STATUS_UNUSED; } @@ -850,15 +862,13 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg) #ifdef CONFIG_DRM_I915_DEBUG_GUC if (unlikely(head != READ_ONCE(desc->head))) { - CT_ERROR(ct, "Head was modified %u != %u\n", - desc->head, head); + ct_err(ct, "Head was modified %u != %u\n", desc->head, head); desc->status |= GUC_CTB_STATUS_MISMATCH; goto corrupted; } #endif if (unlikely(tail >= size)) { - CT_ERROR(ct, "Invalid tail offset %u >= %u)\n", - tail, size); + ct_err(ct, "Invalid tail offset %u >= %u)\n", tail, size); desc->status |= GUC_CTB_STATUS_OVERFLOW; goto corrupted; } @@ -873,7 +883,7 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg) /* beware of buffer wrap case */ if (unlikely(available < 0)) available += size; - CT_DEBUG(ct, "available %d (%u:%u:%u)\n", available, head, tail, size); + ct_dbg(ct, "read available %d (%u:%u:%u)\n", available, head, tail, size); GEM_BUG_ON(available < 0); header = cmds[head]; @@ -882,24 +892,24 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg) /* message len with header */ len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, header) + GUC_CTB_MSG_MIN_LEN; if (unlikely(len > (u32)available)) { - CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n", - 4, &header, - 4 * (head + available - 1 > size ? - size - head : available - 1), &cmds[head], - 4 * (head + available - 1 > size ? - available - 1 - size + head : 0), &cmds[0]); + ct_err(ct, "Incomplete message %*ph %*ph %*ph\n", + 4, &header, + 4 * (head + available - 1 > size ? + size - head : available - 1), &cmds[head], + 4 * (head + available - 1 > size ? + available - 1 - size + head : 0), &cmds[0]); desc->status |= GUC_CTB_STATUS_UNDERFLOW; goto corrupted; } *msg = ct_alloc_msg(len); if (!*msg) { - CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n", - 4, &header, - 4 * (head + available - 1 > size ? - size - head : available - 1), &cmds[head], - 4 * (head + available - 1 > size ? - available - 1 - size + head : 0), &cmds[0]); + ct_err(ct, "No memory for message %*ph %*ph %*ph\n", + 4, &header, + 4 * (head + available - 1 > size ? + size - head : available - 1), &cmds[head], + 4 * (head + available - 1 > size ? 
+ available - 1 - size + head : 0), &cmds[0]); return available; } @@ -909,7 +919,7 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg) (*msg)->msg[i] = cmds[head]; head = (head + 1) % size; } - CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg); + ct_dbg(ct, "received %*ph\n", 4 * len, (*msg)->msg); /* update local copies */ ctb->head = head; @@ -920,8 +930,8 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg) return available - len; corrupted: - CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n", - desc->head, desc->tail, desc->status); + ct_err(ct, "Corrupted descriptor on read head=%u tail=%u status=%#x\n", + desc->head, desc->tail, desc->status); ctb->broken = true; return -EPIPE; } @@ -944,18 +954,17 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_NO_RESPONSE_RETRY && FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_FAILURE); - CT_DEBUG(ct, "response fence %u status %#x\n", fence, hxg[0]); + ct_dbg(ct, "response fence %u status %#x\n", fence, hxg[0]); spin_lock_irqsave(&ct->requests.lock, flags); list_for_each_entry(req, &ct->requests.pending, link) { if (unlikely(fence != req->fence)) { - CT_DEBUG(ct, "request %u awaits response\n", - req->fence); + ct_dbg(ct, "request %u awaits response\n", req->fence); continue; } if (unlikely(datalen > req->response_len)) { - CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n", - req->fence, datalen, req->response_len); + ct_err(ct, "response %u too long (datalen %u > %u)\n", + req->fence, datalen, req->response_len); datalen = min(datalen, req->response_len); err = -EMSGSIZE; } @@ -967,12 +976,11 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r break; } if (!found) { - CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence); - CT_ERROR(ct, "Could not find fence=%u, last_fence=%u\n", fence, - ct->requests.last_fence); + ct_err(ct, "Unsolicited response (fence %u)\n", fence); + ct_err(ct, "Could not find fence=%u, last_fence=%u\n", fence, + ct->requests.last_fence); list_for_each_entry(req, &ct->requests.pending, link) - CT_ERROR(ct, "request %u awaits response\n", - req->fence); + ct_err(ct, "request %u awaits response\n", req->fence); err = -ENOKEY; } spin_unlock_irqrestore(&ct->requests.lock, flags); @@ -998,7 +1006,7 @@ static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *r action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]); len = hxg_len - GUC_HXG_MSG_MIN_LEN; - CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload); + ct_dbg(ct, "request %x %*ph\n", action, 4 * len, payload); switch (action) { case INTEL_GUC_ACTION_DEFAULT: @@ -1016,9 +1024,6 @@ static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *r break; case INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION: ret = intel_guc_error_capture_process_msg(guc, payload, len); - if (unlikely(ret)) - CT_ERROR(ct, "error capture notification failed %x %*ph\n", - action, 4 * len, payload); break; case INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION: ret = intel_guc_engine_failure_process_msg(guc, payload, len); @@ -1028,11 +1033,11 @@ static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *r ret = 0; break; case INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED: - CT_ERROR(ct, "Received GuC crash dump notification!\n"); + guc_err(guc, "notification: Crash dump!\n"); ret = 0; break; case INTEL_GUC_ACTION_NOTIFY_EXCEPTION: - 
CT_ERROR(ct, "Received GuC exception notification!\n"); + guc_err(guc, "notification: Exception!\n"); ret = 0; break; default:
@@ -1041,8 +1046,7 @@ static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *r } if (unlikely(ret)) { - CT_ERROR(ct, "Failed to process request %04x (%pe)\n", - action, ERR_PTR(ret)); + ct_err(ct, "Failed to process request %04x (%pe)\n", action, ERR_PTR(ret)); return ret; }
@@ -1070,8 +1074,8 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct) err = ct_process_request(ct, request); if (unlikely(err)) { - CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n", - ERR_PTR(err), 4 * request->size, request->msg); + ct_err(ct, "Failed to process message (%pe) %*ph\n", + ERR_PTR(err), 4 * request->size, request->msg); ct_free_msg(request); }
@@ -1149,8 +1153,8 @@ static int ct_handle_hxg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg) if (unlikely(err)) { failed: - CT_ERROR(ct, "Failed to handle HXG message (%pe) %*ph\n", - ERR_PTR(err), 4 * GUC_HXG_MSG_MIN_LEN, hxg); + ct_err(ct, "Failed to handle HXG message (%pe) %*ph\n", + ERR_PTR(err), 4 * GUC_HXG_MSG_MIN_LEN, hxg); } return err; }
@@ -1166,8 +1170,8 @@ static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg) err = -EOPNOTSUPP; if (unlikely(err)) { - CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n", - ERR_PTR(err), 4 * msg->size, msg->msg); + ct_err(ct, "Failed to handle message (%pe) %*ph\n", + ERR_PTR(err), 4 * msg->size, msg->msg); ct_free_msg(msg); } }
@@ -1198,7 +1202,7 @@ static void ct_try_receive_message(struct intel_guc_ct *ct) { int ret; - if (GEM_WARN_ON(!ct->enabled)) + if (ct_WARN_ON(ct, !ct->enabled)) return; ret = ct_receive(ct);
@@ -1220,7 +1224,7 @@ static void ct_receive_tasklet_func(struct tasklet_struct *t) void intel_guc_ct_event_handler(struct intel_guc_ct *ct) { if (unlikely(!ct->enabled)) { - WARN(1, "Unexpected GuC event received while CT disabled!\n"); + ct_warn(ct, "Unexpected event received while disabled!\n"); return; }
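To illustrate the effect of the CT rework above (macro names as defined in the hunks of this patch), a path that previously printed through the local CT_ERROR() wrapper, for example

	CT_ERROR(ct, "Failed to process request %04x (%pe)\n", action, ERR_PTR(ret));

now goes through

	ct_err(ct, "Failed to process request %04x (%pe)\n", action, ERR_PTR(ret));

which expands to guc_err(ct_to_guc(ct), "CT " ...), so the message is routed through the GuC-level wrapper rather than being printed with drm_err() against the bare drm device. Likewise, the new ct_probe_error() mirrors what i915_probe_error() did for CT_PROBE_ERROR(): when i915_error_injected() reports a deliberately injected probe failure, the print is demoted to ct_dbg() instead of ct_err().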
From patchwork Fri Nov 18 01:58:58 2022
X-Patchwork-Submitter: John Harrison
X-Patchwork-Id: 13047639
From: John.C.Harrison@Intel.com
To: Intel-GFX@Lists.FreeDesktop.Org
Subject: [PATCH v2 5/5] drm/i915/uc: Update the gt/uc code to use gt_err and friends
Date: Thu, 17 Nov 2022 17:58:58 -0800
Message-Id: <20221118015858.2548106-6-John.C.Harrison@Intel.com>
In-Reply-To: <20221118015858.2548106-1-John.C.Harrison@Intel.com>
References: <20221118015858.2548106-1-John.C.Harrison@Intel.com>
Cc: John Harrison , DRI-Devel@Lists.FreeDesktop.Org
From: John Harrison
Use the new module oriented output message helpers where possible.
Signed-off-by: John Harrison
---
drivers/gpu/drm/i915/gt/uc/intel_uc.c | 108 +++++++++++------------
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 98 ++++++++++----------
2 files changed, 99 insertions(+), 107 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 1d28286e6f066..269be95625342 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -65,29 +65,29 @@ static int __intel_uc_reset_hw(struct intel_uc *uc) ret = intel_reset_guc(gt); if (ret) { - DRM_ERROR("Failed to reset GuC, ret = %d\n", ret); + gt_err(gt, "Failed to reset GuC, ret = %d\n", ret); return ret; } guc_status = intel_uncore_read(gt->uncore, GUC_STATUS); - WARN(!(guc_status & GS_MIA_IN_RESET), - "GuC status: 0x%x, MIA core expected to be in reset\n", - guc_status); + gt_WARN(gt, !(guc_status & GS_MIA_IN_RESET), + "GuC status: 0x%x, MIA core expected to be in reset\n", + guc_status); return ret; } static void __confirm_options(struct intel_uc *uc) { - struct drm_i915_private *i915 = uc_to_gt(uc)->i915; + struct intel_gt *gt = uc_to_gt(uc); + struct drm_i915_private *i915 = gt->i915; - drm_dbg(&i915->drm, - "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n", - i915->params.enable_guc, - str_yes_no(intel_uc_wants_guc(uc)), - str_yes_no(intel_uc_wants_guc_submission(uc)), - str_yes_no(intel_uc_wants_huc(uc)), - str_yes_no(intel_uc_wants_guc_slpc(uc))); + gt_dbg(gt, "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n", + i915->params.enable_guc, + str_yes_no(intel_uc_wants_guc(uc)), + str_yes_no(intel_uc_wants_guc_submission(uc)), + str_yes_no(intel_uc_wants_huc(uc)), + str_yes_no(intel_uc_wants_guc_slpc(uc))); if (i915->params.enable_guc == 0) { GEM_BUG_ON(intel_uc_wants_guc(uc));
@@ -98,26 +98,22 @@ static void __confirm_options(struct intel_uc *uc) } if (!intel_uc_supports_guc(uc)) - drm_info(&i915->drm, - "Incompatible option enable_guc=%d - %s\n", -
i915->params.enable_guc, "GuC is not supported!"); + gt_info(gt, "Incompatible option enable_guc=%d - %s\n", + i915->params.enable_guc, "GuC is not supported!"); if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC && !intel_uc_supports_huc(uc)) - drm_info(&i915->drm, - "Incompatible option enable_guc=%d - %s\n", - i915->params.enable_guc, "HuC is not supported!"); + gt_info(gt, "Incompatible option enable_guc=%d - %s\n", + i915->params.enable_guc, "HuC is not supported!"); if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION && !intel_uc_supports_guc_submission(uc)) - drm_info(&i915->drm, - "Incompatible option enable_guc=%d - %s\n", - i915->params.enable_guc, "GuC submission is N/A"); + gt_info(gt, "Incompatible option enable_guc=%d - %s\n", + i915->params.enable_guc, "GuC submission is N/A"); if (i915->params.enable_guc & ~ENABLE_GUC_MASK) - drm_info(&i915->drm, - "Incompatible option enable_guc=%d - %s\n", - i915->params.enable_guc, "undocumented flag"); + gt_info(gt, "Incompatible option enable_guc=%d - %s\n", + i915->params.enable_guc, "undocumented flag"); } void intel_uc_init_early(struct intel_uc *uc) @@ -249,15 +245,13 @@ static int guc_enable_communication(struct intel_guc *guc) intel_guc_ct_event_handler(&guc->ct); spin_unlock_irq(gt->irq_lock); - drm_dbg(&i915->drm, "GuC communication enabled\n"); + guc_dbg(guc, "communication enabled\n"); return 0; } static void guc_disable_communication(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; - /* * Events generated during or after CT disable are logged by guc in * via mmio. Make sure the register is clear before disabling CT since @@ -277,7 +271,7 @@ static void guc_disable_communication(struct intel_guc *guc) */ guc_get_mmio_msg(guc); - drm_dbg(&i915->drm, "GuC communication disabled\n"); + guc_dbg(guc, "communication disabled\n"); } static void __uc_fetch_firmwares(struct intel_uc *uc) @@ -290,8 +284,8 @@ static void __uc_fetch_firmwares(struct intel_uc *uc) if (err) { /* Make sure we transition out of transient "SELECTED" state */ if (intel_uc_wants_huc(uc)) { - drm_dbg(&uc_to_gt(uc)->i915->drm, - "Failed to fetch GuC: %d disabling HuC\n", err); + gt_dbg(uc_to_gt(uc), + "Failed to fetch GuC: %d disabling HuC\n", err); intel_uc_fw_change_status(&uc->huc.fw, INTEL_UC_FIRMWARE_ERROR); } @@ -364,7 +358,7 @@ static int uc_init_wopcm(struct intel_uc *uc) int err; if (unlikely(!base || !size)) { - i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n"); + gt_probe_error(gt, "Unsuccessful WOPCM partitioning\n"); return -E2BIG; } @@ -395,13 +389,13 @@ static int uc_init_wopcm(struct intel_uc *uc) return 0; err_out: - i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n"); - i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET", - i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET), - intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET)); - i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE", - i915_mmio_reg_offset(GUC_WOPCM_SIZE), - intel_uncore_read(uncore, GUC_WOPCM_SIZE)); + gt_probe_error(gt, "Failed to init uC WOPCM registers!\n"); + gt_probe_error(gt, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET", + i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET), + intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET)); + gt_probe_error(gt, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE", + i915_mmio_reg_offset(GUC_WOPCM_SIZE), + intel_uncore_read(uncore, GUC_WOPCM_SIZE)); return err; } @@ -433,18 +427,19 @@ static int __uc_check_hw(struct intel_uc *uc) static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw) 
{ - struct drm_i915_private *i915 = uc_to_gt(uc)->i915; + struct intel_gt *gt = uc_to_gt(uc); - drm_info(&i915->drm, "%s firmware %s version %u.%u.%u\n", - intel_uc_fw_type_repr(fw->type), fw->file_selected.path, - fw->file_selected.major_ver, - fw->file_selected.minor_ver, - fw->file_selected.patch_ver); + gt_info(gt, "%s firmware %s version %u.%u.%u\n", + intel_uc_fw_type_repr(fw->type), fw->file_selected.path, + fw->file_selected.major_ver, + fw->file_selected.minor_ver, + fw->file_selected.patch_ver); } static int __uc_init_hw(struct intel_uc *uc) { - struct drm_i915_private *i915 = uc_to_gt(uc)->i915; + struct intel_gt *gt = uc_to_gt(uc); + struct drm_i915_private *i915 = gt->i915; struct intel_guc *guc = &uc->guc; struct intel_huc *huc = &uc->huc; int ret, attempts; @@ -496,8 +491,8 @@ static int __uc_init_hw(struct intel_uc *uc) if (ret == 0) break; - DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and " - "retry %d more time(s)\n", ret, attempts); + gt_dbg(gt, "GuC fw load failed: %d; will reset and retry %d more time(s)\n", + ret, attempts); } /* Did we succeded or run out of retries? */ @@ -531,10 +526,10 @@ static int __uc_init_hw(struct intel_uc *uc) intel_rps_lower_unslice(&uc_to_gt(uc)->rps); } - drm_info(&i915->drm, "GuC submission %s\n", - str_enabled_disabled(intel_uc_uses_guc_submission(uc))); - drm_info(&i915->drm, "GuC SLPC %s\n", - str_enabled_disabled(intel_uc_uses_guc_slpc(uc))); + gt_info(gt, "GuC submission %s\n", + str_enabled_disabled(intel_uc_uses_guc_submission(uc))); + gt_info(gt, "GuC SLPC %s\n", + str_enabled_disabled(intel_uc_uses_guc_slpc(uc))); return 0; @@ -552,12 +547,12 @@ static int __uc_init_hw(struct intel_uc *uc) __uc_sanitize(uc); if (!ret) { - drm_notice(&i915->drm, "GuC is uninitialized\n"); + gt_notice(gt, "GuC is uninitialized\n"); /* We want to run without GuC submission */ return 0; } - i915_probe_error(i915, "GuC initialization failed %d\n", ret); + gt_probe_error(gt, "GuC initialization failed %d\n", ret); /* We want to keep KMS alive */ return -EIO; @@ -656,6 +651,7 @@ void intel_uc_runtime_suspend(struct intel_uc *uc) void intel_uc_suspend(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; + struct intel_gt *gt = uc_to_gt(uc); intel_wakeref_t wakeref; int err; @@ -664,10 +660,10 @@ void intel_uc_suspend(struct intel_uc *uc) return; } - with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) { + with_intel_runtime_pm(>->i915->runtime_pm, wakeref) { err = intel_guc_suspend(guc); if (err) - DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err); + guc_dbg(guc, "failed to suspend, err=%d\n", err); } } @@ -695,7 +691,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication) err = intel_guc_resume(guc); if (err) { - DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err); + guc_dbg(guc, "failed to resume, err=%d\n", err); return err; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 0c80ba51a4bdc..9f10b66599376 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -11,6 +11,7 @@ #include #include "gem/i915_gem_lmem.h" +#include "gt/intel_gt.h" #include "intel_uc_fw.h" #include "intel_uc_fw_abi.h" #include "i915_drv.h" @@ -37,11 +38,10 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, enum intel_uc_fw_status status) { uc_fw->__status = status; - drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm, - "%s firmware -> %s\n", - intel_uc_fw_type_repr(uc_fw->type), - status == INTEL_UC_FIRMWARE_SELECTED ? 
- uc_fw->file_selected.path : intel_uc_fw_status_repr(status)); + gt_dbg(__uc_fw_to_gt(uc_fw), "%s firmware -> %s\n", + intel_uc_fw_type_repr(uc_fw->type), + status == INTEL_UC_FIRMWARE_SELECTED ? + uc_fw->file_selected.path : intel_uc_fw_status_repr(status)); } #endif @@ -482,15 +482,14 @@ static int check_ccs_header(struct intel_gt *gt, const struct firmware *fw, struct intel_uc_fw *uc_fw) { - struct drm_i915_private *i915 = gt->i915; struct uc_css_header *css; size_t size; /* Check the size of the blob before examining buffer contents */ if (unlikely(fw->size < sizeof(struct uc_css_header))) { - drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, - fw->size, sizeof(struct uc_css_header)); + gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, + fw->size, sizeof(struct uc_css_header)); return -ENODATA; } @@ -500,10 +499,9 @@ static int check_ccs_header(struct intel_gt *gt, size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw - css->exponent_size_dw) * sizeof(u32); if (unlikely(size != sizeof(struct uc_css_header))) { - drm_warn(&i915->drm, - "%s firmware %s: unexpected header size: %zu != %zu\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, - fw->size, sizeof(struct uc_css_header)); + gt_warn(gt, "%s firmware %s: unexpected header size: %zu != %zu\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, + fw->size, sizeof(struct uc_css_header)); return -EPROTO; } @@ -516,18 +514,18 @@ static int check_ccs_header(struct intel_gt *gt, /* At least, it should have header, uCode and RSA. Size of all three. */ size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size; if (unlikely(fw->size < size)) { - drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, - fw->size, size); + gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, + fw->size, size); return -ENOEXEC; } /* Sanity check whether this fw is not larger than whole WOPCM memory */ size = __intel_uc_fw_get_upload_size(uc_fw); if (unlikely(size >= gt->wopcm.size)) { - drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, - size, (size_t)gt->wopcm.size); + gt_warn(gt, "%s firmware %s: invalid size: %zu > %zu\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, + size, (size_t)gt->wopcm.size); return -E2BIG; } @@ -578,10 +576,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal)); if (!err && fw->size > INTEL_UC_RSVD_GGTT_PER_FW) { - drm_err(&i915->drm, - "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, - fw->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K); + gt_err(gt, "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, + fw->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K); /* try to find another blob to load */ release_firmware(fw); @@ -624,10 +621,10 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) if (uc_fw->file_wanted.major_ver) { /* Check the file's major version was as it claimed */ if (uc_fw->file_selected.major_ver != uc_fw->file_wanted.major_ver) { - 
drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, - uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver, - uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver); + gt_notice(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, + uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver, + uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver); if (!intel_uc_fw_is_overridden(uc_fw)) { err = -ENOEXEC; goto fail; @@ -642,16 +639,14 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) /* Preserve the version that was really wanted */ memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted)); - drm_notice(&i915->drm, - "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n", - intel_uc_fw_type_repr(uc_fw->type), - uc_fw->file_wanted.path, - uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver, - uc_fw->file_selected.path, - uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver); - drm_info(&i915->drm, - "Consider updating your linux-firmware pkg or downloading from %s\n", - INTEL_UC_FIRMWARE_URL); + gt_notice(gt, "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n", + intel_uc_fw_type_repr(uc_fw->type), + uc_fw->file_wanted.path, + uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver, + uc_fw->file_selected.path, + uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver); + gt_info(gt, "Consider updating your linux-firmware pkg or downloading from %s\n", + INTEL_UC_FIRMWARE_URL); } if (HAS_LMEM(i915)) { @@ -679,10 +674,10 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) INTEL_UC_FIRMWARE_MISSING : INTEL_UC_FIRMWARE_ERROR); - i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, err); - drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n", - intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL); + gt_probe_error(gt, "%s firmware %s: fetch failed with error %d\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, err); + gt_info(gt, "%s firmware(s) can be downloaded from %s\n", + intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL); release_firmware(fw); /* OK even if fw is NULL */ return err; @@ -788,9 +783,9 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags) /* Wait for DMA to finish */ ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100); if (ret) - drm_err(>->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uncore_read_fw(uncore, DMA_CTRL)); + gt_err(gt, "DMA for %s fw failed, DMA_CTRL=%u\n", + intel_uc_fw_type_repr(uc_fw->type), + intel_uncore_read_fw(uncore, DMA_CTRL)); /* Disable the bits once DMA is over */ intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags)); @@ -836,9 +831,9 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags) return 0; fail: - i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, - err); + gt_probe_error(gt, "Failed to load %s firmware %s (%d)\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, + err); intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL); return err; } @@ -914,6 +909,7 @@ static void uc_fw_rsa_data_destroy(struct 
intel_uc_fw *uc_fw) int intel_uc_fw_init(struct intel_uc_fw *uc_fw) { + struct intel_gt *gt = __uc_fw_to_gt(uc_fw); int err; /* this should happen before the load! */ @@ -924,15 +920,15 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw) err = i915_gem_object_pin_pages_unlocked(uc_fw->obj); if (err) { - DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n", - intel_uc_fw_type_repr(uc_fw->type), err); + gt_dbg(gt, "%s fw pin-pages err=%d\n", + intel_uc_fw_type_repr(uc_fw->type), err); goto out; } err = uc_fw_rsa_data_create(uc_fw); if (err) { - DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n", - intel_uc_fw_type_repr(uc_fw->type), err); + gt_dbg(gt, "%s fw rsa data creation failed, err=%d\n", + intel_uc_fw_type_repr(uc_fw->type), err); goto out_unpin; }
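The gt_dbg/gt_info/gt_notice/gt_warn/gt_err/gt_probe_error/gt_WARN helpers that the hunks above convert to are added earlier in this series and their definitions are not reproduced in this excerpt. A minimal sketch of the shape implied by the conversions, assuming the GT index is available as gt->info.id and that probe errors get the same error-injection demotion as the ct_probe_error() wrapper shown in the previous patch (illustrative only, not the series' actual code):

	/* Illustrative sketch only - not the definitions added by this series. */
	#define gt_err(_gt, _fmt, ...) \
		drm_err(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
	#define gt_warn(_gt, _fmt, ...) \
		drm_warn(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
	#define gt_dbg(_gt, _fmt, ...) \
		drm_dbg(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
	#define gt_probe_error(_gt, _fmt, ...) \
		do { \
			if (i915_error_injected()) \
				gt_dbg(_gt, _fmt, ##__VA_ARGS__); \
			else \
				gt_err(_gt, _fmt, ##__VA_ARGS__); \
		} while (0)

Under that assumption, a converted call such as gt_probe_error(gt, "GuC initialization failed %d\n", ret) behaves like the old i915_probe_error() call but is attributed to the specific GT that failed.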