[v9,4/6] drm/xe/vsec: Support BMG devices

Message ID: 20240725122346.4063913-5-michael.j.ruhl@intel.com (mailing list archive)
State: Changes Requested, archived
Series: Support PMT features in Xe

Commit Message

Michael J. Ruhl July 25, 2024, 12:23 p.m. UTC
Utilize the Platform Monitoring Technology (PMT) callback API to add
telemetry support for Battlemage (BMG) discrete graphics devices.

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
---
 drivers/gpu/drm/xe/Makefile          |   1 +
 drivers/gpu/drm/xe/xe_device.c       |   5 +
 drivers/gpu/drm/xe/xe_device_types.h |   6 +
 drivers/gpu/drm/xe/xe_vsec.c         | 222 +++++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_vsec.h         |  13 ++
 5 files changed, 247 insertions(+)
 create mode 100644 drivers/gpu/drm/xe/xe_vsec.c
 create mode 100644 drivers/gpu/drm/xe/xe_vsec.h
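
At a glance, the new xe_vsec.c does three things: it describes where the BMG
telemetry discovery table sits in the graphics BAR (struct intel_vsec_header),
it supplies a read_telem callback that decodes the telemetry GUID and copies
data out of the device's MMIO space under pmt.lock, and it registers both with
the intel_vsec core at probe time. A condensed sketch of that wiring, pulled
from the diff below (platform checks and error handling omitted, so this is a
reading aid rather than a substitute for the patch):

	static struct intel_vsec_header bmg_telemetry = {
		.length = 0x10,
		.id = VSEC_ID_TELEMETRY,
		.num_entries = 2,
		.entry_size = 4,
		.tbir = GFX_BAR,		/* discovery table lives in BAR 0 */
		.offset = BMG_DISCOVERY_OFFSET,	/* SOC_BASE + BMG_PMT_BASE */
	};

	static struct pmt_callbacks xe_pmt_cb = {
		.read_telem = xe_pmt_telem_read,	/* GUID-based telemetry reads */
	};

	void xe_vsec_init(struct xe_device *xe)
	{
		struct intel_vsec_platform_info *info = &xe_vsec_info[XE_VSEC_BMG];

		info->priv_data = &xe_pmt_cb;
		/* cleanup is device managed by the intel_vsec core */
		intel_vsec_register(to_pci_dev(xe->drm.dev), info);
	}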

Comments

David E. Box Aug. 7, 2024, 7:23 p.m. UTC | #1
Hi Mike,

Reviewed-by: David E. Box <david.e.box@linux.intel.com>

On Thu, 2024-07-25 at 08:23 -0400, Michael J. Ruhl wrote:
> Utilize the PMT callback API to add support for the BMG
> devices.
> 
> Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
> Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Ilpo Järvinen Aug. 12, 2024, 9:01 a.m. UTC | #2
On Thu, 25 Jul 2024, Michael J. Ruhl wrote:

> Utilize the PMT callback API to add support for the BMG
> devices.

The shortlog and commit message are a bit terse on the details of what this
change is about; it's all hidden in the acronyms :-).

> Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
> Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
> ---

> +static int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, u32 count)
> +{
> +	struct xe_device *xe = pdev_to_xe_device(pdev);
> +	void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
> +	u32 mem_region;
> +	u32 offset;
> +	int ret;
> +
> +	ret = guid_decode(guid, &mem_region, &offset);
> +	if (ret)
> +		return ret;
> +
> +	telem_addr += offset;
> +
> +	mutex_lock(&xe->pmt.lock);
> +
> +	/* indicate that we are not at an appropriate power level */
> +	ret = -ENODATA;
> +	if (xe_pm_runtime_get_if_active(xe) > 0) {

xe_pm_runtime_get_if_active() returns bool so > 0 looks odd. In fact, 
active > 0 compare is already done by that called function so perhaps you 
mixed up what kind of value is returned by xe_pm_runtime_get_if_active().

Also, I'd restructure this logic with guard & use of reverse logic.

	guard(mutex)(&xe->pmt.lock);

	/* indicate that we are not at an appropriate power level */
	if (!xe_pm_runtime_get_if_active(xe))
		return -ENODATA;

	... the rest of the code de-indented by one level (minus the 
	mutex_unlock() which is no longer needed because guard() is used).
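
Roughly, with the bool check fixed as well, the whole function would then look
like this (untested sketch; guard() is the scope-based lock helper from
<linux/cleanup.h>, and the mutex class for it is defined in <linux/mutex.h>):

	static int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, u32 count)
	{
		struct xe_device *xe = pdev_to_xe_device(pdev);
		void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
		u32 mem_region;
		u32 offset;
		int ret;

		ret = guid_decode(guid, &mem_region, &offset);
		if (ret)
			return ret;

		telem_addr += offset;

		/* pmt.lock is dropped automatically on every return path below */
		guard(mutex)(&xe->pmt.lock);

		/* indicate that we are not at an appropriate power level */
		if (!xe_pm_runtime_get_if_active(xe))
			return -ENODATA;

		/* set SoC re-mapper index register based on GUID memory region */
		xe_mmio_rmw32(xe->tiles[0].primary_gt, SG_REMAP_INDEX1, SG_REMAP_BITS,
			      FIELD_PREP(SG_REMAP_BITS, mem_region));

		memcpy_fromio(data, telem_addr, count);
		xe_pm_runtime_put(xe);

		return count;
	}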

With those fixed, I think this one is ready to go so after fixing, please 
add:

Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>


> +		/* set SoC re-mapper index register based on GUID memory region */
> +		xe_mmio_rmw32(xe->tiles[0].primary_gt, SG_REMAP_INDEX1, SG_REMAP_BITS,
> +			      FIELD_PREP(SG_REMAP_BITS, mem_region));
> +
> +		memcpy_fromio(data, telem_addr, count);
> +		ret = count;
> +		xe_pm_runtime_put(xe);
> +	}
> +	mutex_unlock(&xe->pmt.lock);
> +
> +	return ret;
> +}
Michael J. Ruhl Aug. 12, 2024, 5:14 p.m. UTC | #3
> -----Original Message-----
> From: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
> Sent: Monday, August 12, 2024 5:01 AM
> To: Ruhl, Michael J <michael.j.ruhl@intel.com>
> Cc: intel-xe@lists.freedesktop.org; platform-driver-x86@vger.kernel.org;
> david.e.box@linux.intel.com; Brost, Matthew <matthew.brost@intel.com>;
> Andy Shevchenko <andriy.shevchenko@linux.intel.com>; Vivi, Rodrigo
> <rodrigo.vivi@intel.com>
> Subject: Re: [PATCH v9 4/6] drm/xe/vsec: Support BMG devices
> 
> On Thu, 25 Jul 2024, Michael J. Ruhl wrote:
> 
> > Utilize the PMT callback API to add support for the BMG devices.
> 
> The shortlog and commit message are a bit terse on the details of what this
> change is about; it's all hidden in the acronyms :-).
> 
> > Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
> > Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
> > ---
> 
> > +static int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, u32 count)
> > +{
> > +	struct xe_device *xe = pdev_to_xe_device(pdev);
> > +	void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
> > +	u32 mem_region;
> > +	u32 offset;
> > +	int ret;
> > +
> > +	ret = guid_decode(guid, &mem_region, &offset);
> > +	if (ret)
> > +		return ret;
> > +
> > +	telem_addr += offset;
> > +
> > +	mutex_lock(&xe->pmt.lock);
> > +
> > +	/* indicate that we are not at an appropriate power level */
> > +	ret = -ENODATA;
> > +	if (xe_pm_runtime_get_if_active(xe) > 0) {
> 
> xe_pm_runtime_get_if_active() returns bool so > 0 looks odd. In fact,
> active > 0 compare is already done by that called function so perhaps you
> mixed up what kind of value is returned by xe_pm_runtime_get_if_active().

Hi Ilpo,

Yup, the underlying pm_runtime_xxx call can return an error...and I missed the bool
conversion.  I will update.
 
> Also, I'd restructure this logic with guard & use of reverse logic.
> 
> 	guard(mutex)(&xe->pmt.lock);
> 
> 	/* indicate that we are not at an appropriate power level */
> 	if (!xe_pm_runtime_get_if_active(xe))
> 		return -ENODATA;
> 
> 	... the rest of the code de-indented by one level (minus the
> 	mutex_unlock() which is no longer needed because guard() is used).
> 
> With those fixed, I think this one is ready to go so after fixing, please
> add:
> 
> Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>

The guard is new to me.  I will get this updated.

Thank you!

M
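
For background on the guard() suggestion above: it comes from the scope-based
cleanup helpers in <linux/cleanup.h>, and the mutex class is defined with
DEFINE_GUARD() in <linux/mutex.h>, so the unlock runs automatically when the
guard variable goes out of scope. A tiny standalone illustration (not tied to
this driver):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>
	#include <linux/types.h>

	static DEFINE_MUTEX(demo_lock);

	static int demo_read(u64 *out)
	{
		guard(mutex)(&demo_lock);	/* takes demo_lock here */

		if (!out)
			return -EINVAL;		/* unlocked automatically on this return */

		*out = 42;
		return 0;			/* ...and on this one */
	}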
 

Patch

diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 1ff9602a52f6..a3c044b46fed 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -112,6 +112,7 @@  xe-y += xe_bb.o \
 	xe_vm.o \
 	xe_vram.o \
 	xe_vram_freq.o \
+	xe_vsec.o \
 	xe_wait_user_fence.o \
 	xe_wa.o \
 	xe_wopcm.o
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 1aba6f9eaa19..0bdfbe849e64 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -53,6 +53,7 @@ 
 #include "xe_ttm_sys_mgr.h"
 #include "xe_vm.h"
 #include "xe_vram.h"
+#include "xe_vsec.h"
 #include "xe_wait_user_fence.h"
 #include "xe_wa.h"
 
@@ -370,6 +371,8 @@  struct xe_device *xe_device_create(struct pci_dev *pdev,
 		goto err;
 	}
 
+	drmm_mutex_init(&xe->drm, &xe->pmt.lock);
+
 	err = xe_display_create(xe);
 	if (WARN_ON(err))
 		goto err;
@@ -745,6 +748,8 @@  int xe_device_probe(struct xe_device *xe)
 	for_each_gt(gt, xe, id)
 		xe_gt_sanitize_freq(gt);
 
+	xe_vsec_init(xe);
+
 	return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
 
 err_fini_display:
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 5b7292a9a66d..2509d7428f2d 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -458,6 +458,12 @@  struct xe_device {
 		struct mutex lock;
 	} d3cold;
 
+	/** @pmt: Support the PMT driver callback interface */
+	struct {
+		/** @pmt.lock: protect access for telemetry data */
+		struct mutex lock;
+	} pmt;
+
 	/**
 	 * @pm_callback_task: Track the active task that is running in either
 	 * the runtime_suspend or runtime_resume callbacks.
diff --git a/drivers/gpu/drm/xe/xe_vsec.c b/drivers/gpu/drm/xe/xe_vsec.c
new file mode 100644
index 000000000000..2c967aaa4072
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vsec.c
@@ -0,0 +1,222 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2022 - 2024 Intel Corporation
+ */
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/intel_vsec.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_drv.h"
+#include "xe_mmio.h"
+#include "xe_platform_types.h"
+#include "xe_pm.h"
+#include "xe_vsec.h"
+
+#define SOC_BASE		0x280000
+
+#define BMG_PMT_BASE		0xDB000
+#define BMG_DISCOVERY_OFFSET	(SOC_BASE + BMG_PMT_BASE)
+
+#define BMG_TELEMETRY_BASE	0xE0000
+#define BMG_TELEMETRY_OFFSET	(SOC_BASE + BMG_TELEMETRY_BASE)
+
+#define BMG_DEVICE_ID 0xE2F8
+
+#define GFX_BAR			0
+
+#define SG_REMAP_INDEX1		XE_REG(SOC_BASE + 0x08)
+#define SG_REMAP_BITS		GENMASK(31, 24)
+
+static struct intel_vsec_header bmg_telemetry = {
+	.length = 0x10,
+	.id = VSEC_ID_TELEMETRY,
+	.num_entries = 2,
+	.entry_size = 4,
+	.tbir = GFX_BAR,
+	.offset = BMG_DISCOVERY_OFFSET,
+};
+
+static struct intel_vsec_header *bmg_capabilities[] = {
+	&bmg_telemetry,
+	NULL
+};
+
+enum xe_vsec {
+	XE_VSEC_UNKNOWN = 0,
+	XE_VSEC_BMG,
+};
+
+static struct intel_vsec_platform_info xe_vsec_info[] = {
+	[XE_VSEC_BMG] = {
+		.caps = VSEC_CAP_TELEMETRY,
+		.headers = bmg_capabilities,
+	},
+	{ }
+};
+
+/*
+ * The GUID will have the following bits to decode:
+ *
+ * X(4bits) - {Telemetry space iteration number (0,1,..)}
+ * X(4bits) - Segment (SEGMENT_INDEPENDENT-0, Client-1, Server-2)
+ * X(4bits) - SOC_SKU
+ * XXXX(16bits)– Device ID – changes for each down bin SKU’s
+ * X(2bits) - Capability Type (Crashlog-0, Telemetry Aggregator-1, Watcher-2)
+ * X(2bits) - Record-ID (0-PUNIT, 1-OOBMSM_0, 2-OOBMSM_1)
+ */
+#define GUID_TELEM_ITERATION	GENMASK(3, 0)
+#define GUID_SEGMENT		GENMASK(7, 4)
+#define GUID_SOC_SKU		GENMASK(11, 8)
+#define GUID_DEVICE_ID		GENMASK(27, 12)
+#define GUID_CAP_TYPE		GENMASK(29, 28)
+#define GUID_RECORD_ID		GENMASK(31, 30)
+
+#define PUNIT_TELEMETRY_OFFSET		0x0200
+#define PUNIT_WATCHER_OFFSET		0x14A0
+#define OOBMSM_0_WATCHER_OFFSET		0x18D8
+#define OOBMSM_1_TELEMETRY_OFFSET	0x1000
+
+enum record_id {
+	PUNIT,
+	OOBMSM_0,
+	OOBMSM_1
+};
+
+enum capability {
+	CRASHLOG,
+	TELEMETRY,
+	WATCHER
+};
+
+static int guid_decode(u32 guid, int *index, u32 *offset)
+{
+	u32 record_id = FIELD_GET(GUID_RECORD_ID, guid);
+	u32 cap_type  = FIELD_GET(GUID_CAP_TYPE, guid);
+	u32 device_id = FIELD_GET(GUID_DEVICE_ID, guid);
+
+	if (device_id != BMG_DEVICE_ID)
+		return -ENODEV;
+
+	if (record_id > OOBMSM_1 || cap_type > WATCHER)
+		return -EINVAL;
+
+	*offset = 0;
+
+	if (cap_type == CRASHLOG) {
+		*index = record_id == PUNIT ? 2 : 4;
+		return 0;
+	}
+
+	switch (record_id) {
+	case PUNIT:
+		*index = 0;
+		if (cap_type == TELEMETRY)
+			*offset = PUNIT_TELEMETRY_OFFSET;
+		else
+			*offset = PUNIT_WATCHER_OFFSET;
+		break;
+
+	case OOBMSM_0:
+		*index = 1;
+		if (cap_type == WATCHER)
+			*offset = OOBMSM_0_WATCHER_OFFSET;
+		break;
+
+	case OOBMSM_1:
+		*index = 1;
+		if (cap_type == TELEMETRY)
+			*offset = OOBMSM_1_TELEMETRY_OFFSET;
+		break;
+	}
+
+	return 0;
+}
+
+static int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, u32 count)
+{
+	struct xe_device *xe = pdev_to_xe_device(pdev);
+	void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
+	u32 mem_region;
+	u32 offset;
+	int ret;
+
+	ret = guid_decode(guid, &mem_region, &offset);
+	if (ret)
+		return ret;
+
+	telem_addr += offset;
+
+	mutex_lock(&xe->pmt.lock);
+
+	/* indicate that we are not at an appropriate power level */
+	ret = -ENODATA;
+	if (xe_pm_runtime_get_if_active(xe) > 0) {
+		/* set SoC re-mapper index register based on GUID memory region */
+		xe_mmio_rmw32(xe->tiles[0].primary_gt, SG_REMAP_INDEX1, SG_REMAP_BITS,
+			      FIELD_PREP(SG_REMAP_BITS, mem_region));
+
+		memcpy_fromio(data, telem_addr, count);
+		ret = count;
+		xe_pm_runtime_put(xe);
+	}
+	mutex_unlock(&xe->pmt.lock);
+
+	return ret;
+}
+
+struct pmt_callbacks xe_pmt_cb = {
+	.read_telem = xe_pmt_telem_read,
+};
+
+static const int vsec_platforms[] = {
+	[XE_BATTLEMAGE] = XE_VSEC_BMG,
+};
+
+static enum xe_vsec get_platform_info(struct xe_device *xe)
+{
+	if (xe->info.platform > XE_BATTLEMAGE)
+		return XE_VSEC_UNKNOWN;
+
+	return vsec_platforms[xe->info.platform];
+}
+
+/**
+ * xe_vsec_init - Initialize resources and add intel_vsec auxiliary
+ * interface
+ * @xe: valid xe instance
+ */
+void xe_vsec_init(struct xe_device *xe)
+{
+	struct intel_vsec_platform_info *info;
+	struct device *dev = xe->drm.dev;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	enum xe_vsec platform;
+
+	platform = get_platform_info(xe);
+	if (platform == XE_VSEC_UNKNOWN)
+		return;
+
+	info = &xe_vsec_info[platform];
+	if (!info->headers)
+		return;
+
+	switch (platform) {
+	case XE_VSEC_BMG:
+		info->priv_data = &xe_pmt_cb;
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * Register a VSEC. Cleanup is handled using device managed
+	 * resources.
+	 */
+	intel_vsec_register(pdev, info);
+}
+MODULE_IMPORT_NS(INTEL_VSEC);
diff --git a/drivers/gpu/drm/xe/xe_vsec.h b/drivers/gpu/drm/xe/xe_vsec.h
new file mode 100644
index 000000000000..3fd29a21cad6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vsec.h
@@ -0,0 +1,13 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2022 - 2024 Intel Corporation
+ */
+
+#ifndef _XE_VSEC_H_
+#define _XE_VSEC_H_
+
+struct xe_device;
+
+void xe_vsec_init(struct xe_device *xe);
+
+#endif
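
As a worked illustration of the GUID bit layout documented in xe_vsec.c above,
the following standalone sketch decodes a made-up value with the same
FIELD_GET() masks. The GUID 0x6E2F8010 is hypothetical and chosen only so that
its device-ID field matches BMG_DEVICE_ID; the masks, enum values and offsets
are the ones from the patch:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/printk.h>
	#include <linux/types.h>

	#define GUID_DEVICE_ID	GENMASK(27, 12)
	#define GUID_CAP_TYPE	GENMASK(29, 28)
	#define GUID_RECORD_ID	GENMASK(31, 30)

	static void guid_example(void)
	{
		u32 guid = 0x6E2F8010;	/* hypothetical, not a real BMG GUID */

		pr_info("device_id=%#lx cap_type=%lu record_id=%lu\n",
			FIELD_GET(GUID_DEVICE_ID, guid),	/* 0xe2f8 -> BMG_DEVICE_ID */
			FIELD_GET(GUID_CAP_TYPE, guid),		/* 2      -> WATCHER       */
			FIELD_GET(GUID_RECORD_ID, guid));	/* 1      -> OOBMSM_0      */

		/*
		 * guid_decode() maps this to index = 1 and
		 * offset = OOBMSM_0_WATCHER_OFFSET (0x18D8), so xe_pmt_telem_read()
		 * programs the SoC re-mapper index with 1 and reads from
		 * BMG_TELEMETRY_OFFSET + 0x18D8.
		 */
	}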