
[v11,6/6] drm/i915: expose rcs topology through query uAPI

Message ID 20180122082149.9847-7-lionel.g.landwerlin@intel.com (mailing list archive)
State New, archived

Commit Message

Lionel Landwerlin Jan. 22, 2018, 8:21 a.m. UTC
With the introduction of asymmetric slices in CNL, we cannot rely on
the previous SUBSLICE_MASK getparam to tell userspace what subslices
are available. Here we introduce a more detailed way of querying the
GPU's topology that doesn't aggregate numbers.

This is essential for monitoring parts of the GPU with the OA unit,
because counters need to be normalized to the number of
EUs/subslices/slices. The current aggregated numbers like EU_TOTAL do
not give us sufficient information.

As a bonus we can draw representations of the GPU:

        https://imgur.com/a/vuqpa
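
To illustrate the intended flow, here is a minimal userspace sketch. It is
illustrative only: DRM_IOCTL_I915_QUERY and the drm_i915_query structs come
from the query uAPI patch earlier in this series, drm_fd is assumed to be an
open i915 file descriptor, and error handling is trimmed:

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <i915_drm.h>

    /* Query EU fusing info: first call sizes the buffer, second call fills it. */
    static struct drm_i915_query_eu_info *get_eu_info(int drm_fd)
    {
            struct drm_i915_query_item item = {
                    .query_id = DRM_I915_QUERY_EU_INFO,
            };
            struct drm_i915_query query = {
                    .num_items = 1,
                    .items_ptr = (uintptr_t)&item,
            };
            struct drm_i915_query_eu_info *info;

            /* length == 0: the kernel reports the number of bytes needed. */
            if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
                    return NULL;

            info = calloc(1, item.length);
            if (!info)
                    return NULL;
            item.data_ptr = (uintptr_t)info;

            /* Second call: the kernel writes the struct plus the EU bitmask. */
            if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query)) {
                    free(info);
                    return NULL;
            }

            return info;
    }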

v2: Rename uapi struct s/_mask/_info/ (Tvrtko)
    Report max_slice/subslice/eus_per_subslice rather than strides (Tvrtko)
    Add uapi macros to read data from *_info structs (Tvrtko)

v3: Use !!(v & DRM_I915_BIT()) for uapi macros instead of custom shifts (Tvrtko)

v4: Factorize query item writing (Tvrtko)
    Tweak uapi struct/define names (Tvrtko)

v5: Replace ALIGN() macro (Chris)

v6: Updated uapi comments (Tvrtko)
    Moved flags != 0 checks into vfuncs (Tvrtko)

v7: Use access_ok() before copying anything, to avoid overflows (Chris)
    Switch BUG_ON() to GEM_WARN_ON() (Tvrtko)

v8: Tweak uapi comment style to match the coding style (Lionel)

v9: Fix error in comment about computation of enabled subslice (Tvrtko)

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
 drivers/gpu/drm/i915/i915_query.c | 110 ++++++++++++++++++++++++++++++++++++++
 include/uapi/drm/i915_drm.h       |  71 ++++++++++++++++++++++++
 2 files changed, 181 insertions(+)

Comments

Tvrtko Ursulin Jan. 22, 2018, 9:44 a.m. UTC | #1
On 22/01/2018 08:21, Lionel Landwerlin wrote:
> [snip]

Looks OK to me.

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko

Patch

diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 5aa886313cf9..ff87ec8a321a 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -25,8 +25,118 @@ 
 #include "i915_drv.h"
 #include <uapi/drm/i915_drm.h>
 
+static int copy_query_data(struct drm_i915_query_item *query_item,
+			   const void *item_ptr, u32 item_length,
+			   const void *data_ptr, u32 data_length)
+{
+	u32 total_length = item_length + data_length;
+
+	if (GEM_WARN_ON(add_overflows(item_length, data_length)))
+		return -EINVAL;
+
+	if (query_item->length == 0)
+		return total_length;
+
+	if (query_item->length < total_length)
+		return -EINVAL;
+
+	if (!access_ok(VERIFY_WRITE, u64_to_user_ptr(query_item->data_ptr),
+		       total_length))
+		return -EFAULT;
+
+	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
+			   item_ptr, item_length))
+		return -EFAULT;
+
+	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + item_length),
+			   data_ptr, data_length))
+		return -EFAULT;
+
+	return total_length;
+}
+
+static int query_slice_info(struct drm_i915_private *dev_priv,
+			    struct drm_i915_query_item *query_item)
+{
+	const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+	struct drm_i915_query_slice_info slice_info;
+
+	if (query_item->flags != 0)
+		return -EINVAL;
+
+	if (sseu->max_slices == 0)
+		return -ENODEV;
+
+	/*
+	 * If we ever change the internal slice mask data type, we'll need to
+	 * update this function.
+	 */
+	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
+
+	memset(&slice_info, 0, sizeof(slice_info));
+	slice_info.max_slices = sseu->max_slices;
+
+	return copy_query_data(query_item, &slice_info, sizeof(slice_info),
+			       &sseu->slice_mask, sizeof(sseu->slice_mask));
+}
+
+static int query_subslice_info(struct drm_i915_private *dev_priv,
+			       struct drm_i915_query_item *query_item)
+{
+	const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+	struct drm_i915_query_subslice_info subslice_info;
+	u32 data_length;
+
+	if (query_item->flags != 0)
+		return -EINVAL;
+
+	if (sseu->max_slices == 0)
+		return -ENODEV;
+
+	memset(&subslice_info, 0, sizeof(subslice_info));
+	subslice_info.max_slices = sseu->max_slices;
+	subslice_info.max_subslices = sseu->max_subslices;
+
+	data_length = subslice_info.max_slices *
+		DIV_ROUND_UP(subslice_info.max_subslices,
+			     sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
+
+	return copy_query_data(query_item,
+			       &subslice_info, sizeof(subslice_info),
+			       sseu->subslice_mask, data_length);
+}
+
+static int query_eu_info(struct drm_i915_private *dev_priv,
+			 struct drm_i915_query_item *query_item)
+{
+	const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+	struct drm_i915_query_eu_info eu_info;
+	u32 data_length;
+
+	if (query_item->flags != 0)
+		return -EINVAL;
+
+	if (sseu->max_slices == 0)
+		return -ENODEV;
+
+	memset(&eu_info, 0, sizeof(eu_info));
+	eu_info.max_slices = sseu->max_slices;
+	eu_info.max_subslices = sseu->max_subslices;
+	eu_info.max_eus_per_subslice = sseu->max_eus_per_subslice;
+
+	data_length = eu_info.max_slices * eu_info.max_subslices *
+		DIV_ROUND_UP(eu_info.max_eus_per_subslice, BITS_PER_BYTE);
+
+	return copy_query_data(query_item,
+			       &eu_info, sizeof(eu_info),
+			       sseu->eu_mask, data_length);
+}
+
 static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
 					struct drm_i915_query_item *query_item) = {
+	query_slice_info,
+	query_subslice_info,
+	query_eu_info,
 };
 
 int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 03f031652d86..7ae8e9aa0206 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1618,6 +1618,9 @@  struct drm_i915_perf_oa_config {
 
 struct drm_i915_query_item {
 	__u64 query_id;
+#define DRM_I915_QUERY_SLICE_INFO	0x01
+#define DRM_I915_QUERY_SUBSLICE_INFO	0x02
+#define DRM_I915_QUERY_EU_INFO		0x03
 
 	/*
 	 * When set to zero by userspace, this is filled with the size of the
@@ -1654,6 +1657,74 @@  struct drm_i915_query {
 	__u64 items_ptr;
 };
 
+#define DRM_I915_BIT(bit) ((__u32)1 << (bit))
+#define DRM_I915_DIV_ROUND_UP(val, div) (((val) + (div) - 1) / (div))
+
+/*
+ * Data written by the kernel with query DRM_I915_QUERY_SLICE_INFO :
+ *
+ * data: each bit indicates whether a slice is available (1) or fused off (0).
+ *       Formula to tell if slice X is available :
+ *
+ *            (data[X / 8] >> (X % 8)) & 1
+ *
+ *       Alternatively, use DRM_I915_QUERY_SLICE_AVAILABLE() to query a
+ *       given slice's availability.
+ */
+struct drm_i915_query_slice_info {
+	__u32 max_slices;
+
+#define DRM_I915_QUERY_SLICE_AVAILABLE(info, slice) \
+	!!((info)->data[(slice) / 8] & DRM_I915_BIT((slice) % 8))
+	__u8 data[];
+};
+
+/*
+ * Data written by the kernel with query DRM_I915_QUERY_SUBSLICE_INFO :
+ *
+ * data: each bit indicates whether a subslice is available (1) or fused off
+ *       (0). Formula to tell if slice X subslice Y is available :
+ *
+ *            (data[(X * ALIGN(max_subslices, 8) / 8) + Y / 8] >> (Y % 8)) & 1
+ *
+ *       Alternatively, use DRM_I915_QUERY_SUBSLICE_AVAILABLE() to query a
+ *       given subslice's availability.
+ */
+struct drm_i915_query_subslice_info {
+	__u32 max_slices;
+	__u32 max_subslices;
+
+#define DRM_I915_QUERY_SUBSLICE_AVAILABLE(info, slice, subslice) \
+	!!((info)->data[(slice) * DRM_I915_DIV_ROUND_UP((info)->max_subslices, 8) + \
+			(subslice) / 8] & DRM_I915_BIT((subslice) % 8))
+	__u8 data[];
+};
+
+/*
+ * Data written by the kernel with query DRM_I915_QUERY_EU_INFO :
+ *
+ * data: Each bit indicates whether an EU is available (1) or fused off (0).
+ *       Formula to tell if slice X subslice Y eu Z is available :
+ *
+ *           (data[X * max_subslices * ALIGN(max_eus_per_subslice, 8) / 8 +
+ *                 Y * ALIGN(max_eus_per_subslice, 8) / 8 +
+ *                 Z / 8] >> (Z % 8)) & 1
+ *
+ *       Alternatively, use DRM_I915_QUERY_EU_AVAILABLE() to query a given
+ *       EU's availability.
+ */
+struct drm_i915_query_eu_info {
+	__u32 max_slices;
+	__u32 max_subslices;
+	__u32 max_eus_per_subslice;
+
+#define DRM_I915_QUERY_EU_AVAILABLE(info, slice, subslice, eu) \
+	!!((info)->data[(slice) * DRM_I915_DIV_ROUND_UP((info)->max_eus_per_subslice, 8) * (info)->max_subslices + \
+			(subslice) * DRM_I915_DIV_ROUND_UP((info)->max_eus_per_subslice, 8) + \
+			(eu) / 8] & DRM_I915_BIT((eu) % 8))
+	__u8 data[];
+};
+
 #if defined(__cplusplus)
 }
 #endif
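
For completeness, here is a hedged userspace sketch of walking the bitmasks
documented above. It assumes info points at a buffer filled by a
DRM_I915_QUERY_EU_INFO query (for example via the flow sketched under the
commit message) and that i915_drm.h provides the macros from this patch:

    /* Count enabled EUs using the layout documented in drm_i915_query_eu_info. */
    static unsigned int count_enabled_eus(const struct drm_i915_query_eu_info *info)
    {
            unsigned int s, ss, eu, total = 0;

            for (s = 0; s < info->max_slices; s++)
                    for (ss = 0; ss < info->max_subslices; ss++)
                            for (eu = 0; eu < info->max_eus_per_subslice; eu++)
                                    if (DRM_I915_QUERY_EU_AVAILABLE(info, s, ss, eu))
                                            total++;

            return total;
    }

The same pattern applies to DRM_I915_QUERY_SLICE_AVAILABLE() and
DRM_I915_QUERY_SUBSLICE_AVAILABLE() for the slice and subslice masks.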