diff mbox series

[1/2] drm: Add DRM-managed alloc_workqueue() and alloc_ordered_workqueue()

Message ID 20230110152447.5611-1-jiasheng@iscas.ac.cn (mailing list archive)
State New, archived
Headers show
Series [1/2] drm: Add DRM-managed alloc_workqueue() and alloc_ordered_workqueue() | expand

Commit Message

Jiasheng Jiang Jan. 10, 2023, 3:24 p.m. UTC
Add drmm_alloc_workqueue() and drmm_alloc_ordered_workqueue(), the helpers
that provide managed workqueue cleanup. The workqueue will be destroyed
with the final reference of the DRM device.

Signed-off-by: Jiasheng Jiang <jiasheng@iscas.ac.cn>
---
 drivers/gpu/drm/drm_managed.c | 66 +++++++++++++++++++++++++++++++++++
 include/drm/drm_managed.h     |  8 +++++
 2 files changed, 74 insertions(+)

Comments

Daniel Vetter Jan. 11, 2023, 10:46 p.m. UTC | #1
On Tue, Jan 10, 2023 at 11:24:47PM +0800, Jiasheng Jiang wrote:
> Add drmm_alloc_workqueue() and drmm_alloc_ordered_workqueue(), the helpers
> that provide managed workqueue cleanup. The workqueue will be destroyed
> with the final reference of the DRM device.
> 
> Signed-off-by: Jiasheng Jiang <jiasheng@iscas.ac.cn>

Yeah I think this looks nice.

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

I'm assuming driver maintainers will pick this up, if not please holler.

Also the threading seems broken, it's not a patch series. The b4 tool or
git send-email (of all the patches of the entire series at once, not each
individually) should get this right.

Unfortunately I didn't find the right link in the kernel docs, or at least
they're not as detailed as I hoped.

Also your previous submission had iirc a bunch more patches, do you plan
to include them all in the next patch set?
-Daniel


> ---
>  drivers/gpu/drm/drm_managed.c | 66 +++++++++++++++++++++++++++++++++++
>  include/drm/drm_managed.h     |  8 +++++
>  2 files changed, 74 insertions(+)
> 
> diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
> index 4cf214de50c4..d3bd6247eec9 100644
> --- a/drivers/gpu/drm/drm_managed.c
> +++ b/drivers/gpu/drm/drm_managed.c
> @@ -271,6 +271,13 @@ static void drmm_mutex_release(struct drm_device *dev, void *res)
>  	mutex_destroy(lock);
>  }
>  
> +static void drmm_destroy_workqueue(struct drm_device *dev, void *res)
> +{
> +	struct workqueue_struct *wq = res;
> +
> +	destroy_workqueue(wq);
> +}
> +
>  /**
>   * drmm_mutex_init - &drm_device-managed mutex_init()
>   * @dev: DRM device
> @@ -289,3 +296,62 @@ int drmm_mutex_init(struct drm_device *dev, struct mutex *lock)
>  	return drmm_add_action_or_reset(dev, drmm_mutex_release, lock);
>  }
>  EXPORT_SYMBOL(drmm_mutex_init);
> +
> +/**
> + * drmm_alloc_workqueue - &drm_device-managed alloc_workqueue()
> + * @dev: DRM device
> + * @wq: workqueue to be allocated
> + *
> + * Returns:
> + * 0 on success, or a negative errno code otherwise.
> + *
> + * This is a &drm_device-managed version of alloc_workqueue().
> + * The initialized lock is automatically destroyed on the final
> + * drm_dev_put().
> + */
> +int drmm_alloc_workqueue(struct drm_device *dev,
> +			  struct workqueue_struct *wq, const char *fmt,
> +			  unsigned int flags, int max_active, ...)
> +{
> +	va_list args;
> +
> +	va_start(args, max_active);
> +	wq = alloc_workqueue(fmt, flags, max_active, args);
> +	va_end(args);
> +
> +	if (!wq)
> +		return -ENOMEM;
> +
> +	return drmm_add_action_or_reset(dev, drmm_destroy_workqueue, wq);
> +}
> +EXPORT_SYMBOL(drmm_alloc_workqueue);
> +
> +/**
> + * drmm_alloc_ordered_workqueue - &drm_device-managed
> + * alloc_ordered_workqueue()
> + * @dev: DRM device
> + * @wq: workqueue to be allocated
> + *
> + * Returns:
> + * 0 on success, or a negative errno code otherwise.
> + *
> + * This is a &drm_device-managed version of alloc_ordered_workqueue().
> + * The initialized lock is automatically destroyed on the final
> + * drm_dev_put().
> + */
> +int drmm_alloc_ordered_workqueue(struct drm_device *dev,
> +				  struct workqueue_struct *wq,
> +				  const char *fmt, unsigned int flags, ...)
> +{
> +	va_list args;
> +
> +	va_start(args, flags);
> +	wq = alloc_ordered_workqueue(fmt, flags, args);
> +	va_end(args);
> +
> +	if (!wq)
> +		return -ENOMEM;
> +
> +	return drmm_add_action_or_reset(dev, drmm_destroy_workqueue, wq);
> +}
> +EXPORT_SYMBOL(drmm_alloc_ordered_workqueue);
> diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
> index 359883942612..68cecc14e1af 100644
> --- a/include/drm/drm_managed.h
> +++ b/include/drm/drm_managed.h
> @@ -107,4 +107,12 @@ void drmm_kfree(struct drm_device *dev, void *data);
>  
>  int drmm_mutex_init(struct drm_device *dev, struct mutex *lock);
>  
> +int drmm_alloc_workqueue(struct drm_device *dev,
> +			  struct workqueue_struct *wq, const char *fmt,
> +			  unsigned int flags, int max_active, ...);
> +
> +int drmm_alloc_ordered_workqueue(struct drm_device *dev,
> +				  struct workqueue_struct *wq,
> +				  const char *fmt, unsigned int flags, ...);
> +
>  #endif
> -- 
> 2.25.1
>
diff mbox series

Patch

diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index 4cf214de50c4..d3bd6247eec9 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -271,6 +271,13 @@  static void drmm_mutex_release(struct drm_device *dev, void *res)
 	mutex_destroy(lock);
 }
 
+/*
+ * drmm release callback: runs on the final drm_dev_put() and tears down
+ * the managed workqueue (destroy_workqueue() drains pending work first).
+ */
+static void drmm_destroy_workqueue(struct drm_device *dev, void *res)
+{
+	struct workqueue_struct *wq = res;
+
+	destroy_workqueue(wq);
+}
+
 /**
  * drmm_mutex_init - &drm_device-managed mutex_init()
  * @dev: DRM device
@@ -289,3 +296,62 @@  int drmm_mutex_init(struct drm_device *dev, struct mutex *lock)
 	return drmm_add_action_or_reset(dev, drmm_mutex_release, lock);
 }
 EXPORT_SYMBOL(drmm_mutex_init);
+
+/**
+ * drmm_alloc_workqueue - &drm_device-managed alloc_workqueue()
+ * @dev: DRM device
+ * @wq: double pointer that receives the newly allocated workqueue
+ * @fmt: printf-style format for the workqueue name
+ * @flags: WQ_* flags passed to alloc_workqueue()
+ * @max_active: max in-flight work items per CPU, 0 for the default
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ *
+ * This is a &drm_device-managed version of alloc_workqueue(). The
+ * allocated workqueue is automatically destroyed on the final
+ * drm_dev_put().
+ */
+int drmm_alloc_workqueue(struct drm_device *dev,
+			 struct workqueue_struct **wq, const char *fmt,
+			 unsigned int flags, int max_active, ...)
+{
+	struct workqueue_struct *new_wq;
+	va_list args;
+	char *name;
+	int ret;
+
+	/*
+	 * alloc_workqueue() has no va_list variant, so expand the name
+	 * here and hand it over through a plain "%s" format.
+	 */
+	va_start(args, max_active);
+	name = kvasprintf(GFP_KERNEL, fmt, args);
+	va_end(args);
+	if (!name)
+		return -ENOMEM;
+
+	new_wq = alloc_workqueue("%s", flags, max_active, name);
+	kfree(name);
+	if (!new_wq)
+		return -ENOMEM;
+
+	/* On failure the action machinery already destroys the workqueue. */
+	ret = drmm_add_action_or_reset(dev, drmm_destroy_workqueue, new_wq);
+	if (ret)
+		return ret;
+
+	*wq = new_wq;
+	return 0;
+}
+EXPORT_SYMBOL(drmm_alloc_workqueue);
+
+/**
+ * drmm_alloc_ordered_workqueue - &drm_device-managed
+ * alloc_ordered_workqueue()
+ * @dev: DRM device
+ * @wq: double pointer that receives the newly allocated workqueue
+ * @fmt: printf-style format for the workqueue name
+ * @flags: WQ_* flags passed to alloc_ordered_workqueue()
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ *
+ * This is a &drm_device-managed version of alloc_ordered_workqueue().
+ * The allocated workqueue is automatically destroyed on the final
+ * drm_dev_put().
+ */
+int drmm_alloc_ordered_workqueue(struct drm_device *dev,
+				 struct workqueue_struct **wq,
+				 const char *fmt, unsigned int flags, ...)
+{
+	struct workqueue_struct *new_wq;
+	va_list args;
+	char *name;
+	int ret;
+
+	/*
+	 * alloc_ordered_workqueue() has no va_list variant, so expand
+	 * the name here and hand it over through a plain "%s" format.
+	 */
+	va_start(args, flags);
+	name = kvasprintf(GFP_KERNEL, fmt, args);
+	va_end(args);
+	if (!name)
+		return -ENOMEM;
+
+	new_wq = alloc_ordered_workqueue("%s", flags, name);
+	kfree(name);
+	if (!new_wq)
+		return -ENOMEM;
+
+	/* On failure the action machinery already destroys the workqueue. */
+	ret = drmm_add_action_or_reset(dev, drmm_destroy_workqueue, new_wq);
+	if (ret)
+		return ret;
+
+	*wq = new_wq;
+	return 0;
+}
+EXPORT_SYMBOL(drmm_alloc_ordered_workqueue);
diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
index 359883942612..68cecc14e1af 100644
--- a/include/drm/drm_managed.h
+++ b/include/drm/drm_managed.h
@@ -107,4 +107,12 @@  void drmm_kfree(struct drm_device *dev, void *data);
 
 int drmm_mutex_init(struct drm_device *dev, struct mutex *lock);
 
+/* Managed workqueue allocators; *wq receives the allocated queue. */
+__printf(3, 6)
+int drmm_alloc_workqueue(struct drm_device *dev,
+			 struct workqueue_struct **wq, const char *fmt,
+			 unsigned int flags, int max_active, ...);
+
+__printf(3, 5)
+int drmm_alloc_ordered_workqueue(struct drm_device *dev,
+				 struct workqueue_struct **wq,
+				 const char *fmt, unsigned int flags, ...);
+
 #endif