
[v2,1/5] ACPI / x86: Add support for LPS0 callback handler

Message ID 20220310191724.1440-1-mario.limonciello@amd.com (mailing list archive)
State Superseded, archived
Series [v2,1/5] ACPI / x86: Add support for LPS0 callback handler

Commit Message

Mario Limonciello March 10, 2022, 7:17 p.m. UTC
Currently the last thing run during a suspend-to-idle attempt is the
LPS0 `prepare_late` callback, and the first thing run on resume is the
`restore_early` callback.

The `amd-pmc` driver needs to run its suspend handler later in the
suspend process (ideally as the very last step), so create a callback
that it, or any other driver, can hook into to do this.

Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
Changes from v1->v2:
 * Change register/unregister arguments to be a struct

 drivers/acpi/x86/s2idle.c | 65 ++++++++++++++++++++++++++++++++++++++-
 include/linux/acpi.h      | 11 ++++++-
 2 files changed, 74 insertions(+), 2 deletions(-)
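
For orientation, a minimal sketch of how a client driver (amd-pmc is the
intended first user) might use the new interface. Only struct
lps0_callback_handler and the acpi_register_lps0_callbacks() /
acpi_unregister_lps0_callbacks() entry points come from this patch; the
driver name, handler functions, and context pointer below are illustrative:

#include <linux/acpi.h>
#include <linux/platform_device.h>

/* Illustrative handlers; names and context are hypothetical. */
static int example_prepare_late(void *context)
{
	/* Runs at the end of acpi_s2idle_prepare_late(), after the LPS0 entry _DSMs. */
	return 0;	/* a non-zero return aborts the remaining handlers */
}

static void example_restore_early(void *context)
{
	/* Runs at the start of acpi_s2idle_restore_early(), before the LPS0 exit _DSMs. */
}

static struct lps0_callback_handler example_handler = {
	.prepare_late_callback = example_prepare_late,
	.restore_early_callback = example_restore_early,
	.context = NULL,	/* driver-private data would go here */
};

static int example_probe(struct platform_device *pdev)
{
	return acpi_register_lps0_callbacks(&example_handler);
}

static int example_remove(struct platform_device *pdev)
{
	acpi_unregister_lps0_callbacks(&example_handler);
	return 0;
}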

Comments

David E. Box March 10, 2022, 9:43 p.m. UTC | #1
On Thu, 2022-03-10 at 13:17 -0600, Mario Limonciello wrote:
> Currently the last thing run during a suspend-to-idle attempt is the
> LPS0 `prepare_late` callback, and the first thing run on resume is the
> `restore_early` callback.
> 
> The `amd-pmc` driver needs to run its suspend handler later in the
> suspend process (ideally as the very last step), so create a callback
> that it, or any other driver, can hook into to do this.
> 
> Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
> ---
> Changes from v1->v2:
>  * Change register/unregister arguments to be a struct
> 
>  drivers/acpi/x86/s2idle.c | 65 ++++++++++++++++++++++++++++++++++++++-
>  include/linux/acpi.h      | 11 ++++++-
>  2 files changed, 74 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
> index abc06e7f89d8..7418443580d4 100644
> --- a/drivers/acpi/x86/s2idle.c
> +++ b/drivers/acpi/x86/s2idle.c
> @@ -86,6 +86,9 @@ struct lpi_device_constraint_amd {
>  	int min_dstate;
>  };
>  
> +static LIST_HEAD(lps0_callback_handler_head);
> +static DEFINE_MUTEX(lps0_callback_handler_mutex);
> +
>  static struct lpi_constraints *lpi_constraints_table;
>  static int lpi_constraints_table_size;
>  static int rev_id;
> @@ -444,6 +447,9 @@ static struct acpi_scan_handler lps0_handler = {
>  
>  int acpi_s2idle_prepare_late(void)
>  {
> +	struct lps0_callback_handler *handler;
> +	int rc = 0;
> +
>  	if (!lps0_device_handle || sleep_no_lps0)
>  		return 0;
>  
> @@ -474,14 +480,31 @@ int acpi_s2idle_prepare_late(void)
>  		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
>  				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
>  	}
> -	return 0;
> +
> +	mutex_lock(&lps0_callback_handler_mutex);
> +	list_for_each_entry(handler, &lps0_callback_handler_head, list_node) {
> +		rc = handler->prepare_late_callback(handler->context);

Check that the callback exists first. Sorry I missed this earlier.
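
For example, the call could be guarded like this (a sketch only, reusing the
names from this hunk):

	if (handler->prepare_late_callback) {
		rc = handler->prepare_late_callback(handler->context);
		if (rc)
			goto out;
	}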

> +		if (rc)
> +			goto out;
> +	}
> +out:
> +	mutex_unlock(&lps0_callback_handler_mutex);
> +
> +	return rc;
>  }
>  
>  void acpi_s2idle_restore_early(void)
>  {
> +	struct lps0_callback_handler *handler;
> +
>  	if (!lps0_device_handle || sleep_no_lps0)
>  		return;
>  
> +	mutex_lock(&lps0_callback_handler_mutex);
> +	list_for_each_entry(handler, &lps0_callback_handler_head, list_node)
> +		handler->restore_early_callback(handler->context);

Here too.
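
Sketch for the restore path:

	if (handler->restore_early_callback)
		handler->restore_early_callback(handler->context);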

David

> +	mutex_unlock(&lps0_callback_handler_mutex);
> +
>  	/* Modern standby exit */
>  	if (lps0_dsm_func_mask_microsoft > 0)
>  		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
> @@ -524,4 +547,44 @@ void acpi_s2idle_setup(void)
>  	s2idle_set_ops(&acpi_s2idle_ops_lps0);
>  }
>  
> +int acpi_register_lps0_callbacks(struct lps0_callback_handler *arg)
> +{
> +	struct lps0_callback_handler *handler;
> +
> +	if (!lps0_device_handle || sleep_no_lps0)
> +		return -ENODEV;
> +
> +	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
> +	if (!handler)
> +		return -ENOMEM;
> +	handler->prepare_late_callback = arg->prepare_late_callback;
> +	handler->restore_early_callback = arg->restore_early_callback;
> +	handler->context = arg->context;
> +
> +	mutex_lock(&lps0_callback_handler_mutex);
> +	list_add(&handler->list_node, &lps0_callback_handler_head);
> +	mutex_unlock(&lps0_callback_handler_mutex);
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(acpi_register_lps0_callbacks);
> +
> +void acpi_unregister_lps0_callbacks(struct lps0_callback_handler *arg)
> +{
> +	struct lps0_callback_handler *handler;
> +
> +	mutex_lock(&lps0_callback_handler_mutex);
> +	list_for_each_entry(handler, &lps0_callback_handler_head, list_node) {
> +		if (handler->prepare_late_callback == arg->prepare_late_callback &&
> +		    handler->restore_early_callback == arg->restore_early_callback &&
> +		    handler->context == arg->context) {
> +			list_del(&handler->list_node);
> +			kfree(handler);
> +			break;
> +		}
> +	}
> +	mutex_unlock(&lps0_callback_handler_mutex);
> +}
> +EXPORT_SYMBOL_GPL(acpi_unregister_lps0_callbacks);
> +
>  #endif /* CONFIG_SUSPEND */
> diff --git a/include/linux/acpi.h b/include/linux/acpi.h
> index 6274758648e3..df105f5e03e5 100644
> --- a/include/linux/acpi.h
> +++ b/include/linux/acpi.h
> @@ -1023,7 +1023,16 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
>  
>  acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
>  					   u32 val_a, u32 val_b);
> -
> +#ifdef CONFIG_X86
> +struct lps0_callback_handler {
> +	struct list_head list_node;
> +	int (*prepare_late_callback)(void *context);
> +	void (*restore_early_callback)(void *context);
> +	void *context;
> +};
> +int acpi_register_lps0_callbacks(struct lps0_callback_handler *arg);
> +void acpi_unregister_lps0_callbacks(struct lps0_callback_handler *arg);
> +#endif /* CONFIG_X86 */
>  #ifndef CONFIG_IA64
>  void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
>  #else

Patch

diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index abc06e7f89d8..7418443580d4 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -86,6 +86,9 @@  struct lpi_device_constraint_amd {
 	int min_dstate;
 };
 
+static LIST_HEAD(lps0_callback_handler_head);
+static DEFINE_MUTEX(lps0_callback_handler_mutex);
+
 static struct lpi_constraints *lpi_constraints_table;
 static int lpi_constraints_table_size;
 static int rev_id;
@@ -444,6 +447,9 @@  static struct acpi_scan_handler lps0_handler = {
 
 int acpi_s2idle_prepare_late(void)
 {
+	struct lps0_callback_handler *handler;
+	int rc = 0;
+
 	if (!lps0_device_handle || sleep_no_lps0)
 		return 0;
 
@@ -474,14 +480,31 @@  int acpi_s2idle_prepare_late(void)
 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
 				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
 	}
-	return 0;
+
+	mutex_lock(&lps0_callback_handler_mutex);
+	list_for_each_entry(handler, &lps0_callback_handler_head, list_node) {
+		rc = handler->prepare_late_callback(handler->context);
+		if (rc)
+			goto out;
+	}
+out:
+	mutex_unlock(&lps0_callback_handler_mutex);
+
+	return rc;
 }
 
 void acpi_s2idle_restore_early(void)
 {
+	struct lps0_callback_handler *handler;
+
 	if (!lps0_device_handle || sleep_no_lps0)
 		return;
 
+	mutex_lock(&lps0_callback_handler_mutex);
+	list_for_each_entry(handler, &lps0_callback_handler_head, list_node)
+		handler->restore_early_callback(handler->context);
+	mutex_unlock(&lps0_callback_handler_mutex);
+
 	/* Modern standby exit */
 	if (lps0_dsm_func_mask_microsoft > 0)
 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
@@ -524,4 +547,44 @@  void acpi_s2idle_setup(void)
 	s2idle_set_ops(&acpi_s2idle_ops_lps0);
 }
 
+int acpi_register_lps0_callbacks(struct lps0_callback_handler *arg)
+{
+	struct lps0_callback_handler *handler;
+
+	if (!lps0_device_handle || sleep_no_lps0)
+		return -ENODEV;
+
+	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
+	if (!handler)
+		return -ENOMEM;
+	handler->prepare_late_callback = arg->prepare_late_callback;
+	handler->restore_early_callback = arg->restore_early_callback;
+	handler->context = arg->context;
+
+	mutex_lock(&lps0_callback_handler_mutex);
+	list_add(&handler->list_node, &lps0_callback_handler_head);
+	mutex_unlock(&lps0_callback_handler_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_register_lps0_callbacks);
+
+void acpi_unregister_lps0_callbacks(struct lps0_callback_handler *arg)
+{
+	struct lps0_callback_handler *handler;
+
+	mutex_lock(&lps0_callback_handler_mutex);
+	list_for_each_entry(handler, &lps0_callback_handler_head, list_node) {
+		if (handler->prepare_late_callback == arg->prepare_late_callback &&
+		    handler->restore_early_callback == arg->restore_early_callback &&
+		    handler->context == arg->context) {
+			list_del(&handler->list_node);
+			kfree(handler);
+			break;
+		}
+	}
+	mutex_unlock(&lps0_callback_handler_mutex);
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_lps0_callbacks);
+
 #endif /* CONFIG_SUSPEND */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6274758648e3..df105f5e03e5 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1023,7 +1023,16 @@  void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
 
 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
 					   u32 val_a, u32 val_b);
-
+#ifdef CONFIG_X86
+struct lps0_callback_handler {
+	struct list_head list_node;
+	int (*prepare_late_callback)(void *context);
+	void (*restore_early_callback)(void *context);
+	void *context;
+};
+int acpi_register_lps0_callbacks(struct lps0_callback_handler *arg);
+void acpi_unregister_lps0_callbacks(struct lps0_callback_handler *arg);
+#endif /* CONFIG_X86 */
 #ifndef CONFIG_IA64
 void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
 #else