Message ID | 20220310151705.577442-1-mario.limonciello@amd.com (mailing list archive) |
---|---|
State | Superseded, archived |
Headers | show |
Series | [1/4] ACPI / x86: Add support for LPS0 callback handler | expand |
Hi Mario, On Thu, 2022-03-10 at 09:17 -0600, Mario Limonciello wrote: > Currenty the latest thing run during a suspend to idle attempt is > the LPS0 `prepare_late` callback and the earliest thing is the > `resume_early` callback. > > There is a desire for the `amd-pmc` driver to suspend later in the > suspend process (ideally the very last thing), so create a callback > that it or any other driver can hook into to do this. > > Signed-off-by: Mario Limonciello <mario.limonciello@amd.com> > --- > drivers/acpi/x86/s2idle.c | 76 ++++++++++++++++++++++++++++++++++++++- > include/linux/acpi.h | 9 ++++- > 2 files changed, 83 insertions(+), 2 deletions(-) > > diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c > index abc06e7f89d8..652dc2d75458 100644 > --- a/drivers/acpi/x86/s2idle.c > +++ b/drivers/acpi/x86/s2idle.c > @@ -86,6 +86,16 @@ struct lpi_device_constraint_amd { > int min_dstate; > }; > > +struct lps0_callback_handler { > + struct list_head list_node; > + int (*prepare_late_callback)(void *context); > + void (*restore_early_callback)(void *context); > + void *context; > +}; Maybe put this in acpi.h ... 
> + > +static LIST_HEAD(lps0_callback_handler_head); > +static DEFINE_MUTEX(lps0_callback_handler_mutex); > + > static struct lpi_constraints *lpi_constraints_table; > static int lpi_constraints_table_size; > static int rev_id; > @@ -444,6 +454,9 @@ static struct acpi_scan_handler lps0_handler = { > > int acpi_s2idle_prepare_late(void) > { > + struct lps0_callback_handler *handler; > + int rc = 0; > + > if (!lps0_device_handle || sleep_no_lps0) > return 0; > > @@ -474,14 +487,31 @@ int acpi_s2idle_prepare_late(void) > acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, > lps0_dsm_func_mask_microsoft, > lps0_dsm_guid_microsoft); > } > - return 0; > + > + mutex_lock(&lps0_callback_handler_mutex); > + list_for_each_entry(handler, &lps0_callback_handler_head, list_node) { > + rc = handler->prepare_late_callback(handler->context); > + if (rc) > + goto out; > + } > +out: > + mutex_unlock(&lps0_callback_handler_mutex); > + > + return rc; > } > > void acpi_s2idle_restore_early(void) > { > + struct lps0_callback_handler *handler; > + > if (!lps0_device_handle || sleep_no_lps0) > return; > > + mutex_lock(&lps0_callback_handler_mutex); > + list_for_each_entry(handler, &lps0_callback_handler_head, list_node) > + handler->restore_early_callback(handler->context); > + mutex_unlock(&lps0_callback_handler_mutex); > + > /* Modern standby exit */ > if (lps0_dsm_func_mask_microsoft > 0) > acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, > @@ -524,4 +554,48 @@ void acpi_s2idle_setup(void) > s2idle_set_ops(&acpi_s2idle_ops_lps0); > } > > +int acpi_register_lps0_callbacks(int (*prepare_late)(void *context), > + void (*restore_early)(void *context), > + void *context) ... and just have "struct lps0_callback_handler *handler" be the argument here. 
David > +{ > + struct lps0_callback_handler *handler; > + > + if (!lps0_device_handle || sleep_no_lps0) > + return -ENODEV; > + > + handler = kmalloc(sizeof(*handler), GFP_KERNEL); > + if (!handler) > + return -ENOMEM; > + handler->prepare_late_callback = prepare_late; > + handler->restore_early_callback = restore_early; > + handler->context = context; > + > + mutex_lock(&lps0_callback_handler_mutex); > + list_add(&handler->list_node, &lps0_callback_handler_head); > + mutex_unlock(&lps0_callback_handler_mutex); > + > + return 0; > +} > +EXPORT_SYMBOL_GPL(acpi_register_lps0_callbacks); > + > +void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context), > + void (*restore_early)(void *context), > + void *context) > +{ > + struct lps0_callback_handler *handler; > + > + mutex_lock(&lps0_callback_handler_mutex); > + list_for_each_entry(handler, &lps0_callback_handler_head, list_node) { > + if (handler->prepare_late_callback == prepare_late && > + handler->restore_early_callback == restore_early && > + handler->context == context) { > + list_del(&handler->list_node); > + kfree(handler); > + break; > + } > + } > + mutex_unlock(&lps0_callback_handler_mutex); > +} > +EXPORT_SYMBOL_GPL(acpi_unregister_lps0_callbacks); > + > #endif /* CONFIG_SUSPEND */ > diff --git a/include/linux/acpi.h b/include/linux/acpi.h > index 6274758648e3..cae0fde309f2 100644 > --- a/include/linux/acpi.h > +++ b/include/linux/acpi.h > @@ -1023,7 +1023,14 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 > sleep_state, > > acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, > u32 val_a, u32 val_b); > - > +#ifdef CONFIG_X86 > +int acpi_register_lps0_callbacks(int (*prepare_late)(void *context), > + void (*restore_early)(void *context), > + void *context); > +void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context), > + void (*restore_early)(void *context), > + void *context); > +#endif /* CONFIG_X86 */ > #ifndef CONFIG_IA64 > void 
arch_reserve_mem_area(acpi_physical_address addr, size_t size); > #else
[Public] > On Thu, 2022-03-10 at 09:17 -0600, Mario Limonciello wrote: > > Currenty the latest thing run during a suspend to idle attempt is > > the LPS0 `prepare_late` callback and the earliest thing is the > > `resume_early` callback. > > > > There is a desire for the `amd-pmc` driver to suspend later in the > > suspend process (ideally the very last thing), so create a callback > > that it or any other driver can hook into to do this. > > > > Signed-off-by: Mario Limonciello <mario.limonciello@amd.com> > > --- > > drivers/acpi/x86/s2idle.c | 76 > ++++++++++++++++++++++++++++++++++++++- > > include/linux/acpi.h | 9 ++++- > > 2 files changed, 83 insertions(+), 2 deletions(-) > > > > diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c > > index abc06e7f89d8..652dc2d75458 100644 > > --- a/drivers/acpi/x86/s2idle.c > > +++ b/drivers/acpi/x86/s2idle.c > > @@ -86,6 +86,16 @@ struct lpi_device_constraint_amd { > > int min_dstate; > > }; > > > > +struct lps0_callback_handler { > > + struct list_head list_node; > > + int (*prepare_late_callback)(void *context); > > + void (*restore_early_callback)(void *context); > > + void *context; > > +}; > > Maybe put this in acpi.h Wonderful suggestion, thanks! I'll adopt this for v2 after some other feedback comes in on the approach. > > ... 
> > > > + > > +static LIST_HEAD(lps0_callback_handler_head); > > +static DEFINE_MUTEX(lps0_callback_handler_mutex); > > + > > static struct lpi_constraints *lpi_constraints_table; > > static int lpi_constraints_table_size; > > static int rev_id; > > @@ -444,6 +454,9 @@ static struct acpi_scan_handler lps0_handler = { > > > > int acpi_s2idle_prepare_late(void) > > { > > + struct lps0_callback_handler *handler; > > + int rc = 0; > > + > > if (!lps0_device_handle || sleep_no_lps0) > > return 0; > > > > @@ -474,14 +487,31 @@ int acpi_s2idle_prepare_late(void) > > acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, > > lps0_dsm_func_mask_microsoft, > > lps0_dsm_guid_microsoft); > > } > > - return 0; > > + > > + mutex_lock(&lps0_callback_handler_mutex); > > + list_for_each_entry(handler, &lps0_callback_handler_head, > list_node) { > > + rc = handler->prepare_late_callback(handler->context); > > + if (rc) > > + goto out; > > + } > > +out: > > + mutex_unlock(&lps0_callback_handler_mutex); > > + > > + return rc; > > } > > > > void acpi_s2idle_restore_early(void) > > { > > + struct lps0_callback_handler *handler; > > + > > if (!lps0_device_handle || sleep_no_lps0) > > return; > > > > + mutex_lock(&lps0_callback_handler_mutex); > > + list_for_each_entry(handler, &lps0_callback_handler_head, > list_node) > > + handler->restore_early_callback(handler->context); > > + mutex_unlock(&lps0_callback_handler_mutex); > > + > > /* Modern standby exit */ > > if (lps0_dsm_func_mask_microsoft > 0) > > acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, > > @@ -524,4 +554,48 @@ void acpi_s2idle_setup(void) > > s2idle_set_ops(&acpi_s2idle_ops_lps0); > > } > > > > +int acpi_register_lps0_callbacks(int (*prepare_late)(void *context), > > + void (*restore_early)(void *context), > > + void *context) > > ... and just have "struct lps0_callback_handler *handler" be the argument > here. 
> > David > > > +{ > > + struct lps0_callback_handler *handler; > > + > > + if (!lps0_device_handle || sleep_no_lps0) > > + return -ENODEV; > > + > > + handler = kmalloc(sizeof(*handler), GFP_KERNEL); > > + if (!handler) > > + return -ENOMEM; > > + handler->prepare_late_callback = prepare_late; > > + handler->restore_early_callback = restore_early; > > + handler->context = context; > > + > > + mutex_lock(&lps0_callback_handler_mutex); > > + list_add(&handler->list_node, &lps0_callback_handler_head); > > + mutex_unlock(&lps0_callback_handler_mutex); > > + > > + return 0; > > +} > > +EXPORT_SYMBOL_GPL(acpi_register_lps0_callbacks); > > + > > +void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context), > > + void (*restore_early)(void *context), > > + void *context) > > +{ > > + struct lps0_callback_handler *handler; > > + > > + mutex_lock(&lps0_callback_handler_mutex); > > + list_for_each_entry(handler, &lps0_callback_handler_head, > list_node) { > > + if (handler->prepare_late_callback == prepare_late && > > + handler->restore_early_callback == restore_early && > > + handler->context == context) { > > + list_del(&handler->list_node); > > + kfree(handler); > > + break; > > + } > > + } > > + mutex_unlock(&lps0_callback_handler_mutex); > > +} > > +EXPORT_SYMBOL_GPL(acpi_unregister_lps0_callbacks); > > + > > #endif /* CONFIG_SUSPEND */ > > diff --git a/include/linux/acpi.h b/include/linux/acpi.h > > index 6274758648e3..cae0fde309f2 100644 > > --- a/include/linux/acpi.h > > +++ b/include/linux/acpi.h > > @@ -1023,7 +1023,14 @@ void acpi_os_set_prepare_extended_sleep(int > (*func)(u8 > > sleep_state, > > > > acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, > > u32 val_a, u32 val_b); > > - > > +#ifdef CONFIG_X86 > > +int acpi_register_lps0_callbacks(int (*prepare_late)(void *context), > > + void (*restore_early)(void *context), > > + void *context); > > +void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context), > > + void 
(*restore_early)(void *context), > > + void *context); > > +#endif /* CONFIG_X86 */ > > #ifndef CONFIG_IA64 > > void arch_reserve_mem_area(acpi_physical_address addr, size_t size); > > #else
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c index abc06e7f89d8..652dc2d75458 100644 --- a/drivers/acpi/x86/s2idle.c +++ b/drivers/acpi/x86/s2idle.c @@ -86,6 +86,16 @@ struct lpi_device_constraint_amd { int min_dstate; }; +struct lps0_callback_handler { + struct list_head list_node; + int (*prepare_late_callback)(void *context); + void (*restore_early_callback)(void *context); + void *context; +}; + +static LIST_HEAD(lps0_callback_handler_head); +static DEFINE_MUTEX(lps0_callback_handler_mutex); + static struct lpi_constraints *lpi_constraints_table; static int lpi_constraints_table_size; static int rev_id; @@ -444,6 +454,9 @@ static struct acpi_scan_handler lps0_handler = { int acpi_s2idle_prepare_late(void) { + struct lps0_callback_handler *handler; + int rc = 0; + if (!lps0_device_handle || sleep_no_lps0) return 0; @@ -474,14 +487,31 @@ int acpi_s2idle_prepare_late(void) acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); } - return 0; + + mutex_lock(&lps0_callback_handler_mutex); + list_for_each_entry(handler, &lps0_callback_handler_head, list_node) { + rc = handler->prepare_late_callback(handler->context); + if (rc) + goto out; + } +out: + mutex_unlock(&lps0_callback_handler_mutex); + + return rc; } void acpi_s2idle_restore_early(void) { + struct lps0_callback_handler *handler; + if (!lps0_device_handle || sleep_no_lps0) return; + mutex_lock(&lps0_callback_handler_mutex); + list_for_each_entry(handler, &lps0_callback_handler_head, list_node) + handler->restore_early_callback(handler->context); + mutex_unlock(&lps0_callback_handler_mutex); + /* Modern standby exit */ if (lps0_dsm_func_mask_microsoft > 0) acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, @@ -524,4 +554,48 @@ void acpi_s2idle_setup(void) s2idle_set_ops(&acpi_s2idle_ops_lps0); } +int acpi_register_lps0_callbacks(int (*prepare_late)(void *context), + void (*restore_early)(void *context), + void *context) +{ + struct 
lps0_callback_handler *handler; + + if (!lps0_device_handle || sleep_no_lps0) + return -ENODEV; + + handler = kmalloc(sizeof(*handler), GFP_KERNEL); + if (!handler) + return -ENOMEM; + handler->prepare_late_callback = prepare_late; + handler->restore_early_callback = restore_early; + handler->context = context; + + mutex_lock(&lps0_callback_handler_mutex); + list_add(&handler->list_node, &lps0_callback_handler_head); + mutex_unlock(&lps0_callback_handler_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_register_lps0_callbacks); + +void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context), + void (*restore_early)(void *context), + void *context) +{ + struct lps0_callback_handler *handler; + + mutex_lock(&lps0_callback_handler_mutex); + list_for_each_entry(handler, &lps0_callback_handler_head, list_node) { + if (handler->prepare_late_callback == prepare_late && + handler->restore_early_callback == restore_early && + handler->context == context) { + list_del(&handler->list_node); + kfree(handler); + break; + } + } + mutex_unlock(&lps0_callback_handler_mutex); +} +EXPORT_SYMBOL_GPL(acpi_unregister_lps0_callbacks); + #endif /* CONFIG_SUSPEND */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 6274758648e3..cae0fde309f2 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1023,7 +1023,14 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b); - +#ifdef CONFIG_X86 +int acpi_register_lps0_callbacks(int (*prepare_late)(void *context), + void (*restore_early)(void *context), + void *context); +void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context), + void (*restore_early)(void *context), + void *context); +#endif /* CONFIG_X86 */ #ifndef CONFIG_IA64 void arch_reserve_mem_area(acpi_physical_address addr, size_t size); #else
Currently the latest thing run during a suspend to idle attempt is the LPS0 `prepare_late` callback and the earliest thing is the `resume_early` callback. There is a desire for the `amd-pmc` driver to suspend later in the suspend process (ideally the very last thing), so create a callback that it or any other driver can hook into to do this. Signed-off-by: Mario Limonciello <mario.limonciello@amd.com> --- drivers/acpi/x86/s2idle.c | 76 ++++++++++++++++++++++++++++++++++++++- include/linux/acpi.h | 9 ++++- 2 files changed, 83 insertions(+), 2 deletions(-)