| Message ID | 20220426172346.3508411-9-cezary.rojewski@intel.com (mailing list archive) |
|---|---|
| State | Superseded |
| Series | ASoC: Intel: avs: Driver core and PCM operations |
On 4/26/22 12:23, Cezary Rojewski wrote: > Audio DSP device supports D0 substates in form of D0ix, allowing for > preserving more power even when device is still considered active (D0). > When entered, certain domains which are not being currently used become > power gated. Entering and leaving D0ix is a complex process and differs > between firmware generations. > > Conditions that disallow D0i3 and require immediate D0i0 transition > include but may not be limited to: IPC traffic, firmware tracing and > SRAM I/O. To make D0ix toggling sane, delay D0i3 transition and refresh > the timer each time an IPC is requrested. typo: requested. I find it odd to list all kinds of criteria but only handle one in the end. Do the other matter, is this a TODO, unclear what you are trying to say. > int avs_get_module_entry(struct avs_dev *adev, const guid_t *uuid, struct avs_module_entry *entry); > diff --git a/sound/soc/intel/avs/dsp.c b/sound/soc/intel/avs/dsp.c > index 3ff17bd22a5a..2f18b137ff42 100644 > --- a/sound/soc/intel/avs/dsp.c > +++ b/sound/soc/intel/avs/dsp.c > @@ -152,6 +152,15 @@ static int avs_dsp_get_core(struct avs_dev *adev, u32 core_id) > > adev->core_refs[core_id]++; > if (adev->core_refs[core_id] == 1) { > + /* > + * No cores other than main-core can be running for DSP > + * to achieve d0ix. Conscious SET_D0IX IPC failure is permitted, conscious failure? what's that? > + * simply d0ix power state will no longer be attempted. > + */ > + ret = avs_dsp_disable_d0ix(adev); > + if (ret && ret != -AVS_EIPC) > + goto err_disable_d0ix; > + > ret = avs_dsp_enable(adev, mask); > if (ret) > goto err_enable_dsp; tatic int > +avs_dsp_set_d0ix(struct avs_dev *adev, bool enable) > +{ > + struct avs_ipc *ipc = adev->ipc; > + int ret; > + > + /* Is transition required? */ > + if (ipc->in_d0ix == enable) > + return 0; > + > + ret = avs_dsp_op(adev, set_d0ix, enable); > + if (ret) { > + /* Prevent further d0ix attempts on conscious IPC failure. */ ?? > + if (ret == -AVS_EIPC) > + atomic_inc(&ipc->d0ix_disable_depth); > + > + ipc->in_d0ix = false; > + return ret; > + } > + > + ipc->in_d0ix = enable; > + return 0; > +} > + > +static void avs_dsp_schedule_d0ix(struct avs_dev *adev, struct avs_ipc_msg *tx) > +{ > + if (atomic_read(&adev->ipc->d0ix_disable_depth)) > + return; > + > + mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work, > + msecs_to_jiffies(AVS_D0IX_DELAY_MS)); > +} > + > +static void avs_dsp_d0ix_work(struct work_struct *work) > +{ > + struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work); > + > + avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true); > +} > + > +static int avs_dsp_wake_d0i0(struct avs_dev *adev, struct avs_ipc_msg *tx) > +{ > + struct avs_ipc *ipc = adev->ipc; > + > + if (!atomic_read(&ipc->d0ix_disable_depth)) { > + cancel_delayed_work_sync(&ipc->d0ix_work); > + return avs_dsp_set_d0ix(adev, false); > + } > + > + return 0; > +} > + > +int avs_dsp_disable_d0ix(struct avs_dev *adev) > +{ > + struct avs_ipc *ipc = adev->ipc; > + > + /* Prevent PG only on the first disable. 
*/ > + if (atomic_add_return(1, &ipc->d0ix_disable_depth) == 1) { > + cancel_delayed_work_sync(&ipc->d0ix_work); > + return avs_dsp_set_d0ix(adev, false); > + } > + > + return 0; > +} > + > +int avs_dsp_enable_d0ix(struct avs_dev *adev) > +{ > + struct avs_ipc *ipc = adev->ipc; > + > + if (atomic_dec_and_test(&ipc->d0ix_disable_depth)) > + queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work, > + msecs_to_jiffies(AVS_D0IX_DELAY_MS)); > + return 0; > +} > > static void avs_dsp_recovery(struct avs_dev *adev) > { > @@ -391,10 +467,35 @@ static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request > return ret; > } > > +static int avs_dsp_send_msg_sequence(struct avs_dev *adev, struct avs_ipc_msg *request, > + struct avs_ipc_msg *reply, int timeout, bool wake_d0i0, > + bool schedule_d0ix) > +{ > + int ret; > + > + if (wake_d0i0) { > + ret = avs_dsp_wake_d0i0(adev, request); > + if (ret) > + return ret; > + } > + > + ret = avs_dsp_do_send_msg(adev, request, reply, timeout); > + if (ret) > + return ret; > + > + if (schedule_d0ix) > + avs_dsp_schedule_d0ix(adev, request); > + > + return 0; > +} > + > int avs_dsp_send_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, > struct avs_ipc_msg *reply, int timeout) > { > - return avs_dsp_do_send_msg(adev, request, reply, timeout); > + bool wake_d0i0 = avs_dsp_op(adev, d0ix_toggle, request, true); > + bool schedule_d0ix = avs_dsp_op(adev, d0ix_toggle, request, false); > + > + return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, schedule_d0ix); > } > > int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request, > @@ -403,6 +504,19 @@ int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request, > return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms); > } > > +int avs_dsp_send_pm_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, > + struct avs_ipc_msg *reply, int timeout, bool wake_d0i0) > +{ > + return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, false); > +} > + > +int avs_dsp_send_pm_msg(struct avs_dev *adev, struct avs_ipc_msg *request, > + struct avs_ipc_msg *reply, bool wake_d0i0) > +{ > + return avs_dsp_send_pm_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms, > + wake_d0i0); > +} > + > static int avs_dsp_do_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout) > { > struct avs_ipc *ipc = adev->ipc; > @@ -463,6 +577,7 @@ int avs_ipc_init(struct avs_ipc *ipc, struct device *dev) > ipc->ready = false; > ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS; > INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work); > + INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work); > init_completion(&ipc->done_completion); > init_completion(&ipc->busy_completion); > spin_lock_init(&ipc->rx_lock); > @@ -475,4 +590,6 @@ void avs_ipc_block(struct avs_ipc *ipc) > { > ipc->ready = false; > cancel_work_sync(&ipc->recovery_work); > + cancel_delayed_work_sync(&ipc->d0ix_work); > + ipc->in_d0ix = false; > } > diff --git a/sound/soc/intel/avs/messages.c b/sound/soc/intel/avs/messages.c > index 3da33150aabf..6404fce8cde4 100644 > --- a/sound/soc/intel/avs/messages.c > +++ b/sound/soc/intel/avs/messages.c > @@ -432,7 +432,7 @@ int avs_ipc_set_dx(struct avs_dev *adev, u32 core_mask, bool powerup) > request.data = &dx; > request.size = sizeof(dx); > > - ret = avs_dsp_send_msg(adev, &request, NULL); > + ret = avs_dsp_send_pm_msg(adev, &request, NULL, true); > if (ret) > avs_ipc_err(adev, &request, 
"set dx", ret); > > @@ -456,7 +456,7 @@ int avs_ipc_set_d0ix(struct avs_dev *adev, bool enable_pg, bool streaming) > > request.header = msg.val; > > - ret = avs_dsp_send_msg(adev, &request, NULL); > + ret = avs_dsp_send_pm_msg(adev, &request, NULL, false); > if (ret) > avs_ipc_err(adev, &request, "set d0ix", ret); >
On 2022-04-26 11:58 PM, Pierre-Louis Bossart wrote: > On 4/26/22 12:23, Cezary Rojewski wrote: >> Audio DSP device supports D0 substates in form of D0ix, allowing for >> preserving more power even when device is still considered active (D0). >> When entered, certain domains which are not being currently used become >> power gated. Entering and leaving D0ix is a complex process and differs >> between firmware generations. >> >> Conditions that disallow D0i3 and require immediate D0i0 transition >> include but may not be limited to: IPC traffic, firmware tracing and >> SRAM I/O. To make D0ix toggling sane, delay D0i3 transition and refresh >> the timer each time an IPC is requrested. > > typo: requested. Ack. > I find it odd to list all kinds of criteria but only handle one in the end. Do the other matter, is this a TODO, unclear what you are trying to say. Good question. Firmware tracing code is part of debugfs.c file which has not yet been shared. But all other usages, not listed here, come down to invoking enable_d0ix() or disable_d0ix() whenever given operation blocks DSP from transitioning to D0iX. Other usages such as directly accessing SRAM (outside of IPC handling) is non-existant in the avs-driver. When IPCs, most firmware generations take care of toggling d0ix for you. >> int avs_get_module_entry(struct avs_dev *adev, const guid_t *uuid, struct avs_module_entry *entry); >> diff --git a/sound/soc/intel/avs/dsp.c b/sound/soc/intel/avs/dsp.c >> index 3ff17bd22a5a..2f18b137ff42 100644 >> --- a/sound/soc/intel/avs/dsp.c >> +++ b/sound/soc/intel/avs/dsp.c >> @@ -152,6 +152,15 @@ static int avs_dsp_get_core(struct avs_dev *adev, u32 core_id) >> >> adev->core_refs[core_id]++; >> if (adev->core_refs[core_id] == 1) { >> + /* >> + * No cores other than main-core can be running for DSP >> + * to achieve d0ix. Conscious SET_D0IX IPC failure is permitted, > > conscious failure? what's that? Any IPC failure which does not end in firmware throwing an exception or failing to deliver the response (IPC timeout). Sending response with status=<some error> is still a valid response. >> + * simply d0ix power state will no longer be attempted. >> + */ >> + ret = avs_dsp_disable_d0ix(adev); >> + if (ret && ret != -AVS_EIPC) >> + goto err_disable_d0ix; >> + >> ret = avs_dsp_enable(adev, mask); >> if (ret) >> goto err_enable_dsp; > tatic int >> +avs_dsp_set_d0ix(struct avs_dev *adev, bool enable) >> +{ >> + struct avs_ipc *ipc = adev->ipc; >> + int ret; >> + >> + /* Is transition required? */ >> + if (ipc->in_d0ix == enable) >> + return 0; >> + >> + ret = avs_dsp_op(adev, set_d0ix, enable); >> + if (ret) { >> + /* Prevent further d0ix attempts on conscious IPC failure. */ > > ?? Same as above but as I'm not sure whether '??' relates to comment above or the usage of 'conscious' word, I'll add to that: To improve user-experience, we block any d0ix further d0ix attempts if even one SET_D0IX IPC fails. Audio can be streamed just fine without d0ix substate albeit it might not be as power efficient as with transition enabled. >> + if (ret == -AVS_EIPC) >> + atomic_inc(&ipc->d0ix_disable_depth); >> + >> + ipc->in_d0ix = false; >> + return ret; >> + } >> + >> + ipc->in_d0ix = enable; >> + return 0; >> +}
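In practice, the convention described above — bracket any operation that must keep the DSP in D0i0 with avs_dsp_disable_d0ix()/avs_dsp_enable_d0ix() and tolerate a "conscious" -AVS_EIPC result — comes down to the minimal sketch below. It only illustrates the calling pattern used by this patch (compare avs_dsp_get_core()); do_blocking_work() is a hypothetical placeholder, not part of the series.

```c
/*
 * Usage sketch only — assumes the driver's internal avs.h (struct avs_dev,
 * AVS_EIPC, the d0ix helpers added by this patch). do_blocking_work() is a
 * hypothetical placeholder for whatever operation is incompatible with D0ix.
 */
static int do_blocking_work(struct avs_dev *adev);

static int example_blocking_op(struct avs_dev *adev)
{
	int ret;

	/* First caller cancels the pending d0ix work and forces D0i0. */
	ret = avs_dsp_disable_d0ix(adev);
	/* A "conscious" SET_D0IX failure only stops further d0ix attempts. */
	if (ret && ret != -AVS_EIPC)
		return ret;

	ret = do_blocking_work(adev);

	/* Last caller re-arms the delayed transition back to d0ix. */
	avs_dsp_enable_d0ix(adev);
	return ret;
}
```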
On 2022-04-29 4:19 PM, Cezary Rojewski wrote: > On 2022-04-26 11:58 PM, Pierre-Louis Bossart wrote: >> On 4/26/22 12:23, Cezary Rojewski wrote: >>> Audio DSP device supports D0 substates in form of D0ix, allowing for >>> preserving more power even when device is still considered active (D0). >>> When entered, certain domains which are not being currently used become >>> power gated. Entering and leaving D0ix is a complex process and differs >>> between firmware generations. >>> >>> Conditions that disallow D0i3 and require immediate D0i0 transition >>> include but may not be limited to: IPC traffic, firmware tracing and >>> SRAM I/O. To make D0ix toggling sane, delay D0i3 transition and refresh >>> the timer each time an IPC is requrested. >> >> typo: requested. > > Ack. > >> I find it odd to list all kinds of criteria but only handle one in the >> end. Do the other matter, is this a TODO, unclear what you are trying >> to say. > > > Good question. Firmware tracing code is part of debugfs.c file which has > not yet been shared. But all other usages, not listed here, come down to > invoking enable_d0ix() or disable_d0ix() whenever given operation blocks > DSP from transitioning to D0iX. > > Other usages such as directly accessing SRAM (outside of IPC handling) > is non-existant in the avs-driver. When IPCs, most firmware generations > take care of toggling d0ix for you. Sorry for the million typos. In the last paragraph what I meant is: directly accessing SRAM is a separate case, that is, when it is done outside of the IPC protocol. We do not do that in the avs-driver. Why is the IPC protocol 'so special'? Most firmware generations take care of toggling D0iX for the software, so there is no need to disable the transition, read the reply from SRAM, do whatever else is necessary and re-enable it. Note: this is not true for all generations :) Regardless of the firmware generation used, the software should be smart about choosing the right time for the transition. If we were to transition blindly after every single IPC, the DSP would probably end up consuming more power than if no D0iX request were ever sent. Regards, Czarek
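The "delay and refresh" behaviour described here is the usual delayed-work debounce: every completed IPC pushes the D0ix deadline back, and the transition only fires after an idle window. Below is a generic, self-contained sketch of that pattern; the my_-prefixed names are illustrative and not the driver's actual code, though the 300 ms idle window matches the AVS_D0IX_DELAY_MS value used in the patch.

```c
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define MY_D0IX_DELAY_MS	300	/* same idle window as AVS_D0IX_DELAY_MS */

struct my_ipc {
	struct delayed_work d0ix_work;
};

/* Platform-specific SET_D0IX request would go here. */
static void my_enter_d0ix(struct my_ipc *ipc)
{
}

static void my_d0ix_work(struct work_struct *work)
{
	struct my_ipc *ipc = container_of(work, struct my_ipc, d0ix_work.work);

	/* No IPC traffic for MY_D0IX_DELAY_MS, safe to drop to D0ix now. */
	my_enter_d0ix(ipc);
}

static void my_ipc_init(struct my_ipc *ipc)
{
	INIT_DELAYED_WORK(&ipc->d0ix_work, my_d0ix_work);
}

/*
 * Called after every successful IPC: restart the countdown instead of
 * toggling D0ix blindly around each message.
 */
static void my_schedule_d0ix(struct my_ipc *ipc)
{
	mod_delayed_work(system_power_efficient_wq, &ipc->d0ix_work,
			 msecs_to_jiffies(MY_D0IX_DELAY_MS));
}
```

The avs patch layers the atomic d0ix_disable_depth counter on top of this so that paths which must keep the DSP out of D0ix — additional cores running or, later, firmware tracing — can veto the timer until they drop their reference.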
diff --git a/sound/soc/intel/avs/avs.h b/sound/soc/intel/avs/avs.h index 917a8b06cace..c3323f90b693 100644 --- a/sound/soc/intel/avs/avs.h +++ b/sound/soc/intel/avs/avs.h @@ -22,6 +22,7 @@ struct avs_dev; struct avs_tplg; struct avs_tplg_library; struct avs_soc_component; +struct avs_ipc_msg; /* * struct avs_dsp_ops - Platform-specific DSP operations @@ -48,6 +49,8 @@ struct avs_dsp_ops { int (* const log_buffer_offset)(struct avs_dev *, u32); int (* const log_buffer_status)(struct avs_dev *, union avs_notify_msg *); int (* const coredump)(struct avs_dev *, union avs_notify_msg *); + bool (* const d0ix_toggle)(struct avs_dev *, struct avs_ipc_msg *, bool); + int (* const set_d0ix)(struct avs_dev *, bool); }; #define avs_dsp_op(adev, op, ...) \ @@ -191,6 +194,9 @@ struct avs_ipc { struct completion busy_completion; struct work_struct recovery_work; + struct delayed_work d0ix_work; + atomic_t d0ix_disable_depth; + bool in_d0ix; }; #define AVS_EIPC EREMOTEIO @@ -227,6 +233,11 @@ int avs_dsp_send_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *reply, int timeout); int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request, struct avs_ipc_msg *reply); +/* Two variants below are for messages that control DSP power states. */ +int avs_dsp_send_pm_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, + struct avs_ipc_msg *reply, int timeout, bool wake_d0i0); +int avs_dsp_send_pm_msg(struct avs_dev *adev, struct avs_ipc_msg *request, + struct avs_ipc_msg *reply, bool wake_d0i0); int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout); int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request); @@ -234,6 +245,9 @@ void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable); int avs_ipc_init(struct avs_ipc *ipc, struct device *dev); void avs_ipc_block(struct avs_ipc *ipc); +int avs_dsp_disable_d0ix(struct avs_dev *adev); +int avs_dsp_enable_d0ix(struct avs_dev *adev); + /* Firmware resources management */ int avs_get_module_entry(struct avs_dev *adev, const guid_t *uuid, struct avs_module_entry *entry); diff --git a/sound/soc/intel/avs/dsp.c b/sound/soc/intel/avs/dsp.c index 3ff17bd22a5a..2f18b137ff42 100644 --- a/sound/soc/intel/avs/dsp.c +++ b/sound/soc/intel/avs/dsp.c @@ -152,6 +152,15 @@ static int avs_dsp_get_core(struct avs_dev *adev, u32 core_id) adev->core_refs[core_id]++; if (adev->core_refs[core_id] == 1) { + /* + * No cores other than main-core can be running for DSP + * to achieve d0ix. Conscious SET_D0IX IPC failure is permitted, + * simply d0ix power state will no longer be attempted. + */ + ret = avs_dsp_disable_d0ix(adev); + if (ret && ret != -AVS_EIPC) + goto err_disable_d0ix; + ret = avs_dsp_enable(adev, mask); if (ret) goto err_enable_dsp; @@ -160,6 +169,8 @@ static int avs_dsp_get_core(struct avs_dev *adev, u32 core_id) return 0; err_enable_dsp: + avs_dsp_enable_d0ix(adev); +err_disable_d0ix: adev->core_refs[core_id]--; err: dev_err(adev->dev, "get core %d failed: %d\n", core_id, ret); @@ -185,6 +196,9 @@ static int avs_dsp_put_core(struct avs_dev *adev, u32 core_id) ret = avs_dsp_disable(adev, mask); if (ret) goto err; + + /* Match disable_d0ix in avs_dsp_get_core(). 
*/ + avs_dsp_enable_d0ix(adev); } return 0; diff --git a/sound/soc/intel/avs/ipc.c b/sound/soc/intel/avs/ipc.c index b535bbb5953a..0820d8f93c7c 100644 --- a/sound/soc/intel/avs/ipc.c +++ b/sound/soc/intel/avs/ipc.c @@ -13,6 +13,82 @@ #include "registers.h" #define AVS_IPC_TIMEOUT_MS 300 +#define AVS_D0IX_DELAY_MS 300 + +static int +avs_dsp_set_d0ix(struct avs_dev *adev, bool enable) +{ + struct avs_ipc *ipc = adev->ipc; + int ret; + + /* Is transition required? */ + if (ipc->in_d0ix == enable) + return 0; + + ret = avs_dsp_op(adev, set_d0ix, enable); + if (ret) { + /* Prevent further d0ix attempts on conscious IPC failure. */ + if (ret == -AVS_EIPC) + atomic_inc(&ipc->d0ix_disable_depth); + + ipc->in_d0ix = false; + return ret; + } + + ipc->in_d0ix = enable; + return 0; +} + +static void avs_dsp_schedule_d0ix(struct avs_dev *adev, struct avs_ipc_msg *tx) +{ + if (atomic_read(&adev->ipc->d0ix_disable_depth)) + return; + + mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work, + msecs_to_jiffies(AVS_D0IX_DELAY_MS)); +} + +static void avs_dsp_d0ix_work(struct work_struct *work) +{ + struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work); + + avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true); +} + +static int avs_dsp_wake_d0i0(struct avs_dev *adev, struct avs_ipc_msg *tx) +{ + struct avs_ipc *ipc = adev->ipc; + + if (!atomic_read(&ipc->d0ix_disable_depth)) { + cancel_delayed_work_sync(&ipc->d0ix_work); + return avs_dsp_set_d0ix(adev, false); + } + + return 0; +} + +int avs_dsp_disable_d0ix(struct avs_dev *adev) +{ + struct avs_ipc *ipc = adev->ipc; + + /* Prevent PG only on the first disable. */ + if (atomic_add_return(1, &ipc->d0ix_disable_depth) == 1) { + cancel_delayed_work_sync(&ipc->d0ix_work); + return avs_dsp_set_d0ix(adev, false); + } + + return 0; +} + +int avs_dsp_enable_d0ix(struct avs_dev *adev) +{ + struct avs_ipc *ipc = adev->ipc; + + if (atomic_dec_and_test(&ipc->d0ix_disable_depth)) + queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work, + msecs_to_jiffies(AVS_D0IX_DELAY_MS)); + return 0; +} static void avs_dsp_recovery(struct avs_dev *adev) { @@ -391,10 +467,35 @@ static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request return ret; } +static int avs_dsp_send_msg_sequence(struct avs_dev *adev, struct avs_ipc_msg *request, + struct avs_ipc_msg *reply, int timeout, bool wake_d0i0, + bool schedule_d0ix) +{ + int ret; + + if (wake_d0i0) { + ret = avs_dsp_wake_d0i0(adev, request); + if (ret) + return ret; + } + + ret = avs_dsp_do_send_msg(adev, request, reply, timeout); + if (ret) + return ret; + + if (schedule_d0ix) + avs_dsp_schedule_d0ix(adev, request); + + return 0; +} + int avs_dsp_send_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, struct avs_ipc_msg *reply, int timeout) { - return avs_dsp_do_send_msg(adev, request, reply, timeout); + bool wake_d0i0 = avs_dsp_op(adev, d0ix_toggle, request, true); + bool schedule_d0ix = avs_dsp_op(adev, d0ix_toggle, request, false); + + return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, schedule_d0ix); } int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request, @@ -403,6 +504,19 @@ int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request, return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms); } +int avs_dsp_send_pm_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, + struct avs_ipc_msg *reply, int timeout, bool wake_d0i0) +{ + return avs_dsp_send_msg_sequence(adev, request, 
reply, timeout, wake_d0i0, false); +} + +int avs_dsp_send_pm_msg(struct avs_dev *adev, struct avs_ipc_msg *request, + struct avs_ipc_msg *reply, bool wake_d0i0) +{ + return avs_dsp_send_pm_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms, + wake_d0i0); +} + static int avs_dsp_do_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout) { struct avs_ipc *ipc = adev->ipc; @@ -463,6 +577,7 @@ int avs_ipc_init(struct avs_ipc *ipc, struct device *dev) ipc->ready = false; ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS; INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work); + INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work); init_completion(&ipc->done_completion); init_completion(&ipc->busy_completion); spin_lock_init(&ipc->rx_lock); @@ -475,4 +590,6 @@ void avs_ipc_block(struct avs_ipc *ipc) { ipc->ready = false; cancel_work_sync(&ipc->recovery_work); + cancel_delayed_work_sync(&ipc->d0ix_work); + ipc->in_d0ix = false; } diff --git a/sound/soc/intel/avs/messages.c b/sound/soc/intel/avs/messages.c index 3da33150aabf..6404fce8cde4 100644 --- a/sound/soc/intel/avs/messages.c +++ b/sound/soc/intel/avs/messages.c @@ -432,7 +432,7 @@ int avs_ipc_set_dx(struct avs_dev *adev, u32 core_mask, bool powerup) request.data = &dx; request.size = sizeof(dx); - ret = avs_dsp_send_msg(adev, &request, NULL); + ret = avs_dsp_send_pm_msg(adev, &request, NULL, true); if (ret) avs_ipc_err(adev, &request, "set dx", ret); @@ -456,7 +456,7 @@ int avs_ipc_set_d0ix(struct avs_dev *adev, bool enable_pg, bool streaming) request.header = msg.val; - ret = avs_dsp_send_msg(adev, &request, NULL); + ret = avs_dsp_send_pm_msg(adev, &request, NULL, false); if (ret) avs_ipc_err(adev, &request, "set d0ix", ret);