
[08/10] ARM: OMAP5/DRA7: PM: cpuidle MPU CSWR support

Message ID 1408716154-26101-9-git-send-email-nm@ti.com (mailing list archive)
State New, archived

Commit Message

Nishanth Menon Aug. 22, 2014, 2:02 p.m. UTC
From: Santosh Shilimkar <santosh.shilimkar@ti.com>

Add OMAP5/DRA74/72 CPUIDLE support.

This patch adds MPUSS low power states in cpuidle.

        C1 - CPU0 WFI + CPU1 WFI + MPU ON
        C2 - CPU0 RET + CPU1 RET + MPU CSWR

Tested on DRA74/72-EVM for C1 and C2 states.

NOTE: DRA7 does not do voltage scaling as part of retention transition
and has Mercury which speeds up transition paths - Latency numbers are
based on measurements done by toggling GPIOs.

Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
[ j-keerthy@ti.com rework on 3.14]
Signed-off-by: Keerthy <j-keerthy@ti.com>
[nm@ti.com: updates based on profiling, OMAP5 squashed]
Signed-off-by: Nishanth Menon <nm@ti.com>
---
 arch/arm/mach-omap2/cpuidle44xx.c |   82 ++++++++++++++++++++++++++++++++++++-
 arch/arm/mach-omap2/pm44xx.c      |    2 +-
 2 files changed, 82 insertions(+), 2 deletions(-)

Comments

Kevin Hilman Aug. 27, 2014, 7:13 p.m. UTC | #1
+ Daniel (cpuidle maintainer)

Nishanth Menon <nm@ti.com> writes:

> From: Santosh Shilimkar <santosh.shilimkar@ti.com>
>
> Add OMAP5/DRA74/72 CPUIDLE support.
>
> This patch adds MPUSS low power states in cpuidle.
>
>         C1 - CPU0 WFI + CPU1 WFI + MPU ON
>         C2 - CPU0 RET + CPU1 RET + MPU CSWR
>
> Tested on DRA74/72-EVM for C1 and C2 states.
>
> NOTE: DRA7 does not do voltage scaling as part of retention transition
> and has Mercury which speeds up transition paths - Latency numbers are
> based on measurements done by toggling GPIOs.
>
> Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
> [ j-keerthy@ti.com rework on 3.14]
> Signed-off-by: Keerthy <j-keerthy@ti.com>
> [nm@ti.com: updates based on profiling, OMAP5 squashed]
> Signed-off-by: Nishanth Menon <nm@ti.com>
> ---
>  arch/arm/mach-omap2/cpuidle44xx.c |   82 ++++++++++++++++++++++++++++++++++++-
>  arch/arm/mach-omap2/pm44xx.c      |    2 +-
>  2 files changed, 82 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
> index 2498ab0..8ad4f44 100644
> --- a/arch/arm/mach-omap2/cpuidle44xx.c
> +++ b/arch/arm/mach-omap2/cpuidle44xx.c
> @@ -22,6 +22,7 @@
>  #include "common.h"
>  #include "pm.h"
>  #include "prm.h"
> +#include "soc.h"
>  #include "clockdomain.h"
>  
>  #define MAX_CPUS	2
> @@ -31,6 +32,7 @@ struct idle_statedata {
>  	u32 cpu_state;
>  	u32 mpu_logic_state;
>  	u32 mpu_state;
> +	u32 mpu_state_vote;
>  };
>  
>  static struct idle_statedata omap4_idle_data[] = {
> @@ -51,12 +53,26 @@ static struct idle_statedata omap4_idle_data[] = {
>  	},
>  };
>  
> +static struct idle_statedata dra7_idle_data[] = {
> +	{
> +		.cpu_state = PWRDM_POWER_ON,
> +		.mpu_state = PWRDM_POWER_ON,
> +		.mpu_logic_state = PWRDM_POWER_ON,
> +	},
> +	{
> +		.cpu_state = PWRDM_POWER_RET,
> +		.mpu_state = PWRDM_POWER_RET,
> +		.mpu_logic_state = PWRDM_POWER_RET,
> +	},
> +};
> +
>  static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
>  static struct clockdomain *cpu_clkdm[MAX_CPUS];
>  
>  static atomic_t abort_barrier;
>  static bool cpu_done[MAX_CPUS];
>  static struct idle_statedata *state_ptr = &omap4_idle_data[0];
> +static DEFINE_RAW_SPINLOCK(mpu_lock);
>  
>  /* Private functions */
>  
> @@ -78,6 +94,32 @@ static int omap_enter_idle_simple(struct cpuidle_device *dev,
>  	return index;
>  }
>  
> +static int omap_enter_idle_smp(struct cpuidle_device *dev,
> +			       struct cpuidle_driver *drv,
> +			       int index)
> +{
> +	struct idle_statedata *cx = state_ptr + index;
> +	unsigned long flag;
> +
> +	raw_spin_lock_irqsave(&mpu_lock, flag);
> +	cx->mpu_state_vote++;
> +	if (cx->mpu_state_vote == num_online_cpus()) {
> +		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
> +		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
> +	}
> +	raw_spin_unlock_irqrestore(&mpu_lock, flag);
> +
> +	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
> +
> +	raw_spin_lock_irqsave(&mpu_lock, flag);
> +	if (cx->mpu_state_vote == num_online_cpus())
> +		omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
> +	cx->mpu_state_vote--;
> +	raw_spin_unlock_irqrestore(&mpu_lock, flag);
> +
> +	return index;
> +}

Hmm, maybe OMAP5/DRA7 CPUidle driver should be a new one based on MCPM?

Kevin
Nishanth Menon Aug. 27, 2014, 7:35 p.m. UTC | #2
On Wed, Aug 27, 2014 at 2:13 PM, Kevin Hilman
<khilman@deeprootsystems.com> wrote:
> + Daniel (cpuidle maintainer)
[...]
>> +static int omap_enter_idle_smp(struct cpuidle_device *dev,
>> +                            struct cpuidle_driver *drv,
>> +                            int index)
>> +{
>> +     struct idle_statedata *cx = state_ptr + index;
>> +     unsigned long flag;
>> +
>> +     raw_spin_lock_irqsave(&mpu_lock, flag);
>> +     cx->mpu_state_vote++;
>> +     if (cx->mpu_state_vote == num_online_cpus()) {
>> +             pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
>> +             omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
>> +     }
>> +     raw_spin_unlock_irqrestore(&mpu_lock, flag);
>> +
>> +     omap4_enter_lowpower(dev->cpu, cx->cpu_state);
>> +
>> +     raw_spin_lock_irqsave(&mpu_lock, flag);
>> +     if (cx->mpu_state_vote == num_online_cpus())
>> +             omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
>> +     cx->mpu_state_vote--;
>> +     raw_spin_unlock_irqrestore(&mpu_lock, flag);
>> +
>> +     return index;
>> +}
>
> Hmm, maybe OMAP5/DRA7 CPUidle driver should be a new one based on MCPM?

Trying to understand benefit of MCPM here - at least without a deeper
understanding of mcpm infrastructure benefits (first look seemed a
little heavy for OMAP5/DRA7 needs).

Neither DRA7 nor OMAP5 is multi-cluster, the SoCs are not targeted for
"OFF" of CPU1/0, and we have Mercury hardware to help with context and
sync issues.

Being able to reuse most of the existing OMAP4 infrastructure code is
useful as well, and it keeps the existing omap4 framework lighter in
complexity - especially in a cpuidle-like hot path.

The spin_lock is only there to program the MPU power domain in a
consistent manner - I suppose that might have been the trigger for
proposing mcpm?
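
To spell the voting scheme out - this is the same logic as the hunk quoted
above, only with comments added for clarity, not a separate implementation:

	raw_spin_lock_irqsave(&mpu_lock, flag);
	cx->mpu_state_vote++;			/* this CPU is now idle */
	if (cx->mpu_state_vote == num_online_cpus()) {
		/* last CPU in: program the MPU domain for CSWR */
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
	}
	raw_spin_unlock_irqrestore(&mpu_lock, flag);

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);

	raw_spin_lock_irqsave(&mpu_lock, flag);
	/* first CPU out: put the MPU domain back to ON */
	if (cx->mpu_state_vote == num_online_cpus())
		omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
	cx->mpu_state_vote--;			/* this CPU is running again */
	raw_spin_unlock_irqrestore(&mpu_lock, flag);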

---
Regards,
Nishanth Menon
Santosh Shilimkar Aug. 27, 2014, 7:41 p.m. UTC | #3
On Wednesday 27 August 2014 03:35 PM, Nishanth Menon wrote:
> On Wed, Aug 27, 2014 at 2:13 PM, Kevin Hilman
> <khilman@deeprootsystems.com> wrote:
>> + Daniel (cpuidle maintainer)
> [...]
>>> +static int omap_enter_idle_smp(struct cpuidle_device *dev,
>>> +                            struct cpuidle_driver *drv,
>>> +                            int index)
>>> +{
>>> +     struct idle_statedata *cx = state_ptr + index;
>>> +     unsigned long flag;
>>> +
>>> +     raw_spin_lock_irqsave(&mpu_lock, flag);
>>> +     cx->mpu_state_vote++;
>>> +     if (cx->mpu_state_vote == num_online_cpus()) {
>>> +             pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
>>> +             omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
>>> +     }
>>> +     raw_spin_unlock_irqrestore(&mpu_lock, flag);
>>> +
>>> +     omap4_enter_lowpower(dev->cpu, cx->cpu_state);
>>> +
>>> +     raw_spin_lock_irqsave(&mpu_lock, flag);
>>> +     if (cx->mpu_state_vote == num_online_cpus())
>>> +             omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
>>> +     cx->mpu_state_vote--;
>>> +     raw_spin_unlock_irqrestore(&mpu_lock, flag);
>>> +
>>> +     return index;
>>> +}
>>
>> Hmm, maybe OMAP5/DRA7 CPUidle driver should be a new one based on MCPM?
> 
> Trying to understand benefit of MCPM here - at least without a deeper
> understanding of mcpm infrastructure benefits (first look seemed a
> little heavy for OMAP5/DRA7 needs).
> 
> Neither DRA7/OMAP5 are multi-cluster, the SoCs are not targetted for
> "OFF" of CPU1/0, we have mercury hardware to help with context and
> sync issues.
> 
> Being able to reuse most of existing OMAP4 infrastructure code is
> useful as well to leave the existing omap4 framework as being lighter
> in complexity -esp in a cpuidle like hot path?
> 
> The spin_lock is only for the programming of MPU power domain in a
> consistent manner - I suppose might have been the trigger for
> proposing mcpm?
> 
Mostly not....

I think this is coming up because, the last time Nicolas Pitre tried to
convert the OMAP CPUidle driver to MCPM, OMAP wasn't suitable because of
various ordering requirements, and the plan was dropped.

Just to make clear, on OMAP5/DRA7 as well, the ordering requirement
remains the same for deeper states. It's just the Mercury retention state
which we are able to enter without ordering requirements, hence
the voting scheme.

Hope this clarifies it for you as well as for Kevin, just in case he missed
the part about the deeper C-state requirements.

Regards,
Santosh



Kevin Hilman Aug. 27, 2014, 8:22 p.m. UTC | #4
Santosh Shilimkar <santosh.shilimkar@ti.com> writes:

> On Wednesday 27 August 2014 03:35 PM, Nishanth Menon wrote:
>> On Wed, Aug 27, 2014 at 2:13 PM, Kevin Hilman
>> <khilman@deeprootsystems.com> wrote:
>>> + Daniel (cpuidle maintainer)
>> [...]
>>>> +static int omap_enter_idle_smp(struct cpuidle_device *dev,
>>>> +                            struct cpuidle_driver *drv,
>>>> +                            int index)
>>>> +{
>>>> +     struct idle_statedata *cx = state_ptr + index;
>>>> +     unsigned long flag;
>>>> +
>>>> +     raw_spin_lock_irqsave(&mpu_lock, flag);
>>>> +     cx->mpu_state_vote++;
>>>> +     if (cx->mpu_state_vote == num_online_cpus()) {
>>>> +             pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
>>>> +             omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
>>>> +     }
>>>> +     raw_spin_unlock_irqrestore(&mpu_lock, flag);
>>>> +
>>>> +     omap4_enter_lowpower(dev->cpu, cx->cpu_state);
>>>> +
>>>> +     raw_spin_lock_irqsave(&mpu_lock, flag);
>>>> +     if (cx->mpu_state_vote == num_online_cpus())
>>>> +             omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
>>>> +     cx->mpu_state_vote--;
>>>> +     raw_spin_unlock_irqrestore(&mpu_lock, flag);
>>>> +
>>>> +     return index;
>>>> +}
>>>
>>> Hmm, maybe OMAP5/DRA7 CPUidle driver should be a new one based on MCPM?
>> 
>> Trying to understand benefit of MCPM here - at least without a deeper
>> understanding of mcpm infrastructure benefits (first look seemed a
>> little heavy for OMAP5/DRA7 needs).
>> 
>> Neither DRA7/OMAP5 are multi-cluster, the SoCs are not targetted for
>> "OFF" of CPU1/0, we have mercury hardware to help with context and
>> sync issues.
>> 
>> Being able to reuse most of existing OMAP4 infrastructure code is
>> useful as well to leave the existing omap4 framework as being lighter
>> in complexity -esp in a cpuidle like hot path?
>> 
>> The spin_lock is only for the programming of MPU power domain in a
>> consistent manner - I suppose might have been the trigger for
>> proposing mcpm?
>> 
> Mostly not....
>
> I think this is coming because last time Nicolas Pitre tried to convert
> the OMAP CPUIdle into MCPM but because of various ordering requirements,
> OMAP wasn't suitable and then the plan was dropped later.
>
> Just to make clear, OMAP OMAP5/DRA7 as well the ordering requirement
> remains the same for deeper states. Its just the mercury retention state
> which we are able to enter without ordering requirements and hence
> the voting scheme.

Ah, OK.  This is the part that I'm missing.  So for deeper states you'll
need to be using omap_enter_idle_coupled()
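
For illustration only, a deeper state would then presumably be added as a
coupled entry in dra7_idle_driver rather than through the vote path - a
hypothetical sketch, with placeholder names and latency numbers that are
not measured for DRA7/OMAP5:

		{
			/* Hypothetical C3 - CPUx OFF + MPU OSWR (placeholders) */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_TIME_VALID
					| CPUIDLE_FLAG_COUPLED
					| CPUIDLE_FLAG_TIMER_STOP,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},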

> Hope this clarifies to you as well as Kevin just in case he missed the
> part of the deeper C-states requirements.

Yes, thanks for clarifying.

That being said, I think MCPM can now do essentially what the coupled
states code is doing. Even so, that's probably not a reason to hold up
this patch, but Daniel gets to make that call.

Kevin
Nishanth Menon Sept. 5, 2014, 9:18 p.m. UTC | #5
Daniel,

On 13:22-20140827, Kevin Hilman wrote:
> Santosh Shilimkar <santosh.shilimkar@ti.com> writes:
> 
> > On Wednesday 27 August 2014 03:35 PM, Nishanth Menon wrote:
> >> On Wed, Aug 27, 2014 at 2:13 PM, Kevin Hilman
> >> <khilman@deeprootsystems.com> wrote:
> >>> + Daniel (cpuidle maintainer)
> >> [...]
> >>>> +static int omap_enter_idle_smp(struct cpuidle_device *dev,
> >>>> +                            struct cpuidle_driver *drv,
> >>>> +                            int index)
> >>>> +{
> >>>> +     struct idle_statedata *cx = state_ptr + index;
> >>>> +     unsigned long flag;
> >>>> +
> >>>> +     raw_spin_lock_irqsave(&mpu_lock, flag);
> >>>> +     cx->mpu_state_vote++;
> >>>> +     if (cx->mpu_state_vote == num_online_cpus()) {
> >>>> +             pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
> >>>> +             omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
> >>>> +     }
> >>>> +     raw_spin_unlock_irqrestore(&mpu_lock, flag);
> >>>> +
> >>>> +     omap4_enter_lowpower(dev->cpu, cx->cpu_state);
> >>>> +
> >>>> +     raw_spin_lock_irqsave(&mpu_lock, flag);
> >>>> +     if (cx->mpu_state_vote == num_online_cpus())
> >>>> +             omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
> >>>> +     cx->mpu_state_vote--;
> >>>> +     raw_spin_unlock_irqrestore(&mpu_lock, flag);
> >>>> +
> >>>> +     return index;
> >>>> +}
> >>>
> >>> Hmm, maybe OMAP5/DRA7 CPUidle driver should be a new one based on MCPM?
> >> 
> >> Trying to understand benefit of MCPM here - at least without a deeper
> >> understanding of mcpm infrastructure benefits (first look seemed a
> >> little heavy for OMAP5/DRA7 needs).
> >> 
> >> Neither DRA7/OMAP5 are multi-cluster, the SoCs are not targetted for
> >> "OFF" of CPU1/0, we have mercury hardware to help with context and
> >> sync issues.
> >> 
> >> Being able to reuse most of existing OMAP4 infrastructure code is
> >> useful as well to leave the existing omap4 framework as being lighter
> >> in complexity -esp in a cpuidle like hot path?
> >> 
> >> The spin_lock is only for the programming of MPU power domain in a
> >> consistent manner - I suppose might have been the trigger for
> >> proposing mcpm?
> >> 
> > Mostly not....
> >
> > I think this is coming because last time Nicolas Pitre tried to convert
> > the OMAP CPUIdle into MCPM but because of various ordering requirements,
> > OMAP wasn't suitable and then the plan was dropped later.
> >
> > Just to make clear, OMAP OMAP5/DRA7 as well the ordering requirement
> > remains the same for deeper states. Its just the mercury retention state
> > which we are able to enter without ordering requirements and hence
> > the voting scheme.
> 
> Ah, OK.  This is the part that I'm missing.  So for deeper states you'll
> need to be using omap_enter_idle_coupled()
> 
> > Hope this clarifies to you as well as Kevin just in case he missed the
> > part of the deeper C-states requirements.
> 
> Yes, thanks for clarifying.
> 
> That being said, I think MCPM can now do essentially what the coupled
> states code is doing. Even so, that's probably not a reason to hold up
> this patch, but Daniel gets to make that call.


Gentle ping.. You can find the discussion and the patch here:
https://patchwork.kernel.org/patch/4764661/
Nishanth Menon Sept. 16, 2014, 4:34 p.m. UTC | #6
Daniel,

On 09/05/2014 04:18 PM, Nishanth Menon wrote:
> Daniel,
> 
> On 13:22-20140827, Kevin Hilman wrote:
>> Santosh Shilimkar <santosh.shilimkar@ti.com> writes:
>>
>>> On Wednesday 27 August 2014 03:35 PM, Nishanth Menon wrote:
>>>> On Wed, Aug 27, 2014 at 2:13 PM, Kevin Hilman
>>>> <khilman@deeprootsystems.com> wrote:
>>>>> + Daniel (cpuidle maintainer)
>>>> [...]
>>>>>> +static int omap_enter_idle_smp(struct cpuidle_device *dev,
>>>>>> +                            struct cpuidle_driver *drv,
>>>>>> +                            int index)
>>>>>> +{
>>>>>> +     struct idle_statedata *cx = state_ptr + index;
>>>>>> +     unsigned long flag;
>>>>>> +
>>>>>> +     raw_spin_lock_irqsave(&mpu_lock, flag);
>>>>>> +     cx->mpu_state_vote++;
>>>>>> +     if (cx->mpu_state_vote == num_online_cpus()) {
>>>>>> +             pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
>>>>>> +             omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
>>>>>> +     }
>>>>>> +     raw_spin_unlock_irqrestore(&mpu_lock, flag);
>>>>>> +
>>>>>> +     omap4_enter_lowpower(dev->cpu, cx->cpu_state);
>>>>>> +
>>>>>> +     raw_spin_lock_irqsave(&mpu_lock, flag);
>>>>>> +     if (cx->mpu_state_vote == num_online_cpus())
>>>>>> +             omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
>>>>>> +     cx->mpu_state_vote--;
>>>>>> +     raw_spin_unlock_irqrestore(&mpu_lock, flag);
>>>>>> +
>>>>>> +     return index;
>>>>>> +}
>>>>>
>>>>> Hmm, maybe OMAP5/DRA7 CPUidle driver should be a new one based on MCPM?
>>>>
>>>> Trying to understand benefit of MCPM here - at least without a deeper
>>>> understanding of mcpm infrastructure benefits (first look seemed a
>>>> little heavy for OMAP5/DRA7 needs).
>>>>
>>>> Neither DRA7/OMAP5 are multi-cluster, the SoCs are not targetted for
>>>> "OFF" of CPU1/0, we have mercury hardware to help with context and
>>>> sync issues.
>>>>
>>>> Being able to reuse most of existing OMAP4 infrastructure code is
>>>> useful as well to leave the existing omap4 framework as being lighter
>>>> in complexity -esp in a cpuidle like hot path?
>>>>
>>>> The spin_lock is only for the programming of MPU power domain in a
>>>> consistent manner - I suppose might have been the trigger for
>>>> proposing mcpm?
>>>>
>>> Mostly not....
>>>
>>> I think this is coming because last time Nicolas Pitre tried to convert
>>> the OMAP CPUIdle into MCPM but because of various ordering requirements,
>>> OMAP wasn't suitable and then the plan was dropped later.
>>>
>>> Just to make clear, OMAP OMAP5/DRA7 as well the ordering requirement
>>> remains the same for deeper states. Its just the mercury retention state
>>> which we are able to enter without ordering requirements and hence
>>> the voting scheme.
>>
>> Ah, OK.  This is the part that I'm missing.  So for deeper states you'll
>> need to be using omap_enter_idle_coupled()
>>
>>> Hope this clarifies to you as well as Kevin just in case he missed the
>>> part of the deeper C-states requirements.
>>
>> Yes, thanks for clarifying.
>>
>> That being said, I think MCPM can now do essentially what the coupled
>> states code is doing. Even so, that's probably not a reason to hold up
>> this patch, but Daniel gets to make that call.
> 
> 
> Gentle ping.. You can find the discussion and the patch here:
> https://patchwork.kernel.org/patch/4764661/
> 

Ping on this again.. we are pretty close to the v3.18 merge window and
this discussion hasn't gotten any further.
Daniel Lezcano Sept. 17, 2014, 6:49 p.m. UTC | #7
On 08/22/2014 07:02 AM, Nishanth Menon wrote:
> From: Santosh Shilimkar <santosh.shilimkar@ti.com>
>
> Add OMAP5/DRA74/72 CPUIDLE support.
>
> This patch adds MPUSS low power states in cpuidle.
>
>          C1 - CPU0 WFI + CPU1 WFI + MPU ON
>          C2 - CPU0 RET + CPU1 RET + MPU CSWR
>
> Tested on DRA74/72-EVM for C1 and C2 states.
>
> NOTE: DRA7 does not do voltage scaling as part of retention transition
> and has Mercury which speeds up transition paths - Latency numbers are
> based on measurements done by toggling GPIOs.
>
> Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
> [ j-keerthy@ti.com rework on 3.14]
> Signed-off-by: Keerthy <j-keerthy@ti.com>
> [nm@ti.com: updates based on profiling, OMAP5 squashed]
> Signed-off-by: Nishanth Menon <nm@ti.com>
> ---
>   arch/arm/mach-omap2/cpuidle44xx.c |   82 ++++++++++++++++++++++++++++++++++++-
>   arch/arm/mach-omap2/pm44xx.c      |    2 +-
>   2 files changed, 82 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
> index 2498ab0..8ad4f44 100644
> --- a/arch/arm/mach-omap2/cpuidle44xx.c
> +++ b/arch/arm/mach-omap2/cpuidle44xx.c
> @@ -22,6 +22,7 @@
>   #include "common.h"
>   #include "pm.h"
>   #include "prm.h"
> +#include "soc.h"
>   #include "clockdomain.h"
>
>   #define MAX_CPUS	2
> @@ -31,6 +32,7 @@ struct idle_statedata {
>   	u32 cpu_state;
>   	u32 mpu_logic_state;
>   	u32 mpu_state;
> +	u32 mpu_state_vote;
>   };
>
>   static struct idle_statedata omap4_idle_data[] = {
> @@ -51,12 +53,26 @@ static struct idle_statedata omap4_idle_data[] = {
>   	},
>   };
>
> +static struct idle_statedata dra7_idle_data[] = {
> +	{
> +		.cpu_state = PWRDM_POWER_ON,
> +		.mpu_state = PWRDM_POWER_ON,
> +		.mpu_logic_state = PWRDM_POWER_ON,
> +	},
> +	{
> +		.cpu_state = PWRDM_POWER_RET,
> +		.mpu_state = PWRDM_POWER_RET,
> +		.mpu_logic_state = PWRDM_POWER_RET,
> +	},
> +};
> +
>   static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
>   static struct clockdomain *cpu_clkdm[MAX_CPUS];
>
>   static atomic_t abort_barrier;
>   static bool cpu_done[MAX_CPUS];
>   static struct idle_statedata *state_ptr = &omap4_idle_data[0];
> +static DEFINE_RAW_SPINLOCK(mpu_lock);
>
>   /* Private functions */
>
> @@ -78,6 +94,32 @@ static int omap_enter_idle_simple(struct cpuidle_device *dev,
>   	return index;
>   }
>
> +static int omap_enter_idle_smp(struct cpuidle_device *dev,
> +			       struct cpuidle_driver *drv,
> +			       int index)
> +{
> +	struct idle_statedata *cx = state_ptr + index;
> +	unsigned long flag;
> +
> +	raw_spin_lock_irqsave(&mpu_lock, flag);

Why do you need this spin_lock_irqsave? Aren't the local irqs already 
disabled?

> +	cx->mpu_state_vote++;
> +	if (cx->mpu_state_vote == num_online_cpus()) {
> +		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
> +		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
> +	}
> +	raw_spin_unlock_irqrestore(&mpu_lock, flag);
> +
> +	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
> +
> +	raw_spin_lock_irqsave(&mpu_lock, flag);
> +	if (cx->mpu_state_vote == num_online_cpus())
> +		omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
> +	cx->mpu_state_vote--;
> +	raw_spin_unlock_irqrestore(&mpu_lock, flag);

I am not sure that will work. What happens if a cpu exits idle and then 
re-enters idle immediately?

Could you try a long run of this little program:

https://git.linaro.org/power/pm-qa.git/blob/HEAD:/cpuidle/cpuidle_killer.c

> +	return index;
> +}
> +
>   static int omap_enter_idle_coupled(struct cpuidle_device *dev,
>   			struct cpuidle_driver *drv,
>   			int index)
> @@ -224,6 +266,34 @@ static struct cpuidle_driver omap4_idle_driver = {
>   	.safe_state_index = 0,
>   };
>
> +static struct cpuidle_driver dra7_idle_driver = {
> +	.name				= "dra7_idle",
> +	.owner				= THIS_MODULE,
> +	.states = {
> +		{
> +			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
> +			.exit_latency = 2 + 2,
> +			.target_residency = 5,
> +			.flags = CPUIDLE_FLAG_TIME_VALID,
> +			.enter = omap_enter_idle_simple,
> +			.name = "C1",
> +			.desc = "CPUx WFI, MPUSS ON"
> +		},
> +		{
> +			/* C2 - CPU0 RET + CPU1 RET + MPU CSWR */
> +			.exit_latency = 48 + 60,
> +			.target_residency = 100,
> +			.flags = CPUIDLE_FLAG_TIME_VALID
> +					| CPUIDLE_FLAG_TIMER_STOP,
> +			.enter = omap_enter_idle_smp,
> +			.name = "C2",
> +			.desc = "CPUx CSWR, MPUSS CSWR",
> +		},
> +	},
> +	.state_count = ARRAY_SIZE(dra7_idle_data),
> +	.safe_state_index = 0,
> +};
> +
>   /* Public functions */
>
>   /**
> @@ -234,6 +304,16 @@ static struct cpuidle_driver omap4_idle_driver = {
>    */
>   int __init omap4_idle_init(void)
>   {
> +	struct cpuidle_driver *idle_driver;
> +
> +	if (soc_is_dra7xx() || soc_is_omap54xx()) {
> +		state_ptr = &dra7_idle_data[0];
> +		idle_driver = &dra7_idle_driver;
> +	} else {
> +		state_ptr = &omap4_idle_data[0];
> +		idle_driver = &omap4_idle_driver;
> +	}
> +
>   	mpu_pd = pwrdm_lookup("mpu_pwrdm");
>   	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
>   	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
> @@ -248,5 +328,5 @@ int __init omap4_idle_init(void)
>   	/* Configure the broadcast timer on each cpu */
>   	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
>
> -	return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
> +	return cpuidle_register(idle_driver, cpu_online_mask);
>   }
> diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
> index c063833..1d22162 100644
> --- a/arch/arm/mach-omap2/pm44xx.c
> +++ b/arch/arm/mach-omap2/pm44xx.c
> @@ -293,7 +293,7 @@ int __init omap4_pm_init(void)
>   	/* Overwrite the default cpu_do_idle() */
>   	arm_pm_idle = omap_default_idle;
>
> -	if (cpu_is_omap44xx())
> +	if (cpu_is_omap44xx() || soc_is_dra7xx() || soc_is_omap54xx())
>   		omap4_idle_init();
>
>   err2:
>
Santosh Shilimkar Sept. 17, 2014, 11:20 p.m. UTC | #8
Sorry for the format. Emailing from webmail.
Daniel Lezcano Sept. 18, 2014, 12:22 a.m. UTC | #9
On 09/17/2014 04:20 PM, Shilimkar, Santosh wrote:
> Sorry for the format. Emailing from webmail.
> ________________________________________

[ ... ]

>> +static int omap_enter_idle_smp(struct cpuidle_device *dev,
>> +                            struct cpuidle_driver *drv,
>> +                            int index)
>> +{
>> +     struct idle_statedata *cx = state_ptr + index;
>> +     unsigned long flag;
>> +
>> +     raw_spin_lock_irqsave(&mpu_lock, flag);
>
> Why do you need this spin_lock_irqsave ? Aren't the local irqs already
> disabled ?
>
> [Santosh] Actually at one point in time, before the idle consolidation, the local
> irq disable was inside the idle drivers. Now that it has moved to the core layer,
> I think plain spin_lock/unlock() should work.

ok.
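
i.e. a minimal sketch of the simplification being agreed on here, assuming
the cpuidle core has indeed disabled local irqs before calling ->enter():

	/* vote for MPU CSWR; irqs are already off, so no irqsave needed */
	raw_spin_lock(&mpu_lock);
	cx->mpu_state_vote++;
	if (cx->mpu_state_vote == num_online_cpus()) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
	}
	raw_spin_unlock(&mpu_lock);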

>> +     cx->mpu_state_vote++;
>> +     if (cx->mpu_state_vote == num_online_cpus()) {
>> +             pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
>> +             omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
>> +     }
>> +     raw_spin_unlock_irqrestore(&mpu_lock, flag);
>> +
>> +     omap4_enter_lowpower(dev->cpu, cx->cpu_state);
>> +
>> +     raw_spin_lock_irqsave(&mpu_lock, flag);
>> +     if (cx->mpu_state_vote == num_online_cpus())
>> +             omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
>> +     cx->mpu_state_vote--;
>> +     raw_spin_unlock_irqrestore(&mpu_lock, flag);
>
> I am not sure that will work. What happens if a cpu exits idle and then
> re-enter idle immediately ?
>
> [Santosh] It works, and that case is already taken care of. The CPU exits idle and then votes
> out of the cluster state, and if it re-enters with the right targeted state, the cluster state will
> be picked.

Isn't it possible to have one cpu disabling the coherency while the 
other one is waiting for the lock? E.g. cpu0 is in WFI and cpu1 is the 
last one entering idle. While cpu1 is entering 'lowpower', cpu0 exits 
WFI, checks the state vote and sets the power domain to ON. In the meantime 
cpu1 disables the coherency, and cpu0 decreases the vote and releases the 
lock. Could it be that there is a very small racy window here?

> Could you try a long run of this little program:
>
> https://git.linaro.org/power/pm-qa.git/blob/HEAD:/cpuidle/cpuidle_killer.c
>
> [Santosh] I am sure there will not be any issue with the long run test case here.
> Lets see if Nishant sees anything otherwise

Ok. Make sure the cpu is effectively entering your C2 state with the 
sleep duration in the test program.
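
For example, a quick hypothetical userspace check (assuming the standard
sysfs cpuidle layout, with C2 exposed as state2) is to compare the per-CPU
state2 usage/time counters before and after the run:

	#include <stdio.h>

	int main(void)
	{
		static const char * const attrs[] = { "name", "usage", "time" };
		char path[128], buf[64];
		int cpu, i;

		for (cpu = 0; cpu < 2; cpu++) {
			for (i = 0; i < 3; i++) {
				FILE *f;

				snprintf(path, sizeof(path),
					 "/sys/devices/system/cpu/cpu%d/cpuidle/state2/%s",
					 cpu, attrs[i]);
				f = fopen(path, "r");
				if (!f)
					continue;
				if (fgets(buf, sizeof(buf), f))
					printf("cpu%d state2 %s: %s",
					       cpu, attrs[i], buf);
				fclose(f);
			}
		}
		return 0;
	}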
Santosh Shilimkar Sept. 18, 2014, 12:42 a.m. UTC | #10

Nishanth Menon Sept. 18, 2014, 1:41 p.m. UTC | #11
On 09/17/2014 07:22 PM, Daniel Lezcano wrote:
> On 09/17/2014 04:20 PM, Shilimkar, Santosh wrote:
[...]
>> Could you try a long run of this little program:
>>
>> https://git.linaro.org/power/pm-qa.git/blob/HEAD:/cpuidle/cpuidle_killer.c
>>
>> [Santosh] I am sure there will not be any issue with the long run test case here.
>> Lets see if Nishant sees anything otherwise
> 
> Ok. Make sure the cpu is effectively entering your C2 state with the 
> sleep duration in the test program.

Test kernel:
https://github.com/nmenon/linux-2.6-playground/commits/testing/tmlind-test-suspend-resume
(I decided to merge in various send for pull branches from maintainers
and apply cpuidle on top)..

Controlled test run as follows on 4 different impacted platforms and 1
platform as legacy reference.

What we are looking for is
> cpu1_pwrdm (ON),OFF:0,RET:2677,INA:0,ON:2678,RET-LOGIC-OFF:0,RET-MEMBANK1-OFF:0
RET:2677 indicates that CPU1 hit C2
> cpu0_pwrdm (ON),OFF:0,RET:2677,INA:0,ON:2678,RET-LOGIC-OFF:0,RET-MEMBANK1-OFF:0
RET:2677 indicates that CPU0 hit C2
> mpu_pwrdm (ON),OFF:0,RET:2667,INA:0,ON:2668,RET-LOGIC-OFF:0,RET-MEMBANK1-OFF:2667,RET-MEMBANK2-OFF:0
RET:2667 indicates that CPU0 and CPU1 managed to achieve RET together;
otherwise, due to the hardware constraints in place, the MPU power domain
will fail to transition.

What I see in all cases below is that transitions do take place (C2 is
successfully hit).

Test #1: 120 seconds:
CMD: set -x;uname -a;cat /sys/kernel/debug/pm_debug/count;sleep
1;./cpuidle_killer_120;sleep 1;cat /sys/kernel/debug/pm_debug/count;set +x

OMAP4 Panda-ES: (2 a9) - not impacted as part of this patch - just
base test vector
http://fpaste.org/134547/14110454/

OMAP5 uEVM: (2 a15)
http://fpaste.org/134546/10454181/

DRA74x: (2 a15)
http://fpaste.org/134543/11045286/

DRA72: (2 a15)
http://fpaste.org/134544/11045335/

AM572x(DRA74x variant): (2 A15)
http://fpaste.org/134545/10453761/


Test #2: 1200 seconds: (http://fpaste.org/134564/47289141/)
CMD: set -x;uname -a;cat /sys/kernel/debug/pm_debug/count;sleep
1;./cpuidle_killer_1200;sleep 1;cat
/sys/kernel/debug/pm_debug/count;set +x

OMAP4 Panda-ES: (2 a9) - not impacted as part of this patch - just
base test vector
http://fpaste.org/134563/41104728/

OMAP5 uEVM: (2 a15)
http://fpaste.org/134562/47221141/

DRA74x EVM: (2 a15)
http://fpaste.org/134559/11047098/

DRA72 EVM: (2 a15)
http://fpaste.org/134560/11047151/

AM572x EVM: (2 A15)
http://fpaste.org/134561/47189141/
Nishanth Menon Sept. 18, 2014, 1:50 p.m. UTC | #12
On 09/18/2014 08:41 AM, Nishanth Menon wrote:
> On 09/17/2014 07:22 PM, Daniel Lezcano wrote:
>> On 09/17/2014 04:20 PM, Shilimkar, Santosh wrote:
> [...]
>>> Could you try a long run of this little program:
>>>
>>> https://git.linaro.org/power/pm-qa.git/blob/HEAD:/cpuidle/cpuidle_killer.c
>>>
>>> [Santosh] I am sure there will not be any issue with the long run test case here.
>>> Lets see if Nishant sees anything otherwise
>>
>> Ok. Make sure the cpu is effectively entering your C2 state with the 
>> sleep duration in the test program.
> 
> Test kernel:
> https://github.com/nmenon/linux-2.6-playground/commits/testing/tmlind-test-suspend-resume
> (I decided to merge in various send for pull branches from maintainers
> and apply cpuidle on top)..
> 
> Controlled test run as follows on 4 different impacted platforms and 1
> platform as legacy reference.
> 
> What we are looking for is
>> cpu1_pwrdm (ON),OFF:0,RET:2677,INA:0,ON:2678,RET-LOGIC-OFF:0,RET-MEMBANK1-OFF:0
> RET:2677 indicated CPU1 hit C2
>> cpu0_pwrdm (ON),OFF:0,RET:2677,INA:0,ON:2678,RET-LOGIC-OFF:0,RET-MEMBANK1-OFF:0
> RET:2677 indicated CPU0 hit C2
>> mpu_pwrdm (ON),OFF:0,RET:2667,INA:0,ON:2668,RET-LOGIC-OFF:0,RET-MEMBANK1-OFF:2667,RET-MEMBANK2-OFF:0
> RET:2667 indicates that CPU0 and CPU1 managed to achieve RET together,
> else by hardware constraints in place, MPU power domain will fail to
> transition.
> 
> What I see in all cases below is that transitions do take place (C2 is
> successfully hit).
> 
> Test #1: 120 seconds:
> CMD: set -x;uname -a;cat /sys/kernel/debug/pm_debug/count;sleep
> 1;./cpuidle_killer_120;sleep 1;cat /sys/kernel/debug/pm_debug/count;set +x
> 
> OMAP4 Panda-ES: (2 a9) - not impacted as part of this patch - just
> base test vector
> http://fpaste.org/134547/14110454/
> 
> OMAP5 uEVM: (2 a15)
> http://fpaste.org/134546/10454181/
> 
> DRA74x: (2 a15)
> http://fpaste.org/134543/11045286/
> 
> DRA72: (2 a15)
	  ^^
Correction should have been 1 a15

> http://fpaste.org/134544/11045335/
> 
> AM572x(DRA74x variant): (2 A15)
> http://fpaste.org/134545/10453761/
> 
> 
> Test #2: 1200 seconds: (http://fpaste.org/134564/47289141/)
> CMD: set -x;uname -a;cat /sys/kernel/debug/pm_debug/count;sleep
> 1;./cpuidle_killer_1200;sleep 1;cat
> /sys/kernel/debug/pm_debug/count;set +x
> 
> OMAP4 Panda-ES: (2 a9) - not impacted as part of this patch - just
> base test vector
> http://fpaste.org/134563/41104728/
> 
> OMAP5 uEVM: (2 a15)
> http://fpaste.org/134562/47221141/
> 
> DRA74x EVM: (2 a15)
> http://fpaste.org/134559/11047098/
> 
> DRA72 EVM: (2 a15)
	      ^^
Correction should have been 1 a15

> http://fpaste.org/134560/11047151/
> 
> AM572x EVM: (2 A15)
> http://fpaste.org/134561/47189141/
> 
>
Nishanth Menon Sept. 22, 2014, 1:02 p.m. UTC | #13
On 09/18/2014 08:50 AM, Nishanth Menon wrote:
> On 09/18/2014 08:41 AM, Nishanth Menon wrote:
>> On 09/17/2014 07:22 PM, Daniel Lezcano wrote:
>>> On 09/17/2014 04:20 PM, Shilimkar, Santosh wrote:
>> [...]
>>>> Could you try a long run of this little program:
>>>>
>>>> https://git.linaro.org/power/pm-qa.git/blob/HEAD:/cpuidle/cpuidle_killer.c
>>>>
>>>> [Santosh] I am sure there will not be any issue with the long run test case here.
>>>> Lets see if Nishant sees anything otherwise
>>>
>>> Ok. Make sure the cpu is effectively entering your C2 state with the
>>> sleep duration in the test program.
>>
>> Test kernel:
>> https://github.com/nmenon/linux-2.6-playground/commits/testing/tmlind-test-suspend-resume
>> (I decided to merge in various send for pull branches from maintainers
>> and apply cpuidle on top)..
>>
>> Controlled test run as follows on 4 different impacted platforms and 1
>> platform as legacy reference.
>>
>> What we are looking for is
>>> cpu1_pwrdm (ON),OFF:0,RET:2677,INA:0,ON:2678,RET-LOGIC-OFF:0,RET-MEMBANK1-OFF:0
>> RET:2677 indicated CPU1 hit C2
>>> cpu0_pwrdm (ON),OFF:0,RET:2677,INA:0,ON:2678,RET-LOGIC-OFF:0,RET-MEMBANK1-OFF:0
>> RET:2677 indicated CPU0 hit C2
>>> mpu_pwrdm (ON),OFF:0,RET:2667,INA:0,ON:2668,RET-LOGIC-OFF:0,RET-MEMBANK1-OFF:2667,RET-MEMBANK2-OFF:0
>> RET:2667 indicates that CPU0 and CPU1 managed to achieve RET together,
>> else by hardware constraints in place, MPU power domain will fail to
>> transition.
>>
>> What I see in all cases below is that transitions do take place (C2 is
>> successfully hit).
>>
>> Test #1: 120 seconds:
>> CMD: set -x;uname -a;cat /sys/kernel/debug/pm_debug/count;sleep
>> 1;./cpuidle_killer_120;sleep 1;cat /sys/kernel/debug/pm_debug/count;set +x
>>
>> OMAP4 Panda-ES: (2 a9) - not impacted as part of this patch - just
>> base test vector
>> http://fpaste.org/134547/14110454/
>>
>> OMAP5 uEVM: (2 a15)
>> http://fpaste.org/134546/10454181/
>>
>> DRA74x: (2 a15)
>> http://fpaste.org/134543/11045286/
>>
>> DRA72: (2 a15)
> 	  ^^
> Correction should have been 1 a15
>
>> http://fpaste.org/134544/11045335/
>>
>> AM572x(DRA74x variant): (2 A15)
>> http://fpaste.org/134545/10453761/
>>
>>
>> Test #2: 1200 seconds: (http://fpaste.org/134564/47289141/)
>> CMD: set -x;uname -a;cat /sys/kernel/debug/pm_debug/count;sleep
>> 1;./cpuidle_killer_1200;sleep 1;cat
>> /sys/kernel/debug/pm_debug/count;set +x
>>
>> OMAP4 Panda-ES: (2 a9) - not impacted as part of this patch - just
>> base test vector
>> http://fpaste.org/134563/41104728/
>>
>> OMAP5 uEVM: (2 a15)
>> http://fpaste.org/134562/47221141/
>>
>> DRA74x EVM: (2 a15)
>> http://fpaste.org/134559/11047098/
>>
>> DRA72 EVM: (2 a15)
> 	      ^^
> Correction should have been 1 a15
>
>> http://fpaste.org/134560/11047151/
>>
>> AM572x EVM: (2 A15)
>> http://fpaste.org/134561/47189141/
>>
>>

Daniel, Santosh: Gentle ping:

Any further comments? Or are we OK with this being merged?


---
Regards,
Nishanth Menon

Nishanth Menon Sept. 22, 2014, 1:17 p.m. UTC | #14
Daniel,
On 08:02-20140922, Nishanth Menon wrote:
[...]
> >>Test #2: 1200 seconds: (http://fpaste.org/134564/47289141/)

Santosh wanted me to make certain that the following is noted:
^^ -> that is around 20 mins -> so highlighting it again.. I assume this is
a long enough test duration for what you requested.

> >>CMD: set -x;uname -a;cat /sys/kernel/debug/pm_debug/count;sleep
> >>1;./cpuidle_killer_1200;sleep 1;cat
> >>/sys/kernel/debug/pm_debug/count;set +x
> >>
> >>OMAP4 Panda-ES: (2 a9) - not impacted as part of this patch - just
> >>base test vector
> >>http://fpaste.org/134563/41104728/
> >>
> >>OMAP5 uEVM: (2 a15)
> >>http://fpaste.org/134562/47221141/
> >>
> >>DRA74x EVM: (2 a15)
> >>http://fpaste.org/134559/11047098/
> >>
> >>DRA72 EVM: (2 a15)
> >	      ^^
> >Correction should have been 1 a15
> >
> >>http://fpaste.org/134560/11047151/
> >>
> >>AM572x EVM: (2 A15)
> >>http://fpaste.org/134561/47189141/
> >>
> >>
> 
> Daniel, Santosh: Gentle ping:
> 
> Any further comments? OR are we ok for this for being merged?
>

Patch

diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 2498ab0..8ad4f44 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -22,6 +22,7 @@ 
 #include "common.h"
 #include "pm.h"
 #include "prm.h"
+#include "soc.h"
 #include "clockdomain.h"
 
 #define MAX_CPUS	2
@@ -31,6 +32,7 @@  struct idle_statedata {
 	u32 cpu_state;
 	u32 mpu_logic_state;
 	u32 mpu_state;
+	u32 mpu_state_vote;
 };
 
 static struct idle_statedata omap4_idle_data[] = {
@@ -51,12 +53,26 @@  static struct idle_statedata omap4_idle_data[] = {
 	},
 };
 
+static struct idle_statedata dra7_idle_data[] = {
+	{
+		.cpu_state = PWRDM_POWER_ON,
+		.mpu_state = PWRDM_POWER_ON,
+		.mpu_logic_state = PWRDM_POWER_ON,
+	},
+	{
+		.cpu_state = PWRDM_POWER_RET,
+		.mpu_state = PWRDM_POWER_RET,
+		.mpu_logic_state = PWRDM_POWER_RET,
+	},
+};
+
 static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
 static struct clockdomain *cpu_clkdm[MAX_CPUS];
 
 static atomic_t abort_barrier;
 static bool cpu_done[MAX_CPUS];
 static struct idle_statedata *state_ptr = &omap4_idle_data[0];
+static DEFINE_RAW_SPINLOCK(mpu_lock);
 
 /* Private functions */
 
@@ -78,6 +94,32 @@  static int omap_enter_idle_simple(struct cpuidle_device *dev,
 	return index;
 }
 
+static int omap_enter_idle_smp(struct cpuidle_device *dev,
+			       struct cpuidle_driver *drv,
+			       int index)
+{
+	struct idle_statedata *cx = state_ptr + index;
+	unsigned long flag;
+
+	raw_spin_lock_irqsave(&mpu_lock, flag);
+	cx->mpu_state_vote++;
+	if (cx->mpu_state_vote == num_online_cpus()) {
+		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+	}
+	raw_spin_unlock_irqrestore(&mpu_lock, flag);
+
+	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+
+	raw_spin_lock_irqsave(&mpu_lock, flag);
+	if (cx->mpu_state_vote == num_online_cpus())
+		omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
+	cx->mpu_state_vote--;
+	raw_spin_unlock_irqrestore(&mpu_lock, flag);
+
+	return index;
+}
+
 static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
 			int index)
@@ -224,6 +266,34 @@  static struct cpuidle_driver omap4_idle_driver = {
 	.safe_state_index = 0,
 };
 
+static struct cpuidle_driver dra7_idle_driver = {
+	.name				= "dra7_idle",
+	.owner				= THIS_MODULE,
+	.states = {
+		{
+			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
+			.exit_latency = 2 + 2,
+			.target_residency = 5,
+			.flags = CPUIDLE_FLAG_TIME_VALID,
+			.enter = omap_enter_idle_simple,
+			.name = "C1",
+			.desc = "CPUx WFI, MPUSS ON"
+		},
+		{
+			/* C2 - CPU0 RET + CPU1 RET + MPU CSWR */
+			.exit_latency = 48 + 60,
+			.target_residency = 100,
+			.flags = CPUIDLE_FLAG_TIME_VALID
+					| CPUIDLE_FLAG_TIMER_STOP,
+			.enter = omap_enter_idle_smp,
+			.name = "C2",
+			.desc = "CPUx CSWR, MPUSS CSWR",
+		},
+	},
+	.state_count = ARRAY_SIZE(dra7_idle_data),
+	.safe_state_index = 0,
+};
+
 /* Public functions */
 
 /**
@@ -234,6 +304,16 @@  static struct cpuidle_driver omap4_idle_driver = {
  */
 int __init omap4_idle_init(void)
 {
+	struct cpuidle_driver *idle_driver;
+
+	if (soc_is_dra7xx() || soc_is_omap54xx()) {
+		state_ptr = &dra7_idle_data[0];
+		idle_driver = &dra7_idle_driver;
+	} else {
+		state_ptr = &omap4_idle_data[0];
+		idle_driver = &omap4_idle_driver;
+	}
+
 	mpu_pd = pwrdm_lookup("mpu_pwrdm");
 	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
 	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
@@ -248,5 +328,5 @@  int __init omap4_idle_init(void)
 	/* Configure the broadcast timer on each cpu */
 	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
 
-	return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
+	return cpuidle_register(idle_driver, cpu_online_mask);
 }
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index c063833..1d22162 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -293,7 +293,7 @@  int __init omap4_pm_init(void)
 	/* Overwrite the default cpu_do_idle() */
 	arm_pm_idle = omap_default_idle;
 
-	if (cpu_is_omap44xx())
+	if (cpu_is_omap44xx() || soc_is_dra7xx() || soc_is_omap54xx())
 		omap4_idle_init();
 
 err2: